org.apache.flink.runtime.jobgraph.JobGraph

Here are examples of the Java API org.apache.flink.runtime.jobgraph.JobGraph, taken from open source projects. The JobGraph is the dataflow representation that Flink clients build from a program and submit to the cluster for execution; the examples below show how it is constructed, configured, serialized, and deployed.

420 Examples
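
To ground the listing, here is a minimal sketch of building a JobGraph by hand. It assumes the runtime test class NoOpInvokable that many of the examples below also use; the vertex and job names are illustrative:

import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.jobgraph.JobVertex;
import org.apache.flink.runtime.testtasks.NoOpInvokable;

// a single-vertex job graph, in the style of the ITCase examples below
JobVertex vertex = new JobVertex("my-vertex");
vertex.setInvokableClass(NoOpInvokable.class);
vertex.setParallelism(1);
JobGraph jobGraph = new JobGraph("my-job", vertex);
System.out.println(jobGraph.getJobID());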

19 Source : CompilationResult.java
with Apache License 2.0
from uber-archive

public class CompilationResult implements Serializable {

    private JobGraph jobGraph;

    private Throwable remoteThrowable;

    public JobGraph jobGraph() {
        return jobGraph;
    }

    void jobGraph(JobGraph jobGraph) {
        this.jobGraph = jobGraph;
    }

    void remoteThrowable(Throwable e) {
        this.remoteThrowable = e;
    }

    public Throwable remoteThrowable() {
        return remoteThrowable;
    }

    byte[] serialize() {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ObjectOutputStream os = new ObjectOutputStream(bos)) {
            os.writeObject(this);
        } catch (IOException e) {
            // serialization failed; callers must handle a null result
            return null;
        }
        return bos.toByteArray();
    }
}
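
The class above writes itself with plain Java serialization but omits the read side. A hedged counterpart could look like the following; the deserialize helper name is an illustration, not taken from the source:

static CompilationResult deserialize(byte[] data) {
    try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(data))) {
        // mirror image of serialize(): reconstruct the object, null on failure
        return (CompilationResult) in.readObject();
    } catch (IOException | ClassNotFoundException e) {
        return null;
    }
}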

19 Source : JobDeployer.java
with Apache License 2.0
from uber-archive

@VisibleForTesting
void start(AthenaXYarnClusterDescriptor descriptor, JobGraph job) throws Exception {
    ClusterClient<ApplicationId> client = descriptor.deploy();
    try {
        client.runDetached(job, null);
        stopAfterJob(client, job.getJobID());
    } finally {
        client.shutdown();
    }
}

19 Source : JobDeployer.java
with Apache License 2.0
from uber-archive

void start(JobGraph job, JobConf desc) throws Exception {
    AthenaXYarnClusterDescriptor descriptor = new AthenaXYarnClusterDescriptor(clusterConf, yarnClient, flinkConf, desc);
    start(descriptor, job);
}

19 Source : AthenaXYarnClusterDescriptor.java
with Apache License 2.0
from uber-archive

@Override
public ClusterClient<ApplicationId> deployJobCluster(ClusterSpecification clusterSpecification, JobGraph jobGraph, boolean b) throws ClusterDeploymentException {
    return null;
}

19 Source : JobSubmissionFailsITCase.java
with Apache License 2.0
from ljygz

@Nonnull
private static JobGraph getJobGraphWithMissingBlobKey() {
    final JobGraph jobGraph = getWorkingJobGraph();
    jobGraph.addUserJarBlobKey(new PermanentBlobKey());
    return jobGraph;
}

19 Source : ResumeCheckpointManuallyITCase.java
with Apache License 2.0
from ljygz

private static String runJobAndGetExternalizedCheckpoint(StateBackend backend, File checkpointDir, @Nullable String externalCheckpoint, ClusterClient<?> client) throws Exception {
    JobGraph initialJobGraph = getJobGraph(backend, externalCheckpoint);
    NotifyingInfiniteTupleSource.countDownLatch = new CountDownLatch(PARALLELISM);
    client.submitJob(initialJobGraph, ResumeCheckpointManuallyITCase.class.getClassLoader());
    // wait until all sources have been started
    NotifyingInfiniteTupleSource.countDownLatch.await();
    waitUntilExternalizedCheckpointCreated(checkpointDir, initialJobGraph.getJobID());
    client.cancel(initialJobGraph.getJobID());
    waitUntilCanceled(initialJobGraph.getJobID(), client);
    return getExternalizedCheckpointCheckpointPath(checkpointDir, initialJobGraph.getJobID());
}

19 Source : AccumulatorLiveITCase.java
with Apache License 2.0
from ljygz

@Test
public void testStreaming() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    DataStream<Integer> input = env.fromCollection(inputData);
    input.flatMap(new NotifyingMapper()).writeUsingOutputFormat(new DummyOutputFormat()).disableChaining();
    JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    submitJobAndVerifyResults(jobGraph);
}

19 Source : AccumulatorLiveITCase.java
with Apache License 2.0
from ljygz

@Test
public void testBatch() throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    DataSet<Integer> input = env.fromCollection(inputData);
    input.flatMap(new NotifyingMapper()).output(new DummyOutputFormat());
    // Extract job graph and set job id for the task to notify of accumulator changes.
    JobGraph jobGraph = getJobGraph(env.createProgramPlan());
    submitJobAndVerifyResults(jobGraph);
}

19 Source : ProgramDeployer.java
with Apache License 2.0
from ljygz

/**
 * Deploys a job. Depending on the deployment creates a new job cluster. It saves the cluster id in
 * the result and blocks until job completion.
 */
private <T> void deployJob(ExecutionContext<T> context, JobGraph jobGraph, Result<T> result) {
    // create or retrieve cluster and deploy job
    try (final ClusterDescriptor<T> clusterDescriptor = context.createClusterDescriptor()) {
        try {
            // new cluster
            if (context.getClusterId() == null) {
                deployJobOnNewCluster(clusterDescriptor, jobGraph, result, context.getClassLoader());
            } else {
                // reuse existing cluster
                deployJobOnExistingCluster(context.getClusterId(), clusterDescriptor, jobGraph, result);
            }
        } catch (Exception e) {
            throw new SqlExecutionException("Could not retrieve or create a cluster.", e);
        }
    } catch (SqlExecutionException e) {
        throw e;
    } catch (Exception e) {
        throw new SqlExecutionException("Could not locate a cluster.", e);
    }
}

19 Source : LocalExecutor.java
with Apache License 2.0
from ljygz

private <C> ProgramTargetDescriptor executeUpdateInternal(ExecutionContext<C> context, String statement) {
    final ExecutionContext.EnvironmentInstance envInst = context.createEnvironmentInstance();
    applyUpdate(context, envInst.getTableEnvironment(), envInst.getQueryConfig(), statement);
    // create job graph with dependencies
    final String jobName = context.getSessionContext().getName() + ": " + statement;
    final JobGraph jobGraph;
    try {
        jobGraph = envInst.createJobGraph(jobName);
    } catch (Throwable t) {
        // catch everything such that the statement does not crash the executor
        throw new SqlExecutionException("Invalid SQL statement.", t);
    }
    // create execution
    final BasicResult<C> result = new BasicResult<>();
    final ProgramDeployer<C> deployer = new ProgramDeployer<>(context, jobName, jobGraph, result, false);
    // blocking deployment
    deployer.run();
    return ProgramTargetDescriptor.of(result.getClusterId(), jobGraph.getJobID(), result.getWebInterfaceUrl());
}

19 Source : StreamingJobGraphGeneratorTest.java
with Apache License 2.0
from ljygz

/**
 * Tests that disabled checkpointing sets the checkpointing interval to Long.MAX_VALUE.
 */
@Test
public void testDisabledCheckpointing() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamGraph streamGraph = new StreamGraph(env);
    replacedertFalse("Checkpointing enabled", streamGraph.getCheckpointConfig().isCheckpointingEnabled());
    JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);
    JobCheckpointingSettings snapshottingSettings = jobGraph.getCheckpointingSettings();
    assertEquals(Long.MAX_VALUE, snapshottingSettings.getCheckpointCoordinatorConfiguration().getCheckpointInterval());
}
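
For contrast, a sketch of the enabled case, assuming the standard StreamExecutionEnvironment#enableCheckpointing API; the 100 ms interval is illustrative:

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.enableCheckpointing(100);  // checkpoint every 100 ms
env.fromElements(1, 2, 3).print();
JobGraph jobGraph = env.getStreamGraph().getJobGraph();
// the configured interval is carried into the JobGraph's checkpointing settings
assertEquals(100L, jobGraph.getCheckpointingSettings().getCheckpointCoordinatorConfiguration().getCheckpointInterval());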

19 Source : JarRunHandlerParameterTest.java
with Apache License 2.0
from ljygz

@Override
JobGraph validateGraph() {
    JobGraph jobGraph = super.validateGraph();
    final SavepointRestoreSettings savepointRestoreSettings = jobGraph.getSavepointRestoreSettings();
    Assert.assertTrue(savepointRestoreSettings.allowNonRestoredState());
    Assert.assertEquals(RESTORE_PATH, savepointRestoreSettings.getRestorePath());
    return jobGraph;
}

19 Source : JarHandlerParameterTest.java
with Apache License 2.0
from ljygz

JobGraph validateGraph() {
    JobGraph jobGraph = LAST_SUBMITTED_JOB_GRAPH_REFERENCE.getAndSet(null);
    Assert.assertArrayEquals(PROG_ARGS, ParameterProgram.actualArguments);
    Assert.assertEquals(PARALLELISM, getExecutionConfig(jobGraph).getParallelism());
    return jobGraph;
}

19 Source : JarHandlerParameterTest.java
with Apache License 2.0
from ljygz

private static ExecutionConfig getExecutionConfig(JobGraph jobGraph) {
    ExecutionConfig executionConfig;
    try {
        executionConfig = jobGraph.getSerializedExecutionConfig().deserializeValue(ParameterProgram.class.getClassLoader());
    } catch (Exception e) {
        throw new AssertionError("Exception while deserializing ExecutionConfig.", e);
    }
    return executionConfig;
}

19 Source : JarHandlerParameterTest.java
with Apache License 2.0
from ljygz

JobGraph validateDefaultGraph() {
    JobGraph jobGraph = LAST_SUBMITTED_JOB_GRAPH_REFERENCE.getAndSet(null);
    Assert.assertEquals(0, ParameterProgram.actualArguments.length);
    Assert.assertEquals(ExecutionConfig.PARALLELISM_DEFAULT, getExecutionConfig(jobGraph).getParallelism());
    return jobGraph;
}

19 Source : TaskExecutorITCase.java
with Apache License 2.0
from ljygz

private JobGraph createJobGraphWithRestartStrategy(int parallelism) throws IOException {
    final JobGraph jobGraph = createJobGraph(parallelism);
    final ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0L));
    jobGraph.setExecutionConfig(executionConfig);
    return jobGraph;
}

19 Source : BackPressureStatsTrackerImplITCase.java
with Apache License 2.0
from ljygz

private static JobGraph createJobWithBackPressure() {
    final JobGraph jobGraph = new JobGraph(TEST_JOB_ID, "Test Job");
    TEST_JOB_VERTEX.setInvokableClass(BackPressuredTask.class);
    TEST_JOB_VERTEX.setParallelism(JOB_PARALLELISM);
    jobGraph.addVertex(TEST_JOB_VERTEX);
    return jobGraph;
}

19 Source : BackPressureStatsTrackerImplITCase.java
with Apache License 2.0
from ljygz

private static JobGraph createJobWithoutBackPressure() {
    final JobGraph jobGraph = new JobGraph(TEST_JOB_ID, "Test Job");
    TEST_JOB_VERTEX.setInvokableClass(BlockingNoOpInvokable.class);
    TEST_JOB_VERTEX.setParallelism(JOB_PARALLELISM);
    jobGraph.addVertex(TEST_JOB_VERTEX);
    return jobGraph;
}

19 Source : ZooKeeperSubmittedJobGraphsStoreITCase.java
with Apache License 2.0
from ljygz

private void verifyJobGraphs(SubmittedJobGraph expected, SubmittedJobGraph actual) {
    JobGraph expectedJobGraph = expected.getJobGraph();
    JobGraph actualJobGraph = actual.getJobGraph();
    assertEquals(expectedJobGraph.getName(), actualJobGraph.getName());
    assertEquals(expectedJobGraph.getJobID(), actualJobGraph.getJobID());
}

19 Source : SlotCountExceedingParallelismTest.java
with Apache License 2.0
from ljygz

// ---------------------------------------------------------------------------------------------
private void submitJobGraphAndWait(final JobGraph jobGraph) throws JobExecutionException, InterruptedException {
    MINI_CLUSTER_RESOURCE.getMiniCluster().executeJobBlocking(jobGraph);
}

19 Source : ExecutionGraphSchedulingTest.java
with Apache License 2.0
from ljygz

/**
 * Tests that a partially completed eager scheduling operation fails if a
 * completed slot is released. See FLINK-9099.
 */
@Test
public void testSlotReleasingFailsSchedulingOperation() throws Exception {
    final int parallelism = 2;
    final JobVertex jobVertex = new JobVertex("Testing job vertex");
    jobVertex.setInvokableClass(NoOpInvokable.class);
    jobVertex.setParallelism(parallelism);
    final JobGraph jobGraph = new JobGraph(jobVertex);
    jobGraph.setAllowQueuedScheduling(true);
    jobGraph.setScheduleMode(ScheduleMode.EAGER);
    final ProgrammedSlotProvider slotProvider = new ProgrammedSlotProvider(parallelism);
    final SimpleSlot slot = createSlot(new SimpleAckingTaskManagerGateway(), jobGraph.getJobID(), new DummySlotOwner());
    slotProvider.addSlot(jobVertex.getID(), 0, CompletableFuture.completedFuture(slot));
    final CompletableFuture<LogicalSlot> slotFuture = new CompletableFuture<>();
    slotProvider.addSlot(jobVertex.getID(), 1, slotFuture);
    final ExecutionGraph executionGraph = createExecutionGraph(jobGraph, slotProvider);
    executionGraph.start(TestingComponentMainThreadExecutorServiceAdapter.forMainThread());
    executionGraph.scheduleForExecution();
    assertThat(executionGraph.getState(), is(JobStatus.RUNNING));
    final ExecutionJobVertex executionJobVertex = executionGraph.getJobVertex(jobVertex.getID());
    final ExecutionVertex[] taskVertices = executionJobVertex.getTaskVertices();
    assertThat(taskVertices[0].getExecutionState(), is(ExecutionState.SCHEDULED));
    assertThat(taskVertices[1].getExecutionState(), is(ExecutionState.SCHEDULED));
    // fail the single allocated slot --> this should fail the scheduling operation
    slot.releaseSlot(new FlinkException("Test failure"));
    assertThat(executionGraph.getTerminationFuture().get(), is(JobStatus.FAILED));
}

19 Source : ExecutionGraphSchedulingTest.java
with Apache License 2.0
from ljygz

/**
 * This test verifies that before deploying a pipelined connected component, the
 * full set of slots is available, and that not some tasks are deployed, and later the
 * system realizes that not enough resources are available.
 */
@Test
public void testDeployPipelinedConnectedComponentsTogether() throws Exception {
    // [pipelined]
    // we construct a simple graph    (source) ----------------> (target)
    final int parallelism = 8;
    final JobVertex sourceVertex = new JobVertex("source");
    sourceVertex.setParallelism(parallelism);
    sourceVertex.setInvokableClass(NoOpInvokable.class);
    final JobVertex targetVertex = new JobVertex("target");
    targetVertex.setParallelism(parallelism);
    targetVertex.setInvokableClass(NoOpInvokable.class);
    targetVertex.connectNewDataSetAsInput(sourceVertex, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    final JobID jobId = new JobID();
    final JobGraph jobGraph = new JobGraph(jobId, "test", sourceVertex, targetVertex);
    @SuppressWarnings({ "unchecked", "rawtypes" })
    final CompletableFuture<LogicalSlot>[] sourceFutures = new CompletableFuture[parallelism];
    @SuppressWarnings({ "unchecked", "rawtypes" })
    final CompletableFuture<LogicalSlot>[] targetFutures = new CompletableFuture[parallelism];
    // 
    // Create the slots, futures, and the slot provider
    final InteractionsCountingTaskManagerGateway[] sourceTaskManagers = new InteractionsCountingTaskManagerGateway[parallelism];
    final InteractionsCountingTaskManagerGateway[] targetTaskManagers = new InteractionsCountingTaskManagerGateway[parallelism];
    final SimpleSlot[] sourceSlots = new SimpleSlot[parallelism];
    final SimpleSlot[] targetSlots = new SimpleSlot[parallelism];
    for (int i = 0; i < parallelism; i++) {
        sourceTaskManagers[i] = createTaskManager();
        targetTaskManagers[i] = createTaskManager();
        sourceSlots[i] = createSlot(sourceTaskManagers[i], jobId);
        targetSlots[i] = createSlot(targetTaskManagers[i], jobId);
        sourceFutures[i] = new CompletableFuture<>();
        targetFutures[i] = new CompletableFuture<>();
    }
    ProgrammedSlotProvider slotProvider = new ProgrammedSlotProvider(parallelism);
    slotProvider.addSlots(sourceVertex.getID(), sourceFutures);
    slotProvider.addSlots(targetVertex.getID(), targetFutures);
    final ExecutionGraph eg = createExecutionGraph(jobGraph, slotProvider);
    // 
    // we complete some of the futures
    for (int i = 0; i < parallelism; i += 2) {
        sourceFutures[i].complete(sourceSlots[i]);
    }
    // 
    // kick off the scheduling
    eg.start(TestingComponentMainThreadExecutorServiceAdapter.forMainThread());
    eg.setScheduleMode(ScheduleMode.EAGER);
    eg.setQueuedSchedulingAllowed(true);
    eg.scheduleForExecution();
    verifyNothingDeployed(eg, sourceTaskManagers);
    // complete the remaining sources
    for (int i = 1; i < parallelism; i += 2) {
        sourceFutures[i].complete(sourceSlots[i]);
    }
    verifyNothingDeployed(eg, sourceTaskManagers);
    // complete the targets except for one
    for (int i = 1; i < parallelism; i++) {
        targetFutures[i].complete(targetSlots[i]);
    }
    verifyNothingDeployed(eg, targetTaskManagers);
    // complete the last target slot future
    targetFutures[0].complete(targetSlots[0]);
    // 
    // verify that all deployments have happened
    for (InteractionsCountingTaskManagerGateway gateway : sourceTaskManagers) {
        assertThat(gateway.getSubmitTaskCount(), is(1));
    }
    for (InteractionsCountingTaskManagerGateway gateway : targetTaskManagers) {
        assertThat(gateway.getSubmitTaskCount(), is(1));
    }
}

19 Source : ExecutionGraphDeploymentTest.java
with Apache License 2.0
from ljygz

private ExecutionGraph createExecutionGraph(Configuration configuration) throws Exception {
    final ScheduledExecutorService executor = TestingUtils.defaultExecutor();
    final JobID jobId = new JobID();
    final JobGraph jobGraph = new JobGraph(jobId, "test");
    jobGraph.setSnapshotSettings(new JobCheckpointingSettings(Collections.<JobVertexID>emptyList(), Collections.<JobVertexID>emptyList(), Collections.<JobVertexID>emptyList(), new CheckpointCoordinatorConfiguration(100, 10 * 60 * 1000, 0, 1, CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION, false), null));
    final Time timeout = Time.seconds(10L);
    return ExecutionGraphBuilder.buildGraph(null, jobGraph, configuration, executor, executor, new ProgrammedSlotProvider(1), getClass().getClassLoader(), new StandaloneCheckpointRecoveryFactory(), timeout, new NoRestartStrategy(), new UnregisteredMetricsGroup(), 1, blobWriter, timeout, LoggerFactory.getLogger(getClass()));
}

19 Source : JobSubmitHandler.java
with Apache License 2.0
from ljygz

private CompletableFuture<JobGraph> loadJobGraph(JobSubmitRequestBody requestBody, Map<String, Path> nameToFile) throws MissingFileException {
    final Path jobGraphFile = getPathAndAssertUpload(requestBody.jobGraphFileName, FILE_TYPE_JOB_GRAPH, nameToFile);
    return CompletableFuture.supplyAsync(() -> {
        JobGraph jobGraph;
        try (ObjectInputStream objectIn = new ObjectInputStream(jobGraphFile.getFileSystem().open(jobGraphFile))) {
            jobGraph = (JobGraph) objectIn.readObject();
        } catch (Exception e) {
            throw new CompletionException(new RestHandlerException("Failed to deserialize JobGraph.", HttpResponseStatus.BAD_REQUEST, e));
        }
        return jobGraph;
    }, executor);
}
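
The handler above restores a JobGraph with plain Java serialization; the client-side write that produces such a file is symmetric. A minimal sketch, assuming only that JobGraph is java.io.Serializable (which the readObject call above already relies on); the helper name is illustrative:

static java.nio.file.Path writeJobGraphFile(JobGraph jobGraph) throws IOException {
    java.nio.file.Path file = java.nio.file.Files.createTempFile("flink-jobgraph", ".bin");
    try (ObjectOutputStream objectOut = new ObjectOutputStream(java.nio.file.Files.newOutputStream(file))) {
        // counterpart of the handler's readObject
        objectOut.writeObject(jobGraph);
    }
    return file;
}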

19 Source : MiniCluster.java
with Apache License 2.0
from ljygz

/**
 * This method runs a job in blocking mode. The method returns only after the job
 * completed successfully, or after it failed terminally.
 *
 * @param job  The Flink job to execute
 * @return The result of the job execution
 *
 * @throws JobExecutionException Thrown if anything went amiss during initial job launch,
 *         or if the job terminally failed.
 */
@Override
public JobExecutionResult executeJobBlocking(JobGraph job) throws JobExecutionException, InterruptedException {
    checkNotNull(job, "job is null");
    final CompletableFuture<JobSubmissionResult> submissionFuture = submitJob(job);
    final CompletableFuture<JobResult> jobResultFuture = submissionFuture.thenCompose((JobSubmissionResult ignored) -> requestJobResult(job.getJobID()));
    final JobResult jobResult;
    try {
        jobResult = jobResultFuture.get();
    } catch (ExecutionException e) {
        throw new JobExecutionException(job.getJobID(), "Could not retrieve JobResult.", ExceptionUtils.stripExecutionException(e));
    }
    try {
        return jobResult.toJobExecutionResult(Thread.currentThread().getContextClassLoader());
    } catch (IOException | ClassNotFoundException e) {
        throw new JobExecutionException(job.getJobID(), e);
    }
}
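
A typical caller, sketched under the assumption of the standard MiniClusterConfiguration builder; the TaskManager and slot counts are illustrative:

MiniClusterConfiguration config = new MiniClusterConfiguration.Builder()
        .setNumTaskManagers(1)
        .setNumSlotsPerTaskManager(1)
        .build();
try (MiniCluster miniCluster = new MiniCluster(config)) {
    miniCluster.start();
    // blocks until the job finishes or fails terminally
    JobExecutionResult result = miniCluster.executeJobBlocking(jobGraph);
}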

19 Source : MiniCluster.java
with Apache License 2.0
from ljygz

private CompletableFuture<Void> uploadAndSetJobFiles(final CompletableFuture<InetSocketAddress> blobServerAddressFuture, final JobGraph job) {
    return blobServerAddressFuture.thenAccept(blobServerAddress -> {
        try {
            ClientUtils.extractAndUploadJobGraphFiles(job, () -> new BlobClient(blobServerAddress, miniClusterConfiguration.getConfiguration()));
        } catch (FlinkException e) {
            throw new CompletionException(e);
        }
    });
}

19 Source : DefaultJobManagerJobMetricGroupFactory.java
with Apache License 2.0
from ljygz

@Override
public JobManagerJobMetricGroup create(@Nonnull JobGraph jobGraph) {
    return jobManagerMetricGroup.addJob(jobGraph);
}

19 Source : SubmittedJobGraph.java
with Apache License 2.0
from ljygz

/**
 * A recoverable {@link JobGraph}.
 */
public class SubmittedJobGraph implements Serializable {

    private static final long serialVersionUID = 2836099271734771825L;

    /**
     * The submitted {@link JobGraph}.
     */
    private final JobGraph jobGraph;

    /**
     * Creates a {@link SubmittedJobGraph}.
     *
     * @param jobGraph The submitted {@link JobGraph}
     */
    public SubmittedJobGraph(JobGraph jobGraph) {
        this.jobGraph = checkNotNull(jobGraph, "Job graph");
    }

    /**
     * Returns the submitted {@link JobGraph}.
     */
    public JobGraph getJobGraph() {
        return jobGraph;
    }

    /**
     * Returns the {@link JobID} of the submitted {@link JobGraph}.
     */
    public JobID getJobId() {
        return jobGraph.getJobID();
    }

    @Override
    public String toString() {
        return String.format("SubmittedJobGraph(%s)", jobGraph.getJobID());
    }
}

19 Source : SingleJobSubmittedJobGraphStore.java
with Apache License 2.0
from ljygz

/**
 * {@link SubmittedJobGraphStore} implementation for a single job.
 */
public class SingleJobSubmittedJobGraphStore implements SubmittedJobGraphStore {

    private final JobGraph jobGraph;

    public SingleJobSubmittedJobGraphStore(JobGraph jobGraph) {
        this.jobGraph = Preconditions.checkNotNull(jobGraph);
    }

    @Override
    public void start(SubmittedJobGraphListener jobGraphListener) throws Exception {
        // noop
    }

    @Override
    public void stop() throws Exception {
        // noop
    }

    @Override
    public SubmittedJobGraph recoverJobGraph(JobID jobId) throws Exception {
        if (jobGraph.getJobID().equals(jobId)) {
            return new SubmittedJobGraph(jobGraph);
        } else {
            throw new FlinkException("Could not recover job graph " + jobId + '.');
        }
    }

    @Override
    public void putJobGraph(SubmittedJobGraph jobGraph) throws Exception {
        if (!this.jobGraph.getJobID().equals(jobGraph.getJobId())) {
            throw new FlinkException("Cannot put additional jobs into this submitted job graph store.");
        }
    }

    @Override
    public void removeJobGraph(JobID jobId) {
        // ignore
    }

    @Override
    public void releaseJobGraph(JobID jobId) {
        // ignore
    }

    @Override
    public Collection<JobID> getJobIds() {
        return Collections.singleton(jobGraph.getJobID());
    }
}
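
A short usage sketch of the store above: recovery succeeds only for the ID of the single wrapped job, as recoverJobGraph shows:

SingleJobSubmittedJobGraphStore store = new SingleJobSubmittedJobGraphStore(jobGraph);
// succeeds: the ID matches the wrapped graph
SubmittedJobGraph recovered = store.recoverJobGraph(jobGraph.getJobID());
// throws FlinkException: a different, unknown job id
store.recoverJobGraph(new JobID());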

19 Source : MiniDispatcher.java
with Apache License 2.0
from ljygz

@Override
public CompletableFuture<Acknowledge> submitJob(JobGraph jobGraph, Time timeout) {
    final CompletableFuture<Acknowledge> acknowledgeCompletableFuture = super.submitJob(jobGraph, timeout);
    acknowledgeCompletableFuture.whenComplete((Acknowledge ignored, Throwable throwable) -> {
        if (throwable != null) {
            onFatalError(new FlinkException("Failed to submit job " + jobGraph.getJobID() + " in job mode.", throwable));
        }
    });
    return acknowledgeCompletableFuture;
}

19 Source : MiniClusterClient.java
with Apache License 2.0
from ljygz

@Override
public CompletableFuture<JobSubmissionResult> submitJob(@Nonnull JobGraph jobGraph) {
    return miniCluster.submitJob(jobGraph);
}

19 Source : ClusterClient.java
with Apache License 2.0
from ljygz

public JobSubmissionResult run(FlinkPlan compiledPlan, List<URL> libraries, List<URL> classpaths, ClassLoader classLoader, SavepointRestoreSettings savepointSettings) throws ProgramInvocationException {
    // generate the JobGraph from the compiled plan (e.g. a StreamGraph) that was passed in
    JobGraph job = getJobGraph(flinkConfig, compiledPlan, libraries, classpaths, savepointSettings);
    return submitJob(job, classLoader);
}

19 Source : LocalExecutor.java
with Apache License 2.0
from ljygz

/**
 * Executes the given program on a local runtime and waits for the job to finish.
 *
 * <p>If the executor has not been started before, this starts the executor and shuts it down
 * after the job finished. If the job runs in session mode, the executor is kept alive until
 * no more references to the executor exist.</p>
 *
 * @param plan The plan of the program to execute.
 * @return The net runtime of the program, in milliseconds.
 *
 * @throws Exception Thrown, if either the startup of the local execution context, or the execution
 *                   caused an exception.
 */
@Override
public JobExecutionResult executePlan(Plan plan) throws Exception {
    if (plan == null) {
        throw new IllegalArgumentException("The plan may not be null.");
    }
    synchronized (this.lock) {
        // check if we start a session dedicated for this execution
        final boolean shutDownAtEnd;
        if (jobExecutorService == null) {
            shutDownAtEnd = true;
            // configure the number of local slots equal to the parallelism of the local plan
            if (this.taskManagerNumSlots == DEFAULT_TASK_MANAGER_NUM_SLOTS) {
                int maxParallelism = plan.getMaximumParallelism();
                if (maxParallelism > 0) {
                    this.taskManagerNumSlots = maxParallelism;
                }
            }
            // start the cluster for us
            start();
        } else {
            // we use the existing session
            shutDownAtEnd = false;
        }
        try {
            // TODO: Set job's default parallelism to max number of slots
            final int slotsPerTaskManager = jobExecutorServiceConfiguration.getInteger(TaskManagerOptions.NUM_TASK_SLOTS, taskManagerNumSlots);
            final int numTaskManagers = jobExecutorServiceConfiguration.getInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 1);
            plan.setDefaultParallelism(slotsPerTaskManager * numTaskManagers);
            Optimizer pc = new Optimizer(new DataStatistics(), jobExecutorServiceConfiguration);
            OptimizedPlan op = pc.compile(plan);
            JobGraphGenerator jgg = new JobGraphGenerator(jobExecutorServiceConfiguration);
            JobGraph jobGraph = jgg.compileJobGraph(op, plan.getJobId());
            return jobExecutorService.executeJobBlocking(jobGraph);
        } finally {
            if (shutDownAtEnd) {
                stop();
            }
        }
    }
}

19 Source : CliFrontend.java
with Apache License 2.0
from ljygz

private <T> void runProgram(CustomCommandLine<T> customCommandLine, CommandLine commandLine, RunOptions runOptions, PackagedProgram program) throws ProgramInvocationException, FlinkException {
    final ClusterDescriptor<T> clusterDescriptor = customCommandLine.createClusterDescriptor(commandLine);
    try {
        final T clusterId = customCommandLine.getClusterId(commandLine);
        final ClusterClient<T> client;
        // directly deploy the job if the cluster is started in job mode and detached
        if (clusterId == null && runOptions.getDetachedMode()) {
            int parallelism = runOptions.getParallelism() == -1 ? defaultParallelism : runOptions.getParallelism();
            final JobGraph jobGraph = PackagedProgramUtils.createJobGraph(program, configuration, parallelism);
            final ClusterSpecification clusterSpecification = customCommandLine.getClusterSpecification(commandLine);
            client = clusterDescriptor.deployJobCluster(clusterSpecification, jobGraph, runOptions.getDetachedMode());
            logAndSysout("Job has been submitted with JobID " + jobGraph.getJobID());
            try {
                client.shutdown();
            } catch (Exception e) {
                LOG.info("Could not properly shut down the client.", e);
            }
        } else {
            final Thread shutdownHook;
            if (clusterId != null) {
                client = clusterDescriptor.retrieve(clusterId);
                shutdownHook = null;
            } else {
                // also in job mode we have to deploy a session cluster because the job
                // might consist of multiple parts (e.g. when using collect)
                final ClusterSpecification clusterSpecification = customCommandLine.getClusterSpecification(commandLine);
                // deploy a session cluster on YARN so that the job can be submitted to it afterwards
                // (the common "Couldn't deploy Yarn session cluster" error originates here)
                client = clusterDescriptor.deploySessionCluster(clusterSpecification);
                // if not running in detached mode, add a shutdown hook to shut down cluster if client exits
                // there's a race-condition here if cli is killed before shutdown hook is installed
                if (!runOptions.getDetachedMode() && runOptions.isShutdownOnAttachedExit()) {
                    shutdownHook = ShutdownHookUtil.addShutdownHook(client::shutDownCluster, client.getClass().getSimpleName(), LOG);
                } else {
                    shutdownHook = null;
                }
            }
            try {
                client.setPrintStatusDuringExecution(runOptions.getStdoutLogging());
                client.setDetached(runOptions.getDetachedMode());
                LOG.debug("Client slots is set to {}", client.getMaxSlots());
                LOG.debug("{}", runOptions.getSavepointRestoreSettings());
                int userParallelism = runOptions.getParallelism();
                LOG.debug("User parallelism is set to {}", userParallelism);
                if (client.getMaxSlots() != MAX_SLOTS_UNKNOWN && userParallelism == -1) {
                    logAndSysout("Using the parallelism provided by the remote cluster (" + client.getMaxSlots() + "). " + "To use another parallelism, set it at the ./bin/flink client.");
                    userParallelism = client.getMaxSlots();
                } else if (ExecutionConfig.PARALLELISM_DEFAULT == userParallelism) {
                    userParallelism = defaultParallelism;
                }
                // run the program; the client submits it to the remote cluster via REST
                executeProgram(program, client, userParallelism);
            } finally {
                if (clusterId == null && !client.isDetached()) {
                    // terminate the cluster only if we have started it before and if it's not detached
                    try {
                        client.shutDownCluster();
                    } catch (final Exception e) {
                        LOG.info("Could not properly terminate the Flink cluster.", e);
                    }
                    if (shutdownHook != null) {
                        // we do not need the hook anymore as we have just tried to shutdown the cluster.
                        ShutdownHookUtil.removeShutdownHook(shutdownHook, client.getClass().getSimpleName(), LOG);
                    }
                }
                try {
                    client.shutdown();
                } catch (Exception e) {
                    LOG.info("Could not properly shut down the client.", e);
                }
            }
        }
    } finally {
        try {
            clusterDescriptor.close();
        } catch (Exception e) {
            LOG.info("Could not properly close the cluster descriptor.", e);
        }
    }
}

19 Source : FlinkYarnJobLauncher.java
with Apache License 2.0
from harbby

public ApplicationId start(Job job) throws Exception {
    JobGraph jobGraph = job.getJobDAG();
    List<File> userProvidedJars = getUserAdditionalJars(job.getDepends());
    final Configuration flinkConfiguration = GlobalConfiguration.loadConfiguration();
    String flinkHome = requireNonNull(System.getenv("FLINK_HOME"), "FLINK_HOME env not setting");
    if (!new File(flinkHome).exists()) {
        throw new IllegalArgumentException("FLINK_HOME " + flinkHome + " not exists");
    }
    String flinkConfDirectory = System.getenv(ConfigConstants.ENV_FLINK_CONF_DIR);
    if (flinkConfDirectory == null) {
        flinkConfDirectory = new File(flinkHome, "conf").getPath();
    }
    flinkConfiguration.setString(YarnConfigOptions.FLINK_DIST_JAR, getFlinkJarFile(flinkHome).getPath());
    final YarnJobDescriptor descriptor = new YarnJobDescriptor(flinkConfiguration, yarnClient, yarnConfiguration, job.getConfig(), job.getName());
    descriptor.addShipFiles(userProvidedJars);
    YarnLogConfigUtil.setLogConfigFileInConfig(flinkConfiguration, flinkConfDirectory);
    // List<File> logFiles = Stream.of("log4j.properties", "logback.xml")   //"conf/flink-conf.yaml"
    // .map(x -> new File(flinkDonfDirectory, x)).collect(Collectors.toList());
    // descriptor.addShipFiles(logFiles);
    logger.info("start flink job {}", jobGraph.getJobID());
    try (ClusterClient<ApplicationId> client = descriptor.deploy(jobGraph, true)) {
        return client.getClusterId();
    } catch (Throwable e) {
        logger.error("submitting job {} failed", jobGraph.getJobID(), e);
        throw e;
    }
}

19 Source : MiniExecutor.java
with Apache License 2.0
from harbby

public static JobExecutionResult execute(JobGraph jobGraph) throws Exception {
    try (MiniExecutor localExecutor = new MiniExecutor(jobGraph)) {
        return localExecutor.executeJobBlocking();
    }
}

19 Source : MiniExecutor.java
with Apache License 2.0
from harbby

public static VmCallable<Boolean> createVmCallable(JobGraph jobGraph) {
    return () -> {
        try (MiniExecutor executor = new MiniExecutor(jobGraph)) {
            System.out.println(FLINK_WEB + executor.getWebUi());
            executor.executeJobBlocking();
        }
        return true;
    };
}

19 Source : FlinkContainerFactory.java
with Apache License 2.0
from harbby

public static void setJobConfig(JobGraph jobGraph, FlinkJobConfig jobConfig, ClassLoader jobClassLoader, String jobId) throws IOException, ClassNotFoundException {
    // set Parallelism
    ExecutionConfig executionConfig = jobGraph.getSerializedExecutionConfig().deserializeValue(jobClassLoader);
    executionConfig.setParallelism(jobConfig.getParallelism());
    jobGraph.setExecutionConfig(executionConfig);
    // set check config
    if (jobConfig.getCheckpointInterval() <= 0) {
        return;
    }
    // --- configure the Flink job's checkpointing
    CheckpointCoordinatorConfiguration config = CheckpointCoordinatorConfiguration.builder()
            // default is -1, meaning disabled; one minute is a reasonable interval
            .setCheckpointInterval(jobConfig.getCheckpointInterval())
            // 10 minutes is the default
            .setCheckpointTimeout(jobConfig.getCheckpointTimeout())
            // make sure 1000 ms of progress happen between checkpoints
            .setMinPauseBetweenCheckpoints(jobConfig.getMinPauseBetweenCheckpoints())
            // the maximum number of concurrent checkpoint attempts
            .setMaxConcurrentCheckpoints(1)
            .setCheckpointRetentionPolicy(RETAIN_ON_CANCELLATION)
            // CheckpointingMode.EXACTLY_ONCE is the default
            .setExactlyOnce(true)
            // todo: cfg.isPreferCheckpointForRecovery()
            .setUnalignedCheckpointsEnabled(true)
            // todo: cfg.getTolerableCheckpointFailureNumber()
            .setTolerableCheckpointFailureNumber(0)
            .build();
    // set checkPoint
    // default  execEnv.getStateBackend() is null default is asynchronousSnapshots = true;
    // see: https://ci.apache.org/projects/flink/flink-docs-release-1.7/dev/stream/state/checkpointing.html#enabling-and-configuring-checkpointing
    Path appCheckPath = new Path(jobConfig.getCheckpointDir(), jobId);
    StateBackend stateBackend = new FsStateBackend(appCheckPath.toString(), true) {

        @Override
        public FsStateBackend configure(ReadableConfig config, ClassLoader classLoader) {
            FsStateBackend fsStateBackend = super.configure(config, classLoader);
            return AopGo.proxy(FsStateBackend.class).byInstance(fsStateBackend).aop(binder -> {
                binder.doAround(proxyContext -> {
                    // Object value = proxyContext.proceed();
                    JobID jobId = (JobID) proxyContext.getArgs()[0];
                    logger.info("mock {}", proxyContext.getMethod());
                    return new SylphFsCheckpointStorage(getCheckpointPath(), getSavepointPath(), jobId, getMinFileSizeThreshold());
                }).when().createCheckpointStorage(any());
            }).build();
        }
    };
    JobCheckpointingSettings settings = jobGraph.getCheckpointingSettings();
    JobCheckpointingSettings checkSettings = new JobCheckpointingSettings(settings.getVerticesToTrigger(), settings.getVerticesToAcknowledge(), settings.getVerticesToConfirm(), config, new SerializedValue<>(stateBackend), settings.getMasterHooks());
    jobGraph.setSnapshotSettings(checkSettings);
}

19 Source : LocalExecutor.java
with Apache License 2.0
from flink-tpc-ds

private <C> ProgramTargetDescriptor executeUpdateInternal(ExecutionContext<C> context, String statement) {
    final ExecutionContext<C>.EnvironmentInstance envInst = context.createEnvironmentInstance();
    applyUpdate(context, envInst.getTableEnvironment(), envInst.getQueryConfig(), statement);
    // create job graph with dependencies
    final String jobName = context.getSessionContext().getName() + ": " + statement;
    final JobGraph jobGraph;
    try {
        jobGraph = envInst.createJobGraph(jobName);
    } catch (Throwable t) {
        // catch everything such that the statement does not crash the executor
        throw new SqlExecutionException("Invalid SQL statement.", t);
    }
    // create execution
    final BasicResult<C> result = new BasicResult<>();
    final ProgramDeployer<C> deployer = new ProgramDeployer<>(context, jobName, jobGraph, result, false);
    // blocking deployment
    deployer.run();
    return ProgramTargetDescriptor.of(result.getClusterId(), jobGraph.getJobID(), result.getWebInterfaceUrl());
}

19 Source : StreamingJobGraphGeneratorTest.java
with Apache License 2.0
from flink-tpc-ds

/**
 * Test default schedule mode.
 */
@Test
public void testDefaultScheduleMode() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    // use eager schedule mode by default
    StreamGraph streamGraph = new StreamGraphGenerator(Collections.emptyList(), env.getConfig(), env.getCheckpointConfig()).generate();
    JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);
    assertEquals(ScheduleMode.EAGER, jobGraph.getScheduleMode());
}

19 Source : StreamingJobGraphGeneratorTest.java
with Apache License 2.0
from flink-tpc-ds

/**
 * Test schedule mode is configurable or not.
 */
@Test
public void testSetScheduleMode() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamGraph streamGraph = new StreamGraphGenerator(Collections.emptyList(), env.getConfig(), env.getCheckpointConfig()).setScheduleMode(ScheduleMode.LAZY_FROM_SOURCES).generate();
    JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);
    assertEquals(ScheduleMode.LAZY_FROM_SOURCES, jobGraph.getScheduleMode());
}

19 Source : StreamingJobGraphGeneratorTest.java
with Apache License 2.0
from flink-tpc-ds

/**
 * Tests that disabled checkpointing sets the checkpointing interval to Long.MAX_VALUE.
 */
@Test
public void testDisabledCheckpointing() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamGraph streamGraph = new StreamGraph(env.getConfig(), env.getCheckpointConfig());
    replacedertFalse("Checkpointing enabled", streamGraph.getCheckpointConfig().isCheckpointingEnabled());
    JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);
    JobCheckpointingSettings snapshottingSettings = jobGraph.getCheckpointingSettings();
    assertEquals(Long.MAX_VALUE, snapshottingSettings.getCheckpointCoordinatorConfiguration().getCheckpointInterval());
}

19 Source : PipelinedFailoverRegionBuildingTest.java
with Apache License 2.0
from flink-tpc-ds

// ------------------------------------------------------------------------
// utilities
// ------------------------------------------------------------------------
private ExecutionGraph createExecutionGraph(JobGraph jobGraph) throws JobException, JobExecutionException {
    // configure the pipelined failover strategy
    final Configuration jobManagerConfig = new Configuration();
    jobManagerConfig.setString(JobManagerOptions.EXECUTION_FAILOVER_STRATEGY, FailoverStrategyLoader.LEGACY_PIPELINED_REGION_RESTART_STRATEGY_NAME);
    final Time timeout = Time.seconds(10L);
    return ExecutionGraphBuilder.buildGraph(null, jobGraph, jobManagerConfig, TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(), mock(SlotProvider.class), PipelinedFailoverRegionBuildingTest.class.getClassLoader(), new StandaloneCheckpointRecoveryFactory(), timeout, new NoRestartStrategy(), new UnregisteredMetricsGroup(), VoidBlobWriter.getInstance(), timeout, log, NettyShuffleMaster.INSTANCE, NoOpPartitionTracker.INSTANCE);
}

19 Source : ExecutionGraphRestartTest.java
with Apache License 2.0
from flink-tpc-ds

@Test
public void failGlobalIfExecutionIsStillRunning_failingAnExecutionTwice_ShouldTriggerOnlyOneFailover() throws Exception {
    JobVertex sender = ExecutionGraphTestUtils.createJobVertex("Task1", 1, NoOpInvokable.class);
    JobVertex receiver = ExecutionGraphTestUtils.createJobVertex("Task2", 1, NoOpInvokable.class);
    JobGraph jobGraph = new JobGraph("Pointwise job", sender, receiver);
    try (SlotPool slotPool = createSlotPoolImpl()) {
        ExecutionGraph eg = TestingExecutionGraphBuilder.newBuilder().setRestartStrategy(new TestRestartStrategy(1, false)).setJobGraph(jobGraph).setNumberOfTasks(2).buildAndScheduleForExecution(slotPool);
        Iterator<ExecutionVertex> executionVertices = eg.getAllExecutionVertices().iterator();
        Execution finishedExecution = executionVertices.next().getCurrentExecutionAttempt();
        Execution failedExecution = executionVertices.next().getCurrentExecutionAttempt();
        finishedExecution.markFinished();
        failedExecution.fail(new Exception("Test Exception"));
        failedExecution.completeCancelling();
        assertEquals(JobStatus.RUNNING, eg.getState());
        // At this point all resources have been assigned
        for (ExecutionVertex vertex : eg.getAllExecutionVertices()) {
            assertNotNull("No assigned resource (test instability).", vertex.getCurrentAssignedResource());
            vertex.getCurrentExecutionAttempt().switchToRunning();
        }
        // fail global with old finished execution, this should not affect the execution
        eg.failGlobalIfExecutionIsStillRunning(new Exception("This should have no effect"), finishedExecution.getAttemptId());
        assertThat(eg.getState(), is(JobStatus.RUNNING));
        // the state of the finished execution should not have changed since it is terminal
        assertThat(finishedExecution.getState(), is(ExecutionState.FINISHED));
    }
}

19 Source : ExecutionGraphRestartTest.java
with Apache License 2.0
from flink-tpc-ds

/**
 * Tests that a failing execution does not affect a restarted job. This is important if a
 * callback handler fails an execution after it has already reached a final state and the job
 * has been restarted.
 */
@Test
public void testFailingExecutionAfterRestart() throws Exception {
    JobVertex sender = ExecutionGraphTestUtils.createJobVertex("Task1", 1, NoOpInvokable.class);
    JobVertex receiver = ExecutionGraphTestUtils.createJobVertex("Task2", 1, NoOpInvokable.class);
    JobGraph jobGraph = new JobGraph("Pointwise job", sender, receiver);
    try (SlotPool slotPool = createSlotPoolImpl()) {
        ExecutionGraph eg = TestingExecutionGraphBuilder.newBuilder().setRestartStrategy(TestRestartStrategy.directExecuting()).setJobGraph(jobGraph).setNumberOfTasks(2).buildAndScheduleForExecution(slotPool);
        Iterator<ExecutionVertex> executionVertices = eg.getAllExecutionVertices().iterator();
        Execution finishedExecution = executionVertices.next().getCurrentExecutionAttempt();
        Execution failedExecution = executionVertices.next().getCurrentExecutionAttempt();
        finishedExecution.markFinished();
        failedExecution.fail(new Exception("Test Exception"));
        failedExecution.completeCancelling();
        assertEquals(JobStatus.RUNNING, eg.getState());
        // At this point all resources have been assigned
        for (ExecutionVertex vertex : eg.getAllExecutionVertices()) {
            assertNotNull("No assigned resource (test instability).", vertex.getCurrentAssignedResource());
            vertex.getCurrentExecutionAttempt().switchToRunning();
        }
        // fail old finished execution, this should not affect the execution
        finishedExecution.fail(new Exception("This should have no effect"));
        for (ExecutionVertex vertex : eg.getAllExecutionVertices()) {
            vertex.getCurrentExecutionAttempt().markFinished();
        }
        // the state of the finished execution should not have changed since it is terminal
        assertEquals(ExecutionState.FINISHED, finishedExecution.getState());
        assertEquals(JobStatus.FINISHED, eg.getState());
    }
}

19 Source : ExecutionGraphRestartTest.java
with Apache License 2.0
from flink-tpc-ds

private static JobGraph createJobGraphToCancel() throws IOException {
    JobVertex vertex = ExecutionGraphTestUtils.createJobVertex("Test Vertex", 1, NoOpInvokable.class);
    ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, Integer.MAX_VALUE));
    JobGraph jobGraph = new JobGraph("Test Job", vertex);
    jobGraph.setExecutionConfig(executionConfig);
    return jobGraph;
}

19 Source : ExecutionGraphDeploymentTest.java
with Apache License 2.0
from flink-tpc-ds

private ExecutionGraph createExecutionGraph(Configuration configuration) throws Exception {
    final ScheduledExecutorService executor = TestingUtils.defaultExecutor();
    final JobID jobId = new JobID();
    final JobGraph jobGraph = new JobGraph(jobId, "test");
    jobGraph.setSnapshotSettings(new JobCheckpointingSettings(Collections.<JobVertexID>emptyList(), Collections.<JobVertexID>emptyList(), Collections.<JobVertexID>emptyList(), new CheckpointCoordinatorConfiguration(100, 10 * 60 * 1000, 0, 1, CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION, false, false, 0), null));
    final Time timeout = Time.seconds(10L);
    return ExecutionGraphBuilder.buildGraph(null, jobGraph, configuration, executor, executor, new ProgrammedSlotProvider(1), getClass().getClassLoader(), new StandaloneCheckpointRecoveryFactory(), timeout, new NoRestartStrategy(), new UnregisteredMetricsGroup(), blobWriter, timeout, LoggerFactory.getLogger(getClass()), NettyShuffleMaster.INSTANCE, NoOpPartitionTracker.INSTANCE);
}
