def test_schedule_start_works_when_conditions_are_met_auto(self):
    """A ONE_DONE run is started automatically once one upstream reaches a done status.

    No task mocking here: the FAILED upstream status fires the signal path end-to-end,
    so the downstream run should end up CREATED without a manual start call.
    """
    operation_run = OperationRunFactory()
    operation_run.operation.trigger_policy = TriggerPolicy.ONE_DONE
    operation_run.operation.save()
    pipeline_run = operation_run.pipeline_run
    # Add an upstream run in the same pipeline run
    upstream_run1 = OperationRunFactory(pipeline_run=pipeline_run)
    # NOTE(review): False presumably means "started without hitting a limit" — verify
    # against start_operation_run's contract
    assert start_operation_run(upstream_run1) is False
    upstream_run1.refresh_from_db()
    operation_run.upstream_runs.set([upstream_run1])
    # FAILED counts as "done" for ONE_DONE, triggering the auto-start
    upstream_run1.set_status(OperationStatuses.FAILED)
    operation_run.refresh_from_db()
    assert operation_run.last_status == OperationStatuses.CREATED
def test_scheduling_operation_run_sets_pipeline_run_to_scheduled(self):
    """Moving an operation run to SCHEDULED propagates SCHEDULED to its pipeline run."""
    operation_run = OperationRunFactory()
    start_operation_run(operation_run)
    operation_run.refresh_from_db()
    assert operation_run.last_status == OperationStatuses.CREATED
    pipeline_run = operation_run.pipeline_run
    # A fresh pipeline run has exactly one status entry: CREATED
    assert pipeline_run.last_status == PipelineLifeCycle.CREATED
    assert pipeline_run.statuses.count() == 1
    operation_run.set_status(OperationStatuses.SCHEDULED)
    operation_run.refresh_from_db()
    pipeline_run.refresh_from_db()
    assert operation_run.last_status == OperationStatuses.SCHEDULED
    # The pipeline run gained a second status entry mirroring the operation run
    assert pipeline_run.last_status == PipelineLifeCycle.SCHEDULED
    assert pipeline_run.statuses.count() == 2
def test_failed_operation_runs_sets_pipeline_run_to_finished(self):
    """When the pipeline's only operation run fails, the pipeline run is marked DONE."""
    operation_run = OperationRunFactory()
    start_operation_run(operation_run)
    operation_run.refresh_from_db()
    assert operation_run.last_status == OperationStatuses.CREATED
    pipeline_run = operation_run.pipeline_run
    assert pipeline_run.last_status == PipelineLifeCycle.CREATED
    assert pipeline_run.statuses.count() == 1
    # Drive the single operation run through SCHEDULED/RUNNING, then fail it;
    # with no other runs left, the pipeline run should finish.
    operation_run.set_status(OperationStatuses.SCHEDULED)
    operation_run.set_status(OperationStatuses.RUNNING)
    operation_run.on_failure()
    pipeline_run.refresh_from_db()
    operation_run.refresh_from_db()
    assert operation_run.last_status == OperationStatuses.FAILED
    # Pipeline history: CREATED, SCHEDULED, RUNNING, DONE -> 4 entries
    assert pipeline_run.last_status == PipelineLifeCycle.DONE
    assert pipeline_run.statuses.count() == 4
def test_schedule_start_works_with_operation_concurrency(self):
    """Operation-level concurrency=1 blocks a second concurrent run of the same operation.

    The first run of the operation cannot start while it would exceed the
    operation's concurrency; a separate run instance of the same operation can
    still be scheduled once eligible.
    """
    operation_run = OperationRunFactory()
    operation_run.operation.trigger_policy = TriggerPolicy.ONE_DONE
    operation_run.operation.save()
    pipeline_run = operation_run.pipeline_run
    # Set operation concurrency to 1
    operation_run.operation.concurrency = 1
    operation_run.operation.save()
    # Add two upstream runs in the same pipeline run
    upstream_run1 = OperationRunFactory(pipeline_run=pipeline_run)
    upstream_run2 = OperationRunFactory(pipeline_run=pipeline_run)
    assert start_operation_run(upstream_run1) is False
    assert start_operation_run(upstream_run2) is False
    upstream_run1.refresh_from_db()
    upstream_run2.refresh_from_db()
    operation_run.upstream_runs.set([upstream_run1, upstream_run2])
    # Patch the celery task so the auto-start is recorded but not executed
    with patch('polyflow.tasks.pipelines_start_operation.apply_async'
               ) as mock_fct:
        upstream_run1.set_status(OperationStatuses.FAILED)
        upstream_run2.set_status(OperationStatuses.RUNNING)
    # ONE_DONE satisfied by the FAILED upstream -> exactly one start attempt
    assert mock_fct.call_count == 1
    operation_run.refresh_from_db()
    # The task was mocked, so no status was recorded yet
    assert operation_run.last_status is None
    assert start_operation_run(operation_run) is False
    operation_run.refresh_from_db()
    assert operation_run.last_status == OperationStatuses.CREATED
    # Add another operation run for this operation with scheduled
    new_operation_run = OperationRunFactory(
        operation=operation_run.operation)
    new_operation_run.upstream_runs.set([upstream_run1, upstream_run2])
    # NOTE(review): every other check here uses `.last_status`; confirm that
    # `.status` is the intended attribute and not a typo
    assert new_operation_run.status is None
    # Check if we can start another instance
    new_operation_run.refresh_from_db()
    # True here: the run could not be started (operation concurrency reached) —
    # presumably; verify against start_operation_run's contract
    assert start_operation_run(new_operation_run) is True
    new_operation_run.refresh_from_db()
    assert new_operation_run.last_status is None
def test_running_operation_run_sets_pipeline_run_to_running(self):
    """RUNNING on one operation run propagates to the pipeline run exactly once.

    A second operation run moving to SCHEDULED afterwards must not regress or
    duplicate the pipeline run's RUNNING status.
    """
    operation_run = OperationRunFactory()
    start_operation_run(operation_run)
    operation_run.refresh_from_db()
    assert operation_run.last_status == OperationStatuses.CREATED
    pipeline_run = operation_run.pipeline_run
    assert pipeline_run.last_status == PipelineLifeCycle.CREATED
    assert pipeline_run.statuses.count() == 1
    # Create another operation run for this pipeline_run
    operation_run2 = OperationRunFactory(pipeline_run=pipeline_run)
    start_operation_run(operation_run2)
    operation_run2.refresh_from_db()
    operation_run.set_status(OperationStatuses.SCHEDULED)
    pipeline_run.refresh_from_db()
    operation_run.refresh_from_db()
    assert operation_run.last_status == OperationStatuses.SCHEDULED
    assert pipeline_run.last_status == PipelineLifeCycle.SCHEDULED
    assert pipeline_run.statuses.count() == 2
    operation_run.set_status(OperationStatuses.RUNNING)
    pipeline_run.refresh_from_db()
    operation_run.refresh_from_db()
    assert operation_run.last_status == OperationStatuses.RUNNING
    assert pipeline_run.last_status == PipelineLifeCycle.RUNNING
    assert pipeline_run.statuses.count() == 3
    # The second run going SCHEDULED must not add a pipeline status:
    # the pipeline is already RUNNING
    operation_run2.set_status(OperationStatuses.SCHEDULED)
    assert pipeline_run.last_status == PipelineLifeCycle.RUNNING
    assert pipeline_run.statuses.count() == 3
def test_schedule_start_works_when_conditions_are_met_manual(self):
    """Counterpart of the auto test: the celery task is mocked, so the run must
    be started manually after ONE_DONE is satisfied."""
    operation_run = OperationRunFactory()
    operation_run.operation.trigger_policy = TriggerPolicy.ONE_DONE
    operation_run.operation.save()
    pipeline_run = operation_run.pipeline_run
    # Add an upstream run in the same pipeline run
    upstream_run1 = OperationRunFactory(pipeline_run=pipeline_run)
    assert start_operation_run(upstream_run1) is False
    upstream_run1.refresh_from_db()
    operation_run.upstream_runs.set([upstream_run1])
    # Mock the async start so only the scheduling attempt is recorded
    with patch('polyflow.tasks.pipelines_start_operation.apply_async'
               ) as mock_fct:
        upstream_run1.set_status(OperationStatuses.FAILED)
    assert mock_fct.call_count == 1
    operation_run.refresh_from_db()
    # Task was mocked, so the run has no status yet
    assert operation_run.last_status is None
    # Manual start succeeds now that the trigger condition is met
    assert start_operation_run(operation_run) is False
    operation_run.refresh_from_db()
    assert operation_run.last_status == OperationStatuses.CREATED
def test_schedule_start_with_failed_upstream(self):
    """With ALL_SUCCEEDED, a failed upstream permanently blocks the run
    and marks it UPSTREAM_FAILED."""
    operation_run = OperationRunFactory()
    operation_run.operation.trigger_policy = TriggerPolicy.ALL_SUCCEEDED
    operation_run.operation.save()
    # Add a failed upstream
    upstream_run1 = OperationRunFactory()
    assert start_operation_run(upstream_run1) is False
    upstream_run1.refresh_from_db()
    operation_run.upstream_runs.set([upstream_run1])
    # Mock the async start; the upstream failure still triggers one attempt
    with patch('polyflow.tasks.pipelines_start_operation.apply_async'
               ) as mock_fct:
        upstream_run1.set_status(OperationStatuses.FAILED)
    assert mock_fct.call_count == 1
    assert start_operation_run(operation_run) is False
    # Check also that the task is marked as UPSTREAM_FAILED
    # since this operation cannot be started anymore
    operation_run.refresh_from_db()
    assert operation_run.last_status == OperationStatuses.UPSTREAM_FAILED
def test_schedule_start_works_with_pipeline_concurrency(self):
    """Pipeline-level concurrency=1 defers a second run until a slot frees up.

    upstream_run2 is blocked while upstream_run1 occupies the single slot;
    once statuses move on, the downstream ONE_DONE run gets created.
    """
    operation_run = OperationRunFactory()
    operation_run.operation.trigger_policy = TriggerPolicy.ONE_DONE
    operation_run.operation.save()
    pipeline_run = operation_run.pipeline_run
    # Set pipeline concurrency to 1
    pipeline_run.pipeline.concurrency = 1
    pipeline_run.pipeline.save()
    # Add two upstream runs in the same pipeline run
    upstream_run1 = OperationRunFactory(pipeline_run=pipeline_run)
    upstream_run2 = OperationRunFactory(pipeline_run=pipeline_run)
    assert start_operation_run(upstream_run1) is False
    # True: could not start — the pipeline's single slot is taken (presumably;
    # verify against start_operation_run's contract)
    assert start_operation_run(upstream_run2) is True
    upstream_run1.refresh_from_db()
    upstream_run2.refresh_from_db()
    operation_run.upstream_runs.set([upstream_run1, upstream_run2])
    # Mock the async start; the upstream failure triggers one attempt
    with patch('polyflow.tasks.pipelines_start_operation.apply_async'
               ) as mock_fct:
        upstream_run1.set_status(OperationStatuses.FAILED)
    assert mock_fct.call_count == 1
    operation_run.refresh_from_db()
    assert operation_run.last_status is None
    upstream_run2.refresh_from_db()
    assert upstream_run2.last_status is None
    # Should be started but we mocked the process
    with patch('polyflow.tasks.pipelines_start_operation.apply_async'
               ) as mock_fct:
        assert start_operation_run(upstream_run2) is False
        upstream_run2.refresh_from_db()
        upstream_run2.set_status(OperationStatuses.RUNNING)
    # Already past scheduling: no new start-task is queued
    assert mock_fct.call_count == 0
    assert start_operation_run(operation_run) is True
    assert operation_run.last_status is None
    # Freeing the slot (upstream succeeds) lets the downstream run be created
    upstream_run2.set_status(OperationStatuses.SUCCEEDED)
    operation_run.refresh_from_db()
    assert operation_run.last_status == OperationStatuses.CREATED