def setUp(self):
    clear_db_runs()
    drs = _create_dagruns(
        self.dag1,
        self.execution_dates,
        state=State.RUNNING,
        run_id_template="scheduled__{}",
    )
    for dr in drs:
        dr.dag = self.dag1
        dr.verify_integrity()

    drs = _create_dagruns(
        self.dag2,
        [self.dag2.default_args['start_date']],
        state=State.RUNNING,
        run_id_template="scheduled__{}",
    )
    for dr in drs:
        dr.dag = self.dag2
        dr.verify_integrity()
def tearDown(self) -> None:
    super().tearDown()
    clear_db_runs()
    clear_db_xcom()
def setUp(self):
    clear_db_jobs()
    clear_db_runs()
    patcher = patch('airflow.jobs.base_job.sleep')
    self.addCleanup(patcher.stop)
    self.mock_base_job_sleep = patcher.start()
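# A minimal sketch, assuming the setUp above has run: patching
# 'airflow.jobs.base_job.sleep' lets a test assert that a job's loop paused
# without actually blocking. The test name and the _create_job helper are
# hypothetical, not from the original suite.
def test_job_waits_between_heartbeats(self):
    job = self._create_job()  # hypothetical helper returning a configured job
    job.heartbeat()
    self.mock_base_job_sleep.assert_called()  # the code slept; the test did not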
def clear_db():
    clear_db_runs()
    yield
    clear_db_runs()
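# A minimal sketch of how a yield-style helper like clear_db above is
# typically registered in pytest; the autouse flag, the fixture name, and the
# import path are assumptions, not confirmed by the snippet.
import pytest

from tests.test_utils.db import clear_db_runs


@pytest.fixture(autouse=True)
def _clear_db_runs_fixture():
    clear_db_runs()  # everything before the yield runs as setup
    yield            # the test body executes here
    clear_db_runs()  # everything after the yield runs as teardown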
def setUp(self):
    clear_db_runs()
    clear_db_pools()
    self.parser = cli_parser.get_parser()
def setUp(self):
    clear_db_runs()
    clear_db_pools()
def setUp(self):
    clear_db_runs()
def setUp(self) -> None:
    db.clear_db_runs()
    db.clear_db_jobs()
def setUp(self) -> None:
    db.clear_db_dags()
    db.clear_db_serialized_dags()
    db.clear_db_runs()
    db.clear_db_task_execution()
    db.clear_db_task_fail()
def teardown_method(self):
    clear_db_runs()
def tearDown(self) -> None:
    clear_db_runs()
    shutil.rmtree(self.local_log_location, ignore_errors=True)
def clean_db():
    clear_db_runs()
    clear_db_pools()
def reset_dagruns():
    """Clean up stray garbage from other tests."""
    clear_db_runs()
def clean_db():
    db.clear_db_runs()
    db.clear_db_task_fail()
def tearDown(self):
    clear_db_runs()
def setUp(self) -> None:
    self.client = self.app.test_client()  # type:ignore
    self.default_time = "2020-06-11T18:00:00+00:00"
    self.default_time_2 = "2020-06-12T18:00:00+00:00"
    clear_db_runs()
    clear_db_dags()
def tearDown(self):
    super().tearDown()
    clear_db_runs()
def clean_db():
    clear_db_runs()
    clear_db_xcom()
def setUp(self):
    clear_db_runs()
    clear_db_pools()
    self.parser = cli.CLIFactory.get_parser()
def setUp(self) -> None:
    clear_db_runs()
def tearDown(self):
    clear_db_runs()
def setUp(self) -> None:
    clear_db_runs()
    # Presumably intended as a fixed prefix plus a random suffix; the original
    # used str.join on the prefix, which would interleave it between every
    # sampled letter instead.
    self.log_name = 'stackdriver-tests-' + ''.join(random.sample(string.ascii_lowercase, 16))
def tearDown(self):
    clear_db_runs()
    clear_db_pools()
def tearDown(self) -> None:
    # Reload the default logging settings and reapply them, undoing any
    # logging configuration the test mutated.
    from airflow.config_templates import airflow_local_settings

    importlib.reload(airflow_local_settings)
    settings.configure_logging()
    clear_db_runs()
def clean_db():
    clear_db_runs()
    clear_db_dags()
    clear_db_serialized_dags()
def clean_db():
    db.clear_db_runs()
    db.clear_db_task_reschedule()
    db.clear_db_xcom()
def clean_db_helper():
    yield
    clear_db_jobs()
    clear_db_runs()
def setUp(self) -> None:
    clear_db_runs()
    self.default_time = DEFAULT_TIME
def tearDown(self) -> None:
    clear_db_jobs()
    clear_db_runs()
@classmethod  # tearDownClass takes cls, so unittest requires a classmethod
def tearDownClass(cls) -> None:
    clear_db_runs()
    clear_db_dags()
def test_get_states_count_upstream_ti(self):
    """
    Test the helper function '_get_states_count_upstream_ti' both as a unit
    and inside update_state.
    """
    from airflow.ti_deps.dep_context import DepContext

    get_states_count_upstream_ti = TriggerRuleDep._get_states_count_upstream_ti
    session = settings.Session()
    now = timezone.utcnow()
    dag = DAG('test_dagrun_with_pre_tis', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})

    with dag:
        op1 = DummyOperator(task_id='A')
        op2 = DummyOperator(task_id='B')
        op3 = DummyOperator(task_id='C')
        op4 = DummyOperator(task_id='D')
        op5 = DummyOperator(task_id='E', trigger_rule=TriggerRule.ONE_FAILED)

        op1.set_downstream([op2, op3])  # op1 >> op2, op3
        op4.set_upstream([op3, op2])  # op3, op2 >> op4
        op5.set_upstream([op2, op3, op4])  # (op2, op3, op4) >> op5

    clear_db_runs()
    dag.clear()
    dr = dag.create_dagrun(
        run_id='test_dagrun_with_pre_tis', state=State.RUNNING, execution_date=now, start_date=now
    )

    ti_op1 = TaskInstance(task=dag.get_task(op1.task_id), execution_date=dr.execution_date)
    ti_op2 = TaskInstance(task=dag.get_task(op2.task_id), execution_date=dr.execution_date)
    ti_op3 = TaskInstance(task=dag.get_task(op3.task_id), execution_date=dr.execution_date)
    ti_op4 = TaskInstance(task=dag.get_task(op4.task_id), execution_date=dr.execution_date)
    ti_op5 = TaskInstance(task=dag.get_task(op5.task_id), execution_date=dr.execution_date)

    ti_op1.set_state(state=State.SUCCESS, session=session)
    ti_op2.set_state(state=State.FAILED, session=session)
    ti_op3.set_state(state=State.SUCCESS, session=session)
    ti_op4.set_state(state=State.SUCCESS, session=session)
    ti_op5.set_state(state=State.SUCCESS, session=session)
    session.commit()

    # Check handling of tasks triggered from backfill with no finished tasks.
    # Each returned tuple is (successes, skipped, failed, upstream_failed, done).
    finished_tasks = DepContext().ensure_finished_tasks(ti_op2.task.dag, ti_op2.execution_date, session)
    self.assertEqual(get_states_count_upstream_ti(finished_tasks=finished_tasks, ti=ti_op2), (1, 0, 0, 0, 1))

    finished_tasks = dr.get_task_instances(state=State.finished | {State.UPSTREAM_FAILED}, session=session)
    self.assertEqual(get_states_count_upstream_ti(finished_tasks=finished_tasks, ti=ti_op4), (1, 0, 1, 0, 2))
    self.assertEqual(get_states_count_upstream_ti(finished_tasks=finished_tasks, ti=ti_op5), (2, 0, 1, 0, 3))

    dr.update_state()
    self.assertEqual(State.SUCCESS, dr.state)
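# A minimal sketch of the tally the helper is asserted to produce above:
# count the states of the finished upstream task instances and report
# (successes, skipped, failed, upstream_failed, done). Illustration only;
# the real helper lives on TriggerRuleDep.
from collections import Counter


def upstream_state_counts(upstream_states):
    counter = Counter(upstream_states)
    return (
        counter.get('success', 0),
        counter.get('skipped', 0),
        counter.get('failed', 0),
        counter.get('upstream_failed', 0),
        sum(counter.values()),
    )


# Matches the ti_op5 expectation above: upstream B failed, C and D succeeded.
assert upstream_state_counts(['failed', 'success', 'success']) == (2, 0, 1, 0, 3)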
def tearDown(self) -> None:
    db.clear_db_runs()
    db.clear_db_jobs()
def tearDown(self):
    clear_db_runs()
    clear_db_pools()
def setUp(self):
    clear_db_runs()