Example #1
0
    def test_get_num_task_instances(self):
        """DAG.get_num_task_instances filters by dag id, task ids and states."""
        dag_id = 'test_get_num_task_instances_dag'
        task_id = 'task_1'

        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
        task = DummyOperator(task_id=task_id, dag=dag)

        # One TI per day with states None / RUNNING / QUEUED / RUNNING.
        per_day_states = [None, State.RUNNING, State.QUEUED, State.RUNNING]
        session = settings.Session()
        for offset, state in enumerate(per_day_states):
            ti = TI(task=task,
                    execution_date=DEFAULT_DATE + datetime.timedelta(days=offset))
            ti.state = state
            session.merge(ti)
        session.commit()

        def count(task_ids, states=None):
            # Thin wrapper so each expectation below reads on one line;
            # omit the states kwarg entirely when not given, as the
            # original calls did.
            if states is None:
                return DAG.get_num_task_instances(
                    dag_id, task_ids, session=session)
            return DAG.get_num_task_instances(
                dag_id, task_ids, states=states, session=session)

        self.assertEqual(0, count(['fakename']))
        self.assertEqual(4, count([task_id]))
        self.assertEqual(4, count(['fakename', task_id]))
        self.assertEqual(1, count([task_id], states=[None]))
        self.assertEqual(2, count([task_id], states=[State.RUNNING]))
        self.assertEqual(3, count([task_id], states=[None, State.RUNNING]))
        self.assertEqual(
            4, count([task_id], states=[None, State.QUEUED, State.RUNNING]))
        session.close()
 def test_try_number(self):
     """
     Test the try_number accessor behaves in various running states
     """
     # Fresh task instance (no attempts yet): accessor reports the
     # upcoming attempt number, 1.
     dag = models.DAG(dag_id='test_check_and_change_state_before_execution')
     task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
     ti = TI(task=task, execution_date=timezone.utcnow())
     self.assertEqual(1, ti.try_number)
     # While the task is RUNNING the stored attempt number is reported
     # as-is.
     ti.try_number = 2
     ti.state = State.RUNNING
     self.assertEqual(2, ti.try_number)
     # Once the task has finished, the accessor points at the *next*
     # attempt (stored value + 1).
     ti.state = State.SUCCESS
     self.assertEqual(3, ti.try_number)
    def test_success_callbak_no_race_condition(self):
        # NOTE(review): a method with this exact name is defined again later
        # in this class; Python keeps only the last definition, so this copy
        # is shadowed and never runs.  ("callbak" is a typo kept in the name.)
        class CallbackWrapper(object):
            def wrap_task_instance(self, ti):
                # Snapshot the TI's identifying fields so the success handler
                # can re-query the row from a fresh session.
                self.task_id = ti.task_id
                self.dag_id = ti.dag_id
                self.execution_date = ti.execution_date
                self.task_state_in_callback = ""
                self.callback_ran = False

            def success_handler(self, context):
                self.callback_ran = True
                # NOTE(review): this session is never closed — a DB
                # connection leaks on every callback invocation.
                session = settings.Session()
                temp_instance = session.query(TI).filter(
                    TI.task_id == self.task_id).filter(
                    TI.dag_id == self.dag_id).filter(
                    TI.execution_date == self.execution_date).one()
                # State read back from the DB while the callback runs; the
                # test below expects RUNNING here (callback fires before the
                # row is flipped to SUCCESS).
                self.task_state_in_callback = temp_instance.state
        cw = CallbackWrapper()
        dag = DAG('test_success_callbak_no_race_condition', start_date=DEFAULT_DATE,
                  end_date=DEFAULT_DATE + datetime.timedelta(days=10))
        task = DummyOperator(task_id='op', email='*****@*****.**',
                             on_success_callback=cw.success_handler, dag=dag)
        ti = TI(task=task, execution_date=datetime.datetime.now())
        ti.state = State.RUNNING
        session = settings.Session()
        session.merge(ti)
        session.commit()
        cw.wrap_task_instance(ti)
        # Runs the task inline, which invokes the success callback.
        ti._run_raw_task()
        self.assertTrue(cw.callback_ran)
        self.assertEqual(cw.task_state_in_callback, State.RUNNING)
        ti.refresh_from_db()
        self.assertEqual(ti.state, State.SUCCESS)
    def test_success_callbak_no_race_condition(self):
        """The on_success_callback must observe the task still RUNNING.

        Runs a task whose success callback re-reads its own TaskInstance row
        from the DB and records the state seen there; that state must be
        RUNNING (the callback fires before the row is set to SUCCESS).
        """
        class CallbackWrapper:
            def wrap_task_instance(self, ti):
                # Snapshot the TI's identifying fields so the success handler
                # can re-query the row from a fresh session.
                self.task_id = ti.task_id
                self.dag_id = ti.dag_id
                self.execution_date = ti.execution_date
                self.task_state_in_callback = ""
                self.callback_ran = False

            def success_handler(self, context):
                self.callback_ran = True
                # Fresh session so we read committed DB state rather than the
                # in-memory TI held by the runner.
                session = settings.Session()
                try:
                    temp_instance = session.query(TI).filter(
                        TI.task_id == self.task_id).filter(
                        TI.dag_id == self.dag_id).filter(
                        TI.execution_date == self.execution_date).one()
                    self.task_state_in_callback = temp_instance.state
                finally:
                    # Bug fix: the session was previously never closed,
                    # leaking a DB connection per callback invocation.
                    session.close()

        cw = CallbackWrapper()
        dag = DAG('test_success_callbak_no_race_condition', start_date=DEFAULT_DATE,
                  end_date=DEFAULT_DATE + datetime.timedelta(days=10))
        task = DummyOperator(task_id='op', email='*****@*****.**',
                             on_success_callback=cw.success_handler, dag=dag)
        ti = TI(task=task, execution_date=datetime.datetime.now())
        ti.state = State.RUNNING
        session = settings.Session()
        session.merge(ti)
        session.commit()
        cw.wrap_task_instance(ti)
        # Runs the task inline, which invokes the success callback.
        ti._run_raw_task()
        self.assertTrue(cw.callback_ran)
        self.assertEqual(cw.task_state_in_callback, State.RUNNING)
        ti.refresh_from_db()
        self.assertEqual(ti.state, State.SUCCESS)
Example #5
0
    def _run_task_instance(self, ti, mark_success, pool, session=None):
        """Run ``ti`` synchronously via ``_run_raw_task`` with logging wired up.

        :param ti: TaskInstance or SimpleTaskInstance; a SimpleTaskInstance
            is rehydrated into a full TaskInstance via the dag bag first
        :param mark_success: passed through to ``_run_raw_task``
        :param pool: passed through to ``_run_raw_task``
        :param session: db session used to persist the RUNNING state
            # NOTE(review): used unconditionally despite the None default —
            # presumably injected by a @provide_session decorator; confirm.
        """
        # set proper state and try number to keep logger in sync
        if isinstance(ti, SimpleTaskInstance):
            from airflow.models import TaskInstance

            dag = self.dag_bag.get_dag(ti.dag_id)
            task = dag.get_task(ti.task_id)
            ti = TaskInstance(task, ti.execution_date)
        ti.state = State.RUNNING
        ti._try_number += 1
        # Update hostname to allow runtime logging across all environments
        ti.hostname = get_hostname()
        # let save state
        session.merge(ti)
        session.commit()
        # backward compatible with airflow loggers
        from airflow.utils.log import logging_mixin

        logging_mixin.set_context(logging.root, ti)

        try:
            ti._run_raw_task(mark_success=mark_success,
                             job_id=ti.job_id,
                             pool=pool)
        finally:
            # Always close the task log handler and push logs to remote
            # storage, even when the raw task run raised.
            for handler in logging.root.handlers:
                if handler.name == "task":
                    handler.close()
            self._sync_remote_logs(ti)
    def execute(self, context):
        """Allow execution only during the latest schedule window; otherwise
        mark all direct downstream task instances as SKIPPED.

        Externally triggered dag runs are always allowed to proceed.
        """
        # If the DAG Run is externally triggered, then return without
        # skipping downstream tasks
        if context['dag_run'] and context['dag_run'].external_trigger:
            logging.info("""Externally triggered DAG_Run:
                         allowing execution to proceed.""")
            return

        now = datetime.datetime.now()
        left_window = context['dag'].following_schedule(
            context['execution_date'])
        right_window = context['dag'].following_schedule(left_window)
        logging.info(
            'Checking latest only with left_window: %s right_window: %s '
            'now: %s', left_window, right_window, now)

        if not left_window < now <= right_window:
            logging.info('Not latest execution, skipping downstream.')
            session = settings.Session()

            TI = TaskInstance
            tis = session.query(TI).filter(
                TI.execution_date == context['ti'].execution_date,
                TI.task_id.in_(context['task'].downstream_task_ids)
            ).with_for_update().all()

            for ti in tis:
                logging.info('Skipping task: %s', ti.task_id)
                ti.state = State.SKIPPED
                ti.start_date = now
                ti.end_date = now
                session.merge(ti)

            # Bug fix: the original tested ``task.task_id in tis`` where
            # ``tis`` holds TaskInstance objects, so the membership check was
            # always False and a duplicate SKIPPED row was merged for every
            # downstream task.  Compare against the skipped task ids instead.
            skipped_ids = {ti.task_id for ti in tis}

            # this is defensive against dag runs that are not complete
            for task in context['task'].downstream_list:
                if task.task_id in skipped_ids:
                    continue

                logging.warning("Task {} was not part of a dag run. "
                                "This should not happen."
                                .format(task))
                now = datetime.datetime.now()
                ti = TaskInstance(task, execution_date=context['ti'].execution_date)
                ti.state = State.SKIPPED
                ti.start_date = now
                ti.end_date = now
                session.merge(ti)

            session.commit()
            session.close()
            logging.info('Done.')
        else:
            logging.info('Latest, allowing execution to proceed.')
    def test_get_num_running_task_instances(self):
        """Each TI counts only RUNNING instances of its own (dag, task) pair."""
        session = settings.Session()

        dag_a = models.DAG(dag_id='test_get_num_running_task_instances')
        dag_b = models.DAG(dag_id='test_get_num_running_task_instances_dummy')
        task_a = DummyOperator(task_id='task', dag=dag_a, start_date=DEFAULT_DATE)
        task_b = DummyOperator(task_id='task', dag=dag_b, start_date=DEFAULT_DATE)

        # (task, execution_date, state) triples: two instances of task_a on
        # different days, plus one RUNNING instance of task_b in another dag.
        specs = [
            (task_a, DEFAULT_DATE, State.RUNNING),
            (task_a, DEFAULT_DATE + datetime.timedelta(days=1), State.QUEUED),
            (task_b, DEFAULT_DATE, State.RUNNING),
        ]
        instances = []
        for task, when, state in specs:
            ti = TI(task=task, execution_date=when)
            ti.state = state
            session.add(ti)
            instances.append(ti)
        session.commit()

        # Exactly one RUNNING instance is visible from each TI's viewpoint.
        for ti in instances:
            self.assertEqual(1, ti.get_num_running_task_instances(session=session))
Example #8
0
def test_execution_fails_on_failed_tasks():
    """RunEvaluationOperator.execute raises when the dag run has failed TIs."""
    ti = TaskInstance(DummyOperator(task_id='test'), DEFAULT_DATE)
    ti.state = State.FAILED

    dag_run_mock = Mock()
    dag_run_mock.get_task_instances.return_value = [ti]

    op = RunEvaluationOperator(task_id='evaluation')

    with pytest.raises(RuntimeError) as e:
        op.execute({'dag_run': dag_run_mock})

    # Bug fix: str(e) on a pytest ExceptionInfo yields the "file:line:
    # ExcType" location summary, not the exception message, so the original
    # containment check did not test the message.  Inspect e.value instead.
    assert "Failed tasks instances detected" in str(e.value)
 def execute(self, context):
     """Pick the branch, then mark every other downstream task as SKIPPED."""
     branch = super(BranchPythonOperator, self).execute(context)
     logging.info("Following branch " + branch)
     logging.info("Marking other directly downstream tasks as skipped")
     session = settings.Session()
     run_date = context['ti'].execution_date
     for downstream in context['task'].downstream_list:
         # Guard clause instead of the original nested if: the chosen
         # branch itself is left untouched.
         if downstream.task_id == branch:
             continue
         skipped = TaskInstance(downstream, execution_date=run_date)
         skipped.state = State.SKIPPED
         skipped.start_date = datetime.now()
         skipped.end_date = datetime.now()
         session.merge(skipped)
     session.commit()
     session.close()
     logging.info("Done.")
Example #10
0
    def test_heartbeat_failed_fast(self, mock_getpid):
        """
        Test that task heartbeat will sleep when it fails fast
        """
        # Pin os.getpid() to 1 so it matches the TI's pid set below.
        mock_getpid.return_value = 1

        # Timestamps of every heartbeat, recorded by the callback so the
        # spacing between beats can be asserted at the end.
        heartbeat_records = []

        def heartbeat_recorder(**kwargs):
            heartbeat_records.append(timezone.utcnow())

        with create_session() as session:
            dagbag = models.DagBag(
                dag_folder=TEST_DAG_FOLDER,
                include_examples=False,
            )
            dag_id = 'test_heartbeat_failed_fast'
            task_id = 'test_heartbeat_failed_fast_op'
            dag = dagbag.get_dag(dag_id)
            task = dag.get_task(task_id)

            dag.create_dagrun(run_id="test_heartbeat_failed_fast_run",
                              state=State.RUNNING,
                              execution_date=DEFAULT_DATE,
                              start_date=DEFAULT_DATE,
                              session=session)
            ti = TI(task=task, execution_date=DEFAULT_DATE)
            ti.refresh_from_db()
            ti.state = State.RUNNING
            ti.hostname = get_hostname()
            ti.pid = 1
            session.commit()

            # do_update=False makes the executor report failure immediately,
            # exercising the fail-fast heartbeat path.
            job = LocalTaskJob(task_instance=ti,
                               executor=MockExecutor(do_update=False))
            job.heartrate = 2
            job.heartbeat_callback = heartbeat_recorder
            job._execute()
            self.assertGreater(len(heartbeat_records), 1)
            for i in range(1, len(heartbeat_records)):
                time1 = heartbeat_records[i - 1]
                time2 = heartbeat_records[i]
                # Assert that difference small enough
                delta = (time2 - time1).total_seconds()
                self.assertAlmostEqual(delta, job.heartrate, delta=0.05)
 def execute(self, context):
     """Evaluate the condition; when falsy, skip all downstream tasks."""
     condition = super(ShortCircuitOperator, self).execute(context)
     logging.info("Condition result is {}".format(condition))
     # Early return replaces the original if/else: a truthy condition lets
     # downstream tasks run untouched.
     if condition:
         logging.info('Proceeding with downstream tasks...')
         return
     logging.info('Skipping downstream tasks...')
     session = settings.Session()
     run_date = context['ti'].execution_date
     for downstream in context['task'].downstream_list:
         skipped = TaskInstance(downstream, execution_date=run_date)
         skipped.state = State.SKIPPED
         skipped.start_date = datetime.now()
         skipped.end_date = datetime.now()
         session.merge(skipped)
     session.commit()
     session.close()
     logging.info("Done.")
    def test_file_task_handler_running(self):
        """The file task handler writes and reads logs for a RUNNING task."""
        def task_callable(ti, **kwargs):
            ti.log.info("test")

        dag = DAG('dag_for_testing_file_task_handler', start_date=DEFAULT_DATE)
        task = PythonOperator(
            task_id='task_for_testing_file_log_handler',
            dag=dag,
            python_callable=task_callable,
        )
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti.try_number = 2
        ti.state = State.RUNNING

        logger = ti.log
        ti.log.disabled = False

        file_handler = next(
            (handler for handler in logger.handlers if handler.name == FILE_TASK_HANDLER), None
        )
        assert file_handler is not None

        set_context(logger, ti)
        assert file_handler.handler is not None
        # We expect set_context generates a file locally.
        log_filename = file_handler.handler.baseFilename
        assert os.path.isfile(log_filename)
        assert log_filename.endswith("2.log"), log_filename

        logger.info("Test")

        # Return value of read must be a tuple of list and list.
        # Logs for running tasks should show up too.
        # (Bug fix: the original asserted isinstance(logs, list) twice;
        # the duplicate is removed.)
        logs, metadatas = file_handler.read(ti)
        assert isinstance(logs, list)
        assert isinstance(metadatas, list)
        assert len(logs) == 2
        assert len(logs) == len(metadatas)
        assert isinstance(metadatas[0], dict)

        # Remove the generated tmp log file.
        os.remove(log_filename)
    def test_file_task_handler_running(self):
        """The file task handler writes and reads logs for a RUNNING task."""
        def task_callable(ti, **kwargs):
            ti.log.info("test")
        dag = DAG('dag_for_testing_file_task_handler', start_date=DEFAULT_DATE)
        task = PythonOperator(
            task_id='task_for_testing_file_log_handler',
            dag=dag,
            python_callable=task_callable,
            provide_context=True
        )
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti.try_number = 2
        ti.state = State.RUNNING

        logger = ti.log
        ti.log.disabled = False

        file_handler = next((handler for handler in logger.handlers
                             if handler.name == FILE_TASK_HANDLER), None)
        self.assertIsNotNone(file_handler)

        set_context(logger, ti)
        self.assertIsNotNone(file_handler.handler)
        # We expect set_context generates a file locally.
        log_filename = file_handler.handler.baseFilename
        self.assertTrue(os.path.isfile(log_filename))
        self.assertTrue(log_filename.endswith("2.log"), log_filename)

        logger.info("Test")

        # Return value of read must be a tuple of list and list.
        # Logs for running tasks should show up too.
        logs, metadatas = file_handler.read(ti)
        # Idiom fix: assertIsInstance reports the actual type on failure,
        # unlike assertTrue(isinstance(...)); the duplicated list check on
        # `logs` is also removed.
        self.assertIsInstance(logs, list)
        self.assertIsInstance(metadatas, list)
        self.assertEqual(len(logs), 2)
        self.assertEqual(len(logs), len(metadatas))
        self.assertIsInstance(metadatas[0], dict)

        # Remove the generated tmp log file.
        os.remove(log_filename)
    def execute(self, context):
        """Evaluate the condition; when falsy, mark all direct downstream
        task instances as SKIPPED (locking the rows first)."""
        condition = super(ShortCircuitOperator, self).execute(context)
        logging.info("Condition result is {}".format(condition))

        if condition:
            logging.info('Proceeding with downstream tasks...')
            return

        logging.info('Skipping downstream tasks...')
        session = settings.Session()

        TI = TaskInstance
        tis = session.query(TI).filter(
            TI.execution_date == context['ti'].execution_date,
            TI.task_id.in_(context['task'].downstream_task_ids),
        ).with_for_update().all()

        for ti in tis:
            logging.info('Skipping task: %s', ti.task_id)
            ti.state = State.SKIPPED
            ti.start_date = datetime.now()
            ti.end_date = datetime.now()

        # Bug fix: the original tested ``task.task_id in tis`` where ``tis``
        # holds TaskInstance objects, so the check was always False and a
        # duplicate SKIPPED row was merged for every downstream task.
        skipped_ids = {ti.task_id for ti in tis}

        # this is defensive against dag runs that are not complete
        for task in context['task'].downstream_list:
            if task.task_id in skipped_ids:
                continue

            logging.warning(
                "Task {} was not part of a dag run. This should not happen.".
                format(task))
            ti = TaskInstance(task,
                              execution_date=context['ti'].execution_date)
            ti.state = State.SKIPPED
            ti.start_date = datetime.now()
            ti.end_date = datetime.now()
            session.merge(ti)

        session.commit()
        session.close()
        logging.info("Done.")
Example #15
0
    def skip(self, dag_run, execution_date, tasks, session=None):
        """
        Sets tasks instances to skipped from the same dag run.

        :param dag_run: the DagRun for which to set the tasks to skipped
        :param execution_date: execution_date
        :param tasks: tasks to skip (not task_ids)
        :param session: db session to use
        """
        if not tasks:
            return

        task_ids = [d.task_id for d in tasks]
        now = timezone.utcnow()

        if dag_run:
            # Bulk-update all matching rows in one statement; no ORM objects
            # are loaded, hence synchronize_session=False.
            session.query(TaskInstance).filter(
                TaskInstance.dag_id == dag_run.dag_id,
                TaskInstance.execution_date == dag_run.execution_date,
                TaskInstance.task_id.in_(task_ids)).update(
                    {
                        TaskInstance.state: State.SKIPPED,
                        TaskInstance.start_date: now,
                        TaskInstance.end_date: now
                    },
                    synchronize_session=False)
            session.commit()
        else:
            # NOTE(review): assert is stripped under `python -O`; an explicit
            # raise would be safer for input validation.
            assert execution_date is not None, "Execution date is None and no dag run"

            self.log.warning("No DAG RUN present this should not happen")
            # this is defensive against dag runs that are not complete
            for task in tasks:
                ti = TaskInstance(task, execution_date=execution_date)
                ti.state = State.SKIPPED
                ti.start_date = now
                ti.end_date = now
                session.merge(ti)

            session.commit()
    def execute(self, context):
        """Pick the branch, then mark every other direct downstream task
        instance as SKIPPED (locking the rows first)."""
        branch = super(BranchPythonOperator, self).execute(context)
        logging.info("Following branch " + branch)
        logging.info("Marking other directly downstream tasks as skipped")
        session = settings.Session()

        TI = TaskInstance
        tis = session.query(TI).filter(
            TI.execution_date == context['ti'].execution_date,
            TI.task_id.in_(context['task'].downstream_task_ids),
            TI.task_id != branch,
        ).with_for_update().all()

        for ti in tis:
            logging.info('Skipping task: %s', ti.task_id)
            ti.state = State.SKIPPED
            ti.start_date = datetime.now()
            ti.end_date = datetime.now()

        # Bug fix: the original tested ``task.task_id in tis`` where ``tis``
        # holds TaskInstance objects, so the check was always False and a
        # duplicate SKIPPED row was merged for every non-branch task.
        skipped_ids = {ti.task_id for ti in tis}

        # this is defensive against dag runs that are not complete
        for task in context['task'].downstream_list:
            if task.task_id in skipped_ids:
                continue

            if task.task_id == branch:
                continue

            logging.warning(
                "Task {} was not part of a dag run. This should not happen.".
                format(task))
            ti = TaskInstance(task,
                              execution_date=context['ti'].execution_date)
            ti.state = State.SKIPPED
            ti.start_date = datetime.now()
            ti.end_date = datetime.now()
            session.merge(ti)

        session.commit()
        session.close()
        logging.info("Done.")
Example #17
0
    def execute(self, context):
        """Evaluate the condition; when falsy, mark all direct downstream
        task instances as SKIPPED (locking the rows first)."""
        condition = super(ShortCircuitOperator, self).execute(context)
        logging.info("Condition result is {}".format(condition))

        if condition:
            logging.info('Proceeding with downstream tasks...')
            return

        logging.info('Skipping downstream tasks...')
        session = settings.Session()

        TI = TaskInstance
        tis = session.query(TI).filter(
            TI.execution_date == context['ti'].execution_date,
            TI.task_id.in_(context['task'].downstream_task_ids),
        ).with_for_update().all()

        for ti in tis:
            logging.info('Skipping task: %s', ti.task_id)
            ti.state = State.SKIPPED
            ti.start_date = datetime.now()
            ti.end_date = datetime.now()

        # Bug fix: the original tested ``task.task_id in tis`` where ``tis``
        # holds TaskInstance objects, so the check was always False and a
        # duplicate SKIPPED row was merged for every downstream task.
        skipped_ids = {ti.task_id for ti in tis}

        # this is defensive against dag runs that are not complete
        for task in context['task'].downstream_list:
            if task.task_id in skipped_ids:
                continue

            logging.warning("Task {} was not part of a dag run. This should not happen."
                            .format(task))
            ti = TaskInstance(task, execution_date=context['ti'].execution_date)
            ti.state = State.SKIPPED
            ti.start_date = datetime.now()
            ti.end_date = datetime.now()
            session.merge(ti)

        session.commit()
        session.close()
        logging.info("Done.")
Example #18
0
    def skip(self, dag_run, execution_date, tasks, session=None):
        """
        Sets tasks instances to skipped from the same dag run.

        :param dag_run: the DagRun for which to set the tasks to skipped
        :param execution_date: execution_date
        :param tasks: tasks to skip (not task_ids)
        :param session: db session to use
        """
        if not tasks:
            return

        task_ids = [t.task_id for t in tasks]
        now = timezone.utcnow()

        if not dag_run:
            # Defensive path: no dag run exists, so synthesize one skipped
            # TaskInstance row per task at the given execution date.
            assert execution_date is not None, "Execution date is None and no dag run"

            self.log.warning("No DAG RUN present this should not happen")
            # this is defensive against dag runs that are not complete
            for task in tasks:
                ti = TaskInstance(task, execution_date=execution_date)
                ti.state = State.SKIPPED
                ti.start_date = now
                ti.end_date = now
                session.merge(ti)
            session.commit()
            return

        # Normal path: bulk-update all matching rows of the dag run in one
        # statement (no ORM objects loaded, hence synchronize_session=False).
        matching = session.query(TaskInstance).filter(
            TaskInstance.dag_id == dag_run.dag_id,
            TaskInstance.execution_date == dag_run.execution_date,
            TaskInstance.task_id.in_(task_ids))
        matching.update(
            {TaskInstance.state: State.SKIPPED,
             TaskInstance.start_date: now,
             TaskInstance.end_date: now},
            synchronize_session=False)
        session.commit()
Example #19
0
    def execute(self, context):
        """Pick the branch, then mark every other direct downstream task
        instance as SKIPPED (locking the rows first)."""
        branch = super(BranchPythonOperator, self).execute(context)
        logging.info("Following branch " + branch)
        logging.info("Marking other directly downstream tasks as skipped")
        session = settings.Session()

        TI = TaskInstance
        tis = session.query(TI).filter(
            TI.execution_date == context['ti'].execution_date,
            TI.task_id.in_(context['task'].downstream_task_ids),
            TI.task_id != branch,
        ).with_for_update().all()

        for ti in tis:
            logging.info('Skipping task: %s', ti.task_id)
            ti.state = State.SKIPPED
            ti.start_date = datetime.now()
            ti.end_date = datetime.now()

        # Bug fix: the original tested ``task.task_id in tis`` where ``tis``
        # holds TaskInstance objects, so the check was always False and a
        # duplicate SKIPPED row was merged for every non-branch task.
        skipped_ids = {ti.task_id for ti in tis}

        # this is defensive against dag runs that are not complete
        for task in context['task'].downstream_list:
            if task.task_id in skipped_ids:
                continue

            if task.task_id == branch:
                continue

            logging.warning("Task {} was not part of a dag run. This should not happen."
                            .format(task))
            ti = TaskInstance(task, execution_date=context['ti'].execution_date)
            ti.state = State.SKIPPED
            ti.start_date = datetime.now()
            ti.end_date = datetime.now()
            session.merge(ti)

        session.commit()
        session.close()
        logging.info("Done.")
Example #20
0
 def execute(self, context):
     """Skip downstream tasks unless 'now' falls inside the latest window."""
     now = datetime.datetime.now()
     left_window = context['dag'].following_schedule(
         context['execution_date'])
     right_window = context['dag'].following_schedule(left_window)
     logging.info(
         'Checking latest only with left_window: %s right_window: %s '
         'now: %s', left_window, right_window, now)
     # Guard clause instead of the original if/else: the latest run
     # proceeds untouched.
     if left_window < now <= right_window:
         logging.info('Latest, allowing execution to proceed.')
         return
     logging.info('Not latest execution, skipping downstream.')
     session = settings.Session()
     for downstream in context['task'].downstream_list:
         skipped = TaskInstance(
             downstream, execution_date=context['ti'].execution_date)
         logging.info('Skipping task: %s', skipped.task_id)
         skipped.state = State.SKIPPED
         skipped.start_date = now
         skipped.end_date = now
         session.merge(skipped)
     session.commit()
     session.close()
     logging.info('Done.')
Example #21
0
def test_marquez_dag(mock_get_or_create_marquez_client,
                     mock_uuid,
                     clear_db_airflow_dags,
                     session=None):
    """End-to-end check of DAG lifecycle reporting to a mocked Marquez client.

    Creates a dag with one task that completes and one that fails, then
    asserts the expected namespace/job/job-run/start/complete/fail calls on
    the mock client.
    # NOTE(review): `session` is used below but defaults to None —
    # presumably injected by a @provide_session decorator or fixture; confirm.
    """

    dag = DAG(DAG_ID,
              schedule_interval='@daily',
              default_args=DAG_DEFAULT_ARGS,
              description=DAG_DESCRIPTION)
    # (1) Mock the marquez client method calls
    mock_marquez_client = mock.Mock()
    mock_get_or_create_marquez_client.return_value = mock_marquez_client
    # The uuid mock yields the completed run id first, then the failed one.
    run_id_completed = "my-test_marquez_dag-uuid-completed"
    run_id_failed = "my-test_marquez_dag-uuid-failed"
    mock_uuid.side_effect = [run_id_completed, run_id_failed]

    # (2) Add task that will be marked as completed
    task_will_complete = DummyOperator(task_id=TASK_ID_COMPLETED, dag=dag)
    completed_task_location = get_location(task_will_complete.dag.fileloc)

    # (3) Add task that will be marked as failed
    task_will_fail = DummyOperator(task_id=TASK_ID_FAILED, dag=dag)
    # NOTE(review): reads task_will_complete here — likely intended
    # task_will_fail; both tasks share the same dag so fileloc is
    # identical, but confirm the intent.
    failed_task_location = get_location(task_will_complete.dag.fileloc)

    # (4) Create DAG run and mark as running
    dagrun = dag.create_dagrun(run_id=DAG_RUN_ID,
                               execution_date=DEFAULT_DATE,
                               state=State.RUNNING)

    # Assert namespace meta call
    mock_marquez_client.create_namespace.assert_called_once_with(
        DAG_NAMESPACE, DAG_OWNER)

    # Assert source and dataset meta calls
    mock_marquez_client.create_source.assert_not_called()
    mock_marquez_client.create_dataset.assert_not_called()

    # Assert job meta calls
    create_job_calls = [
        mock.call(job_name=f"{DAG_ID}.{TASK_ID_COMPLETED}",
                  job_type=JobType.BATCH,
                  location=completed_task_location,
                  input_dataset=None,
                  output_dataset=None,
                  context=mock.ANY,
                  description=DAG_DESCRIPTION,
                  namespace_name=DAG_NAMESPACE,
                  run_id=None),
        mock.call(job_name=f"{DAG_ID}.{TASK_ID_FAILED}",
                  job_type=JobType.BATCH,
                  location=failed_task_location,
                  input_dataset=None,
                  output_dataset=None,
                  context=mock.ANY,
                  description=DAG_DESCRIPTION,
                  namespace_name=DAG_NAMESPACE,
                  run_id=None)
    ]
    log.info(
        f"{ [name for name, args, kwargs in mock_marquez_client.mock_calls]}")
    mock_marquez_client.create_job.assert_has_calls(create_job_calls)

    # Assert job run meta calls
    create_job_run_calls = [
        mock.call(job_name=f"{DAG_ID}.{TASK_ID_COMPLETED}",
                  run_id=mock.ANY,
                  run_args=DAG_RUN_ARGS,
                  nominal_start_time=mock.ANY,
                  nominal_end_time=mock.ANY,
                  namespace_name=DAG_NAMESPACE),
        mock.call(job_name=f"{DAG_ID}.{TASK_ID_FAILED}",
                  run_id=mock.ANY,
                  run_args=DAG_RUN_ARGS,
                  nominal_start_time=mock.ANY,
                  nominal_end_time=mock.ANY,
                  namespace_name=DAG_NAMESPACE)
    ]
    mock_marquez_client.create_job_run.assert_has_calls(create_job_run_calls)

    # (5) Start task that will be marked as completed
    task_will_complete.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)

    # (6) Start task that will be marked as failed
    ti1 = TaskInstance(task=task_will_fail, execution_date=DEFAULT_DATE)
    ti1.state = State.FAILED
    session.add(ti1)
    session.commit()

    dag.handle_callback(dagrun, success=True, session=session)

    # Assert start run meta calls
    start_job_run_calls = [
        mock.call(run_id_completed, mock.ANY),
        mock.call(run_id_failed, mock.ANY)
    ]
    mock_marquez_client.mark_job_run_as_started.assert_has_calls(
        start_job_run_calls)

    mock_marquez_client.mark_job_run_as_completed.assert_called_once_with(
        run_id=run_id_completed)

    # When a task run completes, the task outputs are also updated in order
    # to link a job version (=task version) to a dataset version.
    # Using a DummyOperator, no outputs exists, so assert that the create
    # dataset call is not invoked.
    mock_marquez_client.create_dataset.assert_not_called()

    dag.handle_callback(dagrun, success=False, session=session)
    mock_marquez_client.mark_job_run_as_failed.assert_called_once_with(
        run_id=run_id_failed)

    # Assert an attempt to version the outputs of a task is not made when
    # a task fails
    mock_marquez_client.create_dataset.assert_not_called()