Example #1
0
 def test_healthy_scheduler_status(self, session):
     """A scheduler job with a fresh heartbeat is reported healthy by /api/v1/health."""
     heartbeat_ts = timezone.utcnow()
     scheduler_job = BaseJob(
         job_type="SchedulerJob",
         state=State.RUNNING,
         latest_heartbeat=heartbeat_ts,
     )
     session.add(scheduler_job)
     session.commit()
     body = self.client.get("/api/v1/health").json
     # Both components should report healthy, and the endpoint must echo the
     # exact heartbeat timestamp we committed above.
     assert body["metadatabase"]["status"] == "healthy"
     assert body["scheduler"]["status"] == "healthy"
     assert body["scheduler"]["latest_scheduler_heartbeat"] == heartbeat_ts.isoformat()
 def test_unhealthy_scheduler_is_slow(self, session):
     """A scheduler whose last heartbeat is too old is reported unhealthy by /api/v1/health.

     Uses plain ``assert`` statements for consistency with
     ``test_healthy_scheduler_status`` instead of mixing in
     ``self.assertEqual`` (unittest style).
     """
     # One minute old — presumably beyond the scheduler health-check
     # threshold, so the endpoint flags the scheduler. TODO confirm the
     # configured threshold in this test environment.
     last_scheduler_heartbeat_for_testing_2 = timezone.utcnow() - timedelta(
         minutes=1)
     session.add(
         BaseJob(
             job_type="SchedulerJob",
             state=State.RUNNING,
             latest_heartbeat=last_scheduler_heartbeat_for_testing_2,
         ))
     session.commit()
     resp_json = self.client.get("/api/v1/health").json
     # The metadatabase is still reachable; only the scheduler flips.
     assert "healthy" == resp_json["metadatabase"]["status"]
     assert "unhealthy" == resp_json["scheduler"]["status"]
     assert (last_scheduler_heartbeat_for_testing_2.isoformat() ==
             resp_json["scheduler"]["latest_scheduler_heartbeat"])
Example #3
0
def heartbeat_healthy():
    """Yield the expected (status, heartbeat_iso) pair for a fresh heartbeat.

    case-1: healthy scheduler status. After the yield, the row this
    fixture inserted is removed again.
    """
    now = timezone.utcnow()
    scheduler_job = BaseJob(
        job_type='SchedulerJob',
        state='running',
        latest_heartbeat=now,
    )
    with create_session() as session:
        session.add(scheduler_job)
    yield 'healthy', now.isoformat()
    # Teardown: delete exactly the row created above, matched on all three
    # identifying columns so unrelated jobs are untouched.
    with create_session() as session:
        cleanup = session.query(BaseJob).filter(
            BaseJob.job_type == 'SchedulerJob',
            BaseJob.state == 'running',
            BaseJob.latest_heartbeat == now,
        )
        cleanup.delete()
Example #4
0
def heartbeat_too_slow():
    """Yield the expected (status, heartbeat_iso) pair for a stale heartbeat.

    case-2: unhealthy scheduler status - scenario 1 (SchedulerJob is running
    too slowly). Cleans up its own row after the yield.
    """
    stale_heartbeat = timezone.utcnow() - datetime.timedelta(minutes=1)
    slow_job = BaseJob(
        job_type='SchedulerJob',
        state='running',
        latest_heartbeat=stale_heartbeat,
    )
    with create_session() as session:
        # Push any pre-existing scheduler jobs one second further into the
        # past so the job added below owns the most recent heartbeat.
        session.query(BaseJob).filter(
            BaseJob.job_type == 'SchedulerJob',
        ).update({'latest_heartbeat': stale_heartbeat - datetime.timedelta(seconds=1)})
        session.add(slow_job)
    yield 'unhealthy', stale_heartbeat.isoformat()
    # Teardown: remove only the row this fixture inserted.
    with create_session() as session:
        cleanup = session.query(BaseJob).filter(
            BaseJob.job_type == 'SchedulerJob',
            BaseJob.state == 'running',
            BaseJob.latest_heartbeat == stale_heartbeat,
        )
        cleanup.delete()
Example #5
0
    def test_delete_dag_run_and_task_instance_does_not_raise_error(self):
        """Deleting a DagRun/TaskInstance that reference a BaseJob must not raise.

        Both DagRun.creating_job_id and TaskInstance.queued_by_job_id point at
        BaseJob.id; this test wires up those references and checks the deletes
        go through cleanly. NOTE(review): the delete/commit ordering below is
        deliberate — do not reorder.
        """
        clear_db_jobs()
        clear_db_runs()

        # Arbitrary fixed id shared by the job, the dag run and the task instance.
        job_id = 22
        dag = DAG(dag_id='test_delete_dag_run', start_date=days_ago(1))
        _ = BashOperator(task_id='task1', dag=dag, bash_command="echo hi")

        # Simulate DagRun is created by a job inherited by BaseJob with an id
        # This is so that same foreign key exists on DagRun.creating_job_id & BaseJob.id
        dag_run = self.create_dag_run(dag=dag, creating_job_id=job_id)
        assert dag_run is not None

        session = settings.Session()

        job = BaseJob(id=job_id)
        session.add(job)

        # Simulate TaskInstance is created by a job inherited by BaseJob with an id
        # This is so that same foreign key exists on TaskInstance.queued_by_job_id & BaseJob.id
        ti1 = dag_run.get_task_instance(task_id="task1")
        ti1.queued_by_job_id = job_id
        session.merge(ti1)
        session.commit()

        # Test Deleting DagRun does not raise an error
        # (not committed yet — the TI is re-fetched and deleted in the same
        # transaction before the single commit below)
        session.delete(dag_run)

        # Test Deleting TaskInstance does not raise an error
        ti1 = dag_run.get_task_instance(task_id="task1")
        session.delete(ti1)
        session.commit()

        # CleanUp
        clear_db_runs()
        clear_db_jobs()