Example 1
def run_scheduler_job(with_db_reset=False) -> None:
    from airflow.jobs.scheduler_job import SchedulerJob

    if with_db_reset:
        reset_db()
    SchedulerJob(subdir=DAG_FOLDER, do_pickle=False, num_runs=3).run()
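
The helper above leans on two names defined elsewhere in the benchmark script. A minimal sketch of that surrounding context, assuming DAG_FOLDER points at the benchmark's DAG files and reset_db simply wraps airflow.utils.db.resetdb() (the bodies below are illustrative assumptions, not the original script):

# Illustrative context only -- the real script defines its own DAG_FOLDER
# constant and reset_db() helper; these bodies are assumptions.
import os

from airflow.utils import db

# Directory holding the DAG files the scheduler should load.
DAG_FOLDER = os.path.join(os.path.dirname(__file__), 'dags')


def reset_db():
    # Wipe and re-initialise the Airflow metadata database so each
    # benchmark run starts from a clean slate.
    db.resetdb()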
Example 2
import gc
import os
import statistics
import sys
import time

# pause_all_dags, reset_dag, create_dag_runs and get_executor_under_test are
# helper functions defined elsewhere in the same benchmark script.


def main(num_runs, repeat, pre_create_dag_runs, executor_class, dag_ids):  # pylint: disable=too-many-locals
    """
    This script can be used to measure the total "scheduler overhead" of Airflow.

    By overhead we mean: if the tasks executed instantly as soon as they were
    scheduled (i.e. they do nothing), how quickly could we schedule them?

    It will monitor the task completion of the Mock/stub executor (no actual
    tasks are run) and after the required number of dag runs for all the
    specified dags have completed all their tasks, it will cleanly shut down
    the scheduler.

    The dags you run with need to have an early enough start_date to create the
    desired number of runs.

    Care should be taken that other limits (DAG concurrency, pool size etc) are
    not the bottleneck. This script doesn't help you in that regard.

    It is recommended to repeat the test at least 3 times (`--repeat=3`, the
    default) so that you can get somewhat-accurate variance on the reported
    timing numbers, but this can be disabled for longer runs if needed.
    """

    # Turn on unit test mode so that we don't do any sleep() in the scheduler
    # loop - not needed on master, but this script can run against older
    # releases too!
    os.environ['AIRFLOW__CORE__UNIT_TEST_MODE'] = 'True'

    os.environ['AIRFLOW__CORE__DAG_CONCURRENCY'] = '500'

    # Set this so that dags can dynamically configure their end_date
    os.environ['AIRFLOW_BENCHMARK_MAX_DAG_RUNS'] = str(num_runs)
    os.environ['PERF_MAX_RUNS'] = str(num_runs)

    if pre_create_dag_runs:
        os.environ['AIRFLOW__SCHEDULER__USE_JOB_SCHEDULE'] = 'False'

    from airflow.jobs.scheduler_job import SchedulerJob
    from airflow.models.dagbag import DagBag
    from airflow.utils import db

    dagbag = DagBag()

    dags = []

    with db.create_session() as session:
        pause_all_dags(session)
        for dag_id in dag_ids:
            dag = dagbag.get_dag(dag_id)
            dag.sync_to_db(session=session)
            dags.append(dag)
            reset_dag(dag, session)

            next_run_date = dag.normalize_schedule(dag.start_date or min(t.start_date for t in dag.tasks))

            for _ in range(num_runs - 1):
                next_run_date = dag.following_schedule(next_run_date)

            end_date = dag.end_date or dag.default_args.get('end_date')
            if end_date != next_run_date:
                message = (
                    f"DAG {dag_id} has incorrect end_date ({end_date}) for number of runs! "
                    f"It should be {next_run_date}"
                )
                sys.exit(message)

            if pre_create_dag_runs:
                create_dag_runs(dag, num_runs, session)

    ShortCircuitExecutor = get_executor_under_test(executor_class)

    executor = ShortCircuitExecutor(dag_ids_to_watch=dag_ids, num_runs=num_runs)
    scheduler_job = SchedulerJob(dag_ids=dag_ids, do_pickle=False, executor=executor)
    executor.scheduler_job = scheduler_job

    total_tasks = sum(len(dag.tasks) for dag in dags)

    if 'PYSPY' in os.environ:
        pid = str(os.getpid())
        filename = os.environ.get('PYSPY_O', 'flame-' + pid + '.html')
        os.spawnlp(os.P_NOWAIT, 'sudo', 'sudo', 'py-spy', 'record', '-o', filename, '-p', pid, '--idle')

    times = []

    # Need a lambda to refer to the _latest_ value of scheduler_job, not just
    # the initial one
    code_to_test = lambda: scheduler_job.run()  # pylint: disable=unnecessary-lambda

    for count in range(repeat):
        gc.disable()
        start = time.perf_counter()

        code_to_test()
        times.append(time.perf_counter() - start)
        gc.enable()
        print("Run %d time: %.5f" % (count + 1, times[-1]))

        if count + 1 != repeat:
            with db.create_session() as session:
                for dag in dags:
                    reset_dag(dag, session)

            executor.reset(dag_ids)
            scheduler_job = SchedulerJob(dag_ids=dag_ids, do_pickle=False, executor=executor)
            executor.scheduler_job = scheduler_job

    print()
    print()
    msg = "Time for %d dag runs of %d dags with %d total tasks: %.4fs"

    if len(times) > 1:
        print((msg + " (±%.3fs)") % (
            num_runs,
            len(dags),
            total_tasks,
            statistics.mean(times),
            statistics.stdev(times)
        ))
    else:
        print(msg % (num_runs, len(dags), total_tasks, times[0]))

    print()
    print()
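
The upstream benchmark script drives main() from a command-line entry point; a minimal, hypothetical stand-in using argparse (the option names and defaults mirror main()'s parameters and are assumptions, not the original CLI) might look like:

# Hypothetical entry point -- option names and defaults are assumptions.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Measure Airflow scheduler overhead")
    parser.add_argument('--num-runs', type=int, default=1)
    parser.add_argument('--repeat', type=int, default=3)
    parser.add_argument('--pre-create-dag-runs', action='store_true')
    parser.add_argument('--executor-class', default='MockExecutor')
    parser.add_argument('dag_ids', nargs='+')
    args = parser.parse_args()
    main(args.num_runs, args.repeat, args.pre_create_dag_runs, args.executor_class, args.dag_ids)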
Example 3
import contextlib
import io
import unittest

import pytest

from airflow.cli import cli_parser
from airflow.cli.commands import jobs_command
from airflow.jobs.scheduler_job import SchedulerJob
from airflow.utils.session import create_session
from airflow.utils.state import State
from tests.test_utils.db import clear_db_jobs


class TestCliConfigList(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.parser = cli_parser.get_parser()

    def setUp(self) -> None:
        clear_db_jobs()
        self.scheduler_job = None

    def tearDown(self) -> None:
        if self.scheduler_job and self.scheduler_job.processor_agent:
            self.scheduler_job.processor_agent.end()
        clear_db_jobs()

    def test_should_report_success_for_one_working_scheduler(self):
        with create_session() as session:
            self.scheduler_job = SchedulerJob()
            self.scheduler_job.state = State.RUNNING
            session.add(self.scheduler_job)
            session.commit()
            self.scheduler_job.heartbeat()

        with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
            jobs_command.check(
                self.parser.parse_args(
                    ['jobs', 'check', '--job-type', 'SchedulerJob']))
        self.assertIn("Found one alive job.", temp_stdout.getvalue())

    def test_should_report_success_for_one_working_scheduler_with_hostname(self):
        with create_session() as session:
            self.scheduler_job = SchedulerJob()
            self.scheduler_job.state = State.RUNNING
            self.scheduler_job.hostname = 'HOSTNAME'
            session.add(self.scheduler_job)
            session.commit()
            self.scheduler_job.heartbeat()

        with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
            jobs_command.check(
                self.parser.parse_args([
                    'jobs', 'check', '--job-type', 'SchedulerJob',
                    '--hostname', 'HOSTNAME'
                ]))
        self.assertIn("Found one alive job.", temp_stdout.getvalue())

    def test_should_report_success_for_ha_schedulers(self):
        scheduler_jobs = []
        with create_session() as session:
            for _ in range(3):
                scheduler_job = SchedulerJob()
                scheduler_job.state = State.RUNNING
                session.add(scheduler_job)
                scheduler_jobs.append(scheduler_job)
            session.commit()
            scheduler_job.heartbeat()

        with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
            jobs_command.check(
                self.parser.parse_args([
                    'jobs', 'check', '--job-type', 'SchedulerJob', '--limit',
                    '100', '--allow-multiple'
                ]))
        self.assertIn("Found 3 alive jobs.", temp_stdout.getvalue())
        for scheduler_job in scheduler_jobs:
            if scheduler_job.processor_agent:
                scheduler_job.processor_agent.end()

    def test_should_ignore_not_running_jobs(self):
        scheduler_jobs = []
        with create_session() as session:
            for _ in range(3):
                scheduler_job = SchedulerJob()
                scheduler_job.state = State.SHUTDOWN
                session.add(scheduler_job)
                scheduler_jobs.append(scheduler_job)
            session.commit()
        # No alive jobs found.
        with pytest.raises(SystemExit, match=r"No alive jobs found."):
            jobs_command.check(self.parser.parse_args(['jobs', 'check']))
        for scheduler_job in scheduler_jobs:
            if scheduler_job.processor_agent:
                scheduler_job.processor_agent.end()

    def test_should_raise_exception_for_multiple_scheduler_on_one_host(self):
        scheduler_jobs = []
        with create_session() as session:
            for _ in range(3):
                scheduler_job = SchedulerJob()
                scheduler_job.state = State.RUNNING
                scheduler_job.hostname = 'HOSTNAME'
                session.add(scheduler_job)
                scheduler_jobs.append(scheduler_job)
            session.commit()
            scheduler_job.heartbeat()

        with pytest.raises(SystemExit,
                           match=r"Found 3 alive jobs. Expected only one."):
            jobs_command.check(
                self.parser.parse_args([
                    'jobs',
                    'check',
                    '--job-type',
                    'SchedulerJob',
                    '--limit',
                    '100',
                ]))
        for scheduler_job in scheduler_jobs:
            if scheduler_job.processor_agent:
                scheduler_job.processor_agent.end()

    def test_should_raise_exception_for_allow_multiple_and_limit_1(self):
        with pytest.raises(
                SystemExit,
                match=r"To use option --allow-multiple, you must set the limit to a value greater than 1.",
        ):
            jobs_command.check(
                self.parser.parse_args(['jobs', 'check', '--allow-multiple']))
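
Outside the unittest harness, the command these tests exercise can be driven the same way: parse the `airflow jobs check` arguments with cli_parser and hand them to jobs_command.check. A minimal sketch, assuming an initialised Airflow metadata database:

from airflow.cli import cli_parser
from airflow.cli.commands import jobs_command

parser = cli_parser.get_parser()
args = parser.parse_args(
    ['jobs', 'check', '--job-type', 'SchedulerJob', '--limit', '100', '--allow-multiple'])
# Prints how many alive jobs were found; raises SystemExit with
# "No alive jobs found." when there is no running SchedulerJob.
jobs_command.check(args)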