def test_delete_all(client_fixture: fixture) -> None:
    """Verify /api/delete removes all scheduled jobs.

    Fix: dropped needless ``f`` prefixes on placeholder-free URL strings and
    corrected the stale comment that contradicted the ``== 0`` assertion.
    """
    page = client_fixture.get("/api/delete")
    assert page.json == {"message": "Scheduler: all jobs deleted!"}
    assert page.status_code == 200

    # no jobs should be left after deleting from an empty scheduler
    assert len(atlas_scheduler.get_jobs()) == 0

    # add a job with no args (it carries no task id)
    from .conftest import demo_task

    atlas_scheduler.add_job(
        func=demo_task,
        trigger="interval",
        seconds=10,
        id="test job 2",
        name="test job 2",
        replace_existing=True,
    )

    # add a real task job and delete everything again
    p_id, t_id = create_demo_task()
    page = client_fixture.get(f"/api/add/{t_id}")
    assert page.json == {"message": "Scheduler: task job added!"}
    assert page.status_code == 200

    page = client_fixture.get("/api/delete")
    assert page.json == {"message": "Scheduler: all jobs deleted!"}
    assert page.status_code == 200
    # one job should be left
    assert len(atlas_scheduler.get_jobs()) == 1
def test_schedule(client_fixture: fixture) -> None:
    """Verify /api/schedule returns a run-count summary of task jobs.

    Fix: dropped the needless ``f`` prefix on the placeholder-free
    ``"/api/schedule"`` string.
    """
    p_id, t_id = create_demo_task()

    page = client_fixture.get(f"/api/add/{t_id}")
    assert page.json == {"message": "Scheduler: task job added!"}
    assert page.status_code == 200

    # adding a job should flip the task to enabled
    t = Task.query.get(t_id)
    assert t.enabled == 1

    # a task dated in the past (year 2021) should still be addable
    p_id, t_id = create_demo_task(2021)

    page = client_fixture.get(f"/api/add/{t_id}")
    assert page.json == {"message": "Scheduler: task job added!"}
    assert page.status_code == 200

    # check that status is now enabled
    t = Task.query.get(t_id)
    assert t.enabled == 1

    # add a maintenance task (no args; presumably excluded from the
    # schedule summary — confirmed by the count assertion below)
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="interval",
        hours=1,
        id="job_sync",
        name="job_sync",
        replace_existing=True,
    )

    page = client_fixture.get("/api/schedule")
    assert {"case": "now", "count": 0} in page.json
    assert page.status_code == 200
def test_details(client_fixture: fixture) -> None:
    """Verify /api/details describes the scheduled task jobs.

    Fix: dropped the needless ``f`` prefix on the placeholder-free
    ``"/api/details"`` string.
    """
    p_id, t_id = create_demo_task()
    page = client_fixture.get(f"/api/add/{t_id}")
    assert page.json == {"message": "Scheduler: task job added!"}
    assert page.status_code == 200

    # check that status is now enabled
    t = Task.query.get(t_id)
    assert t.enabled == 1

    # job with no args — should not break the details endpoint
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="interval",
        seconds=10,
        id="test job 3",
        name="test job 3",
        replace_existing=True,
    )

    page = client_fixture.get("/api/details")
    job = atlas_scheduler.get_job(f"{p_id}-{t.id}-cron")
    text = page.json[0]

    # next_run_time varies between runs, so drop it before comparing
    del text["next_run_time"]
    assert text == {
        "name": job.name,
        "job_id": job.id,
        "id": job.args[0],
    }
    assert page.status_code == 200
# Example 4
def test_job_added(event_fixture: fixture, caplog: fixture) -> None:
    """Adding a job logs against the task when its id is valid."""
    # grab a demo task to attach the job to
    p_id, t_id = create_demo_task()

    # schedule a one-off job for the task
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="date",
        id=f"{p_id}-{t_id}-ooff",
        name="test job 2",
        replace_existing=True,
    )
    # allow the background listener time to write its logs
    time.sleep(1)

    added_log = (
        TaskLog.query.filter_by(task_id=t_id, status_id=6)
        .filter(TaskLog.message.like("%Job added. Scheduled for:%"))  # type: ignore[attr-defined,union-attr]
        .first()
    )
    assert added_log is not None

    assert 'Added job "test job 2"' in caplog.text

    assert all(
        record.levelname not in ["CRITICAL", "ERROR"] for record in caplog.records
    )

    # repeat with a task id that does not exist in the db
    p_id, t_id = (9, 9)
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="date",
        id=f"{p_id}-{t_id}-ooff",
        name="test job 2",
        replace_existing=True,
    )

    time.sleep(1)

    added_log = (
        TaskLog.query.filter_by(task_id=t_id, status_id=6)
        .filter(TaskLog.message.like("%Job added. Scheduled for:%"))  # type: ignore[attr-defined,union-attr]
        .first()
    )
    assert added_log is None

    assert 'Added job "test job 2"' in caplog.text
def run_task_delay(task_id: int, minutes: str) -> Response:
    """Schedule a one-off run of the task ``minutes`` minutes from now."""
    task = Task.query.filter_by(id=task_id).first()
    project = task.project

    # unique hash suffix lets several delayed runs of one task coexist
    my_hash = hashlib.sha256()
    my_hash.update(str(time.time()).encode("utf-8"))

    run_at = datetime.datetime.now() + datetime.timedelta(minutes=int(minutes))
    job_id = f"{project.id}-{task.id}-{my_hash.hexdigest()[:10]}"
    job_name = f"(one off delay) {project.name}: {task.name}"

    atlas_scheduler.add_job(
        func=scheduler_task_runner,
        trigger="date",
        run_date=run_at,
        args=[str(task_id)],
        id=job_id,
        name=job_name,
    )

    return jsonify({"message": "Scheduler: task scheduled!"})
# Example 6
def test_job_missed(event_fixture: fixture, caplog: fixture) -> None:
    """Jobs scheduled in the past are reported as missed."""
    # grace time is 30 seconds, so a run date one minute ago forces a miss
    # get a task id; the task itself is not used
    p_id, t_id = create_demo_task()

    # only interval and cron triggers can capture a missed job and add it
    # to the logs; one-off jobs disappear after running
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="date",
        id=f"{p_id}-{t_id}-ooff",
        name="test job 2",
        args=[str(t_id)],
        run_date=datetime.now() - timedelta(minutes=1),
        replace_existing=True,
    )
    # let the background listener write its logs
    time.sleep(1)

    # the miss should be logged against the task
    missed = TaskLog.query.filter_by(task_id=t_id, status_id=6, error=1).first()
    assert "Job missed. Scheduled for:" in missed.message

    # check the scheduler's own log output
    assert "was missed by 0:01" in caplog.text
    for entry in caplog.records:
        assert entry.levelname not in ["CRITICAL", "ERROR"]

    caplog.clear()

    # a job id without the project-task pattern is still reported missed
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="date",
        id="asdf",
        name="test job 2",
        args=[str(t_id)],
        run_date=datetime.now() - timedelta(minutes=1),
        replace_existing=True,
    )
    time.sleep(1)
    assert "was missed by 0:01" in caplog.text
    caplog.clear()

    # a task id that does not exist still gets a missed-job task log
    p_id, t_id = (9, 9)
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="date",
        id=f"{p_id}-{t_id}-ooff",
        name="test job 2",
        args=[str(t_id)],
        run_date=datetime.now() - timedelta(minutes=1),
        replace_existing=True,
    )
    # wait for logs to be added by the background process
    time.sleep(1)
    missed = TaskLog.query.filter_by(task_id=t_id, status_id=6, error=1).first()
    assert "Job missed. Scheduled for:" in missed.message
def test_delete_orphans(client_fixture: fixture) -> None:
    """Verify /api/delete-orphans removes jobs whose task no longer exists.

    Fix: dropped needless ``f`` prefixes on placeholder-free URL strings.
    """
    # run without any orphans
    page = client_fixture.get("/api/delete-orphans")
    assert page.json == {"message": "Scheduler: orphans deleted!"}
    assert page.status_code == 200

    # manually add a job whose arg points at a nonexistent task id ("99"),
    # making it an orphan
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="interval",
        seconds=10,
        id="test job 2",
        name="test job 2",
        args=["99"],
        replace_existing=True,
    )

    assert len(atlas_scheduler.get_jobs()) == 1

    # a job with no args is not treated as an orphan
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="interval",
        seconds=10,
        id="test job 3",
        name="test job 3",
        replace_existing=True,
    )

    assert len(atlas_scheduler.get_jobs()) == 2

    page = client_fixture.get("/api/delete-orphans")
    assert page.json == {"message": "Scheduler: orphans deleted!"}
    assert page.status_code == 200

    # only the orphaned job was removed
    assert len(atlas_scheduler.get_jobs()) == 1
def test_scheduled(client_fixture: fixture) -> None:
    """Verify /api/scheduled lists the task ids of scheduled jobs.

    Fix: dropped the needless ``f`` prefix on the placeholder-free
    ``"/api/scheduled"`` string.
    """
    p_id, t_id = create_demo_task()
    page = client_fixture.get(f"/api/add/{t_id}")
    assert page.json == {"message": "Scheduler: task job added!"}
    assert page.status_code == 200

    # check that status is now enabled
    t = Task.query.get(t_id)
    assert t.enabled == 1

    # a job with no args should not appear in the scheduled list
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="interval",
        seconds=10,
        id="test job 3",
        name="test job 3",
        replace_existing=True,
    )

    # the demo task has three schedules, so its id appears three times
    page = client_fixture.get("/api/scheduled")
    assert page.json == [t_id, t_id, t_id]
    assert page.status_code == 200
# Example 9
def test_job_error(event_fixture: fixture, caplog: fixture) -> None:
    """A failing job produces an error log only for a valid task id."""
    # grab a demo task to attach the job to
    p_id, t_id = create_demo_task()

    # schedule a job that raises
    atlas_scheduler.add_job(
        func=bad_demo_task,
        trigger="date",
        id=f"{p_id}-{t_id}-ooff",
        name="test job 2",
        replace_existing=True,
    )

    time.sleep(1)

    # the failure should be logged against the task
    error_log = (
        TaskLog.query.filter_by(task_id=t_id, status_id=6, error=1)
        .filter(TaskLog.message.like("%Job error. Scheduled for:%"))  # type: ignore[attr-defined,union-attr]
        .first()
    )
    assert error_log is not None

    assert "raised an exception" in caplog.text

    caplog.clear()

    # an invalid task id produces no task log
    p_id, t_id = (9, 9)
    atlas_scheduler.add_job(
        func=bad_demo_task,
        trigger="date",
        id=f"{p_id}-{t_id}-ooff",
        name="test job 2",
        replace_existing=True,
    )
    time.sleep(1)
    error_log = (
        TaskLog.query.filter_by(task_id=t_id, status_id=6, error=1)
        .filter(TaskLog.message.like("%Job error. Scheduled for:%"))  # type: ignore[attr-defined,union-attr]
        .first()
    )
    assert error_log is None

    # a job id without the project-task pattern should not blow up
    atlas_scheduler.add_job(
        func=bad_demo_task,
        trigger="date",
        id="ooff",
        name="test job 2",
        replace_existing=True,
    )

    time.sleep(1)
# Example 10
def test_job_removed(event_fixture: fixture, caplog: fixture) -> None:
    """Removing a job writes a task log only for valid task ids."""

    def removal_log(task_id: int):
        # fetch the "Job removed." log entry for the given task, if any
        return TaskLog.query.filter_by(
            task_id=task_id, status_id=6, message="Job removed."
        ).first()

    def assert_no_error_records() -> None:
        # the scheduler should never log at error level during removal
        for entry in caplog.records:
            assert entry.levelname not in ["CRITICAL", "ERROR"]

    # grab a demo task to attach the job to
    p_id, t_id = create_demo_task()

    # a one-off job is removed after it runs
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="date",
        id=f"{p_id}-{t_id}-ooff",
        name="test job 2",
        replace_existing=True,
    )
    # give the scheduler time to run and remove the job
    time.sleep(1)

    assert "Removed job" in caplog.text
    assert_no_error_records()
    assert removal_log(t_id) is not None
    caplog.clear()

    # invalid task id: removal appears in caplog but not in the task log
    p_id, t_id = (9, 9)
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="date",
        id=f"{p_id}-{t_id}-ooff",
        name="test job 2",
        replace_existing=True,
    )
    time.sleep(1)

    assert "Removed job" in caplog.text
    assert_no_error_records()
    assert removal_log(t_id) is None
    caplog.clear()

    # a job id without the project-task pattern behaves like an invalid id
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="date",
        id="ooff",
        name="test job 2",
        replace_existing=True,
    )
    time.sleep(1)

    assert "Removed job" in caplog.text
    assert_no_error_records()
    assert removal_log(t_id) is None
# Example 11
def test_job_executed(event_fixture: fixture, caplog: fixture) -> None:
    """An executed job is logged against its task when the id is valid."""

    def execution_log(task_id: int):
        # "excecuted" matches the message the app actually writes
        return (
            TaskLog.query.filter_by(task_id=task_id, status_id=6)
            .filter(TaskLog.message.like("%Job excecuted in%"))  # type: ignore[attr-defined,union-attr]
            .first()
        )

    def assert_no_error_records() -> None:
        # no error-level log records should be produced
        for entry in caplog.records:
            assert entry.levelname not in ["CRITICAL", "ERROR"]

    # grab a demo task to attach the job to
    p_id, t_id = create_demo_task()

    atlas_scheduler.add_job(
        func=demo_task,
        trigger="date",
        id=f"{p_id}-{t_id}-ooff",
        name="test job 2",
        replace_existing=True,
    )
    # give the job time to execute; the second-to-last task log is the
    # execution, the last is the job removal
    time.sleep(1)

    assert execution_log(t_id) is not None

    assert "Running job" in caplog.text
    assert "executed successfully" in caplog.text
    assert_no_error_records()
    caplog.clear()

    # an invalid task id leaves no execution log
    p_id, t_id = (9, 9)
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="date",
        id=f"{p_id}-{t_id}-ooff",
        name="test job 2",
        replace_existing=True,
    )

    time.sleep(1)

    assert execution_log(t_id) is None
    assert_no_error_records()
    caplog.clear()

    # a job id without the project-task pattern should not error either
    atlas_scheduler.add_job(
        func=demo_task,
        trigger="date",
        id="ooff",
        name="test job 2",
        replace_existing=True,
    )

    time.sleep(1)

    assert_no_error_records()
def scheduler_add_task(task_id: int) -> bool:
    """Create job(s) for a task in the scheduler.

    Marks the task enabled and adds one scheduler job per trigger type
    enabled on the task's project (cron, interval, one-off).

    :param task_id: id of task to create a schedule for
    :returns: False if no task with that id exists, otherwise True

    *Parameters for APScheduler*

    :func: function to run
    :trigger: date, interval or cron
    :id: used to match job up to db
    :name: desc of job
    :misfire_grace_time: seconds a job can run late
    :coalesce: merge multiple pending runs into one
    :max_instances: max concurrent runs allowed
    :next_run_time: when to start schedule (None: job paused)
    :jobstore: alias of jobstore to use
    :executor: alias of executor to use
    :replace_existing: true to replace jobs with same id

    *Parameters for Cron Jobs*

    :year: 4 digit year
    :month: 1-12
    :day: 1-31
    :week: 1-53
    :day_of_week: 0-6 or mon,tue,wed,thu,fri,sat,sun
    :hour: 0-23
    :minute: 0-59
    :second: 0-59
    :start_date: (datetime) soonest start
    :end_date: (datetime) latest run

    *Parameters for Interval Jobs*

    :weeks: (int) number of weeks between runs
    :days: (int) number of days between runs
    :hours: (int) number of hours between runs
    :minutes: (int) number of minutes between runs
    :seconds: (int) number of seconds between runs
    :start_date: (datetime) soonest start date
    :end_date: (datetime) latest run date

    *Parameters for One Off Jobs*

    :run_date: (datetime) when to run

    *Notes*

    If multiple triggers are specified on the task a schedule will be added for each trigger
    type. So it is possible to have multiple triggers per task. Because of this, the id
    assigned to the job is project_id-task_id-<time hash>. The tasks id is sent as an arg
    to the job.

    """
    task = Task.query.filter_by(id=task_id).first()

    # fyi, task must be re-queried after each type is
    # scheduled, as the scheduler events will modify
    # the task causing a session break!!
    if not task:
        return False

    task.enabled = 1
    db.session.commit()

    # hash is only consumed by the one-off branch below, to give each
    # one-off run a unique job id
    my_hash = hashlib.sha256()

    # schedule cron
    if task.project.cron == 1:
        project = task.project
        atlas_scheduler.add_job(
            func=scheduler_task_runner,
            trigger="cron",
            second=project.cron_sec,
            minute=project.cron_min,
            hour=project.cron_hour,
            year=project.cron_year,
            month=project.cron_month,
            week=project.cron_week,
            day=project.cron_day,
            day_of_week=project.cron_week_day,
            start_date=project.cron_start_date,
            end_date=project.cron_end_date,
            args=[str(task_id)],
            id=str(project.id) + "-" + str(task.id) + "-cron",
            name="(cron) " + project.name + ": " + task.name,
            replace_existing=True,
        )

    # schedule interval
    # re-query (see note above) before reading the task again
    task = Task.query.filter_by(id=task_id).first()
    if task.project.intv == 1:
        project = task.project
        # only the unit matching intv_type gets the interval value
        # (falling back to 999 when intv_value is unset); all others are 0
        weeks = project.intv_value or 999 if project.intv_type == "w" else 0
        days = project.intv_value or 999 if project.intv_type == "d" else 0
        hours = project.intv_value or 999 if project.intv_type == "h" else 0
        minutes = project.intv_value or 999 if project.intv_type == "m" else 0
        seconds = project.intv_value or 999 if project.intv_type == "s" else 0

        atlas_scheduler.add_job(
            func=scheduler_task_runner,
            trigger="interval",
            seconds=seconds,
            minutes=minutes,
            hours=hours,
            days=days,
            weeks=weeks,
            start_date=project.intv_start_date,
            end_date=project.intv_end_date,
            args=[str(task_id)],
            id=str(project.id) + "-" + str(task.id) + "-intv",
            # NOTE(review): "(inverval)" is misspelled, but other code may
            # match on this job-name prefix — confirm before fixing
            name="(inverval) " + project.name + ": " + task.name,
            replace_existing=True,
        )

    # ooff tasks use a hash to identify jobs as a job can have multiple one-off runs scheduled.
    task = Task.query.filter_by(id=task_id).first()
    if task.project.ooff == 1:
        project = task.project
        my_hash.update(str(time.time()).encode("utf-8"))
        atlas_scheduler.add_job(
            func=scheduler_task_runner,
            trigger="date",
            run_date=project.ooff_date,
            args=[str(task_id)],
            id=str(project.id) + "-" + str(task.id) + "-" +
            my_hash.hexdigest()[:10],
            name="(one off) " + project.name + ": " + task.name,
            replace_existing=True,
        )

    return True