Example #1
0
def enabled_job(job_key):
    """Temporarily enable the job matching ``job_key``; re-disable it on exit.

    Generator intended for context-manager use (presumably wrapped with
    ``contextlib.contextmanager`` at the decoration site — TODO confirm).
    The job is re-disabled in ``finally`` even if the wrapped block raises.

    :param job_key: key identifying the job to enable.
    :raises ValueError: if no job with ``job_key`` exists.
    """
    all_jobs = Job.get_all(include_disabled=True)
    # A bare next() here would raise StopIteration on a missing key, which
    # PEP 479 turns into an opaque RuntimeError inside a generator body.
    # Use a default and fail with an explicit, descriptive error instead.
    job = next((j for j in all_jobs if j.key == job_key), None)
    if job is None:
        raise ValueError(f'No job found with key {job_key}')
    Job.update_disabled(job_id=job.id, disable=False)
    std_commit(allow_test_environment=True)
    try:
        yield
    finally:
        # Restore the disabled state no matter how the wrapped block exits.
        Job.update_disabled(job_id=job.id, disable=True)
        std_commit(allow_test_environment=True)
Example #2
0
    def test_alert_on_job_failure(self):
        """A failing job emails the admin; a successful job does not."""
        admin_uid = app.config['EMAIL_DIABLO_ADMIN_UID']
        baseline = _get_email_count(admin_uid)
        # A job that succeeds must not generate an alert email.
        CanvasJob(simply_yield).run()
        assert _get_email_count(admin_uid) == baseline
        # A job that fails must generate exactly one alert email.
        doomed_job = next(
            (j for j in Job.get_all(include_disabled=True)
             if j.key == DoomedToFailure.key()))
        # Ensure the doomed job is enabled before running it.
        Job.update_disabled(job_id=doomed_job.id, disable=False)
        std_commit(allow_test_environment=True)
        DoomedToFailure(simply_yield).run()
        # Failure alerts do not go through the queue.
        assert _get_email_count(admin_uid) == baseline + 1
def job_schedule():
    """Return a JSON response describing the job schedule.

    Includes global scheduler settings plus one entry per configured job.
    Jobs whose key does not match any available job class are skipped.
    """
    api_json = {
        'autoStart': app.config['JOBS_AUTO_START'],
        'jobs': [],
        'secondsBetweenJobsCheck':
        app.config['JOBS_SECONDS_BETWEEN_PENDING_CHECK'],
        'startedAt': to_isoformat(background_job_manager.get_started_at()),
    }
    # Index available job classes by key once, instead of re-scanning the
    # class list with a linear search for every configured job.
    job_classes_by_key = {
        j.key(): j for j in BackgroundJobManager.available_job_classes()
    }
    for job in Job.get_all(include_disabled=True):
        job_class = job_classes_by_key.get(job.key)
        if job_class:
            api_json['jobs'].append({
                **job.to_api_json(),
                **_job_class_to_json(job_class),
            })
    return tolerant_jsonify(api_json)
Example #4
0
    def start(self, app):
        """Continuously run, executing pending jobs per time interval.

        It is intended behavior that ScheduleThread does not run missed jobs. For example, if you register a job that
        should run every minute and yet JOBS_SECONDS_BETWEEN_PENDING_CHECK is set to one hour, then your job won't run
        60 times at each interval. It will run once.
        """
        # No-op if a manager is already running; otherwise flag this one as live.
        if self.is_running():
            return
        else:
            self.monitor.notify(is_running=True)
            # NOTE(review): naive local time, not UTC — confirm consumers expect that.
            self.started_at = datetime.now()

        class JobRunnerThread(threading.Thread):

            # Class-level flag recording whether the runner loop is live.
            active = False

            @classmethod
            def run(cls):
                cls.active = True
                # `self` and `interval` are captured from the enclosing start()
                # scope; `interval` is assigned below, before the thread starts.
                while self.monitor.is_running():
                    schedule.run_pending()
                    time.sleep(interval)
                # Loop exited: the monitor says we should stop; drop all jobs.
                schedule.clear()
                cls.active = False

        interval = app.config['JOBS_SECONDS_BETWEEN_PENDING_CHECK']
        all_jobs = Job.get_all()
        app.logger.info(f"""

            Starting background job manager.
            Seconds between pending jobs check = {interval}
            Jobs:
                {[job.to_api_json() for job in all_jobs]}

            """)

        # If running on EC2, tell the database that this instance is the one now running scheduled jobs.
        instance_id = os.environ.get('EC2_INSTANCE_ID')
        if instance_id:
            rds.execute(
                'DELETE FROM job_runner; INSERT INTO job_runner (ec2_instance_id) VALUES (%s);',
                params=(instance_id, ),
            )

        # Clean up history for any older jobs that got lost.
        JobHistory.fail_orphans()

        if all_jobs:
            # Register each configured job with the scheduler, then start the
            # daemon thread that services pending jobs until the monitor stops.
            for job_config in all_jobs:
                self._load_job(
                    app=app,
                    job_key=job_config.key,
                    schedule_type=job_config.job_schedule_type,
                    schedule_value=job_config.job_schedule_value,
                )

            self.continuous_thread = JobRunnerThread(daemon=True)
            self.continuous_thread.start()
        else:
            # logger.warn is deprecated; logger.warning is the supported name.
            app.logger.warning('No jobs. Nothing scheduled.')