Example #1
    def configure(self, gconfig={}, prefix='apscheduler.', **options):
        """
        Reconfigures the scheduler with the given options. Can only be done when the scheduler isn't running.

        :param dict gconfig: a "global" configuration dictionary whose values can be overridden by keyword arguments to
                             this method
        :param str|unicode prefix: pick only those keys from ``gconfig`` that are prefixed with this string
                                   (pass an empty string or ``None`` to use all keys)
        :raises SchedulerAlreadyRunningError: if the scheduler is already running
        """

        if self.running:
            raise SchedulerAlreadyRunningError

        # If a non-empty prefix was given, strip it from the keys in the global configuration dict
        if prefix:
            prefixlen = len(prefix)
            gconfig = dict((key[prefixlen:], value)
                           for key, value in six.iteritems(gconfig)
                           if key.startswith(prefix))

        # Create a structure from the dotted options (e.g. "a.b.c = d" -> {'a': {'b': {'c': 'd'}}})
        config = {}
        for key, value in six.iteritems(gconfig):
            parts = key.split('.')
            parent = config
            key = parts.pop(0)
            while parts:
                parent = parent.setdefault(key, {})
                key = parts.pop(0)
            parent[key] = value

        # Override any options with explicit keyword arguments
        config.update(options)
        self._configure(config)
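A minimal usage sketch (assuming a freshly created BackgroundScheduler; the configuration keys and values are illustrative): keys carrying the given prefix are picked out of the global dict, and explicit keyword arguments override them.

from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler()
# 'apscheduler.'-prefixed keys are taken from the dict; the explicit timezone
# keyword argument overrides anything the dict might provide.
scheduler.configure(
    {'apscheduler.job_defaults.max_instances': '2'},
    prefix='apscheduler.',
    timezone='UTC',
)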
Example #2
    def print_jobs(self, jobstore=None, out=None):
        """
        print_jobs(jobstore=None, out=sys.stdout)

        Prints out a textual listing of all jobs currently scheduled on either all job stores or just a specific one.

        :param str|unicode jobstore: alias of the job store, ``None`` to list jobs from all stores
        :param file out: a file-like object to print to (defaults to **sys.stdout** if nothing is given)
        """

        out = out or sys.stdout
        with self._jobstores_lock:
            if self._pending_jobs:
                print(six.u('Pending jobs:'), file=out)
                for job, jobstore_alias, replace_existing in self._pending_jobs:
                    if jobstore in (None, jobstore_alias):
                        print(six.u('    %s') % job, file=out)

            for alias, store in six.iteritems(self._jobstores):
                if jobstore in (None, alias):
                    print(six.u('Jobstore %s:') % alias, file=out)
                    jobs = store.get_all_jobs()
                    if jobs:
                        for job in jobs:
                            print(six.u('    %s') % job, file=out)
                    else:
                        print(six.u('    No scheduled jobs'), file=out)
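As a quick illustration (a sketch assuming the scheduler from the previous example with a few jobs added), the listing can go to sys.stdout or be captured in any file-like object:

import io

scheduler.print_jobs()               # writes the listing to sys.stdout
buffer = io.StringIO()
scheduler.print_jobs(out=buffer)     # or capture it in a buffer instead
listing = buffer.getvalue()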
Example #3
    def get_jobs(self, jobstore=None, pending=None):
        """
        Returns a list of pending jobs (if the scheduler hasn't been started yet) and scheduled jobs, either from a
        specific job store or from all of them.

        :param str|unicode jobstore: alias of the job store
        :param bool pending: ``False`` to leave out pending jobs (jobs that are waiting for the scheduler start to be
                             added to their respective job stores), ``True`` to only include pending jobs, anything else
                             to return both
        :rtype: list[Job]
        """

        with self._jobstores_lock:
            jobs = []

            if pending is not False:
                for job, alias, replace_existing in self._pending_jobs:
                    if jobstore is None or alias == jobstore:
                        jobs.append(job)

            if pending is not True:
                for alias, store in six.iteritems(self._jobstores):
                    if jobstore is None or alias == jobstore:
                        jobs.extend(store.get_all_jobs())

            return jobs
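A short sketch of how the pending/scheduled split plays out (assuming jobs were added before the scheduler was started):

pending_only = scheduler.get_jobs(pending=True)     # only jobs waiting for start()
stored_only = scheduler.get_jobs(pending=False)     # only jobs already in job stores
everything = scheduler.get_jobs()                   # both kinds
default_store = scheduler.get_jobs(jobstore='default')  # restrict to one store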
Example #4
    def __init__(self, year=None, month=None, day=None, week=None, day_of_week=None, hour=None, minute=None,
                 second=None, start_date=None, end_date=None, timezone=None):
        if timezone:
            self.timezone = astimezone(timezone)
        elif start_date and start_date.tzinfo:
            self.timezone = start_date.tzinfo
        elif end_date and end_date.tzinfo:
            self.timezone = end_date.tzinfo
        else:
            self.timezone = get_localzone()

        self.start_date = convert_to_datetime(start_date, self.timezone, 'start_date')
        self.end_date = convert_to_datetime(end_date, self.timezone, 'end_date')

        values = dict((key, value) for (key, value) in six.iteritems(locals())
                      if key in self.FIELD_NAMES and value is not None)
        self.fields = []
        assign_defaults = False
        for field_name in self.FIELD_NAMES:
            if field_name in values:
                exprs = values.pop(field_name)
                is_default = False
                assign_defaults = not values
            elif assign_defaults:
                exprs = DEFAULT_VALUES[field_name]
                is_default = True
            else:
                exprs = '*'
                is_default = True

            field_class = self.FIELDS_MAP[field_name]
            field = field_class(field_name, exprs, is_default)
            self.fields.append(field)
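To see the defaulting rule above in action (a sketch, not part of the source): fields more specific than the most specific one you pass fall back to their defaults, while everything less specific becomes '*'.

from apscheduler.triggers.cron import CronTrigger

# hour=3 is the only explicit field, so year/month/day/week/day_of_week become
# '*' and minute/second fall back to 0 -- i.e. fire every day at 03:00:00.
trigger = CronTrigger(hour=3, timezone='UTC')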
Example #5
    def start(self):
        """
        Starts the scheduler. The details of this process depend on the implementation.

        :raises SchedulerAlreadyRunningError: if the scheduler is already running
        """

        if self.running:
            raise SchedulerAlreadyRunningError

        with self._executors_lock:
            # Create a default executor if nothing else is configured
            if 'default' not in self._executors:
                self.add_executor(self._create_default_executor(), 'default')

            # Start all the executors
            for alias, executor in six.iteritems(self._executors):
                executor.start(self, alias)

        with self._jobstores_lock:
            # Create a default job store if nothing else is configured
            if 'default' not in self._jobstores:
                self.add_jobstore(self._create_default_jobstore(), 'default')

            # Start all the job stores
            for alias, store in six.iteritems(self._jobstores):
                store.start(self, alias)

            # Schedule all pending jobs
            for job, jobstore_alias, replace_existing in self._pending_jobs:
                self._real_add_job(job, jobstore_alias, replace_existing,
                                   False)
            del self._pending_jobs[:]

        self._stopped = False
        self._logger.info('Scheduler started')

        # Notify listeners that the scheduler has been started
        self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_START))
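From the caller's side, the startup sequence can be sketched like this (assuming a BlockingScheduler; its start() runs the logic above and then enters the main loop):

from apscheduler.schedulers.blocking import BlockingScheduler

def tick():
    print('tick')

scheduler = BlockingScheduler()
scheduler.add_job(tick, 'interval', seconds=10)  # queued as a pending job
scheduler.start()  # default executor and job store are created, pending jobs are stored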
Example #6
    def __init__(self,
                 year=None,
                 month=None,
                 day=None,
                 week=None,
                 day_of_week=None,
                 hour=None,
                 minute=None,
                 second=None,
                 start_date=None,
                 end_date=None,
                 timezone=None):
        if timezone:
            self.timezone = astimezone(timezone)
        elif start_date and start_date.tzinfo:
            self.timezone = start_date.tzinfo
        elif end_date and end_date.tzinfo:
            self.timezone = end_date.tzinfo
        else:
            self.timezone = get_localzone()

        self.start_date = convert_to_datetime(start_date, self.timezone,
                                              'start_date')
        self.end_date = convert_to_datetime(end_date, self.timezone,
                                            'end_date')

        values = dict((key, value) for (key, value) in six.iteritems(locals())
                      if key in self.FIELD_NAMES and value is not None)
        self.fields = []
        assign_defaults = False
        for field_name in self.FIELD_NAMES:
            if field_name in values:
                exprs = values.pop(field_name)
                is_default = False
                assign_defaults = not values
            elif assign_defaults:
                exprs = DEFAULT_VALUES[field_name]
                is_default = True
            else:
                exprs = '*'
                is_default = True

            field_class = self.FIELDS_MAP[field_name]
            field = field_class(field_name, exprs, is_default)
            self.fields.append(field)
Example #7
    def _real_add_job(self, job, jobstore_alias, replace_existing, wakeup):
        """
        :param Job job: the job to add
        :param bool replace_existing: ``True`` to use update_job() in case the job already exists in the store
        :param bool wakeup: ``True`` to wake up the scheduler after adding the job
        """

        # Fill in undefined values with defaults
        replacements = {}
        for key, value in six.iteritems(self._job_defaults):
            if not hasattr(job, key):
                replacements[key] = value

        # Calculate the next run time if there is none defined
        if not hasattr(job, 'next_run_time'):
            now = datetime.now(self.timezone)
            replacements['next_run_time'] = job.trigger.get_next_fire_time(
                None, now)

        # Apply any replacements
        job._modify(**replacements)

        # Add the job to the given job store
        store = self._lookup_jobstore(jobstore_alias)
        try:
            store.add_job(job)
        except ConflictingIdError:
            if replace_existing:
                store.update_job(job)
            else:
                raise

        # Mark the job as no longer pending
        job._jobstore_alias = jobstore_alias

        # Notify listeners that a new job has been added
        event = JobEvent(EVENT_JOB_ADDED, job.id, jobstore_alias)
        self._dispatch_event(event)

        self._logger.info('Added job "%s" to job store "%s"', job.name,
                          jobstore_alias)

        # Notify the scheduler about the new job
        if wakeup:
            self.wakeup()
Example #8
    def remove_all_jobs(self, jobstore=None):
        """
        Removes all jobs from the specified job store, or all job stores if none is given.

        :param str|unicode jobstore: alias of the job store
        """

        with self._jobstores_lock:
            if jobstore:
                self._pending_jobs = [
                    pending for pending in self._pending_jobs
                    if pending[1] != jobstore
                ]
            else:
                self._pending_jobs = []

            for alias, store in six.iteritems(self._jobstores):
                if jobstore in (None, alias):
                    store.remove_all_jobs()

        self._dispatch_event(SchedulerEvent(EVENT_ALL_JOBS_REMOVED, jobstore))
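For instance (a sketch reusing the scheduler from the earlier examples):

scheduler.remove_all_jobs('default')   # clear a single job store by alias
scheduler.remove_all_jobs()            # or wipe every configured job store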
Example #9
    def _lookup_job(self, job_id, jobstore_alias):
        """
        Finds a job by its ID.

        :type job_id: str
        :param str jobstore_alias: alias of a job store to look in
        :return tuple[Job, str]: a tuple of job, jobstore alias (jobstore alias is None in case of a pending job)
        :raises JobLookupError: if no job by the given ID is found.
        """

        # Check if the job is among the pending jobs
        for job, alias, replace_existing in self._pending_jobs:
            if job.id == job_id:
                return job, None

        # Look in all job stores
        for alias, store in six.iteritems(self._jobstores):
            if jobstore_alias in (None, alias):
                job = store.lookup_job(job_id)
                if job is not None:
                    return job, alias

        raise JobLookupError(job_id)
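_lookup_job() is internal; callers normally go through the public get_job() wrapper (a sketch assuming the standard BaseScheduler API; 'my_job_id' is a placeholder for an ID you chose when adding the job):

job = scheduler.get_job('my_job_id')             # search every job store
job = scheduler.get_job('my_job_id', 'default')  # or only one store by alias
if job is None:
    print('no such job')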
Example #10
    def remove_job(self, job_id, jobstore=None):
        """
        Removes a job, preventing it from being run any more.

        :param str|unicode job_id: the identifier of the job
        :param str|unicode jobstore: alias of the job store that contains the job
        :raises JobLookupError: if the job was not found
        """

        with self._jobstores_lock:
            # Check if the job is among the pending jobs
            for i, (job, jobstore_alias,
                    replace_existing) in enumerate(self._pending_jobs):
                if job.id == job_id:
                    del self._pending_jobs[i]
                    jobstore = jobstore_alias
                    break
            else:
                # Otherwise, try to remove it from each store until it succeeds or we run out of stores to check
                for alias, store in six.iteritems(self._jobstores):
                    if jobstore in (None, alias):
                        try:
                            store.remove_job(job_id)
                        except JobLookupError:
                            continue

                        jobstore = alias
                        break

        if jobstore is None:
            raise JobLookupError(job_id)

        # Notify listeners that a job has been removed
        event = JobEvent(EVENT_JOB_REMOVED, job_id, jobstore)
        self._dispatch_event(event)

        self._logger.info('Removed job %s', job_id)
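In practice (a sketch; 'my_job_id' is again a placeholder ID):

from apscheduler.jobstores.base import JobLookupError

try:
    scheduler.remove_job('my_job_id')   # checks pending jobs first, then each store
except JobLookupError:
    print('job was already gone')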
Example #11
    def get_all_jobs(self):
        # Rebuild Job objects from the serialized states in the Redis hash and sort
        # them by next run time; paused jobs (no next run time) sort last.
        job_states = self.redis.hgetall(self.jobs_key)
        jobs = self._reconstitute_jobs(six.iteritems(job_states))
        paused_sort_key = datetime(9999, 12, 31, tzinfo=utc)
        return sorted(jobs, key=lambda job: job.next_run_time or paused_sort_key)
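This method belongs to the Redis job store; wiring one into a scheduler might look like this (a sketch assuming a local Redis server and the redis client package; the key names shown are the store's defaults):

from apscheduler.jobstores.redis import RedisJobStore
from apscheduler.schedulers.background import BackgroundScheduler

jobstores = {
    'default': RedisJobStore(db=0, jobs_key='apscheduler.jobs',
                             run_times_key='apscheduler.run_times')
}
scheduler = BackgroundScheduler(jobstores=jobstores)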
Example #12
    def _process_jobs(self):
        """
        Iterates through jobs in every jobstore, starts jobs that are due and figures out how long to wait for the next
        round.
        """

        self._logger.debug('Looking for jobs to run')
        now = datetime.now(self.timezone)
        next_wakeup_time = None

        with self._jobstores_lock:
            for jobstore_alias, jobstore in six.iteritems(self._jobstores):
                for job in jobstore.get_due_jobs(now):
                    # Look up the job's executor
                    try:
                        executor = self._lookup_executor(job.executor)
                    except:
                        self._logger.error(
                            'Executor lookup ("%s") failed for job "%s" -- removing it from the job store',
                            job.executor, job)
                        self.remove_job(job.id, jobstore_alias)
                        continue

                    run_times = job._get_run_times(now)
                    run_times = run_times[
                        -1:] if run_times and job.coalesce else run_times
                    if run_times:
                        try:
                            executor.submit_job(job, run_times)
                        except MaxInstancesReachedError:
                            self._logger.warning(
                                'Execution of job "%s" skipped: maximum number of running instances reached (%d)',
                                job, job.max_instances)
                        except:
                            self._logger.exception(
                                'Error submitting job "%s" to executor "%s"',
                                job, job.executor)

                        # Update the job if it has a next execution time. Otherwise remove it from the job store.
                        job_next_run = job.trigger.get_next_fire_time(
                            run_times[-1], now)
                        if job_next_run:
                            job._modify(next_run_time=job_next_run)
                            jobstore.update_job(job)
                        else:
                            self.remove_job(job.id, jobstore_alias)

                # Set a new next wakeup time if there isn't one yet or the jobstore has an even earlier one
                jobstore_next_run_time = jobstore.get_next_run_time()
                if jobstore_next_run_time and (
                        next_wakeup_time is None
                        or jobstore_next_run_time < next_wakeup_time):
                    next_wakeup_time = jobstore_next_run_time

        # Determine the delay until this method should be called again
        if next_wakeup_time is not None:
            wait_seconds = max(timedelta_seconds(next_wakeup_time - now), 0)
            self._logger.debug('Next wakeup is due at %s (in %f seconds)',
                               next_wakeup_time, wait_seconds)
        else:
            wait_seconds = None
            self._logger.debug('No jobs; waiting until a job is added')

        return wait_seconds
Example #13
    def _configure(self, config):
        # Set general options
        self._logger = maybe_ref(config.pop(
            'logger', None)) or getLogger('apscheduler.scheduler')
        self.timezone = astimezone(config.pop('timezone',
                                              None)) or get_localzone()

        # Set the job defaults
        job_defaults = config.get('job_defaults', {})
        self._job_defaults = {
            'misfire_grace_time':
            asint(job_defaults.get('misfire_grace_time', 1)),
            'coalesce': asbool(job_defaults.get('coalesce', True)),
            'max_instances': asint(job_defaults.get('max_instances', 1))
        }

        # Configure executors
        self._executors.clear()
        for alias, value in six.iteritems(config.get('executors', {})):
            if isinstance(value, BaseExecutor):
                self.add_executor(value, alias)
            elif isinstance(value, MutableMapping):
                executor_class = value.pop('class', None)
                plugin = value.pop('type', None)
                if plugin:
                    executor = self._create_plugin_instance(
                        'executor', plugin, value)
                elif executor_class:
                    cls = maybe_ref(executor_class)
                    executor = cls(**value)
                else:
                    raise ValueError(
                        'Cannot create executor "%s" -- either "type" or "class" must be defined'
                        % alias)

                self.add_executor(executor, alias)
            else:
                raise TypeError(
                    "Expected executor instance or dict for executors['%s'], got %s instead"
                    % (alias, value.__class__.__name__))

        # Configure job stores
        self._jobstores.clear()
        for alias, value in six.iteritems(config.get('jobstores', {})):
            if isinstance(value, BaseJobStore):
                self.add_jobstore(value, alias)
            elif isinstance(value, MutableMapping):
                jobstore_class = value.pop('class', None)
                plugin = value.pop('type', None)
                if plugin:
                    jobstore = self._create_plugin_instance(
                        'jobstore', plugin, value)
                elif jobstore_class:
                    cls = maybe_ref(jobstore_class)
                    jobstore = cls(**value)
                else:
                    raise ValueError(
                        'Cannot create job store "%s" -- either "type" or "class" must be defined'
                        % alias)

                self.add_jobstore(jobstore, alias)
            else:
                raise TypeError(
                    "Expected job store instance or dict for jobstores['%s'], got %s instead"
                    % (alias, value.__class__.__name__))
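Together with configure() from Example #1, this is what turns a flat, dotted configuration into job defaults, executors and job stores. A configuration sketch in the shape this method expects (the class path and values are illustrative), applied before the scheduler is started:

scheduler.configure({
    'apscheduler.timezone': 'UTC',
    'apscheduler.job_defaults.coalesce': 'false',
    'apscheduler.job_defaults.max_instances': '3',
    'apscheduler.executors.default.class': 'apscheduler.executors.pool:ThreadPoolExecutor',
    'apscheduler.executors.default.max_workers': '20',
    'apscheduler.jobstores.default.type': 'memory',
})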
Example #14
    def add_job(self,
                func,
                trigger=None,
                args=None,
                kwargs=None,
                id=None,
                name=None,
                misfire_grace_time=undefined,
                coalesce=undefined,
                max_instances=undefined,
                next_run_time=undefined,
                jobstore='default',
                executor='default',
                replace_existing=False,
                **trigger_args):
        """
        add_job(func, trigger=None, args=None, kwargs=None, id=None, name=None, misfire_grace_time=undefined, \
            coalesce=undefined, max_instances=undefined, next_run_time=undefined, jobstore='default', \
            executor='default', replace_existing=False, **trigger_args)

        Adds the given job to the job list and wakes up the scheduler if it's already running.

        Any option that defaults to ``undefined`` will be replaced with the corresponding default value when the job is
        scheduled (which happens when the scheduler is started, or immediately if the scheduler is already running).

        The ``func`` argument can be given either as a callable object or a textual reference in the
        ``package.module:some.object`` format, where the first half (separated by ``:``) is an importable module and the
        second half is a reference to the callable object, relative to the module.

        The ``trigger`` argument can either be:
          #. the alias name of the trigger (e.g. ``date``, ``interval`` or ``cron``), in which case any extra keyword
             arguments to this method are passed on to the trigger's constructor
          #. an instance of a trigger class

        :param func: callable (or a textual reference to one) to run at the given time
        :param str|apscheduler.triggers.base.BaseTrigger trigger: trigger that determines when ``func`` is called
        :param list|tuple args: list of positional arguments to call func with
        :param dict kwargs: dict of keyword arguments to call func with
        :param str|unicode id: explicit identifier for the job (for modifying it later)
        :param str|unicode name: textual description of the job
        :param int misfire_grace_time: seconds after the designated run time that the job is still allowed to be run
        :param bool coalesce: run once instead of many times if the scheduler determines that the job should be run more
                              than once in succession
        :param int max_instances: maximum number of concurrently running instances allowed for this job
        :param datetime next_run_time: when to first run the job, regardless of the trigger (pass ``None`` to add the
                                       job as paused)
        :param str|unicode jobstore: alias of the job store to store the job in
        :param str|unicode executor: alias of the executor to run the job with
        :param bool replace_existing: ``True`` to replace an existing job with the same ``id`` (but retain the
                                      number of runs from the existing one)
        :rtype: Job
        """

        job_kwargs = {
            'trigger': self._create_trigger(trigger, trigger_args),
            'executor': executor,
            'func': func,
            'args': tuple(args) if args is not None else (),
            'kwargs': dict(kwargs) if kwargs is not None else {},
            'id': id,
            'name': name,
            'misfire_grace_time': misfire_grace_time,
            'coalesce': coalesce,
            'max_instances': max_instances,
            'next_run_time': next_run_time
        }
        job_kwargs = dict((key, value)
                          for key, value in six.iteritems(job_kwargs)
                          if value is not undefined)
        job = Job(self, **job_kwargs)

        # Don't really add jobs to job stores before the scheduler is up and running
        with self._jobstores_lock:
            if not self.running:
                self._pending_jobs.append((job, jobstore, replace_existing))
                self._logger.info(
                    'Adding job tentatively -- it will be properly scheduled when the scheduler starts'
                )
            else:
                self._real_add_job(job, jobstore, replace_existing, True)

        return job
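Both ways of specifying the trigger can be sketched like this (the callable and job IDs are placeholders):

from apscheduler.triggers.interval import IntervalTrigger

def send_report():
    print('report sent')

# Trigger given by alias; extra keyword arguments go to the trigger constructor.
scheduler.add_job(send_report, 'cron', hour=7, minute=30,
                  id='morning_report', replace_existing=True)

# Or pass a ready-made trigger instance.
scheduler.add_job(send_report, IntervalTrigger(hours=1), id='hourly_report')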
Example #15
    def _modify(self, **changes):
        """Validates the changes to the Job and makes the modifications if and only if all of them validate."""

        approved = {}

        if 'id' in changes:
            value = changes.pop('id')
            if not isinstance(value, six.string_types):
                raise TypeError("id must be a nonempty string")
            if hasattr(self, 'id'):
                raise ValueError('The job ID may not be changed')
            approved['id'] = value

        if 'func' in changes or 'args' in changes or 'kwargs' in changes:
            func = changes.pop('func') if 'func' in changes else self.func
            args = changes.pop('args') if 'args' in changes else self.args
            kwargs = changes.pop('kwargs') if 'kwargs' in changes else self.kwargs

            if isinstance(func, str):
                func_ref = func
                func = ref_to_obj(func)
            elif callable(func):
                try:
                    func_ref = obj_to_ref(func)
                except ValueError:
                    # If this happens, this Job won't be serializable
                    func_ref = None
            else:
                raise TypeError('func must be a callable or a textual reference to one')

            if not hasattr(self, 'name') and changes.get('name', None) is None:
                changes['name'] = get_callable_name(func)

            if isinstance(args, six.string_types) or not isinstance(args, Iterable):
                raise TypeError('args must be a non-string iterable')
            if isinstance(kwargs, six.string_types) or not isinstance(kwargs, Mapping):
                raise TypeError('kwargs must be a dict-like object')

            check_callable_args(func, args, kwargs)

            approved['func'] = func
            approved['func_ref'] = func_ref
            approved['args'] = args
            approved['kwargs'] = kwargs

        if 'name' in changes:
            value = changes.pop('name')
            if not value or not isinstance(value, six.string_types):
                raise TypeError("name must be a nonempty string")
            approved['name'] = value

        if 'misfire_grace_time' in changes:
            value = changes.pop('misfire_grace_time')
            if value is not None and (not isinstance(value, six.integer_types) or value <= 0):
                raise TypeError('misfire_grace_time must be either None or a positive integer')
            approved['misfire_grace_time'] = value

        if 'coalesce' in changes:
            value = bool(changes.pop('coalesce'))
            approved['coalesce'] = value

        if 'max_instances' in changes:
            value = changes.pop('max_instances')
            if not isinstance(value, six.integer_types) or value <= 0:
                raise TypeError('max_instances must be a positive integer')
            approved['max_instances'] = value

        if 'trigger' in changes:
            trigger = changes.pop('trigger')
            if not isinstance(trigger, BaseTrigger):
                raise TypeError('Expected a trigger instance, got %s instead' % trigger.__class__.__name__)

            approved['trigger'] = trigger

        if 'executor' in changes:
            value = changes.pop('executor')
            if not isinstance(value, six.string_types):
                raise TypeError('executor must be a string')
            approved['executor'] = value

        if 'next_run_time' in changes:
            value = changes.pop('next_run_time')
            approved['next_run_time'] = convert_to_datetime(value, self._scheduler.timezone, 'next_run_time')

        if changes:
            raise AttributeError('The following are not modifiable attributes of Job: %s' % ', '.join(changes))

        for key, value in six.iteritems(approved):
            setattr(self, key, value)
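_modify() is reached through the public Job.modify() and BaseScheduler.modify_job() wrappers; a usage sketch (assuming the 'morning_report' job added in the previous example):

# Either through the scheduler...
scheduler.modify_job('morning_report', max_instances=2, name='Morning report')

# ...or directly on a Job handle; both paths end up in _modify() above.
job = scheduler.get_job('morning_report')
job.modify(misfire_grace_time=300)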