Example #1
    def __init__(self,
                 url=None,
                 engine=None,
                 tablename='apscheduler_jobs',
                 metadata=None,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL):
        super(SQLAlchemyJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol
        metadata = maybe_ref(metadata) or MetaData()

        if engine:
            self.engine = maybe_ref(engine)
        elif url:
            self.engine = create_engine(url)
        else:
            raise ValueError('Need either "engine" or "url" defined')

        # 191 = max key length in MySQL for InnoDB/utf8mb4 tables,
        # 25 = precision that translates to an 8-byte float
        self.jobs_t = Table(
            tablename, metadata,
            Column('id',
                   Unicode(191, _warn_on_bytestring=False),
                   primary_key=True),
            Column('next_run_time', Float(25), index=True),
            Column('job_state', LargeBinary, nullable=False))

        self.jobs_t.create(self.engine, checkfirst=True)
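
A note on construction: either a ready-made Engine or a URL satisfies the branch above, and since the engine argument passes through maybe_ref, a textual reference to an engine also works. A minimal usage sketch (the sqlite URL is illustrative, not from the source):

    from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
    from sqlalchemy import create_engine

    # Let the store build its own engine from a URL:
    store = SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')

    # Or hand over an existing engine object:
    engine = create_engine('sqlite:///jobs.sqlite')
    store = SQLAlchemyJobStore(engine=engine)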
Example #2
    def _configure(self, config):
        # Set general options
        self._logger = maybe_ref(config.pop('logger', None)) or getLogger('apscheduler.scheduler')
        self.timezone = astimezone(config.pop('timezone', None)) or get_localzone()

        # Set the job defaults
        job_defaults = config.get('job_defaults', {})
        self._job_defaults = {
            'misfire_grace_time': asint(job_defaults.get('misfire_grace_time', 1)),
            'coalesce': asbool(job_defaults.get('coalesce', True)),
            'max_instances': asint(job_defaults.get('max_instances', 1))
        }

        # Configure executors
        self._executors.clear()
        for alias, value in six.iteritems(config.get('executors', {})):
            if isinstance(value, BaseExecutor):
                self.add_executor(value, alias)
            elif isinstance(value, MutableMapping):
                executor_class = value.pop('class', None)
                plugin = value.pop('type', None)
                if plugin:
                    executor = self._create_plugin_instance('executor', plugin, value)
                elif executor_class:
                    cls = maybe_ref(executor_class)
                    executor = cls(**value)
                else:
                    raise ValueError(
                        'Cannot create executor "%s" -- either "type" or "class" must be defined' %
                        alias)

                self.add_executor(executor, alias)
            else:
                raise TypeError(
                    "Expected executor instance or dict for executors['%s'], got %s instead" %
                    (alias, value.__class__.__name__))

        # Configure job stores
        self._jobstores.clear()
        for alias, value in six.iteritems(config.get('jobstores', {})):
            if isinstance(value, BaseJobStore):
                self.add_jobstore(value, alias)
            elif isinstance(value, MutableMapping):
                jobstore_class = value.pop('class', None)
                plugin = value.pop('type', None)
                if plugin:
                    jobstore = self._create_plugin_instance('jobstore', plugin, value)
                elif jobstore_class:
                    cls = maybe_ref(jobstore_class)
                    jobstore = cls(**value)
                else:
                    raise ValueError(
                        'Cannot create job store "%s" -- either "type" or "class" must be '
                        'defined' % alias)

                self.add_jobstore(jobstore, alias)
            else:
                raise TypeError(
                    "Expected job store instance or dict for jobstores['%s'], got %s instead" %
                    (alias, value.__class__.__name__))
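
For orientation, a configuration dict accepted by this method pairs each executor or job store alias with either an instance, a plugin entry point via 'type', or a direct class path via 'class'. A sketch (aliases, pool sizes and the sqlite URL are illustrative; 'threadpool' and 'sqlalchemy' are standard APScheduler plugin names):

    config = {
        'job_defaults': {'misfire_grace_time': 30, 'coalesce': True, 'max_instances': 2},
        'executors': {
            # Plugin lookup via 'type':
            'default': {'type': 'threadpool', 'max_workers': 10},
            # Direct class reference via 'class' (hypothetical dotted path):
            'custom': {'class': 'myapp.executors:MyExecutor'},
        },
        'jobstores': {
            'default': {'type': 'sqlalchemy', 'url': 'sqlite:///jobs.sqlite'},
        },
    }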
Example #3
    def __init__(self, url=None, engine=None, tablename='apscheduler_jobs', metadata=None,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL):
        super(SQLAlchemyJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol
        metadata = maybe_ref(metadata) or MetaData()

        if engine:
            self.engine = maybe_ref(engine)
        elif url:
            self.engine = create_engine(url)
        else:
            raise ValueError('Need either "engine" or "url" defined')

        # 191 = max key length in MySQL for InnoDB/utf8mb4 tables,
        # 25 = precision that translates to an 8-byte float
        self.jobs_t = Table(
            tablename, metadata,
            Column('id', Unicode(191, _warn_on_bytestring=False), primary_key=True),
            Column('next_run_time', Float(25), index=True),
            Column('job_state', LargeBinary, nullable=False),
        )

        self.job_submissions_t = Table(
            "apscheduler_job_submissions", metadata,
            Column("id", Integer(), primary_key=True),
            Column("state", Enum("submitted", "success", "failure", "missed", "orphaned")),
            Column("func", String()),
            Column("submitted_at", DateTime()),
            Column("completed_at", DateTime()),
            Column("apscheduler_job_id", Integer(), ForeignKey(tablename + ".id"))
        )
Example #4
    def __init__(self, url=None, engine=None, metadata=None):
        super(HistoryStore, self).__init__()
        metadata = maybe_ref(metadata) or MetaData()

        url = url if url else Config.DATABASE_URL
        if engine:
            self.engine = maybe_ref(engine)
        elif url:
            self.engine = create_engine(url, encoding='utf-8', echo=False)
        else:
            raise ValueError('Need either "engine" or "url" defined')

        self.__ini_schema(metadata)
Example #5
    def __init__(self,
                 url=None,
                 engine=None,
                 tablename='scheduler',
                 metadata=None):
        super(DbJobStore, self).__init__()
        metadata = maybe_ref(metadata) or MetaData()

        if engine:
            self.engine = maybe_ref(engine)
        elif url:
            self.engine = create_engine(url)
        else:
            raise ValueError('Need either "engine" or "url" defined')

        # 191 = max key length in MySQL for InnoDB/utf8mb4 tables
        self.jobs_t = Table(
            tablename,
            metadata,
            Column('id',
                   Unicode(191, _warn_on_bytestring=False),
                   primary_key=True),
            Column('name', String(100), index=False, default=''),  # e.g. my_job2
            Column('func', String(500), index=False, default=''),  # e.g. __main__:my_job2
            Column('args', String(500), index=False, default=''),  # e.g. ('my_job2')
            Column('kwargs', String(500), index=False, default=''),  # e.g. {}
            Column('version', String(10), index=False, default=''),  # version, defaults to 1
            Column('trigger_type', Text, index=False),  # trigger type: interval/cron/date
            Column('crontab', String(1000), index=False),  # crontab
            Column('interval', Integer, index=False),  # interval
            Column('run_date', TIMESTAMP(timezone=True), index=False),  # datetime
            Column('coalesce', Integer, index=False),  # False/True, default: False
            Column('start_date', TIMESTAMP(timezone=True), index=False),  # start time, for crontab/interval
            Column('end_date', TIMESTAMP(timezone=True), index=False),  # end time, for crontab/interval
            Column('next_run_time', TIMESTAMP(timezone=True), index=False),  # next run time
            Column('max_instances', Integer, index=False),  # e.g. 3
            Column('executor', String(50), index=False, default=''),  # e.g. default
            Column('misfire_grace_time', Integer, index=False),  # overdue jobs: whether to make them up
        )
Example #6
    def __init__(self, database='apscheduler', table='jobs', client=None,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(RethinkDBJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol

        if not database:
            raise ValueError('The "database" parameter must not be empty')
        if not table:
            raise ValueError('The "table" parameter must not be empty')

        if client:
            self.conn = maybe_ref(client)
        else:
            self.conn = r.connect(db=database, **connect_args)

        if database not in r.db_list().run(self.conn):
            r.db_create(database).run(self.conn)

        if table not in r.table_list().run(self.conn):
            r.table_create(table).run(self.conn)

        if 'next_run_time' not in r.table(table).index_list().run(self.conn):
            r.table(table).index_create('next_run_time').run(self.conn)

        self.table = r.db(database).table(table)
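
A hedged usage sketch for this constructor (host and port are illustrative; any extra keyword arguments are forwarded to r.connect):

    # Connects to a local RethinkDB server and creates the database,
    # table and next_run_time index up front if they are missing:
    store = RethinkDBJobStore(database='apscheduler', table='jobs',
                              host='localhost', port=28015)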
Example #7
    def __init__(self,
                 path='/apscheduler',
                 job_submission_path='/apscheduler_job_submissions',
                 client=None,
                 close_connection_on_exit=False,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL,
                 **connect_args):
        super(ZooKeeperJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol
        self.close_connection_on_exit = close_connection_on_exit

        if not path:
            raise ValueError('The "path" parameter must not be empty')

        if not job_submission_path:
            raise ValueError(
                'The "job_submission_path" parameter must not be empty')

        self.path = path
        self.job_submission_path = job_submission_path

        if client:
            self.client = maybe_ref(client)
        else:
            self.client = KazooClient(**connect_args)
        self._ensured_path = False
Example #8
    def __init__(self, collection='jobs', client=None,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(FirestoreDBJobstore, self).__init__()
        self.pickle_protocol = pickle_protocol

        if not collection:
            raise ValueError('The "collection" parameter must not be empty')
        if not client:
            raise ValueError('The "client" parameter must not be empty')
        self.client = maybe_ref(client)
        self.collection = self.client.collection(collection)
Example #9
    def __init__(self, url=None, engine=None, tablename='apscheduler_jobs', metadata=None,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL):
        super(SQLAlchemyJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol
        metadata = maybe_ref(metadata) or MetaData()

        if engine:
            self.engine = maybe_ref(engine)
        elif url:
            self.engine = create_engine(url)
        else:
            raise ValueError('Need either "engine" or "url" defined')

        # 191 = max key length in MySQL for InnoDB/utf8mb4 tables,
        # 25 = precision that translates to an 8-byte float
        self.jobs_t = Table(
            tablename, metadata,
            Column('id', Unicode(191, _warn_on_bytestring=False), primary_key=True),
            Column('next_run_time', Float(25), index=True),
            Column('job_state', LargeBinary, nullable=False)
        )
Example #10
    def __init__(self, path='/apscheduler', client=None, close_connection_on_exit=False,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(ZooKeeperJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol
        self.close_connection_on_exit = close_connection_on_exit

        if not path:
            raise ValueError('The "path" parameter must not be empty')

        self.path = path

        if client:
            self.client = maybe_ref(client)
        else:
            self.client = KazooClient(**connect_args)
        self._ensured_path = False
Example #11
    def __init__(self, database='apscheduler', collection='jobs', client=None,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(MongoDBJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol

        if not database:
            raise ValueError('The "database" parameter must not be empty')
        if not collection:
            raise ValueError('The "collection" parameter must not be empty')

        if client:
            self.client = maybe_ref(client)
        else:
            connect_args.setdefault('w', 1)
            self.client = MongoClient(**connect_args)

        self.collection = self.client[database][collection]
Example #12
    def start(self, scheduler, alias):
        super(RethinkDBJobStore, self).start(scheduler, alias)

        if self.client:
            self.conn = maybe_ref(self.client)
        else:
            self.conn = r.connect(db=self.database, **self.connect_args)

        if self.database not in r.db_list().run(self.conn):
            r.db_create(self.database).run(self.conn)

        if self.table not in r.table_list().run(self.conn):
            r.table_create(self.table).run(self.conn)

        if 'next_run_time' not in r.table(self.table).index_list().run(self.conn):
            r.table(self.table).index_create('next_run_time').run(self.conn)

        self.table = r.db(self.database).table(self.table)
Example #13
    def _configure(self, config):
        # Set general options; assign the logger to self._logger (the original
        # bound it to a local named "logging", shadowing the stdlib module and
        # discarding the result)
        self._logger = maybe_ref(config.pop('logger', None)) or getLogger('apscheduler.scheduler')
        # handler = logging.handlers.RotatingFileHandler('/opt/logs/windmill/app.log', maxBytes=104857600, backupCount=10, encoding='utf_8')
        # handler.setLevel(logging.INFO)
        # handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(name)s:%(lineno)d]'))
        # self._logger.addHandler(handler)

        # self.timezone = astimezone(config.pop('timezone', None)) or get_localzone()
        self.timezone = get_localzone()

        # Set the job defaults
        job_defaults = config.get('job_defaults', {})
        self._job_defaults = {
            'misfire_grace_time': asint(job_defaults.get('misfire_grace_time', 1)),
            'coalesce': asbool(job_defaults.get('coalesce', True)),
            'max_instances': asint(job_defaults.get('max_instances', 1))
        }
Example #14
    def __init__(self,
                 url=None,
                 engine=None,
                 tablename='apscheduler_jobs',
                 metadata=None,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL,
                 tag=None):
        super(WatcherJobStore, self).__init__(url, engine, tablename, metadata,
                                              pickle_protocol)
        metadata = maybe_ref(metadata) or MetaData()
        self.jobs_t = Table(tablename,
                            metadata,
                            autoload=True,
                            autoload_with=engine)
        service_ident = service.ServiceHeartbeat.get_service_name()
        self.tag = tag or {'host': service_ident[0], 'name': service_ident[1]}
        self.service_id = objects.Service.list(context=context.make_context(),
                                               filters=self.tag)[0].id
Example #15
    def __init__(self,
                 database=None,
                 database_type=None,
                 database_ref=None,
                 tablename='apscheduler_jobs',
                 pickle_protocol=pickle.HIGHEST_PROTOCOL,
                 tableschema=None,
                 database_options=None):
        super(PeeweeJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol

        if database_ref:
            self.database = maybe_ref(database_ref)
        elif database and database_type:
            self.database = self._init_database(database, database_type,
                                                database_options)
        else:
            raise ValueError(
                'Need either "database" and "database_type" or "database_ref"'
                ' defined')

        # in case there is a connection issue, it will surface here
        # instead of downstream
        self.database.connect()

        class PeeweeJob(Model):
            class Meta:
                database = self.database
                table_name = tablename
                schema = tableschema

            id = TextField(primary_key=True)
            next_run_time = FloatField(null=True)
            job_state = BlobField()

        self.job_model = PeeweeJob
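
A minimal usage sketch, exercising only the database_ref branch that is fully visible above (PeeweeJobStore is this example's own class; the values _init_database accepts for database_type are not shown in the excerpt):

    from peewee import SqliteDatabase

    db = SqliteDatabase('jobs.db')           # an existing peewee database object
    store = PeeweeJobStore(database_ref=db)  # passes through maybe_ref unchanged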
Example #16
    def _configure(self, config):
        self._eventloop = maybe_ref(config.pop('event_loop', None)) or asyncio.get_event_loop()
        super(AsyncIOScheduler, self)._configure(config)
Example #17
    def _configure(self, config):
        self._ioloop = maybe_ref(config.pop('io_loop', None)) or IOLoop.current()
        super(TornadoScheduler, self)._configure(config)
Example #18
def test_maybe_ref(input, expected):
    assert maybe_ref(input) == expected
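
The assertion above only makes sense with its parametrization; a plausible decorator, assuming the usual pytest idiom (the datetime pairing is illustrative, not copied from the test suite):

    import pytest
    from datetime import datetime
    from apscheduler.util import maybe_ref

    @pytest.mark.parametrize('input,expected', [
        ('datetime:datetime', datetime),  # textual reference gets resolved
        (datetime, datetime),             # non-string input passes through
    ])
    def test_maybe_ref(input, expected):
        assert maybe_ref(input) == expected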
Example #19
    def _configure(self, config):
        self._reactor = maybe_ref(config.pop('reactor', default_reactor))
        super()._configure(config)
Example #20
    def _configure(self, config):
        self._reactor = maybe_ref(config.pop('reactor', default_reactor))
        super(TwistedScheduler, self)._configure(config)
Example #21
    def _configure(self, config):
        self._ioloop = maybe_ref(config.pop('io_loop', None)) or IOLoop.current()
        super()._configure(config)
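
All of the examples funnel optional user-supplied objects through apscheduler.util.maybe_ref, which resolves 'package.module:attribute' strings and returns everything else unchanged. A rough sketch of that behavior (the real implementation delegates to ref_to_obj; this is an approximation, not the library source):

    from importlib import import_module

    def maybe_ref_sketch(ref):
        # Non-strings (engines, clients, loggers, None) pass through as-is.
        if not isinstance(ref, str):
            return ref
        # Textual references use the 'package.module:attribute' form.
        modulename, sep, attrname = ref.partition(':')
        if not sep:
            raise ValueError('invalid reference: %s' % ref)
        obj = import_module(modulename)
        for name in attrname.split('.'):
            obj = getattr(obj, name)
        return obj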