Example #1
def setup_schedule_config(self, endpoint_explorer, node_address):
    scheduler = GeventScheduler()
    scheduler.add_job(
        lambda: notice_explorer_to_be_alive(endpoint_explorer, node_address),
        'interval',
        minutes=30)
    scheduler.start()
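These snippets assume APScheduler's gevent scheduler import, written out in full in Example #14:

from apscheduler.schedulers.gevent import GeventScheduler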
Example #2
class WatchDog(WebSocketWatchDog):
    websocket_manage_dict = dict()

    def __init__(self, is_auto_connect=True, heart_beat_limit_ms=HEART_BEAT_MS, reconnect_after_ms=RECONNECT_MS, restart_ms=RESTART_MS):
        threading.Thread.__init__(self)
        self.is_auto_connect = is_auto_connect
        self.heart_beat_limit_ms = heart_beat_limit_ms
        self.reconnect_after_ms = max(reconnect_after_ms, heart_beat_limit_ms)
        self.restart_ms = restart_ms
        self.logger = logger
        self.scheduler = Scheduler()
        self.scheduler.add_job(check_reconnect, "interval", max_instances=1, seconds=1, args=[self])
        self.start()

    def get_random_restart_at(self, wm):
        # Jitter each restart by hash(wm) % RESTART_RANGE ms so all managers
        # don't restart at the same moment.
        return wm.created_at + self.restart_ms + hash(wm) % RESTART_RANGE

    def on_connection_closed(self, websocket_manage):
        self.mutex.acquire()
        self.websocket_manage_list.remove(websocket_manage)
        [name] = [name for name, wm in self.websocket_manage_dict.items() if wm == websocket_manage]
        del self.websocket_manage_dict[name]
        self.mutex.release()

    def after_connection_created(self, name):
        [wm] = [wm for wm in self.websocket_manage_list if wm not in self.websocket_manage_dict.values()]
        self.mutex.acquire()
        self.websocket_manage_dict[name] = wm
        self.mutex.release()
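check_reconnect is defined elsewhere in the surrounding SDK and not shown here. A hypothetical sketch of the scheduled callback, assuming each manager tracks a millisecond last_receive_time and exposes a re_connect() method (both assumptions, not the SDK's confirmed API):

def check_reconnect(watchdog):
    # Illustrative only: walk the tracked managers and reconnect any whose
    # heartbeat is older than the configured limit.
    for wm in list(watchdog.websocket_manage_dict.values()):
        idle_ms = now_ms() - wm.last_receive_time  # now_ms(): assumed millisecond clock helper
        if idle_ms > watchdog.heart_beat_limit_ms and watchdog.is_auto_connect:
            wm.re_connect()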
Example #3
def delayed_finish():
    final_sched = Scheduler()
    final_sched.start()
    now = datetime.today()
    delta_final = timedelta(seconds=final_wait)  # final_wait is assumed to be defined at module level
    start_time = now + delta_final
    final_sched.add_job(final_finish, 'date', run_date=start_time, args=[])
Example #4
    def run(self):
        """Start apscheduler tasks"""
        jobstores = {'mongo': MongoDBJobStore()}

        executors = {
            'default': ThreadPoolExecutor(self.poolsize),
            'processpool': ProcessPoolExecutor(self.procsize)
        }

        job_defaults = {'coalesce': False, 'max_instances': 3}

        scheduler = GeventScheduler()
        scheduler.configure(jobstores=jobstores,
                            executors=executors,
                            job_defaults=job_defaults,
                            timezone=utc)
        scheduler.add_job(self.job_worker, 'interval', seconds=0.001)

        green_let = scheduler.start()
        print('Ctrl+{0} to exit.'.format('Break' if os.name == 'nt' else 'C'))

        # Execution will block here until Ctrl+C (Ctrl+Break on Windows).
        try:
            green_let.join()
        except (KeyboardInterrupt, SystemExit):
            pass
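The run method above assumes the following APScheduler and pytz imports:

from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from apscheduler.jobstores.mongodb import MongoDBJobStore
from apscheduler.schedulers.gevent import GeventScheduler
from pytz import utc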
Example #5
def setup_schedule_config(self, endpoint_explorer, discoverable, node_address,
                          raiden_instance):
    scheduler = GeventScheduler()
    scheduler.add_job(lambda: notice_explorer_to_be_alive(
        endpoint_explorer, discoverable, node_address, raiden_instance),
                      'interval',
                      minutes=30)
    scheduler.start()
Example #6
    def deactivate_player(self, player):
        player.active = False
        scheduler = GeventScheduler()
        timeout_date = datetime.now() + timedelta(seconds=PLAYER_TIMEOUT)
        scheduler.add_job(self.delete_player,
                          'date',
                          run_date=timeout_date,
                          args=[player])
        g = scheduler.start()
        g.join()
Example #7
def GeventScheduler_test():
    sched = GeventScheduler()
    sched.add_job(tick, 'interval', seconds=3)
    g = sched.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #8
def startScheduledExecution(interval):
    scheduler = GeventScheduler()
    fetchAndSaveData()  # run once immediately; subsequent runs happen on the interval
    scheduler.add_job(fetchAndSaveData, 'interval', minutes=interval)
    g = scheduler.start()  # g is the greenlet that runs the scheduler loop
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #9
class TaskManager:
    """Simple wrapper for Advanced Python Scheduler"""
    def __init__(self):
        print "init Task Manager"
        self.logger = logging.getLogger('dls')
        executors = {
            'default': ThreadPoolExecutor(config.EXECUTOR_THREADS_NUMBER),
            'monitor': GeventExecutor(),
        }

        self.scheduler = GeventScheduler(executors=executors)
        self.scheduler.start()

        # Map of tasks for tracking them on UI
        self.tasks = {}
        self.scheduler.add_job(self.report_progress,
                               'interval',
                               seconds=config.JOB_MONITOR_INTERVAL,
                               executor='monitor')

    # Starts new task
    def start_task(self, task):

        self.scheduler.add_job(func=task.execute,
                               misfire_grace_time=config.MISFIRE_GRACE_TIME)
        self.tasks[task.id] = task

    # Kills a task by its ID
    def term_task(self, index):
        task = self.tasks[index]
        task.kill()

    def shutdown(self):
        self.scheduler.shutdown()

    def report_progress(self):
        """Gathers information from task and sends to clients"""
        # self.logger.info("sending tasks progress")
        task_data = []
        for t in self.tasks.values():
            task_data.append(t.status())
        # from pprint import pprint
        # pprint (task_data)
        socketio.emit('task_monitor', json.dumps(task_data))
        return task_data

    def task_info(self, id):
        t = self.tasks[int(id)]
        return t.detailed_status()
Example #10
def register_schedule(minutes=0):
    minutes = minutes or 60
    scheduler = GeventScheduler()
    func = ansible_inventory_patrol
    name = func.__name__
    job_id = '5db150f3e3f7e0677091329f'
    if scheduler.state != 1:
        scheduler.start()
    job = scheduler.get_job(job_id=job_id)
    if not job:
        scheduler.add_job(func=func,
                          trigger='interval',
                          minutes=minutes,
                          name=name,
                          id=job_id)
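The magic number in scheduler.state != 1 is APScheduler's STATE_RUNNING code; a small sketch using the named constant instead:

from apscheduler.schedulers.base import STATE_RUNNING

if scheduler.state != STATE_RUNNING:
    scheduler.start()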
Example #11
def watch():
    scheduler = GeventScheduler()

    for web in watchweb.get_watch_webs():
        s = int(web[watchweb.INTERVAL_SECONDS])
        scheduler.add_job(check_web, "interval", seconds=s, kwargs=web)

    g = scheduler.start()  # g is the greenlet that runs the scheduler loop
    print ("Press Ctrl+{0} to exit".format("Break" if os.name == "nt" else "C"))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #12
class Schedule:
    def __init__(self, irc_c, config):
        self.config = config
        self.scheduler = GeventScheduler()

        self._scheduler_greenlet = self.scheduler.start()

        log_propagation_message = partial(
            irc_c.PRIVMSG,
            CONFIG.external['propagation']['logging']['channel'],
        )

        # Scheduled full wiki update
        self.scheduler.add_job(
            Propagate.get_wiki_data,
            'cron',
            kwargs={'reply': log_propagation_message},
            **self.cron_to_kwargs(
                CONFIG.external['propagation']['all_articles']['often']),
        )

        # Scheduled recent pages update
        self.scheduler.add_job(
            Propagate.get_wiki_data,
            'cron',
            kwargs={
                'reply': log_propagation_message,
                'seconds': 259200
            },
            **self.cron_to_kwargs(
                CONFIG.external['propagation']['new_articles']['often']),
        )

    @staticmethod
    def cron_to_kwargs(cronstring):
        """Converts a cron string to cron kwargs"""
        crons = cronstring.split(" ")
        if len(crons) != 5:
            raise ValueError("Invalid cron {}".format(cronstring))
        crons = [cron.replace("_", " ") for cron in crons]
        kwargs = {
            'minute': crons[0],
            'hour': crons[1],
            'day': crons[2],
            'month': crons[3],
            'day_of_week': crons[4],
        }
        return kwargs
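A quick usage sketch of cron_to_kwargs (the underscore substitution lets config values encode spaces inside a single field):

Schedule.cron_to_kwargs("0 */6 * * mon-fri")
# -> {'minute': '0', 'hour': '*/6', 'day': '*', 'month': '*', 'day_of_week': 'mon-fri'}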
Example #13
def watch():
    scheduler = GeventScheduler()

    for web in watchweb.get_watch_webs():
        s = int(web[watchweb.INTERVAL_SECONDS])
        scheduler.add_job(check_web, 'interval', seconds=s, kwargs=web)

    g = scheduler.start()  # g is the greenlet that runs the scheduler loop
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #14
def gevent_schedule():
    from apscheduler.schedulers.gevent import GeventScheduler

    def tick():
        print('Tick! The time is: %s' % datetime.now())

    scheduler = GeventScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    g = scheduler.start()  # g is the greenlet that runs the scheduler loop
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #16
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.
    """
    config = Configurator(settings=settings)
    config.add_subscriber(add_logging_context, ContextFound)
    config.include('pyramid_exclog')
    config.add_route('home', '/')
    config.add_route('resync_all', '/resync_all')
    config.add_route('resync_back', '/resync_back')
    config.add_route('resync', '/resync/{tender_id}')
    config.add_route('recheck', '/recheck/{tender_id}')
    config.add_route('calendar', '/calendar')
    config.add_route('calendar_entry', '/calendar/{date}')
    config.add_route('streams', '/streams')
    config.scan(ignore='openprocurement.chronograph.tests')
    config.add_subscriber(start_scheduler, ApplicationCreated)
    config.registry.api_token = os.environ.get('API_TOKEN', settings.get('api.token'))

    server, db = set_chronograph_security(settings)
    config.registry.couchdb_server = server
    config.registry.db = db

    jobstores = {}
    job_defaults = {
        'coalesce': False,
        'max_instances': 3
    }
    config.registry.api_url = settings.get('api.url')
    config.registry.callback_url = settings.get('callback.url')
    scheduler = Scheduler(jobstores=jobstores,
                          job_defaults=job_defaults,
                          timezone=TZ)
    if 'jobstore_db' in settings:
        scheduler.add_jobstore(SQLAlchemyJobStore(url=settings['jobstore_db']))
    config.registry.scheduler = scheduler
    resync_all_job = scheduler.get_job('resync_all')
    now = datetime.now(TZ)
    if not resync_all_job or resync_all_job.next_run_time < now - timedelta(hours=1):
        if resync_all_job:
            args = resync_all_job.args
        else:
            args = [settings.get('callback.url') + 'resync_all', None]
        run_date = now + timedelta(seconds=60)
        scheduler.add_job(push, 'date', run_date=run_date, timezone=TZ,
                          id='resync_all', args=args,
                          replace_existing=True, misfire_grace_time=60 * 60)
    return config.make_wsgi_app()
Example #17
def production(*_, **settings):
    """Hooks exceptions and returns the Flask app."""

    hook_exceptions()

    app.shiptoasts = ShipToasts()
    app.shiptoasts.initial_fill()

    scheduler = GeventScheduler()
    scheduler.add_job(app.shiptoasts.periodic_call, "interval", seconds=30)
    cleaner = scheduler.start()
    listener = gevent.Greenlet.spawn(app.shiptoasts.listen_for_updates)

    atexit.register(cleaner.join, timeout=2)
    atexit.register(listener.join, timeout=2)
    atexit.register(scheduler.shutdown)

    return app
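Note that atexit runs its callbacks in last-in, first-out order, so scheduler.shutdown() fires before the two join() calls registered above it.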
Example #18
def main():

    scheduler = GeventScheduler()

    url = os.environ.get('SQLALCHEMY_DATABASE_URI', 'sqlite:///database.db')

    scheduler.add_jobstore('sqlalchemy', url=url)

    scheduler.add_job(tick, 'interval', seconds=3, id='example_job', replace_existing=True)

    # g is the greenlet that runs the scheduler loop.
    g = scheduler.start()

    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
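The snippet assumes a module-level tick callable, e.g. the one from Example #14:

def tick():
    print('Tick! The time is: %s' % datetime.now())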
Example #19
class Scheduler:

    def __init__(self, app=None):
        self._scheduler = GeventScheduler()
        self.jobs_list = [monitor_ac_usage, monitor_temperatures]
        if app:
            self.init_app(app)

    def init_app(self, app):
        for job in self.jobs_list:
            self.add_jobs(app, job)

    def add_jobs(self, app, job):
        func = job

        def call_func(*args, **kwargs):
            # Run the job inside the app's application context.
            with app.app_context():
                func(*args, **kwargs)

        # Each job callable is expected to expose a `minutes` attribute for its interval.
        self._scheduler.add_job(call_func, 'interval', minutes=func.minutes, coalesce=True)

    def start(self):
        self._scheduler.start()
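A usage sketch, assuming each job callable carries a minutes attribute that add_jobs reads for its interval (hypothetical values):

monitor_ac_usage.minutes = 10   # hypothetical interval
monitor_temperatures.minutes = 5

scheduler = Scheduler(app)      # wraps every job in the app context
scheduler.start()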
Example #20
def main():

    scheduler = GeventScheduler()

    url = os.environ.get('SQLALCHEMY_DATABASE_URI', 'sqlite:///database.db')

    scheduler.add_jobstore('sqlalchemy', url=url)

    scheduler.add_job(tick,
                      'interval',
                      seconds=3,
                      id='example_job',
                      replace_existing=True)

    # g is the greenlet that runs the scheduler loop.
    g = scheduler.start()

    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #21
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.
    """
    config = Configurator(settings=settings)
    if JournalHandler:
        config.add_subscriber(set_journal_handler, ContextFound)
        config.add_subscriber(clear_journal_handler, BeforeRender)
    config.include('pyramid_exclog')
    config.add_route('home', '/')
    config.add_route('resync_all', '/resync_all')
    config.add_route('resync', '/resync/{tender_id}')
    config.add_route('calendar', '/calendar')
    config.add_route('calendar_entry', '/calendar/{date}')
    config.add_route('streams', '/streams')
    config.scan(ignore='openprocurement.chronograph.tests')
    config.add_subscriber(start_scheduler, ApplicationCreated)
    config.registry.api_token = os.environ.get('API_TOKEN', settings.get('api.token'))

    db_name = os.environ.get('DB_NAME', settings['couchdb.db_name'])
    server = Server(settings.get('couchdb.url'), session=Session(retry_delays=range(60)))
    if 'couchdb.admin_url' not in settings and server.resource.credentials:
        try:
            server.version()
        except Unauthorized:
            server = Server(extract_credentials(settings.get('couchdb.url'))[0])
    config.registry.couchdb_server = server
    if 'couchdb.admin_url' in settings and server.resource.credentials:
        aserver = Server(settings.get('couchdb.admin_url'), session=Session(retry_delays=range(10)))
        users_db = aserver['_users']
        if SECURITY != users_db.security:
            INIT_LOGGER.info("Updating users db security", extra={'MESSAGE_ID': 'update_users_security'})
            users_db.security = SECURITY
        username, password = server.resource.credentials
        user_doc = users_db.get('org.couchdb.user:{}'.format(username), {'_id': 'org.couchdb.user:{}'.format(username)})
        if not user_doc.get('derived_key', '') or PBKDF2(password, user_doc.get('salt', ''), user_doc.get('iterations', 10)).hexread(int(len(user_doc.get('derived_key', '')) / 2)) != user_doc.get('derived_key', ''):
            user_doc.update({
                "name": username,
                "roles": [],
                "type": "user",
                "password": password
            })
            INIT_LOGGER.info("Updating chronograph db main user", extra={'MESSAGE_ID': 'update_chronograph_main_user'})
            users_db.save(user_doc)
        security_users = [username, ]
        if db_name not in aserver:
            aserver.create(db_name)
        db = aserver[db_name]
        SECURITY[u'members'][u'names'] = security_users
        if SECURITY != db.security:
            INIT_LOGGER.info("Updating chronograph db security", extra={'MESSAGE_ID': 'update_chronograph_security'})
            db.security = SECURITY
        auth_doc = db.get(VALIDATE_DOC_ID, {'_id': VALIDATE_DOC_ID})
        if auth_doc.get('validate_doc_update') != VALIDATE_DOC_UPDATE % username:
            auth_doc['validate_doc_update'] = VALIDATE_DOC_UPDATE % username
            INIT_LOGGER.info("Updating chronograph db validate doc", extra={'MESSAGE_ID': 'update_chronograph_validate_doc'})
            db.save(auth_doc)
    else:
        if db_name not in server:
            server.create(db_name)
    config.registry.db = server[db_name]

    jobstores = {
        #'default': CouchDBJobStore(database=db_name, client=server)
    }
    #executors = {
        #'default': ThreadPoolExecutor(5),
        #'processpool': ProcessPoolExecutor(5)
    #}
    job_defaults = {
        'coalesce': False,
        'max_instances': 5
    }
    config.registry.api_url = settings.get('api.url')
    config.registry.callback_url = settings.get('callback.url')
    scheduler = Scheduler(jobstores=jobstores,
                          #executors=executors,
                          job_defaults=job_defaults,
                          timezone=TZ)
    if 'jobstore_db' in settings:
        scheduler.add_jobstore('sqlalchemy', url=settings['jobstore_db'])
    config.registry.scheduler = scheduler
    # scheduler.remove_all_jobs()
    # scheduler.start()
    resync_all_job = scheduler.get_job('resync_all')
    now = datetime.now(TZ)
    if not resync_all_job or resync_all_job.next_run_time < now - timedelta(hours=1):
        if resync_all_job:
            args = resync_all_job.args
        else:
            args = [settings.get('callback.url') + 'resync_all', None]
        run_date = now + timedelta(seconds=60)
        scheduler.add_job(push, 'date', run_date=run_date, timezone=TZ,
                          id='resync_all', args=args,
                          replace_existing=True, misfire_grace_time=60 * 60)
    return config.make_wsgi_app()
Example #22
def setup():
    print('setting up Scheduler')
    scheduler = GeventScheduler()
    scheduler.add_job(func=schedule, trigger='cron', **data.update_cron)
    scheduler.start()
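data.update_cron is unpacked into cron-trigger kwargs; an assumed, purely illustrative shape:

update_cron = {'hour': 3, 'minute': 0}  # hypothetical: fire daily at 03:00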
Example #23
class EventGenerator(Actor):

    '''**Generates a test event at the chosen interval.**

    Parameters:

        name (str):
            | The instance name
        event_class (Optional[compysition.event.Event]):
            | The class that the generated event should be created as
            | Default: Event
        event_kwargs (Optional[dict]):
            | Any additional kwargs to add to the event, including data
        producers (Optional[int]):
            | The number of greenthreads to spawn that each spawn events at the provided interval
            | Default: 1
        interval (Optional[float] OR dict):
            | The interval (in seconds) between each generated event.
            | Should have a value > 0.
            | Can also be a dict, supporting values of weeks, days, hours, minutes, and seconds
            | default: 5
        delay (Optional[float]):
            | The time (in seconds) to wait before initial event generation.
            | Default: 0
        generate_error (Optional[bool]):
            | Whether or not to also send the event via Actor.send_error
            | Default: False

    '''

    DEFAULT_INTERVAL = {'weeks': 0,
                        'days': 0,
                        'hours': 0,
                        'minutes': 0,
                        'seconds': 5}

    def __init__(self, name, event_class=Event, event_kwargs=None, producers=1, interval=5, delay=0, generate_error=False, *args, **kwargs):
        super(EventGenerator, self).__init__(name, *args, **kwargs)
        self.blockdiag_config["shape"] = "flowchart.input"
        self.generate_error = generate_error
        self.interval = self._parse_interval(interval)
        self.delay = delay
        self.event_kwargs = event_kwargs or {}
        self.output = event_class
        self.producers = producers
        self.scheduler = GeventScheduler()

    def _parse_interval(self, interval):
        # Copy the default so the shared class-level dict is not mutated
        # across instances.
        _interval = dict(self.DEFAULT_INTERVAL)

        if isinstance(interval, int):
            _interval['seconds'] = interval
        elif isinstance(interval, dict):
            _interval.update(interval)

        return _interval

    def _initialize_jobs(self):
        for i in range(self.producers):
            self.scheduler.add_job(self._do_produce, 'interval', **self.interval)

    def pre_hook(self):
        self._initialize_jobs()
        gevent.sleep(self.delay)
        self.scheduler.start()

    def post_hook(self):
        self.scheduler.shutdown()

    def _do_produce(self):
        event = self.output(**self.event_kwargs)
        self.logger.debug("Generated new event {event_id}".format(event_id=event.event_id))
        self.send_event(event)
        if self.generate_error:
            event = self.output(**self.event_kwargs)
            self.send_error(event)

    def consume(self, event, *args, **kwargs):
        self._do_produce()
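A usage sketch of the interval parameter, which per the docstring accepts either a number of seconds or a trigger-style dict:

gen = EventGenerator('testgen', interval=10, producers=2)                 # every 10 s, two producers
gen = EventGenerator('testgen', interval={'minutes': 1, 'seconds': 30})  # every 90 s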
Example #24
class AuctionsDataBridge(object):
    """Auctions Data Bridge"""
    def __init__(self, config, activate=False):
        super(AuctionsDataBridge, self).__init__()
        self.config = config
        self.tenders_ids_list = []
        self.activate = activate
        self.client = ApiClient(
            '',
            host_url=self.config_get('tenders_api_server'),
            api_version=self.config_get('tenders_api_version'))
        params = {'opt_fields': 'status,auctionPeriod', 'mode': '_all_'}
        if parse_version(
                self.config_get('tenders_api_version')) > parse_version('0.9'):
            params['opt_fields'] += ',lots'
        self.client.params.update(params)
        self.tz = tzlocal()

        self.couch_url = urljoin(self.config_get('couch_url'),
                                 self.config_get('auctions_db'))
        self.db = Database(self.couch_url,
                           session=Session(retry_delays=range(10)))

        if self.activate:
            self.queue = Queue()
            self.scheduler = GeventScheduler()
            self.scheduler.add_job(self.run_systemd_cmds,
                                   'interval',
                                   max_instances=1,
                                   minutes=2,
                                   id='run_systemd_cmds')
            self.scheduler.start()

    def config_get(self, name):
        return self.config.get('main').get(name)

    def run_systemd_cmds(self):
        auctions = []
        logger.info('Start systemd units activator')
        while True:
            try:
                auctions.append(self.queue.get_nowait())
            except Empty:
                break
        if auctions:
            logger.info('Handle systemctl daemon-reload')
            do_until_success(
                check_call,
                (['/usr/bin/systemctl', '--user', 'daemon-reload'], ))
            for planning_data in auctions:
                if len(planning_data) == 1:
                    logger.info('Tender {0} selected for activate'.format(
                        *planning_data))
                    self.start_auction_worker_cmd('activate', planning_data[0])
                elif len(planning_data) == 2:
                    logger.info(
                        'Lot {1} of tender {0} selected for activate'.format(
                            *planning_data))
                    self.start_auction_worker_cmd('activate',
                                                  planning_data[0],
                                                  lot_id=planning_data[1])
        else:
            logger.info('No auctions to activate')
Example #25
class Scheduler(object):
    def __init__(self):
        self.scheduler = GeventScheduler()
        self.scheduler.add_listener(self.__scheduler_listener(),
                                    EVENT_SCHEDULER_START | EVENT_SCHEDULER_SHUTDOWN
                                    | EVENT_SCHEDULER_PAUSED | EVENT_SCHEDULER_RESUMED
                                    | EVENT_JOB_ADDED | EVENT_JOB_REMOVED
                                    | EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        self.id = 'controller'
        self.app = None

    def schedule_workflows(self, task_id, executable, workflow_ids, trigger):
        """
        Schedules a workflow for execution

        Args:
            task_id (int): Id of the scheduled task
            executable (func): A callable to execute; must take one argument -- a workflow id
            workflow_ids (iterable(str)): An iterable of workflow ids
            trigger (Trigger): The trigger to use for this scheduled task
        """

        def execute(id_):
            with self.app.app_context():
                executable(id_)

        for workflow_id in workflow_ids:
            self.scheduler.add_job(execute, args=(workflow_id,),
                                   id=construct_task_id(task_id, workflow_id),
                                   trigger=trigger, replace_existing=True)

    def get_all_scheduled_workflows(self):
        """
        Gets all the scheduled workflows

        Returns:
             (dict{str: list[str]}) A dict of task_id to workflow execution ids
        """
        tasks = {}
        for job in self.scheduler.get_jobs():
            task, workflow_execution_id = split_task_id(job.id)
            if task not in tasks:
                tasks[task] = [workflow_execution_id]
            else:
                tasks[task].append(workflow_execution_id)
        return tasks

    def get_scheduled_workflows(self, task_id):
        """
        Gets all the scheduled workflows for a given task id

        Args:
            task_id (str): The task id

        Returns:
            (list[str]) A list of workflow execution ids associated with this task id
        """
        tasks = []
        for job in self.scheduler.get_jobs():
            task, workflow_execution_id = split_task_id(job.id)
            if task == task_id:
                tasks.append(workflow_execution_id)
        return tasks

    def update_workflows(self, task_id, trigger):
        """
        Updates the workflows for a given task id to use a different trigger

        Args:
            task_id (str|int): The task id to update
            trigger (Trigger): The new trigger to use
        """
        existing_tasks = {construct_task_id(task_id, workflow_execution_id) for workflow_execution_id in
                          self.get_scheduled_workflows(task_id)}
        for job_id in existing_tasks:
            self.scheduler.reschedule_job(job_id=job_id, trigger=trigger)

    def unschedule_workflows(self, task_id, workflow_execution_ids):
        """
        Unschedules a workflow

        Args:
            task_id (str|int): The task ID to unschedule
            workflow_execution_ids (list[str]): The list of workflow execution IDs to update
        """
        for workflow_execution_id in workflow_execution_ids:
            try:
                self.scheduler.remove_job(construct_task_id(task_id, workflow_execution_id))
            except JobLookupError:
                logger.warning('Cannot delete task {}. '
                               'No task found in scheduler'.format(construct_task_id(task_id, workflow_execution_id)))

    def start(self):
        """Starts the scheduler for active execution. This function must be called before any workflows are executed.

        Returns:
            The state of the scheduler if successful, error message if scheduler is in "stopped" state.
        """
        if self.scheduler.state == STATE_STOPPED:
            logger.info('Starting scheduler')
            self.scheduler.start()
        else:
            logger.warning('Cannot start scheduler. Scheduler is already running or is paused')
            return "Scheduler already running."
        return self.scheduler.state

    def stop(self, wait=True):
        """Stops active execution.

        Args:
            wait (bool, optional): Boolean to synchronously or asynchronously wait for the scheduler to shutdown.
                Default is True.

        Returns:
            The state of the scheduler if successful, error message if scheduler is already in "stopped" state.
        """
        if self.scheduler.state != STATE_STOPPED:
            logger.info('Stopping scheduler')
            self.scheduler.shutdown(wait=wait)
        else:
            logger.warning('Cannot stop scheduler. Scheduler is already stopped')
            return "Scheduler already stopped."
        return self.scheduler.state

    def pause(self):
        """Pauses active execution.

        Returns:
            The state of the scheduler if successful, error message if scheduler is not in the "running" state.
        """
        if self.scheduler.state == STATE_RUNNING:
            logger.info('Pausing scheduler')
            self.scheduler.pause()
        elif self.scheduler.state == STATE_PAUSED:
            logger.warning('Cannot pause scheduler. Scheduler is already paused')
            return "Scheduler already paused."
        elif self.scheduler.state == STATE_STOPPED:
            logger.warning('Cannot pause scheduler. Scheduler is stopped')
            return "Scheduler is in STOPPED state and cannot be paused."
        return self.scheduler.state

    def resume(self):
        """Resumes active execution.

        Returns:
            The state of the scheduler if successful, error message if scheduler is not in the "paused" state.
        """
        if self.scheduler.state == STATE_PAUSED:
            logger.info('Resuming scheduler')
            self.scheduler.resume()
        else:
            logger.warning("Scheduler is not in PAUSED state and cannot be resumed.")
            return "Scheduler is not in PAUSED state and cannot be resumed."
        return self.scheduler.state

    def pause_workflows(self, task_id, workflow_execution_ids):
        """
        Pauses some workflows associated with a task

        Args:
            task_id (int|str): The id of the task to pause
            workflow_execution_ids (list[str]): The list of workflow execution IDs to pause
        """
        for workflow_execution_id in workflow_execution_ids:
            job_id = construct_task_id(task_id, workflow_execution_id)
            try:
                self.scheduler.pause_job(job_id=job_id)
                logger.info('Paused job {0}'.format(job_id))
            except JobLookupError:
                logger.warning('Cannot pause scheduled workflow {}. Workflow ID not found'.format(job_id))

    def resume_workflows(self, task_id, workflow_execution_ids):
        """
        Resumes some workflows associated with a task

        Args:
            task_id (int|str): The id of the task to pause
            workflow_execution_ids (list[str]): The list of workflow execution IDs to resume
        """
        for workflow_execution_id in workflow_execution_ids:
            job_id = construct_task_id(task_id, workflow_execution_id)
            try:
                self.scheduler.resume_job(job_id=job_id)
                logger.info('Resumed job {0}'.format(job_id))
            except JobLookupError:
                logger.warning('Cannot resume scheduled workflow {}. Workflow ID not found'.format(job_id))

    def __scheduler_listener(self):
        event_selector_map = {EVENT_SCHEDULER_START: WalkoffEvent.SchedulerStart,
                              EVENT_SCHEDULER_SHUTDOWN: WalkoffEvent.SchedulerShutdown,
                              EVENT_SCHEDULER_PAUSED: WalkoffEvent.SchedulerPaused,
                              EVENT_SCHEDULER_RESUMED: WalkoffEvent.SchedulerResumed,
                              EVENT_JOB_ADDED: WalkoffEvent.SchedulerJobAdded,
                              EVENT_JOB_REMOVED: WalkoffEvent.SchedulerJobRemoved,
                              EVENT_JOB_EXECUTED: WalkoffEvent.SchedulerJobExecuted,
                              EVENT_JOB_ERROR: WalkoffEvent.SchedulerJobError}

        def event_selector(event):
            try:
                event = event_selector_map[event.code]
                event.send(self)
            except KeyError:  # pragma: no cover
                logger.error('Unknown event triggered in scheduler: {}'.format(event))

        return event_selector
Example #26
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.
    """
    config = Configurator(settings=settings)
    config.add_subscriber(add_logging_context, ContextFound)
    config.include('pyramid_exclog')
    config.add_route('home', '/')
    config.add_route('resync_all', '/resync_all')
    config.add_route('resync_back', '/resync_back')
    config.add_route('resync', '/resync/{auction_id}')
    config.add_route('recheck', '/recheck/{auction_id}')
    config.add_route('calendar', '/calendar')
    config.add_route('calendar_entry', '/calendar/{date}')
    config.add_route('streams', '/streams')
    config.scan(ignore='openprocurement.chronograph.tests')
    config.add_subscriber(start_scheduler, ApplicationCreated)
    config.registry.api_token = os.environ.get('API_TOKEN',
                                               settings.get('api.token'))

    db_name = os.environ.get('DB_NAME', settings['couchdb.db_name'])
    server = Server(settings.get('couchdb.url'),
                    session=Session(retry_delays=range(60)))
    if 'couchdb.admin_url' not in settings and server.resource.credentials:
        try:
            server.version()
        except Unauthorized:
            server = Server(extract_credentials(
                settings.get('couchdb.url'))[0],
                            session=Session(retry_delays=range(60)))
    config.registry.couchdb_server = server
    if 'couchdb.admin_url' in settings and server.resource.credentials:
        aserver = Server(settings.get('couchdb.admin_url'),
                         session=Session(retry_delays=range(10)))
        users_db = aserver['_users']
        if SECURITY != users_db.security:
            LOGGER.info("Updating users db security",
                        extra={'MESSAGE_ID': 'update_users_security'})
            users_db.security = SECURITY
        username, password = server.resource.credentials
        user_doc = users_db.get(
            'org.couchdb.user:{}'.format(username),
            {'_id': 'org.couchdb.user:{}'.format(username)})
        if not user_doc.get(
                'derived_key', '') or PBKDF2(password, user_doc.get(
                    'salt', ''), user_doc.get('iterations', 10)).hexread(
                        int(len(user_doc.get('derived_key', '')) /
                            2)) != user_doc.get('derived_key', ''):
            user_doc.update({
                "name": username,
                "roles": [],
                "type": "user",
                "password": password
            })
            LOGGER.info("Updating chronograph db main user",
                        extra={'MESSAGE_ID': 'update_chronograph_main_user'})
            users_db.save(user_doc)
        security_users = [
            username,
        ]
        if db_name not in aserver:
            aserver.create(db_name)
        db = aserver[db_name]
        SECURITY[u'members'][u'names'] = security_users
        if SECURITY != db.security:
            LOGGER.info("Updating chronograph db security",
                        extra={'MESSAGE_ID': 'update_chronograph_security'})
            db.security = SECURITY
        auth_doc = db.get(VALIDATE_DOC_ID, {'_id': VALIDATE_DOC_ID})
        if auth_doc.get(
                'validate_doc_update') != VALIDATE_DOC_UPDATE % username:
            auth_doc['validate_doc_update'] = VALIDATE_DOC_UPDATE % username
            LOGGER.info(
                "Updating chronograph db validate doc",
                extra={'MESSAGE_ID': 'update_chronograph_validate_doc'})
            db.save(auth_doc)
        # sync couchdb views
        sync_design(db)
        db = server[db_name]
    else:
        if db_name not in server:
            server.create(db_name)
        db = server[db_name]
        # sync couchdb views
        sync_design(db)
    config.registry.db = db

    jobstores = {
        #'default': CouchDBJobStore(database=db_name, client=server)
    }
    #executors = {
    #'default': ThreadPoolExecutor(5),
    #'processpool': ProcessPoolExecutor(5)
    #}
    job_defaults = {'coalesce': False, 'max_instances': 3}
    config.registry.api_url = settings.get('api.url')
    config.registry.callback_url = settings.get('callback.url')
    scheduler = Scheduler(
        jobstores=jobstores,
        #executors=executors,
        job_defaults=job_defaults,
        timezone=TZ)
    if 'jobstore_db' in settings:
        scheduler.add_jobstore('sqlalchemy', url=settings['jobstore_db'])
    config.registry.scheduler = scheduler
    # scheduler.remove_all_jobs()
    # scheduler.start()
    resync_all_job = scheduler.get_job('resync_all')
    now = datetime.now(TZ)
    if not resync_all_job or resync_all_job.next_run_time < now - timedelta(
            hours=1):
        if resync_all_job:
            args = resync_all_job.args
        else:
            args = [settings.get('callback.url') + 'resync_all', None]
        run_date = now + timedelta(seconds=60)
        scheduler.add_job(push,
                          'date',
                          run_date=run_date,
                          timezone=TZ,
                          id='resync_all',
                          args=args,
                          replace_existing=True,
                          misfire_grace_time=60 * 60)
    return config.make_wsgi_app()
Example #27
"""
Demonstrates how to use the gevent-compatible scheduler to schedule a job that executes on 3-second intervals.
"""

from datetime import datetime
import os

from apscheduler.schedulers.gevent import GeventScheduler


def tick():
    print('Tick! The time is: %s' % datetime.now())


if __name__ == '__main__':
    scheduler = GeventScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    g = scheduler.start()  # g is the greenlet that runs the scheduler loop
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #28

def doPdf():
    print('doPdf start')
    try:
        info = pdfkit.from_url('http://localhost:7070/table', 'tableOut.pdf')
        if info:
            print('merging pdfs...')
            main2.merge4()
        print('Info: ', info)
    except Exception:
        print('Cannot open url or sth.')


def tick():
    print('Tick! The time is: %s' % datetime.now())


scheduler = GeventScheduler()

if __name__ == '__main__':
    scheduler.add_job(webapp_run)
    # scheduler.add_job(doPdf, 'interval', seconds=15)
    # scheduler.add_job(tick, 'interval', seconds=4)

    g = scheduler.start()
    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()
Example #29
import os
import time
from datetime import datetime, timedelta

from apscheduler.schedulers.gevent import GeventScheduler


def tick(scheduler):
    now = datetime.now()
    print('Tick! The time is: %s' % now)
    scheduler.add_job(tick2,
                      'date',
                      next_run_time=datetime.now() + timedelta(seconds=5),
                      args=(now, ))


def tick2(now):
    print("ttt start : %s" % now)
    time.sleep(10)
    print("ttt end : %s" % now)


if __name__ == '__main__':
    scheduler = GeventScheduler()
    scheduler.add_job(tick, 'interval', seconds=3, args=(scheduler, ))
    g = scheduler.start()  # g is the greenlet that runs the scheduler loop
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #30
            client.connect(broker, port=9001)

            client.subscribe(topic=channel_common_000, qos=0)
            client.subscribe(topic=channel_liwei_000, qos=0)
            client.subscribe(topic=channel_liwei_111, qos=0)
            client.subscribe(topic=channel_lot_message, qos=0)

            client.loop_forever()

        except (KeyboardInterrupt, SystemExit):
            client.disconnect()

    elif s == 'p':
        #         sched = BackgroundScheduler()
        #         sched.add_job(transmitMQTT, 'interval', seconds=1, args=(m,))  # interval scheduling (how often the job runs)
        #         sched.start()
        #         try:
        #             while True:
        #                 time.sleep(10)
        #         except (KeyboardInterrupt, SystemExit):
        #             sched.shutdown()

        sched = GeventScheduler()
        sched.add_job(transmitMQTT, 'interval', seconds=1, args=(m, ))
        g = sched.start()

        try:
            g.join()
        except (KeyboardInterrupt, SystemExit):
            pass
Example #31
        logger.error('Missing: config > app > dir_root')
        sys.exit(1)
    if not dir_processed:
        logger.error('Missing: config > app > dir_processed')
        sys.exit(1)
    if not dir_error:
        logger.error('Missing: config > app > dir_error')
        sys.exit(1)

    es_init(es_addr, es_port)

    scheduler.add_job(main,
                      'interval',
                      minutes=interval,
                      name='main_job',
                      next_run_time=datetime.now(),
                      replace_existing=True,
                      max_instances=1,
                      args=(es_addr, es_port, dir_root, dir_processed,
                            dir_error))

    g = scheduler.start()  # g is the greenlet that runs the scheduler loop

    logger.info(
        'Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here
    # until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #32
class Application(object):

    def __init__(self, config_file, data_dir, verbose=0):
        """
        Construct a new application instance.
        """

        self.config_file = config_file
        self.data_dir = data_dir
        self.verbose = verbose

        self.server = None
        self.provider = None
        self.connections = {}

        # Setup all parts of the application
        self.setup_config()
        self.setup_open_files()
        self.setup_database()
        self.setup_state()
        self.setup_connections()
        self.setup_cache()
        self.setup_provider()
        self.setup_server()
        self.setup_tasks()

    def setup_config(self):
        """
        Load the application config from file.
        """

        logger.debug("Loading config from %s", self.config_file)
        self.config = config.get_config(self.config_file)

    def setup_open_files(self):
        """
        Get and set open files limit.
        """

        open_files_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        new_open_files_limit = self.config["Advanced"]["open files limit"]

        logger.info(
            "System reports open files limit is %d.", open_files_limit)

        if new_open_files_limit != -1:
            logger.info(
                "Changing open files limit to %d.", new_open_files_limit)

            try:
                resource.setrlimit(resource.RLIMIT_NOFILE, (
                    new_open_files_limit, resource.RLIM_INFINITY))
            except resource.error as e:
                logger.warning(
                    "Failed to increase the number of open files: %s", e)

    def setup_database(self):
        """
        Initialize database.
        """

        self.db = Database(self.config["Provider"]["database"])
        self.db.create_database(drop_all=False)

    def setup_state(self):
        """
        Setup state.
        """

        self.state = State(os.path.join(
            self.get_cache_dir(), "provider.state"))

    def setup_cache(self):
        """
        Setup the caches for items and artwork.
        """

        # Initialize caches for items and artwork.
        item_cache = cache.ItemCache(
            path=self.get_cache_dir(
                self.config["Provider"]["item cache dir"]),
            max_size=self.config["Provider"]["item cache size"],
            prune_threshold=self.config[
                "Provider"]["item cache prune threshold"])
        artwork_cache = cache.ArtworkCache(
            path=self.get_cache_dir(self.config[
                "Provider"]["artwork cache dir"]),
            max_size=self.config["Provider"]["artwork cache size"],
            prune_threshold=self.config[
                "Provider"]["artwork cache prune threshold"])

        # Create a cache manager
        self.cache_manager = cache.CacheManager(
            db=self.db,
            item_cache=item_cache,
            artwork_cache=artwork_cache,
            connections=self.connections)

    def setup_connections(self):
        """
        Initialize the connections.
        """

        for name, section in self.config["Connections"].iteritems():
            index = len(self.connections) + 1

            self.connections[index] = Connection(
                db=self.db,
                state=self.state,
                index=index,
                name=name,
                url=section["url"],
                username=section["username"],
                password=section["password"],
                synchronization=section["synchronization"],
                synchronization_interval=section["synchronization interval"],
                transcode=section["transcode"],
                transcode_unsupported=section["transcode unsupported"])

    def setup_provider(self):
        """
        Setup the provider.
        """

        # Create provider.
        logger.debug(
            "Setting up provider for %d connection(s).", len(self.connections))

        self.provider = Provider(
            server_name=self.config["Provider"]["name"],
            db=self.db,
            state=self.state,
            connections=self.connections,
            cache_manager=self.cache_manager)

        # Do an initial synchronization if required.
        for connection in self.connections.values():
            connection.synchronizer.provider = self.provider
            connection.synchronizer.synchronize(initial=True)

    def setup_server(self):
        """
        Create the DAAP server.
        """

        logger.debug(
            "Setting up DAAP server at %s:%d",
            self.config["Daap"]["interface"], self.config["Daap"]["port"])

        self.server = DaapServer(
            provider=self.provider,
            password=self.config["Daap"]["password"],
            ip=self.config["Daap"]["interface"],
            port=self.config["Daap"]["port"],
            cache=self.config["Daap"]["cache"],
            cache_timeout=self.config["Daap"]["cache timeout"] * 60,
            bonjour=self.config["Daap"]["zeroconf"],
            debug=self.verbose > 1)

        # Extend server with a web interface
        if self.config["Daap"]["web interface"]:
            webserver.extend_server_app(self, self.server.app)

    def setup_tasks(self):
        """
        Setup all tasks that run periodically.
        """

        self.scheduler = GeventScheduler()

        # Add an initial job
        def _job():
            job.remove()
            self.synchronize(synchronization="startup")
        job = self.scheduler.add_job(
            _job, max_instances=1, trigger="interval", seconds=1)

        # Scheduler task to clean and expire the cache.
        cache_interval = self.config['Provider']['item cache prune interval']

        self.scheduler.add_job(
            self.cache_manager.expire,
            max_instances=1, trigger="interval", minutes=cache_interval)
        self.scheduler.add_job(
            self.cache_manager.clean,
            max_instances=1, trigger="interval", minutes=cache_interval)

        # Schedule tasks to synchronize each connection.
        for connection in self.connections.values():
            self.scheduler.add_job(
                self.synchronize, args=([connection, "interval"]),
                max_instances=1, trigger="interval",
                minutes=connection.synchronization_interval)

    def synchronize(self, connections=None, synchronization="manual"):
        """
        Synchronize selected connections (or all) given a synchronization
        event.
        """

        count = 0
        connections = connections or self.connections.values()

        logger.debug("Synchronization triggered via '%s'.", synchronization)

        for connection in connections:
            if synchronization == "interval":
                if connection.synchronization == "interval":
                    connection.synchronizer.synchronize()
                    count += 1
            elif synchronization == "startup":
                if connection.synchronization == "startup":
                    if not connection.synchronizer.is_initial_synced:
                        connection.synchronizer.synchronize()
                        count += 1
            elif synchronization == "manual":
                connection.synchronizer.synchronize()
                count += 1

        logger.debug("Synchronized %d connections.", count)

        # Update the cache.
        self.cache_manager.cache()

    def start(self):
        """
        Start the server.
        """

        logger.debug("Starting task scheduler.")
        self.scheduler.start()

        logger.debug("Starting DAAP server.")
        self.server.serve_forever()

    def stop(self):
        """
        Stop the server.
        """

        logger.debug("Stopping DAAP server.")
        self.server.stop()

        logger.debug("Stopping task scheduler.")
        self.scheduler.shutdown()

    def get_cache_dir(self, *path):
        """
        Resolve the path to a cache directory. The path is relative to the data
        directory. The directory will be created if it does not exist, and
        will be tested for writing.
        """

        full_path = os.path.abspath(os.path.normpath(
            os.path.join(self.data_dir, *path)))
        logger.debug("Resolved %s to %s", path, full_path)

        # Create path if required.
        try:
            os.makedirs(full_path, 0o755)
        except OSError as e:
            if e.errno == errno.EEXIST and os.path.isdir(full_path):
                pass
            else:
                raise Exception("Could not create folder: %s" % full_path)

        # Test for writing.
        ok = True
        test_file = os.path.join(full_path, ".write-test")

        while os.path.exists(test_file):
            test_file = test_file + str(random.randint(0, 9))

        try:
            with open(test_file, "w") as fp:
                fp.write("test")
        except IOError:
            ok = False
        finally:
            try:
                os.remove(test_file)
            except OSError:
                ok = False

        if not ok:
            raise Exception("Could not write to cache folder: %s" % full_path)

        # Cache directory created and tested for writing.
        return full_path
Example #33
    if int(app.conf.automation_status) == 1 or forced is True:
        if forced is False:
            global l_t_check
            l_t_check = app.datetime.now().strftime("%d/%m/%Y, %H:%M:%S")
        logging.info("Checking for releases in torrents")
        todays_date = app.datetime.now()
        schd_albums = app.QueueAlbum.query.all()
        for query in schd_albums:
            date = app.datetime.strptime(query.date, "%d %B %Y")
            if date <= todays_date:
                if int(query.status) == 0:
                    app.download(query.album_name)
        data = ({"album": "C_T", "date": "C_T"})
        app.pushtoListener(data)


def reschedule():
    sched.reschedule_job(job_id="auto_A", trigger='interval', minutes=int(app.conf.automation_interval) * 60)
    sched.reschedule_job(job_id="auto_T", trigger='interval', minutes=int(app.conf.automation_interval) * 60)


# ugly
l_t_check = "Never"
l_a_check = "Never"

sched = GeventScheduler()
sched.add_job(look_for_artist, 'interval', id="auto_A", minutes=int(app.conf.automation_interval) * 60)
sched.add_job(look_for_torrents, 'interval', id="auto_T", minutes=int(app.conf.automation_interval) * 60)
sched.add_job(generateSuggestions, 'interval', id="auto_S", seconds=6200)
sched.start()
Example #34
class Application(object):
    def __init__(self, config_file, data_dir, verbose=0):
        """
        Construct a new application instance.
        """

        self.config_file = config_file
        self.data_dir = data_dir
        self.verbose = verbose

        self.server = None
        self.provider = None
        self.connections = {}

        # Setup all parts of the application
        self.setup_config()
        self.setup_open_files()
        self.setup_database()
        self.setup_state()
        self.setup_connections()
        self.setup_cache()
        self.setup_provider()
        self.setup_server()
        self.setup_tasks()

    def setup_config(self):
        """
        Load the application config from file.
        """

        logger.debug("Loading config from %s", self.config_file)
        self.config = config.get_config(self.config_file)

    def setup_open_files(self):
        """
        Get and set open files limit.
        """

        open_files_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        new_open_files_limit = self.config["Advanced"]["open files limit"]

        logger.info("System reports open files limit is %d.", open_files_limit)

        if new_open_files_limit != -1:
            logger.info("Changing open files limit to %d.",
                        new_open_files_limit)

            try:
                resource.setrlimit(
                    resource.RLIMIT_NOFILE,
                    (new_open_files_limit, resource.RLIM_INFINITY))
            except resource.error as e:
                logger.warning(
                    "Failed to increase the number of open files: %s", e)

    def setup_database(self):
        """
        Initialize database.
        """

        self.db = Database(self.config["Provider"]["database"])
        self.db.create_database(drop_all=False)

    def setup_state(self):
        """
        Setup state.
        """

        self.state = State(os.path.join(self.get_cache_dir(),
                                        "provider.state"))

    def setup_cache(self):
        """
        Setup the caches for items and artwork.
        """

        # Initialize caches for items and artwork.
        item_cache = cache.ItemCache(
            path=self.get_cache_dir(self.config["Provider"]["item cache dir"]),
            max_size=self.config["Provider"]["item cache size"],
            prune_threshold=self.config["Provider"]
            ["item cache prune threshold"])
        artwork_cache = cache.ArtworkCache(
            path=self.get_cache_dir(
                self.config["Provider"]["artwork cache dir"]),
            max_size=self.config["Provider"]["artwork cache size"],
            prune_threshold=self.config["Provider"]
            ["artwork cache prune threshold"])

        # Create a cache manager
        self.cache_manager = cache.CacheManager(db=self.db,
                                                item_cache=item_cache,
                                                artwork_cache=artwork_cache,
                                                connections=self.connections)

    def setup_connections(self):
        """
        Initialize the connections.
        """

        for name, section in self.config["Connections"].iteritems():
            index = len(self.connections) + 1

            self.connections[index] = Connection(
                db=self.db,
                state=self.state,
                index=index,
                name=name,
                url=section["url"],
                username=section["username"],
                password=section["password"],
                synchronization=section["synchronization"],
                synchronization_interval=section["synchronization interval"],
                transcode=section["transcode"],
                transcode_unsupported=section["transcode unsupported"])

    def setup_provider(self):
        """
        Setup the provider.
        """

        # Create provider.
        logger.debug("Setting up provider for %d connection(s).",
                     len(self.connections))

        self.provider = Provider(server_name=self.config["Provider"]["name"],
                                 db=self.db,
                                 state=self.state,
                                 connections=self.connections,
                                 cache_manager=self.cache_manager)

        # Do an initial synchronization if required.
        for connection in self.connections.itervalues():
            connection.synchronizer.provider = self.provider
            connection.synchronizer.synchronize(initial=True)

    def setup_server(self):
        """
        Create the DAAP server.
        """

        logger.debug("Setting up DAAP server at %s:%d",
                     self.config["Daap"]["interface"],
                     self.config["Daap"]["port"])

        self.server = DaapServer(
            provider=self.provider,
            password=self.config["Daap"]["password"],
            ip=self.config["Daap"]["interface"],
            port=self.config["Daap"]["port"],
            cache=self.config["Daap"]["cache"],
            cache_timeout=self.config["Daap"]["cache timeout"] * 60,
            bonjour=self.config["Daap"]["zeroconf"],
            debug=self.verbose > 1)

        # Extend server with a web interface
        if self.config["Daap"]["web interface"]:
            webserver.extend_server_app(self, self.server.app)

    def setup_tasks(self):
        """
        Setup all tasks that run periodically.
        """

        self.scheduler = GeventScheduler()

        # Add an initial job
        def _job():
            job.remove()
            self.synchronize(synchronization="startup")

        job = self.scheduler.add_job(_job,
                                     max_instances=1,
                                     trigger="interval",
                                     seconds=1)

        # Scheduler task to clean and expire the cache.
        cache_interval = self.config['Provider']['item cache prune interval']

        self.scheduler.add_job(self.cache_manager.expire,
                               max_instances=1,
                               trigger="interval",
                               minutes=cache_interval)
        self.scheduler.add_job(self.cache_manager.clean,
                               max_instances=1,
                               trigger="interval",
                               minutes=cache_interval)

        # Schedule tasks to synchronize each connection.
        for connection in self.connections.itervalues():
            self.scheduler.add_job(self.synchronize,
                                   args=([connection, "interval"]),
                                   max_instances=1,
                                   trigger="interval",
                                   minutes=connection.synchronization_interval)

    def synchronize(self, connections=None, synchronization="manual"):
        """
        Synchronize selected connections (or all) given a synchronization
        event.
        """

        count = 0
        connections = connections or self.connections.values()

        logger.debug("Synchronization triggered via '%s'.", synchronization)

        for connection in connections:
            if synchronization == "interval":
                if connection.synchronization == "interval":
                    connection.synchronizer.synchronize()
                    count += 1
            elif synchronization == "startup":
                if connection.synchronization == "startup":
                    if not connection.synchronizer.is_initial_synced:
                        connection.synchronizer.synchronize()
                        count += 1
            elif synchronization == "manual":
                connection.synchronizer.synchronize()
                count += 1

        logger.debug("Synchronized %d connections.", count)

        # Update the cache.
        self.cache_manager.cache()

    def start(self):
        """
        Start the server.
        """

        logger.debug("Starting task scheduler.")
        self.scheduler.start()

        logger.debug("Starting DAAP server.")
        self.server.serve_forever()

    def stop(self):
        """
        Stop the server.
        """

        logger.debug("Stopping DAAP server.")
        self.server.stop()

        logger.debug("Stopping task scheduler.")
        self.scheduler.shutdown()

    def get_cache_dir(self, *path):
        """
        Resolve the path to a cache directory. The path is relative to the data
        directory. The directory will be created if it does not exist, and
        will be tested for writing.
        """

        full_path = os.path.abspath(
            os.path.normpath(os.path.join(self.data_dir, *path)))
        logger.debug("Resolved %s to %s", path, full_path)

        # Create path if required.
        try:
            os.makedirs(full_path, 0755)
        except OSError as e:
            if e.errno == errno.EEXIST and os.path.isdir(full_path):
                pass
            else:
                raise Exception("Could not create folder: %s" % full_path)

        # Test for writing.
        ok = True
        test_file = os.path.join(full_path, ".write-test")

        while os.path.exists(test_file):
            test_file = test_file + str(random.randint(0, 9))

        try:
            with open(test_file, "w") as fp:
                fp.write("test")
        except IOError:
            ok = False
        finally:
            try:
                os.remove(test_file)
            except OSError:
                ok = False

        if not ok:
            raise Exception("Could not write to cache folder: %s" % full_path)

        # Cache directory created and tested for writing.
        return full_path
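
A note on setup_tasks() above: the one-second interval job that calls job.remove() on its first run is a "run once, shortly after startup" idiom. A minimal sketch of just that pattern, with illustrative names:

from apscheduler.schedulers.gevent import GeventScheduler

scheduler = GeventScheduler()

def startup_task():
    # Remove the job after its first execution, turning the interval
    # job into a one-shot task that fires shortly after start().
    job.remove()
    print("initial synchronization")

job = scheduler.add_job(startup_task, trigger="interval", seconds=1,
                        max_instances=1)
scheduler.start()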
Exemplo n.º 35
class Scheduler(Service):
    """
    Service running scheduled and periodic commands
    """

    _schedulers = weakref.WeakSet()

    def __init__(self, name):
        super().__init__(name)
        self._commands = {}  # cmd_id : command
        # self._jobstores = {'default': SQLAlchemyJobStore(url=constants.scheduler_database_url,
        #                                                 tablename="jobs")}
        self._jobstores = {}
        self._executors = {}
        self._job_defaults = {
            'max_instances': 10,
            'coalesce': True,
        }
        self._triggers = {}  # cmd_id : trigger
        self._scheduler = GeventScheduler(
            jobstores=self._jobstores,
            executors=self._executors,
            job_defaults=self._job_defaults,
            timezone=pytz.utc,  # TODO: make user configurable
        )
        self._schedulers.add(self)
        s_cmds = constants.internaldb.scheduler_commands.get({})
        v = s_cmds.setdefault(self.identifier(), {})
        if not v:
            constants.internaldb.scheduler_commands.set(s_cmds)

    def _get_job_id(self, cmd_id):
        s_cmds = constants.internaldb.scheduler_commands.get()
        return s_cmds[self.identifier()].get(cmd_id)

    def _set_job_id(self, cmd_id, job_id):
        if not isinstance(job_id, int):
            job_id = job_id.id
        s_cmds = constants.internaldb.scheduler_commands.get()
        s_cmds[self.identifier()][cmd_id] = job_id
        constants.internaldb.scheduler_commands.set(s_cmds)

    def start(self):
        "Start the scheduler"
        self._scheduler.start()
        log.d("Started scheduler service:", self.name)
        return self

    def add_command(self, cmd, trigger=None):
        "Add a command to this scheduler and return a command id"
        assert isinstance(cmd, command.CoreCommand)
        command_id = super().add_command(cmd)
        self._commands[command_id] = cmd
        if trigger:
            self.set_trigger(command_id, trigger)
        else:
            raise NotImplementedError
        return command_id

    def set_trigger(self, cmd_id, trigger):
        "Change the trigger for a command in this scheduler"
        self._triggers[cmd_id] = trigger

    def remove_command(self, cmd_id):
        "Remove a command from this scheduler"
        j_id = self._get_job_id(cmd_id)
        self._scheduler.remove_job(j_id)

    def pause_command(self, cmd_id):
        "Pause a command in this scheduler, returns command state"
        raise NotImplementedError

    def resume_command(self, cmd_id):
        "Resume a command in this scheduler, returns command state"
        raise NotImplementedError

    def start_command(self, cmd_id, *args, **kwargs):
        "Start running a command in this scheduler, returns command state"
        cmd = self.get_command(cmd_id)
        j_id = self._scheduler.add_job(cmd._run,
                                       self._triggers[cmd_id],
                                       args,
                                       kwargs,
                                       name=cmd.__class__.__name__)
        self._set_job_id(cmd_id, j_id)

    def stop_command(self, cmd_id):
        "alias for remove_command"
        self.remove_command(cmd_id)

    def shutdown(self, wait=True):
        "Shutdown scheduler"
        self._scheduler.shutdown(wait=wait)
        return self

    @classmethod
    def shutdown_all(cls, wait=True):
        "Shutdown all schedulers"
        for s in cls._schedulers:
            s.shutdown(wait=wait)

    def pause(self):
        "Pause scheduler"
        raise NotImplementedError

    def resume(self):
        "Resume scheduler"
        raise NotImplementedError
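
Regarding the job_defaults used above: coalesce=True collapses a backlog of missed runs into a single run, and max_instances caps how many executions of the same job may overlap. A minimal sketch passing the same defaults straight to the scheduler:

import pytz

from apscheduler.schedulers.gevent import GeventScheduler

scheduler = GeventScheduler(
    job_defaults={
        'max_instances': 10,  # allow up to 10 overlapping runs per job
        'coalesce': True,     # merge a backlog of missed runs into one
    },
    timezone=pytz.utc,
)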
Exemplo n.º 36
class W5Timer(object):
    _instance_lock = threading.Lock()

    def __init__(self):
        self.scheduler = None

    def __new__(cls, *args, **kwargs):
        if not hasattr(W5Timer, "_instance"):
            with W5Timer._instance_lock:
                if not hasattr(W5Timer, "_instance"):
                    W5Timer._instance = object.__new__(cls)
        return W5Timer._instance

    def create_scheduler(self):
        self.scheduler = GeventScheduler()

    def start(self):
        self.scheduler.start()

    def shutdown(self):
        self.scheduler.shutdown()

    def pause(self, uuid):
        self.scheduler.pause_job(uuid)

    def pause_all(self):
        self.scheduler.pause()

    def resume(self, uuid):
        self.scheduler.resume_job(uuid)

    def resume_all(self):
        self.scheduler.resume()

    def remove_job(self, uuid):
        self.scheduler.remove_job(uuid)

    def get_jobs(self):
        return self.scheduler.get_jobs()

    def add_date(self, run_date=None, uuid=None, timer_uuid=None):
        self.scheduler.add_job(
            auto_execute,
            'date',
            run_date=run_date,
            id=timer_uuid,
            args=(uuid,)
        )

    def update_date(self, uuid, run_date=None):
        self.scheduler.reschedule_job(
            uuid,
            trigger='date',
            run_date=run_date
        )

    # Interval units accepted by add_interval/update_interval.
    _INTERVAL_UNITS = ("seconds", "minutes", "hours", "days", "weeks")

    def add_interval(self, t, interval, uuid=None, timer_uuid=None, start_date=None, end_date=None, jitter=0):
        if t not in self._INTERVAL_UNITS:
            return
        # The unit name maps directly onto the interval trigger keyword.
        self.scheduler.add_job(
            auto_execute,
            'interval',
            start_date=start_date,
            end_date=end_date,
            jitter=jitter,
            id=timer_uuid,
            args=(uuid,),
            **{t: interval}
        )

    def update_interval(self, uuid, t, interval, start_date=None, end_date=None, jitter=0):
        if t not in self._INTERVAL_UNITS:
            return
        self.scheduler.reschedule_job(
            uuid,
            trigger="interval",
            start_date=start_date,
            end_date=end_date,
            jitter=jitter,
            **{t: interval}
        )

    def add_cron(self, cron, uuid=None, timer_uuid=None, start_date=None, end_date=None, jitter=0):
        self.scheduler.add_job(
            auto_execute,
            CronTrigger.from_crontab(cron),
            start_date=start_date,
            end_date=end_date,
            jitter=jitter,
            id=timer_uuid,
            args=(uuid,)
        )

    def update_cron(self, uuid, cron, start_date=None, end_date=None, jitter=0):
        values = cron.split()

        if len(values) != 5:
            raise ValueError('Wrong number of fields; got {}, expected 5'.format(len(values)))

        self.scheduler.reschedule_job(
            uuid,
            None,
            "cron",
            minute=values[0],
            hour=values[1],
            day=values[2],
            month=values[3],
            day_of_week=values[4],
            start_date=start_date,
            end_date=end_date,
            jitter=jitter
        )
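
A brief usage sketch for the timer above; the uuid values are invented here, and auto_execute is assumed from the surrounding module:

timer = W5Timer()         # singleton, guarded by _instance_lock
timer.create_scheduler()
timer.start()

# Run auto_execute("workflow-1") every 5 minutes under job id "timer-1".
timer.add_interval("minutes", 5, uuid="workflow-1", timer_uuid="timer-1")

# Later, switch the same job to an hourly schedule.
timer.update_interval("timer-1", "hours", 1)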
Exemplo n.º 37
class Controller(object):
    def __init__(self,
                 name='defaultController',
                 workflows_path=core.config.paths.workflows_path):
        """Initializes a Controller object.
        
        Args:
            name (str, optional): Name for the controller.
            workflows_path (str, optional): Path to the workflows.
        """
        self.name = name
        self.workflows = {}
        self.load_all_workflows_from_directory(path=workflows_path)
        self.instances = {}
        self.tree = None

        self.scheduler = GeventScheduler()
        self.scheduler.add_listener(
            self.__scheduler_listener(),
            EVENT_SCHEDULER_START | EVENT_SCHEDULER_SHUTDOWN
            | EVENT_SCHEDULER_PAUSED | EVENT_SCHEDULER_RESUMED
            | EVENT_JOB_ADDED | EVENT_JOB_REMOVED
            | EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        self.ancestry = [self.name]
        self.paused_workflows = {}

    def reconstruct_ancestry(self):
        """Reconstructs the ancestry list field of a workflow in case it changes.
        """
        for key in self.workflows:
            self.workflows[key].reconstruct_ancestry(self.ancestry)

    def load_workflow_from_file(self,
                                path,
                                workflow_name,
                                name_override=None,
                                playbook_override=None):
        """Loads a workflow from a file.
        
        Args:
            path (str): Path to the workflow.
            workflow_name (str): Name of the workflow to load.
            name_override (str, optional): Name that the workflow should be changed to.
            playbook_override (str, optional): Name that the playbook should be changed to.
            
        Returns:
            True on success, False otherwise.
        """
        self.tree = cElementTree.ElementTree(file=path)
        playbook_name = playbook_override if playbook_override else os.path.splitext(
            os.path.basename(path))[0]
        for workflow in self.tree.iter(tag='workflow'):
            current_workflow_name = workflow.get('name')
            if current_workflow_name == workflow_name:
                if name_override:
                    workflow_name = name_override
                name = construct_workflow_name_key(playbook_name,
                                                   workflow_name)
                key = _WorkflowKey(playbook_name, workflow_name)
                self.workflows[key] = wf.Workflow(name=name,
                                                  xml=workflow,
                                                  parent_name=self.name,
                                                  playbook_name=playbook_name)
                logger.info('Adding workflow {0} to controller'.format(name))
                break
        else:
            logger.warning(
                'Workflow {0} not found in playbook {1}. Cannot load.'.format(
                    workflow_name, playbook_name))
            return False

        self.add_child_workflows()
        self.add_workflow_scheduled_jobs()
        return True

    def load_workflows_from_file(self,
                                 path,
                                 name_override=None,
                                 playbook_override=None):
        """Loads multiple workloads from a file.
        
        Args:
            path (str): Path to the workflow.
            name_override (str, optional): Name that the workflow should be changed to. 
            playbook_override (str, optional): Name that the playbook should be changed to.
        """
        self.tree = cElementTree.ElementTree(file=path)
        playbook_name = playbook_override if playbook_override else os.path.splitext(
            os.path.basename(path))[0]
        for workflow in self.tree.iter(tag='workflow'):
            workflow_name = name_override if name_override else workflow.get(
                'name')
            name = construct_workflow_name_key(playbook_name, workflow_name)
            key = _WorkflowKey(playbook_name, workflow_name)
            self.workflows[key] = wf.Workflow(name=name,
                                              xml=workflow,
                                              parent_name=self.name,
                                              playbook_name=playbook_name)
            logger.info('Adding workflow {0} to controller'.format(name))
        self.add_child_workflows()
        self.add_workflow_scheduled_jobs()

    def load_all_workflows_from_directory(self, path=None):
        """Loads all workflows from a directory.
        
        Args:
            path (str, optional): Path to the directory to load from. Defaults to the configuration workflows_path. 
        """
        if path is None:
            path = core.config.paths.workflows_path
        for workflow in locate_workflows_in_directory(path):
            self.load_workflows_from_file(os.path.join(path, workflow))

    def add_child_workflows(self):
        for workflow in self.workflows:
            playbook_name = workflow.playbook
            children = self.workflows[workflow].options.children
            for child in children:
                workflow_key = _WorkflowKey(
                    playbook_name,
                    extract_workflow_name(child, playbook_name=playbook_name))
                if workflow_key in self.workflows:
                    logger.info(
                        'Adding child workflow {0} to workflow {1}'.format(
                            child, self.workflows[workflow_key].name))
                    children[child] = self.workflows[workflow_key]

    def add_workflow_scheduled_jobs(self):
        """Schedules the workflow to run based on workflow options.
        """
        for workflow in self.workflows:
            if (self.workflows[workflow].options.enabled
                    and self.workflows[workflow].options.scheduler['autorun']
                    == 'true'):
                schedule_type = self.workflows[workflow].options.scheduler[
                    'type']
                schedule = self.workflows[workflow].options.scheduler['args']
                self.scheduler.add_job(self.workflows[workflow].execute,
                                       trigger=schedule_type,
                                       replace_existing=True,
                                       **schedule)
                logger.info('Added scheduled job for workflow {0}'.format(
                    self.workflows[workflow].name))

    def create_workflow_from_template(self,
                                      playbook_name,
                                      workflow_name,
                                      template_playbook='emptyWorkflow',
                                      template_name='emptyWorkflow'):
        """Creates a workflow from a workflow template.
        
        Args:
            playbook_name (str): The name of the new playbook. 
            workflow_name (str): The name of the new workflow.
            template_playbook (str): The name of the playbook template to load. Default is "emptyWorkflow".
            template_name (str): The name of the workflow template to load. Default is "emptyWorkflow".
            
        Returns:
            True on success, False otherwise.
        """
        path = '{0}{1}{2}.workflow'.format(core.config.paths.templates_path,
                                           sep, template_playbook)
        return self.load_workflow_from_file(path=path,
                                            workflow_name=template_name,
                                            name_override=workflow_name,
                                            playbook_override=playbook_name)

    def create_playbook_from_template(self,
                                      playbook_name,
                                      template_playbook='emptyWorkflow'):
        """Creates a playbook from a playbook template.
        
        Args:
            playbook_name (str): The name of the new playbook.
            template_playbook (str): The name of the playbook template to load. Default is "emptyWorkflow".
        """
        # TODO: Need a handler for returning workflow key and status
        path = '{0}{1}{2}.workflow'.format(core.config.paths.templates_path,
                                           sep, template_playbook)
        self.load_workflows_from_file(path=path,
                                      playbook_override=playbook_name)

    def remove_workflow(self, playbook_name, workflow_name):
        """Removes a workflow.
        
        Args:
            playbook_name (str): Playbook name under which the workflow is located.
            workflow_name (str): The name of the workflow to remove.
            
        Returns:
            True on success, False otherwise.
        """

        name = _WorkflowKey(playbook_name, workflow_name)
        if name in self.workflows:
            del self.workflows[name]

            logger.debug('Removed workflow {0}'.format(name))
            return True
        logger.warning(
            'Cannot remove workflow {0}. Does not exist in controller'.format(
                name))
        return False

    def remove_playbook(self, playbook_name):
        """Removes a playbook and all workflows within it.
        
        Args:
            playbook_name (str): The name of the playbook to remove.
            
        Returns:
            True on success, False otherwise.
        """
        for name in [
                workflow for workflow in self.workflows
                if workflow.playbook == playbook_name
        ]:
            del self.workflows[name]
            logger.debug('Removed workflow {0}'.format(name))
        logger.debug('Removed playbook {0}'.format(playbook_name))
        return True

    def get_all_workflows(self):
        """Gets all of the currently loaded workflows.
        
        Returns:
            A dict with key being the playbook, mapping to a list of workflow names for each playbook.
        """
        result = {}
        for key in self.workflows.keys():
            if key.playbook not in result:
                result[key.playbook] = []
            result[key.playbook].append(key.workflow)
        return result

    def get_all_playbooks(self):
        """Gets a list of all playbooks.
        
        Returns:
            A list containing all currently loaded playbook names.
        """
        return list(set(key.playbook for key in self.workflows.keys()))

    def is_workflow_registered(self, playbook_name, workflow_name):
        """Checks whether or not a workflow is currently registered in the system.
        
        Args:
            playbook_name (str): Playbook name under which the workflow is located.
            workflow_name (str): The name of the workflow.
            
        Returns:
            True if the workflow is registered, False otherwise.
        """
        return _WorkflowKey(playbook_name, workflow_name) in self.workflows

    def is_playbook_registered(self, playbook_name):
        """Checks whether or not a playbook is currently registered in the system.
        
        Args:
            playbook_name (str): The name of the playbook.
            
        Returns:
            True if the playbook is registered, False otherwise.
        """
        return any(workflow_key.playbook == playbook_name
                   for workflow_key in self.workflows)

    def update_workflow_name(self, old_playbook, old_workflow, new_playbook,
                             new_workflow):
        """Update the name of a workflow.
        
        Args:
            old_playbook (str): Name of the current playbook.
            old_workflow (str): Name of the current workflow.
            new_playbook (str): The new name of the playbook.
            new_workflow (str): The new name of the workflow.
        """
        old_key = _WorkflowKey(old_playbook, old_workflow)
        new_key = _WorkflowKey(new_playbook, new_workflow)
        self.workflows[new_key] = self.workflows.pop(old_key)
        self.workflows[new_key].name = construct_workflow_name_key(
            new_playbook, new_workflow)
        self.workflows[new_key].reconstruct_ancestry([self.name])
        logger.debug('updated workflow name {0} to {1}'.format(
            old_key, new_key))

    def update_playbook_name(self, old_playbook, new_playbook):
        """Update the name of a playbook.
        
        Args:
            old_playbook (str): Name of the current playbook.
            new_playbook (str): The new name of the playbook.
        """
        for key in [
                name for name in self.workflows.keys()
                if name.playbook == old_playbook
        ]:
            self.update_workflow_name(old_playbook, key.workflow, new_playbook,
                                      key.workflow)

    def add_workflow_breakpoint_steps(self, playbook_name, workflow_name,
                                      steps):
        """Adds a breakpoint (for debugging purposes) in the specified steps.
        
        Args:
            playbook_name (str): Playbook name under which the workflow is located.
            workflow_name (str): The name of the workflow under which the steps are located.
            steps (list[str]): The list of step names for which the user would like to pause execution.
        """
        workflow = self.get_workflow(playbook_name, workflow_name)
        if workflow:
            workflow.breakpoint_steps.extend(steps)

    def execute_workflow(self, playbook_name, workflow_name, start=None):
        """Executes a workflow.
        
        Args:
            playbook_name (str): Playbook name under which the workflow is located.
            workflow_name (str): Workflow to execute.
            start (str, optional): The name of the first step. Defaults to "start".
        """
        global pool
        global workflows
        global threading_is_initialized

        key = _WorkflowKey(playbook_name, workflow_name)
        if key in self.workflows:
            workflow = self.workflows[key]
            subs = deepcopy(subscription.subscriptions)

            # If threading has not been initialized, initialize it.
            if not threading_is_initialized:
                initialize_threading()
            if start is not None:
                logger.info('Executing workflow {0} for step {1}'.format(
                    key, start))
                workflows.append(
                    pool.submit(execute_workflow_worker, workflow, subs,
                                start))
            else:
                logger.info(
                    'Executing workflow {0} with default starting step'.format(
                        key))
                workflows.append(
                    pool.submit(execute_workflow_worker, workflow, subs))
            callbacks.SchedulerJobExecuted.send(self)
        else:
            logger.error(
                'Attempted to execute workflow which does not exist in controller'
            )

    def get_workflow(self, playbook_name, workflow_name):
        """Get a workflow object.
        
        Args:
            playbook_name (str): Playbook name under which the workflow is located.
            workflow_name (str): The name of the workflow.
            
        Returns:
            The workflow object if found, else None.
        """
        key = _WorkflowKey(playbook_name, workflow_name)
        if key in self.workflows:
            return self.workflows[key]
        return None

    def get_all_workflows_by_playbook(self, playbook_name):
        """Get a list of all workflow objects in a playbook.
        
        Args:
            playbook_name: The name of the playbook.
            
        Returns:
            A list of all workflow objects in a playbook.
        """
        _workflows = []
        for key in self.workflows.keys():
            if key.playbook == playbook_name:
                _workflows.append(self.workflows[key].name)
        return _workflows

    def playbook_to_xml(self, playbook_name):
        """Returns the XML representation of a playbook.
        
        Args:
            playbook_name: The name of the playbook.
            
        Returns:
            The XML representation of the playbook if the playbook has any workflows under it, else None.
        """
        all_workflows = [
            workflow for key, workflow in self.workflows.items()
            if key.playbook == playbook_name
        ]
        if all_workflows:
            xml = cElementTree.Element('workflows')
            for workflow in all_workflows:
                xml.append(workflow.to_xml())
            return xml
        else:
            logger.debug(
                'No workflows are registered in controller to convert to XML')
            return None

    def copy_workflow(self, old_playbook_name, new_playbook_name,
                      old_workflow_name, new_workflow_name):
        """Duplicates a workflow into its current playbook, or a different playbook.
        
        Args:
            old_playbook_name (str): Playbook name under which the workflow is located.
            new_playbook_name (str): The new playbook name for the duplicated workflow.
            old_workflow_name (str): The name of the workflow to be copied.
            new_workflow_name (str): The new name of the duplicated workflow.
        """
        workflow = self.get_workflow(old_playbook_name, old_workflow_name)
        workflow_copy = deepcopy(workflow)
        workflow_copy.playbook_name = new_playbook_name
        workflow_copy.name = construct_workflow_name_key(
            new_playbook_name, new_workflow_name)

        key = _WorkflowKey(new_playbook_name, new_workflow_name)
        self.workflows[key] = workflow_copy
        self.workflows[key].reconstruct_ancestry([self.name])
        logger.info('Workflow copied from {0}-{1} to {2}-{3}'.format(
            old_playbook_name, old_workflow_name, new_playbook_name,
            new_workflow_name))

    def copy_playbook(self, old_playbook_name, new_playbook_name):
        """Copies a playbook
        
        Args:
            old_playbook_name (str): The name of the playbook to be copied.
            new_playbook_name (str): The new name of the duplicated playbook.
        """
        for workflow in [
                workflow.workflow for workflow in self.workflows
                if workflow.playbook == old_playbook_name
        ]:
            self.copy_workflow(old_playbook_name, new_playbook_name, workflow,
                               workflow)

    def pause_workflow(self, playbook_name, workflow_name):
        """Pauses a workflow that is currently executing.
        
        Args:
            playbook_name (str): Playbook name under which the workflow is located.
            workflow_name (str): The name of the workflow.
            
        Returns:
            A randomly-generated key that needs to be used in order to resume the workflow. This feature is added for
            security purposes.
        """
        workflow = self.get_workflow(playbook_name, workflow_name)
        wf_key = _WorkflowKey(playbook_name, workflow_name)
        self.paused_workflows[wf_key] = uuid.uuid4()
        if workflow:
            logger.info('Pausing workflow {0}'.format(workflow.name))
            workflow.pause()
        return self.paused_workflows[wf_key].hex

    def resume_workflow(self, playbook_name, workflow_name, validate_uuid):
        """Resumes a workflow that has been paused.
        
        Args:
            playbook_name (str): Playbook name under which the workflow is located.
            workflow_name (str): The name of the workflow.
            validate_uuid (str): The randomly-generated hexadecimal key that was returned from pause_workflow(). This
            is needed to resume a workflow for security purposes.
            
        Returns:
            "Success" if it is successful, or other error messages.
        """
        workflow = self.get_workflow(playbook_name, workflow_name)
        wf_key = _WorkflowKey(playbook_name, workflow_name)
        if workflow:
            if validate_uuid == self.paused_workflows[wf_key].hex:
                logger.info('Resuming workflow {0}'.format(workflow.name))
                workflow.resume()
                return True
            else:
                logger.warning(
                    'Cannot resume workflow {0}. Invalid key'.format(
                        workflow.name))
                return False

    def resume_breakpoint_step(self, playbook_name, workflow_name):
        """Resumes a step that has been specified as a breakpoint.
        
        Args:
            playbook_name (str): Playbook name under which the workflow is located.
            workflow_name (str): The name of the workflow.
        """
        workflow = self.get_workflow(playbook_name, workflow_name)
        if workflow:
            logger.debug('Resuming workflow {0} from breakpoint'.format(
                workflow.name))
            workflow.resume_breakpoint_step()

    # Starts active execution
    def start(self):
        """Starts the scheduler for active execution. This function must be called before any workflows are executed.
        
        Returns:
            The state of the scheduler if successful, error message if scheduler is in "stopped" state.
        """
        if self.scheduler.state != STATE_RUNNING and self.scheduler.state != STATE_PAUSED:
            logger.info('Starting scheduler')
            self.scheduler.start()
        else:
            logger.warning(
                'Cannot start scheduler. Scheduler is already running or is paused'
            )
            return "Scheduler already running."
        return self.scheduler.state

    # Stops active execution
    def stop(self, wait=True):
        """Stops active execution. 
        
        Args:
            wait (bool, optional): Boolean to synchronously or asynchronously wait for the scheduler to shutdown.
                Default is True.
                
        Returns:
            The state of the scheduler if successful, error message if scheduler is already in "stopped" state.
        """
        if self.scheduler.state != STATE_STOPPED:
            logger.info('Stopping scheduler')
            self.scheduler.shutdown(wait=wait)
        else:
            logger.warning(
                'Cannot stop scheduler. Scheduler is already stopped')
            return "Scheduler already stopped."
        return self.scheduler.state

    # Pauses active execution
    def pause(self):
        """Pauses active execution.
        
        Returns:
            The state of the scheduler if successful, error message if scheduler is not in the "running" state.
        """
        if self.scheduler.state == STATE_RUNNING:
            logger.info('Pausing scheduler')
            self.scheduler.pause()
        elif self.scheduler.state == STATE_PAUSED:
            logger.warning(
                'Cannot pause scheduler. Scheduler is already paused')
            return "Scheduler already paused."
        elif self.scheduler.state == STATE_STOPPED:
            logger.warning('Cannot pause scheduler. Scheduler is stopped')
            return "Scheduler is in STOPPED state and cannot be paused."
        return self.scheduler.state

    # Resumes active execution
    def resume(self):
        """Resumes active execution.
        
        Returns:
            The state of the scheduler if successful, error message if scheduler is not in the "paused" state.
        """
        if self.scheduler.state == STATE_PAUSED:
            logger.info('Resuming scheduler')
            self.scheduler.resume()
        else:
            logger.warning(
                "Scheduler is not in PAUSED state and cannot be resumed.")
            return "Scheduler is not in PAUSED state and cannot be resumed."
        return self.scheduler.state

    # Pauses active execution of specific job
    def pause_job(self, job_id):
        """Pauses active execution of a specific job.
        
        Args:
            job_id (str): ID of the job to pause.
        """
        logger.info('Pausing job {0}'.format(job_id))
        self.scheduler.pause_job(job_id=job_id)

    # Resumes active execution of specific job
    def resume_job(self, job_id):
        """Resumes active execution of a specific job.
        
        Args:
            job_id (str): ID of the job to resume.
        """
        logger.info('Resuming job {0}'.format(job_id))
        self.scheduler.resume_job(job_id=job_id)

    # Returns jobs scheduled for active execution
    def get_scheduled_jobs(self):
        """Get all actively scheduled jobs.
        
        Returns:
             A list of all actively scheduled jobs.
        """
        return self.scheduler.get_jobs()

    def __scheduler_listener(self):
        event_selector_map = {
            EVENT_SCHEDULER_START:
            (lambda: callbacks.SchedulerStart.send(self)),
            EVENT_SCHEDULER_SHUTDOWN:
            (lambda: callbacks.SchedulerShutdown.send(self)),
            EVENT_SCHEDULER_PAUSED:
            (lambda: callbacks.SchedulerPaused.send(self)),
            EVENT_SCHEDULER_RESUMED:
            (lambda: callbacks.SchedulerResumed.send(self)),
            EVENT_JOB_ADDED: (lambda: callbacks.SchedulerJobAdded.send(self)),
            EVENT_JOB_REMOVED:
            (lambda: callbacks.SchedulerJobRemoved.send(self)),
            EVENT_JOB_EXECUTED:
            (lambda: callbacks.SchedulerJobExecuted.send(self)),
            EVENT_JOB_ERROR: (lambda: callbacks.SchedulerJobError.send(self))
        }

        def event_selector(event):
            try:
                event_selector_map[event.code]()
            except KeyError:
                logger.error('Unknown scheduler event received: {0}'.format(event.code))

        return event_selector
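
The listener wiring in __init__ above boils down to one callback registered against a bitmask of event codes. A standalone sketch of the same mechanism, reduced to the two job events:

from apscheduler.schedulers.gevent import GeventScheduler
from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED

scheduler = GeventScheduler()

def on_job_event(event):
    # event.code identifies which event fired; job events carry a job_id.
    if event.code == EVENT_JOB_ERROR:
        print('job %s raised %r' % (event.job_id, event.exception))
    else:
        print('job %s executed' % event.job_id)

scheduler.add_listener(on_job_event, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)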
Exemplo n.º 38
def tick(param):  # signature inferred from the add_job calls below
    logger = logging.getLogger(param)
    logger.info('Starting task %s' % (param))
    conn = get_conn(config)
    source.crawl(conn)


if __name__ == '__main__':
    reload(sys)
    sys.setdefaultencoding('utf-8')
    config = configparser.ConfigParser()
    config.read("config.ini")
    logging.config.fileConfig("logger.conf")

    scheduler = GeventScheduler()
    scheduler.add_executor('processpool')
    scheduler.add_job(tick, 'interval', ['sina_video'], seconds=7200)
    scheduler.add_job(tick, 'interval', ['smzdm'], seconds=7200)
    scheduler.add_job(tick, 'interval', ['kr36'], seconds=7200)
    scheduler.add_job(tick, 'interval', ['sina_top'], seconds=600)
    scheduler.add_job(tick, 'interval', ['zhihu_daily'], seconds=86400)
    scheduler.add_job(tick, 'interval', ['chuangye'], seconds=86400)
    scheduler.add_job(tick, 'interval', ['douban'], seconds=86400)
    scheduler.add_job(tick, 'interval', ['qiushibaike'], seconds=86400)
    scheduler.add_job(tick, 'interval', ['study163'], seconds=600)
    scheduler.add_job(tick, 'interval', ['news'], seconds=600)

    scheduler.add_job(tick, 'interval', ['baidu_amuse'], seconds=3600)
    scheduler.add_job(tick, 'interval', ['baidu_beauty'], seconds=3600)
    scheduler.add_job(tick, 'interval', ['baidu_history'], seconds=3600)
    scheduler.add_job(tick, 'interval', ['baidu_music'], seconds=3600)
    scheduler.add_job(tick, 'interval', ['baidu_society'], seconds=3600)

    # Start the scheduler and block the main greenlet; otherwise the
    # script would exit immediately without running any jobs.
    g = scheduler.start()
    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
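
One caveat about add_executor('processpool') above: APScheduler registers an executor under the alias 'default' when no alias is given, so that call replaces the default thread pool and every job in this script runs in a worker process. To keep the thread pool as the default and opt in per job, an alias can be used; a sketch, with the alias name invented here:

scheduler = GeventScheduler()
scheduler.add_executor('processpool', alias='heavy')
# Only this job runs in the process pool; all others use the default executor.
scheduler.add_job(tick, 'interval', ['sina_video'], seconds=7200,
                  executor='heavy')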
Exemplo n.º 39
class Controller(object):
    def __init__(self, name="defaultController"):
        self.name = name
        self.workflows = {}
        self.load_all_workflows_from_directory()
        self.instances = {}
        self.tree = None
        self.eventlog = []
        self.schedulerStatusListener = SchedulerStatusListener(self.eventlog)
        self.jobStatusListener = JobStatusListener(self.eventlog)
        self.jobExecutionListener = JobExecutionListener(self.eventlog)
        self.scheduler = GeventScheduler()
        self.scheduler.add_listener(
            self.schedulerStatusListener.callback(self),
            EVENT_SCHEDULER_START | EVENT_SCHEDULER_SHUTDOWN
            | EVENT_SCHEDULER_PAUSED | EVENT_SCHEDULER_RESUMED)
        self.scheduler.add_listener(self.jobStatusListener.callback(self),
                                    EVENT_JOB_ADDED | EVENT_JOB_REMOVED)
        self.scheduler.add_listener(self.jobExecutionListener.callback(self),
                                    EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        self.ancestry = [self.name]

        # MULTIPROCESSING
        # self.pool = multiprocessing.Pool(processes=5)
        # self.manager = multiprocessing.Manager()
        # self.queue = self.manager.SimpleQueue()

    def load_workflow_from_file(self,
                                path,
                                workflow_name,
                                name_override=None,
                                playbook_override=None):
        self.tree = et.ElementTree(file=path)
        playbook_name = playbook_override if playbook_override else os.path.splitext(
            os.path.basename(path))[0]
        for workflow in self.tree.iter(tag="workflow"):
            current_workflow_name = workflow.get('name')
            if current_workflow_name == workflow_name:
                if name_override:
                    workflow_name = name_override
                name = construct_workflow_name_key(playbook_name,
                                                   workflow_name)
                key = _WorkflowKey(playbook_name, workflow_name)
                self.workflows[key] = wf.Workflow(name=name,
                                                  workflowConfig=workflow,
                                                  parent_name=self.name,
                                                  filename=playbook_name)
                break
        else:
            return False

        self.addChildWorkflows()
        self.addWorkflowScheduledJobs()
        return True

    def loadWorkflowsFromFile(self,
                              path,
                              name_override=None,
                              playbook_override=None):
        self.tree = et.ElementTree(file=path)
        playbook_name = playbook_override if playbook_override else os.path.splitext(
            os.path.basename(path))[0]
        for workflow in self.tree.iter(tag='workflow'):
            workflow_name = name_override if name_override else workflow.get(
                'name')
            name = construct_workflow_name_key(playbook_name, workflow_name)
            key = _WorkflowKey(playbook_name, workflow_name)
            self.workflows[key] = wf.Workflow(name=name,
                                              workflowConfig=workflow,
                                              parent_name=self.name,
                                              filename=playbook_name)
        self.addChildWorkflows()
        self.addWorkflowScheduledJobs()

    def load_all_workflows_from_directory(self, path=config.workflowsPath):
        for workflow in locate_workflows_in_directory(path):
            self.loadWorkflowsFromFile(
                os.path.join(config.workflowsPath, workflow))

    def addChildWorkflows(self):
        for workflow in self.workflows:
            playbook_name = workflow.playbook
            children = self.workflows[workflow].options.children
            for child in children:
                workflow_key = _WorkflowKey(
                    playbook_name,
                    extract_workflow_name(child, playbook_name=playbook_name))
                if workflow_key in self.workflows:
                    children[child] = self.workflows[workflow_key]

    def addWorkflowScheduledJobs(self):
        for workflow in self.workflows:
            if (self.workflows[workflow].options.enabled
                    and self.workflows[workflow].options.scheduler["autorun"]
                    == "true"):
                schedule_type = self.workflows[workflow].options.scheduler[
                    "type"]
                schedule = self.workflows[workflow].options.scheduler["args"]
                self.scheduler.add_job(self.workflows[workflow].execute,
                                       trigger=schedule_type,
                                       replace_existing=True,
                                       **schedule)

    def create_workflow_from_template(self,
                                      playbook_name,
                                      workflow_name,
                                      template_playbook='emptyWorkflow',
                                      template_name='emptyWorkflow'):
        path = '{0}{1}{2}.workflow'.format(config.templatesPath, sep,
                                           template_playbook)
        return self.load_workflow_from_file(path=path,
                                            workflow_name=template_name,
                                            name_override=workflow_name,
                                            playbook_override=playbook_name)

    def create_playbook_from_template(self,
                                      playbook_name,
                                      template_playbook='emptyWorkflow'):
        #TODO: Need a handler for returning workflow key and status
        path = '{0}{1}{2}.workflow'.format(config.templatesPath, sep,
                                           template_playbook)
        self.loadWorkflowsFromFile(path=path, playbook_override=playbook_name)

    def removeWorkflow(self, playbook_name, workflow_name):
        name = _WorkflowKey(playbook_name, workflow_name)
        if name in self.workflows:
            del self.workflows[name]
            return True
        return False

    def remove_playbook(self, playbook_name):
        removed = False
        for name in [
                workflow for workflow in self.workflows
                if workflow.playbook == playbook_name
        ]:
            del self.workflows[name]
            removed = True
        return removed

    def get_all_workflows(self):
        result = {}
        for key in self.workflows.keys():
            if key.playbook not in result:
                result[key.playbook] = []
            result[key.playbook].append(key.workflow)
        return result

    def is_workflow_registered(self, playbook_name, workflow_name):
        return _WorkflowKey(playbook_name, workflow_name) in self.workflows

    def is_playbook_registered(self, playbook_name):
        return any(workflow_key.playbook == playbook_name
                   for workflow_key in self.workflows)

    def update_workflow_name(self, old_playbook, old_workflow, new_playbook,
                             new_workflow):
        old_key = _WorkflowKey(old_playbook, old_workflow)
        new_key = _WorkflowKey(new_playbook, new_workflow)
        self.workflows[new_key] = self.workflows.pop(old_key)
        self.workflows[new_key].name = construct_workflow_name_key(
            new_playbook, new_workflow)

    def update_playbook_name(self, old_playbook, new_playbook):
        for key in [
                name for name in self.workflows.keys()
                if name.playbook == old_playbook
        ]:
            self.update_workflow_name(old_playbook, key.workflow, new_playbook,
                                      key.workflow)

    # def executeWorkflowWorker(self):
    #
    #     print("Thread " + str(os.getpid()) + " starting up...")
    #
    #     while (True):
    #         while (self.queue.empty()):
    #             continue
    #         name,start,data = self.queue.get()
    #         print("Thread " + str(os.getpid()) + " received and executing workflow "+name)
    #         steps, instances = self.workflows[name].execute(start=start, data=data)

    def executeWorkflow(self,
                        playbook_name,
                        workflow_name,
                        start="start",
                        data=None):
        self.workflows[_WorkflowKey(playbook_name,
                                    workflow_name)].execute(start=start,
                                                            data=data)
        # print("Boss thread putting "+name+" workflow on queue...:")
        # self.queue.put((name, start, data))
        self.jobExecutionListener.execute_event_code(self, 'JobExecuted')

    def get_workflow(self, playbook_name, workflow_name):
        key = _WorkflowKey(playbook_name, workflow_name)
        if key in self.workflows:
            return self.workflows[key]
        return None

    def playbook_to_xml(self, playbook_name):
        workflows = [
            workflow for key, workflow in self.workflows.items()
            if key.playbook == playbook_name
        ]
        if workflows:
            xml = et.Element("workflows")
            for workflow in workflows:
                xml.append(workflow.to_xml())
            return xml
        else:
            return None

    # Starts active execution
    def start(self):
        self.scheduler.start()

    # Stops active execution
    def stop(self, wait=True):
        self.scheduler.shutdown(wait=wait)

    # Pauses active execution
    def pause(self):
        self.scheduler.pause()

    # Resumes active execution
    def resume(self):
        self.scheduler.resume()

    # Pauses active execution of specific job
    def pauseJob(self, job_id):
        self.scheduler.pause_job(job_id=job_id)

    # Resumes active execution of specific job
    def resumeJob(self, job_id):
        self.scheduler.resume_job(job_id=job_id)

    # Returns jobs scheduled for active execution
    def getScheduledJobs(self):
        return self.scheduler.get_jobs()
Exemplo n.º 40
user.add_url_rule(rule='/hichat_update',
                  endpoint='hichat_update',
                  view_func=login_required(
                      check_activate(HiChatUpdate.as_view('hichat_update'))),
                  methods=['POST'])
user.add_url_rule(rule='/notification_load',
                  endpoint='notification_load',
                  view_func=login_required(
                      check_activate(
                          Notification.as_view('notification_load'))),
                  methods=['GET', 'POST'])
user.add_url_rule(rule='/notification_count',
                  endpoint='notification_count',
                  view_func=login_required(
                      check_activate(
                          NotificationCount.as_view('notification_count'))),
                  methods=['GET', 'POST'])
app.register_blueprint(user, url_prefix='/user')


def p():
    print("bbbbbbbbb")


if __name__ == '__main__':
    scheduler = GeventScheduler()
    scheduler.add_job(check_time, 'interval', seconds=1)
    scheduler.start()
    socketio.run(app, debug=True, host='0.0.0.0')
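
Because the snippet above runs GeventScheduler alongside Flask-SocketIO, both must cooperate through gevent's event loop, and the usual caveat is to monkey-patch before importing anything else. A sketch of the ordering, with the app wiring simplified:

from gevent import monkey
monkey.patch_all()  # must run before Flask/SocketIO/scheduler imports

from flask import Flask
from flask_socketio import SocketIO
from apscheduler.schedulers.gevent import GeventScheduler

app = Flask(__name__)
socketio = SocketIO(app, async_mode='gevent')

def check_time():
    print("tick")

scheduler = GeventScheduler()
scheduler.add_job(check_time, 'interval', seconds=1)
scheduler.start()                   # scheduler loop runs in a greenlet
socketio.run(app, host='0.0.0.0')   # blocks; greenlets keep running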
Exemplo n.º 41
"""
Demonstrates how to use the gevent compatible scheduler to schedule a job that executes on 3 second
intervals.
"""

from datetime import datetime
import os

from apscheduler.schedulers.gevent import GeventScheduler


def tick():
    print('Tick! The time is: %s' % datetime.now())


if __name__ == '__main__':
    scheduler = GeventScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    g = scheduler.start()  # g is the greenlet that runs the scheduler loop
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
Exemplo n.º 42
class Scheduler(object):
    def __init__(self):
        self.scheduler = GeventScheduler()
        self.scheduler.add_listener(self.__scheduler_listener(),
                                    EVENT_SCHEDULER_START | EVENT_SCHEDULER_SHUTDOWN
                                    | EVENT_SCHEDULER_PAUSED | EVENT_SCHEDULER_RESUMED
                                    | EVENT_JOB_ADDED | EVENT_JOB_REMOVED
                                    | EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        self.id = 'controller'
        self.app = None

    def schedule_workflows(self, task_id, executable, workflow_ids, trigger):
        """
        Schedules one or more workflows for execution

        Args:
            task_id (int): Id of the scheduled task
            executable (func): A callable to execute; it must take exactly one argument, a workflow id
            workflow_ids (iterable(str)): An iterable of workflow ids
            trigger (Trigger): The trigger to use for this scheduled task
        """

        def execute(id_):
            with self.app.app_context():
                executable(id_)

        for workflow_id in workflow_ids:
            self.scheduler.add_job(execute, args=(workflow_id,),
                                   id=construct_task_id(task_id, workflow_id),
                                   trigger=trigger, replace_existing=True)
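    # Usage sketch (hypothetical names): schedule two workflows every 30 seconds.
    #     from apscheduler.triggers.interval import IntervalTrigger
    #     sched.schedule_workflows(1, run_workflow, ['wf-a', 'wf-b'],
    #                              IntervalTrigger(seconds=30))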

    def get_all_scheduled_workflows(self):
        """
        Gets all the scheduled workflows

        Returns:
            (dict{str: list[str]}) A dict mapping each task_id to its list of workflow execution ids
        """
        tasks = {}
        for job in self.scheduler.get_jobs():
            task, workflow_execution_id = split_task_id(job.id)
            if task not in tasks:
                tasks[task] = [workflow_execution_id]
            else:
                tasks[task].append(workflow_execution_id)
        return tasks
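    # For example, assuming construct_task_id/split_task_id join and split on a
    # delimiter, jobs '12-abc' and '12-def' would yield {'12': ['abc', 'def']}.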

    def get_scheduled_workflows(self, task_id):
        """
        Gets all the scheduled workflows for a given task id

        Args:
            task_id (str): The task id

        Returns:
            (list[str]) A list of workflow execution ids associated with this task id
        """
        tasks = []
        for job in self.scheduler.get_jobs():
            task, workflow_execution_id = split_task_id(job.id)
            if task == task_id:
                tasks.append(workflow_execution_id)
        return tasks

    def update_workflows(self, task_id, trigger):
        """
        Updates the workflows for a given task id to use a different trigger

        Args:
            task_id (str|int): The task id to update
            trigger (Trigger): The new trigger to use
        """
        existing_tasks = {construct_task_id(task_id, workflow_execution_id) for workflow_execution_id in
                          self.get_scheduled_workflows(task_id)}
        for job_id in existing_tasks:
            self.scheduler.reschedule_job(job_id=job_id, trigger=trigger)

    def unschedule_workflows(self, task_id, workflow_execution_ids):
        """
        Unschedules one or more workflows

        Args:
            task_id (str|int): The task ID to unschedule
            workflow_execution_ids (list[str]): The list of workflow execution IDs to unschedule
        """
        for workflow_execution_id in workflow_execution_ids:
            try:
                self.scheduler.remove_job(construct_task_id(task_id, workflow_execution_id))
            except JobLookupError:
                logger.warning('Cannot delete task {}. '
                               'No task found in scheduler'.format(construct_task_id(task_id, workflow_execution_id)))

    def start(self):
        """Starts the scheduler for active execution. This function must be called before any workflows are executed.

        Returns:
            The state of the scheduler if successful, or an error message if the scheduler is already running or paused.
        """
        if self.scheduler.state == STATE_STOPPED:
            logger.info('Starting scheduler')
            self.scheduler.start()
        else:
            logger.warning('Cannot start scheduler. Scheduler is already running or is paused')
            return "Scheduler already running."
        return self.scheduler.state

    def stop(self, wait=True):
        """Stops active execution.

        Args:
            wait (bool, optional): If True, block until running jobs finish before shutting down;
                if False, return immediately. Defaults to True.

        Returns:
            The state of the scheduler if successful, error message if scheduler is already in "stopped" state.
        """
        if self.scheduler.state != STATE_STOPPED:
            logger.info('Stopping scheduler')
            self.scheduler.shutdown(wait=wait)
        else:
            logger.warning('Cannot stop scheduler. Scheduler is already stopped')
            return "Scheduler already stopped."
        return self.scheduler.state

    def pause(self):
        """Pauses active execution.

        Returns:
            The state of the scheduler if successful, error message if scheduler is not in the "running" state.
        """
        if self.scheduler.state == STATE_RUNNING:
            logger.info('Pausing scheduler')
            self.scheduler.pause()
        elif self.scheduler.state == STATE_PAUSED:
            logger.warning('Cannot pause scheduler. Scheduler is already paused')
            return "Scheduler already paused."
        elif self.scheduler.state == STATE_STOPPED:
            logger.warning('Cannot pause scheduler. Scheduler is stopped')
            return "Scheduler is in STOPPED state and cannot be paused."
        return self.scheduler.state

    def resume(self):
        """Resumes active execution.

        Returns:
            The state of the scheduler if successful, error message if scheduler is not in the "paused" state.
        """
        if self.scheduler.state == STATE_PAUSED:
            logger.info('Resuming scheduler')
            self.scheduler.resume()
        else:
            logger.warning("Scheduler is not in PAUSED state and cannot be resumed.")
            return "Scheduler is not in PAUSED state and cannot be resumed."
        return self.scheduler.state
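    # Note: the STATE_* constants compared against above come from
    # apscheduler.schedulers.base (in APScheduler 3.x: STATE_STOPPED = 0,
    # STATE_RUNNING = 1, STATE_PAUSED = 2), so these lifecycle methods return
    # either an integer state or an error string.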

    def pause_workflows(self, task_id, workflow_execution_ids):
        """
        Pauses some workflows associated with a task

        Args:
            task_id (int|str): The id of the task to pause
            workflow_execution_ids (list[str]): The list of workflow execution IDs to pause
        """
        for workflow_execution_id in workflow_execution_ids:
            job_id = construct_task_id(task_id, workflow_execution_id)
            try:
                self.scheduler.pause_job(job_id=job_id)
                logger.info('Paused job {0}'.format(job_id))
            except JobLookupError:
                logger.warning('Cannot pause scheduled workflow {}. Workflow ID not found'.format(job_id))

    def resume_workflows(self, task_id, workflow_execution_ids):
        """
        Resumes some workflows associated with a task

        Args:
            task_id (int|str): The id of the task to resume
            workflow_execution_ids (list[str]): The list of workflow execution IDs to resume
        """
        for workflow_execution_id in workflow_execution_ids:
            job_id = construct_task_id(task_id, workflow_execution_id)
            try:
                self.scheduler.resume_job(job_id=job_id)
                logger.info('Resumed job {0}'.format(job_id))
            except JobLookupError:
                logger.warning('Cannot resume scheduled workflow {}. Workflow ID not found'.format(job_id))

    def __scheduler_listener(self):
        event_selector_map = {EVENT_SCHEDULER_START: WalkoffEvent.SchedulerStart,
                              EVENT_SCHEDULER_SHUTDOWN: WalkoffEvent.SchedulerShutdown,
                              EVENT_SCHEDULER_PAUSED: WalkoffEvent.SchedulerPaused,
                              EVENT_SCHEDULER_RESUMED: WalkoffEvent.SchedulerResumed,
                              EVENT_JOB_ADDED: WalkoffEvent.SchedulerJobAdded,
                              EVENT_JOB_REMOVED: WalkoffEvent.SchedulerJobRemoved,
                              EVENT_JOB_EXECUTED: WalkoffEvent.SchedulerJobExecuted,
                              EVENT_JOB_ERROR: WalkoffEvent.SchedulerJobError}

        def event_selector(event):
            try:
                event = event_selector_map[event.code]
                event.send(self)
            except KeyError:  # pragma: no cover
                logger.error('Unknown event {} triggered in scheduler'.format(event))

        return event_selector
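
# A short usage sketch for this Scheduler wrapper. The trigger choice, Flask app
# wiring, and workflow ids below are illustrative assumptions; run_workflow and
# flask_app are hypothetical names, while construct_task_id/split_task_id are the
# helpers imported elsewhere in this module.
from apscheduler.triggers.interval import IntervalTrigger

def run_workflow(workflow_id):
    # Hypothetical executable; takes exactly one argument, a workflow id.
    print('executing workflow %s' % workflow_id)

sched = Scheduler()
sched.app = flask_app  # assumed: the Flask app whose context execute() pushes
sched.schedule_workflows(task_id=1, executable=run_workflow,
                         workflow_ids=['wf-a', 'wf-b'],
                         trigger=IntervalTrigger(seconds=30))
sched.start()                               # transitions the scheduler to STATE_RUNNING
print(sched.get_all_scheduled_workflows())  # e.g. {'1': ['wf-a', 'wf-b']}
sched.pause_workflows(1, ['wf-a'])          # pause a single scheduled workflow
sched.stop(wait=True)                       # shut down, waiting for running jobs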