Example No. 1
 def create_scheduler(self):
     self.scheduler = GeventScheduler(
         job_defaults={
             'coalesce': False,
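             # max_instances is assumed to be defined in the enclosing scope (not shown here)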
             'max_instances': max_instances
         }
     )
Example No. 2
    def __init__(self, config, activate=False):
        super(AuctionsDataBridge, self).__init__()
        self.config = config
        self.tenders_ids_list = []
        self.activate = activate
        self.client = ApiClient(
            '',
            host_url=self.config_get('tenders_api_server'),
            api_version=self.config_get('tenders_api_version'))
        params = {'opt_fields': 'status,auctionPeriod', 'mode': '_all_'}
        if parse_version(
                self.config_get('tenders_api_version')) > parse_version('0.9'):
            params['opt_fields'] += ',lots'
        self.client.params.update(params)
        self.tz = tzlocal()

        self.couch_url = urljoin(self.config_get('couch_url'),
                                 self.config_get('auctions_db'))
        self.db = Database(self.couch_url,
                           session=Session(retry_delays=range(10)))

        if self.activate:
            self.queue = Queue()
            self.scheduler = GeventScheduler()
            self.scheduler.add_job(self.run_systemd_cmds,
                                   'interval',
                                   max_instances=1,
                                   minutes=2,
                                   id='run_systemd_cmds')
            self.scheduler.start()
Example No. 3
class WatchDog(WebSocketWatchDog):
    websocket_manage_dict = dict()
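    # NOTE: check_reconnect, logger, the *_MS constants, and the base-class
    # attributes mutex and websocket_manage_list are assumed to be provided by
    # WebSocketWatchDog / module scope; they are not shown in this snippet.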

    def __init__(self, is_auto_connect=True, heart_beat_limit_ms=HEART_BEAT_MS, reconnect_after_ms=RECONNECT_MS, restart_ms=RESTART_MS):
        threading.Thread.__init__(self)
        self.is_auto_connect = is_auto_connect
        self.heart_beat_limit_ms = heart_beat_limit_ms
        self.reconnect_after_ms = max(reconnect_after_ms, heart_beat_limit_ms)
        self.restart_ms = restart_ms
        self.logger = logger
        self.scheduler = Scheduler()
        self.scheduler.add_job(check_reconnect, "interval", max_instances=1, seconds=1, args=[self])
        self.start()

    def get_random_restart_at(self, wm):
        return wm.created_at + self.restart_ms + hash(wm) % RESTART_RANGE

    def on_connection_closed(self, websocket_manage):
        self.mutex.acquire()
        self.websocket_manage_list.remove(websocket_manage)
        [name] = [name for name, wm in self.websocket_manage_dict.items() if wm == websocket_manage]
        del self.websocket_manage_dict[name]
        self.mutex.release()

    def after_connection_created(self, name):
        [wm] = [wm for wm in self.websocket_manage_list if wm not in self.websocket_manage_dict.values()]
        self.mutex.acquire()
        self.websocket_manage_dict[name] = wm
        self.mutex.release()
Example No. 4
def delayed_finish():
    final_sched = Scheduler()
    final_sched.start()
    now = datetime.today()
    deltaFinal = timedelta(seconds=final_wait)
    starttime = now + deltaFinal
    final_sched.add_job(final_finish, 'date', run_date=starttime, args=[])
Example No. 5
    def __init__(self, irc_c, config):
        self.config = config
        self.scheduler = GeventScheduler()

        self._scheduler_greenlet = self.scheduler.start()

        log_propagation_message = partial(
            irc_c.PRIVMSG,
            CONFIG.external['propagation']['logging']['channel'],
        )

        # Scheduled full wiki update
        self.scheduler.add_job(
            Propagate.get_wiki_data,
            'cron',
            kwargs={'reply': log_propagation_message},
            **self.cron_to_kwargs(
                CONFIG.external['propagation']['all_articles']['often']),
        )

        # Scheduled recent pages update
        self.scheduler.add_job(
            Propagate.get_wiki_data,
            'cron',
            kwargs={
                'reply': log_propagation_message,
                'seconds': 259200
            },
            **self.cron_to_kwargs(
                CONFIG.external['propagation']['new_articles']['often']),
        )
Example No. 6
    def __init__(self, name='defaultController', workflows_path=core.config.paths.workflows_path):
        """Initializes a Controller object.
        
        Args:
            name (str, optional): Name for the controller.
            workflows_path (str, optional): Path to the workflows.
        """
        self.name = name
        self.workflows = {}
        self.workflow_status = {}
        self.load_all_workflows_from_directory(path=workflows_path)
        self.instances = {}
        self.tree = None

        self.scheduler = GeventScheduler()
        self.scheduler.add_listener(self.__scheduler_listener(),
                                    EVENT_SCHEDULER_START | EVENT_SCHEDULER_SHUTDOWN
                                    | EVENT_SCHEDULER_PAUSED | EVENT_SCHEDULER_RESUMED
                                    | EVENT_JOB_ADDED | EVENT_JOB_REMOVED
                                    | EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        self.ancestry = [self.name]

        def workflow_completed_callback(sender, **kwargs):
            self.__workflow_completed_callback(sender, **kwargs)

        callbacks.WorkflowShutdown.connect(workflow_completed_callback)
Example No. 7
class ScheduledEventProducer(EventProducer):
    '''
    Description:
        Continuously generates an event at a defined interval, or whenever an event is consumed.

    Parameters:
        producers (Optional[int]):
            | The number of greenthreads to spawn that each spawn events at the provided interval
            | Default: 1
        interval (Optional[float] OR dict):
            | The interval (in seconds) between each generated event.
            | Should have a value > 0.
            | Can also be a dict, supporting values of weeks, days, hours, minutes, and seconds
            | Default: 5
        delay (Optional[float]):
            | The time (in seconds) to wait before initial event generation.
            | Default: 0
        interval_grace_time (Optional[int]):
            | Sometimes the scheduler can fail to wake up and execute a job right at the set interval. This is how much
            | time (in seconds) the scheduler can miss the interval time by and still run the job.
            | Default: None (will use apscheduler's default misfire_grace_time of 1 second)
    '''
    '''
    MIXIN Attributes:
        DEFAULT_INTERVAL = {}
        def _parse_interval(self, interval):
        def _initialize_jobs(self):
    '''
    def __init__(self, name, *args, **kwargs):
        super(ScheduledEventProducer, self).__init__(name, *args, **kwargs)
        self._init_scheduler(*args, **kwargs)

    def _init_scheduler(self,
                        producers=1,
                        interval=5,
                        delay=0,
                        scheduler=None,
                        interval_grace_time=None,
                        *args,
                        **kwargs):
        self.interval = self._parse_interval(interval)
        self.delay = delay
        self.producers = producers
        self.scheduler = scheduler
        self.interval_grace_time = interval_grace_time
        if not self.scheduler:
            self.scheduler = GeventScheduler(
                misfire_grace_time=self.interval_grace_time)

    def pre_hook(self):
        #super(ScheduledEventProducer, self).pre_hook()
        self._initialize_jobs()
        gevent.sleep(self.delay)
        self.scheduler.start()

    def post_hook(self):
        #super(ScheduledEventProducer, self).post_hook()
        self.scheduler.shutdown()
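Note (not part of the original example): the docstring above says `interval` may be a dict of weeks/days/hours/minutes/seconds; such a dict maps directly onto apscheduler's 'interval' trigger keyword arguments. A minimal sketch of the equivalent direct call:

    from apscheduler.schedulers.gevent import GeventScheduler

    def emit():
        print('event')

    scheduler = GeventScheduler()
    interval = {'minutes': 1, 'seconds': 30}  # fires every 90 seconds
    scheduler.add_job(emit, 'interval', **interval)
    scheduler.start()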
Example No. 8
 def __init__(self):
     self.scheduler = GeventScheduler()
     self.scheduler.add_listener(self.__scheduler_listener(),
                                 EVENT_SCHEDULER_START | EVENT_SCHEDULER_SHUTDOWN
                                 | EVENT_SCHEDULER_PAUSED | EVENT_SCHEDULER_RESUMED
                                 | EVENT_JOB_ADDED | EVENT_JOB_REMOVED
                                 | EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
     self.id = 'controller'
     self.app = None
Example No. 9
 def deactivate_player(self, player):
     player.active = False
     scheduler = GeventScheduler()
     timeout_date = datetime.now() + timedelta(seconds=PLAYER_TIMEOUT)
     scheduler.add_job(self.delete_player,
                       'date',
                       run_date=timeout_date,
                       args=[player])
     g = scheduler.start()
     g.join()
Example No. 10
    def __init__(self, socketio):
        self.__socketio = socketio
        self.__jobs = {}
        self.__tiles = {}
        self.__cache = {}
        executors = {
            'default': {'type': 'threadpool', 'max_workers': 20}
        }

        self.__scheduler = GeventScheduler(executors=executors)
Example No. 11
def scheduler_loop(arg):
    global sched
    sched = Scheduler()
    sched.start()
    logging.basicConfig()
    scheduler_main("")
    while True:
        sleep(1)
        sys.stdout.write('.')
        sys.stdout.flush()
Example No. 12
def GeventScheduler_test():
    sched = GeventScheduler()
    sched.add_job(tick, 'interval', seconds=3)
    g = sched.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
Example No. 13
def startScheduledExecution(interval):
    scheduler = GeventScheduler()
    fetchAndSaveData()
    scheduler.add_job(fetchAndSaveData, 'interval', minutes=interval)
    g = scheduler.start()  # g is the greenlet that runs the scheduler loop
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
Example No. 14
def scheduler_loop(arg):
    global sched
    ready_for_scheduler.wait()
    print("STARTING SCHEDULER.")
    sched = Scheduler()
    sched.start()
    logging.basicConfig()
    scheduler_main("")
    ready_for_queue.set()
    while True:
        sys.stdout.write('.')
        sys.stdout.flush()
        gevent.sleep(1)
Example No. 15
class ScheduledEventProducer(EventProducer):
    '''
    Description:
        Continuously generates an event at a defined interval, or whenever an event is consumed.

    Parameters:
        producers (Optional[int]):
            | The number of greenthreads to spawn that each spawn events at the provided interval
            | Default: 1
        interval (Optional[float] OR dict):
            | The interval (in seconds) between each generated event.
            | Should have a value > 0.
            | Can also be a dict, supporting values of weeks, days, hours, minutes, and seconds
            | Default: 5
        delay (Optional[float]):
            | The time (in seconds) to wait before initial event generation.
            | Default: 0
        interval_grace_time (Optional[int]):
            | Sometimes the scheduler can fail to wake up and execute a job right at the set interval. This is how much
            | time (in seconds) the scheduler can miss the interval time by and still run the job.
            | Default: None (will use apscheduler's default misfire_grace_time of 1 second)
    '''

    '''
    MIXIN Attributes:
        DEFAULT_INTERVAL = {}
        def _parse_interval(self, interval):
        def _initialize_jobs(self):
    '''
    def __init__(self, name, *args, **kwargs):
        super(ScheduledEventProducer, self).__init__(name, *args, **kwargs)
        self._init_scheduler(*args, **kwargs)

    def _init_scheduler(self, producers=1, interval=5, delay=0, scheduler=None, interval_grace_time=None, *args, **kwargs):
        self.interval = self._parse_interval(interval)
        self.delay = delay
        self.producers = producers
        self.scheduler = scheduler
        self.interval_grace_time = interval_grace_time
        if not self.scheduler:
            self.scheduler = GeventScheduler(misfire_grace_time=self.interval_grace_time)

    def pre_hook(self):
        #super(ScheduledEventProducer, self).pre_hook()
        self._initialize_jobs()
        gevent.sleep(self.delay)
        self.scheduler.start()

    def post_hook(self):
        #super(ScheduledEventProducer, self).post_hook()
        self.scheduler.shutdown()
Example No. 16
 def __init__(self,
              name,
              producers=1,
              interval=5,
              delay=0,
              scheduler=None,
              *args,
              **kwargs):
     self.interval = self._parse_interval(interval)
     self.delay = delay
     self.producers = producers
     self.scheduler = scheduler
     if not self.scheduler:
         self.scheduler = GeventScheduler()
Example No. 17
class MockedSchedulingActor(PrepClazz):
    def __init__(self, name, producers=1, interval=5, delay=0, scheduler=None, *args, **kwargs):
        self.interval = self._parse_interval(interval)
        self.delay = delay
        self.producers = producers
        self.scheduler = scheduler
        if not self.scheduler:
            self.scheduler = GeventScheduler()

    def _do_produce(self):
        self.mock_do_produce()

    def pre_hook(self):
        self.scheduler.start()
Example No. 18
def watch():
    scheduler = GeventScheduler()

    for web in watchweb.get_watch_webs():
        s = int(web[watchweb.INTERVAL_SECONDS])
        scheduler.add_job(check_web, "interval", seconds=s, kwargs=web)

    g = scheduler.start()  # g is the greenlet that runs the scheduler loop
    print ("Press Ctrl+{0} to exit".format("Break" if os.name == "nt" else "C"))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
Example No. 19
 def _init_scheduler(self,
                     producers=1,
                     interval=5,
                     delay=0,
                     scheduler=None,
                     interval_grace_time=None,
                     *args,
                     **kwargs):
     self.interval = self._parse_interval(interval)
     self.delay = delay
     self.producers = producers
     self.scheduler = scheduler
     if not self.scheduler:
         self.scheduler = GeventScheduler(
             job_defaults={'misfire_grace_time': interval_grace_time})
Example No. 20
class Schedule:
    def __init__(self, irc_c, config):
        self.config = config
        self.scheduler = GeventScheduler()

        self._scheduler_greenlet = self.scheduler.start()

        log_propagation_message = partial(
            irc_c.PRIVMSG,
            CONFIG.external['propagation']['logging']['channel'],
        )

        # Scheduled full wiki update
        self.scheduler.add_job(
            Propagate.get_wiki_data,
            'cron',
            kwargs={'reply': log_propagation_message},
            **self.cron_to_kwargs(
                CONFIG.external['propagation']['all_articles']['often']),
        )

        # Scheduled recent pages update
        self.scheduler.add_job(
            Propagate.get_wiki_data,
            'cron',
            kwargs={
                'reply': log_propagation_message,
                'seconds': 259200
            },
            **self.cron_to_kwargs(
                CONFIG.external['propagation']['new_articles']['often']),
        )

    @staticmethod
    def cron_to_kwargs(cronstring):
        """Converts a cron string to cron kwargs"""
        crons = cronstring.split(" ")
        if len(crons) != 5:
            raise ValueError("Invalid cron {}".format(cronstring))
        crons = [cron.replace("_", " ") for cron in crons]
        kwargs = {
            'minute': crons[0],
            'hour': crons[1],
            'day': crons[2],
            'month': crons[3],
            'day_of_week': crons[4],
        }
        return kwargs
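Note (added for illustration; the cron string and job function below are assumptions, not from the original source): `cron_to_kwargs` turns a five-field cron string into keyword arguments for apscheduler's cron trigger, with underscores standing in for spaces within a field. A minimal sketch:

    from apscheduler.schedulers.gevent import GeventScheduler

    def some_job():  # hypothetical job function
        print('running')

    scheduler = GeventScheduler()
    kwargs = Schedule.cron_to_kwargs('0 */6 * * *')
    # kwargs == {'minute': '0', 'hour': '*/6', 'day': '*',
    #            'month': '*', 'day_of_week': '*'}
    scheduler.add_job(some_job, 'cron', **kwargs)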
Example No. 21
    def setup_tasks(self):
        """
        Setup all tasks that run periodically.
        """

        self.scheduler = GeventScheduler()

        # Add an initial job
        def _job():
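            # One-shot bootstrap job: it removes itself on its first run and
            # then triggers the startup synchronization.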
            job.remove()
            self.synchronize(synchronization="startup")
        job = self.scheduler.add_job(
            _job, max_instances=1, trigger="interval", seconds=1)

        # Scheduler task to clean and expire the cache.
        cache_interval = self.config['Provider']['item cache prune interval']

        self.scheduler.add_job(
            self.cache_manager.expire,
            max_instances=1, trigger="interval", minutes=cache_interval)
        self.scheduler.add_job(
            self.cache_manager.clean,
            max_instances=1, trigger="interval", minutes=cache_interval)

        # Schedule tasks to synchronize each connection.
        for connection in self.connections.itervalues():
            self.scheduler.add_job(
                self.synchronize, args=([connection, "interval"]),
                max_instances=1, trigger="interval",
                minutes=connection.synchronization_interval)
Example No. 22
def gevent_schedule():
    from apscheduler.schedulers.gevent import GeventScheduler

    def tick():
        print('Tick! The time is: %s' % datetime.now())

    scheduler = GeventScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    g = scheduler.start()  # g is the greenlet that runs the scheduler loop
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
Example No. 23
def gevent_schedule():
    from apscheduler.schedulers.gevent import GeventScheduler

    def tick():
        print('Tick! The time is: %s' % datetime.now())

    scheduler = GeventScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    g = scheduler.start()  # g is the greenlet that runs the scheduler loop
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
Example No. 24
 def _init_scheduler(self, producers=1, interval=5, delay=0, scheduler=None, *args, **kwargs):
     self.interval = self._parse_interval(interval)
     self.delay = delay
     self.producers = producers
     self.scheduler = scheduler
     if not self.scheduler:
         self.scheduler = GeventScheduler()
Example No. 25
def watch():
    scheduler = GeventScheduler()

    for web in watchweb.get_watch_webs():
        s = int(web[watchweb.INTERVAL_SECONDS])
        scheduler.add_job(check_web, 'interval', seconds=s, kwargs=web)

    g = scheduler.start()  # g is the greenlet that runs the scheduler loop
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
Example No. 26
    def __init__(self):
        print "init Task Manager"
        self.logger = logging.getLogger('dls')
        executors = {
            'default': ThreadPoolExecutor(config.EXECUTOR_THREADS_NUMBER),
            'monitor': GeventExecutor(),
        }

        self.scheduler = GeventScheduler(executors=executors)
        self.scheduler.start()

        # Map of tasks for tracking them on UI
        self.tasks = {}
        self.scheduler.add_job(self.report_progress,
                               'interval',
                               seconds=config.JOB_MONITOR_INTERVAL,
                               executor='monitor')
Example No. 27
class ScheduledEventProducer(EventProducer):
    '''
    Description:
        Continuously generates an event at a defined interval, or whenever an event is consumed.

    Parameters:
        producers (Optional[int]):
            | The number of greenthreads to spawn that each spawn events at the provided interval
            | Default: 1
        interval (Optional[float] OR dict):
            | The interval (in seconds) between each generated event.
            | Should have a value > 0.
            | Can also be a dict, supporting values of weeks, days, hours, minutes, and seconds
            | Default: 5
        delay (Optional[float]):
            | The time (in seconds) to wait before initial event generation.
            | Default: 0
    '''

    '''
    MIXIN Attributes:
        DEFAULT_INTERVAL = {}
        def _parse_interval(self, interval):
        def _initialize_jobs(self):
    '''
    def __init__(self, name, *args, **kwargs):
        super(ScheduledEventProducer, self).__init__(name, *args, **kwargs)
        self._init_scheduler(*args, **kwargs)

    def _init_scheduler(self, producers=1, interval=5, delay=0, scheduler=None, *args, **kwargs):
        self.interval = self._parse_interval(interval)
        self.delay = delay
        self.producers = producers
        self.scheduler = scheduler
        if not self.scheduler:
            self.scheduler = GeventScheduler()

    def pre_hook(self):
        #super(ScheduledEventProducer, self).pre_hook()
        self._initialize_jobs()
        gevent.sleep(self.delay)
        self.scheduler.start()

    def post_hook(self):
        #super(ScheduledEventProducer, self).post_hook()
        self.scheduler.shutdown()
Example No. 28
    def run(self):
        """Start apscheduler tasks"""
        jobstores = {'mongo': MongoDBJobStore()}

        executors = {
            'default': ThreadPoolExecutor(self.poolsize),
            'processpool': ProcessPoolExecutor(self.procsize)
        }

        job_defaults = {'coalesce': False, 'max_instances': 3}

        scheduler = GeventScheduler()
        scheduler.configure(jobstores=jobstores,
                            executors=executors,
                            job_defaults=job_defaults,
                            timezone=utc)
        scheduler.add_job(self.job_worker, 'interval', seconds=0.001)

        green_let = scheduler.start()
        print('Ctrl+{0} to exit.'.format('Break' if os.name == 'nt' else 'C'))

        # Execution will block here until Ctrl+C (Ctrl+Break on Windows).
        try:
            green_let.join()
        except (KeyboardInterrupt, SystemExit):
            pass
Example No. 29
 def __init__(self, scheduler=None):
     if scheduler is None:
         try:
             from apscheduler.schedulers.gevent import GeventScheduler
             self._scheduler = GeventScheduler()
         except ImportError:
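             # gevent is unavailable; fall back to the thread-based BackgroundScheduler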
             self._scheduler = BackgroundScheduler()
     else:
         self._scheduler = scheduler
Example No. 30
 def __init__(self):
     self.scheduler = GeventScheduler()
     self.scheduler.add_listener(self.__scheduler_listener(),
                                 EVENT_SCHEDULER_START | EVENT_SCHEDULER_SHUTDOWN
                                 | EVENT_SCHEDULER_PAUSED | EVENT_SCHEDULER_RESUMED
                                 | EVENT_JOB_ADDED | EVENT_JOB_REMOVED
                                 | EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
     self.id = 'controller'
     self.app = None
Example No. 31
def nsGevent(ns, *args, **kw):
    ns.V("/sys/scheduler", GeventScheduler())
    s = nsGet(ns, "/sys/scheduler").value
    nsSchedulerIntervalJob(ns, 60, "/dev/time", nsGeventTick)
    g = s.start()
    nsProcAlloc(ns, "scheduler", g, scheduler=s)
    glist = nsGet(ns, "/sys/greenlets").value
    glist.append(g)
    return True
Example No. 32
def production(*_, **settings):
    """Hooks exceptions and returns the Flask app."""

    hook_exceptions()

    app.shiptoasts = ShipToasts()
    app.shiptoasts.initial_fill()

    scheduler = GeventScheduler()
    scheduler.add_job(app.shiptoasts.periodic_call, "interval", seconds=30)
    cleaner = scheduler.start()
    listener = gevent.Greenlet.spawn(app.shiptoasts.listen_for_updates)

    atexit.register(cleaner.join, timeout=2)
    atexit.register(listener.join, timeout=2)
    atexit.register(scheduler.shutdown)

    return app
Example No. 33
 def __init__(self,
              name,
              event_class=Event,
              event_kwargs=None,
              producers=1,
              interval=5,
              delay=0,
              generate_error=False,
              *args,
              **kwargs):
     super(EventGenerator, self).__init__(name, *args, **kwargs)
     self.blockdiag_config["shape"] = "flowchart.input"
     self.generate_error = generate_error
     self.interval = self._parse_interval(interval)
     self.delay = delay
     self.event_kwargs = event_kwargs or {}
     self.output = event_class
     self.producers = producers
     self.scheduler = GeventScheduler()
Example No. 34
 def __init__(self, name, event_class=Event, event_kwargs=None, producers=1, interval=5, delay=0, generate_error=False, *args, **kwargs):
     super(EventGenerator, self).__init__(name, *args, **kwargs)
     self.blockdiag_config["shape"] = "flowchart.input"
     self.generate_error = generate_error
     self.interval = self._parse_interval(interval)
     self.delay = delay
     self.event_kwargs = event_kwargs or {}
     self.output = event_class
     self.producers = producers
     self.scheduler = GeventScheduler()
Example No. 35
 def __init__(self, is_auto_connect=True, heart_beat_limit_ms=HEART_BEAT_MS, reconnect_after_ms=RECONNECT_MS, restart_ms=RESTART_MS):
     threading.Thread.__init__(self)
     self.is_auto_connect = is_auto_connect
     self.heart_beat_limit_ms = heart_beat_limit_ms
      self.reconnect_after_ms = max(reconnect_after_ms, heart_beat_limit_ms)
     self.restart_ms = restart_ms
     self.logger = logger
     self.scheduler = Scheduler()
     self.scheduler.add_job(check_reconnect, "interval", max_instances=1, seconds=1, args=[self])
     self.start()
Example No. 36
 def __init__(self, name="defaultController"):
     self.name = name
     self.workflows = {}
     self.load_all_workflows_from_directory()
     self.instances = {}
     self.tree = None
     self.eventlog = []
     self.schedulerStatusListener = SchedulerStatusListener(self.eventlog)
     self.jobStatusListener = JobStatusListener(self.eventlog)
     self.jobExecutionListener = JobExecutionListener(self.eventlog)
     self.scheduler = GeventScheduler()
     self.scheduler.add_listener(
         self.schedulerStatusListener.callback(self),
         EVENT_SCHEDULER_START | EVENT_SCHEDULER_SHUTDOWN
         | EVENT_SCHEDULER_PAUSED | EVENT_SCHEDULER_RESUMED)
     self.scheduler.add_listener(self.jobStatusListener.callback(self),
                                 EVENT_JOB_ADDED | EVENT_JOB_REMOVED)
     self.scheduler.add_listener(self.jobExecutionListener.callback(self),
                                 EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
     self.ancestry = [self.name]
Example No. 37
class MockedSchedulingActor(PrepClazz):
    def __init__(self,
                 name,
                 producers=1,
                 interval=5,
                 delay=0,
                 scheduler=None,
                 *args,
                 **kwargs):
        self.interval = self._parse_interval(interval)
        self.delay = delay
        self.producers = producers
        self.scheduler = scheduler
        if not self.scheduler:
            self.scheduler = GeventScheduler()

    def _do_produce(self):
        self.mock_do_produce()

    def pre_hook(self):
        self.scheduler.start()
Example No. 38
def setup_schedule_config(self, endpoint_explorer, node_address):
    scheduler = GeventScheduler()
    scheduler.add_job(
        lambda: notice_explorer_to_be_alive(endpoint_explorer, node_address),
        'interval',
        minutes=30)
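    # start() returns the scheduler's greenlet; this snippet discards it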
    scheduler.start()
Example No. 39
class Scheduler:

    def __init__(self, app=None):
        self._scheduler = GeventScheduler()
        self.jobs_list = [monitor_ac_usage, monitor_temperatures]
        if app:
            self.init_app(app)

    def init_app(self, app):
        for job in self.jobs_list:
            self.add_jobs(app, job)

    def add_jobs(self, app, job):
        def call_func(*args, **kwargs):
            with app.app_context():
                func(*args, **kwargs)
        func = job

        self._scheduler.add_job(call_func, 'interval', minutes=func.minutes, coalesce=True)

    def start(self):
        self._scheduler.start()
Example No. 40
class TaskManager:
    """Simple wrapper for Advanced Python Scheduler"""
    def __init__(self):
        print "init Task Manager"
        self.logger = logging.getLogger('dls')
        executors = {
            'default': ThreadPoolExecutor(config.EXECUTOR_THREADS_NUMBER),
            'monitor': GeventExecutor(),
        }

        self.scheduler = GeventScheduler(executors=executors)
        self.scheduler.start()

        # Map of tasks for tracking them on UI
        self.tasks = {}
        self.scheduler.add_job(self.report_progress,
                               'interval',
                               seconds=config.JOB_MONITOR_INTERVAL,
                               executor='monitor')

    # Starts new task
    def start_task(self, task):

        self.scheduler.add_job(func=task.execute,
                               misfire_grace_time=config.MISFIRE_GRACE_TIME)
        self.tasks[task.id] = task

    # Kills a task by its ID
    def term_task(self, index):
        task = self.tasks[index]
        task.kill()

    def shutdown(self):
        self.scheduler.shutdown()

    def report_progress(self):
        """Gathers information from task and sends to clients"""
        # self.logger.info("sending tasks progress")
        task_data = []
        for t in self.tasks.values():
            task_data.append(t.status())
        # from pprint import pprint
        # pprint (task_data)
        socketio.emit('task_monitor', json.dumps(task_data))
        return task_data

    def task_info(self, id):
        t = self.tasks[int(id)]
        return t.detailed_status()
Example No. 41
def setup_schedule_config(self, endpoint_explorer, discoverable, node_address,
                          raiden_instance):
    scheduler = GeventScheduler()
    scheduler.add_job(lambda: notice_explorer_to_be_alive(
        endpoint_explorer, discoverable, node_address, raiden_instance),
                      'interval',
                      minutes=30)
    scheduler.start()
Example No. 42
 def __init__(self, name):
     super().__init__(name)
     self._commands = {}  # cmd_id : command
     # self._jobstores = {'default': SQLAlchemyJobStore(url=constants.scheduler_database_url,
     #                                                 tablename="jobs")}
     self._jobstores = {}
     self._executors = {}
     self._job_defaults = {
         'max_instances': 10,
         'coalesce': True,
     }
     self._triggers = {}  # cmd_id : trigger
     self._scheduler = GeventScheduler(
         jobstores=self._jobstores,
         executors=self._executors,
         job_defaults=self._job_defaults,
         timezone=pytz.utc,  # TODO: make user configurable
     )
     self._schedulers.add(self)
     s_cmds = constants.internaldb.scheduler_commands.get({})
     v = s_cmds.setdefault(self.identifier(), {})
     if not v:
         constants.internaldb.scheduler_commands.set(s_cmds)
Example No. 43
    def setup_tasks(self):
        """
        Setup all tasks that run periodically.
        """

        self.scheduler = GeventScheduler()

        # Add an initial job
        def _job():
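            # One-shot bootstrap job: it removes itself on its first run and
            # then triggers the startup synchronization.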
            job.remove()
            self.synchronize(synchronization="startup")

        job = self.scheduler.add_job(_job,
                                     max_instances=1,
                                     trigger="interval",
                                     seconds=1)

        # Scheduler task to clean and expire the cache.
        cache_interval = self.config['Provider']['item cache prune interval']

        self.scheduler.add_job(self.cache_manager.expire,
                               max_instances=1,
                               trigger="interval",
                               minutes=cache_interval)
        self.scheduler.add_job(self.cache_manager.clean,
                               max_instances=1,
                               trigger="interval",
                               minutes=cache_interval)

        # Schedule tasks to synchronize each connection.
        for connection in self.connections.itervalues():
            self.scheduler.add_job(self.synchronize,
                                   args=([connection, "interval"]),
                                   max_instances=1,
                                   trigger="interval",
                                   minutes=connection.synchronization_interval)
Example No. 44
    def __init__(self, scheduler):
        '''
        https://apscheduler.readthedocs.io/en/latest/userguide.html?highlight=add_job

        Parameters
        ----------
        scheduler:
            [str] Scheduler type; pick the one that matches the application:
            'BlockingScheduler' (blocking):
                for programs that do nothing but run the scheduler
            'BackgroundScheduler' (background):
                for non-blocking use; the scheduler runs independently in the background
            'AsyncIOScheduler' (AsyncIO):
                for applications built on asyncio
            'GeventScheduler' (Gevent):
                for applications built on gevent
            'TornadoScheduler' (Tornado):
                for Tornado applications
            'TwistedScheduler' (Twisted):
                for Twisted applications
            'QtScheduler' (Qt):
                for Qt applications
        '''
        import logging
        logging.basicConfig()
        scheduler = str(scheduler).lower()
        if ('blocking' in scheduler):
            from apscheduler.schedulers.blocking import BlockingScheduler
            self.scheduler = BlockingScheduler()
        elif ('background' in scheduler):
            from apscheduler.schedulers.background import BackgroundScheduler
            self.scheduler = BackgroundScheduler()
        elif ('asyncio' in scheduler):
            from apscheduler.schedulers.asyncio import AsyncIOScheduler
            self.scheduler = AsyncIOScheduler()
        elif ('gevent' in scheduler):
            from apscheduler.schedulers.gevent import GeventScheduler
            self.scheduler = GeventScheduler()
        elif ('tornado' in scheduler):
            from apscheduler.schedulers.tornado import TornadoScheduler
            self.scheduler = TornadoScheduler()
        elif ('twisted' in scheduler):
            from apscheduler.schedulers.twisted import TwistedScheduler
            self.scheduler = TwistedScheduler()
        elif ('qt' in scheduler):
            from apscheduler.schedulers.qt import QtScheduler
            self.scheduler = QtScheduler()
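Note (added for illustration): the enclosing class name is not shown in this snippet, so `SchedulerFactory` below is an assumed stand-in. The constructor matches the requested type by substring, so any string containing 'gevent' selects apscheduler's GeventScheduler:

    def tick():
        print('tick')

    factory = SchedulerFactory('gevent')  # hypothetical class name
    factory.scheduler.add_job(tick, 'interval', seconds=3)
    factory.scheduler.start()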
Example No. 45
def main():

    scheduler = GeventScheduler()

    url = os.environ.get('SQLALCHEMY_DATABASE_URI', 'sqlite:///database.db')

    scheduler.add_jobstore('sqlalchemy', url=url)

    scheduler.add_job(tick, 'interval', seconds=3, id='example_job', replace_existing=True)

    # g is the greenlet that runs the scheduler loop.
    g = scheduler.start()

    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
Example No. 46
class Scheduler(object):
    def __init__(self):
        self.scheduler = GeventScheduler()
        self.scheduler.add_listener(self.__scheduler_listener(),
                                    EVENT_SCHEDULER_START | EVENT_SCHEDULER_SHUTDOWN
                                    | EVENT_SCHEDULER_PAUSED | EVENT_SCHEDULER_RESUMED
                                    | EVENT_JOB_ADDED | EVENT_JOB_REMOVED
                                    | EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        self.id = 'controller'
        self.app = None

    def schedule_workflows(self, task_id, executable, workflow_ids, trigger):
        """
        Schedules a workflow for execution

        Args:
            task_id (int): Id of the scheduled task
            executable (func): A callable to execute; it must take one argument -- a workflow id
            workflow_ids (iterable(str)): An iterable of workflow ids
            trigger (Trigger): The trigger to use for this scheduled task
        """

        def execute(id_):
            with self.app.app_context():
                executable(id_)

        for workflow_id in workflow_ids:
            self.scheduler.add_job(execute, args=(workflow_id,),
                                   id=construct_task_id(task_id, workflow_id),
                                   trigger=trigger, replace_existing=True)

    def get_all_scheduled_workflows(self):
        """
        Gets all the scheduled workflows

        Returns:
             (dict{str: list[str]}) A dict of task_id to workflow execution ids
        """
        tasks = {}
        for job in self.scheduler.get_jobs():
            task, workflow_execution_id = split_task_id(job.id)
            if task not in tasks:
                tasks[task] = [workflow_execution_id]
            else:
                tasks[task].append(workflow_execution_id)
        return tasks

    def get_scheduled_workflows(self, task_id):
        """
        Gets all the scheduled workflows for a given task id

        Args:
            task_id (str): The task id

        Returns:
            (list[str]) A list of workflow execution ids associated with this task id
        """
        tasks = []
        for job in self.scheduler.get_jobs():
            task, workflow_execution_id = split_task_id(job.id)
            if task == task_id:
                tasks.append(workflow_execution_id)
        return tasks

    def update_workflows(self, task_id, trigger):
        """
        Updates the workflows for a given task id to use a different trigger

        Args:
            task_id (str|int): The task id to update
            trigger (Trigger): The new trigger to use
        """
        existing_tasks = {construct_task_id(task_id, workflow_execution_id) for workflow_execution_id in
                          self.get_scheduled_workflows(task_id)}
        for job_id in existing_tasks:
            self.scheduler.reschedule_job(job_id=job_id, trigger=trigger)

    def unschedule_workflows(self, task_id, workflow_execution_ids):
        """
        Unschedules a workflow

        Args:
            task_id (str|int): The task ID to unschedule
            workflow_execution_ids (list[str]): The list of workflow execution IDs to update
        """
        for workflow_execution_id in workflow_execution_ids:
            try:
                self.scheduler.remove_job(construct_task_id(task_id, workflow_execution_id))
            except JobLookupError:
                logger.warning('Cannot delete task {}. '
                               'No task found in scheduler'.format(construct_task_id(task_id, workflow_execution_id)))

    def start(self):
        """Starts the scheduler for active execution. This function must be called before any workflows are executed.

        Returns:
            The state of the scheduler if successful, error message if scheduler is in "stopped" state.
        """
        if self.scheduler.state == STATE_STOPPED:
            logger.info('Starting scheduler')
            self.scheduler.start()
        else:
            logger.warning('Cannot start scheduler. Scheduler is already running or is paused')
            return "Scheduler already running."
        return self.scheduler.state

    def stop(self, wait=True):
        """Stops active execution.

        Args:
            wait (bool, optional): Boolean to synchronously or asynchronously wait for the scheduler to shutdown.
                Default is True.

        Returns:
            The state of the scheduler if successful, error message if scheduler is already in "stopped" state.
        """
        if self.scheduler.state != STATE_STOPPED:
            logger.info('Stopping scheduler')
            self.scheduler.shutdown(wait=wait)
        else:
            logger.warning('Cannot stop scheduler. Scheduler is already stopped')
            return "Scheduler already stopped."
        return self.scheduler.state

    def pause(self):
        """Pauses active execution.

        Returns:
            The state of the scheduler if successful, error message if scheduler is not in the "running" state.
        """
        if self.scheduler.state == STATE_RUNNING:
            logger.info('Pausing scheduler')
            self.scheduler.pause()
        elif self.scheduler.state == STATE_PAUSED:
            logger.warning('Cannot pause scheduler. Scheduler is already paused')
            return "Scheduler already paused."
        elif self.scheduler.state == STATE_STOPPED:
            logger.warning('Cannot pause scheduler. Scheduler is stopped')
            return "Scheduler is in STOPPED state and cannot be paused."
        return self.scheduler.state

    def resume(self):
        """Resumes active execution.

        Returns:
            The state of the scheduler if successful, error message if scheduler is not in the "paused" state.
        """
        if self.scheduler.state == STATE_PAUSED:
            logger.info('Resuming scheduler')
            self.scheduler.resume()
        else:
            logger.warning("Scheduler is not in PAUSED state and cannot be resumed.")
            return "Scheduler is not in PAUSED state and cannot be resumed."
        return self.scheduler.state

    def pause_workflows(self, task_id, workflow_execution_ids):
        """
        Pauses some workflows associated with a task

        Args:
            task_id (int|str): The id of the task to pause
            workflow_execution_ids (list[str]): The list of workflow execution IDs to pause
        """
        for workflow_execution_id in workflow_execution_ids:
            job_id = construct_task_id(task_id, workflow_execution_id)
            try:
                self.scheduler.pause_job(job_id=job_id)
                logger.info('Paused job {0}'.format(job_id))
            except JobLookupError:
                logger.warning('Cannot pause scheduled workflow {}. Workflow ID not found'.format(job_id))

    def resume_workflows(self, task_id, workflow_execution_ids):
        """
        Resumes some workflows associated with a task

        Args:
            task_id (int|str): The id of the task to pause
            workflow_execution_ids (list[str]): The list of workflow execution IDs to resume
        """
        for workflow_execution_id in workflow_execution_ids:
            job_id = construct_task_id(task_id, workflow_execution_id)
            try:
                self.scheduler.resume_job(job_id=job_id)
                logger.info('Resumed job {0}'.format(job_id))
            except JobLookupError:
                logger.warning('Cannot resume scheduled workflow {}. Workflow ID not found'.format(job_id))

    def __scheduler_listener(self):
        event_selector_map = {EVENT_SCHEDULER_START: WalkoffEvent.SchedulerStart,
                              EVENT_SCHEDULER_SHUTDOWN: WalkoffEvent.SchedulerShutdown,
                              EVENT_SCHEDULER_PAUSED: WalkoffEvent.SchedulerPaused,
                              EVENT_SCHEDULER_RESUMED: WalkoffEvent.SchedulerResumed,
                              EVENT_JOB_ADDED: WalkoffEvent.SchedulerJobAdded,
                              EVENT_JOB_REMOVED: WalkoffEvent.SchedulerJobRemoved,
                              EVENT_JOB_EXECUTED: WalkoffEvent.SchedulerJobExecuted,
                              EVENT_JOB_ERROR: WalkoffEvent.SchedulerJobError}

        def event_selector(event):
            try:
                event = event_selector_map[event.code]
                event.send(self)
            except KeyError:  # pragma: no cover
                logger.error('Unknown event triggered in scheduler: {}'.format(event))

        return event_selector
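Note (added for illustration; names below are generic, not from the original project): the listener pattern above can be reproduced with apscheduler's public API alone. A minimal sketch:

    from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED
    from apscheduler.schedulers.gevent import GeventScheduler

    def on_job_event(event):
        # event.code identifies which of the subscribed events fired
        if event.code == EVENT_JOB_ERROR:
            print('job {} raised {!r}'.format(event.job_id, event.exception))
        else:
            print('job {} executed'.format(event.job_id))

    scheduler = GeventScheduler()
    scheduler.add_listener(on_job_event, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)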
Example No. 47
class EventGenerator(Actor):

    '''**Generates a test event at the chosen interval.**

    Parameters:

        name (str):
            | The instance name
        event_class (Optional[compysition.event.Event]):
            | The class that the generated event should be created as
            | Default: Event
        event_kwargs (Optional[int]):
            | Any additional kwargs to add to the event, including data
        producers (Optional[int]):
            | The number of greenthreads to spawn that each spawn events at the provided interval
            | Default: 1
        interval (Optional[float] OR dict):
            | The interval (in seconds) between each generated event.
            | Should have a value > 0.
            | Can also be a dict, supporting values of weeks, days, hours, minutes, and seconds
            | default: 5
        delay (Optional[float]):
            | The time (in seconds) to wait before initial event generation.
            | Default: 0
        generate_error (Optional[bool]):
            | Whether or not to also send the event via Actor.send_error
            | Default: False

    '''

    DEFAULT_INTERVAL = {'weeks': 0,
                        'days': 0,
                        'hours': 0,
                        'minutes': 0,
                        'seconds': 5}

    def __init__(self, name, event_class=Event, event_kwargs=None, producers=1, interval=5, delay=0, generate_error=False, *args, **kwargs):
        super(EventGenerator, self).__init__(name, *args, **kwargs)
        self.blockdiag_config["shape"] = "flowchart.input"
        self.generate_error = generate_error
        self.interval = self._parse_interval(interval)
        self.delay = delay
        self.event_kwargs = event_kwargs or {}
        self.output = event_class
        self.producers = producers
        self.scheduler = GeventScheduler()

    def _parse_interval(self, interval):
        _interval = dict(self.DEFAULT_INTERVAL)  # copy so the shared class default is not mutated

        if isinstance(interval, int):
            _interval['seconds'] = interval
        elif isinstance(interval, dict):
            _interval.update(interval)

        return _interval

    def _initialize_jobs(self):
        for i in xrange(self.producers):
            self.scheduler.add_job(self._do_produce, 'interval', **self.interval)

    def pre_hook(self):
        self._initialize_jobs()
        gevent.sleep(self.delay)
        self.scheduler.start()

    def post_hook(self):
        self.scheduler.shutdown()

    def _do_produce(self):
        event = self.output(**self.event_kwargs)
        self.logger.debug("Generated new event {event_id}".format(event_id=event.event_id))
        self.send_event(event)
        if self.generate_error:
            event = self.output(**self.event_kwargs)
            self.send_error(event)

    def consume(self, event, *args, **kwargs):
        self._do_produce()
Example No. 48
    if int(app.conf.automation_status) == 1 or forced is True:
        if forced is False:
            global l_t_check
            l_t_check = app.datetime.now().strftime("%d/%m/%Y, %H:%M:%S")
        logging.info("Checking for releases in torrents")
        todays_date = app.datetime.now()
        schd_albums = app.QueueAlbum.query.all()
        for query in schd_albums:
            date = app.datetime.strptime(query.date, "%d %B %Y")
            if date <= todays_date:
                if int(query.status) == 0:
                    app.download(query.album_name)
        data = ({"album": "C_T", "date": "C_T"})
        app.pushtoListener(data)


def reschedule():
    sched.reschedule_job(job_id="auto_A", trigger='interval', minutes=int(app.conf.automation_interval) * 60)
    sched.reschedule_job(job_id="auto_T", trigger='interval', minutes=int(app.conf.automation_interval) * 60)


# ugly
l_t_check = "Never"
l_a_check = "Never"

sched = GeventScheduler()
sched.add_job(look_for_artist, 'interval', id="auto_A", minutes=int(app.conf.automation_interval) * 60)
sched.add_job(look_for_torrents, 'interval', id="auto_T", minutes=int(app.conf.automation_interval) * 60)
sched.add_job(generateSuggestions, 'interval', id="auto_S", seconds=6200)
sched.start()
Example No. 49
"""
Demonstrates how to use the gevent-compatible scheduler to schedule a job that executes on 3-second intervals.
"""

from datetime import datetime
import os

from apscheduler.schedulers.gevent import GeventScheduler


def tick():
    print('Tick! The time is: %s' % datetime.now())


if __name__ == '__main__':
    scheduler = GeventScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    g = scheduler.start()  # g is the greenlet that runs the scheduler loop
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        g.join()
    except (KeyboardInterrupt, SystemExit):
        pass
Example No. 50
 def __init__(self, app=None):
     self._scheduler = GeventScheduler()
     self.jobs_list = [monitor_ac_usage, monitor_temperatures]
     if app:
         self.init_app(app)
Example No. 51
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.
    """
    config = Configurator(settings=settings)
    if JournalHandler:
        config.add_subscriber(set_journal_handler, ContextFound)
        config.add_subscriber(clear_journal_handler, BeforeRender)
    config.include('pyramid_exclog')
    config.add_route('home', '/')
    config.add_route('resync_all', '/resync_all')
    config.add_route('resync', '/resync/{tender_id}')
    config.add_route('calendar', '/calendar')
    config.add_route('calendar_entry', '/calendar/{date}')
    config.add_route('streams', '/streams')
    config.scan(ignore='openprocurement.chronograph.tests')
    config.add_subscriber(start_scheduler, ApplicationCreated)
    config.registry.api_token = os.environ.get('API_TOKEN', settings.get('api.token'))

    db_name = os.environ.get('DB_NAME', settings['couchdb.db_name'])
    server = Server(settings.get('couchdb.url'), session=Session(retry_delays=range(60)))
    if 'couchdb.admin_url' not in settings and server.resource.credentials:
        try:
            server.version()
        except Unauthorized:
            server = Server(extract_credentials(settings.get('couchdb.url'))[0])
    config.registry.couchdb_server = server
    if 'couchdb.admin_url' in settings and server.resource.credentials:
        aserver = Server(settings.get('couchdb.admin_url'), session=Session(retry_delays=range(10)))
        users_db = aserver['_users']
        if SECURITY != users_db.security:
            INIT_LOGGER.info("Updating users db security", extra={'MESSAGE_ID': 'update_users_security'})
            users_db.security = SECURITY
        username, password = server.resource.credentials
        user_doc = users_db.get('org.couchdb.user:{}'.format(username), {'_id': 'org.couchdb.user:{}'.format(username)})
        if not user_doc.get('derived_key', '') or PBKDF2(password, user_doc.get('salt', ''), user_doc.get('iterations', 10)).hexread(int(len(user_doc.get('derived_key', '')) / 2)) != user_doc.get('derived_key', ''):
            user_doc.update({
                "name": username,
                "roles": [],
                "type": "user",
                "password": password
            })
            INIT_LOGGER.info("Updating chronograph db main user", extra={'MESSAGE_ID': 'update_chronograph_main_user'})
            users_db.save(user_doc)
        security_users = [username, ]
        if db_name not in aserver:
            aserver.create(db_name)
        db = aserver[db_name]
        SECURITY[u'members'][u'names'] = security_users
        if SECURITY != db.security:
            INIT_LOGGER.info("Updating chronograph db security", extra={'MESSAGE_ID': 'update_chronograph_security'})
            db.security = SECURITY
        auth_doc = db.get(VALIDATE_DOC_ID, {'_id': VALIDATE_DOC_ID})
        if auth_doc.get('validate_doc_update') != VALIDATE_DOC_UPDATE % username:
            auth_doc['validate_doc_update'] = VALIDATE_DOC_UPDATE % username
            INIT_LOGGER.info("Updating chronograph db validate doc", extra={'MESSAGE_ID': 'update_chronograph_validate_doc'})
            db.save(auth_doc)
    else:
        if db_name not in server:
            server.create(db_name)
    config.registry.db = server[db_name]

    jobstores = {
        #'default': CouchDBJobStore(database=db_name, client=server)
    }
    #executors = {
        #'default': ThreadPoolExecutor(5),
        #'processpool': ProcessPoolExecutor(5)
    #}
    job_defaults = {
        'coalesce': False,
        'max_instances': 5
    }
    config.registry.api_url = settings.get('api.url')
    config.registry.callback_url = settings.get('callback.url')
    scheduler = Scheduler(jobstores=jobstores,
                          #executors=executors,
                          job_defaults=job_defaults,
                          timezone=TZ)
    if 'jobstore_db' in settings:
        scheduler.add_jobstore('sqlalchemy', url=settings['jobstore_db'])
    config.registry.scheduler = scheduler
    # scheduler.remove_all_jobs()
    # scheduler.start()
    resync_all_job = scheduler.get_job('resync_all')
    now = datetime.now(TZ)
    if not resync_all_job or resync_all_job.next_run_time < now - timedelta(hours=1):
        if resync_all_job:
            args = resync_all_job.args
        else:
            args = [settings.get('callback.url') + 'resync_all', None]
        run_date = now + timedelta(seconds=60)
        scheduler.add_job(push, 'date', run_date=run_date, timezone=TZ,
                          id='resync_all', args=args,
                          replace_existing=True, misfire_grace_time=60 * 60)
    return config.make_wsgi_app()
Example No. 52
BIDS_KEYS_FOR_COPY = (
    "bidder_id",
    "amount",
    "time"
)
SYSTEMD_DIRECORY = '.config/systemd/user/'
SYSTEMD_RELATIVE_PATH = SYSTEMD_DIRECORY + 'auction_{0}.{1}'
TIMER_STAMP = re.compile(
    r"OnCalendar=(?P<year>[0-9][0-9][0-9][0-9])"
    r"-(?P<mon>[0-9][0-9])-(?P<day>[0123][0-9]) "
    r"(?P<hour>[0-2][0-9]):(?P<min>[0-5][0-9]):(?P<sec>[0-5][0-9])"
)
logger = logging.getLogger('Auction Worker')

SCHEDULER = GeventScheduler(job_defaults={"misfire_grace_time": 100},
                            executors={'default': AuctionsExecutor()},
                            logger=logger)
SCHEDULER.timezone = timezone('Europe/Kiev')


class Auction(object):
    """docstring for Auction"""
    def __init__(self, auction_doc_id,
                 worker_defaults={},
                 auction_data={}):
        super(Auction, self).__init__()
        self.auction_doc_id = auction_doc_id
        self.tender_url = urljoin(
            worker_defaults["TENDERS_API_URL"],
            '/api/{0}/tenders/{1}'.format(
                worker_defaults["TENDERS_API_VERSION"], auction_doc_id
Example No. 53
class Application(object):

    def __init__(self, config_file, data_dir, verbose=0):
        """
        Construct a new application instance.
        """

        self.config_file = config_file
        self.data_dir = data_dir
        self.verbose = verbose

        self.server = None
        self.provider = None
        self.connections = {}

        # Setup all parts of the application
        self.setup_config()
        self.setup_open_files()
        self.setup_database()
        self.setup_state()
        self.setup_connections()
        self.setup_cache()
        self.setup_provider()
        self.setup_server()
        self.setup_tasks()

    def setup_config(self):
        """
        Load the application config from file.
        """

        logger.debug("Loading config from %s", self.config_file)
        self.config = config.get_config(self.config_file)

    def setup_open_files(self):
        """
        Get and set open files limit.
        """

        open_files_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        new_open_files_limit = self.config["Advanced"]["open files limit"]

        logger.info(
            "System reports open files limit is %d.", open_files_limit)

        if new_open_files_limit != -1:
            logger.info(
                "Changing open files limit to %d.", new_open_files_limit)

            try:
                resource.setrlimit(resource.RLIMIT_NOFILE, (
                    new_open_files_limit, resource.RLIM_INFINITY))
            except resource.error as e:
                logger.warning(
                    "Failed to increase the number of open files: %s", e)

    def setup_database(self):
        """
        Initialize database.
        """

        self.db = Database(self.config["Provider"]["database"])
        self.db.create_database(drop_all=False)

    def setup_state(self):
        """
        Setup state.
        """

        self.state = State(os.path.join(
            self.get_cache_dir(), "provider.state"))

    def setup_cache(self):
        """
        Setup the caches for items and artwork.
        """

        # Initialize caches for items and artwork.
        item_cache = cache.ItemCache(
            path=self.get_cache_dir(
                self.config["Provider"]["item cache dir"]),
            max_size=self.config["Provider"]["item cache size"],
            prune_threshold=self.config[
                "Provider"]["item cache prune threshold"])
        artwork_cache = cache.ArtworkCache(
            path=self.get_cache_dir(self.config[
                "Provider"]["artwork cache dir"]),
            max_size=self.config["Provider"]["artwork cache size"],
            prune_threshold=self.config[
                "Provider"]["artwork cache prune threshold"])

        # Create a cache manager
        self.cache_manager = cache.CacheManager(
            db=self.db,
            item_cache=item_cache,
            artwork_cache=artwork_cache,
            connections=self.connections)

    def setup_connections(self):
        """
        Initialize the connections.
        """

        for name, section in self.config["Connections"].iteritems():
            index = len(self.connections) + 1

            self.connections[index] = Connection(
                db=self.db,
                state=self.state,
                index=index,
                name=name,
                url=section["url"],
                username=section["username"],
                password=section["password"],
                synchronization=section["synchronization"],
                synchronization_interval=section["synchronization interval"],
                transcode=section["transcode"],
                transcode_unsupported=section["transcode unsupported"])

    def setup_provider(self):
        """
        Setup the provider.
        """

        # Create provider.
        logger.debug(
            "Setting up provider for %d connection(s).", len(self.connections))

        self.provider = Provider(
            server_name=self.config["Provider"]["name"],
            db=self.db,
            state=self.state,
            connections=self.connections,
            cache_manager=self.cache_manager)

        # Do an initial synchronization if required.
        for connection in self.connections.itervalues():
            connection.synchronizer.provider = self.provider
            connection.synchronizer.synchronize(initial=True)

    def setup_server(self):
        """
        Create the DAAP server.
        """

        logger.debug(
            "Setting up DAAP server at %s:%d",
            self.config["Daap"]["interface"], self.config["Daap"]["port"])

        self.server = DaapServer(
            provider=self.provider,
            password=self.config["Daap"]["password"],
            ip=self.config["Daap"]["interface"],
            port=self.config["Daap"]["port"],
            cache=self.config["Daap"]["cache"],
            cache_timeout=self.config["Daap"]["cache timeout"] * 60,
            bonjour=self.config["Daap"]["zeroconf"],
            debug=self.verbose > 1)

        # Extend server with a web interface
        if self.config["Daap"]["web interface"]:
            webserver.extend_server_app(self, self.server.app)

    def setup_tasks(self):
        """
        Setup all tasks that run periodically.
        """

        self.scheduler = GeventScheduler()

        # Add an initial one-shot job: the interval trigger fires one second
        # after startup, and the job removes itself on its first run.
        def _job():
            job.remove()
            self.synchronize(synchronization="startup")
        job = self.scheduler.add_job(
            _job, max_instances=1, trigger="interval", seconds=1)

        # Scheduler task to clean and expire the cache.
        cache_interval = self.config['Provider']['item cache prune interval']

        self.scheduler.add_job(
            self.cache_manager.expire,
            max_instances=1, trigger="interval", minutes=cache_interval)
        self.scheduler.add_job(
            self.cache_manager.clean,
            max_instances=1, trigger="interval", minutes=cache_interval)

        # Schedule tasks to synchronize each connection.
        for connection in self.connections.itervalues():
            self.scheduler.add_job(
                self.synchronize, args=[connection, "interval"],
                max_instances=1, trigger="interval",
                minutes=connection.synchronization_interval)

    def synchronize(self, connections=None, synchronization="manual"):
        """
        Synchronize selected connections (or all) given a synchronization
        event.
        """

        count = 0
        connections = connections or self.connections.values()

        logger.debug("Synchronization triggered via '%s'.", synchronization)

        for connection in connections:
            if synchronization == "interval":
                if connection.synchronization == "interval":
                    connection.synchronizer.synchronize()
                    count += 1
            elif synchronization == "startup":
                if connection.synchronization == "startup":
                    if not connection.synchronizer.is_initial_synced:
                        connection.synchronizer.synchronize()
                        count += 1
            elif synchronization == "manual":
                connection.synchronizer.synchronize()
                count += 1

        logger.debug("Synchronized %d connections.", count)

        # Update the cache.
        self.cache_manager.cache()

    def start(self):
        """
        Start the server.
        """

        logger.debug("Starting task scheduler.")
        self.scheduler.start()

        logger.debug("Starting DAAP server.")
        self.server.serve_forever()

    def stop(self):
        """
        Stop the server.
        """

        logger.debug("Stopping DAAP server.")
        self.server.stop()

        logger.debug("Stopping task scheduler.")
        self.scheduler.shutdown()

    def get_cache_dir(self, *path):
        """
        Resolve the path to a cache directory. The path is relative to the
        data directory. The directory will be created if it does not exist,
        and will be tested for writing.
        """

        full_path = os.path.abspath(os.path.normpath(
            os.path.join(self.data_dir, *path)))
        logger.debug("Resolved %s to %s", path, full_path)

        # Create path if required.
        try:
            os.makedirs(full_path, 0755)
        except OSError as e:
            if e.errno == errno.EEXIST and os.path.isdir(full_path):
                pass
            else:
                raise Exception("Could not create folder: %s" % full_path)

        # Test for writing.
        ok = True
        test_file = os.path.join(full_path, ".write-test")

        while os.path.exists(test_file):
            test_file = test_file + str(random.randint(0, 9))

        try:
            with open(test_file, "w") as fp:
                fp.write("test")
        except IOError:
            ok = False
        finally:
            try:
                os.remove(test_file)
            except OSError:
                ok = False

        if not ok:
            raise Exception("Could not write to cache folder: %s" % full_path)

        # Cache directory created and tested for writing.
        return full_path
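
The setup_tasks() method above uses a small trick for one-shot startup work: an interval job whose body removes the job on its first run. Here is a stripped-down sketch of that pattern under the same gevent backend; the printed message is illustrative.

# Sketch: an interval job that unschedules itself after its first run.
import gevent
from apscheduler.schedulers.gevent import GeventScheduler

scheduler = GeventScheduler()

def _job():
    job.remove()  # drop the interval trigger after the first execution
    print('startup synchronization')

job = scheduler.add_job(_job, trigger='interval', seconds=1, max_instances=1)
scheduler.start()
gevent.sleep(2)  # demo only: give the scheduler greenlet time to fire once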
Example #54
# The apscheduler imports below are added for completeness; HomerHelper is a
# project-specific module assumed to be importable in the original code.
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.schedulers.gevent import GeventScheduler

street = HomerHelper.getSettingValue('Street')
city = HomerHelper.getSettingValue('City')
state = HomerHelper.getSettingValue('State')
timezone = HomerHelper.calcTimeZone(street, city, state)

jobstores = {
    'default': MemoryJobStore()
}
executors = {
    'default': ThreadPoolExecutor(40)
}
job_defaults = {
    'misfire_grace_time': None,
    'coalesce': True,
    'max_instances': 3
}
scheduler = GeventScheduler(executors=executors, job_defaults=job_defaults, timezone=timezone)
scheduler.start()
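# GeventScheduler.start() returns the scheduler's greenlet; a caller that
# needs to block on the scheduler can keep and join that return value.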

def schedule(*args, **kwargs):
    job = scheduler.add_job(*args, **kwargs)
    return job

def KillJob(*args, **kwargs):
    scheduler.remove_job(*args, **kwargs)

def KillScheduler():
    scheduler.shutdown()

def GetJob(*args, **kwargs):
    job = scheduler.get_job(*args, **kwargs)
    return job
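
Hypothetical usage of the wrappers above; the cron schedule, job id, and job body are made up for illustration and are not part of the original module.

def water_lawn():
    print('sprinklers on')

job = schedule(water_lawn, 'cron', hour=6, minute=30, id='water_lawn')
print(GetJob('water_lawn'))  # the same job, due daily at 06:30 local time
KillJob('water_lawn')        # remove the job by its id
KillScheduler()              # shut the scheduler down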