Example #1
0
def run():
    """Register all periodic jobs on a blocking scheduler and run it forever."""
    scheduler = BlockingScheduler()
    scheduler.add_executor(ThreadPoolExecutor(20))
    scheduler.add_jobstore(MemoryJobStore())

    # Two-minute interval jobs; jitter spreads the start times a little.
    for interval_job in (process_insights, mark_insights):
        scheduler.add_job(interval_job, 'interval', minutes=2,
                          max_instances=1, jitter=20)

    # Daily cron jobs.
    scheduler.add_job(download_product_dataset, 'cron', day='*', hour='3',
                      max_instances=1)
    scheduler.add_job(refresh_insights, 'cron', day='*', hour='4',
                      max_instances=1)
    scheduler.add_job(generate_insights, 'cron', day='*', hour='4',
                      minute=15, max_instances=1)

    scheduler.add_listener(exception_listener, EVENT_JOB_ERROR)
    scheduler.start()
Example #2
0
def run():
    """Configure the blocking scheduler with every recurring job and start it."""
    scheduler = BlockingScheduler()
    scheduler.add_executor(ThreadPoolExecutor(20))
    scheduler.add_jobstore(MemoryJobStore())

    # Interval jobs: every two minutes, jittered.
    for interval_job in (process_insights, mark_insights):
        scheduler.add_job(interval_job, "interval", minutes=2,
                          max_instances=1, jitter=20)

    # Daily cron jobs, ordered by firing hour.
    scheduler.add_job(save_facet_metrics, "cron", day="*", hour=1,
                      max_instances=1)
    scheduler.add_job(download_product_dataset, "cron", day="*", hour="3",
                      max_instances=1)
    scheduler.add_job(functools.partial(refresh_insights, with_deletion=True),
                      "cron", day="*", hour="4", max_instances=1)
    scheduler.add_job(generate_insights, "cron", day="*", hour="4",
                      minute=15, max_instances=1)
    scheduler.add_job(generate_quality_facets, "cron", day="*", hour="5",
                      minute=25, max_instances=1)

    scheduler.add_listener(exception_listener, EVENT_JOB_ERROR)
    scheduler.start()
Example #3
0
def schedule():
    """Run `pull_job` every day at 21:30 Mountain time until interrupted."""
    scheduler = BlockingScheduler()
    scheduler.add_executor('processpool')
    scheduler.add_job(
        pull_job,
        'cron',
        day_of_week='mon-sun',
        hour=21,
        minute=30,
        timezone=mountain_time,
    )
    exit_key = 'Break' if os.name == 'nt' else 'C'
    print('Press Ctrl+{0} to exit'.format(exit_key))

    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #4
0
def single_scheduler(i):
    """Build the global job Instance for slice *i* and run `tick` on an interval."""
    global job_instance
    words = get_words(i)
    # NOTE(review): `index` is resolved from module scope, not from the `i`
    # argument — confirm this is intentional and not a typo for `i`.
    job_instance = Instance(words, index)
    scheduler = BlockingScheduler()
    scheduler.add_executor('processpool')
    scheduler.add_job(tick, 'interval', seconds=YConfig.TRACK_SPAN)
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        print('process has exit!!!')
        scheduler.shutdown()
Example #5
0
def interval_example1():
    """Demo: run `tick` in a process pool every 3 seconds until interrupted."""
    scheduler = BlockingScheduler()
    scheduler.add_executor('processpool')
    scheduler.add_job(tick, 'interval', seconds=3)
    exit_key = 'Break' if os.name == 'nt' else 'C'
    print('Press Ctrl+{0} to exit'.format(exit_key))

    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #6
0
def main(args):
    """Start a blocking scheduler running system and/or elastic stat readers.

    ``args[1]`` selects the mode: ``'sys'`` for system stats only,
    ``'elastic'`` for elastic stats only, absent for both.
    """
    # Tolerate a missing argument instead of raising IndexError; previously
    # the `None` branch below was unreachable because args[1] would raise.
    run_flag = args[1] if len(args) > 1 else None
    bs = BlockingScheduler()
    bs.add_executor(ThreadPoolExecutor(5))
    system = SystemStatsReader(conf)
    elastic = ElasticStatsReader(conf)
    if run_flag == 'sys':
        add_job_to_scheduler(bs, run_system_only, [system])
    elif run_flag == 'elastic':
        add_job_to_scheduler(bs, run_elastic_only, [elastic])
    elif run_flag is None:  # PEP 8: compare against None with `is`, not `==`
        add_job_to_scheduler(bs, run_system_only, [system])
        add_job_to_scheduler(bs, run_elastic_only, [elastic])
    bs.start()
def main(argv):
    """Load the manager config, set up logging, and start the task scheduler."""
    global config
    global logger
    # safe_load avoids arbitrary Python object construction from YAML;
    # plain yaml.load without an explicit Loader is deprecated and unsafe.
    # `with` guarantees the file is closed even if parsing raises.
    with open('./config/manager.yaml', mode='rb') as f:
        config = yaml.safe_load(f)
    logging.basicConfig()
    logger = logging.getLogger(config['logging']['name'])
    logger.setLevel(get_logging_level())
    logging.getLogger('apscheduler.scheduler').setLevel(get_logging_level())

    scheduler = BlockingScheduler()
    scheduler.add_executor(ThreadPoolExecutor(5))
    add_scheduler_tasks(scheduler)
    scheduler.start()
Example #8
0
def main(argv):
    """Load the manager config, set up logging, and start the task scheduler."""
    global config
    global logger
    # safe_load avoids arbitrary Python object construction from YAML;
    # plain yaml.load without an explicit Loader is deprecated and unsafe.
    # `with` guarantees the file is closed even if parsing raises.
    with open('../config/manager.yaml', mode='rb') as f:
        config = yaml.safe_load(f)
    logging.basicConfig()
    logger = logging.getLogger(config['logging']['name'])
    logger.setLevel(get_logging_level())
    logging.getLogger('apscheduler.scheduler').setLevel(get_logging_level())

    scheduler = BlockingScheduler()
    scheduler.add_executor(ThreadPoolExecutor(5))
    add_scheduler_tasks(scheduler)
    scheduler.start()
def schedule(parsed_args: Union[argparse.Namespace, Dict[str, int]]):
    """
    Schedule the application to automatically collect data.

    Pass a namespace or dict with one or more of the following keys.
    Do not include (); adding an "s" is optional, i.e. both 'year' and
    'years' are valid keys:
    year(s), month(s), week(s), day(s), hour(s), minute(s), second(s)

    :param parsed_args: Union[argparse.Namespace, Dict[str, int]]
    :return: None
    """
    # Normalize to a plain mapping so dict input works as documented
    # (the original used attribute access, which crashed for dicts and
    # raised AttributeError for any omitted namespace field).
    source = dict(parsed_args) if isinstance(parsed_args, dict) else vars(parsed_args)

    def _lookup(unit):
        # Accept both the singular and pluralized key, per the docstring.
        for key in (unit, unit + "s"):
            if key in source:
                return source[key]
        return None

    config = {
        unit: _lookup(unit)
        for unit in ("year", "month", "week", "day", "hour", "minute", "second")
    }

    job_type = source.get("job_type")
    if job_type == "interval":
        # IntervalTrigger takes pluralized kwargs and has no year/month units.
        config = {(k + "s"): config[k]
                  for k in config
                  if k not in ["year", "month"] and config[k] is not None}
    else:
        config = {
            key: value
            for key, value in config.items() if value is not None
        }

    scheduler = BlockingScheduler()
    scheduler.add_executor("processpool")
    scheduler.add_job(store_temp,
                      job_type,
                      misfire_grace_time=source.get("misfire"),
                      **config)
    scheduler.start()
Example #10
0
class CronJobs:
    """Thin wrapper around a BlockingScheduler with a process-pool executor."""

    def __init__(self):
        self.scheduler = BlockingScheduler()
        self.scheduler.add_executor('processpool')

    def addJobs(self, time_in_seconds, job_to_execut):
        """Register *job_to_execut* to run every *time_in_seconds* seconds.

        :param time_in_seconds: interval between runs, in seconds
        :param job_to_execut: callable to execute on the interval
        """
        self.scheduler.add_job(job_to_execut, 'interval',
                               seconds=time_in_seconds)

    def addJobsWithArgs(self, time_in_seconds, job_to_execut, job_args=None):
        """Register *job_to_execut* on an interval with positional arguments.

        :param time_in_seconds: interval between runs, in seconds
        :param job_to_execut: callable to execute on the interval
        :param job_args: positional arguments passed to the job, or None
        """
        self.scheduler.add_job(job_to_execut, 'interval',
                               seconds=time_in_seconds, args=job_args)

    def startJobs(self):
        """Run the scheduler, blocking until interrupted."""
        try:
            self.scheduler.start()
        except (KeyboardInterrupt, SystemExit):
            pass
Example #11
0
    # old_day = datetime.datetime.now() - datetime.timedelta(days=1) #6年前
    # old_day = datetime.datetime(2013,5,1,0,0,0)
    old_day = None
    #fetch_5min_data(old_day, 'bitcoin')
    # fetch_5min_data(old_day, 'bitcoin')
    fetch_sometime_data('bitcoin', 1, 5, from_date=old_day)
    # fetch_sometime_data('bitcoin', 7, 15, from_date=old_day)   #7天
    # fetch_sometime_data('bitcoin', 30, 60, from_date=old_day) #30天
    # fetch_sometime_data('bitcoin', 90, 120, from_date=old_day)  #3个月
    day_1year_ago = format_today - datetime.timedelta(days=365)
    print(format_today, day_1year_ago)
    # fetch_sometime_data('bitcoin', 365, 1440, from_date=day_1year_ago) #1年
    fetch_sometime_data('bitcoin', 365*4, 2880, from_date=old_day) #全部



if __name__ == '__main__':
    scheduler = BlockingScheduler()
    scheduler.add_executor('processpool')
    scheduler.add_job(run_once, 'interval', seconds=300)
    exit_key = 'Break' if os.name == 'nt' else 'C'
    print('Press Ctrl+{0} to exit'.format(exit_key))

    try:
        # The scheduler is configured but not started; a single direct pass
        # of run_once() is executed instead.
        run_once()
    except (KeyboardInterrupt, SystemExit):
        pass



Example #12
0
from apscheduler.schedulers.blocking import BlockingScheduler
import pytz
import datetime


def begin():
    """Run each synchronization step in order, printing progress after each."""
    steps = (
        (create_json.begin, "create_json finished"),
        (datax_command.begin, "datax finished"),
        (algorithm_newHouse.begin, "algorithm_newhouse  synchronous finished "),
        (algorithm_secondHouse.begin, "algorithm_secondHouse synchronous finished"),
        (algorithm_phoneDevice.begin, "algorithm_phoneDevice synchronous finished"),
    )
    for step, message in steps:
        step()
        print(message)


def begin2():
    """Smoke-test job: print a fixed marker string."""
    print("test")


if __name__ == '__main__':
    timez = pytz.timezone('Asia/Shanghai')
    scheduler = BlockingScheduler(timezone=timez)
    scheduler.add_executor('processpool')
    # Fire the full sync pipeline daily at 04:00:00 Shanghai time.
    scheduler.add_job(begin, 'cron', hour=4, minute=0, second=0)
    scheduler.start()
Example #13
0
import datetime
from apscheduler.schedulers.blocking import BlockingScheduler


PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))


def log(message: str, file="ip_blacklister.log") -> None:
    """Append *message* plus a newline to a log file under PROJECT_ROOT."""
    path = os.path.join(PROJECT_ROOT, file)
    with open(path, "a") as handle:
        handle.write(message + "\n")


def run():
    """Scan access.log via the async main() coroutine, logging start and end."""
    log(f"ip blacklister starting to scan access.log @ {datetime.datetime.now()}")
    asyncio.get_event_loop().run_until_complete(main())
    log(f"ip blacklister scanned access.log @ {datetime.datetime.now()}\n\n")


if __name__ == '__main__':
    scheduler = BlockingScheduler()
    scheduler.add_executor("processpool")
    # Scan once a day at noon local time; allow up to an hour of misfire slack.
    scheduler.add_job(
        run,
        "cron",
        hour=12,
        misfire_grace_time=3600,
        name=f"scan @ {datetime.datetime.now()}",
    )
    log(f"\nStarted ip blacklister @ {datetime.datetime.now()}")
    log("ip blacklister will scan access.log at 12:00 local time")
    scheduler.start()
Example #14
0
def run():
    """Prepare InfluxDB and data, register all jobs, and run the scheduler."""
    # Make sure the InfluxDB database exists before any metrics job fires.
    ensure_influx_database()

    # Must run on every scheduler start so Robotoff never begins in a state
    # where tasks fail because of missing data.
    _update_data()

    scheduler = BlockingScheduler()
    scheduler.add_executor(ThreadPoolExecutor(20))
    scheduler.add_jobstore(MemoryJobStore())

    # Set process_after on newly added automatically-processable insights,
    # indicating when these insights should be auto-applied.
    scheduler.add_job(mark_insights, "interval", minutes=2,
                      max_instances=1, jitter=20)

    # Apply every automatically-processable insight not yet applied.
    scheduler.add_job(process_insights, "interval", minutes=2,
                      max_instances=1, jitter=20)

    # Export daily product metrics for monitoring.
    scheduler.add_job(save_facet_metrics, "cron", day="*", hour=1,
                      max_instances=1)

    # Refresh the data needed to generate insights.
    scheduler.add_job(_update_data, "cron", day="*", hour="3", max_instances=1)

    # Reconcile product insights with the latest PO dump:
    # - delete non-annotated insights for deleted products and insights that
    #   are no longer applicable;
    # - update insight attributes.
    scheduler.add_job(functools.partial(refresh_insights, with_deletion=True),
                      "cron", day="*", hour="4", max_instances=1)

    # Generate category insights with ElasticSearch from the last Product
    # Opener data dump.
    scheduler.add_job(generate_insights, "cron", day="*", hour="4",
                      minute=15, max_instances=1)

    scheduler.add_job(generate_quality_facets, "cron", day="*", hour="5",
                      minute=25, max_instances=1)

    scheduler.add_listener(exception_listener, EVENT_JOB_ERROR)
    scheduler.start()
Example #15
0
			print("Die Verbindung zu „%s@%s“ ist fehlgeschlagen: %s" % (username, hostname, e.args[1]))
			if not tolerant:
				exit(1)
		except SSHException as e:
			print("Allgemeiner SSH-Fehler bei Verbindung zu Host „%s“: %s" % (hostname, e))
			if not tolerant:
				exit(1)
		except gaierror:
			print("Der Hostname „%s“ konnte nicht gefunden werden." % hostname)
			if not tolerant:
				exit(1)
		else:
			scp = SCPClient(ssh.get_transport())

	scheduler = BlockingScheduler()
	scheduler.add_executor('threadpool')
	if not tolerant: # Programm-Abrruch in verschiedenen Fehlerfällen
		scheduler.add_listener(errorListener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

	if (probeCronTabExpression == ''):
		scheduler.add_job(probe, 'cron', args=[dbConnection, tolerant, maxProbeTries], coalesce=False, second='*/10') # verwende Standard-Intervall, falls keine Cron-Tab-Expr. gesetzt
	else:
		scheduler.add_job(probe, CronTrigger.from_crontab(probeCronTabExpression), args=[dbConnection, tolerant, maxProbeTries])

	if (plotCronTabExpression == ''):
		scheduler.add_job(plot, 'cron', args=[dbConnection, diagramPeriod, remotepath, scp, tolerant], coalesce=False, minute='*/1', second=50) # verwende Standard-Intervall, falls keine Cron-Tab-Expr. gesetzt
	else:
		scheduler.add_job(plot, CronTrigger.from_crontab(plotCronTabExpression), args=[dbConnection, diagramPeriod, remotepath, scp, tolerant])


	#probe(tolerant) #Zum Testen! Später entfernen
Example #16
0
class JobScheduler(object):
    """Blocking APScheduler wrapper that tracks job history in MongoDB."""

    def __init__(self, every=30, unit='second'):
        self.mongo = mongopool.get()
        self.cursor = self.mongo.get_database('apscheduler').get_collection(
            'jobs')
        self.every = every
        self.unit = unit
        self.scheduler = BlockingScheduler(logger=logger)
        self.scheduler.configure(jobstores=jobstores,
                                 executors=executors,
                                 job_defaults=job_defaults,
                                 timezone=pytz.timezone('Asia/Saigon'))
        self._set_trigger(every, unit)

    def _set_trigger(self, every, unit):
        """Build a CronTrigger firing every *every* *unit*s, anchored at now.

        :raises ValueError: if *unit* is not second/minute/hour/day.
        """
        now = datetime.now().astimezone(pytz.timezone('Asia/Saigon'))
        if unit == 'second':
            self.trigger = CronTrigger(second='*/{}'.format(every),
                                       start_date=now)
        elif unit == 'minute':
            self.trigger = CronTrigger(minute='*/{}'.format(every),
                                       start_date=now)
        elif unit == 'hour':
            self.trigger = CronTrigger(hour='*/{}'.format(every),
                                       start_date=now)
        elif unit == 'day':
            self.trigger = CronTrigger(day='*/{}'.format(every),
                                       start_date=now)
        else:
            # BUGFIX: Exception(message=...) raised "TypeError: Exception()
            # takes no keyword arguments" instead of the intended error.
            raise ValueError('Unknown time unit')

    def add_jobstore(self, jobstore, alias):
        """Register an additional job store under *alias*."""
        self.scheduler.add_jobstore(jobstore, alias)

    def add_executor(self, executor, alias):
        """Register an additional executor under *alias*."""
        self.scheduler.add_executor(executor, alias)

    def add_job(self,
                job_fn,
                id='id1',
                name='job1',
                jobstore='default',
                executor='default',
                args=None,
                kwargs=None):
        """Schedule *job_fn* on the shared trigger, replacing stale history.

        The next fire time is injected into the job's kwargs as 'run_time'.
        """
        now = datetime.now().astimezone(pytz.timezone('Asia/Saigon'))
        history = list(self.cursor.find({'_id': id}))
        if history:
            # TODO: process missing jobs before discarding the stale record.
            self.cursor.delete_one({'_id': id})
        next_run_time = self.trigger.get_next_fire_time(None, now)
        if kwargs:
            kwargs['run_time'] = next_run_time
        else:
            kwargs = {'run_time': next_run_time}

        self.scheduler.add_job(job_fn,
                               trigger=self.trigger,
                               next_run_time=next_run_time,
                               id=id,
                               name=name,
                               jobstore=jobstore,
                               executor=executor,
                               args=args,
                               kwargs=kwargs)

    def remove_job(self, id, jobstore='default'):
        """Remove the job with *id* from *jobstore*."""
        self.scheduler.remove_job(job_id=id, jobstore=jobstore)

    def callback(self, callback_fn, mark=EVENT_ALL):
        """Register *callback_fn* for the events selected by *mark*.

        BUGFIX: *mark* was previously ignored, so listeners always received
        every event regardless of the requested mask.
        """
        self.scheduler.add_listener(callback_fn, mark)

    def start(self):
        """Return the Mongo client to the pool and run the scheduler (blocks)."""
        mongopool.put(self.mongo)
        self.scheduler.start()

    def shutdown(self):
        """Stop the scheduler.

        The trailing bare attribute access `self.scheduler.scheduled_job`
        was dead code with no effect and has been removed.
        """
        self.scheduler.shutdown()