def main():
    modulesToSchedule = getAgentsShedule()
    logging.debug("The following classes found: {}".format(modulesToSchedule))

    for agentname, polltime in modulesToSchedule.items():
        modulespec = importutil.find_spec(agentname)
        app_conf = importutil.module_from_spec(modulespec)
        modulespec.loader.exec_module(app_conf)
        clsmembers = inspect.getmembers(app_conf, inspect.isclass)
        for clsmember in clsmembers:
            if issubclass(clsmember[1],
                          BaseAgent) and clsmember[0] != 'BaseAgent':
                logging.debug(
                    "Scheduling agent class: {}".format(clsmember[0]))
                agent = clsmember[1]()
                schedule.every(polltime).seconds.do(run_threaded,
                                                    agent.execute)

    if DO_DEBUG_AGENTS:
        schedule.run_all()
        for t in threading.enumerate():
            if t.daemon and 'pydevd.' not in t.name:
                t.join()
        return 0
    else:
        while True:
            schedule.run_pending()
            time.sleep(1)
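
Example 1 assumes a run_threaded helper that is not shown. A minimal sketch of the
usual pattern from the schedule FAQ (the helper name matches the snippet above):

import threading

def run_threaded(job_func):
    # run each job in its own thread so a slow agent does not block the scheduler
    job_thread = threading.Thread(target=job_func)
    job_thread.start()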
Example #2
def _loop():
    logger.debug("Looping on schedule forever")

    schedule.run_all()
    while True:
        schedule.run_pending()
        time.sleep(1)
Example #3
 def test_run_all(self):
     mock_job = make_mock_job()
     every().minute.do(mock_job)
     every().hour.do(mock_job)
     every().day.at('11:00').do(mock_job)
     schedule.run_all()
     assert mock_job.call_count == 3
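
These tests rely on a make_mock_job helper; a minimal sketch matching the way
schedule's own test suite builds it (assumed, since the helper is not shown here):

from unittest import mock  # schedule's own tests use the standalone mock package

def make_mock_job(name=None):
    job = mock.Mock()
    job.__name__ = name or 'job'
    return job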
Example #4
def add_group(group_id):
    print(group_id)
    type = request.args.get('type')
    group = None  # avoid a NameError below when type is neither value
    if type == "group":
        group = groupy.Group.list().filter(id=group_id).first
    elif type == "member":
        group = groupy.Member.list().filter(user_id=group_id).first
    if not group:
        return render_template(
            "layout.html", message="Error! Group ID not found."), 404
    if group_id in group_jobs:
        return render_template(
            "layout.html",
            message="Error! Group already added.")
    schedule.every(1).minutes.do(
        handle_update_group_async,
        group_id=group_id,
        type=type,
        lock=threading.Lock())
    group_jobs.append(group_id)
    schedule.run_all()
    if type == "group":
        return render_template(
            "layout.html",
            message="Fetching group history, please wait. <br> Number of messages: {0}. <br> Estimated time for processing: {1}.".format(
                group.message_count,
                verbose_timedelta(
                    timedelta(
                        seconds=group.message_count /
                        100 *
                        1.1))))
    elif type == "member":
        return render_template(
            "layout.html",
            message="Fetching message history, please wait.")
Example #5
    def run(self):
        # Compute start time
        t = time(hour=0, minute=0)
        soff = timedelta(minutes=self.starttime)
        t = datetime.combine(date.today(), t) + soff

        now = datetime.now()

        soff = t - now
        if soff.total_seconds() < 0:
            # We missed the window.  Wait until the next day.
            soff = soff + timedelta(days=1)

        print "First start is in %s" % soff

        # Wait for first iteration
        sleep(soff.total_seconds())

        job = schedule.every(self.period).minutes.do(self.job)

        # Do the first run
        schedule.run_all()

        while True:
            print "Next run at %s" % job.next_run
            sleep(schedule.idle_seconds() + 1)
            schedule.run_pending()
Example #6
def blockInternet(duration):
    """
    For the physical device running this script that is connected to a
    network, arp poisons its default gateway on the network for all hosts,
    thereby 'suspending' connectivity to WAN (internet) for the given duration.

    Pre-requisites for this function to run properly:
        1. Install arpspoof on OS: sudo apt-get install dsniff.
        2. Run this function using an OS account with root privileges.

    :param duration: Integer, the block duration in minutes.
    :return: schedule.CancelJob, making this function a one-off job (no repeat).
    """
    import time
    # Fetch network parameters from OS for arp spoofing
    print("GOING TO SLEEP!")

    def noob():
        print("STILL SLEEPING")
        time.sleep(1)
        # shlex.split() already builds an argv list, so shell=True would be wrong here
        p1 = subprocess.Popen(shlex.split("ping -t google.com"))
        # p1.communicate()

    schedule.every().day.at("20:28").do(noob)
    print("DONE!")

    while True:
        schedule.run_all()
        print("WAITING")
        time.sleep(1)
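
The docstring above promises ARP poisoning via arpspoof, but the body is debug
scaffolding. A minimal sketch of what the docstring actually describes; the
interface name and gateway address are placeholders, not values from the source:

import shlex
import subprocess
import time

import schedule

def block_internet(duration):
    # placeholders: real code would discover these from the OS
    interface = "eth0"
    gateway_ip = "192.168.1.1"
    # without a -t target, arpspoof poisons the gateway entry for all hosts
    poisoner = subprocess.Popen(
        shlex.split("arpspoof -i {} {}".format(interface, gateway_ip)))
    time.sleep(duration * 60)  # note: this blocks the scheduler for the duration
    poisoner.terminate()
    # returning CancelJob makes this a one-off job, as the docstring states
    return schedule.CancelJob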
Example #7
 def test_run_all(self):
     mock_job = make_mock_job()
     every().minute.do(mock_job)
     every().hour.do(mock_job)
     every().day.at("11:00").do(mock_job)
     schedule.run_all()
     assert mock_job.call_count == 3
Example #8
 def test_run_all(self):
     mock_job = make_mock_job()
     every().minute.do(mock_job)
     every().hour.do(mock_job)
     every().day.at('11:00').do(mock_job)
     every(1, 2).seconds.do(mock_job)
     schedule.run_all()
     assert mock_job.call_count == 4
Example #9
 def test_once(self):
     mock_job = make_mock_job()
     once().second.do(mock_job)
     once().day.at('10:30').do(mock_job)
     assert len(schedule.jobs) == 2
     schedule.run_all()
     assert mock_job.call_count == 2
     assert len(schedule.jobs) == 0
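
once() is not part of the upstream schedule API; a hypothetical sketch consistent
with the assertions above (jobs cancel themselves after their first run):

import functools

import schedule

def once(interval=1):
    # like schedule.every(), but the scheduled job removes itself after one run
    job = schedule.every(interval)
    original_do = job.do

    def do(job_func, *args, **kwargs):
        @functools.wraps(job_func)
        def run_once(*a, **kw):
            job_func(*a, **kw)
            return schedule.CancelJob
        return original_do(run_once, *args, **kwargs)

    job.do = do
    return job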
Example #10
 def loop(self):
     if self.test_mode:
         schedule.run_all()
     else:
         self.log_time_of_next_run()
     while True:
         schedule.run_pending()
         time.sleep(60)
Example #11
    def run(self) -> None:
        # When started, run all jobs once (download data up to now)
        schedule.run_all()

        while True:
            # run_pending() executes any jobs that are due
            schedule.run_pending()
            time.sleep(1)
Example #12
    def watch_inbox(self, pubsub_request):
        schedule.every().day.do(self._watch_inbox_helper,
                                request=pubsub_request)

        schedule.run_all()
        while True:
            schedule.run_pending()
            time.sleep(1)
Example #13
def testOneTimeJobDecorator_runTheOneJobScheduled_logNoNextJob(mocker):
    mockLogging = mocker.patch("block.logging.info")

    scheduledFunction = block.oneTimeJob(lambda x: 0)
    schedule.every().day.at("10:00").do(scheduledFunction, 0)
    schedule.run_all()

    mockLogging.assert_called_with("No next job scheduled!")
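
block.oneTimeJob is not shown; a plausible sketch consistent with the assertion
(run the wrapped function once, cancel the job, and log when nothing else remains):

import logging

import schedule

def oneTimeJob(func):
    # hypothetical reconstruction of the decorator under test
    def wrapper(*args, **kwargs):
        func(*args, **kwargs)
        if len(schedule.jobs) <= 1:  # only this soon-to-be-cancelled job is left
            logging.info("No next job scheduled!")
        return schedule.CancelJob
    return wrapper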
Example #15
    def test_run_all_with_decorator_defaultargs(self):
        mock_job = make_mock_job()

        @repeat(every().minute)
        def job(nothing=None):
            mock_job(nothing)

        schedule.run_all()
        mock_job.assert_called_once_with(None)
Example #16
    def test_run_all_with_decorator_args(self):
        mock_job = make_mock_job()

        @repeat(every().minute, 1, 2, "three", foo=23, bar={})
        def job(*args, **kwargs):
            mock_job(*args, **kwargs)

        schedule.run_all()
        mock_job.assert_called_once_with(1, 2, "three", foo=23, bar={})
Example #17
    def test_integration(self):
        import start
        import schedule

        user = User("*****@*****.**")
        user.add_homepage_notifications(
            Homepage(
                "http://dragonball-tube.com/dragonball-super-episoden-streams",
                MediaitemEpisodeParser, "Dragonball Super Anime"))
        schedule.run_all()
Example #18
def func1():
    schedule.every(1).hours.do(job2)
    schedule.every(3).seconds.do(job)

    schedule.run_all()
    while True:
        #     if not STUTAS:
        #         schedule.clear()
        #         break
        schedule.run_pending()
Example #19
def result():
    if request.method == 'POST':
        info1 = request.form.get('drug')
        info2 = request.form.get('frequency')
        info3 = request.form.get('duration')
        numba = request.form.get('cellnum')
        schedule.every().day.at("2:33").do(send_messages, "ved", info1)
        schedule.run_all()
        result = request.form
        return render_template("result.html", result=result)
Example #20
def set_schedule():
    schedule.every(7).minutes.do(unwanted_submission_remover.delete_unwanted_submissions)
    schedule.every(20).minutes.do(phase2_handler.filter_comments_from_db)
    listen_only = bool(util.strtobool(os.environ.get('LISTEN_ONLY')))
    if not listen_only:
        schedule.every(6).minutes.do(phase3_handler.process_comment_entries)

    debug = bool(util.strtobool(os.environ.get('DEBUG')))
    if debug:
        schedule.run_all()
Example #21
def run_periodic(interval: int = 1) -> None:
    log.info(f"Running periodic in intervals of {interval} minute")
    schedule.every(interval).minutes.do(app)
    time.sleep(1)
    schedule.run_all()
    while True:
        schedule.run_pending()
        sys.stdout.write(".")
        sys.stdout.flush()
        time.sleep(1)
Example #22
 def run(self):
     """
     Starts the scheduling
     """
     self._active = True
     # Run them all once at the beginning
     schedule.run_all()
     while self._active:
         schedule.run_pending()
         time.sleep(10)
Example #23
def monitor():
    print("[monitor] scheduler started!")
    schedule.every(2).hours.do(monitor_run,
                               monant_client_provider=monant_client,
                               fb_client_provider=fb_client)

    schedule.run_all()
    while True:
        schedule.run_pending()
        time.sleep(1)
Example #24
    def test_run_net_cmd_sup(self):
        cmd_up0 = get_net_cmds(self.bin_dir, 'fpn0', True)
        cmd_up1 = get_net_cmds(self.bin_dir, 'fpn1', True)

        every().second.do(run_net_cmd, cmd_up0).tag('net-change')
        every().second.do(run_net_cmd, cmd_up1).tag('net-change')

        self.assertEqual(len(schedule.jobs), 2)

        schedule.run_all(0, 'net-change')
        self.assertEqual(len(schedule.jobs), 0)
Example #25
    def test_cancel_jobs(self):
        def stop_job():
            return schedule.CancelJob

        every().second.do(stop_job)
        every().second.do(stop_job)
        every().second.do(stop_job)
        assert len(schedule.jobs) == 3

        schedule.run_all()
        assert len(schedule.jobs) == 0
Example #26
def main():
    logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                        level=LOGLEVEL)
    logging.getLogger('schedule').setLevel(logging.ERROR)

    hashes = {}
    schedule.every(INTERVAL).seconds.do(tick, hashes)
    schedule.run_all()
    while INTERVAL > 0 and not DEBUG:
        time.sleep(1)
        schedule.run_pending()
Example #28
def schedule():
    # note: this function shadows the schedule module, so it assumes
    # "from schedule import every, run_all, run_pending" and "from time import sleep"
    every(20).seconds.do(pipeline, steps)
    # every().hour.do(job)
    # every().day.at("10:30").do(job)
    # every(5).to(10).minutes.do(job)
    # every().monday.do(job)
    # every().wednesday.at("13:15").do(job)

    run_all()
    while True:
        run_pending()
        sleep(1)
Example #29
    def handle(self, flush, *args, **kwargs):
        schedule.every().monday.at('00:30').do(self.new_sheet_job)

        if flush:
            print "Flushing all scheduled jobs..."
            schedule.run_all()
            return

        print "Running schedule..."
        while True:
            schedule.run_pending()
            time.sleep(60)
Example #30
def maintain_scheduler(bot):
    logging.info("starting up scheduler")
    try:
        schedule.run_all()
        while not SHOULD_SHUT_DOWN:
            if bot.run_scheduler:
                schedule.run_pending()
            time.sleep(1)
    except Exception:
        import traceback
        traceback.print_exc()
    logging.info("shutting down scheduler")
Example #31
def main():
    logging_setup.setup()
    schedule_tasks()
    logging.info('Tasks scheduled')
    if config.get_do_backup_on_startup():
        logging.info('Running scheduled tasks...')
        schedule.run_all(1)
    logging.info('Tasks: %s' % schedule.jobs)
    while True:
        logging.info('Running pending tasks...')
        schedule.run_pending()
        time.sleep(30)
Example #32
    def test_send_wedged_no_responder(self):

        nodeState = AttrDict.from_nested_dict(self.state)
        fpn_id = nodeState.fpn_id
        mock_job = make_mock_job()
        tj = every().second.do(mock_job)
        send_wedged_msg()
        schedule.run_all()

        # expected command result is a list
        result = send_wedged_msg(self.addr)
        # print(result)
        self.assertEqual([], result)
Example #33
    def test_send_wedged_no_responder(self):

        nodeState = AttrDict.from_nested_dict(self.state)
        fpn_id = nodeState.fpn_id
        # expected command result is a list so the return
        # result for echo_client() is actually None
        mock_job = make_mock_job()
        tj = every().second.do(mock_job)
        send_wedged_msg()
        schedule.run_all()

        result = send_wedged_msg(self.addr)
        self.assertEqual([], result)
Example #34
 def test_job_info(self):
     with mock_datetime(2010, 1, 6, 14, 16):
         mock_job = make_mock_job(name='info_job')
         info_job = every().minute.do(mock_job, 1, 7, 'three')
         schedule.run_all()
         assert len(schedule.jobs) == 1
         assert schedule.jobs[0] == info_job
         assert repr(info_job)
         assert info_job.job_name is not None
         s = info_job.info
         assert 'info_job' in s
         assert 'three' in s
         assert '2010' in s
         assert '14:16' in s
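
mock_datetime is another helper from schedule's test suite; a rough sketch of the
context manager (assumed: it pins datetime.datetime.now() to a fixed moment):

import datetime

class mock_datetime(object):
    """Monkey-patch datetime.datetime so tests see a fixed 'now'."""

    def __init__(self, year, month, day, hour, minute):
        self.year, self.month, self.day = year, month, day
        self.hour, self.minute = hour, minute

    def __enter__(self):
        outer = self

        class MockDate(datetime.datetime):
            @classmethod
            def today(cls):
                return cls(outer.year, outer.month, outer.day)

            @classmethod
            def now(cls):
                return cls(outer.year, outer.month, outer.day,
                           outer.hour, outer.minute)

        self.original_datetime = datetime.datetime
        datetime.datetime = MockDate
        return MockDate(self.year, self.month, self.day,
                        self.hour, self.minute)

    def __exit__(self, *args):
        datetime.datetime = self.original_datetime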
Example #35
def main():
    # initialize garduino watcher
    arduino.run()

    ## schedule waits so do first run immediately
    # schedule.every(15).seconds.do(run_threaded, test_updates) # debugger
    # schedule.every(5).minutes.do(run_threaded, fiveminute_updates)
    schedule.every(15).minutes.do(run_threaded, fifteenminute_updates)
    schedule.every(12).hours.do(run_threaded, halfday_updates)
    schedule.run_all()
    # schedule requires zero-padded HH:MM times
    schedule.every().day.at('06:05').do(run_threaded, waterlevel_update)
    while True:
        schedule.run_pending()
        time.sleep(5)
Example #36
 def test_last_run_property(self):
     original_datetime = datetime.datetime
     with mock_datetime(2010, 1, 6, 13, 16):
         hourly_job = make_mock_job('hourly')
         daily_job = make_mock_job('daily')
         every().day.do(daily_job)
         every().hour.do(hourly_job)
         assert schedule.idle_seconds_since() is None
         schedule.run_all()
         assert schedule.last_run() == original_datetime(2010, 1, 6, 13, 16,
                                                         tzinfo=utc)
         assert schedule.idle_seconds_since() == 0
         schedule.clear()
         assert schedule.last_run() is None
Example #37
    def test_run_all_with_decorator(self):
        mock_job = make_mock_job()

        @repeat(every().minute)
        def _job1():
            mock_job()

        @repeat(every().hour)
        def _job2():
            mock_job()

        @repeat(every().day.at('11:00'))
        def _job3():
            mock_job()
        schedule.run_all()
        assert mock_job.call_count == 3
Example #38
 def test_clear_by_tag(self):
     every().second.do(make_mock_job(name='job1')).tag('tag1')
     every().second.do(make_mock_job(name='job2')).tag('tag1', 'tag2')
     every().second.do(make_mock_job(name='job3')).tag('tag3', 'tag3',
                                                       'tag3', 'tag2')
     assert len(schedule.jobs) == 3
     schedule.run_all()
     assert len(schedule.jobs) == 3
     schedule.clear('tag3')
     assert len(schedule.jobs) == 2
     schedule.clear('tag1')
     assert len(schedule.jobs) == 0
     every().second.do(make_mock_job(name='job1'))
     every().second.do(make_mock_job(name='job2'))
     every().second.do(make_mock_job(name='job3'))
     schedule.clear()
     assert len(schedule.jobs) == 0
Example #39
def schedule_updates():

    # EDGAR
    schedule.every(1).days.at("04:30").do(_crawler('sec-edgar'))

    schedule.every(1).days.at("01:00").do(_crawler('openoil-internal-documents'))
    # SEDAR
    # Sedar website stops updating at 11pm ET, i.e. 0500 CET
    # We start our scrape just after, at 0511 CET, and allow 3 hours for it to
    # upload
    schedule.every(1).days.at("08:00").do(_crawler('sedar-partial-content'))

    schedule.every(1).days.at("16:00").do(check_alerts)

    schedule.run_all()

    while True:
        schedule.run_pending()
        time.sleep(1)
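
schedule's .do() expects a callable, so _crawler here must be a factory that
returns one. A hypothetical sketch (the scrapy invocation is an assumption, not
taken from the source):

import subprocess

def _crawler(name):
    # bind a job function to a specific crawler name
    def job():
        subprocess.call(['scrapy', 'crawl', name])
    return job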
Example #40
    def test_cancel_job(self):
        def stop_job():
            return schedule.CancelJob
        mock_job = make_mock_job()

        every().second.do(stop_job)
        mj = every().second.do(mock_job)
        assert len(schedule.jobs) == 2

        schedule.run_all()
        assert len(schedule.jobs) == 1
        assert schedule.jobs[0] == mj

        schedule.cancel_job('Not a job')
        assert len(schedule.jobs) == 1
        schedule.default_scheduler.cancel_job('Not a job')
        assert len(schedule.jobs) == 1

        schedule.cancel_job(mj)
        assert len(schedule.jobs) == 0
Example #41
    def test_daily_job(self):
        zcml.load_string(self.zcml_template % '''
        <schedule:job
            view="dummy-view"
            unit="day"
            at="3:00"
            />
        ''')

        jobs = schedule.jobs
        self.assertEqual(len(jobs), 1)
        job = jobs[0]
        self.assertEqual(job.interval, 1)
        self.assertEqual(job.unit, 'days')
        self.assertEqual(job.at_time, datetime.time(3, 0))

        self.assertFalse(self.request.get(VIEW_MARKER))

        schedule.run_all()

        self.assertTrue(self.request.get(VIEW_MARKER))
Example #42
def main():

    args = parse_cmd_args()
    logger.setLevel(logging.__dict__[args.verbosity.upper()])

    if args.log_file:
        file_handler = logging.handlers.TimedRotatingFileHandler(
            args.log_file, when='D')
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

    logger.info("Beginning rendezvous circuit monitoring."
                "Status output every %d seconds", args.tick)

    with Controller.from_port(port=args.port) as controller:
        # Create a connection to the Tor control port
        controller.authenticate()

        # Add event listeners for circuit events (CIRC and CIRC_MINOR)
        controller.add_event_listener(circ_event_handler,
                                      stem.control.EventType.CIRC)
        controller.add_event_listener(circ_event_handler,
                                      stem.control.EventType.CIRC_MINOR)

        # Schedule rendezvous status output.
        schedule.every(args.tick).seconds.do(output_status, controller)
        schedule.run_all()

        try:
            while True:
                schedule.run_pending()
                time.sleep(1)
        except KeyboardInterrupt:
            logger.info("Stopping rendezvous circuit monitoring.")

    sys.exit(0)
Example #43
        parsed_items_count = len(obj_ids)

        if parsed_items_count < max_item:
            itera = range(0, parsed_items_count)
        else:
            itera = range(parsed_items_count - max_item, parsed_items_count)

        for i in itera:
            # [6:] remove "entry-" from id value
            obj_id = obj_ids[i].get('id')[6:]
            # cursor.execute("INSERT INTO RSS (fileid, filename, filedate, source)  SELECT (%s, %s, %s, %s) WHERE NOT EXISTS (SELECT * FROM RSS WHERE fileid=%s);", (obj_ids[i].get('id')[6:], obj_names[i], str(datetime.now()), rss,))
            cursor.execute("SELECT id FROM RSS WHERE fileid = %s;", (obj_id,))
            if not cursor.fetchone():
                cursor.execute("SELECT count(*) FROM RSS WHERE source = %s;", (rss,))
                if int(get_config(rss)['max_item']) == int(cursor.fetchone()[0]):
                    print("Limit reached, deleting oldest item from " + rss)
                    cursor.execute("DELETE FROM rss WHERE ctid in (SELECT ctid FROM rss WHERE source = %s ORDER BY filedate LIMIT 1);", (rss,))
                    #db.commit()
                print(obj_id + " - " + obj_names[i] + " - " + str(datetime.now()) + " - " + rss)
                cursor.execute("INSERT INTO RSS (fileid, filename, filedate, source) VALUES (%s, %s, %s, %s);", (obj_id, obj_names[i], str(datetime.now()), rss,))
        db.commit()

for rss in rss_list:
    schedule.every(float(get_config(rss)['check'])).minutes.do(update, rss)

schedule.run_all(10)

while 1:
    schedule.run_pending()
    time.sleep(1)
Example #44
def main():
    """
    Entry point when invoked over the command line.
    """
    args = parse_cmd_args().parse_args()
    config_file_options = settings.parse_config_file(args.config)

    # Update global configuration with options specified in the config file
    for setting in dir(config):
        if setting.isupper() and config_file_options.get(setting):
            setattr(config, setting, config_file_options.get(setting))

    # Override the log level if specified on the command line.
    if args.verbosity:
        config.LOG_LEVEL = args.verbosity.upper()

    # Write log file if configured in environment variable or config file
    if config.LOG_LOCATION:
        log.setup_file_logger(config.LOG_LOCATION)

    logger.setLevel(logging.__dict__[config.LOG_LEVEL.upper()])

    # Create a connection to the Tor control port
    try:
        controller = Controller.from_port(address=args.ip, port=args.port)
    except stem.SocketError as exc:
        logger.error("Unable to connect to Tor control port: %s", exc)
        sys.exit(1)
    else:
        logger.debug("Successfully connected to the Tor control port.")

    try:
        controller.authenticate()
    except stem.connection.AuthenticationFailure as exc:
        logger.error("Unable to authenticate to Tor control port: %s", exc)
        sys.exit(1)
    else:
        logger.debug("Successfully authenticated to the Tor control port.")

    # Disable no-member due to bug with "Instance of 'Enum' has no * member"
    # pylint: disable=no-member

    # Check that the Tor client supports the HSPOST control port command
    if not controller.get_version() >= stem.version.Requirement.HSPOST:
        logger.error("A Tor version >= %s is required. You may need to "
                     "compile Tor from source or install a package from "
                     "the experimental Tor repository.",
                     stem.version.Requirement.HSPOST)
        sys.exit(1)

    # Load the keys and config for each onion service
    settings.initialize_services(controller,
                                 config_file_options.get('services'))

    # Finished parsing the config file.

    handler = eventhandler.EventHandler()
    controller.add_event_listener(handler.new_desc,
                                  EventType.HS_DESC)
    controller.add_event_listener(handler.new_desc_content,
                                  EventType.HS_DESC_CONTENT)

    # Schedule descriptor fetch and upload events
    schedule.every(config.REFRESH_INTERVAL).seconds.do(
        onionbalance.instance.fetch_instance_descriptors, controller)
    schedule.every(config.PUBLISH_CHECK_INTERVAL).seconds.do(
        onionbalance.service.publish_all_descriptors)

    try:
        # Run initial fetch of HS instance descriptors
        schedule.run_all(delay_seconds=30)

        # Begin main loop to poll for HS descriptors
        while True:
            schedule.run_pending()
            time.sleep(1)
    except KeyboardInterrupt:
        logger.info("Keyboard interrupt received. Stopping the "
                    "management server.")
    return 0
Example #45
    if table['name'] not in r.table_list().run():
        logging.info('Creating table %s' % table['name'])
        if 'primary' in table:
            r.table_create(table['name'], primary_key=table['primary']).run()
        else:
            r.table_create(table['name']).run()
    if 'indexes' not in table: continue
    indexes = set(r.table(table['name']).index_list().run())
    indexes = set(table['indexes']) - indexes
    for index in indexes:
        logging.info('Creating index %s on %s' % (index, table['name']))
        r.table(table['name']).index_create(index).run()
r.wait()
connection.close()
logging.info('Finished preparing')

# https://github.com/dbader/schedule/issues/55
logging.info('Started scheduling jobs')
schedule.every().day.at('00:00').do(lambda: logRecentScrape('launched', 24 * 60 + 5))
schedule.every().day.at('00:05').do(lambda: logRecentScrape('funded', 24 * 60 + 5))
schedule.every().day.at('00:10').do(scrapeLive)
if os.environ.get('PROD') is None:
    logging.info('Running all jobs and exiting')
    schedule.run_all(0)
    sys.exit(0)
logging.info('Finished scheduling jobs')

while True:
    schedule.run_pending()
    time.sleep(1)
Example #46
            locations.append(recent_location)

    return set(locations)


def log_run_metrics(cycle_start_time, recent_media_added, users_added, users_updated, user_recent_media_added):
    print('\nDONE: inserting new records into database at %s.' % datetime.datetime.now())
    print('Inserted: RecentMedia: %d -- Users Added: %d -- Users Updated: %d -- UserRecentMedia: %d'
          % (recent_media_added, users_added, users_updated, user_recent_media_added))
    print('Cycle run time taken: %s' % (datetime.datetime.now() - cycle_start_time))
    print('Process start time was: %s' % process_start_time)
    print('Process run time currently: %s' % (datetime.datetime.now() - process_start_time))


if __name__ == '__main__':
    api = get_instagram_api()

    # Open cluster connection to the raw keyspace, and build/connect to our tables
    open_cassandra_session()

    # Schedule our job, and begin it, sleeping for 120 seconds between each job before rerunning
    print('Scheduling job for every 140 seconds, time is %s.' % datetime.datetime.now())
    schedule.every(140).seconds.do(produce_raw_layer)

    # Process start time
    process_start_time = datetime.datetime.now()

    while True:
        schedule.run_all()
        time.sleep(120)
Example #47
 def test_job_func_args_are_passed_on(self):
     mock_job = make_mock_job()
     every().second.do(mock_job, 1, 2, 'three', foo=23, bar={})
     schedule.run_all()
     mock_job.assert_called_once_with(1, 2, 'three', foo=23, bar={})
Example #48
def main():
  schedule.every().minute.do(fetch_kobe_follows)
  schedule.run_all()
  while True:
    schedule.run_pending()
    time.sleep(1)