Example #1
def main():
    logging.info("Starting application")
    capture_nature()
    schedule.every(5).minutes.do(upload_to_gdrive)
    while True:
        schedule.run_pending()
        time.sleep(1)
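The blocking run loop above is the library's canonical pattern. When the main thread must stay free for other work, a common variation (a minimal sketch, not taken from any of these projects) polls the scheduler from a daemon thread:

import threading
import time

import schedule


def run_scheduler_in_background(poll_seconds=1):
    """Poll schedule.run_pending() from a daemon thread."""
    def loop():
        while True:
            schedule.run_pending()
            time.sleep(poll_seconds)

    thread = threading.Thread(target=loop, daemon=True)
    thread.start()
    return thread

The daemon flag lets the process exit without having to join the polling thread.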
Example #2
def SetFridaySchedule():
    FileMain("Setting friday schedule")

    sql_args = [
        "room_schedules_start_time, room_schedules_end_time",
        "room_schedules",
        "room_schedules_day like '%{}%'".format("Fri"),
    ]

    room_schedules_result = SQLMain(sql_args, 1)

    if room_schedules_result != 0:
        for row in room_schedules_result:
            # Stored values are integer minutes since midnight; use integer
            # division and zero-pad so schedule.at() gets a valid "HH:MM".
            hour_start_time = row[0] // 60
            minute_start_time = row[0] % 60
            start_time = "{:02d}:{:02d}".format(hour_start_time, minute_start_time)

            hour_end_time = row[1] // 60
            minute_end_time = row[1] % 60
            end_time = "{:02d}:{:02d}".format(hour_end_time, minute_end_time)

            schedule.every().friday.at(start_time).do(ChangeStatusToOn)

            schedule.every().friday.at(end_time).do(ChangeStatusToOff)

    return
Example #3
    def test_run_every_n_days_at_specific_time(self):
        mock_job = make_mock_job()
        with mock_datetime(2010, 1, 6, 11, 29):
            every(2).days.at('11:30').do(mock_job)
            schedule.run_pending()
            assert mock_job.call_count == 0

        with mock_datetime(2010, 1, 6, 11, 31):
            schedule.run_pending()
            assert mock_job.call_count == 0

        with mock_datetime(2010, 1, 7, 11, 31):
            schedule.run_pending()
            assert mock_job.call_count == 0

        with mock_datetime(2010, 1, 8, 11, 29):
            schedule.run_pending()
            assert mock_job.call_count == 0

        with mock_datetime(2010, 1, 8, 11, 31):
            schedule.run_pending()
            assert mock_job.call_count == 1

        with mock_datetime(2010, 1, 10, 11, 31):
            schedule.run_pending()
            assert mock_job.call_count == 2
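The mock_datetime helper this test relies on is not shown in the excerpt. A minimal sketch of what it must do, freezing datetime.datetime.now() for the duration of the with-block (the helper in schedule's own test suite may be implemented differently):

import datetime as real_datetime_module
from contextlib import contextmanager
from unittest import mock


@contextmanager
def mock_datetime(year, month, day, hour, minute):
    """Freeze datetime.datetime.now() at the given moment."""
    frozen = real_datetime_module.datetime(year, month, day, hour, minute)

    class FrozenDateTime(real_datetime_module.datetime):
        @classmethod
        def now(cls, tz=None):
            return frozen

    with mock.patch('datetime.datetime', FrozenDateTime):
        yield frozen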
Example #4
def RequestConsumption():
	global frame_id
	global rooms_id
	global frame_sending_attempts
	global room_consumption_frame

	now = datetime.datetime.now()
	time_now = (int(now.strftime("%H")) * 60) + int(now.strftime("%M"))
	day = now.strftime("%a")
	date = now.strftime("%Y-%m-%d")

	sql.SelectColumn("distinct rooms.rooms_address, rooms.rooms_id")
	sql.FromTable("room_schedules")
	sql.JoinTable([["rooms", "rooms.rooms_id=room_schedules.rooms_id"]])
	sql.WhereCondition("(room_schedules.room_schedules_day like '%{}%' or room_schedules.room_schedules_date='{}') and room_schedules.room_schedules_end_time={}".format(day, date, time_now))
	room_schedules_result = sql.FetchAll()

	# Iterate without shadowing the schedule module; the SELECT returns
	# (rooms_address, rooms_id), and the frame belongs in the declared
	# global room_consumption_frame.
	for row in room_schedules_result:
		rooms_id = row[1]
		frame_id = GetFrameID(sql)
		room_addresses_data = GetRoomAddresses(sql, row[0])
		frame_data = "{} {} 0".format(room_addresses_data, frame_id)

		room_consumption_frame = CreateFrame(frame_data, 4)

		frame_sending_attempts = 1
		schedule.every(1).seconds.do(AcknowledgementTimer)

		SendFrame()
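AcknowledgementTimer is not shown in these excerpts. A plausible sketch of the retry loop the surrounding code implies, where AckReceived and MAX_ATTEMPTS are hypothetical names: resend the frame once per second until an acknowledgement arrives or the attempts run out, then return schedule.CancelJob (real schedule API) so the scheduler drops the job.

def AcknowledgementTimer():
    global frame_sending_attempts
    # AckReceived() and MAX_ATTEMPTS are assumptions, not part of the
    # original code.
    if AckReceived() or frame_sending_attempts >= MAX_ATTEMPTS:
        return schedule.CancelJob  # returning CancelJob removes this job
    frame_sending_attempts += 1
    SendFrame()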
Example #5
def main(arguments=None):
    '''Runs thumbor server with the specified arguments.'''
    if arguments is None:
        arguments = sys.argv[1:]

    server_parameters = get_server_parameters(arguments)
    config = get_config(server_parameters.config_path)
    configure_log(config, server_parameters.log_level.upper())

    validate_config(config, server_parameters)

    importer = get_importer(config)

    with get_context(server_parameters, config, importer) as context:
        application = get_application(context)
        run_server(application, context)

        if config.GC_INTERVAL and config.GC_INTERVAL > 0:
            schedule.every(config.GC_INTERVAL).seconds.do(gc_collect)

        try:
            logging.debug('thumbor running at %s:%d' % (context.server.ip, context.server.port))
            tornado.ioloop.IOLoop.instance().start()
        except KeyboardInterrupt:
            sys.stdout.write('\n')
            sys.stdout.write("-- thumbor closed by user interruption --\n")
Example #6
def setup():
    '''
    Configure schedule
    '''
    # back up the database every day
    schedule.every(1).days.do(backup_db_add)
    schedule.every(1).days.do(backup_db_remove)
Example #7
def run_or_schedule(job, schedule=False, exception_handler=None):
    """Runs a job and optionally schedules it to run later

    Args:
        job (func): The func to run
        schedule (bool): Schedule `func` to run in the future (default: False)
        exception_handler (func): The exception handler to wrap the function in
            (default: None)

    Examples:
        >>> job = partial(pprint, 'hello world')
        >>> run_or_schedule(job)
        u'hello world'
        >>> exception_handler = ExceptionHandler('*****@*****.**').handler
        >>> run_or_schedule(job, False, exception_handler)
        u'hello world'
    """
    if exception_handler and schedule:
        job = exception_handler(job)

    job()

    if schedule:
        sch.every(1).day.at(SCHEDULE_TIME).do(job)

        while True:
            sch.run_pending()
            time.sleep(1)
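The ExceptionHandler referenced in the docstring is not defined in the excerpt. A minimal sketch of the shape its .handler method must have, assuming the real class emails the given address rather than printing:

import functools


class ExceptionHandler(object):
    def __init__(self, email):
        self.email = email

    def handler(self, job):
        @functools.wraps(job)
        def wrapped(*args, **kwargs):
            try:
                return job(*args, **kwargs)
            except Exception as err:
                # The real implementation presumably emails self.email here.
                print('would notify %s: %r' % (self.email, err))

        return wrapped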
Example #8
 def schedule_with_delay(self):
     for task in self.tasks:
         interval = task.get('interval')
         schedule.every(interval).minutes.do(self.schedule_task_with_lock, task)
     while True:
         schedule.run_pending()
         time.sleep(1)
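schedule_task_with_lock is likewise not shown. A plausible sketch, assuming its purpose is to keep a slow task from overlapping its own next scheduled run; the task['func'] callable is a guessed shape for the task dict:

import threading


class LockedRunner(object):
    def __init__(self):
        self._lock = threading.Lock()

    def schedule_task_with_lock(self, task):
        if not self._lock.acquire(False):
            return  # previous run still in progress; skip this tick
        try:
            task['func']()  # hypothetical: the task dict carries a callable
        finally:
            self._lock.release()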
Example #9
def main():
	
	global thingiverse
	global twitter
	auto_mode = True

	if DEBUG: print 'welcome'
	thingiverse.DEBUG = False
	thingiverse.txt_url_mode = False
	thingiverse.connect()
	print api.VerifyCredentials().name
	print '\n\nThingisimilar\n'

	schedule.every(2).minutes.do(exploring)


	if auto_mode:
		#main_loop()

		exploring()
		while True:
			schedule.run_pending()
			sleep(1.0)

	else:
		#while True:
		#num1 = raw_input('#1 --> ')
		#num2 = raw_input('#2 -->')
		num1 = test_things[0]
		num2 = test_things[4]
		standard_job(int(num1), int(num2))
Example #10
def sched():
    """Makes scheduling of download-create-playback cycles."""
    log.debug("[smp][.] Read schedule file: %s" % SCHEDULE_PATH)

    with open(SCHEDULE_PATH) as fp:
        time_table = fp.read().split()

    config = configparser.ConfigParser()
    config.read(PLAYER_CONFIG_PATH)

    minutes_offset = int(config['simple_media_player']['launch_time_offset'])

    now = str(datetime.now().date())

    smp = SimpleMediaPlayer()

    for entry in time_table:
        tp = datetime.strptime(now + ' ' + entry, '%Y-%m-%d %H:%M')
        shifted_time = tp - timedelta(minutes=minutes_offset)

        # start earlier to leave time to create the video and handle issues
        actual_start = ':'.join(str(shifted_time.time()).split(':')[:2])

        schedule.every().day.at(actual_start).do(smp.run, tp)
        log.debug("[smp][.] Job scheduled on %s to be ended at %s"
                  % (actual_start, entry))

    log.debug("[smp][.] Start scheduling loop...")
    while True:
        schedule.run_pending()
        time.sleep(1)
Example #11
    def prepare_jobs(self, jobs):
        suffixed_names = {
            'week': 'weekly',
            'day': 'daily',
            'hour': 'hourly',
            'minute': 'minutes',
            'second': 'seconds',
        }
        for job in jobs:
            if not job.enabled:
                continue

            interval_name = job.time_unit.lower()
            if job.interval > 0: # There can't be a job less than 0 (0 minutes? 0 seconds?)
                plural_interval_name = interval_name + 's'
                d = getattr(schedule.every(job.interval), plural_interval_name)
                d.do(self.run_job, job)
                Log.info("  Loading %s job: %s.", suffixed_names[interval_name], job.name)
            elif interval_name == 'day':
                schedule.every().day.at(job.at_time).do(self.run_job, job)
                Log.info("  Loading time-based job: " + job.name)
            else:
                d = getattr(schedule.every(), interval_name)
                d.do(self.run_job, job)
                Log.info("  Loading %s job: %s", interval_name, job.name)
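The Job objects consumed by prepare_jobs are not shown in the excerpt. A minimal stand-in carrying the attributes the method reads (enabled, time_unit, interval, at_time, name); the real model is presumably an ORM class:

from collections import namedtuple

Job = namedtuple('Job', 'enabled time_unit interval at_time name')

jobs = [
    Job(enabled=True, time_unit='Minute', interval=15, at_time=None, name='poll'),
    Job(enabled=True, time_unit='Day', interval=0, at_time='03:30', name='backup'),
]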
Example #12
    def handle(self, *args, **options):
        schedule.every().week.do(_fetch_mozillians)
        schedule.every().week.do(_fetch_countries)

        while True:
            schedule.run_pending()
            time.sleep(3600)
Example #13
def rules(cube, scheduler_type='minutes', scheduler_interval=59,
          dashboard=None):
    if scheduler_type:
        scheduler_type = cube.get('scheduler_type', 'minutes')
    if scheduler_interval:
        scheduler_interval = cube.get('scheduler_interval', 59)

    log_it("START REGISTER", "bin-scheduler")
    log_it("cube: {}".format(cube.get('slug')), "bin-scheduler")
    log_it("type: {}".format(scheduler_type), "bin-scheduler")
    log_it("interval: {}".format(scheduler_interval), "bin-scheduler")
    log_it("END REGISTER", "bin-scheduler")

    t = {}
    if scheduler_type == 'minutes':
        env = schedule.every(int(scheduler_interval))
        t = env.minutes
    elif scheduler_type == 'hour':
        env = schedule.every()
        t = env.hour
    elif scheduler_type == 'day':
        env = schedule.every()
        t = env.day

    try:
        t.do(job, slug=cube.get('slug'))
        jobn = cube.get("slug")
        if dashboard:
            jobn = u"{}-{}".format(cube.get("slug"), dashboard)
        onrun[jobn] = env
        register.append(jobn)
    except Exception, e:
        log_it("ERROR {}: {}".format(cube.get('slug'), e))
Example #14
def SetDailySchedule():
	FileMain("Setting daily schedule")

	date = datetime.datetime.now()
	date = date.strftime("%Y-%m-%d")

	sql_args = ["room_schedules_start_time, room_schedules_end_time", "room_schedules", "room_schedules_date='{}'".format(date)]

	room_schedules_result = SQLMain(sql_args, 1)

	if room_schedules_result != 0:
		for row in room_schedules_result:
			# Minutes since midnight -> zero-padded "HH:MM" for schedule.at().
			hour_start_time = row[0] // 60
			minute_start_time = row[0] % 60
			start_time = "{:02d}:{:02d}".format(hour_start_time, minute_start_time)

			hour_end_time = row[1] // 60
			minute_end_time = row[1] % 60
			end_time = "{:02d}:{:02d}".format(hour_end_time, minute_end_time)

			schedule.every().day.at(start_time).do(DailyChangeOfStatusOn)

			schedule.every().day.at(end_time).do(DailyChangeOfStatusOff)

	return
Example #15
 def schedule_raw(self, df_rawsources):
     # Iterate through all sources with 'raw' type
     for index, source in df_rawsources.iterrows():
         print "[SCHEDULER] Working with raw source: ",source['name']
         updateFrequency = source['updateFrequency']
         print "[SCHEDULER] Update frequency is <",updateFrequency,">"
         updates = source['updates']
         if len(updates) > 0:
             # Get the most recent update
             lastUpdate = dp.parse(updates[0]['createdAt'])
             # Get the current time in seconds
             now = int(round(time.time()))
             # If time between now and the last update is greater than the
             # update interval, schedule the event
             if now - int(lastUpdate.strftime('%s')) > updateFrequency:
                 source_id = source['_id']
                 print "[SCHEDULER] Scheduling source <", source['name'], "> with id <", source_id, ">"
                 schedule.every(updateFrequency).seconds.do(self.process_raw, source_id)
     # Process all scheduled items
     while True:
         schedule.run_pending()
         time.sleep(1)
Example #16
def add_group(group_id):
    print(group_id)
    type = request.args.get('type')
    if type == "group":
        group = groupy.Group.list().filter(id=group_id).first
    elif type == "member":
        group = groupy.Member.list().filter(user_id=group_id).first
    if not group:
        return render_template(
            "layout.html", message="Error! Group ID not found."), 404
    if group_id in group_jobs:
        return render_template(
            "layout.html",
            message="Error! Group already added.")
    schedule.every(1).minutes.do(
        handle_update_group_async,
        group_id=group_id,
        type=type,
        lock=threading.Lock())
    group_jobs.append(group_id)
    schedule.run_all()
    if type == "group":
        return render_template(
            "layout.html",
            message="Fetching group history, please wait. <br> Number of messages: {0}. <br> Estimated time for processing: {1}.".format(
                group.message_count,
                verbose_timedelta(
                    timedelta(
                        seconds=group.message_count /
                        100 *
                        1.1))))
    elif type == "member":
        return render_template(
            "layout.html",
            message="Fetching message history, please wait.")
Example #17
def main():
    port = "5918"
    if len(sys.argv) > 1:
        port = sys.argv[1]
    
    socket = initiate_zmq(port)
    logging.basicConfig(filename='./log/ingest_lottery.log', level=logging.INFO)
    tz = pytz.timezone(pytz.country_timezones('cn')[0])
    schedule.every(30).seconds.do(run, socket, tz)
    while True:
        try:
            schedule.run_pending()
            time.sleep(1)
        except KeyboardInterrupt:
            now = datetime.now(tz)
            message = "CTRL-C to quit the program at [%s]" % now.isoformat()
            logging.info(message)
            break
        except Exception as e:
            now = datetime.now(tz)
            message = "Error at time  [%s]" % now.isoformat()
            logging.info(message)
            logging.info(e)
            # reschedule the job
            schedule.clear()
            socket = initiate_zmq(port)
            schedule.every(30).seconds.do(run, socket, tz)
Example #18
    def load_feeds(self):
        schedule.clear()
        feeds = {}
        self.logging.debug("Getting List of Feeds")
        files = os.listdir(cif.options.feed_directory)
        feed_files = []
        for file in files:
            if file.endswith(".yml"):
                self.logging.debug("Found Feed File: {0}".format(file))
                feed_files.append(os.path.join(cif.options.feed_directory, file))

        feed_files.sort()
        for feed_file in feed_files:
            self.logging.info("Loading Feed File: {0}".format(feed_file))
            feeds[feed_file] = cif.feeder.Feed(feed_file)
            self.logging.info("Scheduling Feed File: {0}".format(feed_file))
            if 'feeds' not in feeds[feed_file].feed_config:
                self.logging.info("{0} does not contain feeds key".format(feed_file))
                continue
            for feed_name in feeds[feed_file].feed_config['feeds'].keys():
                if "interval" in feeds[feed_file].feed_config['feeds'][feed_name]:
                    if feeds[feed_file].feed_config['feeds'][feed_name]['interval'] == "hourly":
                        self.logging.info(repr(schedule.every().hour.at("00:00").do(feeds[feed_file].process, feed_name)))
                    elif feeds[feed_file].feed_config['feeds'][feed_name]['interval'] == "daily":
                        self.logging.info(repr(schedule.every().day.at("00:00").do(feeds[feed_file].process, feed_name)))
                    elif feeds[feed_file].feed_config['feeds'][feed_name]['interval'] == "weekly":
                        self.logging.info(repr(schedule.every().week.do(feeds[feed_file].process, feed_name)))
                else:
                    self.logging.info(repr(schedule.every(1).minute.do(feeds[feed_file].process, feed_name)))
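Note that the blanket schedule.clear() at the top of load_feeds also drops any jobs other components registered on the shared scheduler. The schedule library supports tagging jobs for scoped clearing; a brief sketch, where the .tag() and clear(tag) calls are real schedule API but the job itself is hypothetical:

schedule.every().hour.do(some_feed_job).tag('feeds')  # some_feed_job is hypothetical
schedule.clear('feeds')  # removes only the jobs tagged 'feeds'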
Example #19
 def run(self):
     sendHour = str(self.parameters.get('maillist', 'sendhour'))
     print(sendHour)
     schedule.every().day.at(sendHour).do(self.sendMail)
     while True:
         schedule.run_pending()
         time.sleep(5)
Example #20
    def run(self, path_local_log=None, branch='next', sched='false', launch_pause='false'):
        """
        :param str path_local_log: Path to the local log file copied from the remote server. If ``None``, do not copy
         remote log file.
        :param str branch: Target git branch to test.
        :param str sched: If ``'true'``, run tests daily at 06:00. Otherwise, run tests only once.
        :param str launch_pause: If ``'true'``, pause at a breakpoint after launching the instance and mounting the data
         volume. Continuing from the breakpoint will terminate the instance and destroy the volume.
        """

        import schedule
        from logbook import Logger

        self.log = Logger('nesii-testing')

        self.path_local_log = path_local_log
        self.branch = branch
        self.launch_pause = launch_pause

        if self.launch_pause == 'true':
            self.log.info('launching instance then pausing')
            self._run_tests_(should_email=False)
        else:
            if sched == 'true':
                self.log.info('begin continuous loop')
                schedule.every().day.at("06:00").do(self._run_tests_, should_email=True)
                while True:
                    schedule.run_pending()
                    time.sleep(1)
            else:
                self.log.info('running tests once')
                self._run_tests_(should_email=True)
Example #21
def process(run_once=False):
    """
    Runs the processing loop until running_event is cleared (e.g. by Ctrl+C),
    or just once if run_once is True.
    :param run_once: bool
    :return: None
    """
    print("Starting qshape processing")

    # handle ctrl+c
    print('Press Ctrl+C to exit')
    running_event = threading.Event()
    running_event.set()
    def signal_handler(signum, frame):  # avoid shadowing the signal module
        print('Attempting to close workers')
        running_event.clear()
    signal.signal(signal.SIGINT, signal_handler)

    def report_stats():
        with statsd.pipeline() as pipe:
            for stat, value in get_qshape_stats():
                pipe.incr(stat, value)

    report_stats()  # report current metrics and schedule them to the future
    if not run_once:
        schedule.every(STATSD_DELAY).seconds.do(report_stats)
        while running_event.is_set():
            schedule.run_pending()
            time.sleep(0.1)
    print("Finished qshape processing")
Example #22
    def start(self):

        def make_reboot():
            if not self.alarm.alarm_started:
                os.system("shutdown -r now")

        schedule.every().monday.at("05:00").do(make_reboot)
Example #23
def dynamically_scrape_and_append_sales_data(filename,
                                             interval,
                                             num_retries = 10):
    """
    Dynamically scrapes sales data and appends the data to a file by generating
    a list of links, checking it against an old list and only keeping new links,
    and scraping those links for sales data.
    """

    old_list = []

    def job(old_list):
        new_list = collect_all_featured_links()
        new_links = remove_old_links(old_list, new_list)
        bad_links = collect_bad_links(new_links)
        clean_links = remove_bad_links_from_link_list(bad_links, new_links)

        scrape_and_append_sales_data_from_featured_links(filename,
                                                         clean_links,
                                                         num_retries)

        # Mutate the shared list in place; rebinding the parameter name
        # would not update the caller's old_list between runs.
        old_list[:] = new_list

    job(old_list)
    schedule.every(interval).hours.do(job, old_list)

    while True:
        schedule.run_pending()
        time.sleep(30)

    print "Dynamic scraping finished"
Example #24
    def routine(self):
        # install schedule
        for entity in self.entities:
            pieces = entity.getschedule().split(" ")
            if re.match(r"^\d+$", pieces[1]):
                every = schedule.every(int(pieces[1]))
                pieces = pieces[2:]
            else:
                every = schedule.every()
                pieces = pieces[1:]

            timedes = getattr(every, pieces[0])
            pieces = pieces[1:]

            if len(pieces) and pieces[0] == "at":
                finish = timedes.at(pieces[1])
            else:
                finish = timedes

            finish.do(self.monitor, entity)

        while True:
            time.sleep(1)
            schedule.run_pending()
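From the parsing logic above, the schedule strings returned by entity.getschedule() appear to take one of two shapes (an inference from this excerpt, not documented behavior):

# "every 10 minutes"    -> schedule.every(10).minutes.do(...)
# "every day at 07:30"  -> schedule.every().day.at("07:30").do(...)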
Example #25
def main():
    args = parser.parse_args()

    log = logging.getLogger()
    log.level = logging.INFO
    stream = logging.StreamHandler()
    file_handler = logging.FileHandler(args.logfile)
    log.addHandler(stream)
    log.addHandler(file_handler)

    with open(args.config) as f:
        config = yaml.safe_load(f)

    log.info('Connecting to database')
    database = connect_to_database(**config['mongodb'])
    log.info('Connection established')

    services = [
        service(auxdir=args.auxdir)
        for service in supported_services.values()
    ]

    schedule.every().day.at('15:00').do(
        fill_last_night, services=services, database=database
    )

    log.info('Schedule started')
    try:
        while True:
            schedule.run_pending()
            sleep(60)
    except (KeyboardInterrupt, SystemExit):
        pass
Example #26
def scheduler_init (parent):
    '''
        Schedule Init

        Start the main loop for the internal scheduler that
        ticks every second.

        --
        @param  parent:int  The PID of the parent.

        @return void
    '''

    # Define the jobs to run at which intervals
    schedule.every().minute.do(Reminder.run_remind_once)
    schedule.every().minute.do(Reminder.run_remind_recurring)

    # Start the main thread, polling the schedules
    # every second
    while True:

        # Check if the current parent pid matches the original
        # parent that started us. If not, we should end.
        if os.getppid() != parent:
            logger.error(
                'Killing scheduler as it has become detached from parent PID.')

            sys.exit(1)

        # Run the schedule
        schedule.run_pending()
        time.sleep(1)

    return
Example #27
def dynamically_scrape_combined_data(data_filename,
                                     sales_filename,
                                     interval,
                                     num_retries = 10):
    """
    Dynamically scrapes a continuously updated list of unique clean links and
    appends the data to their respective files.
    """

    old_list = []

    def job(old_list):
        new_list = collect_all_featured_links()
        new_links = remove_old_links(old_list, new_list)
        bad_links = collect_bad_links(new_links)
        clean_links = remove_bad_links_from_link_list(bad_links, new_links)

        scrape_combined_data_from_all_featured_products(data_filename,
                                                        sales_filename,
                                                        clean_links,
                                                        num_retries)

        # Mutate in place so the next run sees the updated list (see the
        # note in Example #23).
        old_list[:] = new_list

    job(old_list)
    schedule.every(interval).hours.do(job, old_list)

    while True:
        schedule.run_pending()
        time.sleep(30)

    print "Dynamic scraping finished"
Example #28
def ChangePort(roomID):
	global frame_id
	global frame_sending_attempts
	global port_frame

	sql.GetWhereQuery("rooms", "rooms_id={}".format(roomID))
	sql.SelectColumn("rooms_port, rooms_address")
	rooms_result = sql.FetchOne()

	sql.GetWhereQuery("room_devices", "rooms_id={}".format(roomID))
	sql.SelectColumn("room_devices_port")
	room_devices_result = sql.FetchAll()

	port_data = "{} {}".format(rooms_result[0], len(room_devices_result))
	for device in room_devices_result:
		port_data = "{} {}".format(port_data, device[0])

	room_addresses_data = GetRoomAddresses(sql, rooms_result[1])
	frame_id = GetFrameID(sql)

	frame_data = "{} {} {}".format(room_addresses_data, frame_id, port_data)

	port_frame = CreateFrame(frame_data, 12)

	frame_sending_attempts = 1
	schedule.every(1).seconds.do(AcknowledgementTimer)

	SendFrame()
Example #29
def watch():
    # set up argument parsing
    parser = example.BigFixArgParser()
    parser.add_argument('-a', '--actions', required = False, help = 'List of actions to watch')
    parser.add_argument('-v', '--verbose', default = False, action = "store_true", required = False, help = 'To see the full list of commands that contain watched actions')
    parser.add_argument('-t', '--time', default = 60, required = False, help = 'To set the waiting period')
    parser.base_usage += """
  -a, --actions [ACTIONS/FILENAME]   Specify a list of actions to watch, separated by comma (,);
                                     if FILENAME with .wal extension detected, will read the file to get the list.
  -v, --verbose                      To see the full list of commands that contain watched actions
  -t, --time [MINUTE]                A number specifying the waiting period between checks"""
    
    parser.description = 'Used to watch certain actions'
    ext = ".wal"
    args = parser.parse_args()
    args_actions = ""
    if ext in args.actions:
        actions_file = open(args.actions, 'r')
        for line in actions_file:
            args_actions += line
    else:
        args_actions = args.actions
    actions_list = args_actions.split(",")

    watched_actions = gen_regex(actions_list)
    action_record = {}
    for a in actions_list:
        action_record[a] = False

    t = int(args.time)
    gen_summary(action_record, watched_actions, args)
    schedule.every(t).minutes.do(gen_summary, action_record, watched_actions, args)
    while True:
        schedule.run_pending()
        time.sleep(1)  # avoid a busy-wait
Example #30
  def run(self):
    logging.info('CrawlerDaemon run')
    sqlite_session = get_session( self.config.database )
    orm_engines = sqlite_session.query( ORM_Engine ).all()

    if not self.config.dry_run:
      if len( orm_engines ) == 0:
        logging.debug( 'Crawler has no engines' )
         
      # Start controllers in each thread 
      for orm_engine in orm_engines:
        logging.info('Load orm_engine: %s' % orm_engine.name )
        engine = Engine( orm_engine )
        self.controllers[ engine.name ] = Controller( engine, sqlite_session ) 
        self.controllers[ engine.name ].start()
      
      # Start scheduling searches 
      for orm_search in sqlite_session.query( Search ).all():
        for engine in orm_search.engines:
          # Bind loop variables as defaults; a bare lambda would capture them
          # late, making every job enqueue the last engine/search pair.
          job = lambda engine=engine, orm_search=orm_search: self.controllers[engine.name].queue.put(orm_search)
          schedule.every( orm_search.periodicity ).seconds.do( job )
          logging.debug('Put %s to schedule with periodicity %i seconds' % ( orm_search.name, orm_search.periodicity ) )

    self.httpd = HTTPD( self.config, self.controllers )
    self.httpd.start()
     
    while True:
      if not self.config.dry_run:
        schedule.run_pending()

      time.sleep(1)
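Why the default-argument binding in the lambda above matters: a bare lambda in a loop captures loop variables by reference, so every scheduled job would enqueue the last engine/search pair. A quick self-contained demonstration:

late = [lambda: i for i in range(3)]
bound = [lambda i=i: i for i in range(3)]
print([f() for f in late])   # [2, 2, 2]
print([f() for f in bound])  # [0, 1, 2]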
Example #31
from scraper import GoesScraper

import schedule
import time

print('creating scraper...')
scraper = GoesScraper()

print('setting schedule...')
schedule.every().day.at('08:00').do(scraper.run)
schedule.every().day.at('10:00').do(scraper.run)
schedule.every().day.at('12:00').do(scraper.run)
schedule.every().day.at('13:00').do(scraper.run)
schedule.every().day.at('14:00').do(scraper.run)
schedule.every().day.at('15:00').do(scraper.run)
schedule.every().day.at('16:00').do(scraper.run)
schedule.every().day.at('17:00').do(scraper.run)
schedule.every().day.at('18:00').do(scraper.run)
schedule.every().day.at('19:00').do(scraper.run)
schedule.every().day.at('20:00').do(scraper.run)
schedule.every().day.at('21:00').do(scraper.run)
schedule.every().day.at('22:00').do(scraper.run)

print('starting run loop...')
while True:
    schedule.run_pending()
    time.sleep(60)

print('done - exiting')
Example #32
from twilio.rest import Client
import schedule, time
import random

GOOD_MORNING_QUOTES = ["Good morning love!", "I don't cook, I don't clean!"]


def send_message(quotes_list=GOOD_MORNING_QUOTES):
    # Your Account SID from twilio.com/console
    account_sid = ""
    # Your Auth Token from twilio.com/console
    auth_token = ""

    client = Client(account_sid, auth_token)

    quote = random.choice(quotes_list)

    client.messages.create(to="+16476282884", from_="+17868286986", body=quote)


schedule.every().day.at("23:39").do(send_message, GOOD_MORNING_QUOTES)

while True:
    schedule.run_pending()
    time.sleep(1)
Example #33
new_name_folder='log_cpr'

########## creation of individual paths #########
path_main_folder=os.path.join(path,name_folder)
path_new_main_folder=os.path.join(path,new_name_folder)
suffix = '.gz'

def compression():
    files = os.listdir(path_main_folder)
    ############# new folder #########################
    if not os.path.exists(path_new_main_folder):
        os.makedirs(path_new_main_folder)
    ############# compression of files in folder #########################
    for f in files:
        path_main_files = os.path.join(path, name_folder, f)
        path_main_files_02 = os.path.join(path, name_folder, f + suffix)
        destination = os.path.join(path, new_name_folder, f + suffix)
        with open(path_main_files, 'rb') as f_in:
            with gzip.open(path_main_files_02, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
        ########## moving new files #########################
        shutil.move(path_main_files_02, destination)
        ########## removing old files #########################
        os.remove(path_main_files)

########## timing of repeating #########################
schedule.every(30).days.at("08:00").do(compression)  # plural .days is required for intervals > 1

while True:
    schedule.run_pending()
    time.sleep(1)
Example #34
import json
import urllib.request
import requests
import schedule
import time


def getdatabase():
    requestr = 'https://api.worldofwarships.asia/wows/encyclopedia/ships/?application_id=2b7fe83ad3455ce47818ecb2cb9d5818&fields=tier&language=en&page_no='
    shipdb = requests.get(requestr).json()
    pages = shipdb['meta']['page_total']
    jsonpages = {}
    for i in range(pages):
        data = requests.get(requestr + str(i + 1)).json()
        data = data['data']
        jsonpages.update(data)
    with open('shipdb.json', 'w') as outfile:
        json.dump(jsonpages, outfile)

    urllib.request.urlretrieve(
        'https://api.asia.warships.today/json/wows/ratings/warships-today-rating/coefficients',
        'coefficients.json')


schedule.every().day.at("17:37").do(getdatabase)

while True:
    schedule.run_pending()
    time.sleep(1)
Example #35
                    os.makedirs(folder_path,exist_ok=True)
                    if latest_file:
                        logging.info('Downloading truck image for camera %s', device_id)
                        filename = os.path.join(folder_path,now_file_path+'.jpg')
                        with open(filename, 'wb') as image_file:
                            sftp.getfo(file, image_file)
                        logging.info('Downloaded truck image for camera %s!', device_id)
                    else:
                         logging.info('No latest image available for camera %s', device_id)
                except Exception as e:
                    logging.error('Image download failed for camera %s \n error: %s', file, e)

    except Exception as e:
        logging.error('Failed to download truck stops: %s',e)


if __name__ == '__main__':
    logging.debug('Started camera image downloader')
    update_camera_exclusions()
    update_camera_inventory()
    logging.info("Initialization completed.")
    downloadCameraImages()
    download_private_truck_stops_sftp()
    schedule.every().day.at("00:02").do(update_camera_inventory)
    schedule.every(5).minutes.do(downloadCameraImages)
    schedule.every(5).minutes.do(download_private_truck_stops_sftp)
    while True:
        schedule.run_pending()
        time.sleep(1)

    # downloadCameraImages()
Example #36
            Tanggal     : {tanggal},
            Jam         : {jam},
            Koordinat   : {koordinat},
            Lintang     : {lintang},
            Bujur       : {bujur},
            Magnitude   : {magnitude},
            Kedalaman   : {kedalaman},   
            Wilayah     : {wilayah}

            waiting {num}

            """.format(tanggal=a.find("tanggal").text,
                       jam=a.find("jam").text,
                       koordinat=a.find("coordinates").text,
                       lintang=a.find("lintang").text,
                       bujur=a.find("bujur").text,
                       magnitude=a.find("magnitude").text,
                       kedalaman=a.find("kedalaman").text,
                       wilayah=a.find("wilayah").text,
                       num=1))


def job():
    getInfo(link)


schedule.every(5).seconds.do(job)

while True:
    schedule.run_pending()
    time.sleep(1)
Example #37
def main():
    log_path = path.join(LOG_DIR, 'download_transfer.json')
    log = load_json(log_path)
    if not log:
        log = {'files': {}, 'sizes': {}}

    def clean_up():
        for f in log['files'].keys():
            if log['files'][f] == 'delete':
                if recursive_delete(path.join(LOCAL_DIR, f)):
                    print('deleted', f)

    schedule.every().day.at('04:00').do(clean_up)

    def save_log():
        save_json(log, log_path)

    while True:
        local_files = listdir(LOCAL_DIR)
        schedule.run_pending()

        for file in local_files:
            if file not in log['files']:
                if file not in log['sizes'] or log['sizes'][file] != 'done':
                    current_size = get_size(path.join(LOCAL_DIR, file))
                    if file not in log['sizes'] or current_size != log['sizes'][file]:
                        log['sizes'][file] = current_size
                        save_log()
                        continue
                    elif file.startswith('_UNPACK'):
                        continue
                    elif file.startswith('.'):
                        continue
                    else:
                        del log['sizes'][file]
                        save_log()
                print('copying', file)
                log['files'][file] = 'copying'
                save_log()
                if copytree(path.join(LOCAL_DIR, file), FILE_DIR, file, True):
                    log['files'][file] = 'copied'
                    print('copied', file)
                else:
                    del log['files'][file]
                save_log()

        remote_files = listdir(FILE_DIR)
        delete = []
        for file in log['files'].keys():
            if file not in remote_files:
                log['files'][file] = 'delete'
                save_log()

            if file not in local_files:
                delete.append(file)
                save_log()

        for key in delete:
            del log['files'][key]
            save_log()

        sleep(1)
Example #38
    do_dns_update(cf, zone_name, zone_id, dns_name, ip_address,
                  ip_address_type)


if __name__ == '__main__':
    main()

    fallback_sched_time = 21600  # 6 hours
    sched_time = os.getenv('SCHED_TIME')
    if sched_time is None or sched_time == '':
        print('SCHED_TIME not set or empty - falling back to default')
        sched_time = fallback_sched_time
    else:
        try:
            sched_time = abs(int(sched_time))
        except ValueError:
            print(
                'SCHED_TIME must be a number of seconds, but %s was given - falling back to default'
                % (sched_time))
            sched_time = fallback_sched_time
    print('SCHED_TIME: %d seconds' % (sched_time))

    try:
        schedule.every(sched_time).seconds.do(main)
        while True:
            schedule.run_pending()
            time.sleep(1)
    except KeyboardInterrupt:
        exit(0)
Example #39
import schedule
import time
import datetime
from datetime import timedelta
import glob


def clear():
    date = datetime.datetime.now().strftime('%Y-%m-%d')

    with open('daily-stats-graph_output_example', 'r') as plot:
        read = plot.read()

    # write a dated copy of the output file (assumed intent; the original
    # '%' formatting had no placeholder and would raise TypeError)
    with open('daily-stats-graph_output_example_%s' % date, 'w') as plot2:
        plot2.write(read)
    txt_files = glob.glob('*.txt')  # don't shadow the builtin 'list'
    for name in txt_files:
        open('%s' % name, 'w').close()  # truncate each .txt file
    return


schedule.every().day.at("21:59").do(clear)

while True:
    schedule.run_pending()
    time.sleep(15)
Example #40
        m_2 = m_2 + x + '\n'
    for x in wrong_names:
        m_3 = m_3 + x + '\n'

    vk.messages.send(**config, random_id=get_random_id(), user_id=editor,
                     message=f'Deleted or not in the clan: {m_1}\n\n'
                     )
    vk.messages.send(**config, random_id=get_random_id(), user_id=editor,
                     message=f'Other positions: {m_2}\n\n'
                     )
    vk.messages.send(**config, random_id=get_random_id(), user_id=editor,
                     message=f'Other names: {m_3}\n\n',
                     )


schedule.every().day.at('20:00').do(check_pages, vk, config)

while True:
    try:
        schedule.run_pending()
        time.sleep(1)
    except Exception as e:
        with open('errors.txt', 'a') as f:
            f.write(str(e) + '\n')
        vk_session = vk_api.VkApi(token=token)
        longpoll = VkBotLongPoll(vk_session, group_id, wait=25)
        vk = vk_session.get_api()
        LongPollServer = vk.groups.getLongPollServer(group_id=group_id)
        key, server, ts = LongPollServer['key'], LongPollServer['server'], LongPollServer['ts']
        config = {'key': key, 'server': server, 'ts': ts}
        vk_token = (vk_api.VkApi(token=user_token)).get_api()
Example #41
def crawl_daily():
    """
    Daily scheduled crawl.
    """

    # initialize the daily-bar crawler
    dc = DailyCrawler()
    # current time
    now_date = datetime.now()
    # day of the week, Sunday-Saturday as 0-6
    weekday = int(now_date.strftime('%w'))
    # only run Monday through Friday
    if 0 < weekday < 6:
        # today's date
        now = now_date.strftime('%Y-%m-%d')
        # crawl today's index data
        dc.crawl_index(begin_date=now, end_date=now)
        # crawl today's K-line (candlestick) data
        dc.crawl(begin_date=now, end_date=now)


# entry point for the scheduled task
if __name__ == '__main__':
    # run the crawl every day at 15:30
    schedule.every().day.at("15:30").do(crawl_daily)
    # loop forever, checking for due jobs
    while True:
        # check every 10 seconds
        schedule.run_pending()
        time.sleep(10)
Example #42
import schedule
import tweet
import time
import data

dt = data.Data()
schedule.every(5).minutes.do(tweet.tweet_noti, dt)

while True:
    schedule.run_pending()
    time.sleep(1)
Example #43
import schedule
from binance.enums import FuturesType
import sys
from alert_future_spread import future_spread_alerting
from alert_funding import funding_alert


if __name__ == "__main__":

    # # Spread Alerts
    schedule.every(5).minutes.do(future_spread_alerting, spread=-0.05,
                                 return_annual=None, is_spread_neg=True)
    schedule.every(30).seconds.do(future_spread_alerting,
                                  spread=1.12, return_annual=0.5)
    schedule.every(6).hours.do(future_spread_alerting,
                               spread=0.10, return_annual=0.3, )

    # # Funding Alerts
    alert_1_USDM = {"threshold": 5,
                    "is_neg": False,
                    "is_both": True,
                    "futures_type": FuturesType.USD_M,
                    "disable_notification": False,
                    }
    alert_1_COIM = {"threshold": 5,
                    "is_neg": False,
                    "is_both": True,
                    "futures_type": FuturesType.COIN_M,
                    "disable_notification": False,
                    }
Example #44
if DEBUG:
    logging.basicConfig(
        format="%(asctime)s %(filename)s:%(lineno)d %(levelname)s:%(message)s",
        level=logging.INFO,
    )
    logging.getLogger("schedule").setLevel(logging.WARNING)

workMins, breakMins = parseArgs(sys.argv)

adapter = Adapter.get()

if Platform == Platforms.Windows:
    if not adapter.isAdmin():
        print("Relaunching as admin")
        adapter.relaunchAsAdmin()
        sys.exit(0)

t = Timer(workMins, breakMins)


def mainLoop():
    t.tick()


schedule.every(1).seconds.do(mainLoop)

while True:
    schedule.run_pending()
    sleep(1)
Example #45
            # settings
            session.set_relationship_bounds(enabled=False, potency_ratio=1.21)

            # actions
            session.unfollow_users(amount=1000,
                                   allFollowing=True,
                                   style="RANDOM",
                                   unfollow_after=3 * 60 * 60,
                                   sleep_delay=450)

        except Exception:
            print(traceback.format_exc())

    requests.get(
        "https://api.telegram.org/******/sendMessage?chat_id=*****&text='InstaPy Unfollower WEDNESDAY Stopped @ {}'"
        .format(datetime.now().strftime("%H:%M:%S")))


# schedulers
schedule.every().day.at("09:30").do(follow)
schedule.every().day.at("13:30").do(follow)
schedule.every().day.at("17:30").do(follow)

schedule.every().day.at("00:05").do(unfollow)

schedule.every().wednesday.at("03:00").do(xunfollow)

while True:
    schedule.run_pending()
    time.sleep(1)
Example #46
    if dayDifference >= 42:
        print("Viable flat base length. Check volume % change...")
    else:
        print("Too short for viable base pattern.\n")


def checkCorrectionPercentage(stock, oldHighDate, high):
    oldToNewHighRange = get_data(stock, start_date=oldHighDate, end_date=todayDate)
    rangeLow = oldToNewHighRange.loc[oldToNewHighRange['low'].idxmin()]
    rangeLow = rangeLow['low']
    percentageDifference = ((high - rangeLow) / high) * 100
    percentageDifference = "%.2f" % round(percentageDifference, 2)
    return percentageDifference

sendNewInfo()
schedule.every(3).minutes.do(compareHighs)
# def startChecks():
#     schedule.every(3).minutes.do(compareHighs)
#
# def sleep():
#     schedule.cancel_job(startChecks)
#     time.sleep(43200)
#
# schedule.every().day.at("06:28").do(sendNewInfo)
# schedule.every().day.at("06:35").do(startChecks)
# schedule.every().day.at("13:00").do(sleep)

while True:
    schedule.run_pending()
    time.sleep(1)
Example #47
def fill():
    """Fill in the task to be scheduled."""
    schedule.every().day.at(onhour).do(screenon)
    schedule.every().day.at(offhour).do(screenoff)
Example #48

def compress_html():
    """Compress downloaded .html files"""
    if not os.path.exists(PATH_HTML):
        os.makedirs(PATH_HTML)
    os.chdir(PATH_HTML)
    try:
        # use a context manager so the archive is closed even on error
        with ZipFile(SITE_NAME + '_' + DATE + '_html.zip', 'a') as zip_csv:
            for file in glob.glob("*" + DATE + "*" + "html"):
                zip_csv.write(file)
                os.remove(file)
        logging.info("Compressing HTML files")
    except Exception as e:
        logging.error('Error when compressing html')
        logging.info(type(e).__name__ + str(e))
    os.chdir(PROJECT_PATH)


# Run scripts if argument is 'test', run and hibernate if 'run' else hibernate
if "test" in sys.argv:
    main()
else:
    if "run" in sys.argv:
        main()
    start_time = '01:' + str(random.randint(0, 59)).zfill(2)
    schedule.every().day.at(start_time).do(main)
    while True:
        schedule.run_pending()
        time.sleep(1)
Example #49
def run_scheduler(interval):
	schedule.every(interval).minutes.do(job, interval)
	while True:
		# print('waiting...')
		schedule.run_pending()
		time.sleep(1)
Example #50
    brecha = generator_float(brecha_list)

    #Creating tweet
    api.update_status(f"""Último precio
    {next(dolar_type).capitalize()} $ {next(last_price)}
    {next(dolar_type).capitalize()} $ {next(last_price)}
    {next(dolar_type).capitalize()} $ {next(last_price)}
    {next(dolar_type).capitalize()} $ {next(last_price)}
    {next(dolar_type).capitalize()} $ {next(last_price)}
    {next(dolar_type).capitalize()} $ {next(last_price)}
    {next(dolar_type).capitalize()} $ {next(last_price)}
    {next(dolar_type).capitalize()} $ {next(last_price)}""")

    api.update_status(f"""Brecha
    {next(dolar_type_1).capitalize()} % {next(brecha)}
    {next(dolar_type_1).capitalize()} % {next(brecha)}
    {next(dolar_type_1).capitalize()} % {next(brecha)}
    {next(dolar_type_1).capitalize()} % {next(brecha)}
    {next(dolar_type_1).capitalize()} % {next(brecha)}
    {next(dolar_type_1).capitalize()} % {next(brecha)}
    {next(dolar_type_1).capitalize()} % {next(brecha)}
    {next(dolar_type_1).capitalize()} % {next(brecha)}""")
    


if __name__=='__main__':   
    run()   
    schedule.every(30).minutes.do(run)
    while True:
        schedule.run_pending()
        time.sleep(1) 
Example #51
    print ' 1. Repeat the program on an interval measured in days:'
    print ' 2. Repeat the program on a fixed day of the week (monday, sunday ....)'
    print ' 3. Repeat the program on an interval measured in hours'
    print ' 4. Repeat the program on an interval measured in minutes'
    print ' 5. Repeat the program on an interval measured in seconds'

    choose = raw_input('Choose according to your needs: ')
    if choose == '1':
        print 'You chose to repeat the program daily'
        choose1 = raw_input(
            'Do you want to pick the time the program runs? y/n :')
        if choose1 == 'y' or choose1 == 'yes':
            gio, phut = nhapthoigian()
            print 'The program will run at %s:%s every day!' % (
                gio, phut)
            schedule.every().days.at("%s:%s" % (gio, phut)).do(job)
            break
        elif choose1 == 'n' or choose1 == 'no':
            print 'The program will run at 00:00 every day'
            schedule.every().days.do(job)
            break
        else:
            print 'Invalid choice, answer y/n:'
            pass

    elif choose == '2':
        print '2. Thu hai - Monday'
        print '3. Thu ba - Tuesday'
        print '4. Thu tu - Wednesday'
        print '5. Thu nam - Thursday'
Example #52
            zf.write(os.path.join(TargetPath, f),
                     f)  # passing just the file name as the 2nd argument stores it without its directory

    with paramiko.SSHClient() as client:
        # accept unknown host keys so the first login doesn't fail on
        # "Are you sure you want to continue connecting (yes/no)?"
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(hostname=host,
                       port=host_port,
                       username=host_username,
                       password=host_password)
        # start an SFTP session
        sftp_connection = client.open_sftp()
        # transfer the local file to the remote server
        sftp_connection.put(SFTPPath, SFTPPath)
        client.close()
    # delete the whole folder...
    shutil.rmtree(TargetPath)
    # ...and recreate it empty
    os.mkdir(TargetPath)
    print("finish " + timename)


# ways to invoke the function
#schedule.every().day.at("00:00").do(SFTP)
#schedule.every(1).minutes.do(SFTP)
schedule.every(1).hour.do(SFTP)

while True:
    # run any due jobs
    schedule.run_pending()
    time.sleep(1)  # avoid a busy-wait
Example #53
                    mcid]
            elif i <= 38:
                player_data_build_from21_to40_dict[mcid] = player_data_dict[
                    mcid]
            else:
                break
            i += 1

        draw_graph(today, player_data_from1_to20_dict,
                   player_data_from21_to40_dict,
                   player_data_build_from1_to20_dict,
                   player_data_build_from21_to40_dict)

        push_to_discord(today)
        push_to_twitter(today)

        # purely a joke feature; will be dropped once it gets old
        neta(today)

    except:
        unexpected_error()  # route any unanticipated error to unexpected_error()


login()  # run login() once at program start
schedule.every().hour.at(":01").do(do_every_hour)  # run do_every_hour() at minute :01 of every hour
schedule.every().day.at("23:58").do(do_every_day)  # run do_every_day() every day at 23:58

# sleep one second per iteration so the polling loop stays light
while True:
    schedule.run_pending()
    time.sleep(1)
Example #54
   # list that will receive the deputies' data
   list_deputados = []

   # connect to the open-data API
   obj = DadosAbertos()

   # list the deputies
   list_dep = obj.deputados()

   for dep in list_dep:
       info = {
             '_id'    : dep['id'],
             'Nome'   : dep['nome'],
             'Partido': dep['siglaPartido'],
             'Foto'   : dep['urlFoto']
       }
       list_deputados.append(info)


   # insert the data into MongoDB
   retorno = table.insert_many(list_deputados)


# create the schedule
schedule.every().day.at("10:30").do(coleta)
schedule.every().minute.at(":17").do(coleta)


while True:
   schedule.run_pending()
   time.sleep(1)
Example #55
import datetime
import schedule
import time
import main
import util


def shift():
    main.shift()


def duty():
    if not util.judgeFriday():
        main.duty()


schedule.every().day.at("08:30").do(shift)
schedule.every().day.at("10:00").do(duty)

while True:
    schedule.run_pending()
    time.sleep(60)
Example #56
def log_run_metrics(cycle_start_time, recent_media_added, users_added,
                    users_updated, user_recent_media_added):
    print '\nDONE: inserting new records into database at %s.' % datetime.datetime.now()
    print 'Inserted: RecentMedia: %d -- Users Added: %d -- Users Updated: %d -- UserRecentMedia: %d'\
          % (recent_media_added, users_added, users_updated, user_recent_media_added)
    print 'Cycle run time taken: %s' % (datetime.datetime.now() -
                                        cycle_start_time)
    print 'Process start time was: %s' % process_start_time
    print 'Process run time currently: %s' % (datetime.datetime.now() -
                                              process_start_time)


if __name__ == '__main__':
    api = get_instagram_api()

    # Open cluster connection to the raw keyspace, and build/connect to our tables
    open_cassandra_session()

    # Schedule our job, and begin it, sleeping for 120 seconds between each job before rerunning
    print 'Scheduling job for every 140 seconds, time is %s.' % datetime.datetime.now()
    schedule.every(140).seconds.do(produce_raw_layer)

    # Process start time
    process_start_time = datetime.datetime.now()

    while True:
        schedule.run_all()
        time.sleep(120)
Example #57
                DEG_2_RAD)
    #print twd

    values = []
    value = {"path": "environment.wind.directionMagnetic", "value": twd}
    values.append(value)
    value = {"path": "environment.wind.speedTrue", "value": tws}
    values.append(value)
    update = {}
    update["values"] = values
    update["timestamp"] = datetime.datetime.strptime(
        data["GetSingleStationResult"]["Samples"][1]["Updated"],
        "%Y-%m-%d %H:%M:%S").strftime("%Y-%m-%dT%H:%M:%SZ")
    updates = []
    updates.append(update)
    delta = {}
    delta["updates"] = updates
    delta['context'] = 'vessels.viva2'
    print delta

    mqtt_client.publish(json.dumps(delta), "wx_viva2")


schedule.every(30).seconds.do(scrape)

while True:
    schedule.run_pending()
    time.sleep(1)

#publish.single("paho/test/single", "boo", hostname="test.mosquitto.org")
#publish.single(self, self.MQTTsubTopic, payload = msg, qos = 0, retain = False)
Example #58
                "https://api.exchangeratesapi.io/history?start_at=2000-01-01&end_at="
                + now + "&symbols=USD")
            result = json.loads(raw_data.content.decode('utf-8'))["rates"]
            try:
                for key, value in result.items():
                    outFile.write('\n' + str(datetime.now()) + ":**currency:" +
                                  str(key) + ", " + str(value))
                    self.api_bridge.insert_currency(key,
                                                    value["USD"],
                                                    symbol="USD")
            finally:
                # close connection
                self.api_bridge.close()


if __name__ == "__main__":

    def job():
        currency = CurrencyHistory()
        currency.get_currency_history()

        stock = StockHistory()
        stock.get_stock_history()

    schedule.every(1).minutes.do(job)
    # schedule.every().day.at("18:00").do(job)

    while True:
        schedule.run_pending()
        time.sleep(1)
Example #59
            # send_mail_163_to_all('[*]360cert daily',,temp360resp, type='plain')
            send_mail_A(Recipient_list,
                        '[*]xuanwulab ' + current_riqi,
                        tempxuanwu,
                        type='plain')
            today_has_send_xuanwulab = 1

    else:
        print "[xuanwulab] already sent today"

    sys.stdout.flush()  # flush so logs appear in real time, e.g. with python x.py > x.log


# daily crawling ----------------------------------^
# globals
schedule.every(8).minutes.do(crawl_all_new)  # pass the function without parentheses so it is scheduled, not called

# recipients
Recipient_list = ['*****@*****.**', '*****@*****.**']

if __name__ == '__main__':
    print 'start_time (program start time): ' + str(start_time)

    global_crawl_times += 1  # number of crawls so far
    crawl_all_new()
    sys.stdout.flush()

    while True:
        schedule.run_pending()
        time.sleep(30)
        print("alive..")
                     span_name="fetch-price",
                     transport_handler=http_transport_handler,
                     sample_rate=100.0):
        data = fetch_price(stock)
        if data:
            send2_kafka(producer, data)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('symbol', help='the symbol of the stock')
    parser.add_argument('topic_name', help='the name of the topic')
    parser.add_argument('kafka_broker', help="the location of the kafka")

    args = parser.parse_args()
    symbol = args.symbol
    topic_name = args.topic_name
    kafka_broker = args.kafka_broker

    producer = KafkaProducer(bootstrap_servers=kafka_broker)

    #stock = get_quote(symbol)

    schedule.every(1).second.do(fetch_price_and_send, producer, symbol)

    atexit.register(shutdown_hook, producer)

    while True:
        schedule.run_pending()
        time.sleep(1)