Example #1
def on_cmd(full_message, user, value):
	if (not hasattr(user, 'menu')) or user.menu is None:
		return False # Let cmd pass through
	
	menu_text, options, exit_callback, timeout = user.menu

	if value == 'again':
		tell_menu(user, user.menu)
		return True
	elif value == 'exit':
		callback = exit_callback
	elif len(options) and options[0] == 'prompt':
		callback = options[1]
	else:
		try:
			n = int(value)
		except ValueError:
			tell(user, "Not a number: %s" % value)
			return True
		if not 0 < n <= len(options):
			tell(user, "%d not a valid option" % n)
			return True
		callback = options[n-1][1]

	new_menu = callback(user, value)
	user.menu = None
	schedule.clear(('menu', user))
	if new_menu is not None:
		display(user, *new_menu)

	return True
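
Example #1 clears a per-user job with a tuple tag, schedule.clear(('menu', user)). In the schedule library a tag may be any hashable object, so a (kind, user) tuple works as a per-user key. The sketch below shows just that pattern in isolation; the remind function and the user value are illustrative and not part of the example above.

import schedule

def remind(user):
    print("menu reminder for %s" % user)

user = "alice"
# Tags may be any hashable object, so a (kind, user) tuple makes a per-user key.
schedule.every(30).seconds.do(remind, user).tag(('menu', user))

# Later, drop only this user's menu reminder and leave every other job untouched.
schedule.clear(('menu', user))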
Example #2
File: feeder.py Project: Danko90/cifpy3
    def load_feeds(self):
        schedule.clear()
        feeds = {}
        self.logging.debug("Getting List of Feeds")
        files = os.listdir(cif.options.feed_directory)
        feed_files = []
        for file in files:
            if file.endswith(".yml"):
                self.logging.debug("Found Feed File: {0}".format(file))
                feed_files.append(os.path.join(cif.options.feed_directory, file))

        feed_files.sort()
        for feed_file in feed_files:
            self.logging.info("Loading Feed File: {0}".format(feed_file))
            feeds[feed_file] = cif.feeder.Feed(feed_file)
            self.logging.info("Scheduling Feed File: {0}".format(feed_file))
            if 'feeds' not in feeds[feed_file].feed_config:
                self.logging.info("{0} does not contain feeds key".format(feed_file))
                continue
            for feed_name in feeds[feed_file].feed_config['feeds'].keys():
                if "interval" in feeds[feed_file].feed_config['feeds'][feed_name]:
                    if feeds[feed_file].feed_config['feeds'][feed_name]['interval'] == "hourly":
                        self.logging.info(repr(schedule.every().hour.at("00:00").do(feeds[feed_file].process, feed_name)))
                    elif feeds[feed_file].feed_config['feeds'][feed_name]['interval'] == "daily":
                        self.logging.info(repr(schedule.every().day.at("00:00").do(feeds[feed_file].process, feed_name)))
                    elif feeds[feed_file].feed_config['feeds'][feed_name]['interval'] == "weekly":
                        # schedule's .at() only works for day/hour/minute or weekday jobs, so weekly runs have no fixed time of day
                        self.logging.info(repr(schedule.every().week.do(feeds[feed_file].process, feed_name)))
                else:
                    self.logging.info(repr(schedule.every(1).minute.do(feeds[feed_file].process, feed_name)))
Example #3
def test_map_play_list_information():
    """
        Verify proper mapping of Pi Window Schedule Information to a set of show tasks
    """
    # clear any currently scheduled items
    schedule.clear()

    # set up a mock Pi Windows API response for testing
    @all_requests
    def window_response_content(url, request):
        return {
            'status_code': 200,
            'content': example_show_schedule_response
        }

    # fire off the task that periodically checks for show updates
    with HTTMock(window_response_content):
        fetch_show_schedule_task()

    # verify we have the proper tasks queued up to run the show
    task_names = [job.job_func.func.func_name for job in schedule.jobs]
    nose.tools.assert_equals(task_names.count('cache_files_task'), 1)
    nose.tools.assert_equals(task_names.count('pre_show_task'), 1)
    nose.tools.assert_equals(task_names.count('showtime_task'), 1)
    nose.tools.assert_equals(task_names.count('post_show_task'), 1)
Example #4
def test_play_scheduled():
    """
        Verify play API schedule operation to queue up a show in the future
    """
    initial_job_count = len(schedule.jobs)
    with app.test_client() as client:
        request_data = {
            "play_list": [
                {
                    "photo": "https://s3.amazonaws.com/pivideo-testing/ssnl_logo.png",
                    "duration": 5
                },
                {
                    "video": "https://s3.amazonaws.com:443/hubology-video-village-media/media/DJI_0127.MOV"
                }
            ],
            "loop": True,
            "start_time": "04:00",
            "end_time": "04:30"
        }
        response = client.post('/play', data=json.dumps(request_data),
                               content_type='application/json')
        nose.tools.assert_equals(200, response.status_code)
        response_data = json.loads(response.data)
        nose.tools.assert_equals('scheduled', response_data['status'])
        job_count = len(schedule.jobs)
        task_names = [job.job_func.func.func_name for job in schedule.jobs]
        nose.tools.assert_equals(job_count, initial_job_count + 4)
        nose.tools.assert_equals(task_names.count('cache_files_task'), 1)
        nose.tools.assert_equals(task_names.count('pre_show_task'), 1)
        nose.tools.assert_equals(task_names.count('showtime_task'), 1)
        nose.tools.assert_equals(task_names.count('post_show_task'), 1)
        schedule.clear()
Example #5
def main():
    port = "5918"
    if len(sys.argv) > 1:
        port = sys.argv[1]
    
    socket = initiate_zmq(port)
    logging.basicConfig(filename='./log/ingest_lottery.log', level=logging.INFO)
    tz = pytz.timezone(pytz.country_timezones('cn')[0])
    schedule.every(30).seconds.do(run, socket, tz)
    while True:
        try:
            schedule.run_pending()
            time.sleep(1)
        except KeyboardInterrupt:
            now = datetime.now(tz)
            message = "CTRL-C to quit the program at [%s]" % now.isoformat()
            logging.info(message)
            break
        except Exception as e:
            now = datetime.now(tz)
            message = "Error at time  [%s]" % now.isoformat()
            logging.info(message)
            logging.info(e)
            # reschedule the job
            schedule.clear()
            socket = initiate_zmq(port)
            schedule.every(30).seconds.do(run, socket, tz)
Example #6
 def set_settings(self, settings):
     """Set the current settings"""
     if settings is not None and "alarms" in settings:
         # Only set settings if settings is actually set and "alarms" exists
         # within settings
         self.settings = settings
         schedule.clear() # Delete all scheduled alarms
         self.create_alarms()
Example #7
    def _do_reload(self):
        schedule.clear()
        self.feeds = {}
        self.logging.debug("Getting List of Feeds")
        files = os.listdir(cif.options.feed_directory)
        feed_files = []
        for file in files:
            if file.endswith(".yml"):
                self.logging.debug("Found Feed File: {0}".format(file))
                feed_files.append(os.path.join(cif.options.feed_directory, file))

        feed_files.sort()
        for feed_file in feed_files:
            self.logging.info("Loading Feed File: {0}".format(feed_file))

            loaded = self._load_feed(feed_file)
            if loaded is None:
                self.logging.error("Feed File: '{0}' came back as None".format(feed_file))
                continue

            self.feeds[feed_file] = loaded

            self.logging.info("Scheduling Feed File:".format(feed_file))

            if 'feeds' not in self.feeds[feed_file]:
                continue

            for feed_name in self.feeds[feed_file]['feeds'].keys():

                if "interval" in self.feeds[feed_file]['feeds'][feed_name]:
                    if self.feeds[feed_file]['feeds'][feed_name]['interval'] == "hourly":

                        self.logging.debug(
                            repr(schedule.every().hour.at("00:00").do(self._run_feed, feed_file, feed_name))
                        )

                    elif self.feeds[feed_file]['feeds'][feed_name]['interval'] == "daily":

                        self.logging.debug(
                            repr(schedule.every().day.at("00:00").do(self._run_feed, feed_file, feed_name))
                        )

                    elif self.feeds[feed_file]['feeds'][feed_name]['interval'] == "weekly":

                        # schedule's .at() only works for day/hour/minute or weekday jobs,
                        # so weekly feeds are scheduled without a fixed time of day
                        self.logging.debug(
                            repr(schedule.every().week.do(self._run_feed, feed_file, feed_name))
                        )

                    else:

                        self.logging.debug(
                            repr(schedule.every().hour.at("00:00").do(self._run_feed, feed_file, feed_name))
                        )
                else:
                    self.logging.debug(
                        repr(schedule.every().hour.at("00:00").do(self._run_feed, feed_file, feed_name))
                    )
Example #8
def createSchedules(sockets):
    """Create all of the on and off schedules for all the sockets"""

    # Clear the existing schedule, because in cases where a switch on or
    # off time is defined as sunset or similar, the time will change daily
    logger.debug("Clearing existing schedule")
    schedule.clear()

    logger.info("Creating schedule")
    for socket in sockets:
        setSchedules(socket.name, 'on', socket.onTimes)
        setSchedules(socket.name, 'off', socket.offTimes)
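
The comment in Example #8 explains why the whole schedule is wiped daily: switch times defined relative to sunset change every day, so the jobs are rebuilt from scratch. The setSchedules helper is not shown in the example; the sketch below is only a guess at its shape, with the switch function and the times format assumed rather than taken from the project.

import schedule

def switch(name, state):
    # Hypothetical stand-in for whatever actually toggles the socket on or off.
    print("turning %s %s" % (name, state))

def setSchedules(name, state, times):
    """Register one daily job per time string, tagged with the socket name."""
    for t in times:  # e.g. ["07:30", "18:45"]
        schedule.every().day.at(t).do(switch, name, state).tag(name)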
Example #9
    def start(self):
        schedule.clear()
        if self.job_name == 'subito':
            schedule.every(self.timeout).seconds.do(self.subito_job)
        elif self.job_name == 'idealista':
            schedule.every(self.timeout).seconds.do(self.idlista_job)
        else:
            logger.error('unknown job %s', self.job_name)
            sys.exit(2)

        while True:
            schedule.run_pending()
            time.sleep(1)
Example #10
def reload():
    global dashdayjob
    global configfile
    schedule.clear()
    if os.path.isfile('config/dashday.cfg') == True:
        configfile = configparser.ConfigParser()
        try:
            configfile.read('config/dashday.cfg')
        except PermissionError:
            handlers.criterr("Permissions error on dashday.cfg. Please ensure you have write permissions for the directory.")
    else:
        print("No configuration file found. Please configure Dashday.")
        exit()
    dashdayjob = schedule.every().day.at(configfile['Schedule']['runat']).do(start)
    web.serv.updateScheduledRun(dashdayjob.next_run.strftime("%d/%m/%y %H:%M:%S"))
Example #11
def delete_cache(first_time='0:05', second_time='1:05'):
    try:
        # Delete folders for hours 1 through 23; runs daily at first_time (default 0:05)
        schedule.every().day.at(first_time).do(delete_unnecessary_folders23)
        # Delete folders created during the 24th hour; runs daily at second_time (default 1:05)
        schedule.every().day.at(second_time).do(delete_unnecessary_folders24)

        while 1:
            schedule.run_pending()
            sleep(50)
    except Exception as e:
        print("{0} Error: {1}".format(strftime("%Y-%m-%d %H:%M:%S", gmtime()), e))
        schedule.clear()
        print('Notification: Timer is reset')
        delete_cache()
Example #12
    def main(self):
        self.log.info("Starting")
        self.get_config()
        self.log.debug("directory Selected: " + self.config["basedir"])

        self.ec2_auto = {}
        # self.config_scheduled_cmd()
        self.ec2_autodiscovery()

        while 1:

            event_handler = ChangeHandler(self.config, self.ec2_auto, self.log)
            observer = Observer()
            _track = Observer()
            observer.schedule(event_handler, self.config["basedir"], recursive=True)
            _track.schedule(event_handler, "/tmp/", recursive=True)
            observer.start()
            _track.start()
            try:
                while True:

                    if os.path.getmtime(self.params.config) != self.last_load:
                        self.log.info("Config change reloading data")
                        self.get_config()

                        self.ec2_autodiscovery()

                        schedule.clear()
                        # self.config_scheduled_cmd()

                    time.sleep(1)
                    schedule.run_pending()
                    event_handler.wait_to_notify()

            except KeyboardInterrupt:
                quit("Exit")
                observer.stop()
                _track.stop()
        observer.join()
        _track.join()
        self.log.debug("Finished")
Example #13
    def __init__(self, *args, **kwargs):
        """ Initialise attributes and register all behaviours """
        self.logging = logging
        self.__log('Starting Assistant')
        self.db = Database()
        self.mode = kwargs.get('mode', 'console')

        self.behaviours = {}

        self.dir = os.path.dirname(os.path.realpath(__file__))
        self.files = self.dir + '/files'

        self.config = config.Config()
        self.responder = None

        self.admin = self.config.get_or_request('Admin')

        self.register_behaviours()
        self.register_responders()

        schedule.clear()
        schedule.every(5).minutes.do(self.idle)
Example #14
def refresh_schedule():
    try:
        schedule.clear('user')
        # for every user
        for u in get_all_user_config():
            system_time = get_system_nofity_time(u['undisturbed_start'],
                                                 u['undisturbed_end'])
            # is location set
            if u['latitude']:
                # add user schedule
                user_schedule = get_user_time(u)
                if user_schedule:
                    schedule.every().day.at(user_schedule).do(
                        notify, u['id'], 'user').tag('user')
            else:
                schedule.every().wednesday.at(system_time).do(
                    broadcast, u['id']).tag('user')
                schedule.every().saturday.at(system_time).do(
                    broadcast, u['id']).tag('user')

        logging.info(schedule.default_scheduler.jobs)
    except Exception as e:
        logging.fatal(e)
Example #15
    def go_click(self):
        self.save_settings()

        schedule.clear()
        if config['schedule']['schedule_time']:
            schedule.every().day.at(config['schedule']['schedule_time']).do(self.start_click, sched_task=True)
        else:
            schedule.every().day.at("08:00").do(self.start_click)

        try:
            self.task_monitor.isAlive()
        except AttributeError:
            self.task_monitor = threading.Thread(target=self.run_monitor)
        else:
            if not self.task_monitor.isAlive():
                self.task_monitor = threading.Thread(target=self.run_monitor)
        finally:
            if not self.task_monitor.isAlive():
                self.task_monitor.setDaemon(True)
                self.task_monitor.start()

        app.log('当前账号: %s' % config['dingtalk']['username'])  # "Current account: %s"
        app.log('等待下次学习时间: %s' % schedule.next_run())  # "Waiting for the next study time: %s"
Example #16
File: loginweb.py Project: somTian/tools
def sche_run():
    schedule.clear()

    schedule.every(10).seconds.do(login_web)

    # schedule.every().day.at("8:00").do(login_web)
    # schedule.every().day.at("10:00").do(login_web)
    # schedule.every().day.at("14:00").do(login_web)
    # schedule.every().day.at("16:00").do(login_web)
    # schedule.every().day.at("18:00").do(login_web)
    # schedule.every().day.at("20:00").do(login_web)

    #设置定时任务
    # schedule.every(10).minutes.do(job)  # 每隔 10 分钟运行一次 job 函数
    # schedule.every().hour.do(job)  # 每隔 1 小时运行一次 job 函数
    # schedule.every().day.at("10:30").do(job)  # 每天在 10:30 时间点运行 job 函数
    # schedule.every().monday.do(job)  # 每周一 运行一次 job 函数
    # schedule.every().wednesday.at("13:15").do(job)  # 每周三 13:15 时间点运行 job 函数
    # schedule.every().minute.at(":17").do(job)  # 每分钟的 17 秒时间点运行 job 函数

    while True:
        schedule.run_pending()  # 运行所有可以运行的任务
        time.sleep(1)
Example #17
def main():
	data = get_recent_broadcasts()
	programs = [Program(info) for info in data]
	programs.sort()
	programs = [prg for prg,i in zip(programs,range(len(programs))) # There are overlaps between "today" and "yesterday"'s schedules
				   if prg.pid != programs[i-1].pid] # So we can remove adjacent duplicates


	remaining_programs = get_remaining_programs(programs) 
	download(remaining_programs)
	remaining_programs = fill_in_files(remaining_programs)
	remaining_programs = generate_empty(remaining_programs)
	schedule.clear()
	for program in remaining_programs:
		hour = program.start.format("HH:mm")
		schedule.every().day.at(hour).do(schedule_play(program._file))
	current_program, current_time = get_current_shifted(remaining_programs)
	play(current_program, current_time) 

	pad_short_programs(remaining_programs)
	trim_programs(remaining_programs)
	build_playlist(remaining_programs)
	burn(remaining_programs)
Example #18
def flood_watch(range, notify_list, delay):
  global adc
  global service
  global notifier
  global flood_detector

  running = True

  adc = ADC2()
  adc.open()

  if conf.has_section('service'):
    base_url = conf.get('service', 'baseUrl')
    base_port = conf.get('service', 'basePort')
    endpoint = conf.get('service', 'postEndpoint')
    conf_endpoint = conf.get('service', 'confEndpoint')
    service = RESTNotifier(base_url, base_port, endpoint)
    service.post_configuration(conf_endpoint, delay, range)

  notifier = SMTPNotifier(conf.get('smtp', 'user'), conf.get('smtp', 'password'))
  notifier.add_notifiees(notify_list)

  flood_detector = FloodDetector(range)

  schedule.every(delay).minutes.do(check_flood)
  check_flood()

  while running:
    try:
      schedule.run_pending()
      time.sleep(READ_SLEEP)
    except KeyboardInterrupt:
      running = False
      schedule.clear()
      adc.close()
      sys.exit('\nExplicit close.')
Example #19
File: mslurm.py Project: dellhpc/omnia
def monitor_slurm():
    """monitor_slurm Monitor Slurm

    Monitor Slurm Metrics
    """
    connection = utils.init_tsdb_connection()
    node_id_mapping = utils.get_node_id_mapping(connection)
    os_idrac_hostname_mapping = utils.get_os_idrac_hostname_mapping()    
    slurm_config = utils.get_config('slurm_rest_api')
    
    #Schedule fetch slurm
    schedule.every().minutes.at(":00").do(fetch_slurm, 
                                          slurm_config, 
                                          connection, 
                                          node_id_mapping,
                                          os_idrac_hostname_mapping)

    while True:
        try:
            schedule.run_pending()
            time.sleep(1)
        except KeyboardInterrupt:
            schedule.clear()
            break
Example #20
def test_play_scheduled():
    """
        Verify play API schedule operation to queue up a show in the future
    """
    initial_job_count = len(schedule.jobs)
    with app.test_client() as client:
        request_data = {
            "play_list": [{
                "photo":
                "https://s3.amazonaws.com/pivideo-testing/ssnl_logo.png",
                "duration": 5
            }, {
                "video":
                "https://s3.amazonaws.com:443/hubology-video-village-media/media/DJI_0127.MOV"
            }],
            "loop":
            True,
            "start_time":
            "04:00",
            "end_time":
            "04:30"
        }
        response = client.post('/play',
                               data=json.dumps(request_data),
                               content_type='application/json')
        nose.tools.assert_equals(200, response.status_code)
        response_data = json.loads(response.data)
        nose.tools.assert_equals('scheduled', response_data['status'])
        job_count = len(schedule.jobs)
        task_names = [job.job_func.func.func_name for job in schedule.jobs]
        nose.tools.assert_equals(job_count, initial_job_count + 4)
        nose.tools.assert_equals(task_names.count('cache_files_task'), 1)
        nose.tools.assert_equals(task_names.count('pre_show_task'), 1)
        nose.tools.assert_equals(task_names.count('showtime_task'), 1)
        nose.tools.assert_equals(task_names.count('post_show_task'), 1)
        schedule.clear()
Example #21
    def open_connection(self):
        try:
            if not self._stopper.isSet():
                print("Adding HTTP Socket")
                with exhaustingTools.lock:
                    self._counter.increment()
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.settimeout(180)
                # sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
                sock.connect((self._ip, PORT))
                self._connections.append(sock)
                print("Added HTTP Socket")

        except Exception as ex:
            print(ex)
            print("Connection error, Could not open new connection")
            self.stopit()
            schedule.cancel_job(self._job)
            schedule.clear()
            #for sock in self._connections:
            #    sock.shutdown(1)
            #    sock.close()
            self.bucket.put(sys.exc_info())
Example #22
File: main.py Project: dnllc3/Tick-Tock
    def importPrescription(self, info, times, times24):

        scheduleFile = QFile("schedule.txt")
        if not scheduleFile.open(QIODevice.ReadWrite | QIODevice.Text):
            return
        scheduleFile.resize(0)

        prescriptionText = re.sub(
            ' +', ' ', info[1] + ", " + info[2] + " times " + info[3] +
            ", for " + info[4] + " " + info[5])
        self.medication_text.setText(prescriptionText)
        out = QTextStream(scheduleFile)
        out << prescriptionText << "\n"

        if info[0] == 1:
            self.prescription_label.setText("Prescription 1")
            out << "Prescription 1" << "\n"
        if info[0] == 2:
            self.prescription_label.setText("Prescription 2")
            out << "Prescription 2" << "\n"
        time_spins = {
            1: self.time_spin_1,
            2: self.time_spin_2,
            3: self.time_spin_3,
            4: self.time_spin_4,
            5: self.time_spin_5,
        }
        schedule.clear('alarm')
        for i, alarm_time in enumerate(times):
            if alarm_time != '':
                out << times[i] << "\n"
                time_spins[i + 1].setText(times[i])
                schedule.every().day.at(times24[i]).do(self.alarmOn,
                                                       info[0]).tag('alarm')
        t = threading.Timer(1.0, lambda: self.wait())
        t.start()
Example #23
    def createConfig(self):

        try:        
            arrayToConfig, message = self.getFirstFromArray()
        except:
            schedule.clear('daily-tasks')
            return schedule.CancelJob        

        if message == 'A tloc went down':
            self.setMessage('A device went down. config will be sent to vsmart from DIA to NO DIA')
            job_thread = threading.Thread(target=self.fromDiatoNoDia, args=(arrayToConfig,))
            job_thread.daemon = True
            job_thread.start()      
            self.waitingForDevices = False               
        
        if message == 'A tloc came up':
            self.setMessage('A device came up. Config will be sent to vsmart from NO DIA to DIA')
            job_thread = threading.Thread(target=self.fromNoDiatoDia, args=(arrayToConfig,))
            job_thread.daemon = True            
            job_thread.start()
            self.waitingForDevices = False
        
        schedule.clear('daily-tasks')
        return schedule.CancelJob
Example #24
def initialise():
    print("#####SETTING UP#####")
    chrome_options = ''

    ## THIS IS SYSTEM SPECIFIC - CHANGE THE executable_path ON PI
    driver = webdriver.Chrome(executable_path='C:\\Users\\tomjo\\PycharmProjects\\GymBookerRevamp\\venv\\Scripts\\chromedriver.exe',
                              chrome_options=chrome_options)
    wait = WebDriverWait(driver, 30)
    driver.get('https://www.johnsonellis.xyz/i-made-that')

    schedule.clear()
    print("--------->Constructing schedule commands...")
    scheduleCodeSectionsTemplate = ["schedule.every().", '#DAY#', ".at(\"06:00\").do(bookClass, \'", '#CLASSTYPE#',"\', \'", "#CLASSTIME&NAME#", "\')"]
    codeLinesToExec = []
    for classes in GymClasses.gymClasses:
        codeToExec = ""
        scheduleCodeSections = scheduleCodeSectionsTemplate
        scheduleCodeSections[1] = classes['Day']
        scheduleCodeSections[3] = classes['Type']
        scheduleCodeSections[5] = classes['Name']
        for section in scheduleCodeSections:
            codeToExec += section
        codeLinesToExec.append(codeToExec)
    print("---->Commands constructed!")

    print("--------->Executing constructed commands...")
    for codeLines in codeLinesToExec:
        exec(codeLines)

    print("#->These are the scheduled jobs:")
    print("#####")
    for job in schedule.jobs:
        print(job)
    print("#####")

    print("#####SETUP SUCCESSFUL#####")
Example #25
def send_message(lista):
    # username and pass
    username = ""
    paswd = ""

    # pick a message at random
    sel_msg = lista[random.randint(0, len(MENSAJES) - 1)]

    # log in
    client = fbchat.Client(username, paswd)

    # got this from the chat's URL
    friend_uid = ""

    sent = client.sendMessage(sel_msg, thread_id=friend_uid)

    if sent:
        print("Message sent successfully!")

    # how many people to send the message to
    # no_of_friends = int(input("Number of friends: "))
    # for i in range(no_of_friends):
    #     name = str(input("Name: "))
    #     friends = client.searchForUsers(name)  # return a list of names
    #     friend = friends[0]
    #     msg = input("Message: ")
    #     print(friend.uid)
    #     sent = client.sendMessage(msg, thread_id=friend.uid)
    #     #sent = client.send(msg, friend.uid)
    #     #client.send(Message(text=msg), thread_id=friend.uid, thread_type=ThreadType.USER)
    #     if sent:
    #         print("Message sent successfully!")

    client.logout()

    schedule.clear()
Example #26
def api_update_job():
    """This sends a status update to the smartsettia API and returns the status code"""
    url = DOMAIN + "api/update"
    data = {
        "uuid": UUID,
        "token": TOKEN,
        "version": VERSION,
        "hostname": HOSTNAME,
        "ip": IP,
        "mac_address": MAC_ADDRESS,
        "time": the_time(),
        "cover_status": cover_status(),
        "error_msg": "",
        "limitsw_open": 0,
        "limitsw_closed": 1,
        "light_in": 0,
        "light_out": 100,
    }
    headers = {
        "Content-type": "application/json",
        "Accept": "application/json",
        "Authorization": "Bearer " + TOKEN
    }
    try:
        response = requests.post(url, json=data, headers=headers)
    except requests.exceptions.RequestException as e:
        print("{}: api/update request failed: {}".format(the_time(), e))
        return
    if response.status_code in [201]:
        response = response.json()
        global NAME, IMAGE_RATE, SENSOR_RATE, UPDATE_RATE
        if NAME != response['data']['name']:
            NAME = response['data']['name']
            print(the_time() + ": Device name changed to " + NAME)
        if IMAGE_RATE != response['data']['image_rate']:
            IMAGE_RATE = response['data']['image_rate']
            schedule.clear('api_image_job')
            schedule_api_image_job()
        if SENSOR_RATE != response['data']['sensor_rate']:
            SENSOR_RATE = response['data']['sensor_rate']
            schedule.clear('api_sensor_job')
            schedule_api_sensor_job()
        if UPDATE_RATE != response['data']['update_rate']:
            UPDATE_RATE = response['data']['update_rate']
            schedule.clear('api_update_job')
            schedule_api_update_job()
        print("{}: api/update success".format(the_time()))
    else:
        print("{}: api/update failed with status_code {}\n{}".format(
            the_time(), response.status_code, response.text))
        time.sleep(60)
    return
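
Example #26 reschedules individual jobs by clearing their tag and calling a matching schedule_api_*_job helper whenever the server reports a new rate. Those helpers are not shown; the sketch below is an assumption about what one of them might do, reusing the UPDATE_RATE global and the api_update_job function from the example and treating the rate as a number of seconds.

import schedule

def schedule_api_update_job():
    # Register the update job at the current UPDATE_RATE and tag it 'api_update_job',
    # so the next rate change can clear exactly this job and re-register it.
    schedule.every(UPDATE_RATE).seconds.do(api_update_job).tag('api_update_job')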
Example #27
def cancelId(id):
    try:
        B.clear('agendaID' + str(id))
        B.clear('agendaPreID' + str(id))
        B.clear('agendaCnxID' + str(id))
        sitAceitas = [Idioma.traducao('Aguardando'), Idioma.traducao('Agendado')]
        roboCtrl.instance().operContrl.cancelAgendaId(id, Idioma.traducao('Cancelado'), sitAceitas)
        return True
    except Exception as e:
        try:
            D.error(e)
            return False
        finally:
            e = None
            del e
Example #28
 def test_clear_by_tag(self):
     every().second.do(make_mock_job(name="job1")).tag("tag1")
     every().second.do(make_mock_job(name="job2")).tag("tag1", "tag2")
     every().second.do(make_mock_job(name="job3")).tag(
         "tag3", "tag3", "tag3", "tag2")
     assert len(schedule.jobs) == 3
     schedule.run_all()
     assert len(schedule.jobs) == 3
     schedule.clear("tag3")
     assert len(schedule.jobs) == 2
     schedule.clear("tag1")
     assert len(schedule.jobs) == 0
     every().second.do(make_mock_job(name="job1"))
     every().second.do(make_mock_job(name="job2"))
     every().second.do(make_mock_job(name="job3"))
     schedule.clear()
     assert len(schedule.jobs) == 0
Example #29
 def test_clear_by_tag(self):
     every().second.do(make_mock_job(name='job1')).tag('tag1')
     every().second.do(make_mock_job(name='job2')).tag('tag1', 'tag2')
     every().second.do(make_mock_job(name='job3')).tag('tag3', 'tag3',
                                                       'tag3', 'tag2')
     assert len(schedule.jobs) == 3
     schedule.run_all()
     assert len(schedule.jobs) == 3
     schedule.clear('tag3')
     assert len(schedule.jobs) == 2
     schedule.clear('tag1')
     assert len(schedule.jobs) == 0
     every().second.do(make_mock_job(name='job1'))
     every().second.do(make_mock_job(name='job2'))
     every().second.do(make_mock_job(name='job3'))
     schedule.clear()
     assert len(schedule.jobs) == 0
Example #30
 def test_clear_by_tag(self):
     every().second.do(make_mock_job(name='job1')).tag('tag1')
     every().second.do(make_mock_job(name='job2')).tag('tag1', 'tag2')
     every().second.do(make_mock_job(name='job3')).tag('tag3', 'tag3',
                                                       'tag3', 'tag2')
     assert len(schedule.jobs) == 3
     schedule.run_all()
     assert len(schedule.jobs) == 3
     schedule.clear('tag3')
     assert len(schedule.jobs) == 2
     schedule.clear('tag1')
     assert len(schedule.jobs) == 0
     every().second.do(make_mock_job(name='job1'))
     every().second.do(make_mock_job(name='job2'))
     every().second.do(make_mock_job(name='job3'))
     schedule.clear()
     assert len(schedule.jobs) == 0
Example #31
def create_weekly_irrigation():
    global week
    global week_day_int
    water_off()  # first of all, close the water for safety reasons
    schedule.clear("Round-1")
    schedule.clear("Round-2")
    schedule.clear("Round-3")
    week = request.get_json()

    week_day_int[6] = week[
        "sunday"]  # setup days in integers for schedule_irrigation
    week_day_int[0] = week["monday"]
    week_day_int[1] = week["tuesday"]
    week_day_int[2] = week["wednesday"]
    week_day_int[3] = week["thursday"]
    week_day_int[4] = week["friday"]
    week_day_int[5] = week["saturday"]

    check_irrigation_every_day()
    return jsonify(week)
Example #32
def set_state(state: str, wait_time=3600):
    """state can be 'on' or 'off' """
    if state == "on":
        open_door()
    else:
        close_door()
    schedule.clear("open_door")
    schedule.clear("close_door")
    schedule.clear("auto_door")

    def restart_jobs():
        # TODO: take into account changes that may be made during the pause period
        time.sleep(wait_time)
        schedule.every().day.at(f"{hour_on_off[0]}:00").do(open_door).tag(
            "open_door")
        schedule.every().day.at(f"{hour_on_off[1]}:00").do(close_door).tag(
            "close_door")
        task_auto = schedule.every().hour.do(lock_door.main, *hour_on_off)
        task_auto.run()

    t2 = Thread(target=restart_jobs, daemon=True)
    t2.start()
Example #33
File: worker.py Project: suprech/kino-bot
    def stop(self):
        schedule.clear()

        self.slackbot.send_message(text=MsgResource.WORKER_STOP)
Example #34
def EndTime():
    print "Turn Off Pump End ",datetime.datetime.now()
    return schedule.clear("Event 1")
Example #35
def exit_handler(signum, frame):
    log.info("Received %s, canceling jobs and exiting.", signal.Signals(signum).name)
    schedule.clear()
    exit()
Example #36
File: python_code.py Project: J3Patel/fead
def stopHourly():
    s.clear("hourly")
Example #37
    def test_until_time(self):
        mock_job = make_mock_job()
        # Check argument parsing
        with mock_datetime(2020, 1, 1, 10, 0, 0) as m:
            assert every().day.until(datetime.datetime(3000, 1, 1, 20, 30)).do(
                mock_job
            ).cancel_after == datetime.datetime(3000, 1, 1, 20, 30, 0)
            assert every().day.until(datetime.datetime(3000, 1, 1, 20, 30, 50)).do(
                mock_job
            ).cancel_after == datetime.datetime(3000, 1, 1, 20, 30, 50)
            assert every().day.until(datetime.time(12, 30)).do(
                mock_job
            ).cancel_after == m.replace(hour=12, minute=30, second=0, microsecond=0)
            assert every().day.until(datetime.time(12, 30, 50)).do(
                mock_job
            ).cancel_after == m.replace(hour=12, minute=30, second=50, microsecond=0)

            assert every().day.until(
                datetime.timedelta(days=40, hours=5, minutes=12, seconds=42)
            ).do(mock_job).cancel_after == datetime.datetime(2020, 2, 10, 15, 12, 42)

            assert every().day.until("10:30").do(mock_job).cancel_after == m.replace(
                hour=10, minute=30, second=0, microsecond=0
            )
            assert every().day.until("10:30:50").do(mock_job).cancel_after == m.replace(
                hour=10, minute=30, second=50, microsecond=0
            )
            assert every().day.until("3000-01-01 10:30").do(
                mock_job
            ).cancel_after == datetime.datetime(3000, 1, 1, 10, 30, 0)
            assert every().day.until("3000-01-01 10:30:50").do(
                mock_job
            ).cancel_after == datetime.datetime(3000, 1, 1, 10, 30, 50)
            assert every().day.until(datetime.datetime(3000, 1, 1, 10, 30, 50)).do(
                mock_job
            ).cancel_after == datetime.datetime(3000, 1, 1, 10, 30, 50)

        # Invalid argument types
        self.assertRaises(TypeError, every().day.until, 123)
        self.assertRaises(ScheduleValueError, every().day.until, "123")
        self.assertRaises(ScheduleValueError, every().day.until, "01-01-3000")

        # Using .until() with moments in the past
        self.assertRaises(
            ScheduleValueError,
            every().day.until,
            datetime.datetime(2019, 12, 31, 23, 59),
        )
        self.assertRaises(
            ScheduleValueError, every().day.until, datetime.timedelta(minutes=-1)
        )
        self.assertRaises(ScheduleValueError, every().day.until, datetime.time(hour=5))

        # Unschedule job after next_run passes the deadline
        schedule.clear()
        with mock_datetime(2020, 1, 1, 11, 35, 10):
            mock_job.reset_mock()
            every(5).seconds.until(datetime.time(11, 35, 20)).do(mock_job)
            with mock_datetime(2020, 1, 1, 11, 35, 15):
                schedule.run_pending()
                assert mock_job.call_count == 1
                assert len(schedule.jobs) == 1
            with mock_datetime(2020, 1, 1, 11, 35, 20):
                schedule.run_all()
                assert mock_job.call_count == 2
                assert len(schedule.jobs) == 0

        # Unschedule job because current execution time has passed deadline
        schedule.clear()
        with mock_datetime(2020, 1, 1, 11, 35, 10):
            mock_job.reset_mock()
            every(5).seconds.until(datetime.time(11, 35, 20)).do(mock_job)
            with mock_datetime(2020, 1, 1, 11, 35, 50):
                schedule.run_pending()
                assert mock_job.call_count == 0
                assert len(schedule.jobs) == 0
Example #38
 def setUp(self):
     schedule.clear()
Example #39
def stopMailer():
    schedule.clear('daily-tasks')
    print('Job Scheduled Stopped!')
Example #40
def main():
    """Declare command line options"""
    parser = ArgumentParser(
        description='ouroboros',
        formatter_class=RawTextHelpFormatter,
        epilog=
        'EXAMPLE: ouroboros -d tcp://1.2.3.4:5678 -i 20 -m container1 container2 -l warn'
    )

    core_group = parser.add_argument_group(
        "Core", "Configuration of core functionality")
    core_group.add_argument('-v',
                            '--version',
                            action='version',
                            version=VERSION)

    core_group.add_argument(
        '-d',
        '--docker-sockets',
        nargs='+',
        default=Config.docker_sockets,
        dest='DOCKER_SOCKETS',
        help='Sockets for docker management\n'
        'DEFAULT: "unix://var/run/docker.sock"\n'
        'EXAMPLE: -d unix://var/run/docker.sock tcp://192.168.1.100:2376')

    core_group.add_argument('-t',
                            '--docker-tls-verify',
                            default=False,
                            dest='DOCKER_TLS_VERIFY',
                            action='store_true',
                            help='Enable docker TLS\n'
                            'REQUIRES: docker cert mount')

    core_group.add_argument(
        '-i',
        '--interval',
        type=int,
        default=Config.interval,
        dest='INTERVAL',
        help='Interval in seconds between checking for updates\n'
        'DEFAULT: 300')

    core_group.add_argument(
        '-l',
        '--log-level',
        choices=['debug', 'info', 'warn', 'error', 'critical'],
        dest='LOG_LEVEL',
        default=Config.log_level,
        help='Set logging level\n'
        'DEFAULT: info')

    core_group.add_argument('-u',
                            '--self-update',
                            default=False,
                            dest='SELF_UPDATE',
                            action='store_true',
                            help='Let ouroboros update itself')

    core_group.add_argument('-o',
                            '--run-once',
                            default=False,
                            action='store_true',
                            dest='RUN_ONCE',
                            help='Single run')

    docker_group = parser.add_argument_group(
        "Docker", "Configuration of docker functionality")
    docker_group.add_argument('-m',
                              '--monitor',
                              nargs='+',
                              default=Config.monitor,
                              dest='MONITOR',
                              help='Which container(s) to monitor\n'
                              'DEFAULT: All')

    docker_group.add_argument('-n',
                              '--ignore',
                              nargs='+',
                              default=Config.ignore,
                              dest='IGNORE',
                              help='Container(s) to ignore\n'
                              'EXAMPLE: -n container1 container2')

    docker_group.add_argument(
        '-k',
        '--label-enable',
        default=False,
        dest='LABEL_ENABLE',
        action='store_true',
        help='Only watch ouroboros enable labeled containers\n'
        'Note: labels take precedence over monitor/ignore'
        'DEFAULT: False')

    docker_group.add_argument('-c',
                              '--cleanup',
                              default=False,
                              dest='CLEANUP',
                              action='store_true',
                              help='Remove old images after updating')

    docker_group.add_argument(
        '-L',
        '--latest',
        default=False,
        dest='LATEST',
        action='store_true',
        help='Check for latest image instead of pulling current tag')

    docker_group.add_argument('-r',
                              '--repo-user',
                              default=None,
                              dest='REPO_USER',
                              help='Private docker registry username\n'
                              'EXAMPLE: [email protected]')

    docker_group.add_argument('-R',
                              '--repo-pass',
                              default=None,
                              dest='REPO_PASS',
                              help='Private docker registry password\n'
                              'EXAMPLE: MyPa$$w0rd')

    data_group = parser.add_argument_group(
        'Data Export', 'Configuration of data export functionality')
    data_group.add_argument('-D',
                            '--data-export',
                            choices=['prometheus', 'influxdb'],
                            default=None,
                            dest='DATA_EXPORT',
                            help='Enable exporting of data for chosen option')

    data_group.add_argument('-a',
                            '--prometheus-addr',
                            default=Config.prometheus_addr,
                            dest='PROMETHEUS_ADDR',
                            help='Bind address to run Prometheus exporter on\n'
                            'DEFAULT: 127.0.0.1')

    data_group.add_argument('-p',
                            '--prometheus-port',
                            type=int,
                            default=Config.prometheus_port,
                            dest='PROMETHEUS_PORT',
                            help='Port to run Prometheus exporter on\n'
                            'DEFAULT: 8000')

    data_group.add_argument('-I',
                            '--influx-url',
                            default=Config.influx_url,
                            dest='INFLUX_URL',
                            help='URL for influxdb\n'
                            'DEFAULT: 127.0.0.1')

    data_group.add_argument('-P',
                            '--influx-port',
                            type=int,
                            default=Config.influx_port,
                            dest='INFLUX_PORT',
                            help='PORT for influxdb\n'
                            'DEFAULT: 8086')

    data_group.add_argument('-U',
                            '--influx-username',
                            default=Config.influx_username,
                            dest='INFLUX_USERNAME',
                            help='Username for influxdb\n'
                            'DEFAULT: root')

    data_group.add_argument('-x',
                            '--influx-password',
                            default=Config.influx_password,
                            dest='INFLUX_PASSWORD',
                            help='Password for influxdb\n'
                            'DEFAULT: root')

    data_group.add_argument(
        '-X',
        '--influx-database',
        default=Config.influx_password,
        dest='INFLUX_DATABASE',
        help='Influx database name. Required if using influxdb')

    data_group.add_argument('-s',
                            '--influx-ssl',
                            default=False,
                            dest='INFLUX_SSL',
                            action='store_true',
                            help='Use SSL when connecting to influxdb')

    data_group.add_argument(
        '-V',
        '--influx-verify-ssl',
        default=False,
        dest='INFLUX_VERIFY_SSL',
        action='store_true',
        help='Verify SSL certificate when connecting to influxdb')

    notification_group = parser.add_argument_group(
        'Notifications', 'Configuration of notification functionality')
    notification_group.add_argument(
        '-w',
        '--webhook-urls',
        nargs='+',
        default=Config.webhook_urls,
        dest='WEBHOOK_URLS',
        help='Webhook POST urls\n'
        'EXAMPLE: -w https://domain.tld/1234/asdf http://123.123.123.123:4040/re235'
    )

    notification_group.add_argument(
        '-y',
        '--pushover-token',
        default=Config.pushover_token,
        dest='PUSHOVER_TOKEN',
        help='Pushover token to authenticate against application\n'
        'EXAMPLE: -y af2r52352asd')

    notification_group.add_argument(
        '-Y',
        '--pushover-device',
        default=Config.pushover_device,
        dest='PUSHOVER_DEVICE',
        help='Device to receive pushover notification\n'
        'EXAMPLE: -Y SamsungGalaxyS8')

    notification_group.add_argument('-z',
                                    '--pushover-user',
                                    default=Config.pushover_user,
                                    dest='PUSHOVER_USER',
                                    help='Pushover user bound to application\n'
                                    'EXAMPLE: -z asdfweawefasdfawef')

    notification_group.add_argument('-e',
                                    '--smtp-host',
                                    default=Config.smtp_host,
                                    dest='SMTP_HOST',
                                    help='SMTP relay hostname\n'
                                    'EXAMPLE: -e smtp.gmail.com')

    notification_group.add_argument('-E',
                                    '--smtp-port',
                                    default=Config.smtp_port,
                                    type=int,
                                    dest='SMTP_PORT',
                                    help='SMTP relay port\n'
                                    'EXAMPLE: -E 587')

    notification_group.add_argument('-f',
                                    '--smtp-starttls',
                                    default=False,
                                    dest='SMTP_STARTTLS',
                                    action='store_true',
                                    help='SMTP relay uses STARTTLS')

    notification_group.add_argument('-F',
                                    '--smtp-username',
                                    default=Config.smtp_username,
                                    dest='SMTP_USERNAME',
                                    help='SMTP relay username\n'
                                    'EXAMPLE: -F [email protected]')

    notification_group.add_argument('-g',
                                    '--smtp-password',
                                    default=Config.smtp_password,
                                    dest='SMTP_PASSWORD',
                                    help='SMTP relay password\n'
                                    'EXAMPLE: -g MyPa$$w0rd')

    notification_group.add_argument(
        '-G',
        '--smtp-recipients',
        default=Config.smtp_recipients,
        dest='SMTP_RECIPIENTS',
        nargs='+',
        help='SMTP notification recipients\n'
        'EXAMPLE: -G [email protected] [email protected]')

    notification_group.add_argument('-j',
                                    '--smtp-from-email',
                                    default=Config.smtp_from_email,
                                    dest='SMTP_FROM_EMAIL',
                                    help='SMTP from email\n'
                                    'EXAMPLE: -g [email protected]')

    notification_group.add_argument('-J',
                                    '--smtp-from-name',
                                    default=Config.smtp_from_name,
                                    dest='SMTP_FROM_NAME',
                                    help='SMTP from name\n'
                                    'DEFAULT: Ouroboros')

    args = parser.parse_args()

    if environ.get('LOG_LEVEL'):
        log_level = environ.get('LOG_LEVEL')
    else:
        log_level = args.LOG_LEVEL
    ol = OuroborosLogger(level=log_level)
    ol.logger.info('Version: %s-%s', VERSION, BRANCH)
    config = Config(environment_vars=environ, cli_args=args)
    config_dict = {
        key: value
        for key, value in vars(config).items() if key.upper() in config.options
    }
    ol.logger.debug("Ouroboros configuration: %s", config_dict)

    data_manager = DataManager(config)
    notification_manager = NotificationManager(config, data_manager)

    for socket in config.docker_sockets:
        docker = Docker(socket, config, data_manager, notification_manager)
        schedule.every(config.interval).seconds.do(
            docker.update_containers).tag(f'update-containers-{socket}')

    schedule.run_all()

    if args.RUN_ONCE:
        for socket in config.docker_sockets:
            schedule.clear(f'update-containers-{socket}')

    while schedule.jobs:
        schedule.run_pending()
        sleep(1)
Example #41
def cancel_scedule():
    schedule.clear('score_updates')
Example #42
        dao.session.commit()
        
        print("....................FIM.................", trigger.id)


    return _function




def run_threaded(job_func):
    job_thread = threading.Thread(target=job_func)
    job_thread.start()

if __name__ == '__main__':
    dao = DAO()
    result = dao.select_trigger()
    for i in result:
        job = make_func(i)
        schedule.every(i.period).seconds.do(run_threaded, job)

    while True:
        try:
            schedule.run_pending()
            time.sleep(1)

        except KeyboardInterrupt:
            schedule.clear()
            print("\nBye")
            exit(0)
Example #43
        dao.session.commit()
        
        print("....................FIM.................", trigger.id)


    return _function




def run_threaded(job_func):
    job_thread = threading.Thread(target=job_func)
    job_thread.start()

if __name__ == '__main__':
    dao = DAO()
    result = dao.select_trigger()
    for i in result:
        job = make_func(i)
        schedule.every(i.period).seconds.do(run_threaded, job)

    while True:
        try:
            schedule.run_pending()
            time.sleep(1)

        except KeyboardInterrupt:
            schedule.clear()
            print("\nBye")
            exit(0)
Example #44
def work_schedule():
    schedule.clear()
    schedule.every(1).days.do(work_once)
    while True:
        schedule.run_pending()
Example #45
 def tearDown(self):
     schedule.clear()
Example #46
    def test_run_continuously(self):
        """Check that run_continuously() runs pending jobs.
        We do this by overriding datetime.datetime with mock objects
        that represent increasing system times.

        Please note that it is *intended behavior that run_continuously()
        does not run missed jobs*. For example, if you've registered a job
        that should run every minute and you set a continuous run interval
        of one hour then your job won't be run 60 times at each interval but
        only once.
        """
        # Monkey-patch datetime.datetime to get predictable (=testable) results
        class MockDate(datetime.datetime):
            @classmethod
            def today(cls):
                return cls(2010, 1, 6)

            @classmethod
            def now(cls):
                return cls(2010, 1, 6, 12, 15, 0)
        original_datetime = datetime.datetime
        datetime.datetime = MockDate

        mock_job = make_mock_job()

        # Secondly Tests
        # Initialize everything.
        schedule.clear()
        mock_job.reset_mock()
        every().second.do(mock_job)

        # Start a new continuous run thread.
        stop_thread_flag = schedule.run_continuously(0)
        # Allow a small time for separate thread to register time stamps.
        time.sleep(0.001)

        assert mock_job.call_count == 0

        # Secondly first second.
        class MockDate(datetime.datetime):
            @classmethod
            def today(cls):
                return cls(2010, 1, 6)

            @classmethod
            def now(cls):
                return cls(2010, 1, 6, 12, 15, 1)
        mock_job.reset_mock()
        datetime.datetime = MockDate
        # Allow a small time for separate thread to register time stamps.
        time.sleep(0.001)

        assert mock_job.call_count == 1

        # Secondly second second.
        class MockDate(datetime.datetime):
            @classmethod
            def today(cls):
                return cls(2010, 1, 6)

            @classmethod
            def now(cls):
                return cls(2010, 1, 6, 12, 15, 2)
        datetime.datetime = MockDate
        # Allow a small time for separate thread to register time stamps.
        time.sleep(0.001)

        assert mock_job.call_count == 2

        # Minutely Tests
        # (Re)Initialize everything.
        schedule.clear()
        mock_job.reset_mock()
        stop_thread_flag.set()
        every().minute.do(mock_job)

        # Start a new continuous run thread.
        stop_thread_flag = schedule.run_continuously(0)
        # Allow a small time for separate thread to register time stamps.
        time.sleep(0.001)

        assert mock_job.call_count == 0

        # Minutely first minute.
        class MockDate(datetime.datetime):
            @classmethod
            def today(cls):
                return cls(2010, 1, 6)

            @classmethod
            def now(cls):
                return cls(2010, 1, 6, 12, 16, 2)
        mock_job.reset_mock()
        datetime.datetime = MockDate
        # Allow a small time for separate thread to register time stamps.
        time.sleep(0.001)

        assert mock_job.call_count == 1

        # Minutely second minute.
        class MockDate(datetime.datetime):
            @classmethod
            def today(cls):
                return cls(2010, 1, 6)

            @classmethod
            def now(cls):
                return cls(2010, 1, 6, 12, 17, 2)
        datetime.datetime = MockDate
        # Allow a small time for separate thread to register time stamps.
        time.sleep(0.001)

        assert mock_job.call_count == 2

        # Hourly Tests
        # (Re)Initialize everything.
        schedule.clear()
        mock_job.reset_mock()
        stop_thread_flag.set()
        every().hour.do(mock_job)

        # Start a new continuous run thread.
        stop_thread_flag = schedule.run_continuously(0)
        # Allow a small time for separate thread to register time stamps.
        time.sleep(0.001)

        assert mock_job.call_count == 0

        # Hourly first hour.
        class MockDate(datetime.datetime):
            @classmethod
            def today(cls):
                return cls(2010, 1, 6)

            @classmethod
            def now(cls):
                return cls(2010, 1, 6, 13, 17, 2)
        mock_job.reset_mock()
        datetime.datetime = MockDate
        # Allow a small time for separate thread to register time stamps.
        time.sleep(0.001)

        assert mock_job.call_count == 1

        # Hourly second hour.
        class MockDate(datetime.datetime):
            @classmethod
            def today(cls):
                return cls(2010, 1, 6)

            @classmethod
            def now(cls):
                return cls(2010, 1, 6, 14, 17, 2)
        datetime.datetime = MockDate
        # Allow a small time for separate thread to register time stamps.
        time.sleep(0.001)

        assert mock_job.call_count == 2

        # Daily Tests
        # (Re)Initialize everything.
        schedule.clear()
        mock_job.reset_mock()
        stop_thread_flag.set()
        every().day.do(mock_job)

        # Start a new continuous run thread.
        stop_thread_flag = schedule.run_continuously(0)
        # Allow a small time for separate thread to register time stamps.
        time.sleep(0.001)

        assert mock_job.call_count == 0

        # Daily first day.
        class MockDate(datetime.datetime):
            @classmethod
            def today(cls):
                return cls(2010, 1, 6)

            @classmethod
            def now(cls):
                return cls(2010, 1, 7, 14, 17, 2)
        mock_job.reset_mock()
        datetime.datetime = MockDate
        # Allow a small time for separate thread to register time stamps.
        time.sleep(0.001)

        assert mock_job.call_count == 1

        # Daily second day.
        class MockDate(datetime.datetime):
            @classmethod
            def today(cls):
                return cls(2010, 1, 6)

            @classmethod
            def now(cls):
                return cls(2010, 1, 8, 14, 17, 2)
        datetime.datetime = MockDate
        # Allow a small time for separate thread to register time stamps.
        time.sleep(0.001)

        assert mock_job.call_count == 2

        schedule.clear()
        mock_job.reset_mock()
        stop_thread_flag.set()
        datetime.datetime = original_datetime
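
Example #46 drives the scheduler through schedule.run_continuously(), which returns a stop flag. That helper is not part of the upstream schedule API; the library's documentation instead describes a background-thread recipe with the same shape. A minimal sketch of such a helper, assuming it is defined alongside the test rather than provided by the library:

import threading
import time
import schedule

def run_continuously(interval=1):
    """Run pending jobs in a daemon thread every `interval` seconds.
    Returns a threading.Event; call .set() on it to stop the loop."""
    stop_flag = threading.Event()

    def loop():
        while not stop_flag.is_set():
            schedule.run_pending()
            time.sleep(interval)

    threading.Thread(target=loop, daemon=True).start()
    return stop_flag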
Example #47
 def init_scheduler(self):
     print("init_scheduler")
     schedule.clear()
     schedule.every().day.at(self.wecker_startzeit).do(self.start_dimming)
Example #48
    def run(self):

        site_ip = urlparse(self._ip)
        # path = site_ip.path


        # if path == "":
        #   path = "/"

        # HOST = url[2]
        HOST = site_ip[2]
        #HOST = self._ip

        request = GETREQUEST + "Host: " + self._url + CRLF + REQUEST_TERMINAL
        sock = None

        # Try first connection to check if the url or path is different than the standard root and given url

        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # sock.settimeout(0.30)
            # sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            sock.connect((HOST, PORT))

            sock.send(request.encode())
            response = http.client.HTTPResponse(sock)
            response.begin()
            response.read()
            #data = (sock.recv(1000000))

            #source = FakeSocket(data)
            #response = http.client.HTTPResponse(source)
            #response.begin()
            if response.status == 301:
                self._path = response.getheader("Location")
            elif response.status == 302:
                self._url = response.getheader("Location")
                temp = urlparse(self._url)
                self._ip = socket.gethostbyname_ex(temp[1])
                self._path = temp[2]

            #sock.shutdown(1)
            #sock.close()
        except Exception as ex:
            print("Could not open new socket")
            self.bucket.put(sys.exc_info())

        data = []

        request = self.createRequestString(self._url, self._path)
        self.open_connection()
        while not self._stopper.isSet():
            try:
                if self._stopper.isSet():
                    schedule.cancel_job(self._job)
                    schedule.clear()
                    #for sock in self._connections:
                    #    sock.shutdown(1)
                    #    sock.close()
                    break
                schedule.run_pending()
                for sock in self._connections:
                    try:
                        sock.send(request.encode())
                        response = http.client.HTTPResponse(sock)
                        response.begin()
                        response.read()

                        #data = (sock.recv(8192))

                        #if data == "":
                        #    raise Exception("Server closed the connection")
                        # temp = data.decode("utf-8")
                        #source = FakeSocket(data)
                        #response = http.client.HTTPResponse(source)
                        #response.begin()
                        if response.status != 200:
                            raise Exception("Server stopped responding 200 OK")
                    except Exception as ex:
                        print(ex)
                        raise Exception("Failure to send request or decrypt response")

                # print(response.status, response.code)
                time.sleep(TIME_INTERVAL)
            except Exception as ex:
                # print("Connection forcibly closed, could not send request")
                print(ex)
                self.stopit()
                schedule.cancel_job(self._job)
                #for sock in self._connections:
                #    sock.shutdown(1)
                #    sock.close()
                self.bucket.put(sys.exc_info())
                # raise Exception("Connection forcibly closed")

        print("Closing Sockets")
        for sock in self._connections:
           sock.shutdown(socket.SHUT_RDWR)
           sock.close()
        print("Finished closing sockets " + str(len(self._connections)))
Example #49
 def handleAction(self, actionTitle, actionThumb, actionPath, actionTag):
     log('handleAction')
     liz =  xbmcgui.ListItem(actionTitle, thumbnailImage=actionThumb, path=actionPath)
     liz.setProperty('IsPlayable', 'true')
     self.myPlayer.play(actionPath, liz)
     if actionTag: schedule.clear(actionTag)
Example #50
 def stop(self):
     self.theadStop = True
     schedule.clear()
     self.join()
Example #51
 def setUp(self):
     schedule.clear()
Example #52
def restart():
    getLogger(__name__).info('>> restarting periodic-data processing module')
    schedule.clear()
    _schedule_job()
Example #53
 def set_alarm(self):
     schedule.clear()
     #schedule.every().day.at("06:30").do(refresh_browser)
     #schedule.every().day.at("12:30").do(refresh_browser)
     schedule.every().day.at("07:30").do(briefing_weather)