def run_scheduler(self):
    # Keep the Job handle returned by .do(); cancel_job() expects the Job
    # instance, not the bound method self.job.
    job = schedule.every(24).days.do(self.job)
    while True:
        schedule.run_pending()
        if self.isCancelSchedule:
            schedule.cancel_job(job)
            return
        time.sleep(1)  # avoid a busy-wait between polls
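Several snippets in this listing pass the wrong object to schedule.cancel_job(). A minimal sketch of the intended pattern, assuming nothing beyond the schedule library itself (heartbeat and the interval are illustrative):

import schedule
import time

def heartbeat():
    print("tick")

# every(...).do(...) returns a Job; keep the handle for later cancellation.
job = schedule.every(10).seconds.do(heartbeat)

for _ in range(3):
    schedule.run_pending()
    time.sleep(1)

# Pass the Job instance itself; cancel_job() silently ignores anything else.
schedule.cancel_job(job)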
def wrapper(todo):
    args, kwargs = (), {}
    task_settings = self.settings
    job = None

    def _doer():
        self.spinned = False
        with self.app.app_context():
            todo(*args, **kwargs)
        self.spinned = True
        self.spinned_once = True

    if task_settings.time:
        job = getattr(schedule.every(), task_settings.every)\
            .at(task_settings.time.strftime('%H:%M'))\
            .do(_doer)
    else:
        job = getattr(schedule.every(), task_settings.every).do(_doer)
    while not self.cut_circut:
        schedule.run_pending()
        self.sleep()
    schedule.cancel_job(job)
    self.dead = True
def delete(self):
    task = self.entry.get()
    for e in self.tasks:
        if task == e.split(':')[1]:
            self.tasks.discard(e)
            # Note: cancel_job() silently ignores anything that is not a
            # scheduled Job instance; passing the task name string here is a
            # no-op unless the Job handle is looked up first.
            schedule.cancel_job(task)
            break
def _delayed_register(self):
    try:
        self.proxy = self._bus.get_object(
            'org.gnome.Mutter.IdleMonitor',
            '/org/gnome/Mutter/IdleMonitor/Core')
        self.idlemon = dbus.Interface(self.proxy,
                                      "org.gnome.Mutter.IdleMonitor")
        self._idle_watch_id = self.idlemon.AddIdleWatch(self._timeout)
        self._active_watch_id = self.idlemon.AddUserActiveWatch()
        self.idlemon.connect_to_signal("WatchFired", self.isIdle)
        self._bsensor.register()
        idleing = self.idlemon.GetIdletime()
        if idleing > self._timeout:
            self._bsensor.turnOff()
            self._is_idle = True
        else:
            self._bsensor.turnOn()
            self._is_idle = False
        if self._register_delay_id is not None:
            schedule.cancel_job(self._register_delay_id)
        if self._IdelingPolling is not None:
            schedule.cancel_job(self._IdelingPolling)
        self._IdelingPolling = schedule.every(2).minutes.do(self.isIdleDead)
    except dbus.exceptions.DBusException:
        self._log.warning(
            "Failed to connect to Mutter's DBus signals! "
            "Trying again in 30 seconds...")
def schedule_show(start_time, end_time, play_list_entries, loop=False):
    """Establish scheduled tasks necessary for a future show."""
    # Remove any existing show-related tasks. (The original used the
    # Python 2 attribute func_name; __name__ is the Python 3 equivalent.)
    show_task_names = ('cache_files_task', 'pre_show_task',
                       'showtime_task', 'post_show_task')
    jobs_to_cancel = []
    for job in schedule.jobs:
        if job.job_func.func.__name__ in show_task_names:
            jobs_to_cancel.append(job)
    for job in jobs_to_cancel:
        schedule.cancel_job(job)
    try:
        start_time_value = arrow.get(start_time, 'HH:mm')
    except ValueError:
        start_time_value = arrow.get(start_time, 'HH:mm:ss')
    try:
        end_time_value = arrow.get(end_time, 'HH:mm')
    except ValueError:
        end_time_value = arrow.get(end_time, 'HH:mm:ss')
    pre_show_time = (start_time_value -
                     datetime.timedelta(minutes=5)).format('HH:mm')
    schedule.every(1).seconds.do(cache_files_task, play_list_entries)
    schedule.every().day.at(pre_show_time).do(pre_show_task)
    schedule.every().day.at(start_time_value.format('HH:mm')).do(
        showtime_task, play_list_entries, loop=loop)
    schedule.every().day.at(end_time_value.format('HH:mm')).do(post_show_task)
def remind(email):
    subject = 'REMINDER!!!!!'
    message = "Hi, aren't you forgetting something?"
    email_from = settings.EMAIL_HOST_USER
    recipient_list = [email]
    send_mail(subject, message, email_from, recipient_list)
    # The original called schedule.cancel_job(job) with an undefined name;
    # returning CancelJob is the supported way for a job to unschedule itself.
    return schedule.CancelJob
def no_schedule(message):
    try:
        if is_start(info, message.chat.id):
            is_admin = False
            admins = bot.get_chat_administrators(message.chat.id)
            for admin in admins:
                if admin.user.id == message.from_user.id:
                    is_admin = True
            if is_admin:
                group_index = index_finder(info, message.chat.id)
                try:
                    pm_sched = info[group_index]['schedule_mute']['pm_sched']
                    schedule.cancel_job(pm_sched)
                except KeyError:
                    pass
            else:
                bot.reply_to(message, "Sorry, but you're not an admin!")
        else:
            bot.send_message(message.chat.id, 'Please start the bot first')
    except Exception:
        pass
def schedule_show(start_time, end_time, play_list_entries, loop=False):
    """Establish scheduled tasks necessary for a future show."""
    # Remove any existing show-related tasks (__name__ replaces the
    # Python 2 func_name attribute here as well).
    show_task_names = ('cache_files_task', 'pre_show_task',
                       'showtime_task', 'post_show_task')
    jobs_to_cancel = []
    for job in schedule.jobs:
        if job.job_func.func.__name__ in show_task_names:
            jobs_to_cancel.append(job)
    for job in jobs_to_cancel:
        schedule.cancel_job(job)
    try:
        start_time_value = arrow.get(start_time, 'HH:mm')
    except ValueError:
        start_time_value = arrow.get(start_time, 'HH:mm:ss')
    try:
        end_time_value = arrow.get(end_time, 'HH:mm')
    except ValueError:
        end_time_value = arrow.get(end_time, 'HH:mm:ss')
    pre_show_time = (start_time_value -
                     datetime.timedelta(minutes=2)).format('HH:mm')
    schedule.every(1).seconds.do(cache_files_task, play_list_entries)
    schedule.every().day.at(pre_show_time).do(pre_show_task)
    schedule.every().day.at(start_time_value.format('HH:mm')).do(
        showtime_task, play_list_entries, loop=loop)
    schedule.every().day.at(end_time_value.format('HH:mm')).do(post_show_task)
def job():
    global BPA
    state = -1
    for xh in xhs:
        O_O["xh"] = xh
        O_O["tw"] = str(round(random.uniform(36, 37), 1))
        I_I["xh"] = xh
        data["param"] = str(O_O)
        checkdata["param"] = str(I_I)
        O_O["lxfs"] = "".join(random.sample(sjHead, 1)) + str(
            random.randint(1, 99999999))
        # Note: the original header key was "User-Agents"; the standard
        # HTTP header name is "User-Agent".
        header["User-Agent"] = "".join(random.sample(UserAgents, 1))
        print(" ", xh, ":")
        r = requests.post(url=url, data=checkdata, headers=header)
        print(" ", r, r.text)
        text = json.loads(r.text)
        if int(text["data"]) != -1:
            r = requests.post(url=url, data=data, headers=header)
            print(" ", O_O["xh"], ":")
            print(" ", r, r.text)
            r = requests.post(url=url, data=checkdata, headers=header)
            print(" ", r, r.text)
            text = json.loads(r.text)
            if int(text["data"]) != -1:
                state = text["data"]
                print(" Safety check-in not submitted, status code:", state)
    if state == -1:
        # BPAN is assumed to be the Job handle created when this job
        # was scheduled.
        schedule.cancel_job(BPAN)
        BPA = 0
        print("Check-in result:", times, "all passed!!!")
        print(content)
    time.sleep(1)
def service_routine():
    job_regular_at_11 = schedule.every().day.at("10:45").do(
        prediction_task, FLAG_EMA_ORDER_1)
    job_regular_at_15 = schedule.every().day.at("14:45").do(
        prediction_task, FLAG_EMA_ORDER_2)
    job_regular_at_19 = schedule.every().day.at("18:45").do(
        prediction_task, FLAG_EMA_ORDER_3)
    job_regular_at_23 = schedule.every().day.at("22:45").do(
        prediction_task, FLAG_EMA_ORDER_4)
    job_initial_train = schedule.every().day.at("23:50").do(
        prediction_task, FLAG_INITIAL_MODEL_TRAIN)
    while run_service:
        try:
            schedule.run_pending()
            time.sleep(1)
        except KeyboardInterrupt:
            stop()
            schedule.cancel_job(job=job_regular_at_11)
            schedule.cancel_job(job=job_regular_at_15)
            schedule.cancel_job(job=job_regular_at_19)
            schedule.cancel_job(job=job_regular_at_23)
            schedule.cancel_job(job=job_initial_train)
            exit(0)
async def well_time(self, ctx, hour: int = None, min: int = 0):
    now = datetime.now(timezone.utc)
    today = self.today()
    if hour is None:
        next_period = now + (today + self.time - now) % timedelta(days=1)
        await ctx.send(
            f"Next well day ({(next_period - self.time).strftime(r'%b %d')}) "
            f"starts on <t:{cfg.timestamp(next_period)}>.\n"
            f"Use {cfg.Config.config['prefix']}well_time [hour] [min] "
            f"(in GMT) to adjust well time.")
    else:
        self.time = timedelta(hours=hour, minutes=min)
        self.hour = int(self.time.total_seconds()) // 3600
        self.minute = int(self.time.total_seconds()) % 3600 // 60
        local_time = (today + self.time).astimezone()
        schedule.cancel_job(self.schedule)
        self.schedule = schedule.every().day.at(
            f'{local_time.hour:02d}:{local_time.minute:02d}').do(
                self.check).tag('cogs.well')
        cursor = cfg.db.cursor()
        cursor.execute(
            f'''UPDATE settings SET value = '{self.hour:02d}:{self.minute:02d}'
                WHERE setting = 'well_time' ''')
        cfg.db.commit()
        next_period = now + (today + self.time - now) % timedelta(days=1)
        await ctx.send(
            f"Next well day ({(next_period - self.time).strftime(r'%b %d')}) "
            f"starts on <t:{cfg.timestamp(next_period)}>.")
def service_routine():
    # TODO: for debugging, run a single task directly and comment the
    # scheduled jobs below out:
    # prediction_task(FLAG_EMA_ORDER_3)  # for test
    job_regular_at_11 = schedule.every().day.at("10:45").do(
        prediction_task, FLAG_EMA_ORDER_1)
    job_regular_at_15 = schedule.every().day.at("14:45").do(
        prediction_task, FLAG_EMA_ORDER_2)
    job_regular_at_19 = schedule.every().day.at("18:45").do(
        prediction_task, FLAG_EMA_ORDER_3)
    job_regular_at_23 = schedule.every().day.at("22:45").do(
        prediction_task, FLAG_EMA_ORDER_4)
    # TODO: on day SURVEY_DURATION - 1, initialize the model at 23:30
    job_initial_train = schedule.every().day.at("23:30").do(
        prediction_task, FLAG_INITIAL_MODEL_TRAIN)  # 23:15
    while run_service:
        try:
            schedule.run_pending()
            time.sleep(1)
        except KeyboardInterrupt:
            stop()
            schedule.cancel_job(job=job_regular_at_11)
            schedule.cancel_job(job=job_regular_at_15)
            schedule.cancel_job(job=job_regular_at_19)
            schedule.cancel_job(job=job_regular_at_23)
            schedule.cancel_job(job=job_initial_train)  # 10:00 on the last day
            exit(0)
def jobs_maintainer():
    # Get all active symbols.
    cursor = order_db.find({
        '$or': [{'status': OrderStatus.WAITING},
                {'status': OrderStatus.PLACED}]
    }).distinct('symbol')
    working = set()
    # Run jobs for active symbols that have no worker yet.
    for symbol in cursor:
        if symbol not in jobs_pool:
            log.info('Worker started, symbol: %s', symbol)
            jobs_pool[symbol] = schedule.every(JOB_INTERVAL).seconds.do(
                worker, symbol=symbol)
            jobs_pool[symbol].run()
        working.add(symbol)
    # Remove jobs whose symbols are no longer active.
    for k in list(jobs_pool.keys()):
        if k not in working:
            log.info('Worker stopped, symbol: %s', k)
            schedule.cancel_job(jobs_pool[k])
            jobs_pool.pop(k)
def voting():
    palm = set()  # was `palm = {}`, which is a dict and has no .add()

    def passing():  # returns False
        return False

    for n in range(len(aux)):  # 'getting gems'? Or energy..
        candide = set(need(n).mot)
        candido = set(need(n).submot)
        palm.update(candide)
        palm.update(candido)
    for word in palm:  # say each collected word
        say(word)
    # .do() must be given the callable itself, and the Job handle is kept
    # so it can be cancelled later; the scheduled job just returns False.
    job = schedule.every(3).minutes.do(passing)
    message = hear.get["ts"]
    while True:
        time.sleep(1)
        # The original `if "next" or "siguiente" in ...` was always true;
        # each membership test needs its own `in`.
        if "next" in hear.get["text"] or "siguiente" in hear.get["text"]:
            schedule.cancel_job(job)
            break
def run_service_once(self, service_detail):
    (username, application_id, service_name,
     end, service_instance_id) = service_detail[:5]
    print("+MSG TO SLCM TO START \t\t", service_instance_id)
    # Send request to the service lifecycle manager to start the service.
    self.send_request_to_service_life_cyle(username, application_id,
                                           service_name,
                                           service_instance_id, "start")
    data = {
        "service_id": service_instance_id,
        "username": username,
        "application_id": application_id,
        "service_name": service_name,
        "end": end
    }
    self.started[service_instance_id] = data
    if service_instance_id in self.single_instances:
        del self.single_instances[service_instance_id]
    # exit_service receives the details as a single tuple argument.
    job_id = schedule.every().day.at(end).do(
        self.exit_service,
        (service_instance_id, username, application_id, service_name))
    if service_instance_id in self.job_dict:
        schedule.cancel_job(self.job_dict[service_instance_id])
    self.job_dict[service_instance_id] = job_id
def remove_job_by_id(message_id):
    logger.info(
        f"removing job of message {message_id}; "
        f"message_id in job_dict: {message_id in job_dict}")
    if message_id in job_dict:
        schedule.cancel_job(job_dict[message_id])
        del job_dict[message_id]
        logger.info(f"{message_id} is removed")
def updat(self):
    task = self.entry.get()
    for e in self.tasks:
        if task in e:
            self.tasks.discard(e)
            # As in delete() above, cancel_job() needs the Job instance;
            # passing the task name string is silently ignored.
            schedule.cancel_job(task)
            self.add()
            break
def delete_schedule(tags):
    """Delete scheduled jobs whose tags contain all of the given tags.

    Using both a name and a time in the tags tuple makes the match precise.
    """
    # Iterate over a copy: cancel_job() mutates schedule.jobs.
    for item in schedule.jobs[:]:
        if set(tags).issubset(item.tags):
            schedule.cancel_job(item)
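For tag-based cleanup like this, the library also ships schedule.clear(tag), which cancels every job carrying the tag in one call; a small sketch (backup and the tag names are illustrative):

import schedule

def backup():
    pass

schedule.every().day.at("02:00").do(backup).tag('maintenance', 'nightly')

# clear(tag) cancels all jobs marked with that tag.
schedule.clear('nightly')
assert schedule.jobs == []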
def stop_access_thread(self):
    self._access_check_stop = True
    if self._access_check is not None:
        self._access_check.join()
        self._access_check = None
    if self._shed_Job is not None:
        schedule.cancel_job(self._shed_Job)
        self._shed_Job = None
def edit_remind(id, num_text):
    """Edit an existing reminder."""
    remind = reminds[id][num_text[0] - 1]
    reminds[id].remove(remind)
    schedule.cancel_job(remind['job'])
    add_remind([remind['time'], num_text[1], id])
    num_text[0], num_text[1] = None, None
def reconfigure_urgent(uid, cid, new_interval, bot_callback):
    hours, minutes = map(int, new_interval.split(':'))
    total_mins = hours * 60 + minutes
    if uid in urgent_task:
        schedule.cancel_job(urgent_task[uid])  # cancel the previous job
    # Store the new Job handle so the next reconfigure can cancel it too;
    # the original discarded the return value of .do().
    urgent_task[uid] = schedule.every(total_mins).minutes.do(
        bot_callback, uid, cid)
def delete_job(text):
    # Iterate over a copy: cancel_job() mutates schedule.jobs.
    for job in list(schedule.jobs):
        if text in job.job_func.args[0].args[0]:
            schedule.cancel_job(job)
    for i in p.scan_iter():
        if p.get(i).decode('utf-8') == text:
            p.delete(i)
            return
def all(message):
    now = datetime.datetime.now()
    m = now.minute
    h = now.hour
    if h == 16 and m == 16:
        s = requests.Session()
        s.get('https://api.telegram.org/bot{0}/deletemessage'
              '?message_id={1}&chat_id={2}'.format(
                  '1293717665:AAEb7-Eqv15tUO1khA30w_CU7mw1cT56J8w',
                  message.message_id, message.chat.id))
        schedule.cancel_job(job1)
def test_idle_seconds(self):
    assert schedule.next_run() is None
    mock_job = make_mock_job()
    with mock_datetime(2020, 12, 9, 21, 46):
        job = every().hour.do(mock_job)
        assert schedule.idle_seconds() == 60 * 60
        schedule.cancel_job(job)
        assert schedule.next_run() is None
def stop(self):
    schedule.cancel_job(self._daily_job)
    schedule.cancel_job(self._job)
    for d in self._paths:
        try:
            d["f"].close()
        except Exception:
            self.__logger.exception("Failed to close the file!")
def _continuous_loop(config, database, ts_connection_pool):
    #######################################
    # Begin connecting to Teamspeak
    #######################################
    bot_loop_forever = True
    while bot_loop_forever:
        try:
            LOG.info("Connecting to Teamspeak server...")
            bot_instance = None
            audit_trigger_job = None
            http_server = None
            try:
                bot_instance = Bot(database, ts_connection_pool, config)
                http_server = create_http_server(bot_instance,
                                                 port=config.ipc_port)
                http_server.start()
                LOG.info("BOT Database Audit policies initiating.")
                # Always audit users on initialize if the user audit date is
                # up (in case the script is reloaded several times before the
                # audit interval hits, so the user database stays accurate).
                bot_instance.trigger_user_audit()
                # Set the audit job to run every X days.
                audit_trigger_job = schedule.every(
                    config.audit_interval).days.at("06:00").do(
                        bot_instance.trigger_user_audit)
                bot_instance.listen_for_events()
            finally:
                if bot_instance is not None:
                    bot_instance.close()
                if audit_trigger_job is not None:
                    schedule.cancel_job(audit_trigger_job)
                if http_server is not None:
                    LOG.info("Stopping Http Server")
                    http_server.stop()
        except (KeyboardInterrupt, SystemExit):
            LOG.info("Shutdown signal received. Shutting down.")
            bot_loop_forever = False  # stop loop
        except (ts3.query.TS3TransportError,
                ConnectionInitializationException,
                ConnectionRefusedError, OSError) as ex:
            LOG.warning(
                "A connection problem with the Teamspeak server occurred. "
                "Trying again in %s seconds...",
                config.bot_sleep_conn_lost, exc_info=ex)
            time.sleep(config.bot_sleep_conn_lost)
        except Exception as ex:
            LOG.warning(
                "Unexpected exception occurred. Trying again in %s seconds...",
                config.bot_sleep_conn_lost, exc_info=ex)
            time.sleep(config.bot_sleep_conn_lost)
def main():
    global dq, meta_data, i
    while True:
        while len(dq) > 0:
            i = i + 1
            meta_data = dq.popleft()
            if meta_data["form"] == "run":
                if (meta_data["days"] == "everyday"
                        and meta_data["request_type"] != "immediate"):
                    regular(meta_data["days"], meta_data["start_time"],
                            meta_data["duration"], meta_data["algo"])
                elif meta_data["days"] != "everyday" and meta_data["days"] != "":
                    notregular(meta_data["days"], meta_data["start_time"],
                               meta_data["duration"], meta_data["algo"])
                elif meta_data["request_type"] == "immediate":
                    print("\n [Schedular] : Scheduling immediately Service "
                          "with id : ", meta_data["algoid"])
                    curpath = str(os.path.dirname(os.path.realpath(__file__)))
                    with open(curpath + "/dashboard/running.txt", "a") as file:
                        file.write(str(meta_data["algoid"]) + " at location " +
                                   meta_data["location"] +
                                   " is scheduled immediately\n")
                    cm.Schedular_to_ServiceLifeCycle_Producer_interface(
                        meta_data)
                elif meta_data["request_type"] == "periodic":
                    print("in periodic scheduling")
                    period(meta_data["duration"], meta_data["start_time"],
                           meta_data["algo"])
                start_new_thread(pending, ())
            else:
                # Cancelling via eval() assumes meta_data["algo"] names a
                # Job handle in scope; a lookup table would be safer.
                schedule.cancel_job(eval(meta_data["algo"]))
def connect_after_close(self, err):
    """Reconnect after the socket has been closed."""
    if not err:
        # Cancel the job created by schedule.every() in handle_close.
        schedule.cancel_job(self.job)
        self.timer = 0
        self.remote.emitter.emit('reconnect')
def wrapper(*args, **kwargs):
    try:
        return job_func(*args, **kwargs)
    except Exception:
        logger.warning(traceback.format_exc())
        # sentry.captureException(exc_info=True)
        if cancel_on_failure:
            logger.warning("Exception; ending job: {}".format(
                schedule.CancelJob))
            # Returning schedule.CancelJob is what actually unschedules the
            # job; the original cancel_job(job_func) call on the raw function
            # was a no-op and is dropped here.
            return schedule.CancelJob
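The self-cancelling behaviour relied on above comes from the sentinel itself: when a job function returns schedule.CancelJob, the scheduler unschedules it after the run. A minimal standalone sketch:

import schedule

def run_once():
    print("ran once")
    return schedule.CancelJob  # the scheduler removes this job after the run

schedule.every().second.do(run_once)
schedule.run_all()  # runs every job immediately, regardless of schedule
assert schedule.jobs == []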
def clear(self) -> None:
    try:
        # Index 4 of each cron entry is assumed to hold the Job handle.
        for cron in self._active_crons:
            schedule.cancel_job(cron[4])
        self._active_crons.clear()
        _LOGGER.debug("All reservation crons erased")
    except Exception as e:
        _LOGGER.error("Internal error while clearing all reservation crons")
        _LOGGER.error(e)
def stop(self):
    if self._IdelingPolling is not None:
        schedule.cancel_job(self._IdelingPolling)
    if self._register_delay_id is not None:
        schedule.cancel_job(self._register_delay_id)
    if self._idle_watch_id is not None:
        self.idlemon.RemoveWatch(self._idle_watch_id)
    if self._active_watch_id is not None:
        self.idlemon.RemoveWatch(self._active_watch_id)
    self._bsensor.turnOff()
def wrapper(*args, **kwargs):
    try:
        return job_func(*args, **kwargs)
    except Exception:
        logger.warning(traceback.format_exc())
        # Send the DingTalk alert here.
        if cancel_on_failure:
            logger.warning("Exception; ending job: {}".format(
                schedule.CancelJob))
            # As above, returning schedule.CancelJob performs the
            # cancellation; cancel_job(job_func) was a no-op.
            return schedule.CancelJob
def main(arg01):
    global keep_running, job_handle
    if arg01 == "start":
        # Keep the Job handle (a module-level name introduced here);
        # cancel_job() needs the Job returned by .do(), not the function.
        job_handle = schedule.every(5).seconds.do(job)
        keep_running = True
        while keep_running:
            schedule.run_pending()
            time.sleep(1)
    elif arg01 == "stop":
        schedule.cancel_job(job_handle)
def run2(self):
    # Run a simple GET first; this lets us update the url or path if the
    # server answers 301 or 302.
    try:
        h = http.client.HTTPConnection(self._url, 80, timeout=10)
        h.request("GET", self._path, headers={"Connection": "keep-alive"})
        r1 = h.getresponse()
        r1.read()
        if r1.status == 301:
            # Moved permanently: the path has changed.
            self._path = r1.getheader("Location")
        elif r1.status == 302:
            # Moved temporarily: the whole url has changed.
            self._url = r1.getheader("Location")
        h.close()
    except Exception:
        raise Exception("Failed on first connection")
    while True:
        try:
            if self._stopper.isSet():
                schedule.cancel_job(self._job)
                for h1 in self._connections:
                    h1.close()
                break
            schedule.run_pending()
            for h1 in self._connections:
                try:
                    h1.request("GET", self._path,
                               headers={"Connection": "keep-alive"})
                except http.client.CannotSendRequest:
                    print("Could not send request")
                    raise Exception("Previous response was not received")
                r1 = h1.getresponse()
                r1.read()
                if r1.status != 200:
                    raise Exception("Server stopped responding 200 or 302")
            time.sleep(TIME_INTERVAL)
        except ConnectionError as ex:
            print("Connection error, forcibly closed")
            self.bucket.put(sys.exc_info())
            self.bucket.put(ex)
        except Exception as e:
            print(e)
            self.stopit()
            self.bucket.put(sys.exc_info())
            self.bucket.put(e)
def scheduler_app():
    mongo = MongoPlugin(
        uri=conf("mongodb")["uri"],
        db=conf("mongodb")["db"],
        json_mongo=True).get_mongo()

    for cube in mongo['cube'].find({'scheduler_status': True}):
        rules(cube)

    for dashboard in mongo['dashboard'].find({'scheduler_status': True}):
        elements = [e['id'] for e in dashboard['element']]
        for e in elements:
            element = mongo['element'].find_one({'slug': e})
            cube = mongo['cube'].find_one({'slug': element['cube']})
            rules(cube, dashboard['scheduler_type'],
                  dashboard['scheduler_interval'])

    while True:
        for cube in mongo['cube'].find({'scheduler_status': True}):
            if cube['slug'] not in register:
                rules(cube)

        for dashboard in mongo['dashboard'].find({'scheduler_status': True}):
            elements = [e['id'] for e in dashboard['element']]
            for e in elements:
                element = mongo['element'].find_one({'slug': e})
                cube = mongo['cube'].find_one({'slug': element['cube']})
                if cube['slug'] not in register:
                    rules(cube, dashboard['scheduler_type'],
                          dashboard['scheduler_interval'], dashboard['slug'])

        for cube in mongo['cube'].find({'scheduler_status': False}):
            if cube['slug'] in register:
                schedule.cancel_job(onrun[cube['slug']])
                del onrun[cube['slug']]
                register.remove(cube['slug'])

        for dashboard in mongo['dashboard'].find({'scheduler_status': False}):
            elements = [e['id'] for e in dashboard['element']]
            for e in elements:
                try:
                    element = mongo['element'].find_one({'slug': e})
                    cube = mongo['cube'].find_one({'slug': element['cube']})
                    jobn = u"{}-{}".format(cube['slug'], dashboard['slug'])
                    if jobn in register:
                        schedule.cancel_job(onrun[jobn])
                        del onrun[jobn]
                        register.remove(jobn)
                except Exception:
                    pass

        schedule.run_pending()
        sleep(1)
def schedule_events_task():
    import schedule
    from datetime import date, timedelta
    from analyticsengine.dbmanager.mfc import create_daily_tables

    unsync_dev_key = config.get('constants', 'REDIS_UNSYNC_DEV_LIST_KEY')
    unsync_flag = False
    recheck_job = None

    def create_daily_cf_job():
        """Job to create daily DB tables.

        Calculate the next day's date and pass it on to create all the
        tables for the next day.
        """
        tomorrow = date.today() + timedelta(days=1)
        tomorrow_strf = tomorrow.strftime('%m%d%Y')
        LOG.info("Creating tables for date: " + tomorrow_strf)
        create_daily_tables(tomorrow_strf)

    # Schedule daily DB table creation: create the next day's tables at
    # 23:30 every day.
    schedule.every().day.at("23:30").do(create_daily_cf_job)

    def recheck_unsync_devices():
        """Job to recheck un-synced devices.

        If unsync_dev_list exists, a config request should be sent to see
        whether the device can be moved to sync. Devices are popped from the
        unsync list as they are prepared for recheck; when a device is sent
        to check config (sync check), it is re-added to the unsync list if
        it cannot sync.
        """
        unsync_list = List(key=config.get('constants',
                                          'REDIS_UNSYNC_DEV_LIST_KEY'),
                           redis=r)
        recheck_devices = []
        while len(unsync_list) > 0:
            recheck_devices.append(unsync_list.pop())
        LOG.info("Processing unsync device list")
        recheck_task = chain(request_cluster_config.s(recheck_devices),
                             update_unsync_list.s())
        recheck_task.apply_async()

    while True:
        schedule.run_pending()
        gevent.sleep(1)
        if r.exists(unsync_dev_key):
            if not unsync_flag:
                LOG.info("Unsync device list found. Will schedule a job to "
                         "recheck the status")
                # Keep the Job handle: cancel_job() needs the Job returned
                # by .do(); the original passed the function itself, which
                # never matches a scheduled job.
                recheck_job = schedule.every(int(config.get(
                    'collector', 'RECHECK_UNSYNC_FREQUENCY'))).minutes.do(
                        recheck_unsync_devices)
                unsync_flag = True
            else:
                LOG.debug("Recheck unsync devices is already scheduled and "
                          "is in progress.")
        else:
            if unsync_flag:
                LOG.info("No unsync devices found. Removing unsync device "
                         "recheck from the scheduler")
                schedule.cancel_job(recheck_job)
                unsync_flag = False
def clear(self):
    schedule.cancel_job(self.job)
    config = r.hgetall(RRD_BUCKET_KEY + ":" + self.name)
    # Note: the hardcoded "rrdbucket1" below looks like it should be
    # self.name.
    stat_keys = r.smembers(RRD_BUCKET_KEY + ":" + "rrdbucket1" + ":" + "keys")
    # Remove stats info associated with this bucket.
    fields = {"full", "current_row"}
    for stat in stat_keys:
        r.hdel(RRD_BUCKET_KEY + ":" + self.name + ":" + stat, *fields)
    fields = {"name", "rows", "steps", "aggregation"}
    r.hdel(RRD_BUCKET_KEY + ":" + self.name, *fields)
    r.hdel(RRD_BUCKET_KEY + ":" + self.name, "*")
    r.zremrangebyrank(RRD_BUCKET_KEY + ":" + self.name + ":" + "rrd", 0, -1)
    r.delete(RRD_BUCKET_KEY + ":" + self.name + ":" + "keys")
def hook():
    if request.method == "POST":
        global ANT_COUNT
        global job
        # Retrieve the message in JSON and transform it into a Telegram
        # object.
        update = telegram.Update.de_json(request.get_json(force=True))
        set_config_key('last_update_id', update.update_id)
        print(update.message)
        chat_id = update.message.chat.id
        msg_id = update.message.message_id
        # Telegram understands UTF-8, so encode text for unicode
        # compatibility.
        text = update.message.text.encode('utf-8')
        if text:
            print(text)
            if text.lower() == '/cmd':
                custom_keyboard = [[
                    telegram.Emoji.THUMBS_UP_SIGN,
                    telegram.Emoji.THUMBS_DOWN_SIGN,
                    telegram.Emoji.ALARM_CLOCK,
                    telegram.Emoji.ALIEN_MONSTER
                ]]
                reply_markup = telegram.ReplyKeyboardMarkup(custom_keyboard)
                bot.sendMessage(chat_id=chat_id, text=socket.gethostname(),
                                reply_to_message_id=msg_id,
                                reply_markup=reply_markup)
            elif text.lower() == 'status':
                bot.sendMessage(chat_id=chat_id, text=get_status())
            elif text[0:7] == "magnet:":
                rt.load_torrent_simple(text, "url", True, True)
                bot.sendMessage(chat_id=chat_id, text=text[0:7])
            elif text == telegram.Emoji.ALARM_CLOCK:
                job = schedule.every(5).seconds.do(other_job)
                bot.sendMessage(chat_id=chat_id, text="set alarmClock")
            elif text == telegram.Emoji.ALIEN_MONSTER:
                # Cancel every scheduled job; iterate over a copy, since
                # cancel_job() mutates schedule.jobs (schedule.clear()
                # would also do this in one call).
                for j in schedule.jobs[:]:
                    schedule.cancel_job(j)
            elif text == telegram.Emoji.THUMBS_UP_SIGN:
                ANT_COUNT += 1
                bot.sendMessage(chat_id=chat_id, text=str(ANT_COUNT))
            elif text == telegram.Emoji.THUMBS_DOWN_SIGN:
                ANT_COUNT -= 1
                bot.sendMessage(chat_id=chat_id, text=str(ANT_COUNT))
            else:
                # Repeat the same message back (echo).
                bot.sendMessage(chat_id=chat_id, text=text)
    return 'ok'
def re_nofity(self):
    # List of notifications to re-execute.
    if len(self.queue_to_nofity) > 0:
        self.log.info("List to re-execute: " + str(len(self.queue_to_nofity)))
        for notif, cfg in self.queue_to_nofity.items():  # iteritems() in Py2
            self.log.debug("re_nofity: " + cfg["url"])
            self.current_notif = cfg
            self.notify()
        # Wait for all threads to finish.
        if self.queue_thread_nofity:
            for t in self.queue_thread_nofity:
                t.join()
    else:
        self.log.debug("List to re-execute empty")
        # Note: cancel_job() expects the Job handle returned by .do();
        # passing the bound method here is likely a no-op.
        schedule.cancel_job(self.re_nofity)
def test_cancel_job(self):
    def stop_job():
        return schedule.CancelJob

    mock_job = make_mock_job()
    every().second.do(stop_job)
    mj = every().second.do(mock_job)
    assert len(schedule.jobs) == 2

    schedule.run_all()
    assert len(schedule.jobs) == 1
    assert schedule.jobs[0] == mj

    schedule.cancel_job('Not a job')
    assert len(schedule.jobs) == 1
    schedule.default_scheduler.cancel_job('Not a job')
    assert len(schedule.jobs) == 1

    schedule.cancel_job(mj)
    assert len(schedule.jobs) == 0
def open_connection(self):
    try:
        if not self._stopper.isSet():
            print("Adding HTTP Socket")
            with exhaustingTools.lock:
                self._counter.increment()
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(180)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            sock.connect((self._ip, PORT))
            self._connections.append(sock)
            print("Added HTTP Socket")
    except Exception as ex:
        print(ex)
        print("Connection error, could not open new connection")
        self.stopit()
        schedule.cancel_job(self._job)
        schedule.clear()
        self.bucket.put(sys.exc_info())
def remove_job(user):
    if user['chat_id'] in jobs:
        schedule.cancel_job(jobs[user['chat_id']])
        remainders.remove({'chat_id': user['chat_id']})
def run(self):
    site_ip = urlparse(self._ip)
    HOST = site_ip[2]
    request = GETREQUEST + "Host: " + self._url + CRLF + REQUEST_TERMINAL
    sock = None
    # Try a first connection to check whether the url or path differs from
    # the standard root and the given url.
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        sock.connect((HOST, PORT))
        sock.send(request.encode())
        response = http.client.HTTPResponse(sock)
        response.begin()
        response.read()
        if response.status == 301:
            self._path = response.getheader("Location")
        elif response.status == 302:
            self._url = response.getheader("Location")
            temp = urlparse(self._url)
            self._ip = socket.gethostbyname_ex(temp[1])
            self._path = temp[2]
    except Exception:
        print("Could not open new socket")
        self.bucket.put(sys.exc_info())
    data = []
    request = self.createRequestString(self._url, self._path)
    self.open_connection()
    while not self._stopper.isSet():
        try:
            if self._stopper.isSet():
                schedule.cancel_job(self._job)
                schedule.clear()
                break
            schedule.run_pending()
            for sock in self._connections:
                try:
                    sock.send(request.encode())
                    response = http.client.HTTPResponse(sock)
                    response.begin()
                    response.read()
                    if response.status != 200:
                        raise Exception("Server stopped responding 200 OK")
                except Exception as ex:
                    print(ex)
                    raise Exception(
                        "Failure to send request or decrypt response")
            time.sleep(TIME_INTERVAL)
        except Exception as ex:
            print(ex)
            self.stopit()
            schedule.cancel_job(self._job)
            self.bucket.put(sys.exc_info())
    print("Closing Sockets")
    for sock in self._connections:
        sock.shutdown(socket.SHUT_RDWR)
        sock.close()
    print("Finished closing sockets " + str(len(self._connections)))
def stop(self):
    schedule.cancel_job(self.schedule)
    for input in self.inputs.values():
        input.stop()
    __logger__.info("Inputs stopped")
def __update__(self, type):
    self.publish({'state': type})
    schedule.cancel_job(self.schedules[type])
    self.__schedule_next__(type)
def stop(self):
    super(AstralInput, self).stop()
    for type in self.schedules:
        schedule.cancel_job(self.schedules[type])
def stop(self):
    schedule.cancel_job(self.job)
if cube['slug'] not in register:
    rules(cube)
for dashboard in mongo['dashboard'].find({'scheduler_status': True}):
    elements = [e['id'] for e in dashboard['element']]
    for e in elements:
        element = mongo['element'].find_one({'slug': e})
        cube = mongo['cube'].find_one({'slug': element['cube']})
        if cube['slug'] not in register:
            rules(cube, dashboard['scheduler_type'],
                  dashboard['scheduler_interval'], dashboard['slug'])
for cube in mongo['cube'].find({'scheduler_status': False}):
    if cube['slug'] in register:
        schedule.cancel_job(onrun[cube['slug']])
        del onrun[cube['slug']]
        register.remove(cube['slug'])
for dashboard in mongo['dashboard'].find({'scheduler_status': False}):
    elements = [e['id'] for e in dashboard['element']]
    for e in elements:
        element = mongo['element'].find_one({'slug': e})
        cube = mongo['cube'].find_one({'slug': element['cube']})
        jobn = u"{}-{}".format(cube['slug'], dashboard['slug'])
        if jobn in register:
            schedule.cancel_job(onrun[jobn])
            del onrun[jobn]
            register.remove(jobn)
schedule.run_pending()
def stop(self):
    # Cancel each job and reset the list (cancel_job() returns None, so the
    # original comprehension left a list of Nones behind).
    for sched in self.schedule:
        schedule.cancel_job(sched)
    self.schedule = []
def start_typing(channel):
    send_typing(channel)
    task = schedule.every(2).seconds.do(send_typing, channel)
    yield task
    schedule.cancel_job(task)
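start_typing above reads like the body of a context manager; one hedged way to wire it up with contextlib (send_typing here is a stub standing in for the snippet's real helper, and the channel name is illustrative) so the repeating job is cancelled even if the body raises:

import contextlib
import schedule

def send_typing(channel):
    """Stub standing in for the snippet's real send_typing helper."""
    print(f"typing in {channel}")

@contextlib.contextmanager
def typing_indicator(channel):
    send_typing(channel)
    task = schedule.every(2).seconds.do(send_typing, channel)
    try:
        yield task
    finally:
        # Always unschedule the repeating job, even on error in the body.
        schedule.cancel_job(task)

with typing_indicator("general"):
    schedule.run_pending()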