async def predict(request: EngineRequest):
    """Run background-removal inference for a single request.

    Returns:
        dict with keys ``error`` (bool), ``img`` (saved file path or None)
        and ``message`` (success text or error description).
    """
    log.info('Starting request...')
    try:
        # Validate request. hmac.compare_digest gives a constant-time
        # comparison so the secret cannot be probed via timing differences.
        import hmac
        if not hmac.compare_digest(str(request.secret_access),
                                   str(env.get_secret_access())):
            return dict(error=True, img=None, message='Unauthorized.')

        # Open image
        image = np.array(Image.open(request.img).convert('RGB'))

        # Run inference
        result, error_message = await run(net, image, request.to_remove,
                                          request.color_removal)
        if not result:
            return dict(error=True, img=None, message=error_message)

        # Save image under a collision-free name and return its path.
        tmp_file_name = os.path.join(TMP_FOLDER, '{}.png'.format(uuid.uuid4()))
        result.save(tmp_file_name)
        log.info('Image saved in: {}'.format(tmp_file_name))
        return dict(error=False, img=tmp_file_name,
                    message=EXAMPLE_MESSAGE_SUCCESS)
    except Exception as e:
        error_message = 'Error on request: [{}]'.format(e)
        # log.exception records the message AND the traceback; the original
        # double-logged the same error via log.error + log.exception.
        log.exception(error_message)
        return dict(error=True, img=None, message=error_message)
def start(update, context, first_run=True):
    """Show the bot's main menu, waiting out any in-flight request first."""
    # Block until the previous request finishes so a user cannot queue up
    # new requests while the current one is still being processed.
    while pbot.has_request():
        context.bot.send_chat_action(
            chat_id=update.effective_message.chat_id,
            action=ChatAction.TYPING)
        log.info("[Personal bot] Waiting for request to be processed...")
        sleep(5)

    greeting = 'Чим я можу допомогти?' if first_run else 'Може ще щось?'
    balance_button = InlineKeyboardButton(mm_buttons.BALANCE,
                                          callback_data=mm_buttons.BALANCE)
    # Each menu button uses its label as its callback payload.
    menu_buttons = [
        InlineKeyboardButton(label, callback_data=label)
        for label in (mm_buttons.REBOOT_CONFIRM, mm_buttons.FIX_CONFIRM,
                      mm_buttons.SMS_NUM, mm_buttons.USSD_NUM)
    ]
    markup = InlineKeyboardMarkup(
        build_menu(menu_buttons, n_cols=2, header_buttons=balance_button))
    send_bot_msg(update, context, msg=greeting, reply_markup=markup,
                 noedit=True)
def _init_uri_routes(self):
    """Initialise the URI routing table from the configured handler paths."""
    logger.info('init uri routes start >>>', caller=self)
    self.handlers = route.make_routes(self.handler_pathes)
    logger.info('init uri routes done <<<', caller=self)
def save(self, df):
    """Persist *df* to HDFS as a parquet dataset partitioned by dt_partition."""
    log.info("Save as parquet")
    writer = HdfsUtils(None)
    writer.write(df=df,
                 path=config("PATH_PARQUET_RESULT"),
                 format=config("PARQUET_FORMAT"),
                 partition_name="dt_partition",
                 save_mode=config("OVERWRITE_MODE"))
def update(self, key, value, date=None):
    """Update the value (and its timestamp) stored for *key* in db_dict.

    Args:
        key: row key to update.
        value: new value to store.
        date: timestamp to record; defaults to the current time. (The
            original default ``date=datetime.now()`` was evaluated once at
            import time, stamping every call with the module-load time.)
    """
    if date is None:
        date = datetime.now()
    log.info("[DB] Update value for key '%s' (new value '%s')" % (key, value))
    sql = ''' UPDATE db_dict SET value = ?, date = ? WHERE key = ?'''
    self.conn.execute(sql, (value, date, key))
def main(help, top, auto_commit, gtk, mail_smtp, mail_user, mail_pass,
         qq_user, qq_pass):
    """Collect CVEs from all feeds and broadcast them to every channel."""
    if help:
        log.info(help_info())
        return
    collected = {}
    # Query every threat-intel source; keep only sources that returned CVEs.
    for source in (Cert360(), Nsfocus(), QiAnXin(), RedQueen(), AnQuanKe(),
                   Vas(), NVD(), CNVD(), CNNVD(), Tenable()):
        found = source.cves()
        if found:
            to_log(found)
            collected[source] = found
    if not collected:
        return
    # Fan the results out: web page, mail, QQ group, WeChat.
    page.to_page(top)
    mail.to_mail(gtk, collected, mail_smtp, mail_user, mail_pass)
    qq.to_group(collected, qq_user, qq_pass)
    wechat.to_wechat(collected)
    if auto_commit:
        git.auto_commit()
def checkpoint(frames):
    """Save all models, optimizers, the scheduler and run flags to model.tar."""
    if flags.disable_checkpoint:
        return
    path = os.path.expandvars(
        os.path.expanduser('%s/%s/%s' %
                           (flags.savedir, flags.xpid, 'model.tar')))
    log.info('Saving checkpoint to %s', path)
    # Bundle every piece of training state so a restart resumes exactly.
    state = {
        'model_state_dict': model.state_dict(),
        'state_embedding_model_state_dict': state_embedding_model.state_dict(),
        'forward_dynamics_model_state_dict': forward_dynamics_model.state_dict(),
        'inverse_dynamics_model_state_dict': inverse_dynamics_model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'state_embedding_optimizer_state_dict': state_embedding_optimizer.state_dict(),
        'forward_dynamics_optimizer_state_dict': forward_dynamics_optimizer.state_dict(),
        'inverse_dynamics_optimizer_state_dict': inverse_dynamics_optimizer.state_dict(),
        'scheduler_state_dict': scheduler.state_dict(),
        'flags': vars(flags),
    }
    torch.save(state, path)
def get(self, key, field="value", all_fields=False, notify=True):
    """Fetch one field (or all fields) of the db_dict row matching *key*.

    Args:
        key: row key to look up.
        field: one of "value" (default), "id" or "date".
        all_fields: when True, return a dict with id/value/date instead.
        notify: when True, log the lookup and its result.

    Returns:
        The requested field value, or a dict of all fields; a missing key
        yields None values rather than raising.

    Raises:
        ValueError: if *field* is not a recognised field name.
    """
    if notify:
        log.info("[DB] Get value for key '%s'" % key)
    sql = ''' SELECT id, key, value, date FROM db_dict WHERE key = ?'''
    results = self.conn.execute(sql, (key, ))
    # A missing key degrades to a row of Nones instead of raising.
    # (renamed from `id` to avoid shadowing the builtin)
    row_id, _, value, date = results[0] if results else (None, None, None, None)
    if all_fields:
        result = {"id": row_id, "value": value, "date": date}
    elif field == "value":
        result = value
    elif field == "id":
        result = row_id
    elif field == "date":
        result = date
    else:
        # ValueError is a subclass of Exception, so callers catching the
        # original generic Exception still work.
        raise ValueError("Incorrect field name expected: %s" % field)
    if notify:
        log.info("[DB] Obtained value for key '%s' == '%s'" % (key, result))
    return result
def process_request(self, goip):
    """Process the pending bot request against *goip*, reporting progress
    to the chat; the request slot is always cleared afterwards."""
    if not self.has_request():
        log.info("[ProcessRequest] No request to process")
        return
    update = self.request.update
    context = self.request.context
    try:
        # Refresh the menu in a background thread while the request runs.
        threading.Thread(target=start_over, args=(
            update,
            context,
        )).start()
        send_bot_msg(update, context, msg="Виконую запит")
        result = self.request.process(goip)
        if result:
            bot.send(result)
        send_bot_msg(update, context, msg="Тринь, ісполнєно!")
    except Exception as e:
        # Error reporting itself can fail (e.g. Telegram unreachable), so
        # guard it and log both exceptions if it does.
        try:
            send_bot_msg(update, context, msg="Трапилась помилка")
            log.error(
                "[Personal bot] Exception while processing request: %s" % e)
        except Exception as e1:
            log.error(
                "[Personal bot] Exception while handling exception: %s\noriginal exception: %s"
                % (e1, e))
    finally:
        # Clear the slot so the next request can be accepted either way.
        self.request = None
def batch_and_learn(i, lock=threading.Lock()):
    """Thread target for the learning process."""
    # NOTE(review): the mutable default `lock` looks like the classic
    # pitfall but is presumably deliberate here — every thread started
    # without an explicit lock shares the single Lock created at function
    # definition time, serialising the frames/stats updates below. Confirm
    # before "fixing".
    nonlocal frames, stats
    timings = prof.Timings()
    while frames < flags.total_frames:
        timings.reset()
        batch, agent_state = get_batch(free_queue, full_queue, buffers,
                                       initial_agent_state_buffers, flags,
                                       timings)
        stats = learn(model, learner_model, batch, agent_state, optimizer,
                      scheduler, flags, position_count=position_count,
                      action_hist=action_hist)
        timings.time('learn')
        # Guard the shared counters: several learner threads update them.
        with lock:
            to_log = dict(frames=frames)
            to_log.update({k: stats[k] for k in stat_keys})
            plogger.log(to_log)
            frames += T * B
    # Only the first thread reports timing to avoid duplicated log lines.
    if i == 0:
        log.info('Batch and learn: %s', timings.summary())
def checkpoint(frames):
    """Save training state to model.tar and append the current action
    histogram to a growing list in action_hist.tar."""
    if flags.disable_checkpoint:
        return
    model_path = os.path.expandvars(
        os.path.expanduser('%s/%s/%s' %
                           (flags.savedir, flags.xpid, 'model.tar')))
    log.info('Saving checkpoint to %s', model_path)
    state = {
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'scheduler_state_dict': scheduler.state_dict(),
        'position_count': position_count,
        'flags': vars(flags),
    }
    torch.save(state, model_path)

    # Action histogram logger: one histogram snapshot per checkpoint so the
    # whole training history stays inspectable.
    hist_path = os.path.expandvars(
        os.path.expanduser('%s/%s/%s' %
                           (flags.savedir, flags.xpid, 'action_hist.tar')))
    try:
        hist_list = torch.load(hist_path)
    except FileNotFoundError:
        hist_list = []
    hist_list.append(action_hist.return_full_hist())
    torch.save(hist_list, hist_path)
def __init__(self, url, uname, pwd, sip, spwd):
    """Connect to the GoIP device (web UI + SMS), then reconcile the daily
    counters depending on whether the last run was earlier today.

    Args:
        url: device base URL; "http://" is prepended when missing.
        uname/pwd: web UI credentials.
        sip/spwd: SMPP (SMS) credentials -- presumably; confirm in init_sms.
    """
    if not url.startswith("http"):
        url = "http://%s" % url
    self.url = url
    self.uname = uname
    self.pwd = pwd
    self.sip = sip
    self.spwd = spwd
    self.init_browser()
    self.init_sms()
    # If the stored daily call duration is NOT from today, roll it into the
    # weekly total and reset all daily counters for the new day.
    if vs.daily_calls_duration(field="date",
                               default="1970-01-01 00:00:00.000").date(
                               ) != current_date().date():
        vs.increase_weekly_call_duration(
            vs.daily_calls_duration())  # as we will reset this value
        reset_daily_values()  # set default values for the daily status
    else:
        log.info(
            "[GoipMonitor] Recent restart - do not reset daily calls duration."
        )
    # Greet the chat only after a long outage; a quick restart stays quiet.
    if passed_more_that_sec(vs.monitor_slept_at(notify=True),
                            30 * 60):  # if not restarted within 20-30 minutes
        bot.send(random_list_item(GREETING_PHRASES))
    else:
        log.info("[GoipMonitor] Regular restart - no greeting was sent")
def main(help, top, auto_commit, mail_by_github, mail_smtp, mail_user,
         mail_pass):
    """Collect CVEs from all feeds and publish them to page and mail."""
    if help:
        log.info(help_info())
        return
    collected = {}
    # Query each threat-intel source; keep only those that returned CVEs.
    for source in (Cert360(), NsFocus(), QiAnXin(), RedQueen(), AnQuanKe(),
                   Vas()):
        found = source.cves()
        if found:
            to_log(found)
            collected[source] = found
    if collected:
        page.to_page(top)
        mail.to_mail(mail_by_github, collected, mail_smtp, mail_user,
                     mail_pass)
        if auto_commit:
            git.auto_commit()
def __init__(self, recreate=False): database = r"%s/sqlite.db" % CUR_DIR # this will create separate DB for each platform used log.info("[DB] Start the module") self.conn = Sqlite3Worker(database) self.conn.execute(self.create_table) if recreate: self.conn.execute(self.drop_index) self.conn.execute(self.create_index)
def reset_config(self): log.info("[Reset config] Re-setting") # as GoIP's SMPP is not started after configuration is reset SmsWrapper.kill() BrowserWrapper.b.go_relative_url("reset_config.html") sleep(20) # login with default password self.init_browser(pwd=DEFAULT_GOIP_PWD)
def yearly_status():
    """Query the yearly package status via USSD.

    Returns:
        (has_status, valid_till): has_status is False when the reply did not
        match YEARLY_STATUS_REGEX; valid_till is a datetime on success,
        otherwise the regex default (None).
    """
    has_status, valid_till = parse_ussd(USSD_YEARLY_STATUS,
                                        YEARLY_STATUS_REGEX, [False, None])
    if has_status:
        log.info("[Yearly status] Found information: valid till '%s'" %
                 valid_till)
        # carrier reply uses two-digit years (e.g. '31.12.24')
        valid_till = datetime.strptime(valid_till, "%d.%m.%y")
    return has_status, valid_till
def read_json_file(path):
    """Read *path* and return its parsed JSON content.

    Returns:
        The deserialized object, or None when the file cannot be opened.
    """
    try:
        log.info('Reading ' + path + ' file ...')
        with open(path, 'r') as f:
            log.info(path + ' file opened.')
            return json.load(f)
    except IOError as err:
        # The original captured `err` but never logged it, hiding the cause.
        log.error('|IOError| - File not found or couldn\'t be open: %s' % err)
        return None
def write_json_file(path, content):
    """Serialize *content* as JSON to *path* (non-ASCII kept as-is).

    Failures to open/write the file are logged, not raised.
    """
    try:
        log.info('Opening ' + path + ' file ...')
        with open(path, 'w') as outfile:
            json.dump(content, outfile, ensure_ascii=False)
        log.info(path + ' file written.')  # fixed typo: "writed"
    except IOError as err:
        # The original captured `err` but never logged it, hiding the cause.
        log.error(
            '|IOError| - File not found or couldn\'t be open to write: %s' %
            err)
def reset_daily_values(money=0.0):
    """Reset the daily counters at the start of a new day.

    Args:
        money: balance to store; when falsy (default) it is fetched live
            via the USSD balance query.
    """
    log.info("[Reset daily values] Setting initial values")
    if not money:
        # balance() returns (has_status, money, tariff, valid_till)
        _, money, _, _ = balance()
    vs.set_current_balance(money)
    # Roll today's call duration into the overall total before zeroing it.
    vs.increase_overall_call_duration(vs.daily_calls_duration())
    vs.set_daily_calls_duration(0)
    vs.set_daily_fixed_times(0)
    vs.set_daily_ok_calls_amount(0)
    vs.set_daily_failed_calls_amount(0)
def reboot(self):
    """Reboot the GoIP device and re-establish the browser + SMS sessions,
    announcing progress to the chat."""
    log.info("[Reboot] Rebooting caller")
    # stop SMPP before the device goes down
    SmsWrapper.kill()
    bot.send("Перезавантажую дзвонилку.")
    BrowserWrapper.b.go_relative_url("reboot.html")
    # wait for the device to come back up -- presumably 30s is enough
    sleep(30)
    self.init_browser()
    self.init_sms(notify=True)
    log.info("[Reboot] Finished reboot")
    bot.send("Перезавантажено дзвонилку.")
def load(self):
    """Load the TB_CONTRACT Oracle table with snake_case column names."""
    log.info("Load Oracle data Contracts")
    column_map = (
        ("CONTRACT_ID", "contract_id"),
        ("CLIENT_ID", "client_id"),
        ("CLIENT_NAME", "client_name"),
        ("PERCENTAGE", "percentage"),
        ("IS_ACTIVE", "is_active"),
    )
    df = HdfsUtils(self.spark_session).readOracle(
        format=config("JDBC_FORMAT"), table_name="TB_CONTRACT")
    # Rename each Oracle column to its snake_case equivalent.
    for old_name, new_name in column_map:
        df = df.withColumnRenamed(old_name, new_name)
    return df
def get_dataframe_from_json(content):
    """Build a pandas DataFrame from the 'articles' entry of *content*.

    Returns:
        The DataFrame, or None when it cannot be built.
    """
    try:
        log.info('Generating DataFrame...')
        df = pd.DataFrame(data=content['articles'])
        log.info('Done')
        print(get_columns_name(df))
        df.info(memory_usage='deep')
        return df
    except (ValueError, KeyError) as err:
        # KeyError covers a payload without an 'articles' entry, which the
        # original ValueError-only handler let propagate.
        log.error('|DATAFRAME.py| - Can\'t create pandas dataframe')
        log.error(err)
        return None
def call_monitor(self):
    """Poll the device status and advance the call state machine one step."""
    self.calculate_status()
    if self.status == self.IDLE:
        # A call/dialing that was in progress has just ended.
        if self.call_or_dialing_started():
            self.finish_call()  # back to idle
        return
    log.info("[Monitor call status] Status string = '%s'" % self.status)
    if self.status in self.STARTING_CALL_STATUSES:
        self.start_dialing()
    if self.status == self.CONNECTED:
        self.start_call()
def auto_commit():
    """Stage, commit and push all working-tree changes with a timestamped
    message; failures are logged, never raised."""
    log.info('正在提交变更...')
    try:
        repo = git.Repo(env.PRJ_DIR)
        repo.git.add('*')
        repo.git.commit(m='[Threat-Broadcast] %s' %
                        time.strftime('%Y-%m-%d %H:%M:%S',
                                      time.localtime(time.time())))
        repo.git.push()
        log.info('提交变更成功')
    except Exception as e:
        # The bare `except:` also swallowed SystemExit/KeyboardInterrupt and
        # hid the failure reason entirely; narrow it and log the error.
        log.error('提交变更失败')
        log.error(e)
def process_received_msg(pdu):
    """Decode an incoming SMS PDU and forward it to the Telegram bot."""
    sender = pdu.source_addr.decode()
    text = decode_msg(pdu.short_message)
    if not text:
        # Long (multi-part) messages arrive via the payload field instead.
        log.info(
            "[Process Received SMS] Got long message. Using alternative logic")
        text = decode_msg(pdu.message_payload)
    text = text or "<empty>"
    log.info("[Process Received SMS] Message from: %s, content: %s" %
             (sender, text))
    bot.send("Отримано СМС від %s\n%s" % (sender, text), escape=True)
def balance():
    """Query the account balance and tariff via USSD.

    Returns:
        (has_status, money, tariff, valid_till): has_status is False when
        the reply did not match BALANCE_REGEX (then the regex defaults are
        returned); on success money is a float and valid_till a datetime.
    """
    has_status, money, tariff, valid_till = parse_ussd(USSD_GENERAL_STATUS,
                                                       BALANCE_REGEX,
                                                       [False, 0, None, None])
    if has_status:
        log.info(
            "[Balance] Found information: money '%s', tariff '%s', valid till '%s'"
            % (money, tariff, valid_till))
        money = float(money)
        # note: four-digit year here, unlike the monthly/yearly statuses
        valid_till = datetime.strptime(valid_till, "%d.%m.%Y")
    return has_status, money, tariff, valid_till
def monthly_status():
    """Query the monthly package status via USSD.

    Returns:
        (has_status, minutes_left, valid_days): valid_days is the number of
        days until the package expires, 0 when the reply did not match
        MONTHLY_STATUS_REGEX.
    """
    has_status, minutes_left, valid_till = parse_ussd(USSD_MONTHLY_STATUS,
                                                      MONTHLY_STATUS_REGEX,
                                                      [False, None, None])
    valid_days = 0
    if has_status:
        log.info(
            "[Monthly status] Found information: minutes left '%s', valid till '%s'"
            % (minutes_left, valid_till))
        # carrier reply uses two-digit years (e.g. '31.12.24')
        valid_till_date = datetime.strptime(valid_till, "%d.%m.%y")
        valid_days = (valid_till_date - current_time()).days
    return has_status, minutes_left, valid_days
def close(self, force=False):
    """Disconnect the SMPP client and stop all SMS-monitoring processes.

    Args:
        force: when True, skip the grace period before terminating.
    """
    if not self._all_processes:
        return
    log.info("[Close] Disconnecting the SMPP client")
    try:
        self.client.disconnect()
    except Exception as exc:
        log.error(exc)
    if not force:
        # give the disconnect a moment to complete
        sleep(2)
    log.info("[Close] Terminating processes for SMS monitoring")
    for proc in self._all_processes:
        proc.terminate()
def close(self, err_log=False):
    """Tear down the webdriver session; errors are logged only on request.

    Args:
        err_log: when True, log any exception raised during shutdown.
    """
    driver = self.driver
    if not driver:
        return
    log.info("[Browser] Close")
    try:
        driver.close()  # close the current page
        # kill the specific phantomjs child process
        driver.service.process.send_signal(signal.SIGTERM)
        driver.quit()  # quit the node process
    except Exception as exc:
        if err_log:
            log.error("[Browser] Close exception : {}".format(exc))
    self.driver = None
def auto_commit():
    """Stage, commit and push all working-tree changes with a timestamped
    message; failures are logged, never raised."""
    log.info("正在提交变更...")
    try:
        repo = git.Repo(config.PRJ_DIR)
        repo.git.add("*")
        repo.git.commit(
            m="[Threat-Broadcast] %s" %
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())))
        repo.git.push()
        log.info("提交变更成功")
    except Exception as e:
        # The bare `except:` also swallowed SystemExit/KeyboardInterrupt and
        # hid the failure reason entirely; narrow it and log the error.
        log.error("提交变更失败")
        log.error(e)