def copy_workflow(workflow_id, hapikey_origin=hapikey_origin, hapikey_target=hapikey_target, silent=False, simulate=False):
    """Copy a single workflow from the origin portal to the target portal.

    Args:
        workflow_id: numeric id of the workflow in the origin portal.
        hapikey_origin: API key of the origin portal (module-level default).
        hapikey_target: API key of the target portal (module-level default).
        silent: suppress console output and success/failure log events.
        simulate: build the request body but do not POST anything.

    Returns:
        The ``requests.Response`` of the create call, or a placeholder dict
        when ``simulate`` is True.
    """
    print("start processing workflow " + str(workflow_id))
    body = process_workflow(workflow_id, hapikey_origin)
    if simulate:
        # BUGFIX: return before any response handling — the placeholder dict
        # has no .status_code / .json(), so the original code crashed here.
        return {"text": "not really doing anything"}
    r = requests.post(url_create_wf(hapikey_target), json=body)
    # Persist the full HTTP response for post-mortem inspection.
    with open(config.log_destination + "wf" + str(workflow_id) + "_" + str(r.status_code) + ".json", "w") as data_file:
        json.dump(r.json(), data_file, indent=2)
    if not r and not silent:
        # requests.Response is falsy for 4xx/5xx status codes.
        logger.log_event("copy_failure")
        print("Workflow " + str(workflow_id) + " could not be copied (Error " + str(r.status_code) + ", see log subdirectory for full http response).")
    elif not silent:
        logger.log_event("copy_success")
        print("Workflow " + str(workflow_id) + " successfully copied.")
        # Remember the origin->target id mapping for later dependency lookups.
        set_id("workflowId", workflow_id, int(r.json()["id"]))
    return r
async def reaction_change(payload, guild, status):
    """Grant or revoke a role based on a reaction add/remove payload.

    Looks up the configured reaction->role bindings for the payload's
    channel/message; when the emoji matches, adds (status truthy) or
    removes (status falsy) the bound role on the reacting member.
    """
    all_reactions = admin_file.get('reactions', {})
    chan_key = str(payload.channel_id)
    if chan_key not in all_reactions:
        return
    msg_key = str(payload.message_id)
    channel_map = all_reactions[chan_key]
    if msg_key not in channel_map:
        return
    emoji_name = str(payload.emoji)
    for binding in channel_map[msg_key]:
        if binding['reaction'] != emoji_name:
            continue
        role = get_role(binding['role'], guild.roles)
        member = guild.get_member(payload.user_id)
        if status:
            await member.add_roles(role)
            logger.log_event('addrole', 'added role {} to user {}.'.format(role, member))
        else:
            await member.remove_roles(role)
            logger.log_event('removerole', 'removed role {} from user {}.'.format(role, member))
def server(message):
    """Offline 'server' checkpoint handler: validate the code word sent by the player."""
    chat_id = message.chat.id
    user = players.users.get(chat_id)
    if user is None:
        bot.send_message(chat_id, config.unknown_error)
        return
    if was_help_cmd(message):
        help_cmd(message)
        return
    user.check_point = config.offline_check_points[4]
    logger.log_event(user.uid, 'Server', user.name)
    saver.save_users(players.users)
    code = offtxt.server_code
    words = message.text.split(' ')
    # Re-register this handler on every rejection so the player can retry.
    if len(words) > 1:
        bot.send_message(chat_id, config.offline_too_much_words_in_code_error)
        bot.register_next_step_handler(message, server)
        return
    if len(words[0]) != len(code):
        bot.send_message(chat_id, config.offline_mismatch_len_code_error)
        bot.register_next_step_handler(message, server)
        return
    if message.text == code:
        bot.send_message(chat_id, 'Осталось уже совсем немного...')
        tasks(message)
    else:
        bot.send_message(chat_id, config.offline_wrong_code_error)
        bot.register_next_step_handler(message, server)
def gen_token_cmd(message):
    """Show an inline keyboard asking which type of access token to generate."""
    chat_id = message.chat.id
    buttons = [
        types.InlineKeyboardButton(text='Супер админ', callback_data='sa'),
        types.InlineKeyboardButton(text='Админ', callback_data='admin'),
        types.InlineKeyboardButton(text='КПшник', callback_data='kp'),
    ]
    markup = types.InlineKeyboardMarkup()
    markup.add(*buttons)
    bot.send_message(chat_id, 'Выберите нужный вам тип токена:', reply_markup=markup)
    logger.log_event(chat_id, 'Gen_token called', get_user_name(chat_id))
def traf(message):
    """Offline stencil ('traf') checkpoint handler: validate the player's per-user code."""
    chat_id = message.chat.id
    user = players.users.get(chat_id)
    if user is None:
        bot.send_message(chat_id, config.unknown_error)
        return
    if message.text == '/continue':
        bot.send_message(chat_id, 'Введите код с трафарета')
        bot.register_next_step_handler(message, traf)
        return
    if was_help_cmd(message):
        help_cmd(message)
        return
    user.check_point = config.offline_check_points[3]
    logger.log_event(user.uid, 'Traf', user.name)
    saver.save_users(players.users)
    # Each player has a personal stencil code.
    code = offtxt.traf_codes[user.uid]
    words = message.text.split(' ')
    if len(words) > 1:
        bot.send_message(chat_id, config.offline_too_much_words_in_code_error)
        bot.register_next_step_handler(message, traf)
        return
    if len(words[0]) != len(code):
        bot.send_message(chat_id, config.offline_mismatch_len_code_error)
        bot.register_next_step_handler(message, traf)
        return
    if message.text == code:
        bot.send_photo(chat_id, offtxt.traf_photo_id, caption='Найдите его...')
        bot.register_next_step_handler(message, server)
    else:
        bot.send_message(chat_id, config.offline_wrong_code_error)
        bot.register_next_step_handler(message, traf)
def send_image_to_server(filename, url="http://52.28.196.4:5000/api/detect/faces"):
    """Send an image to the face-detection server.

    Args:
        filename: path of the image file to load from disk.
        url: detection endpoint; defaults to the previously hard-coded server
            (parameterized so other deployments can reuse this function).
    """
    # Load image from disk and convert to numpy array
    image = cv2.imread(filename, 1)
    image_matrix = np.asarray(image)

    # Compute MD5 hash and log it (lets the server verify the transfer).
    hash_md5 = hashlib.md5()
    hash_md5.update(image_matrix)
    hash_image_matrix = hash_md5.hexdigest()
    logger.log_event("Client: sent image matrix MD5 hash is: " + hash_image_matrix)

    # Serialize image matrix (marked below with horizontal rules)
    # NOTE: THIS SERIALIZATION NEEDS TO MATCH THE SERVER'S DESERIALIZATION
    # ---------------------------------------------------------------------
    # Pickle to byte string
    image_pickled = pickle.dumps(image_matrix, protocol=3)
    # ---------------------------------------------------------------------

    # Send image to server as a binary stream.
    headers = {'content-type': 'application/octet-stream'}
    response = requests.post(url, data=image_pickled, headers=headers)
    print(response.json())
def start_buffering(self):
    """Pause the pipeline and remember when this buffering interval began."""
    self.pipeline.set_state(gst.STATE_PAUSED)
    self.is_buffering = True
    # Store the start timestamp; stop_buffering() later replaces this entry
    # with the interruption's duration.
    self.buffering_time.append(time())
    logger.log_event("Buffering started.")
def index(request: flask.Request):
    """HTTP Cloud Function: classify an uploaded image as jollof / not jollof.

    Args:
        request (flask.Request): The request object; must be a multipart POST
            carrying the image file (GET is rejected).

    Returns:
        A (json, 200, headers) tuple on success, or (json, 500) on error.
    """
    try:
        if request.method == "GET":
            raise NotImplementedError("GET method not supported")
        headers = cors(request)
        form_result = parse_multipart(request)
        # Only the first uploaded file is classified.
        image_file = form_result["files"][0]
        filename, *args = image_file
        image = load_image(filename)
        model = load_model("v1")
        score = model.predict(image).flatten()
        # Class 0 == jollof (score below THRESHOLD).
        prediction = tf.where(score < THRESHOLD, 0, 1).numpy()
        prediction = prediction[0].item()
        score = score[0].item()
        prob_is_jollof = f"{round((1 - score) * 100, 2)}%"
        is_jollof = prediction == 0  # direct bool instead of 'True if ... else False'
        # Clean up the temp upload before responding.
        delete_file(filename)
        return (
            jsonify(
                data=dict(
                    prediction=prediction,
                    score=score,
                    is_jollof=is_jollof,
                    probability_is_jollof=prob_is_jollof,
                ),
                status="success",
            ),
            200,
            headers,
        )
    except Exception as e:
        # Top-level boundary: report any failure as a 500 JSON payload.
        print("Error: ", e)
        error_message = str(e)
        log_event(request, "error", error_message)
        return (
            jsonify(error=error_message, status="error"),
            500,
        )
def write_claws_to_file(claws):
    """Write the CLAWS output string to a timestamped review file and log its creation.

    Args:
        claws: text to persist.
    """
    # e.g. <review_file_path>/ck_07-04-21-13-05-59.txt (GMT timestamp)
    crispy_kitten = global_vars.review_file_path + "ck_" + strftime(
        "%m-%d-%y-%H-%M-%S", gmtime()) + ".txt"
    # Context manager guarantees the handle is closed even if the write fails
    # (the original used explicit open/close with no try/finally).
    with open(crispy_kitten, "w+") as leopard_txt:
        leopard_txt.write(claws)
    logger.log_event(crispy_kitten + " created.")
def read_sheet():
    """Read the raider roster from the assessment Google Sheet and persist it.

    Drops empty rows and header-less columns, locates the header row, builds
    a dict of raider attributes keyed by name, writes it to raiders_file and
    logs a summary of added/removed members.
    """
    sheet = client.open("Hive Mind Giga-Sheet").worksheet('Assessment Sheet')
    sheet_values = sheet.get_all_values()
    del_rows = []
    # Delete from the end so earlier indices stay valid.
    for i, row in sorted(enumerate(sheet_values), reverse=True):
        if all([cell == '' for cell in row]):
            del sheet_values[i]
            del_rows.append(i)
    sheet_values = [[cell.strip().lower() for cell in row] for row in sheet_values]
    header_row = -1
    i = 0
    # BUGFIX: compare with == / != instead of 'is' — identity checks against
    # int/str literals are CPython-implementation-dependent and unreliable.
    while (header_row == -1) and (i < len(sheet_values)):
        if set(headers) <= set(sheet_values[i]):
            header_row = i
        i = i + 1
    if header_row == -1:
        return
    del_cols = []
    # Remove columns with no header, again iterating from the end.
    for i in sorted(range(0, len(sheet_values[0])), reverse=True):
        if sheet_values[header_row][i] == '':
            for j in range(0, len(sheet_values)):
                del sheet_values[j][i]
            del_cols.append(i)
    raiders = {}
    offset = 0
    for i, row in enumerate(sheet_values[1:]):
        # Skip over deleted rows (and the header row) to recover the
        # original 1-indexed sheet row number.
        while (i + offset in del_rows or i + offset == 1):
            offset += 1
        cells = {sheet_values[header_row][j]: row[j] for j in range(0, len(row))}
        raider = {}
        raider['row'] = i + offset  # 1-indexed
        raider['class'] = cells['class']
        raider['role'] = cells['role']
        raider['team'] = cells['team assign.']
        raider['attendance'] = cells['attendance']
        raider['performance'] = {key: cells[key] for key in ['bwl', 'mc', 'ony']}
        raiders[cells['name']] = raider
    prev_raiders = list(raiders_file.read().keys())
    raiders_file.write(raiders)
    added = [name for name in raiders.keys() if not name in prev_raiders]
    removed = [name for name in prev_raiders if not name in raiders.keys()]
    message = 'update_raiders task finished with {} raiders.'.format(len(raiders))
    if (len(added) > 0):
        message += ' Added {} members: {}.'.format(len(added), ', '.join(added))
    if (len(removed) > 0):
        message += ' Removed {} members: {}.'.format(len(removed), ', '.join(removed))
    logger.log_event('raider_update', message)
def stop_buffering(self):
    """Resume playback and convert the last buffering timestamp into a duration."""
    self.pipeline.set_state(gst.STATE_PLAYING)
    self.is_buffering = False
    # The last entry holds the interval's start time (set by start_buffering);
    # overwrite it in place with the elapsed duration of the interruption.
    self.buffering_time[-1] = time() - self.buffering_time[-1]
    logger.log_event("Playback started.")
def start_cmd(message):
    """Handle /start: greet a new player and ask for their token, or nudge a returning one."""
    chat_id = message.chat.id
    user = players.users.get(chat_id)
    is_new_player = user is None or not user.is_running
    if is_new_player:
        bot.send_message(chat_id, '''Привет! Это Лиза. Я вижу, вы решили помочь мне с квестом. Что ж, давайте начинать! Я буду присылать вам задания. Решайте их - получите новые. Удачи! Но сначала, введите код, который был отправлен на почту''')
        bot.register_next_step_handler(message, get_token)
    else:
        bot.send_message(chat_id, 'Мы уже познакомились! Продолжай помогать мне:)')
    logger.log_event(chat_id, 'Start called', get_user_name(chat_id))
def off_start(message):
    """Kick off the offline quest: send the geo-photos intro plus the player's photo."""
    chat_id = message.chat.id
    user = players.users.get(chat_id)
    if user is None:
        bot.send_message(chat_id, config.unknown_error)
        return
    logger.log_event(user.uid, 'Started', user.name)
    bot.send_message(chat_id, config.offline_geophotos_text)
    # Each player gets their own geo photo.
    bot.send_photo(chat_id, offtxt.geophotos_id[user.uid])
    bot.register_next_step_handler(message, check_start)
def start(message):
    """Begin the quest: announce the start and send the first task.

    BUGFIX: look the user up with dict.get — the original `players.users[id]`
    raised KeyError for unknown chats, making the None-check below unreachable.
    """
    chat_id = message.chat.id
    bot.send_message(chat_id, 'Начинаем!')
    user = players.users.get(chat_id)
    if user is None:
        bot.send_message(message.chat.id, config.unknown_error)
        return
    logger.log_event(chat_id, "Start called", user.name)
    # Exact-type check: only plain players (not admin subclasses) get tasks.
    if type(user) is players.User:
        send_task(message)
def final(message):
    """Reach the final offline checkpoint and prompt the player for the closing answer."""
    chat_id = message.chat.id
    user = players.users.get(chat_id)
    if user is None:
        bot.send_message(chat_id, config.unknown_error)
        return
    user.check_point = config.offline_check_points[6]
    logger.log_event(user.uid, 'Final', user.name)
    saver.save_users(players.users)
    bot.send_message(chat_id, config.offline_final_txt)
    bot.register_next_step_handler(message, check_final)
def __init__(self):
    """Parse CLI arguments, build the torrent-backed video pipeline and attach its message handler."""
    super(Main, self).__init__()
    # Populates the attributes consumed below (torrent_path, save_path, ...).
    self.parse_args()
    logger.log_event("Download started.")
    self.pipeline = VideoTorrentPlayer(self.torrent_path, self.use_fake_sink,
                                       self.save_path, self.algorithm,
                                       self.stream_length, self.buffer_size)
    # Presumably routes pipeline/bus messages back to this object — confirm
    # against MessageHandler's definition.
    self.message_handler = MessageHandler(self, self.pipeline)
def book_code(message):
    """Send the player their personal book-search task for the books checkpoint."""
    chat_id = message.chat.id
    user = players.users.get(chat_id)
    if user is None:
        bot.send_message(chat_id, config.unknown_error)
        return
    user.check_point = config.offline_check_points[1]
    logger.log_event(user.uid, 'Books', user.name)
    saver.save_users(players.users)
    # Each player is assigned a different book title.
    book_title = offtxt.books_names[user.uid]
    bot.send_message(chat_id, config.offline_books_text1 + book_title + config.offline_books_text2)
    bot.register_next_step_handler(message, check_books)
def tasks(message):
    """Send the player's current final-stage task and wait for the answer."""
    chat_id = message.chat.id
    user = players.users.get(chat_id)
    if user is None:
        bot.send_message(chat_id, config.unknown_error)
        return
    user.check_point = config.offline_check_points[5]
    logger.log_event(user.uid, 'Tasks', user.name)
    saver.save_users(players.users)
    task_index = user.curr_off_task
    bot.send_message(chat_id, offtxt.final_tasks[task_index])
    bot.register_next_step_handler(message, check_off_tasks)
def anti_photo(message):
    """Send the photo-cross task photos for this player's checkpoint.

    Sends the intro text followed by one photo per task id assigned to the
    player, then waits for their reply.
    """
    chat_id = message.chat.id
    user = players.users.get(chat_id)
    if user is None:
        bot.send_message(chat_id, config.unknown_error)
        return
    user.check_point = config.offline_check_points[2]
    logger.log_event(user.uid, 'Anti-photo', user.name)
    saver.save_users(players.users)
    task_ids = offtxt.photocross_tasks[user.uid]
    bot.send_message(chat_id, config.offline_photocross_text)
    # Idiom fix: iterate the ids directly instead of range(len(...)) indexing.
    for task_id in task_ids:
        bot.send_photo(chat_id, offtxt.photocross_id[task_id])
    bot.register_next_step_handler(message, check_photos)
async def wclRaidTask(self):
    """Periodic raid task: during scheduled raid windows, fetch new parses
    from Warcraft Logs and post a per-fight summary image to the entry's
    Discord channel.

    Logs transitions into/out of the raid window, drains self.to_plot one
    fight at a time, and removes each rendered image after posting.
    """
    dates = schedule.schedule_file.get('dates', on_error=[])
    for entry in dates:
        is_now = schedule.isNow(entry['start'], entry['end'])
        if is_now is not self.is_now:
            # Edge-triggered logging on window transitions.
            if is_now:
                logger.log_event('raid_task', 'Raid task started.')
            else:
                logger.log_event('raid_task', 'Raid task ended.')
        if is_now:
            # Fetch parses in a worker thread; results accumulate in self.to_plot.
            await asyncio.wait(
                fs={
                    loop.run_in_executor(None, get_new_parses, ['dps', 'hps'],
                                         self.to_plot)
                })
        self.is_now = is_now
        while (len(self.to_plot) > 0):
            f = self.to_plot[0]
            fight = f['fight']
            parses = f['parses']
            report = f['report']
            image_path = defs.dir_path + '/boss_summaries/' + fight['name'] + '.png'
            await asyncio.wait(
                fs={loop.run_in_executor(None, plot_fight, f, image_path)})
            link = 'https://classic.warcraftlogs.com/reports/' + str(
                report['id']) + '#fight=' + str(fight['id'])
            pre_message = "__**" + fight['name'] + "**__" + '\n'
            pre_message += 'Participants: ' + str(len(parses)) + '\n'
            pre_message += 'Time: ' + str(
                round((fight['end_time'] - fight['start_time']) / 1000,
                      1)) + 's' + '\n'
            # BUGFIX: wrap deaths in str() like every other field — string
            # concatenation with a non-str count raised TypeError.
            pre_message += 'Deaths: ' + str(fight['deaths']) + '\n'
            post_message = "Log link: " + link
            channel = self.bot.get_channel(entry['id'])
            await channel.send(content=pre_message, file=discord.File(image_path))
            await channel.send(content=post_message)
            os.remove(image_path)
            del self.to_plot[0]
    gc.collect()
def migration_update_coinbase_newcoins(self):
    """Backfill CoinbaseInfo.newcoins for one block past the migration cursor.

    Returns:
        True if a row was migrated (more work may remain), False when done.
    """
    # NOTE: '== None' is required inside the filter — SQLAlchemy overloads the
    # operator into an SQL 'IS NULL' clause; 'is None' would not translate.
    cb = self.db.session.query(CoinbaseInfo).filter(
        CoinbaseInfo.newcoins == None).filter(
            CoinbaseInfo.block_id > self.migration_last_id).first()
    # Idiom fix: the query RESULT is a plain Python value, so compare
    # with 'is None' rather than '== None'.
    if cb is None:
        return False
    self.migration_last_id = cb.block_id
    log_event('Migrate', 'cb', hexlify(cb.transaction.txid))
    # Newly minted coins = total coinbase output value minus the block's fees.
    cb.newcoins = cb.transaction.totalvalue - cb.block.totalfee
    self.db.session.add(cb)
    self.db.session.flush()
    return True
def check_mempool_for_doublespends(self):
    """Mark mempool transactions invalidated by the freshly synced chain as double-spent.

    Pass 1 flags unconfirmed coinbase transactions whose height is now at or
    below the chain tip; pass 2 (only if pass 1 found nothing) flags
    transactions that spend outputs of already-double-spent transactions.
    Returns True if anything was flagged and committed, False otherwise.
    """
    # Nothing new since the last check — skip the scan entirely.
    if self.last_mempool_check_blk == self.last_synced_blk:
        return False
    work_done = 0
    # Pass 1: unconfirmed coinbase txs overtaken by the canonical chain.
    for unconfirmed_coinbase_tx, coinbaseinfo in self.db.mempool_query(
            result_columns=(Transaction, CoinbaseInfo)).join(
                Transaction.coinbaseinfo).all():
        height = coinbaseinfo.height
        if height is None or height > self.db.chaintip().height:
            continue
        # Point the stale coinbase at the canonical coinbase that replaced it.
        unconfirmed_coinbase_tx.doublespends_id = self.db.chaintip(
        ).coinbaseinfo.transaction_id
        self.db.session.add(unconfirmed_coinbase_tx)
        log_tx_event(hexlify(unconfirmed_coinbase_tx.txid), 'DSpent',
                     coinbase=True, height=height)
        work_done += 1
    if not work_done:
        # Pass 2: txs spending outputs of transactions already flagged as
        # double-spent inherit the flag (DoubleSpendTransaction aliases the
        # parent side of the join).
        DoubleSpendTransaction = aliased(Transaction)
        for double_spend_tx, parent_tx_id, parent_txid in self.db.mempool_query(
                result_columns=(
                    Transaction, DoubleSpendTransaction.id,
                    DoubleSpendTransaction.txid)).join(
                        Transaction.txinputs).join(
                            TransactionInput.input).join(
                                DoubleSpendTransaction,
                                TransactionOutput.transaction).filter(
                                    DoubleSpendTransaction.doublespends_id != None).group_by(
                                        Transaction.id).all():
            double_spend_tx.doublespends_id = parent_tx_id
            self.db.session.add(double_spend_tx)
            log_tx_event(hexlify(double_spend_tx.txid), 'DSpent',
                         parent=hexlify(parent_txid))
            work_done += 1
    if work_done == 0:
        # Record the checkpoint so the next call can early-out.
        self.last_mempool_check_blk = self.last_synced_blk
        return False
    log_event('Commit', '%d' % work_done, 'double spent transactions')
    self.db.session.commit()
    return True
def indexer(context):
    """Indexer entry point: verify database state, run the initial block sync,
    then loop forever tracking the mempool and chain tip."""
    def main_loop():
        # One live-tracking iteration. Returns True when any work was done —
        # presumably so do_in_loop runs again immediately instead of sleeping
        # (confirm against do_in_loop's definition).
        context.db.reset_session()
        context.query_mempool()
        if context.sync_blocks():
            return True
        # Budget ~3 seconds for incremental background work.
        timeout = time() + 3
        if do_until_timeout(context.update_single_balance, timeout):
            return True
        # Data migration is done in bulk (with large commits!)
        if do_until_timeout(context.migrate_old_data, timeout):
            context.db.session.commit()
            return True
        return False
    log('\nChecking database state...\n')
    context.verify_state()
    log('\nPerforming initial sync...\n')
    context.sync_blocks(initial=True)
    log('\nSwitching to live tracking of mempool and chaintip.\n')
    do_in_loop(operation=main_loop,
               before_sleep=lambda: log_event('Synced', 'chn', ''))
async def update_attendance_task(self):
    """Push recalculated attendance percentages back into the Google Sheet.

    Compares each participant's computed attendance against the value already
    on the sheet and batch-updates only the cells that changed.
    """
    scope = [
        'https://spreadsheets.google.com/feeds',
        'https://www.googleapis.com/auth/drive'
    ]
    creds = ServiceAccountCredentials.from_json_keyfile_name(
        defs.dir_path + '/client_secret.json', scope)
    client = gspread.authorize(creds)
    sheet = client.open("Hive Mind Giga-Sheet").worksheet(
        'Assessment Sheet')
    participants = get_participants()
    to_update = []
    col = raiders.get_col('attendance')
    for name in participants:
        # Computed attendance as a whole-number percent.
        attendance = round(participants[name]['attendance'] * 100)
        sheet_attendance = raiders.getRaiderAttribute(name, 'attendance')
        try:
            # Sheet cells look like '95%'; normalize to a float for comparison.
            sheet_attendance = float(sheet_attendance.replace('%', ''))
        except ValueError:
            # Unparseable cell — treat as "needs update".
            sheet_attendance = None
        if attendance != sheet_attendance:
            row = raiders.getRaiderAttribute(name, 'row')
            # Write back as a fraction (0..1); the sheet presumably formats
            # the column as a percentage — confirm against the sheet setup.
            val = float(attendance) / 100
            to_update.append({'row': row, 'val': val})
    if len(to_update) > 0:
        # Fetch one contiguous range covering all changed rows, mutate the
        # affected cells locally, then push them in a single batch call.
        min_row = min([u['row'] for u in to_update])
        max_row = max([u['row'] for u in to_update])
        first = gspread.utils.rowcol_to_a1(min_row, col)
        last = gspread.utils.rowcol_to_a1(max_row, col)
        cells = sheet.range('{}:{}'.format(first, last))
        for update in to_update:
            row = update['row'] - min_row
            cells[row].value = update['val']
        sheet.update_cells(cells)
    logger.log_event(
        'attendance_update',
        'update_attendance task finished with {} updates.'.format(
            len(to_update)))
def get_suppressionListIds(value_origin):
    """Map a workflow's suppression-list ids from the origin portal to the target portal.

    Args:
        value_origin: list of origin-portal list ids.

    Returns:
        A new list of mapped ids. Entries whose mapping resolves to None
        (an explicit null fallback in the id mapping) are logged as
        unsubstituted and dropped from the result.
    """
    assert isinstance(value_origin, list)
    list_copy = []
    for list_id in value_origin:
        mapped_list_id = get_target_id("listId", list_id)
        if mapped_list_id is None:
            # No usable target id: record the unresolved dependency, skip it.
            logger.log_event("suppression_list_dependency", {
                "listId": str(list_id),
                "substituted": False
            })
        else:
            logger.log_event("suppression_list_dependency", {
                "listId": str(list_id),
                "substituted": True
            })
            list_copy.append(mapped_list_id)
    return list_copy
def process_actions(actions, node_processor):
    """Apply node_processor to each workflow action, recursing into branches.

    node_processor must take an action (dict) and return a list of actions;
    the flattened result for the whole input list is returned.
    """
    logger.set_segment_context("branching")
    processed = []
    for action in actions:
        action_type = action["type"]
        if action_type == "WORKFLOW_ENROLLMENT":
            # Cross-workflow dependency: record it for the migration log.
            logger.log_event("action_dependency", {"workflowId": str(action["workflowId"])})
        if action_type == "BRANCH":
            logger.log_event("branching_action")
            # Recurse into both arms on a shallow copy before handing the
            # branch node itself to the processor.
            branch = action.copy()
            branch["rejectActions"] = process_actions(action["rejectActions"], node_processor)
            branch["acceptActions"] = process_actions(action["acceptActions"], node_processor)
            processed.extend(node_processor(branch))
        else:
            processed.extend(node_processor(action))
    return processed
def route_detect_faces():
    """Detect and classify faces in an image POSTed as a pickled numpy matrix.

    Returns:
        JSON string with bounding boxes and predictions, or an error payload
        for non-POST requests.
    """
    logger.log_event("Server: received face detection request")
    if request.method == "POST":
        # Receive data and deserialize to image matrix (marked below with horizontal rules)
        # NOTE: THIS DESERIALIZATION NEEDS TO MATCH THE CLIENT (SENDER'S) SERIALIZATION
        # SECURITY: pickle.loads on network-supplied bytes can execute arbitrary
        # code. Only expose this endpoint behind a trusted boundary, or switch
        # both ends to a safe format (e.g. np.save/np.load with allow_pickle=False).
        # ---------------------------------------------------------------------
        image_matrix_pickled = request.get_data()
        # Unpickle from byte string
        image_matrix = pickle.loads(image_matrix_pickled)
        # ---------------------------------------------------------------------
        # Write image to disk (optionally)
        if configuration.DEBUG_SERVER_WRITE_IMAGE:
            timestamp = '{0:%Y-%m-%d-%H-%M-%S}'.format(datetime.datetime.now())
            image_filename = timestamp + ".jpg"
            cv2.imwrite(
                configuration.DEBUG_SERVER_WRITE_IMAGE_DIR + "/" + image_filename,
                image_matrix)
            logger.log_event("Server: image written to disk at " +
                             configuration.DEBUG_SERVER_WRITE_IMAGE_DIR + "/" +
                             image_filename)
        # Compute MD5 hash and log it (lets client and server confirm the
        # transferred matrix is identical).
        hash_md5 = hashlib.md5()
        hash_md5.update(image_matrix)
        hash_image_matrix = hash_md5.hexdigest()
        logger.log_event("Server: received image matrix MD5 hash is: " + hash_image_matrix)
        # Detect faces and classify faces
        bounding_boxes, predictions = classifier_face.classify(image_matrix)
        logger.log_event("Server: finished face detection on image")
        # Build response and send it
        response = {
            "bounding_boxes": bounding_boxes,
            "predictions": predictions
        }
        return json.dumps(response)
    else:
        # BUGFIX: corrected typo 'requesst' in the logged error message.
        logger.log_error("Server: received a bad request")
        response = {"message": "bad request"}
        return json.dumps(response)
def get_unenrollmentSetting(value_origin):
    """Map the excluded-workflow ids inside an unenrollment setting to the target portal.

    Args:
        value_origin: unenrollment-setting dict containing an
            "excludedWorkflows" list of origin-portal workflow ids.

    Returns:
        The same dict with "excludedWorkflows" replaced by the mapped ids;
        entries whose mapping resolves to None are logged as unsubstituted
        and dropped. NOTE: mutates value_origin in place.
    """
    assert isinstance(value_origin["excludedWorkflows"], list)
    workflows_list_copy = []
    for wf_id in value_origin["excludedWorkflows"]:
        mapped_wf_id = get_target_id("workflowId", wf_id)
        if mapped_wf_id is None:
            # No usable target id: record the unresolved dependency, skip it.
            logger.log_event("concurrent_workflow_dependency", {
                "workflowId": str(wf_id),
                "substituted": False
            })
        else:
            logger.log_event("concurrent_workflow_dependency", {
                "workflowId": str(wf_id),
                "substituted": True
            })
            workflows_list_copy.append(mapped_wf_id)
    value_origin["excludedWorkflows"] = workflows_list_copy
    return value_origin
def delete_user_cmd(message):
    """Offer a super-admin a keyboard listing all users so one can be picked for deletion."""
    chat_id = message.chat.id
    caller = players.users.get(chat_id)
    logger.log_event(chat_id, 'Delete user called', get_user_name(chat_id))
    # Guard clauses: unknown caller, then insufficient privileges.
    if caller is None:
        bot.send_message(chat_id, config.registration_error)
        return
    if not (caller.get_type() == 'admin' and caller.is_super):
        bot.send_message(chat_id, config.permission_error)
        return
    markup = types.ReplyKeyboardMarkup(one_time_keyboard=True, row_width=1)
    for u in players.users.values():
        marker = ' (super) ' if u.get_type() == 'admin' and u.is_super else ''
        markup.add(types.KeyboardButton(u.name + ' - ' + marker + u.get_type() + ' ChatID: ' + str(u.chatID)))
    bot.send_message(chat_id, 'Выберите пользователя для удаления:', reply_markup=markup)
    bot.register_next_step_handler(message, del_user)
def get_target_id(attribute, value_origin):
    """Translate an origin-portal id into its target-portal counterpart.

    Looks up the mapping in id_mappings.json via the attribute's getter.
    If there is no mapping, any provided fallback value is applied (which may
    be null/None); with neither mapping nor fallback, the ORIGINAL value is
    returned unchanged (remove mappings and fallback if an ID should not be
    changed). Every substitution is logged.

    Raises:
        ValueError: if value_origin is None.
    """
    if value_origin is None:
        raise ValueError(
            "value_origin must not be None in function get_target_id")
    if value_origin == "":
        return ""
    substitution_value = attribute_to_getter[attribute](value_origin)
    logger.log_event(
        "id_substitution", {
            "type": str(attribute),
            "original_id": str(value_origin),
            "mapped_id": str(substitution_value)
        })
    return substitution_value
def check_status(self):
    """Periodic status callback: log torrent download/upload statistics.

    Returns True so the scheduling loop keeps invoking it.
    """
    # Property is scaled to percent here (presumably 0..1 — confirm against
    # the source element's docs).
    download_progress = self.src.get_property("download_progress") * 100
    if download_progress < 100:
        download_rate = self.src.get_property("download_rate") / 1024  # -> KiB/s
        self.download_rates.append(download_rate)
        pieces = self.src.get_property("pieces")
        num_pieces = self.src.get_property("num_pieces")
        logger.log("Download progress: %d%%" % download_progress)
        logger.log("Download rate: %d KiB/s" % download_rate)
        logger.log("Pieces: %s" % pieces)
        logger.log("Number of pieces: %s" % num_pieces)
    elif not self.download_finished:
        # First tick at 100%: convert the stored start timestamp into the
        # total download duration, exactly once.
        self.download_finished = True
        self.download_time = time() - self.download_time
        logger.log_event("Download finished.")
    # Upload-side stats are sampled on every tick, regardless of progress.
    upload_rate = self.src.get_property("upload_rate") / 1024  # -> KiB/s
    self.upload_rates.append(upload_rate)
    logger.log("Upload rate: %d KiB/s" % upload_rate)
    peers = self.src.get_property("num_peers")
    seeds = self.src.get_property("num_seeds")
    connected_peers = self.src.get_property("num_connected_peers")
    connected_seeds = self.src.get_property("num_connected_seeds")
    num_uploads = self.src.get_property("num_uploads")
    distributed_copies = self.src.get_property("distributed_copies")
    next_announce = self.src.get_property("next_announce")
    logger.log("Connected peers (total): %d (%d)" % (connected_peers, peers))
    logger.log("Connected seeds (total): %d (%d)" % (connected_seeds, seeds))
    logger.log("Number of unchocked peers: %d" % num_uploads)
    logger.log("Distributed copies: %d" % distributed_copies)
    logger.log("Next annouce to tracker (s): %d" % next_announce)
    return True
def handle_eos_message(self, bus, message):
    """End-of-stream bus callback: log the event, then stop the main loop."""
    logger.log_event("Playback finished.")
    self.main_loop.quit()
def quit(self):
    """Log the shutdown, close the pipeline, then delegate to the base class."""
    logger.log_event("Stopping...")
    self.pipeline.close()
    super(Main, self).quit()