def ClientShutdown(self):
    self.ServerRestart()
    logger.info("closing connection ...")
    self.socket.close()
    logger.info(f"server connection closed on port: {self.port}")
def process_series_options(message: str) -> SendInformation:
    logger.info(f"Processing received message: {message}")
    series_name = ''
    year = datetime.datetime.now().year
    season = ''
    episode = ''
    quality = ''
    # Strip the command prefix and split the rest into "-<flag> <value>" pairs.
    parameters = message[7:].split(' -')[1:]
    for parameter in parameters:
        parameter = parameter.strip()
        if parameter.startswith('n ') or parameter.startswith('name '):
            series_name = parse_name(parameter)
        elif parameter.startswith('y ') or parameter.startswith('year '):
            year = parse_year(parameter)
        elif parameter.startswith('s ') or parameter.startswith('season '):
            season = parse_season(parameter)
        elif parameter.startswith('e ') or parameter.startswith('episode '):
            episode = parse_episode(parameter)
        elif parameter.startswith('q ') or parameter.startswith('quality '):
            quality = parse_quality(parameter)
    if check_correct_parameters(series_name, year, season, episode, quality):
        options = Options(series_name, year, season, episode, quality)
        query_result = find_episode_torrents(options)
    else:
        query_result = SendInformation('', INCORRECT_SERIES_FORMAT)
    return query_result
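# Usage sketch (hypothetical command text): the 7-character prefix stripped by
# message[7:] matches a "/series" command, so a message is expected to look like
#
#   result = process_series_options('/series -n The Office -y 2005 -s 2 -e 1 -q 720p')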
def generate_template(template_information: List[DisplaySubtitleInformation],
                      cover_url: str,
                      display_limit: int) -> SendInformation:
    logger.info('Generating message with format for Telegram.')
    first_message = ''
    remaining_messages = []
    for index, movie_subtitle in enumerate(template_information, start=1):
        if index > display_limit:
            break
        temp_text = f"<strong><u>Option {index}</u></strong>\n\n"
        temp_text += f"<strong>Rating:</strong> {movie_subtitle.rating}\n"
        temp_text += f"<strong>Language:</strong> {movie_subtitle.language}\n"
        temp_text += f"<strong>Uploader:</strong> {movie_subtitle.uploader}\n"
        temp_text += ("<strong>Compatible torrents:</strong>\n\n"
                      f"{movie_subtitle.compatible_torrents}\n\n")
        temp_text += ("\U0001F5C3<strong>:</strong> "
                      f"<a title=\"Subtitle File\" href=\"{movie_subtitle.download_link}\">"
                      "Subtitle File</a>\n\n")
        # Anything that would push the first message over the size limit is
        # deferred to follow-up messages.
        if message_exceeds_size(first_message):
            remaining_messages.append(temp_text)
        else:
            first_message += temp_text
    if remaining_messages:
        reduced_remaining_parts = join_remaining_parts(remaining_messages)
        return SendInformation(cover_url, first_message, reduced_remaining_parts)
    return SendInformation(cover_url, first_message)
def status_command(update: Update, context: CallbackContext) -> None:
    """Sends a message confirming the bot is running when the /status command is issued."""
    user_info = update.effective_message.from_user
    identifier = get_user(user_info)
    logger.info(f"Status check requested by user: {identifier}")
    update.effective_message.reply_text(STATUS_COMMAND)
def filter_album_tracks(oauth_token: str, recent_tracks: list) -> list:
    try:
        tracks = []
        for track in recent_tracks:
            album_tracks = get_album_tracks(oauth_token, track['album_id'])
            df_album_tracks = pd.DataFrame(album_tracks)
            df_album_tracks = df_album_tracks.filter(['items'])
            df_album_tracks = pd.json_normalize(df_album_tracks.to_dict('records'))
            df_album_tracks = df_album_tracks.filter(['items.id', 'items.track_number'])
            df_album_tracks.rename(columns={
                'items.id': 'track_id',
                'items.track_number': 'track_number'
            }, inplace=True)
            # Shift track numbers down by one (the upstream data appears to be 1-based).
            df_album_tracks['track_number'] = df_album_tracks['track_number'] - 1
            current_track = pd.merge(pd.DataFrame(recent_tracks),
                                     df_album_tracks,
                                     on='track_id',
                                     how='inner').to_dict('records')[0]
            tracks.append(current_track)
        logger.info('Album tracks have been filtered successfully')
        return tracks
    except Exception as error:
        logger.error(f'Error filtering album tracks: {error}')
        raise
def webhook(event, context):
    """Replies to an incoming message (only /start is handled)."""
    bot = configure_telegram()
    logger.info('Event: {}'.format(event))
    if event.get('body'):
        logger.info('Message received')
        update = telegram.Update.de_json(json.loads(event.get('body')), bot)
        chat_id = update.message.chat.id
        text = update.message.text
        if text == '/start':
            # Register the chat if it is not known yet.
            try:
                Chat.get(chat_id)
            except DoesNotExist:
                chat = Chat(chat_id)
                chat.save()
            # "This is the update notification bot for KAIST Ara's Food board.
            # Posts from the previous day are delivered every day at 11 AM."
            reply_text = """카이스트 아라의 Food 게시판 업데이트 알림 봇입니다. 매일 오전 11시에 전날의 게시글이 전달됩니다."""
            bot.send_message(chat_id=chat_id, text=reply_text)
            logger.info('Message sent')
        else:
            logger.info('Input message ignored')
        return OK_RESPONSE
    logger.info('Input message ignored')
    return ERROR_RESPONSE
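# A minimal local-invocation sketch, assuming an API Gateway-style event that
# wraps a Telegram update; every field value below is illustrative only:
#
#   sample_event = {
#       "body": json.dumps({
#           "update_id": 1,
#           "message": {
#               "message_id": 1,
#               "date": 0,
#               "chat": {"id": 42, "type": "private"},
#               "text": "/start",
#           },
#       })
#   }
#   webhook(sample_event, context=None)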
def send_remaining_messages(update: Update, identifier: str,
                            remaining_messages: List[str]) -> None:
    logger.info(f"Sending remaining messages to the user: {identifier}")
    for remaining_message in remaining_messages:
        update.effective_message.reply_html(remaining_message,
                                            disable_web_page_preview=True)
def _evaluate_and_order_asset(self, asset: Asset, value_to_order: float):
    actual_positions = self._ordered_mapping[asset.market_type][asset.asset_type]
    # There might be multiple positions for the same asset. Find the least valued one.
    least_valued_pos = min(actual_positions.values(),
                           key=lambda x: x.updated_data.market_value)
    pos_md = self._latest_market_data[least_valued_pos.updated_data.contract_id]
    ask_price = pos_md.ask_price
    if ask_price is None:
        raise SystemError(f"Ask price is not available for: {least_valued_pos.name}")
    # Quantity of stocks must be an integer. This might change for other markets.
    quantity = int(value_to_order / ask_price)
    logger.info(f"Placing order: BUY >> {least_valued_pos.name}, "
                f"Quantity: {quantity}, "
                f"Ask Price: {ask_price}")
    self._transaction_mgr.queue_for_execution(
        self._broker_interface.place_single_order,
        least_valued_pos.updated_data.contract_id,
        least_valued_pos.name,
        quantity,
        OrderTypes.LIMIT,
        OrderActions.BUY_ORDER,
        least_valued_pos.updated_data.sec_type,
        least_valued_pos.updated_data.currency,
        limit_price=ask_price)
def main():
    """Main function.

    Run this file with `python app.py`.
    """
    print("\n")
    logger.info("Success! Here is your config:\n")
    logger.info(json.dumps(config, indent=4))
def _persist_data(self, var_data, session_id="", channel="", business_name="",
                  flow_id="", business_id=""):
    # TODO: change this method to perform async
    if var_data == {}:
        return 1
    object_id = str(uuid.uuid4())
    timestamp = datetime.datetime.utcnow()
    collection = DB["user_data"]
    document = {
        "_id": object_id,
        "user_id": self.user_id,
        "flow_id": flow_id,
        "session_id": session_id,
        "business_id": business_id,
        "data": var_data,
        "channel": channel,
        "business_name": business_name,
        "timestamp": timestamp,
    }
    try:
        saved_document_id = collection.insert_one(document).inserted_id
        logger.info(f"Variable data saved with object_id {saved_document_id}")
        return 1
    except Exception as err:
        logger.error(err)
        raise
def parse_settings(settings_file) -> dict:
    """Parses plot settings from a settings file.

    settings_file is the path to the text file containing the settings.

    Returns a dictionary containing the settings for the sashimi plot.
    (Hive and structure plot settings are no longer parsed here.)
    """
    try:
        config = configparser.ConfigParser()
        logger.info('Reading settings from {0}...'.format(settings_file))
        config.read(settings_file)
        return parse_sashimi_settings(config)
    except IOError:
        logger.error('{0} is not a valid file path'.format(settings_file))
        sys.exit(1)
def load_configuration():
    global dropbox_client, local_cloud_path, upload_prefix, enable_upload, \
        download_prefix, enable_download, thin_mode_byte_length, tmp_path, \
        sync_interval_in_sec
    with open(CONFIGURATION_FILE, 'r') as file_handle:
        configs = json.load(file_handle)
    server_config = configs["server_configuration"]
    init_logger(server_config["log_path"])
    local_cloud_path = server_config["local_cloud_path"]
    upload_prefix = server_config["upload_filename_magic"]
    download_prefix = server_config["download_filename_magic"]
    # The enable flags are stored as strings; parse "true"/"false" into booleans.
    enable_download = json.loads(server_config["enable_download"].lower())
    enable_upload = json.loads(server_config["enable_upload"].lower())
    thin_mode_byte_length = int(server_config["bytes_length_in_thin_mode"])
    tmp_path = server_config["tmp_path"]
    sync_interval_in_sec = server_config["sync_interval_in_sec"]
    access_token = configs["cloud_credential"]["dropbox"]["access_token"]
    if access_token != DEFAULT_ACCESS_TOCKEN:
        dropbox_client = DropboxClient(access_token)
    logger.info("Finished loading configuration")
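# A sketch of the expected CONFIGURATION_FILE layout, inferred from the keys
# read above; all values are illustrative only:
#
# {
#     "server_configuration": {
#         "log_path": "/var/log/sync.log",
#         "local_cloud_path": "/home/user/cloud",
#         "upload_filename_magic": "up_",
#         "download_filename_magic": "down_",
#         "enable_download": "true",
#         "enable_upload": "true",
#         "bytes_length_in_thin_mode": "1024",
#         "tmp_path": "/tmp",
#         "sync_interval_in_sec": 60
#     },
#     "cloud_credential": {
#         "dropbox": {"access_token": "YOUR_ACCESS_TOKEN"}
#     }
# }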
def spawn_food(amount=1):
    for _ in range(amount):
        Food()
    logger.info('Spawned %s food', amount)
def set_wallpaper(path: str):
    if system_platform == 'Windows':
        logger.info("Windows platform")
        set_set_wallpaper_win(path)
    else:
        logger.info("Unix platform")
        set_set_wallpaper_unix(path)
def innateDbGene(data, filename):
    """Parses a TSV file from InnateDB.

    IN:  dict + TSV file
    OUT: dict
    """
    number = 1  # number of lines parsed
    workInProgress = 100  # how often to inform the user
    with open(filename, newline='') as tsvFile:
        for row in tsvFile.readlines()[1:]:
            column = row.rstrip().split('\t')
            geneID = column[3]
            if geneID not in data:
                # Process GO terms.
                goTerms = column[14]
                goTerms = goterms2xmlformat(goTerms)
                data[geneID] = Gene(geneID, column[5], column[6], "innateDb",
                                    column[15], "", "", column[2], goTerms)
                if data[geneID].accession == "":
                    # Process accession.
                    data[geneID].accEnsembl()
                if data[geneID].sequence == "" and data[geneID].accession != "":
                    # Process sequence.
                    data[geneID].seqUniprot()
                # log.debug(str(data[geneID].echo()))
            number += 1
            if number % workInProgress == 0:
                log.info("Parsed " + str(number) + " lines")
    log.info("Parsed a total of " + str(number) + " lines")
    return data
def test_for_nans(data, name):
    if not len(data):
        raise ValueError('Data is empty!')
    nan_rows = data[data.isnull().values.any(axis=1)]
    if not len(nan_rows):
        logger.info('\t\tNo NaNs found in {}'.format(name))
        return
    logger.info(
        '\t\tFound {} rows ({:.1f}%) and {} columns (out of {}) with NaNs in {}'
        .format(len(nan_rows), 100 * len(nan_rows) / len(data),
                data.isnull().values.any(axis=0).sum(), len(data.columns),
                name))
    # Break the NaN counts down per source and per column.
    nan_counts = {}
    for source, source_data in data.groupby('source'):
        source_nans = {}
        for column in source_data.columns:
            col_nans = source_data[column].isnull().values.sum()
            if col_nans:
                source_nans[column] = col_nans
        nan_counts[source] = source_nans
    for k, v in nan_counts.items():
        logger.error('\t\tFor source {}, found NaNs in {} columns:'.format(k, len(v)))
        for l, c in v.items():
            logger.error('\t\t\t{:20} {:8} ({:.1f}%)'.format(l, c, 100 * c / len(data)))
    raise ValueError('Found NaNs in {}'.format(name))
def get_image_from_event(event):
    content_type_header = event["headers"]["content-type"]
    body = base64.b64decode(event["body"])
    logger.info("Body Loaded")
    picture = decoder.MultipartDecoder(body, content_type_header).parts[0]
    filename = get_picture_filename(picture).replace('"', "")
    return picture, filename
def filter_data(data, msg, keep_condition=None, drop_indices=None,
                keep_indices=None, quiet_mode=False):
    """Filters a pandas DataFrame and logs the result with `msg`.

    Either supply a condition under which data is to be *kept*, or supply a
    list of indices to drop (or to keep).
    """
    n_before = len(data)
    if drop_indices is not None:
        assert keep_condition is None
        assert keep_indices is None
        data = data.drop(drop_indices)
    elif keep_indices is not None:
        assert keep_condition is None
        assert drop_indices is None
        data = data.loc[keep_indices]
    else:
        data = data[keep_condition]
    n_after = len(data)
    if not quiet_mode:
        logger.info('\t\tRemoved {} data ({:.1f}%) which '.format(
            n_before - n_after, 100 * (n_before - n_after) / n_before) + msg)
    # Silences SettingWithCopyWarning on older pandas versions.
    data.is_copy = False
    return data
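# Usage sketch with hypothetical column and index names:
#
#   data = filter_data(data, 'had non-positive prices',
#                      keep_condition=data['price'] > 0)
#   data = filter_data(data, 'were flagged as outliers',
#                      drop_indices=outlier_indices)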
def get_session():
    logger.info("Session route hit")
    user_id = request.args.get("user_id")
    response = UserController.get(user_id)
    return response
def __save_dicts(self):
    with open(self.save_path_w2i, 'w') as f:
        json.dump(self.word2index, f)
    with open(self.save_path_i2w, 'w') as f:
        json.dump(self.index2word, f)
    logger.info('Saved')
def start_command(update: Update, context: CallbackContext) -> None:
    """Sends a welcome message when the /start command is issued."""
    user_info = update.effective_message.from_user
    identifier = get_user(user_info)
    logger.info(f"Bot started for user: {identifier}")
    update.effective_message.reply_text(WELCOME_START_COMMAND)
def generate_template_information(movies_data: ByIMDb) -> DisplayMovieInformation:
    logger.info('Generating template information.')
    title = movies_data.title
    year = movies_data.year
    rating = f"{movies_data.rating} / 10"
    runtime = f"{movies_data.runtime} minute(s)"
    genres = movies_data.genres
    youtube_trailer = movies_data.youtube_trailer
    mpa_rating = movies_data.mpa_rating
    large_cover_image = movies_data.large_cover_image
    torrents = []
    for torrent in movies_data.torrents:
        torrent_url = torrent.torrent_url
        quality = torrent.quality
        release_type = parse_release_type(torrent.release_type)
        health = f"{torrent.seeds} Seed(s) / {torrent.peers} Peer(s)"
        size = HumanBytes.format(torrent.size_bytes, metric=True)
        torrents.append(DisplayTorrentInformation(torrent_url, quality,
                                                  release_type, health, size))
    return DisplayMovieInformation(title, year, rating, runtime, genres,
                                   youtube_trailer, mpa_rating,
                                   large_cover_image, torrents)
def echo(update: Update, context: CallbackContext) -> None:
    """Replies when the user sends a non-command message."""
    user_info = update.effective_message.from_user
    identifier = get_user(user_info)
    logger.info(f"A non-command message has been sent from user: {identifier}")
    update.effective_message.reply_text(WRONG_FORMAT_ECHO_COMMAND)
def register_jobs(jobs_config_path):
    with open(jobs_config_path) as json_file:
        config = json.load(json_file)
    query_hashes = set()
    for query in config['queries']:
        frequency = query['frequency']
        query_hash = query['query_hash']
        if query_hash in query_hashes:
            raise ValueError(
                f"Query hash {query_hash} is duplicated; the same query can't be scheduled multiple times")
        query_hashes.add(query_hash)
        frequency_number = int(frequency[:-1])
        if frequency[-1] == 's':
            schedule.every(frequency_number).seconds.do(run_threaded, job, query_hash)
        elif frequency[-1] == 'm':
            schedule.every(frequency_number).minutes.do(run_threaded, job, query_hash)
        elif frequency[-1] == 'h':
            schedule.every(frequency_number).hours.do(run_threaded, job, query_hash)
        else:
            raise ValueError(f'Config expects a frequency of the form <x>[s|m|h], got {frequency}')
    if len(query_hashes) == 0:
        raise ValueError('No query found')
    next_run: datetime.timedelta = schedule.next_run() - datetime.datetime.now()
    logger.info('Successfully loaded %s queries, next run in %1.0f s',
                len(query_hashes), next_run.total_seconds())
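# A sketch of the expected jobs config, inferred from the fields read above;
# the hash values are illustrative only:
#
# {
#     "queries": [
#         {"query_hash": "abc123", "frequency": "30s"},
#         {"query_hash": "def456", "frequency": "5m"},
#         {"query_hash": "ghi789", "frequency": "1h"}
#     ]
# }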
def prepare(self, region: SpliceRegion):
    data = []
    for f in self.files:
        with pyBigWig.open(f) as r:
            try:
                data.append(r.values(region.chromosome, region.start, region.end + 1))
            except RuntimeError as e:
                logger.warning(e)
                logger.info("may be caused by a chromosome-name mismatch ('chr' prefix)")
                # Retry with the 'chr' prefix toggled.
                if region.chromosome.startswith("chr"):
                    data.append(r.values(region.chromosome.replace("chr", ""),
                                         region.start, region.end + 1))
                else:
                    data.append(r.values("chr" + region.chromosome,
                                         region.start, region.end + 1))
    self.data = np.array(data)
    if self.clustering and self.data.shape[0] > 1:
        # Hierarchical clustering to reorder the tracks.
        linkage_matrix = linkage(self.data, method=self.clustering_method,
                                 metric=self.distance_metric)
        order = dendrogram(linkage_matrix, orientation='right')
        self.data = self.data[order["leaves"], :]
    if self.do_scale:
        # z-score each row: y = (x - mean) / standard_deviation
        self.data = zscore(self.data, axis=1)
def say(bot, message, message_to_say=None):
    logger.info(message)
    if not message_to_say:
        message_to_say = get_message(message)
    if message_to_say:
        send_functions[message_to_say.message_type](bot, message.chat.id,
                                                    message_to_say.value)
def get_title(quality: str, raw_title: str) -> str:
    logger.info("Recovering title.")
    index = raw_title.find(quality)
    if index == -1:
        # The quality marker was not found; return the title unchanged.
        return raw_title
    return raw_title[:index]
def solve_puzzle(ctx):
    """Solves a given puzzle."""
    puzzle = PuzzleUtils.load_puzzle(ctx.obj[PUZZLE_FILE_KEY])
    puzzle.solve()
    logger.info(puzzle.dump_grid())
def is_bam(infile: str) -> bool:
    """Checks whether the input file is a BAM or SAM file.

    :param infile: path to input file
    :return: Boolean
    """
    try:
        create = False
        if not os.path.exists(infile + ".bai"):
            create = True
        elif os.path.getctime(infile + ".bai") < os.path.getctime(infile):
            # The index is older than the alignment file; rebuild it.
            try:
                os.remove(infile + ".bai")
                create = True
            except PermissionError as err:
                logger.warning(err)
                create = False
        else:
            try:
                with pysam.AlignmentFile(infile) as r:
                    r.check_index()
            except ValueError:
                create = True
        if create:
            logger.info(f"Creating index for {infile}")
            pysam.index(infile)
        return True
    except pysam.utils.SamtoolsError:
        return False
def StopTorcs(containerID: str):
    logger.info(f"stopping torcs process on container {containerID}")
    subprocess.Popen(
        ["docker", "exec", containerID, "pkill", "torcs"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )