async def on_message(message):
    '''
    Handles incoming messages.
    :param message: Posted message.
    :return:
    '''
    message.content = message.content.strip()  # Remove excess whitespace
    message.content = " ".join(message.content.split())  # Remove duplicate whitespace
    if message.author == client.user:
        return
    if message.content.startswith(command_start):  # TODO: Look at bot command prefix parameter for discord.py library
        logger.info("User {} ran {} on guild {}.".format(
            message.author, message.content, message.guild.name))
        command = message.content.split(" ")[0].replace(command_start, "", 1).lower()
        command = resolve_command(command)
        if command in command_lookup:
            command_info = command_lookup[command]
            safe, args = await process_command_params(message, command_info)
            if safe:
                await command_info.run(message, args)
        else:
            logger.info("Command not recognized.")
def current_version(binary, version_modpath):
    """
    Summary:
        Returns the current binary package version if locally installed,
        or the master branch __version__ if the binary being built is not
        installed locally.
    Args:
        :binary (str): Name of the main project executable
        :version_modpath (str): path to the __version__ module
    Returns:
        current version number of the project, TYPE: str
    """
    pkgmgr = 'apt'
    pkgmgr_bkup = 'apt-cache'
    if which(binary):
        if which(pkgmgr):
            cmd = pkgmgr + ' show ' + binary + ' 2>/dev/null | grep Version | head -n1'
        elif which(pkgmgr_bkup):
            cmd = pkgmgr_bkup + ' policy ' + binary + ' 2>/dev/null | grep Installed'
        try:
            installed_version = subprocess.getoutput(cmd).split(':')[1].strip()
            return greater_version(installed_version, __version__)
        except Exception:
            logger.info(
                '%s: Build binary %s not installed, comparing current branch version to master branch version'
                % (inspect.stack()[0][3], binary))
    return greater_version(masterbranch_version(version_modpath), __version__)
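# The greater_version helper used above is not shown in this snippet. A minimal
# sketch of what it might look like (an assumption, not the project's actual
# implementation), comparing two version strings with packaging.version:
from packaging.version import parse as parse_version

def greater_version(version_a, version_b):
    """Return whichever of the two version strings denotes the higher version."""
    return version_a if parse_version(version_a) >= parse_version(version_b) else version_b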
def query(path):
    try:
        # We want to log a timestamp when the request was received; the logger's
        # automatic timestamping handles that.
        logger.info(f'{request.method} - /{path} - {dict(request.args)}')
        if request.method != 'GET':
            logger.error(f'{request.method} method not allowed\n')
            return
        if request.args.get('invalid') == '1':
            logger.error('Got "invalid = 1" parameter\n')
            return
        if path not in ['api/', 'api']:
            logger.error('Invalid path\n')
            return
        process1()
        is_success = process2()
        if not is_success:
            return
        process3()
    finally:
        return make_response()
def ospackages(pkg_list):
    """Summary
        Install OS Package Prerequisites
    Returns:
        Success | Failure, TYPE: bool
    """
    try:
        for pkg in pkg_list:
            if is_installed(pkg):
                logger.info(f'{pkg} binary is already installed - skip')
                continue
            elif which('yum'):
                cmd = 'sudo yum install ' + pkg + ' 2>/dev/null'
                print(subprocess.getoutput(cmd))
            elif which('dnf'):
                cmd = 'sudo dnf install ' + pkg + ' 2>/dev/null'
                print(subprocess.getoutput(cmd))
            else:
                logger.warning(
                    '%s: Dependent OS binaries not installed - package manager not identified'
                    % inspect.stack()[0][3])
    except OSError as e:
        logger.exception('{}: Problem installing os package {}'.format(
            inspect.stack()[0][3], pkg))
        return False
    return True
def list_dirs(relative_path):
    images = []
    dirs = []
    try:
        path = join(static_dir, relative_path)
        all_files = listdir(path)
        relative_path = "/" if relative_path == "" else "/" + relative_path + "/"
        areas = []
        points = []
        dirs = []
        images = []
        for f in all_files:
            if f.endswith("_areas.txt"):
                img_areas = []
                with open(join(path, f)) as f_areas:
                    for line in f_areas:
                        annotations = line.rstrip('\n').split(' ')
                        img_areas.append({
                            'x': annotations[0],
                            'y': annotations[1],
                            'width': annotations[2],
                            'height': annotations[3]
                        })
                areas.append((relative_path + f.rsplit('_areas.txt')[0],
                              json.dumps(img_areas)))
            if f.endswith("_points.txt"):
                img_points = []
                with open(join(path, f)) as f_areas:
                    for line in f_areas:
                        annotations = line.rstrip('\n').split(' ')
                        img_points.append({
                            'x': annotations[0],
                            'y': annotations[1]
                        })
                points.append((relative_path + f.rsplit('_points.txt')[0],
                               json.dumps(img_points)))
            if f.endswith(".jpg") or f.endswith(".JPEG"):
                images.append(relative_path + f)
            if os.path.isdir(join(path, f)):
                dirs.append(unicode(f, "utf-8") if type(f) != unicode else f)
        areas = dict(areas)
        points = dict(points)
    except Exception as e:
        logger.info("Exception occurred: %s", e)
        logger.exception(e)
    return render_template("browse.html",
                           title='Browse',
                           dirs=sorted(dirs),
                           images=sorted(images),
                           areas=areas,
                           points=points,
                           total=len(images) + len(dirs))
def cache_popularities(movie_pages=5, tv_show_pages=2):
    """
    Saves the popularity scores of the most popular movies and TV
    shows in a dictionary in memory.
    """
    movies = get_all_entries('/movie/popular', end_page=movie_pages)
    tv_shows = get_all_entries('/tv/popular', end_page=tv_show_pages)
    for production in movies + tv_shows:
        popularities[production['id']] = production['popularity']
    logger.info(('Cached the {} most popular movies and {} most '
                 'popular TV shows.').format(len(movies), len(tv_shows)))
def load_stopwords(path):
    try:
        with open(path, encoding="utf-8", mode="r") as f:
            logger.info("\nLoading stopword list...\n")
            stopwords = []
            for line in f.readlines():
                stopwords.append(line.strip())
            return stopwords
    except OSError:
        logger.error("Stop word list not found in assets")
        return []
def get_search_results_metadata(production_name, person_name, character_name):
    """
    Searches Google Images for the given terms and returns a list of
    dictionaries containing metadata about each search result,
    including urls to the original image and to a thumbnail version.
    """
    # Clean up the search terms to compose the Google Images search url.
    production_name = production_name.strip().replace(' ', '+')
    person_name = person_name.strip().replace(' ', '+')
    character_name = character_name.strip().replace(' ', '+')
    query = u"{}+{}".format(production_name, person_name)
    url = u'https://www.google.com/search?tbm=isch&q={}'.format(query)

    # Pose as a Firefox browser. (Otherwise we get an older version of
    # the Google Search app, intended for non-javascript browsers, with
    # less relevant search results.)
    headers = {'User-Agent': ('Mozilla/5.0 (Windows NT 6.1; WOW64; '
                              'rv:34.0) Gecko/20100101 Firefox/34.0'),
               'Accept-Language': 'en'}

    # Download and parse the search results page.
    logger.info(u'Requesting Google Images search for {}'.format(query))
    r = get(url, headers=headers)
    soup = BeautifulSoup(r.content, 'html.parser')

    # See the appendix below for the HTML structure of the interesting
    # part of this page.
    # We search for all the img-tags within the div with id 'res':
    search_results_img_tags = soup.find('div', {'id': 'res'})\
                                  .find_all('img')

    search_results_metadata = []
    for img in search_results_img_tags:
        # We find interesting metadata in a json-dictionary located in
        # the content of the div-tag immediately after the img's
        # parent (an a-tag). (This div has class 'rg_meta'.)
        metadata = json.loads(img.parent.find_next_sibling('div').text)

        # Create a dictionary with interesting metadata and add it to
        # the results.
        search_results_metadata.append({
            'thumb_url': metadata['tu'],
            'thumb_width': metadata['tw'],
            'thumb_height': metadata['th'],
            'image_url': metadata['ou'],
            'image_width': metadata['ow'],
            'image_height': metadata['oh'],
            'image_type': metadata['ity'],
            'source_page_url': metadata['ru'],
            'source_domain': metadata['isu'],
            'title': metadata['pt'],
            'description': metadata['s'],
            # Other possibly interesting metadata:
            # 'cb', 'cl', 'cr', 'ct'
            # 'itg', 'sc'
            # (^what are those?)
            # 'id', 'rid' (thumb id, doc id)
        })
    return search_results_metadata
def run():
    hostname = run_cmd("hostname")[0].strip("\n")
    dbs = databases_to_process(hostname, db_kind="MYSQL")
    for db in dbs:
        dbname = db.get("database")
        logger.info("==> Processing %s" % dbname)
        run_mkdirs(db)
        run_dump_schema(db)
        run_dump_database(db, print_output=True)
        run_upload_gcs(db, print_output=True)
        run_make_gcs_flag(db, DUMP_DONE_FLAG)
        logger.info("<== Finished Processing %s" % dbname)
async def on_ready():
    '''
    Notify that bot is ready.
    :return:
    '''
    for guild in client.guilds:
        try:
            await reset_lounge_category(guild)
            await setup_stream_channel(guild)
        except Exception:
            logger.info("Couldn't setup server {}.".format(guild.name))
    await setup_stream_loop()
    await setup_server_loop()
    # Separate loop. If one fails, all will fail.
    try:
        for guild in client.guilds:
            await set_bot_icon(guild)
    except Exception:
        logger.info("Couldn't set bot profile picture.")
    # Set bot to look ready
    logger.info("The bot is ready!")
    print("The bot is ready!")
    await client.change_presence(activity=discord.Game(
        name="Github: {}".format(config["github_repo_name"])))
    for guild in client.guilds:
        logger.info("I am currently in guild {} with name {}.".format(
            guild.id, guild.name))
def __run(hostname):
    dbs = databases_to_process(hostname)
    for db in dbs:
        dbname = db.get("database")
        logger.info("==> Processing %s" % dbname)
        is_restorable = (
            check_flag(db, DUMP_DONE_FLAG)
            and (not check_flag(db, RESTORE_DONE_FLAG))
            and (not check_flag(db, LOCKDB_FLAG))
        )
        if is_restorable:
            logger.info("== ==> Working on : %s" % dbname)
            run_mkdirs(db, mode="remote")
            run_copy_data_to_local(db)
            # Lock the processed db
            run_make_gcs_flag(db, LOCKDB_FLAG)
            run_create_database(db, print_output=True)
            run_restore_to_cloudsql(db, print_output=True)
            run_make_gcs_flag(db, RESTORE_DONE_FLAG)
            # Unlock the processed db
            run_rm_gcs_flag(db, LOCKDB_FLAG)
            logger.info("<== Finished Processing %s \n" % dbname)
        else:
            logger.info("<== Skipping %s \n" % dbname)
async def setup_stream_channel(guild):
    """
    Ensure every guild has a stream channel
    :return:
    """
    # Check that each guild has the channel
    stream_channel = get(guild.text_channels, name=config["stream"]["discord_channel"])
    # Create if needed
    if stream_channel is None:
        overwrites = {guild.default_role: lounge_tc_disallow,
                      guild.me: lounge_tc_allow}
        stream_channel = await guild.create_text_channel(
            config["stream"]["discord_channel"], overwrites=overwrites)
        logger.info("Stream channel created for guild {}.".format(guild.name))
    # Set permissions
    await stream_channel.set_permissions(guild.default_role, overwrite=lounge_tc_read_only)
    await stream_channel.set_permissions(guild.me, overwrite=lounge_tc_allow)
    # Cleanup channel messages
    if stream_channel.last_message_id is not None:
        last_message = await stream_channel.fetch_message(stream_channel.last_message_id)
    else:
        last_message = None
    if last_message is None or last_message.author != client.user:
        await stream_channel.purge()
        embed = discord.Embed()
        embed.add_field(
            name="**Here is the current list of TagPro streams on Twitch:**\n",
            value="If you don't see your stream here, make sure your game is set to TagPro!\n",
            inline=False)
        embed.add_field(name="Streams", value="Waaaaaa Luiiiggii", inline=False)
        m = await stream_channel.send("", embed=embed)
        logger.info("Purged stream channel on server {}.".format(guild.name))
    else:
        m = last_message
    m.embeds[0].color = int(config["stream"]["panel_color"], 16)
    add_stream_panel(m)
async def on_guild_join(guild):
    """
    Handles joining a server.
    :param guild:
    :return:
    """
    logger.info("Joined guild {} with name {}.".format(guild.id, guild.name))
    if str(guild.id) not in server_settings:
        server_settings[str(guild.id)] = copy.deepcopy(default_server_settings)
        server_settings[str(guild.id)]["owner_id"] = guild.owner.id
        save_server_settings()
    await reset_lounge_category(guild)
    await setup_stream_channel(guild)
def get_api_response(path, params=None):
    """
    Queries v3 of themoviedb.org's API for the resource at the given
    path, with the optional url params, and returns the result as a
    dictionary.

    API reference: http://docs.themoviedb.apiary.io/
    The current rate limit is 40 requests every 10 seconds.
    """
    # We should not use '{}', a mutable object, as the default
    # argument value for 'params' here. See:
    # http://stackoverflow.com/a/1145781/2611913
    if params is None:
        params = {}
    url = 'https://api.themoviedb.org/3' + path
    logger.info(u'Requesting themoviedb resource {}{}'.format(
        path, "?{}".format(urlencode(params)) if params else ""))
    params.update({'api_key': os.getenv('THEMOVIEDB_API_KEY')})
    # Convert the json to an ordered dictionary, preserving the
    # insertion order of key-value pairs in the original json.
    return get(url, params=params).json(object_pairs_hook=OrderedDict)
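# Hypothetical usage sketch of get_api_response (the path and params below are
# illustrative; '/movie/popular' matches the resource used by cache_popularities
# above): fetch one page of popular movies and print each title with its score.
popular = get_api_response('/movie/popular', {'page': 1})
for movie in popular.get('results', []):
    print(movie['title'], movie['popularity'])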
def process2():
    logger.info('Starting process2')
    if request.args.get('notawaiting') == '1':
        logger.error('Got "notawaiting=1" parameter')
        logger.info('Closing process2\n')
        return False
    logger.info('Doing complicated calculations for process2')
    logger.info('Completed process2')
    return True
def process_order_phones():
    while True:
        try:
            orders = Orders.query.filter(
                Orders.normalized_phone_number.is_(None),
                Orders.contact_phone.isnot(None)).order_by(
                    Orders.created.asc()).first()
            if orders:
                normalized_number = normalize_phone(orders.contact_phone)
                orders.normalized_phone_number = normalized_number
                logger.info('orders.id=%s with phone %s processed to %s',
                            orders.id, orders.contact_phone,
                            orders.normalized_phone_number)
                db.session.commit()
        except (StatementError, OperationalError, InvalidRequestError) as sqlalchemy_exc:
            db.session.rollback()
            logger.info('database connection problem, rollback')
        sleep(timeout_check)
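# normalize_phone is not defined in this snippet. A plausible sketch (an
# assumption, not the project's actual helper) using the phonenumbers library
# to return numbers in E.164 form, or None when parsing fails:
import phonenumbers

def normalize_phone(raw_number, default_region='US'):
    """Return raw_number normalized to E.164, or None if it cannot be parsed."""
    try:
        parsed = phonenumbers.parse(raw_number, default_region)
    except phonenumbers.NumberParseException:
        return None
    if not phonenumbers.is_valid_number(parsed):
        return None
    return phonenumbers.format_number(parsed, phonenumbers.PhoneNumberFormat.E164)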
async def update_streams():
    """
    Update the game streams
    :return:
    """
    while True:
        r = requests.get(config["stream"]["twitch_api_url"],
                         params={'game': config["stream"]["twitch_game"],
                                 "client_id": tokens["twitch_client_id"]},
                         headers={'Accept': 'application/vnd.twitchtv.v5+json'})
        try:
            r_json = json.loads(r.text)
            streams = r_json["streams"]
        except ValueError:
            logger.info("Couldn't load twitch response as json, got HTTP status {}.".format(r.status_code))
        except KeyError:
            logger.info("Couldn't load twitch streams, but response is json: {}".format(r.text))
        else:
            if len(streams) == 0:
                stream_header = "**No TagPro streams found :c**\n"
                stream_message = "Try making your own stream!\n"
            else:
                stream_header = "**Found {} streams.**".format(len(streams))
                stream_message = ""
                for stream in streams:
                    stream_name = stream['channel']['status']
                    host = stream['channel']['display_name']
                    url = stream['channel']['url']
                    stream_message += "{} is streaming \"{}\" at: {}\n".format(host, stream_name, url)
            d = datetime.datetime.now()
            timezone = pytz.timezone(config["time_zone"])
            d_localized = timezone.localize(d)
            stream_message += "*Updated: {}*".format(d_localized.strftime("%I:%M %p %Z").lower())
            for m in stream_panels:
                embed = m.embeds[0]
                embed.set_field_at(1, name=stream_header, value=stream_message)
                await m.edit(embed=embed)
        await asyncio.sleep(config["stream"]["update_frequency"])
def query_page(relative_path):
    q = request.form['query']
    search_engine = request.form['engine']
    max_results = int(request.form['max'])
    skip = int(request.form['skip'])
    if search_engine == 'google':
        searcher = google_searcher
    elif search_engine == 'flickr':
        global flickr_searcher
        if flickr_searcher is None:
            flickr_searcher = searchtools.query.FlickrAPISearch()
        searcher = flickr_searcher
    elif search_engine == 'bing':
        global bing_searcher
        if bing_searcher is None:
            bing_searcher = searchtools.query.BingAPISearch()
        searcher = bing_searcher
    elif search_engine == 'instagram':
        global instagram_searcher
        if instagram_searcher is None:
            instagram_searcher = specific_engines.InstagramSearcher()
        searcher = instagram_searcher
    elif search_engine == 'yandex':
        searcher = yandex_searcher
    else:
        searcher = imagenet_searcher
    try:
        images = searcher.query(q, num_results=max_results)[skip:]
    except Exception as e:
        logger.info("Exception occurred: %s", e)
        logger.exception(e)
        images = []
    return render_template("query.html",
                           title='Home',
                           images=images,
                           total=len(images))
def connect(self):
    if not REDIS_URL:
        logger.info('No brain on this bot.')
        return
    logger.info('Brain Connecting...')
    try:
        pool = redis.ConnectionPool(
            host=REDIS_URL,
            port=REDIS_PORT,
            max_connections=MAX_CONNECTION,
            db=0
        )
        self.redis = redis.Redis(connection_pool=pool)
        self.redis.set('foo', 'bar')
        logger.info('Brain Connected: {}'.format(REDIS_URL))
    except Exception as e:
        logger.error(traceback.format_exc())
        raise e
def process3():
    logger.info('Starting process3')
    logger.info('Doing complicated calculations for process3')
    logger.info('Completed process3\n')
def list_dirs(relative_path):
    images = []
    dirs = []
    try:
        path = join(static_dir, relative_path)
        all_files = listdir(path)
        relative_path = "/" if relative_path == "" else "/" + relative_path + "/"
        areas = []
        points = []
        labels = []
        dirs = []
        images = {}
        for f in all_files:
            if f.endswith("_areas.txt"):
                img_areas = []
                with open(join(path, f)) as f_areas:
                    for line in f_areas:
                        annotations = line.rstrip('\n').split(' ')
                        img_areas.append({
                            'x': annotations[0],
                            'y': annotations[1],
                            'width': annotations[2],
                            'height': annotations[3]
                        })
                areas.append((relative_path + f.rsplit('_areas.txt')[0],
                              json.dumps(img_areas)))
            if f.endswith("_points.txt"):
                img_points = []
                with open(join(path, f)) as f_areas:
                    for line in f_areas:
                        annotations = line.rstrip('\n').split(' ')
                        img_points.append({
                            'x': annotations[0],
                            'y': annotations[1]
                        })
                points.append((relative_path + f.rsplit('_points.txt')[0],
                               json.dumps(img_points)))
            if f.endswith("_labels.txt"):
                with open(join(path, f)) as f_label:
                    img_label = f_label.read()
                    img_label = img_label[:-1]
                labels.append((relative_path + f.rsplit('_labels.txt')[0],
                               json.dumps(img_label)))
            if f.endswith(".jpg") or f.endswith(".JPEG"):
                url = relative_path + f
                images[f] = {'url': url, 'name': f, 'mark': False}
            if os.path.isdir(join(path, f)):
                dirs.append(unicode(f, "utf-8") if type(f) != unicode else f)
        marks_path = os.path.join(path, '_marks.txt')
        if os.path.exists(marks_path):
            for line in open(marks_path):
                photo_name = line.rstrip('\n')
                if photo_name in images:
                    images[photo_name]['mark'] = True
        images = sorted(images.values(), key=lambda x: x['name'])
        mark_now = False
        for idx, img in enumerate(images):
            if img['mark']:
                mark_now = False if mark_now else True
            if mark_now:
                img['mark_class'] = 'mark_border' if img['mark'] else 'mark'
            else:
                img['mark_class'] = 'mark_border' if img['mark'] else ''
        areas = dict(areas)
        points = dict(points)
        labels = dict(labels)
    except Exception as e:
        logger.info("Exception occurred: %s", e)
        logger.exception(e)
    return render_template("browse.html",
                           title='Browse',
                           dirs=sorted(dirs),
                           images=images,
                           areas=areas,
                           points=points,
                           labels=labels,
                           total=len(images) + len(dirs))
text = "" for line in f.readlines(): text += line.strip() text += "\n" return text except: logger.error("Text file not found in corpus") exit if __name__ == "__main__": print( "=============================================================================================" ) logger.info("\n Now start to Generate word cloud\n") keyword = sys.argv[1] stopword_path = "./assets/stopwords.txt" corpus_path = f"corpus/{keyword}_corpus.txt" stopwords_cn = load_stopwords(stopword_path) corpus = load_corpus(corpus_path) w = WordCloud(width=1200, height=800, background_color="white", contour_width=1, contour_color='steelblue', font_path="msyh.ttc", max_words=300, collocations=False, stopwords=stopwords_cn)
from loggers import logger

try:
    logger.info("nihao")
    print("hello world")
except Exception as e:
    print(e)
def main():
    logger.info("Program started")
    parser = XMLParser()
    link = parser.get_first_link("data/select.xml")
    if not link:
        logger.warning("No link exists")
        return
    response = requests.get(link)
    with open('data/zip/xml_download.zip', 'wb') as output:
        output.write(response.content)
    logger.info("File downloaded successfully")
    with zipfile.ZipFile('data/zip/xml_download.zip', 'r') as zip_ref:
        zip_ref.extractall('data/unzip')
    logger.info("File unzipped successfully")
    for filename in os.listdir('data/unzip'):
        logger.info(f"Processing file {filename}")
        if filename.endswith(".xml"):
            filepath = os.path.join('data/unzip', filename)
            destination_file = '.'.join(filename.split('.')[:-1]) + '.csv'
            destination_path = os.path.join('data/csv', destination_file)
            converter = XMLToCSVConverter(filepath, destination_path)
            converter.convert()
            logger.info(f"File created at path {destination_path}")
    logger.info('Task completed .... :)')
def print_progress(i, max_len, per=1):
    if (i + 1) % per != 0:
        return
    print(f"Progress: {i + 1} / {max_len}")


if __name__ == "__main__":
    keyword, max_pages, order, tid = argv_parser()
    all_queries = construct_bili_query(keyword, max_pages, order, tid)
    all_video_urls = []
    url2oid = {}
    all_danmaku = []

    # Collect all candidate videos
    logger.info("Collecting all candidate videos...\n\n")
    for i, q in enumerate(all_queries):
        print_progress(i, len(all_queries))
        urls = extract_video_urls(q)
        logger.info(f"Detected {len(urls)} videos on page {i + 1}")
        all_video_urls.extend(urls)
    print("\n\n----------------------------------------------------------\n\n")
    logger.info("Finished collecting video urls!")
    logger.info(f"Detected {len(all_video_urls)} videos in total.")
    print("\n\n----------------------------------------------------------\n\n")

    # Collect all oid of videos
    print("\n\n\n")
    logger.info("Collecting danmaku from videos...\n\n")