def get_last_update(data_url):
    """
    Retrieve data from Johns Hopkins CSV files given an accessible URL.

    Retries up to JOHNS_HOPKINS_DATA_ATTEMPTS times, waiting
    JOHNS_HOPKINS_DATA_RTD seconds between attempts.

    :param data_url: Data URL.
    :return: Dictionary with the last update grouped by location, or None if
             every attempt failed.
    """
    result_dict = dict()
    for attempt in range(JOHNS_HOPKINS_DATA_ATTEMPTS):
        try:
            response = requests.get(
                data_url,
                headers={'User-Agent': JOHNS_HOPKINS_USER_AGENT},
                timeout=JOHNS_HOPKINS_DATA_TIMEOUT)
            # Fail fast on HTTP error responses instead of trying to parse an
            # error page as CSV (still retried by the surrounding loop).
            response.raise_for_status()
            # Drop the CSV header row; each remaining row is one location.
            rows = [
                row
                for row in csv.reader(StringIO(response.text), delimiter=',')
            ][1:]
            for row in rows:
                # Key is '<state>, <country>' when a state is present,
                # otherwise just the country/region name.
                dict_key = row[1] if not row[0] else f'{row[0]}, {row[1]}'
                # The last column holds the most recent cumulative value.
                result_dict[dict_key] = int(row[-1])
            return result_dict
        except Exception as e:
            log.error(
                f'Cannot retrieve information from {data_url} - Attempt [{attempt + 1}] - [{e}]'
            )
            if attempt == JOHNS_HOPKINS_DATA_ATTEMPTS - 1:
                # Out of retries: log the full traceback and give up.
                log.exception(e)
                return None
            # Bug fix: only sleep between attempts; previously the code also
            # slept after the final failed attempt before returning None.
            time.sleep(JOHNS_HOPKINS_DATA_RTD)
def run():
    """
    Main function in order to process a new iteration of the Twitter bot.

    :return: Iteration done.
    """
    try:
        total_resources = len(RESOURCES)
        for index, resource in enumerate(RESOURCES, start=1):
            resource_name = resource['name']
            log.info(f'{index}/{total_resources} Processing {resource_name}...')
            latest_results = retriever.get_last_update(resource['data_url'])
            if latest_results is not None:
                data_path = resource['data_path']
                had_previous_data = os.path.isfile(data_path)
                # Load the previously stored snapshot when one exists.
                previous_results = dict()
                if had_previous_data:
                    with open(data_path, 'r') as data_file:
                        previous_results = json.load(data_file)
                # Persist the freshly retrieved snapshot.
                with open(data_path, 'w') as data_file:
                    json.dump(latest_results, data_file)
                # Only notify differences when an older snapshot existed.
                if had_previous_data:
                    total_worldwide = sum(previous_results.values())
                    changes = list(
                        dictdiffer.diff(latest_results, previous_results))
                    for change_index, change in enumerate(changes, start=1):
                        log.info(
                            f'{change_index}/{len(changes)} New changes found: [{change}]'
                        )
                        total_worldwide = _notify_changes(
                            change, resource_name, resource['icon'],
                            latest_results, total_worldwide)
            time.sleep(TIME_BETWEEN_RESOURCES)
    except Exception as e:
        log.error(f'Unexpected error: [{e}]')
        log.exception(e)
def get(name=None):
    """
    API endpoint for returning a given device by its name, all if name is not
    specified.

    :return: All devices.
    """
    try:
        for attempt in range(1, RETRIEVE_RETRY + 1):
            log.info(f'Attempt [{attempt}]...')
            # Wrap every discovered cast device in our Device abstraction.
            devices = [
                Device(cast_item)
                for cast_item in pychromecast.get_chromecasts()
            ]
            if name is not None:
                log.info(f'Applying filter name by name: [{name}]')
                devices = [
                    device for device in devices
                    if device.has_device_name(name)
                ]
            serialized_devices = [device.serialize() for device in devices]
            if serialized_devices:
                return response.make(
                    error=False,
                    response=dict(chromecast_list=serialized_devices))
            log.warn(f'No devices found at attempt {attempt}.')
            time.sleep(RETRIEVE_RTD)
        # Every attempt came back empty; report an empty (non-error) list.
        log.warn(f'No devices found.')
        return response.make(error=False, response=dict(chromecast_list=[]))
    except Exception as e:
        log.error(f'Unexpected error: [{e}]')
        log.exception(e)
        return response.make(error=True, message=MESSAGE_UNEXPECTED_ERROR)
def login():
    """Return the Spotify authorization URL so the client can start OAuth."""
    try:
        with Timer('Get Auth URL'):
            auth_url = spotify_api.get_auth_url()
        return response.make(error=False, response=dict(redirect=auth_url))
    except Exception as e:
        log.error(
            f'Exception while processing {login.__name__} function: [{e}]')
        log.exception(e)
        return response.make(error=True, message=MESSAGE_ERROR)
def download_models():
    """Download the user's models with the OAuth token stored in the session."""
    try:
        # No credentials yet: restart the OAuth flow.
        if 'credentials' not in flask.session:
            return flask.redirect(flask.url_for(oauth2_authorize.__name__))
        access_token = flask.session.get('credentials', {}).get('access_token')
        thing.download_models(access_token)
        return flask.redirect(THINGIVERSE_API_DONE)
    except Exception as e:
        message = f'Exception in {download_models.__name__} function: [{e}]'
        log.error(message)
        log.exception(e)
        return flask.jsonify(dict(error=True, response=message))
def playlist(code, github_user):
    """Build a Spotify playlist from a GitHub user's commit message words."""
    try:
        with Timer('Request token retrieving'):
            access_token = spotify_api.get_access_token(code)
        if not access_token:
            return response.make(error=True, message=MESSAGE_TOKEN_NOT_FOUND)
        with Timer('Get profile data'):
            spotify_user_id = spotify_api.get_current_user_id(access_token)
        if not spotify_user_id:
            return response.make(error=True, message=MESSAGE_SPOTIFY_NOT_FOUND)
        with Timer('Playlist generation'):
            playlist_id, playlist_url = spotify_api.post_playlist(
                access_token, spotify_user_id, github_user)
        # NOTE: only treated as a failure when BOTH values are falsy.
        if not playlist_id and not playlist_url:
            return response.make(error=True,
                                 message=MESSAGE_SPOTIFY_PLAYLIST_ERROR)
        with Timer('Retrieve commits from user'):
            commit_messages = github_api.get_commit_messages(github_user)
        if not commit_messages:
            return response.make(error=True, message=MESSAGE_COMMIT_NOT_FOUND)
        with Timer('Retrieve most common words'):
            most_common_words = nltk.extract_most_common(commit_messages)
        with Timer('Search for tracks'):
            search_args = [(access_token, word) for word in most_common_words]
            with ThreadPool(CONCURRENT_POOL) as pool:
                found_tracks = pool.imap(spotify_api.search_for_tracks,
                                         search_args)
                # Drop empty search results while draining the pool.
                track_uri_list = [uri for uri in found_tracks if uri]
        with Timer('Add tracks to the playlist'):
            success = spotify_api.add_tracks_to_playlist(
                access_token, playlist_id, track_uri_list)
        if not success:
            return response.make(error=True,
                                 message=MESSAGE_SPOTIFY_TRACK_ERROR)
        return response.make(error=False, response=dict(url=playlist_url))
    except Exception as e:
        log.error(
            f'Exception while processing {playlist.__name__} function: [{e}]')
        log.exception(e)
        return response.make(error=True, message=MESSAGE_ERROR)
def run():
    """
    Main function in order to process a new iteration of the Twitter bot.

    :return: Iteration done.
    """
    try:
        resources_list = [DATA_CONFIRMED, DATA_DEATHS, DATA_RECOVERED]
        confirmed_results, deaths_results, recovered_results = retriever.get_last_update(
        )
        # Proceed only when every data set could be retrieved.
        if all([confirmed_results, deaths_results, recovered_results]):
            # Dispatch table replacing the original if/elif chain.
            results_by_name = {
                DATA_CONFIRMED: confirmed_results,
                DATA_DEATHS: deaths_results,
                DATA_RECOVERED: recovered_results,
            }
            for index, item_name in enumerate(resources_list, start=1):
                log.info(
                    f'{index}/{len(resources_list)} Processing {item_name}...')
                results = results_by_name.get(item_name)
                if results is not None:
                    item_data_path = DATA_PATH_DICT[item_name]
                    had_previous_data = os.path.isfile(item_data_path)
                    # Load the previous snapshot when one exists.
                    old_results = dict()
                    if had_previous_data:
                        with open(item_data_path, 'r') as item_data_file:
                            old_results = json.load(item_data_file)
                    # Persist the latest snapshot.
                    with open(item_data_path, 'w') as item_data_file:
                        json.dump(results, item_data_file)
                    # Only notify differences when an older snapshot existed.
                    if had_previous_data:
                        total_worldwide = sum(old_results.values())
                        changes = list(dictdiffer.diff(results, old_results))
                        for change_index, change in enumerate(changes, start=1):
                            log.info(
                                f'{change_index}/{len(changes)} New changes found: [{change}]'
                            )
                            total_worldwide = _notify_changes(
                                change, item_name, ICON_DICT[item_name],
                                results, total_worldwide)
                time.sleep(TIME_BETWEEN_RESOURCES)
    except Exception as e:
        log.error(f'Unexpected error: [{e}]')
        log.exception(e)
def oauth2_callback():
    """Exchange the OAuth authorization code for a Thingiverse access token."""
    try:
        auth_code = flask.request.args.get('code')
        token_params = dict(
            client_id=os.environ.get('THINGIVERSE_API_CLIENT_ID'),
            client_secret=os.environ.get('THINGIVERSE_API_CLIENT_SECRET'),
            code=auth_code)
        # Local renamed from `response` to avoid shadowing the shared
        # `response` helper module used by the rest of the project.
        token_response = requests.post(THINGIVERSE_API_TOKEN,
                                       params=token_params)
        # Thingiverse answers with a URL-encoded body, not JSON.
        token_fields = dict(parse_qsl(token_response.text))
        flask.session['credentials'] = dict(
            access_token=token_fields.get('access_token'))
        return flask.redirect(flask.url_for(download_models.__name__))
    except Exception as e:
        message = f'Exception in {oauth2_callback.__name__} function: [{e}]'
        log.error(message)
        log.exception(e)
        return flask.jsonify(dict(error=True, response=message))
def oauth2_authorize():
    """Redirect the client to the Thingiverse OAuth authorization page."""
    try:
        auth_params = dict(
            client_id=os.environ.get('THINGIVERSE_API_CLIENT_ID'),
            redirect_uri=flask.url_for(oauth2_callback.__name__,
                                       _external=True),
            response_type='code')
        # Percent-encode each value and join into a query string.
        query = '&'.join(f'{key}={quote(value)}'
                         for key, value in auth_params.items())
        return flask.redirect(f'{THINGIVERSE_API_AUTH}/?{query}')
    except Exception as e:
        message = f'Exception in {oauth2_authorize.__name__} function: [{e}]'
        log.error(message)
        log.exception(e)
        return flask.jsonify(dict(error=True, response=message))
import time

from src import scheduler
from src.helper import log, env

if __name__ == '__main__':
    try:
        log.info('auto-factorial was born!')
        if env.is_development():
            log.info(
                'Running jobs manually since DEVELOPMENT MODE is enabled, and then sleep eternally.'
            )
            # Run the job once by hand, then keep the process alive.
            scheduler.auto_factorial_cron_job()
            while True:
                time.sleep(1000)
        else:
            # Production: let the scheduler drive the job.
            scheduler.scheduler.start()
    except Exception as e:
        log.error(f'Unexpected error {e} in auto-factorial')
        log.exception(e)
    finally:
        log.info('Rest in peace, auto-factorial.')
def get(name, source_url):
    """
    API endpoint for playing a source to a device given its name.

    :param name: Device name.
    :param source_url: Source URL (or the PLAY_RANDOM sentinel to pick a
                       random video).
    :return: Source playing at given device.
    """
    try:
        # Random generation, if needed
        random_video = None
        if source_url == PLAY_RANDOM:
            log.info('Getting random video...')
            random_video = random_extractor.get()
            # presumably a dict with 'sources' (URL list) and 'title' keys —
            # verify against random_extractor
            source_url = random_video.get('sources')[0]
        # Extracting information
        log.info('Extracting source names...')
        # File extension (with leading dot) is used as the MIME_TYPES key.
        source_ending = '.{}'.format(source_url.split('.')[-1])
        # Display name: last URL path segment, or the random video's title.
        source_name = source_url.split(
            '/')[-1] if not random_video else random_video.get('title')
        log.info(f'Source ending: [{source_ending}]')
        log.info(f'Source name: [{source_name}]')
        # Unknown extension: refuse to play rather than guess a MIME type.
        if source_ending not in MIME_TYPES:
            log.warn(MESSAGE_PLAY_DEVICE_NOT_FOUND)
            return response.make(error=True,
                                 message=MESSAGE_PLAY_DEVICE_NOT_FOUND)
        # Getting devices: retry discovery up to RETRIEVE_RETRY times.
        for attempt in range(1, RETRIEVE_RETRY + 1):
            log.info(f'Attempt [{attempt}]...')
            chromecast_list = pychromecast.get_chromecasts()
            chromecast_list = [
                Device(chromecast_item) for chromecast_item in chromecast_list
            ]
            chromecast_list = [
                device for device in chromecast_list
                if device.has_device_name(name)
            ]
            if chromecast_list:
                # Process media: play on the first matching device only.
                chromecast_device = chromecast_list[0]
                log.info(
                    f'Chromecast device: [{chromecast_device.uuid} - {chromecast_device.name}]'
                )
                log.info('Waiting for being loaded...')
                # Block until the device connection is ready.
                chromecast_device.chromecast_object.wait()
                log.info('Loaded.')
                mime_type = MIME_TYPES[source_ending]
                log.info(f'MIME type: [{mime_type}]')
                log.info(f'Cast type: [{chromecast_device.cast_type}]')
                # Audio-only and group targets cannot render video content.
                if (chromecast_device.cast_type == 'audio' and 'video' in mime_type) or \
                        (chromecast_device.cast_type == 'group' and 'video' in mime_type):
                    log.warn(MESSAGE_PLAY_MIME_DEVICE_NOT_SUPPORTED)
                    return response.make(
                        error=True,
                        message=MESSAGE_PLAY_MIME_DEVICE_NOT_SUPPORTED)
                # Play media
                log.info('Playing media...')
                chromecast_device.chromecast_object.media_controller.play_media(
                    url=source_url,
                    content_type=mime_type,
                    title=source_name)
                # Wait for the media session to become active before replying.
                chromecast_device.chromecast_object.media_controller.block_until_active(
                )
                log.info('Done!')
                return response.make(
                    error=False,
                    response=dict(message=MESSAGE_PLAY_SUCCESS))
            else:
                log.warn(f'No devices found at attempt {attempt}.')
                time.sleep(RETRIEVE_RTD)
        # Discovery never found a matching device.
        log.warn(f'No devices found.')
        return response.make(error=True, message=MESSAGE_PLAY_DEVICE_NOT_FOUND)
    except Exception as e:
        log.error(f'Unexpected error: [{e}]')
        log.exception(e)
        return response.make(error=True, message=MESSAGE_UNEXPECTED_ERROR)
def get(github_user):
    """
    Build an aggregate statistics payload for a GitHub user.

    Collects profile counters, repository totals/averages, language and topic
    percentage breakdowns, and per-contributor commit statistics.

    NOTE(review): `response.get(key, container, default=...)` is the project's
    helper for safe dict access (not `dict.get`) — confirm against the
    `response` module.

    :param github_user: GitHub username to analyze.
    :return: API response with the statistics dict, or an error response.
    """
    try:
        with Timer('Initialize response dictionary'):
            resp = {}
        with Timer('Basic information'):
            basic_information = github_api.get_basic_user_information(
                github_user)
            if not basic_information:
                return response.make(error=True,
                                     message=MESSAGE_USER_NOT_FOUND)
            # Copy the profile counters straight from the API payload.
            resp['username'] = github_user
            resp['photo'] = response.get('avatar_url', basic_information)
            resp['public_repos'] = response.get('public_repos',
                                                basic_information)
            resp['public_gists'] = response.get('public_gists',
                                                basic_information)
            resp['followers'] = response.get('followers', basic_information)
            resp['following'] = response.get('following', basic_information)
        with Timer('Repositories'):
            repos_list = github_api.get_repos_from_user(github_user)
            if not repos_list:
                return response.make(error=True,
                                     message=MESSAGE_USER_NOT_FOUND)
            resp['repo_amount'] = len(repos_list)
            resp['repo_fork_amount'] = sum(
                [response.get('fork', d, default=False) for d in repos_list])
            # Size is reported in KB by the API; divide by 1000 to get MB.
            resp['repo_total_size'] = sum(
                [response.get('size', d, default=0) for d in repos_list]) / 1000
            resp['repo_total_stars'] = sum([
                response.get('stargazers_count', d, default=0)
                for d in repos_list
            ])
            resp['repo_total_forks'] = sum([
                response.get('forks_count', d, default=0) for d in repos_list
            ])
            resp['repo_total_open_issues'] = sum([
                response.get('open_issues', d, default=0) for d in repos_list
            ])
            # Per-repository averages (repo_amount > 0 is guaranteed above).
            resp['repo_avg_size'] = formatter.to_float(
                resp['repo_total_size'] / resp['repo_amount'])
            resp['repo_avg_stars'] = formatter.to_float(
                resp['repo_total_stars'] / resp['repo_amount'])
            resp['repo_avg_forks'] = formatter.to_float(
                resp['repo_total_forks'] / resp['repo_amount'])
            resp['repo_avg_open_issues'] = formatter.to_float(
                resp['repo_total_open_issues'] / resp['repo_amount'])
        with Timer('Languages & topics - threads'):
            # Fan out one API call per named repository.
            with ThreadPool(CONCURRENT_POOL) as pool:
                thread_args = [(github_user, response.get('name', r))
                               for r in repos_list if response.get('name', r)]
                language_response_list = list(
                    pool.imap(github_api.get_languages, thread_args))
                topic_response_list = list(
                    pool.imap(github_api.get_topics, thread_args))
        with Timer('Languages & topics - amount'):
            # Accumulate per-language byte counts across all repositories.
            languages_dict = {}
            for language_response in language_response_list:
                if language_response:
                    for key, value in language_response.items():
                        if key not in languages_dict:
                            languages_dict[key] = 0
                        languages_dict[key] += value
            # Count how many repositories carry each topic.
            topics_dict = {}
            for topic_response in topic_response_list:
                if topic_response:
                    for topic in topic_response:
                        if topic not in topics_dict:
                            topics_dict[topic] = 0
                        topics_dict[topic] += 1
        with Timer('Languages - percentage'):
            # Top GITHUB_LANGUAGES_MAX languages individually, the remainder
            # folded into a single 'Others' bucket.
            resp['languages'] = []
            total_languages = sum(languages_dict.values())
            sorted_languages = sorted(languages_dict.items(),
                                      key=operator.itemgetter(1),
                                      reverse=True)
            for idx in range(0, len(sorted_languages)):
                if idx < GITHUB_LANGUAGES_MAX:
                    language_name, language_amount = sorted_languages[idx]
                    percentage = formatter.to_float(
                        (language_amount / total_languages) * 100)
                    resp['languages'].append(
                        dict(label=language_name,
                             amount=language_amount,
                             percentage=percentage))
                else:
                    language_amount = sum([
                        v[1] for v in sorted_languages[GITHUB_LANGUAGES_MAX:]
                    ])
                    percentage = formatter.to_float(
                        (language_amount / total_languages) * 100)
                    resp['languages'].append(
                        dict(label='Others',
                             amount=language_amount,
                             percentage=percentage))
                    break
        with Timer('Topics - percentage'):
            # Same top-N + 'others' folding as for languages.
            resp['topics'] = []
            total_topics = sum(topics_dict.values())
            sorted_topics = sorted(topics_dict.items(),
                                   key=operator.itemgetter(1),
                                   reverse=True)
            for idx in range(0, len(sorted_topics)):
                if idx < GITHUB_TOPICS_MAX:
                    topic_name, topic_amount = sorted_topics[idx]
                    percentage = formatter.to_float(
                        (topic_amount / total_topics) * 100)
                    resp['topics'].append(
                        dict(label=topic_name,
                             amount=topic_amount,
                             percentage=percentage))
                else:
                    topic_amount = sum(
                        [v[1] for v in sorted_topics[GITHUB_TOPICS_MAX:]])
                    percentage = formatter.to_float(
                        (topic_amount / total_topics) * 100)
                    resp['topics'].append(
                        dict(label='others',
                             amount=topic_amount,
                             percentage=percentage))
                    break
        with Timer('Contributors - thread'):
            with ThreadPool(CONCURRENT_POOL) as pool:
                thread_args = [(github_user, response.get('name', r))
                               for r in repos_list if response.get('name', r)]
                contributor_response_list = list(
                    pool.imap(github_api.get_contributors, thread_args))
        with Timer('Contributors - save'):
            resp['commits'] = 0
            resp['commits_user'] = 0
            resp['commits_contributor'] = 0
            contributors_dict = {}
            for contributor_response in contributor_response_list:
                if contributor_response:
                    for contributor in contributor_response:
                        contributor_name = response.get('login', contributor)
                        if contributor_name:
                            contributions = response.get('contributions',
                                                         contributor,
                                                         default=0)
                            resp['commits'] += contributions
                            # Case-insensitive match separates the user's own
                            # commits from external contributors'.
                            if contributor_name.lower() == github_user.lower():
                                resp['commits_user'] += contributions
                            else:
                                if contributor_name not in contributors_dict:
                                    contributors_dict[contributor_name] = dict(
                                        commits=0,
                                        photo=response.get('avatar_url',
                                                           contributor,
                                                           default=''),
                                        url=response.get('html_url',
                                                         contributor,
                                                         default=''))
                                resp['commits_contributor'] += contributions
                                contributors_dict[contributor_name][
                                    'commits'] += contributions
            # NOTE(review): divides by resp['commits'] — assumes at least one
            # contribution was counted; verify upstream guarantees this.
            resp['commits_user_percentage'] = formatter.to_float(
                (resp['commits_user'] / resp['commits']) * 100)
            resp['commits_contributor_percentage'] = formatter.to_float(
                (resp['commits_contributor'] / resp['commits']) * 100)
            # Flatten the contributors dict into a list sorted by commits.
            resp['contributors'] = []
            for contributor_name, contributor_dict in contributors_dict.items(
            ):
                contributor_dict['label'] = contributor_name
                resp['contributors'].append(contributor_dict)
            resp['contributors'] = sorted(resp['contributors'],
                                          key=lambda k: k['commits'],
                                          reverse=True)
        return response.make(error=False, response=resp)
    except Exception as e:
        log.error(f'Exception while processing {get.__name__} function: [{e}]')
        log.exception(e)
        return response.make(error=True, message=MESSAGE_ERROR)