return current_count / percent_through else: return 0 def avg_page_count(books): count = 0 pages = 0 for i in books: pages += int(i['number_of_pages']) count += 1 if count > 0: return float(pages) / count else: return 0 data = load_data() book_count = len(data['books']) print('Books Read: {0}'.format(book_count)) this_year_book_count = books_in_past_year(data['books']) print('Books in Past Year: {0}'.format(this_year_book_count)) print('Avg Page Count: {0:.1f}'.format(avg_page_count(data['books']))) print('Projected Year Count: {0:.0f}'.format(projected_reading(data['books']))) # Now generate HTML report parts = [['header', ['Goodreads Report']], ['big_num', ['Books Read', book_count]], ['big_num', ['Books in Past Year', this_year_book_count]]] generator.build_report('goodreads_main', parts)
application_hits = {}
type_hits = {}

# General Data Processing: accumulate total value per application name
# and per record type. dict.get() replaces the previous bare `except:`
# increment pattern, which silently swallowed *every* exception (not
# just the intended KeyError for a first-seen key).
for i in data['records']:
    application_hits[i['name']] = application_hits.get(i['name'], 0) + i['val']
    type_hits[i['type']] = type_hits.get(i['type'], 0) + i['val']

# Rank descending by accumulated value (ascending sort then reverse,
# matching the original tie ordering).
top_applications = sorted(application_hits.items(), key=lambda kv: kv[1])
top_applications.reverse()
top_types = sorted(type_hits.items(), key=lambda kv: kv[1])
top_types.reverse()

print('Generating report...')

# Now generate HTML report
parts = [
    ['header', ['RescueTime Report']],
    ['top3', ['Top Applications', top_applications[0][0],
              top_applications[1][0], top_applications[2][0]]],
    ['top3', ['Top Types', top_types[0][0], top_types[1][0],
              top_types[2][0]]]
]
generator.build_report('rescuetime_main', parts)
print('Done.')
year_avg_parts = [] for i in avg_all_years.keys(): year_result = [ ['subheader', [i]], [ 'big_num', ['Average Watchtime Per Day', avg_all_years[i]['avg_all']] ], [ 'big_num', ['Average TV Watchtime Per Day', avg_all_years[i]['avg_tv']] ] ] generator.check_html_directory('trakt') generator.build_report('trakt/{0}report'.format(i), year_result) year_avg_parts += year_result year_avg_parts += [[ 'link', ['trakt/{0}report.html'.format(i), '{0} Full Report'.format(i)] ]] # Now generate HTML report parts = [['header', ['Trakt.tv Report']], [ 'top3', ['Top Shows', show_list[0][0], show_list[1][0], show_list[2][0]] ]] parts += year_avg_parts parts += [['subheader', ['Watchtime Per Weekday']], ['image', ['figures/trakt_watchtime_weekday.png']],
# Standard imports. Add analysis specific ones.
import sys
import datetime
import json
from os import path

# Make the project root importable so utils/generator resolve.
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from utils import loadConfig
import utils
import generator

config = loadConfig.getConfig()

# Write your analysis script here
parts = [
    ['header', ['Boilerplate Report']],
]
generator.build_report('lastfm_main', parts)
activities += [activity] return {'activities': activities} data = load_data() # Graph average running pace x = [] y = [] for i in data['activities']: if i['type'] == 'running': x += [i['date']] y += [i['avg_pace']] #plt.plot(x, y) ax = plt.subplot() #ax.plot(x, y) ax.yaxis.set_major_locator(HourLocator()) ax.yaxis.set_major_formatter(DateFormatter('%M:%s')) ax.xaxis_date() ax.xaxis.set_major_locator(YearLocator()) ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d')) plt.plot(x, y) generator.check_figure_directory() plt.savefig('html/figures/garmin_total_avg_running_pace.png', dpi=400) plt.close() parts = [['header', ['Garmin Report']], ['subheader', ['Average Running Pace']], ['image', ['figures/garmin_total_avg_running_pace.png']]] generator.build_report('garmin_main', parts)
] else: sentiment_html = ['big_num', ['Average Shout Sentiment', 'Not configured']] def get_top_entry(dictionary, name): top = sorted(dictionary.items(), key=lambda kv: kv[1]) top.reverse() return generator.build_top3_count(name, top) topzips_html = get_top_entry(zipcode_count, 'Top Zipcodes') topstates_html = get_top_entry(state_count, 'Top States') topplaces_html = get_top_entry(places_count, 'Top Places') toptypes_html = get_top_entry(categories_count, 'Top Types') year_counts = [] for i in year_count.keys(): year_counts += [[ 'big_num', ['{0} Check-in Count'.format(i), year_count[i]] ]] checkin_total = json_data['checkins']['count'] print('Total check-ins:', checkin_total) parts = [['header', ['Swarm Report']], ['big_num', ['Check-in Count', checkin_total]], sentiment_html, topzips_html, topstates_html, topplaces_html, toptypes_html] parts += year_counts generator.build_report('swarm_main', parts)
# Histogram of heart-rate samples; also track the extremes.
# (lowest_hr / highest_hr are initialised upstream of this block.)
each_hr_count = {}
for sample in hr_data:
    bpm = sample['bpm']
    if bpm < lowest_hr:
        lowest_hr = bpm
    if bpm > highest_hr:
        highest_hr = bpm
    each_hr_count[bpm] = each_hr_count.get(bpm, 0) + 1

avg_hr = sum(sample['bpm'] for sample in hr_data) / len(hr_data)

print('Heart Rate Points: {0}'.format(len(hr_data)))
print('Highest HR:', highest_hr)
print('Lowest HR:', lowest_hr)

plt.scatter(each_hr_count.keys(), each_hr_count.values())
plt.xlabel('BPM')
plt.ylabel('Count')
plt.savefig('html/figures/gyroscope_hr_scatter.png', dpi=200)
plt.close()

parts = [['header', ['Gyroscope Report']],
         ['subheader', ['Heart Rate Analysis']],
         ['big_num', ['Highest HR', highest_hr]],
         ['big_num', ['Lowest HR', lowest_hr]],
         ['big_num', ['Average HR', avg_hr]],
         ['image', ['figures/gyroscope_hr_scatter.png']]]
generator.build_report('gyroscope_main', parts)
# Last-year window, clamped to the start of the data.
bottom = num_days - 365
if bottom < 0:
    bottom = 0
print('Hours Last Year:',
      calc_total_time(json_data['days'][bottom:num_days]) / (60 * 60))

# Gather last month metrics (last 31 tracked days).
total_time = calc_total_time(json_data['days'][num_days - 31:num_days])
languages = calc_top_languages(json_data['days'][num_days - 31:num_days])
last_month_hours = total_time / (60 * 60)
print('Last Month Hours:', last_month_hours)
print('Last Month Top Languages: 1)', languages[0][0], '2)', languages[1][0])

try:
    most_edited_file_this_month = most_edited_files(
        json_data['days'][num_days - 31:num_days])[0][0]
except IndexError:
    # Narrowed from a bare except: an empty result list ("no edits this
    # month") is the only expected failure here.
    most_edited_file_this_month = "None"
print('Most Edited File This Month:', most_edited_file_this_month)
print('Weekend Coding Percentage: {0:.2f}%'.format(
    weekend_weekday_percentage(json_data['days'][num_days - 31:num_days]) * 100))

# Now generate HTML report. Label typo fixed ("Mosted" -> "Most",
# matching the console output above).
parts = [['header', ['Wakatime Report']],
         ['big_num', ['Last Month Hours', last_month_hours]],
         [
             'big_num',
             ['Most Edited File This Month', most_edited_file_this_month]
         ]]
generator.build_report('wakatime_main', parts)
def process(flag='all'):
    """Analyze Last.fm scrobbles and build the HTML report.

    flag: 'all' processes every recorded scrobble and writes the
    'lastfm_main' report; 'year' processes only the current year's data
    (via data_current_year()) and writes 'lastfm_current_year'.

    Reads the module-level json_data and writes figure PNGs under
    html/figures/.
    """
    songs_recorded = len(json_data['data'])
    artist_hits = {}
    song_hits = {}
    songs_per_year = {}
    songs_per_month = {}
    songs_per_weekday = {}
    weekend_avg = 0    # total weekend scrobbles
    weekend_count = 0  # distinct weekend days seen
    weekday_avg = 0    # total weekday scrobbles
    weekday_count = 0  # distinct weekdays seen
    last_day = None

    to_process = None
    if flag == 'all':
        to_process = json_data['data']
    elif flag == 'year':
        to_process = data_current_year()

    for j in to_process:
        date = utils.timestamp_to_datetime(int(j['date']['uts']))
        day_of_week = date.weekday()
        day_key = date.strftime('%Y-%m-%d')
        if day_of_week == 5 or day_of_week == 6:
            # Weekend: count the play; count the day only on change.
            weekend_avg += 1
            if not last_day == day_key:
                weekend_count += 1
        else:
            weekday_avg += 1
            if not last_day == day_key:
                weekday_count += 1

        # dict.get() replaces the previous bare try/except increments,
        # which silently swallowed unrelated exceptions.
        songs_per_year[date.year] = songs_per_year.get(date.year, 0) + 1
        songs_per_month[date.month] = songs_per_month.get(date.month, 0) + 1
        songs_per_weekday[day_of_week] = \
            songs_per_weekday.get(day_of_week, 0) + 1

        try:
            artist = j['artist']['name']
            artist_hits[artist] = artist_hits.get(artist, 0) + 1
            song_key = (j['name'], artist)
            song_hits[song_key] = song_hits.get(song_key, 0) + 1
        except KeyError:
            pass  # entry lacks artist/name info; skip the hit counters

        last_day = day_key

    # Guard against an empty partition (e.g. no weekend scrobbles)
    # instead of raising ZeroDivisionError.
    weekday_avg = weekday_avg / weekday_count if weekday_count else 0
    weekend_avg = weekend_avg / weekend_count if weekend_count else 0
    print('Weekday Average:', weekday_avg)
    print('Weekend Average:', weekend_avg)

    # Ascending sort then reverse preserves the original tie ordering.
    top_years = sorted(songs_per_year.items(), key=lambda kv: kv[1])
    top_years.reverse()
    top_artists = sorted(artist_hits.items(), key=lambda kv: kv[1])
    top_artists.reverse()
    top_songs = sorted(song_hits.items(), key=lambda kv: kv[1])
    top_songs.reverse()
    top_songs_html_info = [
        'Top Songs',
        '{0} by {1}'.format(top_songs[0][0][0], top_songs[0][0][1]),
        '{0} by {1}'.format(top_songs[1][0][0], top_songs[1][0][1]),
        '{0} by {1}'.format(top_songs[2][0][0], top_songs[2][0][1]),
        '{0} by {1}'.format(top_songs[3][0][0], top_songs[3][0][1]),
        '{0} by {1}'.format(top_songs[4][0][0], top_songs[4][0][1])
    ]

    print("Songs recorded: {0}".format(songs_recorded))
    print('Generating report...')

    # Generate scrobbles per year graphic
    year_xs = np.arange(len(songs_per_year.keys()))
    plt.bar(year_xs, songs_per_year.values())
    plt.xticks(year_xs, songs_per_year.keys())
    plt.savefig('html/figures/lastfm_scrobble_years.png', dpi=200)
    plt.close()

    month_xs = np.arange(len(songs_per_month.keys()))
    plt.bar(month_xs, songs_per_month.values())
    plt.xticks(month_xs, [
        'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct',
        'Nov', 'Dec'
    ])
    plt.savefig('html/figures/lastfm_scrobble_month.png', dpi=200)
    plt.close()

    weekday_xs = np.arange(len(songs_per_weekday.keys()))
    plt.bar(weekday_xs, songs_per_weekday.values())
    plt.xticks(weekday_xs, ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'])
    plt.savefig('html/figures/lastfm_scrobble_weekday.png', dpi=200)
    plt.close()

    # Now generate HTML report
    parts = [['header', ['Last.fm Report']],
             ['big_num', ['Songs recorded', songs_recorded]],
             ['big_num', ['Top Year', top_years[0][0]]],
             ['big_num', ['Weekend Average Count', weekend_avg]],
             ['big_num', ['Weekday Average Count', weekday_avg]],
             generator.build_top3('Top Artists', top_artists),
             ['top5', top_songs_html_info],
             ['subheader', ['Scrobbles Per Year']],
             ['image', ['figures/lastfm_scrobble_years.png']],
             ['subheader', ['Scrobbles Per Month']],
             ['image', ['figures/lastfm_scrobble_month.png']],
             ['subheader', ['Scrobbles Per Weekday']],
             ['image', ['figures/lastfm_scrobble_weekday.png']]]
    if flag == 'all':
        generator.build_report('lastfm_main', parts)
    elif flag == 'year':
        generator.build_report('lastfm_current_year', parts)
time_spacings += [i['time'][j + 1] - i['time'][j]] avg_time_spacing = np.mean(time_spacings) std_time_spacing = np.std(time_spacings) activity_count = len(json_data['activities']) print('Activity Count:', activity_count) print('Lifetime Distance:', lifetime_distance) print('Lifetime HR Change:', hr_change, '±', hr_change_std * CONFIDENCE_LEVEL) print('Avg GPS Time Spacing:', avg_time_spacing, '±', std_time_spacing * CONFIDENCE_LEVEL) parts = [['header', ['Strava Report']], ['big_num', ['Activity Count', activity_count]], ['big_num', ['Lifetime Distance', lifetime_distance]], [ 'big_num', [ 'Lifetime HR Change', '{0}±{1}'.format(hr_change, hr_change_std * CONFIDENCE_LEVEL) ] ], [ 'big_num', [ 'Avg. GPS Time Spacing', '{0}±{1}'.format(avg_time_spacing, std_time_spacing * CONFIDENCE_LEVEL) ] ]] generator.build_report('strava_main', parts)
if tracker_id_to_name(i['tracker_id']) == j['tracker']: log_time = i['time'] date = utils.timestamp_to_datetime(log_time, True) val = int(i['value']) current_year = datetime.datetime.now().year days = (datetime.date.today() - datetime.date(current_year, 1, 1)).days if date.year == current_year: try: sums[j['tracker']] += val except: sums[j['tracker']] = val for i in sums.keys(): for j in year_goals: if j['tracker'] == i: print('Averaged {0} {1} compared to expected {2}'.format( sums[i]/days,i,j['avg'])) goal_parts += [['subheader', ['{0} Goal Progress'.format(i)]], ['completion_bar', [sums[i]/days, j['avg']]]] # Now Show Report overview_report() recent_changes_report() goals() # Now generate HTML report parts = [ ['header', ['Nomie Report']], ] parts += goal_parts generator.build_report('nomie_query_main', parts)
bottom = 0 top = len(day_data) last_year = day_data[bottom:top] year_norms = {} for i in BASELINE_METRICS.keys(): year_norms[i] = 0 last_year = remove_untracked_days(last_year) for i in last_year: for j in year_norms.keys(): try: year_norms[j] += i['day']['totals'][j] except KeyError: pass # Normal for day to not have some values num_entries = len(last_year) for i in year_norms.keys(): year_day_avg = year_norms[i] / num_entries add_message('Year average for {0} is {1:.1f}'.format(i, year_day_avg)) return current_health(json_data) recent_changes(json_data) longterm_health(json_data) # Now generate HTML report parts = [ ['header', ['Nutrition Report']], ] parts += html_parts generator.build_report('nutrition_main', parts)
print('Top Monthly category by count: ', top_category(last_month_data))
print('Top Monthly category by value: ',
      top_category(last_month_data, 'value'))
last_month_spending = spending(last_month_data)
print('Last month spending: ', last_month_spending)
last_month_earnings = earnings(last_month_data)
print('Last month earnings: ', last_month_earnings)

# Now do yearly reports: one sub-report per year plus a link on the
# main page.
yearly_report_data = all_year_reports(data)
years_parts = []
for year, report in yearly_report_data.items():
    year_result = [
        ['subheader', [year]],
        ['big_num', ['Spending', report['spending']]],
        ['big_num', ['Earnings', report['earnings']]],
    ]
    if report['top_categories']:
        year_result += [['big_num',
                         ['Top Category', report['top_categories'][0]]]]
    else:
        year_result += [['big_num', ['Top Category', 'None']]]
    generator.check_html_directory('mint')
    generator.build_report('mint/{0}report'.format(year), year_result)
    years_parts += year_result
    years_parts += [['link', ['mint/{0}report.html'.format(year),
                              '{0} Full Report'.format(year)]]]

# Now generate HTML report
parts = [
    ['header', ['Mint Finance Report']],
    ['big_num', ['Last month spendings', '$' + str(last_month_spending)]],
    ['big_num', ['Last month earnings', '$' + str(last_month_earnings)]],
]
parts += years_parts
generator.build_report('mint_main', parts)