def main():
    """Render a simple season distance trend as a bar chart and show it in GC."""
    metrics = GC.seasonMetrics()

    figure = go.Figure(
        go.Bar(x=metrics['date'], y=metrics['Distance'], name="Distance")
    )
    layout = dict(
        title="Small trend distance example",
        paper_bgcolor=gc_bg_color,
        plot_bgcolor=gc_bg_color,
        showlegend=True,
        font=dict(
            color=gc_text_color,
            size=12
        ),
        xaxis_title='Date',
        yaxis_title='Distance (km)',
    )
    figure.update_layout(**layout)

    # Write the chart to a temporary HTML file and display it in GoldenCheetah.
    plotly.offline.plot(figure, auto_open=False, filename=temp_file.name)
    GC.webpage(pathlib.Path(temp_file.name).as_uri())
def main():
    """Plot the ride on an OpenStreetMap map and overlay each interval of the
    configured ``map_interval_type`` as its own (legend-toggleable) trace."""
    act = GC.activity()
    # Index by elapsed seconds so interval start/stop (seconds) can be matched
    # against the samples directly.
    activity = pd.DataFrame(act, index=act['seconds'])
    activity = remove_incorrect_lat_long_values(activity)
    all_intervals = pd.DataFrame(GC.activityIntervals())
    # Keep only intervals of the selected type plus the columns needed to draw them.
    filtered_intervals = all_intervals[all_intervals.type == map_interval_type].filter(
        ["start", "stop", "color", "name"])

    # TODO Find way to always give a correct zoom
    # long_diff = max(filter_act.longitude) - min(filter_act.longitude)
    # lat_diff = max(filter_act.latitude) - min(filter_act.latitude)
    # biggest_diff = max(long_diff, lat_diff)
    #
    # diff = np.arange(0.016, 0.8, 0.01).tolist()
    # zoom_values = np.linspace(12.5, 9, num=len(diff))
    # index = min(enumerate(diff), key=lambda x: abs(x[1] - biggest_diff))
    # print("lat/long diff: " + str(biggest_diff) + "zoom: " + str(zoom_values[index[0]]))

    # Base trace: the entire ride, no hover info (intervals supply their own).
    fig = go.Figure(
        go.Scattermapbox(lat=activity.latitude,
                         lon=activity.longitude,
                         hoverinfo="skip",
                         name="Entire Ride"))
    # Center on the mean position; zoom is fixed for now (see TODO above).
    fig.update(layout=dict(mapbox_style="open-street-map",
                           margin=go.layout.Margin(l=0, r=0, b=0, t=25, pad=0),
                           mapbox=dict(
                               center=dict(
                                   lat=activity.latitude.mean(),
                                   lon=activity.longitude.mean(),
                               ),
                               zoom=9,
                           )))
    # One extra trace per interval, with per-sample distance hover text.
    for index, row in filtered_intervals.iterrows():
        interval = activity[(activity.seconds >= row.start)
                            & (activity.seconds < row.stop)]
        hovertext = []
        for i, value in interval.iterrows():
            hovertext.append(row["name"] + "<br>" +
                             str(round(value.distance, 2)) + "Km")
        fig.add_trace(
            go.Scattermapbox(lon=interval.longitude,
                             lat=interval.latitude,
                             marker={
                                 'size': 10,
                                 'symbol': "circle"
                             },
                             hoverinfo="text",
                             hovertext=hovertext,
                             name=row["name"]))
    plotly.offline.plot(fig, auto_open=False, filename=temp_file.name)
    GC.webpage(pathlib.Path(temp_file.name).as_uri())
def main():
    """Rasterize the ride's GPS track onto a grid and show it as a 3D surface.

    Each sample's flat-earth (x, y) position selects a grid cell whose value
    becomes the sample's altitude.
    """
    activity = GC.activity()
    activity_df = pd.DataFrame(activity, index=activity['seconds'])
    # Set max on two hours else i get a memory problem
    activity_df = activity_df.head(30 * 60 * 2)

    center_lat = ((activity_df.latitude.max() - activity_df.latitude.min()) / 2) \
        + activity_df.latitude.min()
    center_lon = ((activity_df.longitude.max() - activity_df.longitude.min()) / 2) \
        + activity_df.longitude.min()
    # FIX: the reference altitude was taken from longitude.max(); the
    # projection needs the maximum *altitude* (same pattern as the sibling
    # slope-polygon script in this file).
    max_altitude = activity_df.altitude.max()

    # Project lat/long/alt to flat x/y/z coordinates around the ride center.
    activity_df['x'], activity_df['y'], activity_df['z'] = zip(
        *activity_df.apply(lambda row: lla2flat(
            [row['latitude'], row['longitude'], row['altitude']],
            [center_lat, center_lon], 0, -max_altitude),
                           axis=1))

    min_x = activity_df.x.min()
    max_x = activity_df.x.max()
    min_y = activity_df.y.min()
    max_y = activity_df.y.max()
    # Magic number play around to get nice result between 1 and 0.01
    precision = 0.1
    number_of_x = (max_x - min_x) * precision
    number_of_y = (max_y - min_y) * precision

    # Grid of zeros; each sample writes its (sign-flipped) z into one cell.
    values = np.zeros((int(number_of_y + 1), int(number_of_x + 1)))
    for i in range(activity_df.seconds.count()):
        index_y = int(number_of_y / 2) - (int(activity_df.y.iloc[i] * precision) * -1)
        index_x = int(number_of_x / 2) - (int(activity_df.x.iloc[i] * precision) * -1)
        values[index_y][index_x] = activity_df.z.iloc[i] * -1

    fig = go.Figure([go.Surface(z=values)])
    plotly.offline.plot(fig, auto_open=False, filename=temp_file.name)
    GC.webpage(pathlib.Path(temp_file.name).as_uri())
def main():
    """Build a JavaScript-timeline HTML page for the season and show it in GC.

    The start/stop timestamps are formatted as ``new Date(...)`` JS snippets
    that the HTML template embeds verbatim.
    """
    season_metrics = GC.seasonMetrics()
    chart_data = get_time_line_data(season_metrics)

    # "YYYY, MM, DD, HH,MM" — the argument list for the JS Date constructor.
    start_formatted = season_metrics['date'][0].strftime(
        "%Y, %m, %d,") + season_metrics['time'][0].strftime("%H,%M")
    stop_formatted = season_metrics['date'][-1].strftime(
        "%Y, %m, %d,") + season_metrics['time'][-1].strftime("%H,%M")
    # FIX: removed a stray no-op statement (`season_metrics['date'][-1]`)
    # and redundant str() around values that are already strings.
    start_time = "new Date(" + start_formatted + ").getTime();"
    stop_time = "new Date(" + stop_formatted + ").getTime();"

    html = get_complete_html(chart_data, start_time, stop_time)
    temp_file.writelines(html)
    temp_file.close()
    GC.webpage(pathlib.Path(temp_file.name).as_uri())
def main():
    """Write an HTML page containing one MMP table per configured duration."""
    season_info = GC.season()
    season_name = str(season_info['name'][0])

    fragments = []
    for duration in durations:
        # Heading, table, spacer — one section per duration.
        fragments.extend([
            "<h1>MMP: " + str(format_seconds(duration)) + "</h1>",
            get_table_for_duration(duration, season_name),
            "</br>",
        ])
    write_html(fragments)
def main():
    """Build an HTML medal table comparing this activity's peak powers against
    the athlete's all-time and last-N-months season bests."""
    activity_metric = GC.activityMetrics()
    act = GC.activity()
    activity = pd.DataFrame(act, index=act['seconds'])
    # Peak durations in seconds and the corresponding metric-name prefixes.
    peak_duration = [1, 5, 10, 15, 20, 30, 60, 120, 180, 300, 480, 600,
                     1200, 1800, 3600, 5400]
    metric_duration_name = ["1_sec", "5_sec", "10_sec", "15_sec", "20_sec",
                            "30_sec", "1_min", "2_min", "3_min", "5_min",
                            "8_min", "10_min", "20_min", "30_min", "60_min",
                            "90_min"]
    season_peaks = pd.DataFrame(GC.seasonMetrics(all=True, filter='Data contains "P"'))
    # Keep only the date/time columns and the *_Peak_Power metric columns.
    season_peaks = season_peaks.filter(regex="^date$|^time$|_Peak_Power$", axis=1)
    rows = ""
    for i in range(len(peak_duration)):
        metric_name = metric_duration_name[i] + "_Peak_Power"
        # remove peaks after activities date
        all_time_season_peak = season_peaks.loc[
            (season_peaks.date <= activity_metric['date'])]
        last_x_months_date = activity_metric['date'] - dateutil.relativedelta.relativedelta(
            months=best_peaks_of_x_months)
        last_x_months_season_peak = season_peaks.loc[
            (season_peaks.date <= activity_metric['date'])
            & (season_peaks.date > last_x_months_date)]
        # Top-3 peaks for each comparison window.
        sorted_all_time_season_peaks = all_time_season_peak.sort_values(
            by=[metric_name], ascending=False)[:3]
        sorted_last_x_months_season_peak_ = last_x_months_season_peak.sort_values(
            by=[metric_name], ascending=False)[:3]
        peak_power_curr_activity = activity_metric[metric_name]
        sorted_all_time_season_peaks_tolist = sorted_all_time_season_peaks[metric_name].tolist()
        sorted_x_months_season_peaks_tolist = sorted_last_x_months_season_peak_[metric_name].tolist()
        # Only fall back to the "Last X Months" row when there is no
        # all-time medal row for this duration.
        # NOTE(review): the first call passes metric_name
        # ("..._Peak_Power") while the fallback passes
        # metric_duration_name[i] (e.g. "1_sec") — looks inconsistent;
        # confirm what get_html_table_row() expects before changing.
        row = get_html_table_row(peak_power_curr_activity,
                                 sorted_all_time_season_peaks_tolist,
                                 metric_name, "All Time")
        if row:
            rows = rows + str(row)
        else:
            row = get_html_table_row(peak_power_curr_activity,
                                     sorted_x_months_season_peaks_tolist,
                                     metric_duration_name[i],
                                     "Last " + str(best_peaks_of_x_months) + " Months")
            if row:
                rows = rows + str(row)
    create_end_html_float(rows)
    GC.webpage(pathlib.Path(temp_file.name).as_uri())
def plot(panda_dict):
    """Draw one bar subplot per tracked field, one trace per color group,
    write the figure to a temp HTML file and show it in GoldenCheetah."""
    fields = list(panda_dict['field'].unique())
    fig = make_subplots(rows=len(fields), cols=1, subplot_titles=fields)

    for row_idx, tracking_field in enumerate(fields, start=1):
        field_df = panda_dict[panda_dict['field'] == tracking_field]
        for color in field_df['colors'].unique():
            subset = field_df[field_df['colors'] == color]
            fig.append_trace(
                go.Bar(x=subset['names'],
                       y=subset['aggregated'],
                       marker_color=color,
                       showlegend=True,
                       text=subset['aggregated'],
                       textposition='auto',
                       hoverinfo='text',
                       hovertext=subset['notes'],
                       name=tracking_field + "-" + str(subset['filter_names'].iloc[0])),
                row=row_idx,
                col=1)
        # Units label comes from the last color group of this field (as before).
        fig.update_yaxes(
            title_text=str(subset['units'].iloc[0]),
            showgrid=True,
            ticks="inside",
            color='white',
            row=row_idx,
            col=1,
            tickformat='d',
        )

    fig['layout'].update(
        title_text="Tracking Custom fields (aggregated) by dates",
        paper_bgcolor=gc_bg_color,
        plot_bgcolor=gc_bg_color,
        font=dict(color=gc_text_color, size=12),
        yaxis=dict(showgrid=True),
    )
    plotly.offline.plot(fig, auto_open=False, filename=temp_file.name)
    GC.webpage(pathlib.Path(temp_file.name).as_uri())
def get_table_for_duration(duration, season_name):
    """Return an HTML table comparing all-time vs. current-season peak power
    (max and average of the top 5) for one duration."""
    field = 'peak_power_' + str(duration)
    all_seasons = pd.DataFrame(GC.seasonPeaks(all=True, series='power',
                                              duration=duration))
    season = pd.DataFrame(GC.seasonPeaks(series='power', duration=duration))

    avg_top5_all_time = all_seasons.nlargest(5, field).mean().values[0]
    avg_top5_season = season.nlargest(5, field).mean().values[0]
    all_max = all_seasons[field].max()
    season_max = season[field].max()

    pct_column = 'Max \'' + season_name + '\' vs avg top 5 all time (%)'
    table = pd.DataFrame({
        'Max': [all_max, season_max],
        'Avg top 5': [avg_top5_all_time, avg_top5_season],
        # Percentage only makes sense for the season row; all-time cell is blank.
        pct_column: ['', round((season_max / avg_top5_all_time) * 100, 2)],
    }).round(2)
    table.index = ['All', season_name]
    return table.to_html()
def main():
    """Plot heart rate over elapsed time for the current activity."""
    act = GC.activity()
    fig = go.Figure()
    fig.add_trace(
        go.Scatter(x=act['seconds'],
                   y=act['heart.rate'],
                   name="Heart rate",
                   line=dict(color='red')))
    fig.update_layout(
        title="Small heart rate example",
        paper_bgcolor=gc_bg_color,
        plot_bgcolor=gc_bg_color,
        showlegend=True,
        font=dict(color=gc_text_color, size=12),
        xaxis_title='seconds',
        # FIX: label typos — was 'heart reate (bmp)'.
        yaxis_title='heart rate (bpm)',
    )
    plotly.offline.plot(fig, auto_open=False, filename=temp_file.name)
    GC.webpage(pathlib.Path(temp_file.name).as_uri())
def main():
    """Build the 3D altitude figure for the current activity and show it."""
    current_activity = GC.activity()
    intervals = GC.activityIntervals()
    metrics = GC.activityMetrics()
    zones = GC.athleteZones(date=metrics["date"], sport="bike")

    figure = altitude_3d_figure(current_activity, intervals, zones,
                                coloring_mode, slice_distance, polygon_limit)

    # Write to a temp HTML file and display it inside GoldenCheetah.
    plotly.offline.plot(figure, auto_open=False, filename=temp_file.name)
    GC.webpage(pathlib.Path(temp_file.name).as_uri())
def get_hr_list_of_activities(activities_subset):
    """Group speed samples by rounded heart rate across a set of activities.

    Parameters
    ----------
    activities_subset : iterable
        Activity handles accepted by ``GC.activity(activity=...)``.

    Returns
    -------
    dict
        Maps integer heart rate -> list of all speed samples recorded at it.
    """
    start_time = time.time()
    heart_rate_speed_dict = {}
    for activity in activities_subset:
        current_activity = GC.activity(activity=activity)
        for hr, speed in zip(current_activity['heart.rate'],
                             current_activity['speed']):
            # In some cases (zwift) hr is not a round integer therefore round
            # the hr to a integer. This when grouping all speed data at certain HR
            hr_rounded = int(round(hr, 0))
            # setdefault replaces the original get()/if-else dance:
            # one lookup, identical resulting dict.
            heart_rate_speed_dict.setdefault(hr_rounded, []).append(speed)
    print("get activities took: " + str(time.time() - start_time))
    return heart_rate_speed_dict
def main():
    """Compose the activity overview page (map, 3D altitude, ride plot,
    time-in-zone sections) and show it in GoldenCheetah.

    Each section degrades to an explanatory <h2> when the data it needs
    (GPS, power, heart rate) is missing from the activity.
    """
    activity_metric = GC.activityMetrics()
    act = GC.activity()
    activity = pd.DataFrame(act, index=act['seconds'])
    zone = GC.athleteZones(date=activity_metric["date"], sport="bike")
    activity_intervals = GC.activityIntervals()
    zones_low = zone['zoneslow'][0]
    zone_colors = zone['zonescolor'][0]
    # Possible to override colors
    # zone_colors = ["rgb(127, 127, 127)",
    #                "rgb(255, 85, 255)",
    #                "rgb(51, 140, 255)",
    #                "rgb(89, 191, 89)",
    #                "rgb(255, 204, 63)",
    #                "rgb(255, 102, 57)",
    #                "rgb(255, 51, 12)"]
    if 'latitude' in activity:
        geo_html = geo_plot_html(activity)
        fig = altitude_3d_figure(activity, activity_intervals, zone,
                                 coloring_mode, slice_distance, polygon_limit)
        altitude_html = plotly.offline.plot(fig, output_type='div',
                                            auto_open=False)
    else:
        geo_html = "<h2>Unable to draw activities ride plot no GPS data</h2>"
        # FIX: altitude_html was undefined on this branch, raising NameError
        # in create_end_html_float() for rides without GPS data.
        altitude_html = "<h2>Unable to draw altitude plot no GPS data</h2>"
    if 'power' in activity:
        tiz_power_html = tiz_html(activity_metric, zone, type="L")
        if 'latitude' in activity:
            ride_html = ride_plot_html(activity, zone_colors, zones_low)
        else:
            ride_html = "<h2>Unable to draw activities ride plot no GPS data</h2>"
    else:
        ride_html = "<h2>Unable to draw activities ride plot (no power data)</h2>"
        tiz_power_html = "<h2>Unable to draw Time in Zone power (no power data)</h2>"
    if 'heart.rate' in activity:
        tiz_hr_html = tiz_html(activity_metric, zone, type="H")
    else:
        tiz_hr_html = "<h2>Unable to draw Time in Zone heart rate (no HR data)</h2>"
    create_end_html_float(activity_metric, geo_html, ride_html, tiz_power_html,
                          tiz_hr_html, altitude_html)
    GC.webpage(pathlib.Path(temp_file.name).as_uri())
def main():
    """Build and return the interactive Dash application for the activity
    dashboard (medals, map, time-in-zone, TSB-vs-IF and ride plots).

    Static figures are computed up front; the map and the two ride plots are
    re-rendered through Dash callbacks when their controls change.
    """
    start_time = datetime.now()
    assets_dir = write_css()
    print('write css duration: {}'.format(datetime.now() - start_time))
    start_time = datetime.now()
    # Gather everything from GoldenCheetah once; the callbacks below close
    # over these values instead of re-fetching.
    activity_metric = GC.activityMetrics()
    act = GC.activity()
    activity = pd.DataFrame(act, index=act['seconds'])
    intervals = pd.DataFrame(GC.activityIntervals())
    # all pmc data
    pmc_dict = GC.seasonPmc(all=True, metric="BikeStress")
    pmc = pd.DataFrame(pmc_dict)
    zone = GC.athleteZones(date=activity_metric["date"], sport="bike")
    zones_low = zone['zoneslow'][0]
    zone_colors = zone['zonescolor'][0]
    cp = zone['cp'][0]
    season_peaks = pd.DataFrame(GC.seasonMetrics(all=True, filter='Data contains "P"'))
    print('Gathering data duration: {}'.format(datetime.now() - start_time))
    # Possible to override colors
    # zone_colors = ["rgb(127, 127, 127)",
    #                "rgb(255, 85, 255)",
    #                "rgb(51, 140, 255)",
    #                "rgb(89, 191, 89)",
    #                "rgb(255, 204, 63)",
    #                "rgb(255, 102, 57)",
    #                "rgb(255, 51, 12)"]

    # Dropdown options: one entry per interval type present in the activity.
    interval_type_options = []
    for interval_type in intervals.type.unique().tolist():
        interval_type_options.append({"label": interval_type,
                                      "value": interval_type})
    app = dash.Dash(assets_folder=assets_dir)
    # cache = Cache(app.server, config={
    #     'CACHE_TYPE': 'simple'
    # })
    # cache.clear()
    app.layout = html.Div([
        html.Div([
            get_title_html(activity_metric),
            html.Div([
                html.P("Medals Power"),
                html.P(get_medals_html(activity, activity_metric,
                                       season_peaks, HR=False)),
            ], className="medals_power"),
            html.Div([
                html.P("Medals Heart Rate"),
                html.P(get_medals_html(activity, activity_metric,
                                       season_peaks, HR=True)),
            ], className="medals_hr")
        ], className='top'),
        html.Div([
            # Interval-type selector; defaults to "USER" when available.
            html.Div(["Select which intervals to show on the map: ",
                      dcc.Dropdown(id="interval-type",
                                   value=(
                                       "USER" if "USER" in intervals.type.unique().tolist()
                                       else intervals.type.unique().tolist()[0]),
                                   options=interval_type_options,
                                   style={'width': '200px'})],
                     className="row",
                     style={"display": "block", "margin-left": "0px"}),
            html.P(dcc.Graph(id="map-graph")),
        ], className="map"),
        html.Div([
            html.Div([
                html.P(dcc.Graph(figure=tiz_fig(activity, activity_metric,
                                                zone, metric_type="L"))),
            ], className="tiz_power"),
            html.Div([
                html.P(dcc.Graph(figure=tiz_fig(activity, activity_metric,
                                                zone, metric_type="H"))),
            ], className="tiz_hr"),
        ], className="tiz"),
        html.Div([html.P(dcc.Graph(figure=tsb_if_fig(activity, activity_metric,
                                                     pmc)))],
                 className="tsb_if"),
        html.Div([
            # Two ride-plot variants behind tabs: structured (per interval
            # type) and smooth (rolling average with adjustable window).
            dcc.Tabs(id="tabs-example", value='structured', children=[
                dcc.Tab(label='Structured', value="structured", children=[
                    html.Div([
                        "Choose Interval Type for structured overview: ",
                        dcc.Dropdown(id="interval-type-ride-plot",
                                     value=(
                                         "USER" if "USER" in intervals.type.unique().tolist()
                                         else intervals.type.unique().tolist()[0]),
                                     options=interval_type_options,
                                     style={'width': '200px'})],
                        className="row",
                        style={"display": "block", "margin-left": "0px"}),
                    html.P(dcc.Graph(id="ride-plot-graph-structured")),
                ]),
                dcc.Tab(label='Smooth', value='smooth', children=[
                    html.Div([
                        "Choose smoothness value (lower value needs longer loading time): ",
                        dcc.Slider(
                            id='smooth-value-ride-plot',
                            min=5,
                            max=200,
                            step=5,
                            value=20,
                        )
                    ], className="row",
                        style={"display": "block", "margin-left": "0px",
                               "width": "500px"}),
                    html.P(dcc.Graph(id="ride-plot-graph-smooth")),
                ]),
            ]),
            # html.Div([
            #     "Structured or Smooth: ",
            #     dcc.Dropdown(id="view-type", value="Structured",
            #                  options=[{"label": "Structured", "value": "Structured"},
            #                           {"label": "Smooth", "value": "Smooth"}],
            #                  style={'width': '200px'})],
            #     className="row",
            #     style={"display": "block", "margin-left": "0px"}),
            # html.P(dcc.Graph(id="ride-plot-graph")),
        ], className="ride_plot"),
    ], className='container')

    @app.callback(
        Output('map-graph', 'figure'),
        [Input('interval-type', 'value')])
    def update_map_figure(value_type):
        # Re-draw the map whenever a different interval type is selected.
        return geo_plot_fig(activity, intervals, value_type)

    @app.callback(
        Output('ride-plot-graph-structured', 'figure'),
        [Input('interval-type-ride-plot', 'value')])
    def update_structured_ride_plot(selected_interval_type):
        before = datetime.now()
        if 'power' in activity and 'latitude' in activity:
            fig = ride_plot_structured_fig(activity, intervals, zone_colors,
                                           zones_low, cp,
                                           selected_interval_type)
        else:
            # No power data: show an empty, titled placeholder figure.
            fig = go.Figure()
            fig.update_layout(title="Unable to draw activities ride plot (no power data)")
            fig.update_layout(empty_chart_dict)
        print('Create ride plot duration: {}'.format(datetime.now() - before))
        return fig

    @app.callback(
        Output('ride-plot-graph-smooth', 'figure'),
        [Input('smooth-value-ride-plot', 'value')])
    def update_smooth_ride_plot(smooth_value):
        before = datetime.now()
        if 'power' in activity and 'latitude' in activity:
            fig = ride_plot_smooth(activity, zone_colors, zones_low,
                                   smooth_value=int(smooth_value))
        else:
            # No power data: show an empty, titled placeholder figure.
            fig = go.Figure()
            fig.update_layout(title="Unable to draw activities ride plot (no power data)")
            fig.update_layout(empty_chart_dict)
        print('Create ride plot duration: {}'.format(datetime.now() - before))
        return fig

    return app
def main():
    """Render a heat map of all GPS activities within the compare season.

    Shows an estimated run time and lets the user bail out via a yes/no
    dialog before the (potentially slow) processing starts.
    """
    start_time = datetime.now()
    season = GC.season(compare=True)
    activities_list = GC.activities(filter='Data contains "G"')
    start_dt = datetime.combine(season['start'][0], datetime.min.time())
    end_dt = datetime.combine(season['end'][0], datetime.min.time())

    # Keep only the activities that fall inside the selected season.
    activities_sub_list = [a for a in activities_list if start_dt <= a <= end_dt]

    # Rough per-activity cost estimates (seconds) for the warning dialog.
    activity_time_calculation = round(len(activities_sub_list) * 0.5)
    map_time_calculation = round(len(activities_sub_list) * 0.05)
    html_write_time_calculation = round(len(activities_sub_list) * 0.2)
    duration = activity_time_calculation + map_time_calculation + html_write_time_calculation
    msg = """You are about to process: """ + str(len(activities_sub_list)) + """ (activities)
Based on your selection an rough estimation on how long the script will run: """ + \
        str(timedelta(seconds=duration)) + """
Do you want to continue?
"""
    if mb.askyesno("Expected run time", msg):
        # Collect every activity's lat/long samples, then concatenate once.
        # FIX: one pd.concat instead of repeated DataFrame.append (quadratic
        # and removed in modern pandas).
        frames = []
        for activity in activities_sub_list:
            current_activity = GC.activity(activity=activity)
            frames.append(
                pd.DataFrame(current_activity,
                             index=current_activity['seconds']).filter(
                                 ["longitude", "latitude"]))
        all_lat_long_df = (pd.concat(frames) if frames
                           else pd.DataFrame(columns=["longitude", "latitude"]))
        heat_data = all_lat_long_df[['latitude', 'longitude']].to_numpy()
        print('Gathering data est:{} act:{} '.format(
            activity_time_calculation,
            round((datetime.now() - start_time).total_seconds(), 2)))

        before = datetime.now()
        fmap = folium.Map(
            tiles='CartoDB positron' if light_map else 'CartoDB dark_matter',
            prefer_canvas=True)
        HeatMap(heat_data,
                radius=radius,
                blur=blur,
                gradient=heatmap_grad['light' if light_map else 'dark'],
                min_opacity=0.3,
                max_val=1).add_to(fmap)
        fmap.fit_bounds(fmap.get_bounds())
        print('Create map est:{} act:{}'.format(
            map_time_calculation,
            round((datetime.now() - before).total_seconds(), 2)))

        before = datetime.now()
        html = fmap.get_root().render()
        temp_file.writelines(html)
        temp_file.close()
        # FIX: ndigits was outside round() and passed as an ignored third
        # argument to format().
        print('Write HTML est:{} act:{}'.format(
            html_write_time_calculation,
            round((datetime.now() - before).total_seconds(), 2)))
        print('Total time est:{} act:{}'.format(
            timedelta(seconds=duration),
            str(datetime.now() - start_time).split('.')[0]))
    else:
        create_cancel_html()
    GC.webpage(pathlib.Path(temp_file.name).as_uri())
def main():
    """Compose the full activity report page (map, ride plot, time-in-zone,
    TSB-vs-IF and medal sections) and show it in GoldenCheetah.

    Every section degrades to an explanatory <h2> (or empty string for the
    medals) when the data it needs — GPS, power, heart rate — is missing.
    Per-section timings are printed for profiling.
    """
    start_time = datetime.now()
    activity_metric = GC.activityMetrics()
    act = GC.activity()
    activity = pd.DataFrame(act, index=act['seconds'])
    intervals = pd.DataFrame(GC.activityIntervals())
    # all pmc data
    pmc_dict = GC.seasonPmc(all=True, metric="BikeStress")
    pmc = pd.DataFrame(pmc_dict)
    zone = GC.athleteZones(date=activity_metric["date"], sport="bike")
    zones_low = zone['zoneslow'][0]
    zone_colors = zone['zonescolor'][0]
    season_peaks = pd.DataFrame(
        GC.seasonMetrics(all=True, filter='Data contains "P"'))
    end_gather_time = datetime.now()
    print('Gathering data duration: {}'.format(end_gather_time - start_time))
    # Possible to override colors
    # zone_colors = ["rgb(127, 127, 127)",
    #                "rgb(255, 85, 255)",
    #                "rgb(51, 140, 255)",
    #                "rgb(89, 191, 89)",
    #                "rgb(255, 204, 63)",
    #                "rgb(255, 102, 57)",
    #                "rgb(255, 51, 12)"]

    # Map section needs GPS data.
    if 'latitude' in activity:
        before = datetime.now()
        geo_html = geo_plot_html(activity, intervals, show_interval_type)
        print('Create map html duration: {}'.format(datetime.now() - before))
    else:
        geo_html = "<h2>Unable to draw activities ride plot no GPS data</h2>"
    # Power-based sections: time-in-zone, ride plot, TSB-vs-IF, power medals.
    if 'power' in activity:
        before = datetime.now()
        tiz_power_html = tiz_html(activity_metric, zone, type="L")
        print('Create time in zone power html duration: {}'.format(
            datetime.now() - before))
        if 'latitude' in activity:
            before = datetime.now()
            ride_html = ride_plot_html(activity, zone_colors, zones_low)
            print('Create ride html duration: {}'.format(datetime.now() - before))
        else:
            ride_html = "<h2>Unable to draw activities ride plot no GPS data</h2>"
        before = datetime.now()
        tsb_if_power_html = tsb_if_html(activity_metric, pmc)
        print('Create tsb vs if html duration: {}'.format(datetime.now() - before))
        before = datetime.now()
        medals_power_html = get_medals_html(activity_metric, season_peaks)
        print('Create medals power html duration: {}'.format(datetime.now() - before))
    else:
        ride_html = "<h2>Unable to draw activities ride plot (no power data)</h2>"
        tiz_power_html = "<h2>Unable to draw Time in Zone power (no power data)</h2>"
        tsb_if_power_html = "<h2>Unable to draw TSB vs IF (no power data)</h2>"
        medals_power_html = ""
    # Heart-rate based sections: time-in-zone and HR medals.
    if 'heart.rate' in activity:
        before = datetime.now()
        tiz_hr_html = tiz_html(activity_metric, zone, type="H")
        print(
            'Create time in zone hr html duration: {}'.format(datetime.now() - before))
        before = datetime.now()
        medals_hr_html = get_medals_html(activity_metric, season_peaks, HR=True)
        print('Create medals hr html duration: {}'.format(datetime.now() - before))
    else:
        tiz_hr_html = "<h2>Unable to draw Time in Zone heart rate (no HR data)</h2>"
        medals_hr_html = ""
    before = datetime.now()
    create_end_html_float(activity_metric, medals_power_html, medals_hr_html,
                          geo_html, ride_html, tiz_power_html, tiz_hr_html,
                          tsb_if_power_html)
    print('Create end html duration: {}'.format(datetime.now() - before))
    end_total_time = datetime.now()
    print('Complete duration: {}'.format(end_total_time - start_time))
    GC.webpage(pathlib.Path(temp_file.name).as_uri())
def main():
    """Fit the Peronnet-Thibault power-duration model to the season mean-max
    curve, plot measured vs. modelled power plus the residual, and mark
    short/medium/long test-target durations where the athlete most
    underperforms the model."""
    weight = "regress"  # one of "none", "regress" or "date" or "logistic"
    # Get data
    mmp = GC.seasonMeanmax()
    pmax = max(mmp['power'])
    # get meanmax power data as whole watts
    yy = np.rint(np.asarray(mmp["power"])[1:])
    secs = np.asarray(range(1, len(yy)))
    # NOTE(review): yy has len(mmp['power'])-1 samples but secs only
    # len(yy)-1 values, so the last yy sample has no x coordinate — looks
    # like an off-by-one (range(1, len(yy) + 1)); confirm before changing,
    # since the fit result would shift slightly.
    # truncate to first 2 hours of data
    if len(yy) > 7200:
        yy = yy[0:7200]
        secs = secs[0:7200]
    # initial fit
    params = Parameters()
    params.add('ftp', value=200)
    params.add('frc', value=11000)
    params.add('pmax', value=1000)
    params.add('a', value=40)
    params.add('tau2', value=50)
    params.add('tte', value=1800)
    out = minimize(peronnet_thibault, params, args=(secs, yy))
    #out.params.pretty_print()
    print("FTP=", out.params["ftp"].value, "PMax=", out.params["pmax"].value,
          "FRC=", out.params["frc"].value, "TTE=", out.params["tte"].value)
    # model derived values
    zero = np.zeros(len(yy))
    mod = peronnet_thibault(out.params, secs, zero) * -1
    # substract predicted (mod) and measured (yy)
    residual = np.subtract(yy, mod)
    # Normalize residual to the fixed range [-10, 10].
    a = -10
    b = 10
    minimal = min(residual)
    maximal = max(residual)
    # norm = [(number - a) / (b - a) for number in residual]
    norm = [
        a + ((number - minimal) * (b - a) / (maximal - minimal))
        for number in residual
    ]
    # Find short medium long duration test targets: the duration with the
    # largest (most negative) normalized shortfall inside each bracket.
    short_duration_bracket = [15, 40]  # 15 - 40 seconds
    medium_duration_bracket = [60, 900]  # 1 - 15 minutes
    long_duration_bracket = [1200, 2400]  # 20 - 40 minutes
    short_duration_index = norm.index(
        min(norm[short_duration_bracket[0]:short_duration_bracket[1]]))
    medium_duration_index = norm.index(
        min(norm[medium_duration_bracket[0]:medium_duration_bracket[1]]))
    long_duration_index = norm.index(
        min(norm[long_duration_bracket[0]:long_duration_bracket[1]]))
    # Start building chart_not_working_yet_after_single_extract
    # Layout: two full-width rows (model, residual) plus a bottom row with
    # the three test-target panels.
    fig = make_subplots(
        rows=3,
        cols=3,
        specs=[[{
            "colspan": 3
        }, None, None], [{
            "colspan": 3
        }, None, None], [{}, {}, {}]],
        subplot_titles=("Peronnet Thibault Model", "Normalized/Residual",
                        "Short duration test target",
                        "Medium duration test target",
                        "Long duration test target"),
        vertical_spacing=0.10,
    )
    # meanmax curve
    fig.add_trace(go.Scatter(
        x=secs,
        y=yy,
        mode='lines',
        line=dict(color='orange', width=1, dash='dash'),
        name="mean maximal",
        hovertext=[
            "Watts: " + str(watts) + "<br>Time: " + str(format_seconds(i))
            for i, watts in zip(secs, yy)
        ],
        hoverinfo="text",
    ), row=1, col=1)
    # model curve
    fig.add_trace(go.Scatter(
        x=secs,
        y=mod,
        line=dict(shape='spline'),
        name="Peronnet Thibault model",
        hovertext=[
            "Watts: " + str(int(watts)) + "<br>Time: " + str(format_seconds(i))
            for i, watts in zip(secs, mod)
        ],
        hoverinfo="text",
    ), row=1, col=1)
    # Residual
    fig.add_trace(go.Scatter(
        x=secs,
        y=residual,
        line=dict(shape='spline'),
        name="Residual",
        hovertext=[
            "Residual Watts: " + str(int(watts)) + "<br>Time: " +
            str(format_seconds(i)) for i, watts in zip(secs, residual)
        ],
        hoverinfo="text",
    ), row=2, col=1)
    # Normalized
    fig.add_trace(go.Scatter(
        x=secs,
        y=norm,
        line=dict(shape='spline'),
        name="Normalized",
        hovertext=[
            "Normalized: " + str(round(normalize, 2)) + "<br>Time: " +
            str(format_seconds(i)) for i, normalize in zip(secs, norm)
        ],
        hoverinfo="text",
    ), row=2, col=1)
    # One bottom-row panel per test target.
    add_duration_target(fig, mod[short_duration_index],
                        yy[short_duration_index], short_duration_bracket,
                        secs[short_duration_index], "Short", 1, pmax)
    add_duration_target(fig, mod[medium_duration_index],
                        yy[medium_duration_index], medium_duration_bracket,
                        secs[medium_duration_index], "Medium", 2, pmax)
    add_duration_target(fig, mod[long_duration_index],
                        yy[long_duration_index], long_duration_bracket,
                        secs[long_duration_index], "Long", 3, pmax)
    # tick_values = np.logspace(0.01, math.log10(max(xx)), 50, base=10, endpoint=True)
    tick_values = [
        1, 2, 3, 4, 5, 6, 8, 10, 15, 20, 30, 40, 60, 120, 180, 240, 360, 480,
        600, 900, 1200, 1800, 2400, 3600, 7200
    ]
    fig.update_layout(
        go.Layout(
            paper_bgcolor=gc_bg_color,
            plot_bgcolor=gc_bg_color,
            font=dict(color=gc_text_color, size=12),
            showlegend=True,
            xaxis1=dict(
                type='log',
                tickangle=45,
                tickvals=tick_values,
                ticktext=[format_seconds(i) for i in tick_values],
            ),
            xaxis2=dict(
                type='log',
                tickangle=45,
                tickvals=tick_values,
                ticktext=[format_seconds(i) for i in tick_values],
            ),
            margin={'t': 10},
        ))
    current_annotation_list = list(fig["layout"]["annotations"])
    current_annotation_list.append(
        # FTP report
        go.layout.Annotation(
            x=1,
            y=400,
            showarrow=False,
            text='FTP: %d Pmax: %d FRC: %d TTE: %d' %
            (out.params["ftp"].value, out.params["pmax"].value,
             out.params["frc"].value, out.params["tte"].value),
            font=dict(family='Courier New, monospace', size=20,
                      color="#ff0000"),
            xref="x1",
            yref="y1"))
    # NOTE(review): this appends the existing annotations to themselves
    # (duplicates) while adding the FTP label; presumably harmless for
    # rendering — verify before changing.
    fig["layout"]["annotations"] += tuple(current_annotation_list)
    plotly.offline.plot(fig, auto_open=False, filename=temp_file.name)
    # Workaround for fixing margin
    text = pathlib.Path(temp_file.name).read_text()
    text = text.replace('<body>', '<body style="margin: 0px;">')
    pathlib.Path(temp_file.name).write_text(text)
    GC.webpage(pathlib.Path(temp_file.name).as_uri())
def main():
    """Plot altitude/slope over time and shade the altitude profile with
    gradient-colored polygons (one fill color per slope bucket).

    Consecutive slices that fall in the same color bucket are merged into a
    single SVG path to keep the polygon count down.
    """
    activity = GC.activity()
    activity_df = pd.DataFrame(activity, index=activity['seconds'])
    # activity_df = activity_df.head(30 * 60)
    # activity_df = activity_df.head(365)
    # Geographic center of the ride, used as projection reference.
    reference_location = [
        ((activity_df.latitude.max() - activity_df.latitude.min()) / 2)
        + activity_df.latitude.min(),
        ((activity_df.longitude.max() - activity_df.longitude.min()) / 2)
        + activity_df.longitude.min()
    ]
    max_altitude = activity_df.altitude.max()
    # Project lat/long/alt to flat x/y/z around the reference location.
    activity_df['x'], activity_df['y'], activity_df['z'] = zip(
        *activity_df.apply(lambda row: lla2flat([
            row['latitude'], row['longitude'], row['altitude']
        ], reference_location, 0, -max_altitude),
                           axis=1))
    # activity_df['x'], activity_df['y'], activity_df['z'] = zip(*activity_df.apply(lambda row:
    #                                                            geodetic_to_ecef(row['latitude'],
    #                                                                             row['longitude'],
    #                                                                             row['altitude'])
    #                                                            , axis=1))
    # Altitude-gain buckets and their fill colors (bisect over 'breaks').
    gradiant_dict = {
        'breaks': [-15, -7.5, 0, 7.5, 15, 20, 100],
        # 'colors': ['blue', 'lightblue', 'green', 'gray', 'yellow', 'orange', 'red'],
        # <-15 darkblue, <-7.5 mid blue, <0 lightblue , >0 green, >7.5 yellow,
        # >15 light red, >20 dark red
        'colors': [
            'rgba(0,0,133,0.6)', 'rgba(30,20,255,0.6)', 'rgba(80,235,255,0.6)',
            'rgba(80,255,0,0.6)', 'rgba(255,255,0,0.6)', 'rgba(235,0,0,0.6)',
            'rgba(122, 0,0,0.6)'
        ]
    }
    gradiant_df = pd.DataFrame(gradiant_dict)
    data = []
    data.append(
        go.Scatter(
            mode='lines+markers',
            x=activity_df.seconds,
            y=activity_df.z,
            name="Altitude",
            showlegend=True,
        ))
    data.append(
        go.Scatter(
            mode='lines+markers',
            x=activity_df.seconds,
            y=activity_df.slope,
            name="Slope",
            showlegend=True,
        ))
    # 20-sample rolling mean to smooth the raw slope signal.
    activity_df['smooth_slope'] = activity_df.slope.rolling(20).mean()
    data.append(
        go.Scatter(
            mode='lines+markers',
            x=activity_df.seconds,
            y=activity_df.smooth_slope,
            # NOTE(review): "Sooth Slope" is a typo for "Smooth Slope" in a
            # user-visible legend label; left unchanged here.
            name="Sooth Slope",
            showlegend=True,
        ))
    # Slice per x seconds
    slice_value = 10
    number_slices = len(activity_df.index) / slice_value
    shapes = []
    # NOTE(review): cumulative_gain and tmp (below) are written but never
    # read; start_x/stop_x/start_y/stop_y are likewise unused.
    cumulative_gain = 0
    paths = []
    for i in range(int(number_slices)):
        start = i * slice_value
        # last slice take last sample (negative iloc indexes the final row)
        if i == int(number_slices) - 1:
            stop = -1
        else:
            stop = (i * slice_value) + slice_value
        tmp = ""
        #
        # new_df = activity_df.iloc[i*slice_value:(i*slice_value)+slice_value][['x', 'z', 'altitude']]
        #
        # new_df = activity_df.iloc[[(i * slice_value), ((i * slice_value) + slice_value)]][['x', 'y', 'altitude', 'seconds']]
        #
        # determine bucket
        altitude_gain = activity_df.z.iloc[stop] - activity_df.z.iloc[start]
        index = bisect.bisect_left(gradiant_df.breaks, altitude_gain)
        color = gradiant_df.colors[index]
        start_x = activity_df.x.iloc[start]
        stop_x = activity_df.x.iloc[stop]
        start_y = activity_df.y.iloc[start]
        stop_y = activity_df.y.iloc[stop]
        start_altitude = activity_df.z.iloc[start]
        stop_altitude = activity_df.z.iloc[stop]
        if i == 0 or paths[-1]['color'] != color:
            # create new path (polygon): baseline -> start alt -> stop alt -> baseline
            path_new = ("M " + str(activity_df.seconds.iloc[start]) + "," + "0" +
                        " L" + str(activity_df.seconds.iloc[start]) + "," +
                        str(start_altitude) +
                        " L" + str(activity_df.seconds.iloc[stop]) + "," +
                        str(stop_altitude) +
                        " L" + str(activity_df.seconds.iloc[stop]) + "," + "0Z")
            paths.append({
                'path': path_new,
                'color': color,
            })
        else:
            # extend previous polygon: drop its closing segment (everything
            # after the last 'L') and append the new end points.
            last = len(paths) - 1
            prev_path = paths[last]
            new_path = prev_path['path'].rsplit('L', 1)[0] + \
                " L" + str(activity_df.seconds.iloc[stop]) + "," + str(stop_altitude) + \
                " L" + str(activity_df.seconds.iloc[stop]) + "," + "0Z"
            paths[last]['path'] = new_path
            # path_new = ("M " + str(activity_df.seconds.iloc[start]) + "," + "0" +
            #             " L" + str(activity_df.seconds.iloc[start]) + "," + str(start_altitude) +
            #             " L" + str(activity_df.seconds.iloc[stop]) + "," + str(stop_altitude) +
            #             " L" + str(activity_df.seconds.iloc[stop]) + "," + "0 Z")
            # paths.append({'path': path_new,
            #               'color': color,
            #               })
    print("Number of polygons: " + str(len(paths)))
    for path in paths:
        shapes.append(
            go.layout.Shape(type="path",
                            path=path['path'],
                            fillcolor=path['color'],
                            line=dict(color=path['color'], width=1)
                            # line_color="Red",
                            ))
    fig = go.Figure(data=data)
    fig.update_layout(shapes=shapes, )
    plotly.offline.plot(fig, auto_open=False, filename=temp_file.name)
    GC.webpage(pathlib.Path(temp_file.name).as_uri())
def get_filtered_rides():
    """Attach the matching season metrics to every configured filter entry."""
    for filter_name, entry in filters.items():
        # Store the query result back on the entry for later use.
        entry['season_metrics'] = GC.seasonMetrics(
            all=True, filter=entry['filter'], compare=False)
def main():
    """Render a TSB-vs-IF quadrant scatter chart for the compared seasons.

    X axis is stress balance (TSB/sb from PMC data), Y axis is intensity
    factor (IF); marker size scales with BikeStress. Quadrants are labeled
    Maintain / Race / Overload / Junk. Output is written to ``temp_file``
    and shown via ``GC.webpage``.
    """
    # get data
    selected_seasons = GC.season(compare=True)
    compares = GC.seasonMetrics(compare=True, filter='Data contains "P"')
    # all pmc data
    pmc_dict = GC.seasonPmc(all=True, metric="BikeStress")
    pmc = pd.DataFrame(pmc_dict, index=pmc_dict['date'])
    fig = go.Figure()
    intensity_factor = []
    stress_balance = []
    for compare, season_name in zip(compares, selected_seasons['name']):
        # Workout_Title is only present on some GC versions/data sets
        if 'Workout_Title' in compare[0]:
            metrics = pd.DataFrame(compare[0], index=compare[0]['date']).filter(
                ['date', 'IF', 'color', 'Workout_Code', 'BikeStress', 'Workout_Title'])
        else:
            metrics = pd.DataFrame(compare[0], index=compare[0]['date']).filter(
                ['date', 'IF', 'color', 'Workout_Code', 'BikeStress'])
        # Filter out IF = 0 (probably rides without power)
        metrics = metrics.loc[metrics.IF > 0]
        # combine pmc and metric data
        merged_metrics = pd.merge(metrics, pmc)
        stress_balance.extend(merged_metrics.sb.tolist())
        intensity_factor.extend(merged_metrics.IF.tolist())
        # Determine the radius of the circles based on BikeStress (on a scale of 30-100)
        # merged_metrics['radius'] = np.sqrt((merged_metrics.BikeStress / 3.1415927))
        a = 30
        b = 100
        minimal = merged_metrics.BikeStress.min()
        maximal = merged_metrics.BikeStress.max()
        div = maximal - minimal
        # guard against a zero range (all rides same BikeStress)
        if div == 0:
            div = 1
        # norm = [(number - a) / (b - a) for number in residual]
        merged_metrics['radius'] = a + ((merged_metrics.BikeStress - minimal) * (b - a) / div)
        # norm = [a + ((number - minimal) * (b - a) / (maximal - minimal)) for number in residual]
        # Determine hovertext
        titles = "Title: " + merged_metrics.Workout_Title.map(str) if 'Workout_Title' in merged_metrics else ""
        merged_metrics['date'] = pd.to_datetime(merged_metrics.date)
        merged_metrics['hovertext'] = "Date: " + merged_metrics.date.dt.strftime('%d-%m-%Y').map(str) + "<br>" + \
                                      "TSS: " + merged_metrics.BikeStress.astype(int).map(str) + "<br>" + \
                                      "TSB: " + round(merged_metrics.sb, 1).map(str) + "<br>" + \
                                      titles
        # make transparent for overlapping
        # colors <- adjustcolor(merged_metrics.color, 0.6)
        # When comparing multiple seasons: one color/legend entry per season;
        # otherwise legend per workout code.
        if not len(compares) == 1:
            color = compare[1]
            merged_metrics['color'] = color
            merged_metrics['legend'] = season_name
        else:
            merged_metrics['legend'] = merged_metrics['Workout_Code']
        # one scatter trace per distinct color so the legend groups correctly
        for i in merged_metrics.color.unique():
            cur_metrics = merged_metrics.loc[merged_metrics.color == i]
            trace_name = cur_metrics.iloc[0]['legend']
            if not trace_name:
                trace_name = "None"
            # Add scatter traces
            fig.add_traces(
                go.Scatter(
                    x=cur_metrics.sb,
                    y=cur_metrics.IF,
                    mode='markers+text',
                    marker=dict(
                        size=cur_metrics.radius,
                        color=cur_metrics.color
                    ),
                    name=trace_name,
                    showlegend=True,
                    hoverinfo="text",
                    hovertext=cur_metrics.hovertext,
                    text=cur_metrics['date'].dt.strftime('%d-%m-<br>%Y'),
                    textfont=dict(
                        size=8,
                        color='darkgray',
                    )
                )
            )
    # print(merged_metrics[['date', 'sb', 'BikeStress', 'radius', 'Workout_Code', 'color', 'legend']].head())
    # Add Quadrant text — axis ranges padded beyond the observed extremes
    min_intensity_factor = min(min(intensity_factor) * 0.9, 0.7)
    max_intensity_factor = max(max(intensity_factor) * 1.1, 0.9)
    min_stress_balance = min(min(stress_balance) * 1.2, -5)
    max_stress_balance = max(max(stress_balance) * 1.2, 5)
    annotation = [
        get_annotation(min_stress_balance / 2, min_intensity_factor * 1.03, "Maintain"),
        get_annotation(max_stress_balance / 2, max_intensity_factor * 0.98, "Race"),
        get_annotation(min_stress_balance / 2, max_intensity_factor * 0.98, "Overload"),
        get_annotation(max_stress_balance / 2, min_intensity_factor * 1.03, "Junk")
    ]
    fig.update_layout(
        title="TSB vs IF (" + ",".join(selected_seasons['name']) + ")",
        paper_bgcolor=gc_bg_color,
        plot_bgcolor=gc_bg_color,
        font=dict(
            color=gc_text_color,
            size=12
        ),
        annotations=annotation,
    )
    # Add horizontal IF 0.85 line
    fig.add_trace(
        go.Scatter(
            x=[min_stress_balance, max_stress_balance],
            y=[0.85, 0.85],
            mode='lines',
            showlegend=False,
            line=dict(
                color="White",
                dash='dash'
            )
        )
    )
    # Add vertical TSB 0 line
    fig.add_trace(
        go.Scatter(
            x=[0, 0],
            y=[min_intensity_factor, max_intensity_factor],
            mode='lines',
            showlegend=False,
            line=dict(
                color="White",
                dash='dash'
            )
        )
    )
    # Set axes properties
    fig.update_xaxes(range=[min_stress_balance, max_stress_balance],
                     zeroline=False,
                     gridcolor='gray',
                     mirror=True,
                     ticks='outside',
                     showline=True,
                     )
    fig.update_yaxes(range=[min_intensity_factor, max_intensity_factor],
                     gridcolor='gray',
                     mirror=True,
                     ticks='outside',
                     showline=True,
                     )
    # fig.show()
    plotly.offline.plot(fig, auto_open=False, filename=temp_file.name)
    GC.webpage(pathlib.Path(temp_file.name).as_uri())
def main():
    """Render a power-profile chart (Allen/Hunter 2019 categories).

    Maps the athlete's best-ever W/kg peaks (bars) and per-season peaks
    (spline lines) onto the qualitative category scale (Novice … World
    class). Output HTML is written to ``temp_file`` and shown in GC.
    """
    athlete = GC.athlete()
    athlete_name = athlete['name']
    athlete_gender = athlete['gender']
    # TODO investigate if/how FTP/CP and weight is needed from specific time i.s.o. latest.
    try:
        body_measurements = GC.seasonMeasures(all=True, group="Body")
        athlete_kg = body_measurements['WEIGHTKG'][-1]
        # when athlete weight incorrect fall back on athlete default weight
        if athlete_kg <= 0:
            athlete_kg = athlete['weight']
    except (SystemError, TypeError):
        # when an exception might be thrown when no body measures are used fall back on athlete default weight
        athlete_kg = athlete['weight']
    azz = GC.athleteZones(date=0, sport='bike')
    inckg = 1  # real value = 1
    lcp = azz['cp'][-1] * inckg
    lftp = azz['ftp'][-1] * inckg
    # Fetch meanmax power for current selected season Bike activities
    selected_date_ranges = GC.season(compare=True)
    # Fetch all season metrics (used to determine workout title)
    all_season_metrics = GC.seasonMetrics(all=True)
    # best-ever peaks over all data, one query per configured duration
    best_peaks = {}
    for duration in durations:
        best_peaks['best_peaks' + str(duration)] = GC.seasonPeaks(
            all=True, filter='Data contains "P"', series="wpk", duration=duration)
    # peaks per compared date range, one query per configured duration
    date_ranges_peaks = {}
    for duration in durations:
        date_ranges_peaks['date_ranges_peaks' + str(duration)] = GC.seasonPeaks(
            filter='Data contains "P"', series="wpk", duration=duration, compare=True)
    # Power profile chart Allen Hunter 2019
    power_profile_category = [
        ' ',
        'Novice1',
        'Novice2',
        'Fair',
        'Moderate',
        'Good',
        'Very good',
        'Excellent',
        'Exceptional',
        'World class',
    ]
    ipv5s = 1.05  # (var estimate...)
    ipv1h = 0.01667  # (var estimate...)
    tm = np.arange(0, durations[len(durations) - 1] + 1)
    pvm = len(tm) / 3600
    # Power profile chart values: W/kg at [~1s, 5s, 1m, 5m, 1h, end] per category
    world_class = pd.Series(
        [23.06 * ipv5s, 23.06, 10.68, 6.86, 5.93, 5.93 * (1 - (ipv1h * pvm))])
    exceptional = pd.Series(
        [21.25 * ipv5s, 21.25, 9.97, 6.23, 5.36, 5.36 * (1 - (ipv1h * pvm))])
    excellent = pd.Series(
        [19.43 * ipv5s, 19.43, 9.27, 5.59, 4.79, 4.79 * (1 - (ipv1h * pvm))])
    very_good = pd.Series(
        [17.61 * ipv5s, 17.61, 8.57, 4.96, 4.22, 4.22 * (1 - (ipv1h * pvm))])
    good = pd.Series(
        [15.80 * ipv5s, 15.80, 7.86, 4.32, 3.65, 3.65 * (1 - (ipv1h * pvm))])
    moderate = pd.Series(
        [13.98 * ipv5s, 13.98, 7.16, 3.69, 3.08, 3.08 * (1 - (ipv1h * pvm))])
    fair = pd.Series(
        [12.17 * ipv5s, 12.17, 6.45, 3.06, 2.51, 2.51 * (1 - (ipv1h * pvm))])
    novice2 = pd.Series(
        [10.35 * ipv5s, 10.35, 5.75, 2.42, 1.93, 1.93 * (1 - (ipv1h * pvm))])
    novice1 = pd.Series(
        [8.23 * ipv5s, 8.23, 4.93, 1.68, 1.27, 1.27 * (1 - (ipv1h * pvm))])
    untrained = pd.Series([0, 0, 0, 0, 0, 0])
    # world_class
    # exceptional   Domestic_pro
    # excellent     Cat1
    # very_good     Cat2
    # good          Cat3
    # moderate      Cat4
    # fair          Cat5
    # novice2       Untrained2
    # novice1       Untrained1
    # Untrained1
    # Default power profile chart durations
    if durations[len(durations) - 1] >= 3600:
        pp_durations = [1, 5, 60, 300, 3600, len(tm) - 1]
    if durations[len(durations) - 1] < 3600:
        pp_durations = [1, 5, 60, 300, len(tm) - 1, 3600]
    pan = [
        untrained, novice1, novice2, fair, moderate, good, very_good,
        excellent, exceptional, world_class
    ]
    mmpsdf = pd.DataFrame()
    # fill in the for every category the W/Kg values determined by power profile chart
    # (anchor points at pp_durations; everything else NaN until interpolated)
    for i in range(len(power_profile_category)):
        mmpsdf[power_profile_category[i]] = np.where(
            tm == pp_durations[0], pan[i][0],
            np.where(
                tm == pp_durations[1], pan[i][1],
                np.where(
                    tm == pp_durations[2], pan[i][2],
                    np.where(
                        tm == pp_durations[3], pan[i][3],
                        np.where(
                            tm == pp_durations[4], pan[i][4],
                            np.where(tm == pp_durations[5], pan[i][5], np.nan))))))
    # Interpolate values between de default durations
    mmpsdf[power_profile_category] = mmpsdf[
        power_profile_category].interpolate(method='pchip')
    best_peaks_y_values = determine_best_peak(best_peaks, all_season_metrics,
                                              durations, mmpsdf,
                                              power_profile_category)
    selected_date_ranges_y = determine_season_peaks(date_ranges_peaks,
                                                    selected_date_ranges,
                                                    all_season_metrics,
                                                    durations, mmpsdf,
                                                    power_profile_category)
    print("Season Y values" + str(selected_date_ranges_y))
    y_scale = np.arange(0, len(power_profile_category))
    # Create the plot
    fig = go.Figure()
    # Add annotations used for the y-axis labels
    for i in y_scale:
        fig.add_annotation(
            x=-0.9,
            y=i + 0.5,
            xref='x',
            yref='y',
            text=power_profile_category[i],
            showarrow=False,
        )
    # Add annotation for athlete information
    fig.add_annotation(
        x=len(x_labels) / 2,
        y=9.8,
        # x=-0.01,
        # y=1.1,
        text="Athlete: " + str(athlete_name) +
             '<br>Gender: ' + str(athlete_gender) +
             '<br>Weight:' + str(round(athlete_kg, 1)) + "kg " +
             '<br>FTP: ' + str(round(lftp / athlete_kg, 1)) + "W/kg, " + str(round(lftp)) + "W" +
             '<br>CP: ' + str(round(lcp / athlete_kg, 1)) + "W/kg, " + str(round(lcp)) + "W",
        showarrow=False,
        font=dict(size=11, color='rgb(210,210,210)'),
        align='left',
    )
    # Add bars for the best ever peaks mapped on category
    fig.add_trace(
        go.Bar(x=x_labels,
               y=best_peaks_y_values['peaks_weighted'],
               name='Best Ever',
               hovertext=[
                   determine_hover_text(peak, date, name, athlete_kg)
                   for peak, date, name in zip(
                       best_peaks_y_values['peaks'],
                       best_peaks_y_values['activity_dates'],
                       best_peaks_y_values['activity_names'])
               ],
               hoverinfo="text",
               marker=dict(color='orange', )))
    # Per date range add a smooth line (spline) to compare to your best
    for season_dict in selected_date_ranges_y:
        print(season_dict)
        fig.add_trace(
            go.Scatter(
                x=x_labels,
                y=season_dict['peaks_weighted'],
                hovertext=[
                    determine_hover_text(peak, date, name, athlete_kg)
                    for peak, date, name in zip(season_dict['peaks'],
                                                season_dict['activity_dates'],
                                                season_dict['activity_name'])
                ],
                hoverinfo="text",
                mode='lines+markers',
                showlegend=True,
                line_shape='spline',
                name=season_dict['name'],
                line=dict(color=season_dict['color']),
            ))
    # 11.26 W/Kg for 5s is stated as untrained values
    date_ranges_peaks5 = {'peak_wpk_5': [11.26]}
    average_untrained_weighted = get_category_index_value(
        5, date_ranges_peaks5, mmpsdf, power_profile_category)
    fig.add_annotation(
        x=len(x_labels) - 0.1,
        y=average_untrained_weighted - 0.2,
        text='Average <br>Untrained',
        align='left',
        showarrow=False,
    )
    fig.update_layout(
        margin=dict(l=20, r=50, b=150, t=100, pad=4),
        title='Power Profile ' + str(athlete['name']),
        paper_bgcolor=gc_bg_color,
        plot_bgcolor=gc_bg_color,
        font=dict(color=gc_text_color, size=12),
    )
    # Add horizontal average untrained line
    fig.add_shape(
        type="line",
        x0=-1,
        y0=average_untrained_weighted,
        x1=len(x_labels),
        y1=average_untrained_weighted,
        line=dict(
            color="Red",
            dash="dashdot",
        ),
    )
    fig.update_xaxes(
        showline=False,
        showgrid=False,
    )
    fig.update_yaxes(
        showticklabels=False,
        tickvals=y_scale,
        range=[0, max(y_scale) + 1],
        gridwidth=2,
        gridcolor='DarkGray',
    )
    fig.write_html(temp_file.name, auto_open=False)
    # strip the default body margin so the chart fills the GC pane
    text = Path(temp_file.name).read_text()
    text = text.replace('<body>', '<body style="margin: 0px;">')
    Path(temp_file.name).write_text(text)
    GC.webpage(Path(temp_file.name).as_uri())
def main():
    """Build the 'Annual Progress' Dash app and return it.

    Two dropdowns select years and cumulative metrics; ``update_figure``
    draws per-year cumulative subplots and ``display_click_data`` renders a
    comparison table for a clicked date. Returns the configured ``dash.Dash``
    app (caller is responsible for running it).
    """
    assets_dir = write_css()
    app = dash.Dash(assets_folder=assets_dir)
    season_metrics_raw = pd.DataFrame(GC.seasonMetrics(all=True))
    season_metrics = get_season_metrics_for_metrics_per_year(
        season_metrics_raw)
    years = sorted(season_metrics.year.unique().tolist(), reverse=True)
    year_options = []
    for year in years:
        year_options.append({"label": year, "value": year})
    metrics_options = []
    cumulative_metrics = get_cumulative_metrics()
    for column in cumulative_metrics.metric.tolist():
        metrics_options.append({"label": column, "value": column})
    app.layout = html.Div([
        html.Div([html.H1("Annual Progress Year")],
                 style={'textAlign': "center"}),
        html.Div(
            [
                dcc.Dropdown(id="year-value",
                             multi=True,
                             value=[years[0]],
                             options=year_options)
            ],
            className="row",
            style={
                "display": "block",
                "width": "60%",
                "margin-left": "auto",
                "margin-right": "auto"
            }),
        html.Div(
            [
                dcc.Dropdown(id="type-value",
                             value="Distance",
                             multi=True,
                             options=metrics_options)
            ],
            className="row",
            style={
                "display": "block",
                "width": "60%",
                "margin-left": "auto",
                "margin-right": "auto"
            }),
        html.Div([dcc.Graph(id="my-graph")], ),
        html.Div([html.Pre(id='click-data')], ),
    ], className="container")

    @app.callback(Output('my-graph', 'figure'), [
        Input('year-value', 'value'),
        Input('type-value', 'value'),
        Input('my-graph', 'clickData')
    ])
    def update_figure(selected, metrics, click_data):
        # One subplot per metric, two columns once there is more than one.
        # if only one metric is selected it is not an list transform to list
        if not isinstance(metrics, list):
            metrics = [metrics]
        cols = 1
        if len(metrics) > 1:
            cols = 2
        rows = int(
            Decimal(len(metrics) / 2).to_integral_value(rounding=ROUND_HALF_UP))
        row_height = 500
        fig = make_subplots(rows=rows,
                            cols=cols,
                            subplot_titles=metrics,
                            row_heights=np.full(rows, row_height).tolist())
        fig.update_layout(
            title="Aggregated '" + ','.join(metrics) + "'",
            paper_bgcolor=gc_bg_color,
            plot_bgcolor=gc_bg_color,
            clickmode='event',
            autosize=False,
            height=row_height * rows,
            font=dict(
                color=gc_text_color,
                size=chart_title_size,
            ),
        )
        row = 1
        col = 1
        chart_counter = 1
        vlines = []
        for metric in metrics:
            metric_type = cumulative_metrics[cumulative_metrics.metric ==
                                             metric].type.tolist()[0]
            if selected:
                # Workaround if there is an leap year you need to process that first
                # This because with x-axis not being a sequence number!!!
                # So change order of selected years
                reorder = False
                for selected_year in selected:
                    if calendar.isleap(selected_year):
                        reorder = True
                        first = selected_year
                if reorder and len(selected) > 1 and selected[0] != first:
                    selected.remove(first)
                    selected.insert(0, first)
                color_index = 0
                for selected_year in selected:
                    dff = season_metrics[season_metrics.year ==
                                         selected_year].copy()
                    # time metrics get h:m:s hover labels, others raw cumsum
                    if metric_type == 'time':
                        hover_text = [
                            str(metric) + ": " + str(format_hms_seconds(duration)) +
                            "<br>Date: " + date.strftime("%d-%m-%Y")
                            for duration, date in zip(
                                dff[metric].cumsum().tolist(), dff.date)
                        ]
                    else:
                        hover_text = [
                            str(metric) + ": " + str(cumsum) + "<br>Date: " +
                            date.strftime("%d-%m-%Y")
                            for cumsum, date in zip(
                                dff[metric].cumsum().tolist(), dff.date)
                        ]
                    fig.append_trace(
                        go.Scatter(
                            x=dff.period,
                            y=dff['cumsum_' + str(metric)],
                            # name=selected_year,
                            mode='lines+markers',
                            hoverinfo="text",
                            hovertext=hover_text,
                            showlegend=False,
                            line={'color': colors[color_index]},
                            marker={
                                'size': 8,
                                "opacity": dff.opacity,
                                "line": {
                                    'width': 0.5,
                                }
                            },
                        ),
                        row=row,
                        col=col)
                    color_index = color_index + 1
                yaxis = {
                    "title": metric,
                    "gridcolor": 'gray',
                }
                max_y = season_metrics[season_metrics.year.isin(selected)].groupby(
                    ["year"])["cumsum_" + str(metric)].max().max()
                if metric_type == 'time':
                    # season_metrics[season_metrics.year.isin()]
                    season_metrics[season_metrics.year == selected_year].copy()
                    number_of_ticks = 20
                    # 5% large y axis for the visibility
                    tickvals = range(0, int(max_y * 1.05),
                                     int(max_y / number_of_ticks))
                    ticktext = [
                        format_hms_seconds(tickval) for tickval in tickvals
                    ]
                    yaxis = {
                        "title": metric,
                        "gridcolor": 'gray',
                        "tickvals": [tickval for tickval in tickvals],
                        "ticktext": ticktext,
                    }
                fig.update_yaxes(
                    yaxis,
                    row=row,
                    col=col,
                )
                fig.update_xaxes(
                    {
                        "title": "Day of Year",
                        "gridcolor": 'gray',
                        "tickangle": 45,
                    },
                    row=row,
                    col=col,
                )
                # draw a red marker line on every subplot at the clicked x
                if click_data:
                    vlines.append(
                        dict(type='line',
                             yref='y' + str(chart_counter),
                             y0=0,
                             y1=max_y * 1.1,
                             xref='x' + str(chart_counter),
                             x0=click_data['points'][0]['pointIndex'],
                             x1=click_data['points'][0]['pointIndex'],
                             line=dict(color="Red", width=1)))
                    fig.update_layout(shapes=vlines)
            chart_counter = chart_counter + 1
            # set the next row and column
            if col == 2:
                col = 1
                row = row + 1
            else:
                col = 2
        add_legend_data(fig, selected)
        return fig

    @app.callback(Output('click-data', 'children'), [
        Input('my-graph', 'clickData'),
        Input('year-value', 'value'),
        Input('type-value', 'value')
    ])
    def display_click_data(click_data, selected_years, selected_metrics):
        # Render a per-year table (value + delta vs previous year) for the
        # clicked date; placeholder header until something is clicked.
        if not click_data:
            return html.Div([html.H1("Selected a date")])
        else:
            selected_date = click_data['points'][0]['x']
            if not isinstance(selected_years, list):
                selected_years = [selected_years]
            if not isinstance(selected_metrics, list):
                selected_metrics = [selected_metrics]
            dff = season_metrics[(season_metrics.period == selected_date) & (
                season_metrics.year.isin(selected_years))].copy()
            dff = dff.sort_values('year')
            # Create header for table and
            header = [html.Th('Year')]
            for metric in selected_metrics:
                dff['delta_' + str(metric)] = round(
                    dff['cumsum_' + str(metric)].diff(+1), 2)
                header.append(html.Th(metric))
                header.append(html.Th('Δ ' + str(metric)))
            # For every year create and new row in the table
            table_rows_data = []
            for i in range(len(dff)):
                tr = [html.Td(dff.iloc[i].year)]
                for metric in selected_metrics:
                    metric_type = cumulative_metrics[
                        cumulative_metrics.metric == metric].type.tolist()[0]
                    cumsum = round(dff.iloc[i]['cumsum_' + str(metric)], 2)
                    delta = round(dff.iloc[i]['delta_' + str(metric)], 2)
                    if metric_type == 'time':
                        cumsum = format_hms_seconds(cumsum)
                        delta = format_hms_seconds(delta)
                    # green for progress, red for regression vs previous year
                    if dff.iloc[i]['delta_' + str(metric)] >= 0:
                        style = styles['green']
                    else:
                        style = styles['red']
                    tr.append(html.Td(cumsum))
                    tr.append(html.Td(delta, style=style))
                table_rows_data.append(html.Tr(tr))
            return html.Div([
                html.H1("Selected Date: " + str(click_data['points'][0]['x'])),
                html.Table([html.Tr(header)] + [tr for tr in table_rows_data])
            ])

    return app
## ## Python program will run on selection. ## from GC_Wrapper import GC_wrapper as GC import pathlib from datetime import datetime import time import collections import tempfile import plotly import plotly.graph_objs as go import numpy as np import statistics compares = GC.season(compare=True) activities_list = GC.activities(filter='isRun<>0') athlete_zones = GC.athleteZones() hr_max = max(athlete_zones['hrmax']) temp_file = tempfile.NamedTemporaryFile(mode="w+t", prefix="GC_", suffix=".html", delete=False) # Define GC background color gc_bg_color = 'rgb(52,52,52)' # Define GC Text color gc_text_color = 'rgb(255,255,255)' # For an given subset of activities this function will return a data frame that contains all hr with corresponding speed
def main():
    """Render the 'Average Power per Interval' chart for the current activity.

    Requires power data and at least one interval; otherwise an explanatory
    HTML page is written instead. Output goes to ``temp_file`` and is shown
    via ``GC.webpage``.
    """
    # Get data
    activity_metric = GC.activityMetrics()
    activity = GC.activity(activity=None)
    zone = GC.athleteZones(date=activity_metric["date"], sport="bike")
    all_intervals = GC.activityIntervals()
    selected_type = None
    if 'power' in activity:
        if len(all_intervals['type']) > 0:
            all_intervals = pd.DataFrame(all_intervals)
            selected_type = determine_selection_type(all_intervals)
        else:
            fail_msg = "No intervals found in this activities, possible solutions: <br>" \
                       "Create manual intervals or enable interval auto-discovery via Tools->Options->Intervals"
    else:
        fail_msg = "No power data found in this activities "
    if selected_type:
        # Define chart title
        title = "Average Power per Interval " \
                "(CP:" + str(zone["cp"][0]) + ") " + \
                "Selected Interval Type=" + str(selected_type)
        intervals = all_intervals[all_intervals['type'].str.contains(selected_type)]
        # Identify for every interval the zone color
        breaks = zone["zoneslow"][0]
        zone_colors = zone["zonescolor"][0]
        interval_colors = []
        avg_power_pct = []
        for interval in intervals["Average_Power"]:
            index = bisect.bisect_left(breaks, interval)
            interval_colors.append(zone_colors[index - 1])
            avg_power_pct.append(str(round((interval / zone["cp"][0]) * 100, 1)) + "%")
        # Add percentage labels
        # NOTE(review): this loop variable shadows the `zone` dict above;
        # harmless here because the dict is not used afterwards.
        legend = []
        zone_index = 1
        for zone in breaks:
            legend.append("Z" + str(zone_index) + "(" + str(zone) + ")")
            zone_index += 1
        # array of lap names to printed on the x-axis
        lap_names = np.asarray(intervals["name"])
        # array of y values
        watts_y = np.asarray(intervals["Average_Power"])
        # define x-axis (start time of the intervals)
        x = np.asarray(intervals["start"])
        # arrays used for text for every interval
        distance = np.asarray(intervals["Distance"])
        stop = np.asarray(intervals["stop"])
        duration = np.asarray(intervals["Duration"])
        # duration = [stop - start for stop, start in zip(stop, x)]
        # define x-axis heart rate
        heart_rate = np.asarray(list(activity['heart.rate']))
        # define x-axis seconds
        seconds = np.asarray(list(activity['seconds']))
        # Start building chart
        fig = go.Figure()
        add_legend_data(fig, legend, zone_colors)
        add_default_layout(fig, title, watts_y)
        # USER/ALL intervals are drawn on the real timeline (shapes + HR line);
        # other interval types are drawn as equally-spaced bars.
        if selected_type == "USER" or selected_type == "ALL":
            add_annotation(fig, x, watts_y, duration, distance, avg_power_pct,
                           lap_names)
            add_interval_shapes(fig, x, watts_y, duration, lap_names,
                                interval_colors)
            add_heart_rate_line(fig, seconds, heart_rate)
        else:
            x = np.arange(0.5, len(lap_names), 1)
            add_annotation(fig, x, watts_y, duration, distance, avg_power_pct,
                           lap_names, bar_chart=True)
            add_interval_bars(fig, x, watts_y, lap_names, interval_colors,
                              selected_type)
        plotly.offline.plot(fig, auto_open=False, filename=temp_file.name)
    else:
        create_unavailable_html(fail_msg)
    GC.webpage(pathlib.Path(temp_file.name).as_uri())
def main():
    """Assemble all entity/zone data and write the 3D (Cesium) ride HTML.

    Requires GPS data; without latitude a 'no valid data' page is written.
    Each stage is timed and printed for profiling.
    """
    start_t = datetime.now()
    activity = GC.activity()
    activity_intervals = GC.activityIntervals()
    activity_metric = GC.activityMetrics()
    zones = GC.athleteZones(date=activity_metric["date"], sport="bike")
    activity_df = pd.DataFrame(activity, index=activity['seconds'])
    season_peaks = GC.seasonPeaks(all=True,
                                  filter='Data contains "P"',
                                  series='power',
                                  duration=1)
    print('Gathering data duration: {}'.format(datetime.now() - start_t))
    # For testing purpose select only x number of seconds
    if temp_duration_selection:
        activity_df = activity_df.head(temp_duration_selection)
    # rebase altitude to start just above zero
    min_altitude = activity_df.altitude.min()
    activity_df.altitude = activity_df.altitude - min_altitude + 0.0001  # small offset need for cesium rendering
    # Stop if no gps data is found in the activity
    if "latitude" in activity:
        start_t = datetime.now()
        interval_entities = get_interval_entities(activity_df,
                                                  activity_intervals, zones)
        print('Get interval entities duration: {}'.format(datetime.now() - start_t))
        start_t = datetime.now()
        altitude_entities = determine_altitude_entities(
            activity_df, zones, slice_distance)
        print('Get altitude entities duration: {}'.format(datetime.now() - start_t))
        start_t = datetime.now()
        selected_interval_entities, selected_interval_data_sources = get_selected_interval_entities(
            activity_df, activity_intervals)
        print('Get select intervals entities duration: {}'.format(
            datetime.now() - start_t))
        start_t = datetime.now()
        czml_block = get_czml_block_str(activity_df, activity_metric)
        print('Get entire ride entities + ride path duration: {}'.format(
            datetime.now() - start_t))
        start_t = datetime.now()
        # power axis: scale to the all-time 1s peak when power is present
        if "power" in activity:
            max_watt = max(season_peaks['peak_power_1'])
            power_zone_ranges = get_zone_ranges(zones['zoneslow'][0],
                                                zones['zonescolor'][0],
                                                max_watt)
        else:
            max_watt = 1500
            power_zone_ranges = ""
        print('Get power ranges duration: {}'.format(datetime.now() - start_t))
        start_t = datetime.now()
        # derive HR zone boundaries as percentages of LTHR
        hr_lthr = zones['lthr'][0]
        hr_max = zones['hrmax'][0]
        zones_hr = []
        for i in hr_zone_pct:
            zones_hr.append(round(hr_lthr / 100 * i))
        if "heart.rate" in activity:
            hr_zone_ranges = get_zone_ranges(zones_hr,
                                             zone_hr_colors,
                                             hr_max,
                                             axis="axis_hr")
        else:
            hr_zone_ranges = ""
        print('Get heart rate ranges duration: {}'.format(datetime.now() - start_t))
        start_t = datetime.now()
        write_html(activity_df, activity_metric, activity_intervals,
                   altitude_entities, interval_entities,
                   selected_interval_entities, selected_interval_data_sources,
                   czml_block, power_zone_ranges, max_watt, hr_zone_ranges,
                   hr_max)
        print('write html duration: {}'.format(datetime.now() - start_t))
    else:
        write_no_valid_data_html()
    GC.webpage(pathlib.Path(temp_file.name).as_uri())
def main():
    """Fit classic + extended CP models to season meanmax power and plot.

    Produces a 3x3 subplot figure: meanmax vs eCP model, residual/normalized
    curves, and suggested short/medium/long test-target panels, annotated
    with the CP and W' estimates. Output goes to ``temp_file`` / GC.webpage.
    """
    weight = "regress"  # one of "none", "regress" or "date" or "logistic"
    # Get data
    mmp = GC.seasonMeanmax()
    pmax = max(mmp['power'])
    # get meanmax power data as whole watts
    yy = np.rint(np.asarray(mmp["power"])[1:])
    dd = np.asarray(mmp["power_date"])[1:]
    xx = np.asarray(range(1, len(yy)))
    ep = np.ones(len(yy))  ##none drops thru
    print(len(yy), len(ep))
    # truncate to first 2 hours of data
    if len(yy) > 7200:
        yy = yy[0:7200]
        xx = xx[0:7200]
        ep = ep[0:7200]
    # Fit with "regress" weighting if enough data
    if len(yy) > 1200:
        # classic CP fit on work (W = P*t) over the 2min-20min range
        x = xx[120:1200]
        y = yy[120:1200] * x
        d = dd[120:1200]
        slope, intercept, r, p, e = stats.linregress(x, y)
        print("Classic CP=", slope, "W'=", intercept)
        if weight == "regress":
            # weight the actuals vs regression
            # scale by uncertainty - big numbers = less good data
            ep = 1 / (yy / (slope + (intercept / xx)))
            # anchor on 1s power... not happy !!!!
            ep[0] = 1
    if weight == "date":
        # only keep one unique effort per day
        # use regress to determine hardest effort
        # NOTE(review): this branch rebinds `weight` to a number inside the
        # loop and relies on `slope` from the fit above — confirm intent
        # before enabling this mode.
        nodate = datetime.date(1900, 1, 1)
        for i in range(0, len(yy) - 1):
            # if it survived pass
            if (dd[i] != nodate):
                working = dd[i]
                keep = i
                weight = (yy[i] - slope) * xx[i]
                # whizz thru all entries to end with the
                # same date and remember which to keep
                # when keep is bettered, clear it
                for t in range(i + 1, len(yy)):
                    if (dd[t] == working):
                        thisweight = ((yy[t] * xx[t]) - weight) / xx[t]
                        if (thisweight > slope):
                            dd[keep] = nodate
                            ep[keep] = 8
                            keep = t
                        else:
                            dd[t] = nodate
                            ep[t] = 8
                ep[keep] = 1
                dd[keep] = nodate
        # anchor 1s power
        ep[0] = 1
    # initial fit
    params = Parameters()
    params.add('paa', value=811)
    params.add('paadec', value=-2)
    params.add('cp', value=280)
    params.add('tau', value=1.208)
    params.add('taudel', value=-4.8)
    params.add('cpdel', value=-0.9)
    params.add('cpdec', value=-0.583)
    params.add('cpdecdel', value=-180)
    # handy
    zero = np.zeros(len(yy))
    one = np.ones(len(yy))
    # fit
    out = minimize(excp, params, args=(xx, yy, ep))
    out.params.pretty_print()
    # W' from classic fit vs extended model; report the average of both
    w1 = intercept
    w2 = out.params["cp"].value * out.params["tau"].value
    w = (w1 + w2) / 2
    print("Extended CP=", out.params["cp"].value, "W'=",
          out.params["cp"].value * out.params["tau"].value)
    # model derived values
    mod = excp(out.params, xx, zero, one) * -1
    # substract predicted (mod) and measured (yy)
    residual = np.subtract(yy, mod)
    # Normalize
    a = -10
    b = 10
    minimal = min(residual)
    maximal = max(residual)
    # norm = [(number - a) / (b - a) for number in residual]
    norm = [
        a + ((number - minimal) * (b - a) / (maximal - minimal))
        for number in residual
    ]
    # Find short medium long duration test targets
    short_duration_bracket = [15, 40]  # 15 - 40 seconds
    medium_duration_bracket = [60, 900]  # 1 - 15 minutes
    long_duration_bracket = [1200, 2400]  # 20 - 40 minutes
    short_duration_index = norm.index(
        min(norm[short_duration_bracket[0]:short_duration_bracket[1]]))
    medium_duration_index = norm.index(
        min(norm[medium_duration_bracket[0]:medium_duration_bracket[1]]))
    long_duration_index = norm.index(
        min(norm[long_duration_bracket[0]:long_duration_bracket[1]]))
    # Start building chart
    fig = make_subplots(
        rows=3,
        cols=3,
        specs=[[{
            "colspan": 3
        }, None, None], [{
            "colspan": 3
        }, None, None], [{}, {}, {}]],
        subplot_titles=("Extended CP Model", "Normalized/Residual",
                        "Short duration test target",
                        "Medium duration test target",
                        "Long duration test target"),
        vertical_spacing=0.10,
    )
    # meanmax curve
    fig.add_trace(go.Scatter(
        x=xx,
        y=yy,
        mode='markers',
        name="mean maximal",
        hovertext=[
            "Watts: " + str(watts) + "<br>Time: " + str(format_seconds(i))
            for i, watts in zip(xx, yy)
        ],
        hoverinfo="text",
    ),
                  row=1,
                  col=1)
    # model curve
    fig.add_trace(go.Scatter(
        x=xx,
        y=mod,
        line=dict(shape='spline'),
        name="eCP model",
        hovertext=[
            "Watts: " + str(int(watts)) + "<br>Time: " + str(format_seconds(i))
            for i, watts in zip(xx, mod)
        ],
        hoverinfo="text",
    ),
                  row=1,
                  col=1)
    # Residual
    fig.add_trace(go.Scatter(
        x=xx,
        y=residual,
        line=dict(shape='spline'),
        name="Residual",
        hovertext=[
            "Residual Watts: " + str(int(watts)) + "<br>Time: " +
            str(format_seconds(i)) for i, watts in zip(xx, residual)
        ],
        hoverinfo="text",
    ),
                  row=2,
                  col=1)
    # Normalized
    fig.add_trace(go.Scatter(
        x=xx,
        y=norm,
        line=dict(shape='spline'),
        name="Normalized",
        hovertext=[
            "Normalized: " + str(round(normalize, 2)) + "<br>Time: " +
            str(format_seconds(i)) for i, normalize in zip(xx, norm)
        ],
        hoverinfo="text",
    ),
                  row=2,
                  col=1)
    add_duration_target(fig, mod[short_duration_index],
                        yy[short_duration_index], short_duration_bracket,
                        xx[short_duration_index], "Short", 1, pmax)
    add_duration_target(fig, mod[medium_duration_index],
                        yy[medium_duration_index], medium_duration_bracket,
                        xx[medium_duration_index], "Medium", 2, pmax)
    add_duration_target(fig, mod[long_duration_index],
                        yy[long_duration_index], long_duration_bracket,
                        xx[long_duration_index], "Long", 3, pmax)
    # tick_values = np.logspace(0.01, math.log10(max(xx)), 50, base=10, endpoint=True)
    tick_values = [
        1, 2, 3, 4, 5, 6, 8, 10, 15, 20, 30, 40, 60, 120, 180, 240, 360, 480,
        600, 900, 1200, 1800, 2400, 3600, 7200
    ]
    fig.update_layout(
        go.Layout(
            paper_bgcolor=gc_bg_color,
            plot_bgcolor=gc_bg_color,
            font=dict(color=gc_text_color, size=12),
            showlegend=True,
            xaxis1=dict(
                type='log',
                tickangle=45,
                tickvals=tick_values,
                ticktext=[format_seconds(i) for i in tick_values],
            ),
            xaxis2=dict(
                type='log',
                tickangle=45,
                tickvals=tick_values,
                ticktext=[format_seconds(i) for i in tick_values],
            ),
            margin={'t': 10},
        ))
    # NOTE(review): appending the copied list back with `+=` duplicates the
    # pre-existing subplot-title annotations — looks like it should assign
    # (`=`) instead of extend; confirm against the rendered chart.
    current_annotation_list = list(fig["layout"]["annotations"])
    current_annotation_list.append(
        # cp report
        go.layout.Annotation(x=1,
                             y=400,
                             showarrow=False,
                             text='CP ~%d (%d - %d)' %
                             ((slope + out.params["cp"].value) / 2, slope,
                              out.params["cp"].value),
                             font=dict(family='Courier New, monospace',
                                       size=20,
                                       color="#ff0000"),
                             xref="x1",
                             yref="y1"))
    current_annotation_list.append(
        # w' report
        go.layout.Annotation(x=1,
                             y=220,
                             xref='x1',
                             yref='y1',
                             text="W\' ~%d (%d - %d)" % (w, w1, w2),
                             font=dict(family='Courier New, monospace',
                                       size=20,
                                       color="#ff0000"),
                             showarrow=False))
    fig["layout"]["annotations"] += tuple(current_annotation_list)
    plotly.offline.plot(fig, auto_open=False, filename=temp_file.name)
    GC.webpage(pathlib.Path(temp_file.name).as_uri())
def main():
    """Plot mean power per heart-rate value, one line per compared season.

    For each compared season, the activities inside its date range are
    collected, their HR→power samples aggregated, and the mean power per HR
    drawn as a line. Output goes to ``temp_file`` / ``GC.webpage``.
    """
    compares = GC.season(compare=True)
    activities_list = GC.activities(
        filter='Data contains "P" and Data contains "H"')
    athlete_zones = GC.athleteZones()
    hr_max = max(athlete_zones['hrmax'])
    start_time = time.time()
    data = []
    for start, end, color, name in zip(compares['start'], compares['end'],
                                       compares['color'], compares['name']):
        # print("start: " + str(start) + ", end: " + str(end) + ", color: " + str(color))
        # season boundaries as datetimes (midnight) for comparison below
        start_dt = datetime.combine(start, datetime.min.time())
        end_dt = datetime.combine(end, datetime.min.time())
        # keep only activities inside this season's date range
        activities_sub_list = []
        for i in np.arange(0, len(activities_list)):
            if activities_list[i] >= start_dt and activities_list[i] <= end_dt:
                activities_sub_list.append(activities_list[i])
        # print(activities_sub_list)
        heart_rate_power_dict = get_hr_list_of_activities(activities_sub_list)
        mean_power_by_hr = create_mean_power_by_hr(heart_rate_power_dict)
        # print(mean_power_by_hr)
        trace = go.Scatter(
            x=list(mean_power_by_hr.keys()),
            y=list(mean_power_by_hr.values()),
            mode='lines+markers',
            line=dict(color=color, ),
            name=name,
            showlegend=True,
        )
        data.append(trace)
    # End for
    layout = go.Layout(
        title="Average power at HR",
        paper_bgcolor=gc_bg_color,
        plot_bgcolor=gc_bg_color,
        font=dict(color=gc_text_color, size=12),
        yaxis=dict(
            title="Watts",
            nticks=50,
            rangemode='nonnegative',
            showgrid=True,
            zeroline=True,
            showline=True,
            gridcolor="grey",
        ),
        xaxis=dict(
            range=[90, hr_max + 5],
            nticks=int(hr_max / 5),
            ticks='outside',
            showgrid=True,
            zeroline=True,
            showline=True,
            gridcolor="grey",
            title="HR",
            rangemode='nonnegative',
        ),
        margin=go.layout.Margin(l=100, r=0, b=100, t=150, pad=0),
    )
    fig = go.Figure(data=data, layout=layout)
    plotly.offline.plot(fig, auto_open=False, filename=temp_file.name)
    GC.webpage(pathlib.Path(temp_file.name).as_uri())
    print("Total graph time: " + str(time.time() - start_time))
if thread.name == "dash": print("terminating thread: " + str(thread.getName())) print("terminating thread_id : " + str(thread.ident)) ctypes.pythonapi.PyThreadState_SetAsyncExc( ctypes.c_long(thread.ident), ctypes.py_object(SystemExit)) def wait_for_server(): timeout = time.time() + 30 # seconds from now while True: try: resp = requests.get("http://127.0.0.1:8050/ ") if resp.status_code == 200: print("Server (Re)started go load webpage") break except requests.exceptions.ConnectionError: print("No Connection YET") if time.time() > timeout: print("STOP retry web server connection (timed out)") break if __name__ == '__main__': kill_previous_dash_server() threading.Thread(target=run_server, args=(main(), ), name="dash").start() wait_for_server() # Use for development #run_server(main()) GC.webpage("http://127.0.0.1:8050/")
} th, td { padding: 8px; text-align: left; border-top: 1px solid #343434; border-left: 1px solid #343434; border-right: 1px solid #343434; border-bottom: 1px solid #ddd; } </style> <meta charset="utf-8"> </head> <body class="black_back"> ''' + ' '.join([str(line) for line in html]) + ''' </body> </html> ''' temp_file.writelines(html) temp_file.close() if __name__ == "__main__": main() GC.webpage(pathlib.Path(temp_file.name).as_uri())
def main():
    """Build the Dash application for comparing activities side by side.

    Gathers the compared activities (sample DataFrame, metrics, and USER
    intervals) from GoldenCheetah, then wires up a Dash app containing:

    * a smoothing slider driving the full-ride plot,
    * an interval dropdown driving the interval plot,
    * two zoom-aware summary tables (avg power / HR / cadence) that update
      when the user zooms either graph.

    Returns the configured ``dash.Dash`` instance; the caller serves it.
    """
    start_time = datetime.now()
    assets_dir = write_css()
    print('write css duration: {}'.format(datetime.now() - start_time))

    start_time = datetime.now()
    activity_metrics = GC.activityMetrics(compare=True)
    activities = []
    for activity_metric in activity_metrics:
        # GC identifies an activity by its exact start datetime.
        activity_date = datetime.combine(activity_metric[0]['date'],
                                         activity_metric[0]['time'])
        act = GC.activity(activity=activity_date)
        act_intervals = GC.activityIntervals(type="USER",
                                             activity=activity_date)
        activities.append({
            'activity': pd.DataFrame(act, index=act['seconds']),
            'metrics': activity_metric[0],
            'intervals': act_intervals,
        })

    # Dropdown options: one entry per USER interval, tagged with the
    # activity's start datetime so equally named intervals stay unique.
    # The value encodes "<interval name>;;(<datetime>)" and is split again
    # in zoom_event_interval below.
    interval_options = []
    for activity in activities:
        time_title = "(" + str(
            datetime.combine(activity['metrics']['date'],
                             activity['metrics']['time'])) + ")"
        intervals = activity['intervals']
        for i in range(len(intervals['name'])):
            name = intervals['name'][i]
            interval_options.append({
                "label": name + " " + time_title,
                "value": name + ";;" + time_title
            })
    print('Gathering data duration: {}'.format(datetime.now() - start_time))

    app = dash.Dash(assets_folder=assets_dir)
    app.layout = html.Div([
        html.Div([
            "Select smooth value for graph: ",
            dcc.Slider(
                id='smooth-value-ride-plot',
                min=1,
                max=200,
                step=4,
                value=20,
            )
        ],
                 className="row",
                 style={
                     "display": "block",
                     "margin-left": "0px",
                     "width": "500px"
                 }),
        html.P(dcc.Graph(id="ride-plot-graph-smooth"), className="ride_plot"),
        html.Div([html.Pre(id='zoom-data')], ),
        html.Div(
            [
                dcc.Dropdown(id="interval-value",
                             value="",
                             multi=True,
                             options=interval_options)
            ],
            className="row",
            style={
                "display": "block",
                "width": "60%",
                "margin-left": "auto",
                "margin-right": "auto"
            }),
        html.P(dcc.Graph(id="interval-plot-graph-smooth"),
               className="ride_plot"),
        html.Div([html.Pre(id='zoom-data-interval')], ),
    ],
                          className='container')

    @app.callback(Output('ride-plot-graph-smooth', 'figure'),
                  [Input('smooth-value-ride-plot', 'value')])
    def update_smooth_ride_plot(smooth_value):
        # Redraw the full-ride plot whenever the smoothing slider moves.
        before = datetime.now()
        fig = ride_plot_smooth(activities, smooth_value=int(smooth_value))
        print('Create ride plot duration: {}'.format(datetime.now() - before))
        return fig

    @app.callback(Output('interval-plot-graph-smooth', 'figure'), [
        Input('smooth-value-ride-plot', 'value'),
        Input('interval-value', 'value')
    ])
    def update_interval_plot(smooth_value, interval_selected):
        # Redraw the interval plot on slider or dropdown changes.
        before = datetime.now()
        fig = interval_plot_smooth(activities, interval_selected, smooth_value)
        print('Create interval plot duration: {}'.format(datetime.now() -
                                                         before))
        return fig

    @app.callback(Output('zoom-data', 'children'),
                  [Input('ride-plot-graph-smooth', 'relayoutData')
                   ])  # this triggers the event
    def zoom_event(relayout_data):
        # Summary table (avg power / HR / cadence per activity) for the part
        # of the ride currently visible in the ride plot.
        header = [html.Th('metric')]
        table_rows_data = []
        tr_power = [html.Td('Avg Power')]
        tr_heartrate = [html.Td('Avg HR')]
        tr_cadence = [html.Td('Avg Cadence')]
        for activity in activities:
            act1 = activity['activity'].filter(
                ["seconds", "power", "heart.rate", "cadence"])
            start = act1.seconds.iloc[0]
            stop = act1.seconds.iloc[-1]
            if relayout_data and 'xaxis.range[0]' in relayout_data:
                # The user zoomed: restrict the averages to the zoom window.
                act1 = act1.loc[
                    (act1.seconds >= relayout_data['xaxis.range[0]'])
                    & (act1.seconds <= relayout_data['xaxis.range[1]'])]
                start = relayout_data['xaxis.range[0]']
                stop = relayout_data['xaxis.range[1]']
            header.append(
                html.Th(" (" + str(
                    datetime.combine(activity['metrics']['date'],
                                     activity['metrics']['time'])) + ")"))
            # A column may be absent (e.g. no power meter), so guard each one.
            metric = 'power'
            if metric in act1:
                tr_power.append(html.Td(round(act1.power.mean(), 2)))
            metric = 'heart.rate'
            if metric in act1:
                tr_heartrate.append(html.Td(round(act1[metric].mean(), 2)))
            metric = 'cadence'
            if metric in act1:
                tr_cadence.append(html.Td(round(act1[metric].mean(), 2)))
        table_rows_data.append(html.Tr(tr_power))
        table_rows_data.append(html.Tr(tr_heartrate))
        table_rows_data.append(html.Tr(tr_cadence))
        return html.Div([
            html.H1("Selected Time: " + str(format_hms_seconds(start)) +
                    " - " + str(format_hms_seconds(stop))),
            html.Table([html.Tr(header)] + [tr for tr in table_rows_data])
        ])

    @app.callback(
        Output('zoom-data-interval', 'children'),
        [
            Input('interval-plot-graph-smooth',
                  'relayoutData'),  # this triggers the event
            Input('interval-value', 'value')
        ])
    def zoom_event_interval(relayout_data, intervals_selected):
        # Summary table for the selected intervals, restricted to the zoom
        # window of the interval plot when the user zoomed.
        start = 0
        stop = 0
        header = [html.Th('metric')]
        table_rows_data = []
        tr_power = [html.Td('Avg Power')]
        tr_heartrate = [html.Td('Avg HR')]
        tr_cadence = [html.Td('Avg Cadence')]
        for process_interval in intervals_selected:
            # Dropdown value format is "<interval name>;;(<datetime>)".
            interval_name, activity_date = process_interval.split(";;")
            for process_activity in activities:
                current_time_title = "(" + str(
                    datetime.combine(
                        process_activity['metrics']['date'],
                        process_activity['metrics']['time'])) + ")"
                if current_time_title == activity_date:
                    process_intervals = process_activity['intervals']
                    for index in range(len(process_intervals['name'])):
                        if process_intervals['name'][index] == interval_name:
                            act1 = process_activity['activity'].filter(
                                ["seconds", "power", "heart.rate", "cadence"])
                            act1 = act1.loc[
                                (act1.seconds >=
                                 process_intervals['start'][index])
                                & (act1.seconds <=
                                   process_intervals['stop'][index])]
                            # Rebase interval time to 0 so intervals from
                            # different activities align on the x axis.
                            act1.seconds = np.arange(len(act1))
                            stop = max(stop, act1.seconds.iloc[-1])
                            if relayout_data and \
                                    'xaxis.range[0]' in relayout_data:
                                act1 = act1.loc[
                                    (act1.seconds >=
                                     relayout_data['xaxis.range[0]'])
                                    & (act1.seconds <=
                                       relayout_data['xaxis.range[1]'])]
                                start = relayout_data['xaxis.range[0]']
                                stop = relayout_data['xaxis.range[1]']
                            # BUGFIX: the header must show the activity that
                            # owns this interval (process_activity); the old
                            # code read the stale loop variable `activity`
                            # left over from building interval_options, so
                            # every header showed the last activity's date.
                            header.append(
                                html.Th(interval_name + " (" + str(
                                    datetime.combine(
                                        process_activity['metrics']['date'],
                                        process_activity['metrics']['time']))
                                        + ")"))
                            metric = 'power'
                            if metric in act1:
                                tr_power.append(
                                    html.Td(round(act1.power.mean(), 2)))
                            metric = 'heart.rate'
                            if metric in act1:
                                tr_heartrate.append(
                                    html.Td(round(act1[metric].mean(), 2)))
                            metric = 'cadence'
                            if metric in act1:
                                tr_cadence.append(
                                    html.Td(round(act1[metric].mean(), 2)))
        table_rows_data.append(html.Tr(tr_power))
        table_rows_data.append(html.Tr(tr_heartrate))
        table_rows_data.append(html.Tr(tr_cadence))
        return html.Div([
            html.H1("Selected Time: " + str(format_hms_seconds(start)) +
                    " - " + str(format_hms_seconds(stop))),
            html.Table([html.Tr(header)] + [tr for tr in table_rows_data])
        ])

    return app
def main():
    """Estimate the aerobic threshold heart rate from R-R (HRV) data.

    Reads the activity's R-R series, removes artifacts, computes per-window
    DFA features via ``computeFeatures``, then fits a linear regression of
    heart rate on alpha 1 and predicts the heart rate at alpha 1 == 0.75
    (the aerobic threshold). Writes a plotly page with the alpha-1 trend to
    ``temp_file`` and shows it in GoldenCheetah; shows an empty page when
    the activity has no HRV xdata.
    """
    RRs = list(GC.xdataSeries('HRV', 'R-R'))
    if RRs:
        # Transform R-R intervals from milliseconds to seconds.
        df_temp = pd.DataFrame()
        df_temp['RR'] = RRs
        RRs = df_temp.RR.div(1000).tolist()

        # Artifact correction: keep a beat only when it differs less than 5%
        # from the previous beat. BUGFIX: start at index 1 — the old loop
        # started at 0, so RRs[i - 1] wrapped around to RRs[-1] and compared
        # the first beat against the *last* beat of the recording.
        artifact_correction_threshold = 0.05
        filtered_RRs = []
        for i in range(1, len(RRs)):
            lower = RRs[i - 1] * (1 - artifact_correction_threshold)
            upper = RRs[i - 1] * (1 + artifact_correction_threshold)
            if lower < RRs[i] < upper:
                filtered_RRs.append(RRs[i])

        # Elapsed-time axis: cumulative sum of the accepted R-R intervals.
        x = np.cumsum(filtered_RRs)
        df = pd.DataFrame()
        df['timestamp'] = x
        df['RR'] = filtered_RRs

        features_df = computeFeatures(df)
        print(features_df.head())
        print("Mean Alpha 1: " +
              str(round(np.mean(features_df['alpha1']), 2)))

        # Rather arbitrary, based on visual inspection of the data.
        threshold_sdnn = 10
        features_df_filtered = features_df.loc[
            features_df['sdnn'] < threshold_sdnn, ]
        print("Mean Alpha 1, filtered based on sdnn > " +
              str(threshold_sdnn) + ": " +
              str(round(np.mean(features_df_filtered['alpha1']), 2)))

        from sklearn.linear_model import LinearRegression
        length = len(features_df['alpha1'])
        reg = LinearRegression().fit(
            features_df['alpha1'].values.reshape(length, 1),
            features_df['heartrate'].values.reshape(length, 1))
        # Heart rate predicted at alpha 1 == 0.75.
        prediction = reg.predict(np.array(0.75).reshape(1, 1))
        # BUGFIX: extract the scalar before flooring — math.floor() on a
        # (1, 1) ndarray raises on NumPy >= 1.25. Also fixes the
        # "Predication" typo in the log message.
        print("Prediction: " + str(math.floor(prediction.item())))

        fig = go.Figure()
        fig.add_trace(
            go.Scatter(
                x=features_df.timestamp,
                y=features_df.alpha1,
                mode='lines+markers',
            ))
        # This can be used when an intensity is only increasing;
        # also change the x axis when enabling this:
        # fig.add_trace(go.Scatter(x=features_df.heartrate,
        #                          y=features_df.alpha1,
        #                          mode='lines+markers',))
        fig.update_layout(
            title='Estimated aerobic threshold heart rate (alpha 1 = 0.75): '
            + (str(math.floor(prediction[0].item()))) + " bpm",
            paper_bgcolor=gc_bg_color,
            plot_bgcolor=gc_bg_color,
            font=dict(color=gc_text_color, size=12),
            xaxis=dict(title="Window", ),
            yaxis=dict(title="alpha 1", ),
        )
        plotly.offline.plot(fig, auto_open=False, filename=temp_file.name)
        # Strip the default body margin from the generated page.
        text = pathlib.Path(temp_file.name).read_text()
        text = text.replace('<body>', '<body style="margin: 0px;">')
        pathlib.Path(temp_file.name).write_text(text)
    else:
        create_empty_page()
    GC.webpage(pathlib.Path(temp_file.name).as_uri())