import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns


def plot_segment_performance_to_investment(config, segment_id):
    """Plot ride distance, ride duration and segment effort times over time."""
    client = config.client
    activities = client.get_activities()
    ridden_segs = read_data(DATAFILE)
    try:
        efforts = ridden_segs[segment_id].efforts
    except KeyError:
        print('Segment with given ID not ridden at least once')
        print('Consider updating your ridden segments or check the ID')
        return

    datetimes = []  # activity start dates (datetime.datetime objects)
    distances = []  # distances in kilometers (floats)
    durations = []  # moving times as timedelta-like values
    for a in activities:
        if a.type != 'Ride':
            continue
        datetimes.append(a.start_date)
        distances.append(a.distance / 1000)  # meters -> kilometers
        durations.append(a.moving_time)

    # TODO: Smooth curves a bit
    durations = [x.total_seconds() / 60 for x in durations]  # minutes

    plt.figure()
    effort_dates = [e.date for e in efforts]
    effort_times = [e.elapsed_time.total_seconds() for e in efforts]
    plt.plot(datetimes, distances)
    plt.plot(datetimes, durations)
    plt.plot(effort_dates, effort_times)
    plt.show()
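# The functions in this module rely on read_data(DATAFILE) to load the locally
# cached segment statistics. Neither the helper nor the constant appears in this
# excerpt; the following is a minimal sketch assuming a pickle-based cache. Both
# the file name and the pickle format are assumptions, not the project's actual
# implementation.
import pickle

DATAFILE = 'ridden_segments.pickle'  # assumed cache location


def read_data(datafile):
    """Load the dict of ridden segments (segment id -> stats object) from disk."""
    try:
        with open(datafile, 'rb') as f:
            return pickle.load(f)
    except (IOError, OSError):
        # No cache yet; callers expect a dict-like object
        return {}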
def plot_times(config, segment_id, distribution):
    """
    Generates a plot to visualize the performance of the current athlete at a
    specific segment in comparison to other athletes.

    :param config: Config object providing API access via security token
    :param segment_id: ID of the Strava segment in question
    :param distribution: Whether to plot the time distribution over efforts
        instead of a boxplot
    :return: None
    """
    client = config.client
    ridden_segs = read_data(DATAFILE)
    all_efforts = client.get_segment_efforts(segment_id)

    # Elapsed times of all efforts on the segment vs. the athlete's own times
    X = np.array([e.elapsed_time.total_seconds() for e in all_efforts])
    Y = np.array(list(ridden_segs[segment_id].times))

    if distribution:
        plt.xlabel('Time in seconds')
        sns.distplot(X, hist=False, rug=True)
        sns.distplot(Y, hist=False, rug=True)
    else:
        plt.ylabel('Time in seconds')
        # X and Y usually differ in length, so pass them as a list of arrays
        sns.boxplot(data=[X, Y], orient='v')
    plt.show()
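# sns.distplot(), used in plot_times() above, is deprecated and has been removed
# in recent seaborn releases. The helper below is a minimal sketch of an
# equivalent call with the current API (kdeplot for the density curve, rugplot
# for the ticks); the helper name is ours and not part of the original code.
def _plot_time_distribution(times):
    sns.kdeplot(times)
    sns.rugplot(times)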
def segment_ranking(config, order, segment):
    """Print a ranking of all ridden segments, sorted by the given criterion."""
    client = config.client
    ridden_segs = read_data(DATAFILE)

    # Restrict to the given segment if the -s option is specified
    filter_segments(ridden_segs, segment)

    # Crawl leaderboards and times for every segment
    athlete_id = client.get_athlete().id
    leaderboards = {}
    for k in ridden_segs.keys():
        try:
            leaderboards[k] = client.get_segment_leaderboard(k)
        except Exception:
            continue
    ranks = {k: rank(leaderboards.get(k), athlete_id) for k in ridden_segs}
    rel_times = {k: relative_time(leaderboards.get(k), ridden_segs[k])
                 for k in ridden_segs}

    # Sort key and sort direction for each supported ordering
    orderings = {
        'tries': lambda x: len(x.efforts),
        'elevation': lambda x: x.total_elevation_gain,
        'rank': lambda x: ranks[x.id][0],
        'time': lambda x: rel_times[x.id],
    }
    reverse = {'tries': True, 'elevation': True, 'rank': False, 'time': False}

    for v in sorted(ridden_segs.values(), key=orderings[order],
                    reverse=reverse[order]):
        rank_string = '%3d/%4d' % (ranks[v.id][0], ranks[v.id][1])
        print('Position: %s - Tries: %3d - Elevation gain: %4d - '
              'Average time: %4d sec. - Std. deviation in time: %3d sec. - '
              'Relative to KOM: %3.1f%% - Segment: %.30s - ID: %7s'
              % (rank_string, len(v.efforts), v.total_elevation_gain,
                 v.avg_time, v.std_time, rel_times[v.id], v.name, v.id))
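# segment_ranking() calls filter_segments(), rank() and relative_time(), none of
# which appear in this excerpt. The sketches below only illustrate the
# interfaces implied by the call sites (rank -> (position, total entries),
# relative_time -> percentage relative to the KOM time); the attribute names on
# the leaderboard objects and the exact definitions are assumptions, not the
# project's actual implementations.
def filter_segments(ridden_segs, segment):
    """If a single segment ID was given, drop every other entry in place."""
    if segment is None:
        return
    for k in [k for k in ridden_segs if k != segment]:
        del ridden_segs[k]


def rank(leaderboard, athlete_id):
    """Return (position, number of entries) of the athlete on the leaderboard."""
    if leaderboard is None:
        return 0, 0
    for entry in leaderboard.entries:
        if entry.athlete_id == athlete_id:
            return entry.rank, leaderboard.entry_count
    return 0, leaderboard.entry_count


def relative_time(leaderboard, seg):
    """One plausible definition: average time as a percentage of the KOM time."""
    if leaderboard is None:
        return 0.0
    kom_time = leaderboard.entries[0].elapsed_time.total_seconds()
    return 100.0 * seg.avg_time / kom_time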
def graph():
    """Render interactive Plotly graphs of the sensor readings for one day."""
    graph_date = request.args.get('date')
    graph_type = request.args.get('type')
    if graph_date is None:
        graph_date = datetime.now().strftime('%Y%m%d')
    if graph_type not in ROW_TITLES:
        graph_type = 'Temperature'

    # One-day window starting at the requested date; currently only referenced
    # by the commented-out single-figure layout below
    date_range = [datetime.strptime(graph_date, '%Y%m%d')] * 2
    date_range[1] += timedelta(1)
    date_range = [dt.strftime('%Y-%m-%d') for dt in date_range]

    # Cache the sensor readings for ten minutes to avoid re-reading every request
    data = cache.get('data')
    if data is None:
        data = read_data()
        cache.set('data', data, timeout=10 * 60)
    data = data[data.Datetime.dt.strftime('%Y%m%d') == graph_date]

    # Create one trace per sensor, skipping sensors without any readings
    traces = []
    titles = []
    for g, d in data.groupby('Name'):
        trace = go.Scatter(
            name=g,
            x=d.Datetime,
            y=d[graph_type],
            marker={'color': '#57A6C4'},  # hex color needs the leading '#'
            mode='lines',
        )
        if not d[graph_type].isnull().values.all():
            traces.append(trace)
            titles.append(g)

    # Alternative single-figure layout, kept for reference:
    # layout = go.Layout(
    #     title='Sensor Measurements',
    #     yaxis=dict(
    #         title=graph_type,
    #         # range=[0, 40],
    #     ),
    #     xaxis=dict(
    #         range=date_range,
    #     ),
    #     showlegend=True)
    # graph = go.Figure(data=traces, layout=layout)

    # One stacked subplot per sensor, sharing the x-axis
    fig = tools.make_subplots(rows=len(traces), cols=1, shared_xaxes=True,
                              specs=[[{}]] * len(traces), subplot_titles=titles)
    for i, trace in enumerate(traces):
        fig.append_trace(trace, i + 1, 1)
    fig['layout'].update({
        'height': 800,
        'title': 'Sensor Measurements',
        'showlegend': False,
    })

    graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
    return render_template('graph.html', graphJSON=graphJSON,
                           sensors=ROW_TITLES[3:])
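# graph() above is written as a Flask view (request.args, render_template), but
# the surrounding application setup is not part of this excerpt. Below is a
# minimal sketch of what that module scaffolding might look like; the cache
# backend (Flask-Caching with SimpleCache), the route URL and the debug run are
# assumptions, and ROW_TITLES plus the sensor read_data() helper are expected to
# be defined elsewhere in the module. In the actual module these imports would
# sit at the top of the file.
import json
from datetime import datetime, timedelta

import plotly
import plotly.graph_objs as go
from plotly import tools
from flask import Flask, request, render_template
from flask_caching import Cache

app = Flask(__name__)
cache = Cache(app, config={'CACHE_TYPE': 'SimpleCache'})

# Register the view defined above under an assumed URL
app.add_url_rule('/graph', view_func=graph)

if __name__ == '__main__':
    app.run(debug=True)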