def authorization(request): client = Client() code = request.GET['code'] access_token = client.exchange_code_for_token(client_id=MY_STRAVA_CLIENT_ID, client_secret=MY_STRAVA_CLIENT_SECRET, code=code) # making a global variable to be used across views. don't know how this will work in practice client = Client(access_token=access_token) athlete = client.get_athlete() # Get current athlete details global athleteId athleteId = athlete.id # if athlete doesn't exist, add them if len(Athlete.objects.filter(athleteId=athleteId)) == 0: ath = Athlete.objects.create(name=str(athlete.firstname+' '+athlete.lastname), athleteId=athleteId, profilePic=athlete.profile, city=athlete.city, country=athlete.country, sex=athlete.sex, premium=athlete.premium, created_at=athlete.created_at, updated_at=athlete.updated_at, followers=athlete.follower_count, friends=athlete.friend_count, email=athlete.email, weight=athlete.weight, meas_pref=athlete.measurement_preference, runsSummary = DataFrame({}).to_json(orient='records'), fitLines = DataFrame({}).to_json(orient='records'), masterList = DataFrame({}).to_json(orient='records')) ath.profilePic.name = "rudyzPic" ath.save(update_fields=['profilePic']) # if athlete already exists, draw their file elif len(Athlete.objects.filter(athleteId=athleteId)) == 1: ath = Athlete.objects.get(athleteId=athleteId) ############################################ ##### compiling new runs, updating summary # athlete's existing runs summary existingSummary = DataFrame(pd.read_json(ath.runsSummary)) existingFitlines = DataFrame(pd.read_json(ath.fitLines)) masterList = DataFrame(pd.read_json(ath.masterList)) activities = list(client.get_activities()) # activity IDs of runs already in the system try: ids = existingSummary.activityId except AttributeError: ids = [] for i in range(len(activities)): #for i in range(30,37): # Ignoring activities already in the system if (len(ids) == 0) or (float(activities[i].id) not in list(ids)): try: # compiling df for raw 
json-ization activityId = activities[i].id run = client.get_activity_streams(activityId, types=['time','latlng','distance','heartrate','altitude','cadence']) latlng = run['latlng'].data time = run['time'].data distance = run['distance'].data heartrate = run['heartrate'].data altitude = run['altitude'].data cadence = run['cadence'].data date = activities[i].start_date_local activity = activityId dfi = thresher.assemble(date, activityId, heartrate, distance, time, altitude, latlng, cadence) # basic cleanup, only removing totally unreasonable values dfi = thresher.basicClean(dfi) # if we ever want to try our hand at improving strava's speed data (ie by predicting speed when GPS blanks), intervene here: #dfi = thresher.addDistDeltas(dfi) try: fitline = thresher.getFitlineLws(dfi) # this adds speed-shifted columns except: fitline = pd.DataFrame({}) try: mafScore = fitline[fitline.hr == 140.0].avgSpeed.iloc[0] print "MAF " print mafScore except: mafScore = np.nan fitline_json = fitline.to_json(orient='records') # getting summary info for run (as one-entry dict) runSummary = thresher.getSingleSummaryDf(dfi) # adding mafScore to summary runSummary['mafScore'] = mafScore print runSummary # adding predicted hr and speed values #dfi = thresher.getPred(dfi) # saving entry to database Activity.objects.create(act_id = activityId, name=str(activities[i].name), description=activities[i].description, act_type=activities[i].type, date=activities[i].start_date_local, timezone=activities[i].timezone, df=dfi.to_json(orient='records'), avgHr=runSummary['avgHr'], hrVar=runSummary['variation'], realMiles=runSummary['realMiles'], recovery=runSummary['recovery'], easy=runSummary['easy'], stamina=runSummary['stamina'], impulse=runSummary['impulse'], totalTime=runSummary['totalTime'], totalDist=runSummary['totalDist'], climb=runSummary['climb'], fitline=fitline_json, mafScore=mafScore, athlete=ath) # updating runs summary existingSummary = existingSummary.append(runSummary, ignore_index=True) 
existingFitlines = existingFitlines.append(fitline, ignore_index=True) masterList = masterList.append(dfi, ignore_index=True) except: continue # saving updated runs summary to athlete profile ath.runsSummary = existingSummary.to_json(orient='records') ath.save(update_fields=['runsSummary']) existingSummary.to_pickle("runsSummary.txt") # saving updated runs summary to athlete profile ath.fitLines = existingFitlines.to_json(orient='records') ath.save(update_fields=['fitLines']) ath.masterList = masterList.to_json(orient='records') ath.save(update_fields=['masterList']) # testing... existingSummary = pd.read_json(ath.runsSummary) #print(existingSummary) existingFitlines = pd.read_json(ath.fitLines) #print(existingFitlines) global path path = os.path.dirname(__file__) # updating dataframe, pickling for use in other views #global df #df = thresher.masterAssemble(client) masterDf = pd.read_json(ath.masterList) #print(masterDf) masterDf.to_pickle(str(path)+"/"+str(athlete.id)+"masterDf.txt") return render(request, 'stravaChimp/authorization.html', {'code':code, 'access_token':access_token, 'athleteId':athleteId})
def authorization(request):
    """Strava OAuth callback view.

    Exchanges the ``code`` query parameter for an access token, creates the
    Athlete row on first login (otherwise loads it), ingests any activities
    not yet stored, derives per-run summary/fitline data via ``thresher``,
    persists the updated JSON blobs plus a pickled master dataframe, and
    renders the authorization template.

    NOTE(review): shares state across views via module-level globals
    (``athleteId``, ``path``) — not safe under concurrent requests.
    """
    client = Client()
    code = request.GET['code']
    access_token = client.exchange_code_for_token(
        client_id=MY_STRAVA_CLIENT_ID,
        client_secret=MY_STRAVA_CLIENT_SECRET,
        code=code)
    # making a global variable to be used across views. don't know how this will work in practice
    client = Client(access_token=access_token)
    athlete = client.get_athlete()  # Get current athlete details
    global athleteId
    athleteId = athlete.id
    # if athlete doesn't exist, add them
    if len(Athlete.objects.filter(athleteId=athleteId)) == 0:
        ath = Athlete.objects.create(
            name=str(athlete.firstname + ' ' + athlete.lastname),
            athleteId=athleteId,
            profilePic=athlete.profile,
            city=athlete.city,
            country=athlete.country,
            sex=athlete.sex,
            premium=athlete.premium,
            created_at=athlete.created_at,
            updated_at=athlete.updated_at,
            followers=athlete.follower_count,
            friends=athlete.friend_count,
            email=athlete.email,
            weight=athlete.weight,
            meas_pref=athlete.measurement_preference,
            # new athletes start with empty JSON frames
            runsSummary=DataFrame({}).to_json(orient='records'),
            fitLines=DataFrame({}).to_json(orient='records'),
            masterList=DataFrame({}).to_json(orient='records'))
        ath.profilePic.name = "rudyzPic"
        ath.save(update_fields=['profilePic'])
    # if athlete already exists, draw their file
    # NOTE(review): if the count is somehow neither 0 nor 1, `ath` is left
    # unbound and the code below raises NameError — worth confirming the
    # athleteId column is unique.
    elif len(Athlete.objects.filter(athleteId=athleteId)) == 1:
        ath = Athlete.objects.get(athleteId=athleteId)

    ############################################
    ##### compiling new runs, updating summary

    # athlete's existing runs summary
    existingSummary = DataFrame(pd.read_json(ath.runsSummary))
    existingFitlines = DataFrame(pd.read_json(ath.fitLines))
    masterList = DataFrame(pd.read_json(ath.masterList))

    activities = list(client.get_activities())

    # activity IDs of runs already in the system
    # (an empty summary frame has no `activityId` column, hence AttributeError)
    try:
        ids = existingSummary.activityId
    except AttributeError:
        ids = []

    for i in range(len(activities)):
        #for i in range(30,37):
        # Ignoring activities already in the system
        if (len(ids) == 0) or (float(activities[i].id) not in list(ids)):
            try:
                # compiling df for raw json-ization
                activityId = activities[i].id
                run = client.get_activity_streams(activityId, types=[
                    'time', 'latlng', 'distance', 'heartrate', 'altitude',
                    'cadence'
                ])
                latlng = run['latlng'].data
                time = run['time'].data
                distance = run['distance'].data
                heartrate = run['heartrate'].data
                altitude = run['altitude'].data
                cadence = run['cadence'].data
                date = activities[i].start_date_local
                # NOTE(review): `activity` is assigned but never used below.
                activity = activityId
                dfi = thresher.assemble(date, activityId, heartrate, distance,
                                        time, altitude, latlng, cadence)
                # basic cleanup, only removing totally unreasonable values
                dfi = thresher.basicClean(dfi)
                # if we ever want to try our hand at improving strava's speed data (ie by predicting speed when GPS blanks), intervene here:
                #dfi = thresher.addDistDeltas(dfi)
                try:
                    fitline = thresher.getFitlineLws(
                        dfi)  # this adds speed-shifted columns
                except:
                    # NOTE(review): bare except — any failure yields an empty fitline
                    fitline = pd.DataFrame({})
                try:
                    # MAF score: fitted speed at exactly 140 bpm — presumably
                    # the Maffetone aerobic marker; confirm against thresher.
                    mafScore = fitline[fitline.hr == 140.0].avgSpeed.iloc[0]
                    print "MAF "
                    print mafScore
                except:
                    # no hr == 140.0 row (or empty fitline)
                    mafScore = np.nan
                fitline_json = fitline.to_json(orient='records')
                # getting summary info for run (as one-entry dict)
                runSummary = thresher.getSingleSummaryDf(dfi)
                # adding mafScore to summary
                runSummary['mafScore'] = mafScore
                print runSummary
                # adding predicted hr and speed values
                #dfi = thresher.getPred(dfi)
                # saving entry to database
                Activity.objects.create(act_id=activityId,
                                        name=str(activities[i].name),
                                        description=activities[i].description,
                                        act_type=activities[i].type,
                                        date=activities[i].start_date_local,
                                        timezone=activities[i].timezone,
                                        df=dfi.to_json(orient='records'),
                                        avgHr=runSummary['avgHr'],
                                        hrVar=runSummary['variation'],
                                        realMiles=runSummary['realMiles'],
                                        recovery=runSummary['recovery'],
                                        easy=runSummary['easy'],
                                        stamina=runSummary['stamina'],
                                        impulse=runSummary['impulse'],
                                        totalTime=runSummary['totalTime'],
                                        totalDist=runSummary['totalDist'],
                                        climb=runSummary['climb'],
                                        fitline=fitline_json,
                                        mafScore=mafScore,
                                        athlete=ath)
                # updating runs summary
                existingSummary = existingSummary.append(runSummary,
                                                         ignore_index=True)
                existingFitlines = existingFitlines.append(fitline,
                                                           ignore_index=True)
                masterList = masterList.append(dfi, ignore_index=True)
            except:
                # NOTE(review): bare except — silently skips any activity that
                # fails anywhere above (missing streams, DB errors, ...)
                continue

    # saving updated runs summary to athlete profile
    ath.runsSummary = existingSummary.to_json(orient='records')
    ath.save(update_fields=['runsSummary'])
    existingSummary.to_pickle("runsSummary.txt")

    # saving updated runs summary to athlete profile
    ath.fitLines = existingFitlines.to_json(orient='records')
    ath.save(update_fields=['fitLines'])

    ath.masterList = masterList.to_json(orient='records')
    ath.save(update_fields=['masterList'])

    # testing...
    existingSummary = pd.read_json(ath.runsSummary)
    #print(existingSummary)
    existingFitlines = pd.read_json(ath.fitLines)
    #print(existingFitlines)

    global path
    path = os.path.dirname(__file__)

    # updating dataframe, pickling for use in other views
    #global df
    #df = thresher.masterAssemble(client)
    masterDf = pd.read_json(ath.masterList)
    #print(masterDf)
    masterDf.to_pickle(str(path) + "/" + str(athlete.id) + "masterDf.txt")

    return render(request, 'stravaChimp/authorization.html', {
        'code': code,
        'access_token': access_token,
        'athleteId': athleteId
    })
#import thresher import pandas as pd import datetime, os from pandas import DataFrame, pivot_table import numpy as np import statsmodels.api as sm import matplotlib.pyplot as plt import thresher from math import radians, cos, sin, asin, sqrt #urllib3.contrib.pyopenssl.inject_into_urllib3() df_summary = pd.read_pickle("runsSummary.txt") df_master = thresher.basicClean(pd.read_pickle("master_dfs/10319226masterDf.txt")) maf = df_summary[['date', 'mafScore']] print maf maf_smoothed = thresher.makeLws(df=maf, frac=.20) print maf_smoothed fig = plt.figure() ax = fig.add_subplot(111) ax.scatter(maf_smoothed.date, maf_smoothed.mafScore, c='blue', alpha=0.25) #ax.scatter(df[x], df[y2], c='red', alpha=0.25) #ax.plot(df['speedLwsX'], df['speedLwsY'], c='blue') #ax.plot(lwsHatX, lwsHatY, c='red') #ax.set_title(str(x)+' '+str(y)) #ax.set_ylabel(str(y))
#import urllib3.contrib.pyopenssl from urllib2 import urlopen from json import load, dumps #import thresher import pandas as pd import datetime, os from pandas import DataFrame, pivot_table import numpy as np import statsmodels.api as sm import matplotlib.pyplot as plt import thresher from math import radians, cos, sin, asin, sqrt #urllib3.contrib.pyopenssl.inject_into_urllib3() df_summary = pd.read_pickle("runsSummary.txt") df_master = thresher.basicClean( pd.read_pickle("master_dfs/10319226masterDf.txt")) maf = df_summary[['date', 'mafScore']] print maf maf_smoothed = thresher.makeLws(df=maf, frac=.20) print maf_smoothed fig = plt.figure() ax = fig.add_subplot(111) ax.scatter(maf_smoothed.date, maf_smoothed.mafScore, c='blue', alpha=0.25) #ax.scatter(df[x], df[y2], c='red', alpha=0.25) #ax.plot(df['speedLwsX'], df['speedLwsY'], c='blue') #ax.plot(lwsHatX, lwsHatY, c='red') #ax.set_title(str(x)+' '+str(y)) #ax.set_ylabel(str(y))