Example #1
    def assign_athlete(self, athlete_id):
        session, engine = db_connect()
        athlete_info = session.query(athlete).filter(
            athlete.athlete_id == athlete_id).first()
        engine.dispose()
        session.close()
        self.athlete_id = athlete_id
        self.athlete_name = athlete_info.name
        self.athlete_sex = athlete_info.sex
        self.athlete_birthday = athlete_info.birthday
        self.heartrate_zones = {
            1: float(athlete_info.hr_zone_threshold_1),
            2: float(athlete_info.hr_zone_threshold_2),
            3: float(athlete_info.hr_zone_threshold_3),
            4: float(athlete_info.hr_zone_threshold_4)
        }
        if 'ride' in self.type.lower():
            self.power_zones = {
                1: float(athlete_info.cycle_power_zone_threshold_1),
                2: float(athlete_info.cycle_power_zone_threshold_2),
                3: float(athlete_info.cycle_power_zone_threshold_3),
                4: float(athlete_info.cycle_power_zone_threshold_4),
                5: float(athlete_info.cycle_power_zone_threshold_5),
                6: float(athlete_info.cycle_power_zone_threshold_6)
            }
        elif 'run' in self.type.lower():
            self.power_zones = {
                1: float(athlete_info.run_power_zone_threshold_1),
                2: float(athlete_info.run_power_zone_threshold_2),
                3: float(athlete_info.run_power_zone_threshold_3),
                4: float(athlete_info.run_power_zone_threshold_4)
            }
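
Every example in this listing calls a db_connect() helper that is not shown. A minimal sketch of what it plausibly looks like, assuming SQLAlchemy; the connection URL below is a placeholder, not the project's real setting:

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker


def db_connect():
    # Hypothetical reconstruction: the examples only show that this helper
    # returns a (session, engine) pair which callers later close and dispose.
    engine = create_engine('sqlite:///fitness.db')  # placeholder URL
    session = sessionmaker(bind=engine)()
    return session, engine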
Example #2
def pull_withings_data():
    # UTC dates will get sampled into daily
    if withings_connected():
        client = NokiaApi(nokia_creds(current_token_dict()),
                          refresh_cb=save_withings_token)
        df = pd.DataFrame(
            [measure.__dict__ for measure in client.get_measures()])
        df['date_utc'] = df['date'].apply(lambda x: datetime.strptime(
            str(x.format('YYYY-MM-DD HH:mm:ss')), '%Y-%m-%d %H:%M:%S'))
        df = df.drop(columns=['date'])
        df = df.set_index(df['date_utc'])
        df = df[['weight', 'fat_ratio', 'hydration']]
        # Convert to lbs
        df['weight'] *= 2.20462

        # Filter to days later than what is already in db
        session, engine = db_connect()
        withings_max_date = session.query(func.max(
            withings.date_utc)).first()[0]
        withings_max_date = datetime.strptime(
            '1991-08-30 00:00:00', '%Y-%m-%d %H:%M:%S'
        ) if not withings_max_date else withings_max_date
        df = df[(df.index > withings_max_date) & (~np.isnan(df['weight'])) &
                (~np.isnan(df['fat_ratio']))]
        if len(df) > 0:
            dash_app.server.logger.info('New withings measurements found!')
            df.to_sql('withings', engine, if_exists='append', index=True)

        # Dispose/close only after the insert so to_sql does not write
        # through an already-disposed engine
        engine.dispose()
        session.close()
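
The connect/query/dispose sequence above repeats throughout this listing; a context-manager wrapper over db_connect would guarantee cleanup even when a query or insert raises. A minimal sketch, assuming the db_connect helper sketched earlier:

from contextlib import contextmanager


@contextmanager
def db_session():
    # Sketch only: yield a (session, engine) pair and always clean up.
    session, engine = db_connect()
    try:
        yield session, engine
    finally:
        session.close()
        engine.dispose()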
Example #3
def pull_readiness_data(oura, days_back=7):
    session, engine = db_connect()
    # Get latest date in db and pull everything after; run the query
    # before the session is closed
    start = session.query(func.max(
        ouraReadinessSummary.report_date)).first()[0]
    engine.dispose()
    session.close()
    start = '1999-01-01' if start is None else datetime.strftime(
        start - timedelta(days=days_back), '%Y-%m-%d')

    dash_app.server.logger.debug(
        'Pulling readiness from max date in oura_readiness_summary {}'.format(
            start))
    oura_data = oura.readiness_summary(start=start)['readiness']

    if len(oura_data) > 0:
        df_readiness_summary = pd.DataFrame.from_dict(oura_data)
        # Readiness shows the 'summary' of the previous day.
        # To align with charts when filtering on date use readiness summary_date + 1 day
        df_readiness_summary['report_date'] = pd.to_datetime(
            df_readiness_summary['summary_date']) + timedelta(days=1)
        df_readiness_summary = df_readiness_summary.set_index('report_date')

        return df_readiness_summary
    else:
        return []
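
A sketch of how this pull function pairs with insert_readiness_data from Example #11, assuming an already-authenticated oura client object:

df_readiness = pull_readiness_data(oura, days_back=7)
if len(df_readiness) > 0:
    insert_readiness_data(df_readiness, days_back=7)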
Example #4
    def get_ftp(self):
        # TODO: Update with auto calculated critical power so users do not have to flag (or take) FTP tests
        if 'run' in self.type.lower():
            stryd_df = get_stryd_df_summary()
            stryd_df = stryd_df[stryd_df.index <= self.start_date_local]
            try:
                self.ftp = stryd_df.loc[stryd_df.index.max()].stryd_ftp
            except BaseException:
                # If no FTP test prior to current activity
                self.ftp = 223

        elif 'ride' in self.type.lower():
            # TODO: Switch over to using Critical Power for everything once we get the critical power model working
            session, engine = db_connect()
            try:
                self.ftp = float(
                    session.query(stravaSummary.average_watts).order_by(
                        stravaSummary.start_date_local.desc()).filter(
                            stravaSummary.start_date_local <
                            self.start_date_local,
                            stravaSummary.type.ilike('%ride%'),
                            stravaSummary.name.ilike('%ftp test%')).first()
                    [0]) * .95
            except BaseException:
                # If no FTP test prior to current activity
                self.ftp = 211
            engine.dispose()
            session.close()

        else:
            self.ftp = None

        if self.ftp is not None:
            self.ftp = None if float(self.ftp) == 0.0 else self.ftp
Example #5
def current_token_dict():
    try:
        session, engine = db_connect()
        token_dict = session.query(
            apiTokens.tokens).filter(apiTokens.service == 'Strava').first()
        token_dict = ast.literal_eval(token_dict[0]) if token_dict else {}
        engine.dispose()
        session.close()
    except BaseException as e:
        dash_app.server.logger.error(e)
        token_dict = {}
    return token_dict
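
Tokens are persisted with str() and parsed back with ast.literal_eval, so the stored value must be a plain Python literal. A quick round-trip illustration with a made-up token dict:

import ast

tokens = {'access_token': 'abc', 'expires_at': 1234567890}
stored = str(tokens)                 # what save_strava_token writes to apiTokens
restored = ast.literal_eval(stored)  # what current_token_dict reads back
assert restored == tokens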
Example #6
def save_strava_token(token_dict):
    session, engine = db_connect()
    # Delete current key
    dash_app.server.logger.debug('Deleting current strava tokens')
    session.execute(delete(apiTokens).where(apiTokens.service == 'Strava'))
    # Insert new key
    dash_app.server.logger.debug('Inserting new strava tokens')
    session.add(
        apiTokens(date_utc=datetime.utcnow(),
                  service='Strava',
                  tokens=str(token_dict)))
    session.commit()
    engine.dispose()
    session.close()
Example #7
def last_body_measurement_notification():
    session, engine = db_connect()
    last_measurement_date = session.query(func.max(withings.date_utc))[0][0]
    engine.dispose()
    session.close()

    if last_measurement_date:
        days_since_last_measurement = datetime.utcnow().date(
        ) - last_measurement_date.date()

        if days_since_last_measurement >= timedelta(days=7):
            return dbc.Alert(
                "It's been {:.0f} days since your last body measurement".
                format(days_since_last_measurement.days),
                color='primary',
                style={'borderRadius': '4px'})
Example #8
def save_oura_token(token_dict):
    session, engine = db_connect()
    # Delete current key
    session.execute(delete(apiTokens).where(apiTokens.service == 'Oura'))
    # Insert new key
    try:
        session.add(
            apiTokens(date_utc=datetime.utcnow(),
                      service='Oura',
                      tokens=str(token_dict)))
        session.commit()
    except BaseException:
        session.rollback()
    # config.set("oura", "token_dict", str(token_dict))
    # with open('config.ini', 'w') as configfile:
    #     config.write(configfile)
    engine.dispose()
    session.close()
Example #9
def save_withings_token(tokens):
    dash_app.server.logger.debug('***** ATTEMPTING TO SAVE TOKENS *****')

    # Withings API returns the following, when refreshing use this
    try:
        token_dict = {
            'access_token': tokens['access_token'],
            'token_expiry':
                int((datetime.utcnow() -
                     datetime(1970, 1, 1)).total_seconds()) +
                int(tokens['expires_in']),
            'token_type': tokens['token_type'],
            'user_id': tokens['userid'],
            'refresh_token': tokens['refresh_token']
        }
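        # token_expiry is stored as an absolute Unix timestamp: seconds since
        # 1970-01-01 (now) plus the relative 'expires_in' returned by the API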

    # NokiaCredentials is an object (not dict)... When running for the first time use this (no record in db)
    except (TypeError, KeyError):
        token_dict = {
            'access_token': tokens.access_token,
            'token_expiry': tokens.token_expiry,
            'token_type': tokens.token_type,
            'user_id': tokens.user_id,
            'refresh_token': tokens.refresh_token
        }

    session, engine = db_connect()
    # Delete current key
    session.execute(delete(apiTokens).where(apiTokens.service == 'Withings'))
    # Insert new key
    session.add(
        apiTokens(date_utc=datetime.utcnow(),
                  service='Withings',
                  tokens=str(token_dict)))
    session.commit()

    engine.dispose()
    session.close()
    dash_app.server.logger.debug('***** SAVED TOKENS *****')
Example #10
def insert_sleep_data(df_sleep_summary, df_sleep_samples, days_back=7):
    session, engine = db_connect()
    start = session.query(func.max(ouraSleepSummary.report_date))[0][0]
    start = '1999-01-01' if start is None else datetime.strftime(
        start - timedelta(days=days_back), '%Y-%m-%d')

    # Delete latest dates records from db to ensure values are being overridden from api pull
    try:
        dash_app.server.logger.debug(
            'Deleting >= {} records from oura_sleep_summary'.format(start))
        session.execute(
            delete(ouraSleepSummary).where(
                ouraSleepSummary.summary_date >= start))
        dash_app.server.logger.debug(
            'Deleting >= {} records from oura_sleep_samples'.format(start))
        session.execute(
            delete(ouraSleepSamples).where(
                ouraSleepSamples.summary_date >= start))
        session.commit()
    except BaseException as e:
        dash_app.server.logger.error(e)

    engine.dispose()
    session.close()

    # Insert Sleep Summary
    dash_app.server.logger.debug('Inserting oura sleep summary')
    try:
        db_insert(df_sleep_summary, 'oura_sleep_summary')
    except BaseException as e:
        dash_app.server.logger.error(e)

    # Insert Sleep Samples
    dash_app.server.logger.debug('Inserting oura sleep samples')
    try:
        db_insert(df_sleep_samples, 'oura_sleep_samples')
    except BaseException as e:
        dash_app.server.logger.error(e)
Example #11
def insert_readiness_data(df_readiness_summary, days_back=7):
    session, engine = db_connect()
    start = session.query(func.max(ouraReadinessSummary.report_date))
    start = '1999-01-01' if start[0][0] is None else datetime.strftime(
        start[0][0] - timedelta(days=days_back), '%Y-%m-%d')
    # Delete latest dates records from db to ensure values are being overridden from api pull
    try:
        dash_app.server.logger.debug(
            'Deleting >= {} records from oura_readiness_summary'.format(start))
        session.execute(
            delete(ouraReadinessSummary).where(
                ouraReadinessSummary.summary_date >= start))
        session.commit()
    except BaseException as e:
        dash_app.server.logger.error(e)

    engine.dispose()
    session.close()

    dash_app.server.logger.debug('Inserting oura readiness summary')
    db_insert(df_readiness_summary, 'oura_readiness_summary')
Example #12
def last_ftp_test_notification(ftp_type):
    session, engine = db_connect()

    last_ftp_test_date = \
        session.query(func.max(stravaSummary.start_date_utc)).filter(
            (stravaSummary.name.ilike('%ftp test%')) & (stravaSummary.type.ilike(ftp_type))
        )[0][0]
    ftp_week_threshold = session.query(athlete).filter(
        athlete.athlete_id == 1).first().ftp_test_notification_week_threshold

    engine.dispose()
    session.close()

    if last_ftp_test_date:
        weeks_since_ftp_test = (
            (datetime.utcnow() - last_ftp_test_date).days) / 7.0
        if weeks_since_ftp_test >= ftp_week_threshold:
            return dbc.Alert(
                "It's been {:.1f} weeks since your last {} FTP test".format(
                    weeks_since_ftp_test, ftp_type),
                color='primary',
                style={'borderRadius': '4px'})
Example #13
    def get_weight(self):
        # TODO: Build this out so weight data can be pulled from other data sources, and resort to athlete table if no source is connected that contains weight data
        session, engine = db_connect()

        # Try grabbing last weight in withings before current workout
        weight = session.query(withings.weight).filter(
            withings.date_utc <= self.start_date).order_by(
                withings.date_utc.desc()).first()
        # If activity is prior to first withings record, use the first withings record
        if not weight:
            weight = session.query(withings.weight).order_by(
                withings.date_utc.asc()).first()
        # If no weights in withings, resort to manually entered static weight from athlete table
        if not weight:
            weight = session.query(athlete.weight_lbs).filter(
                athlete.athlete_id == self.athlete_id).first()
        engine.dispose()
        session.close()

        weight = float(weight[0])
        self.weight = weight
        self.kg = weight * 0.453592
Example #14
    def get_rest_hr(self):
        # TODO: Build this out so heart rate data can be pulled from other data sources, and resort to athlete table if no source is connected that contains heart rate data
        # Assign rhr to activities by their start date
        session, engine = db_connect()
        # Try grabbing last resting heartrate from oura
        hr_lowest = session.query(ouraSleepSummary.hr_lowest).filter(
            ouraSleepSummary.report_date <= self.start_date.date()).order_by(
                ouraSleepSummary.report_date.desc()).first()

        # If activity is prior to first oura data record, use first oura data record
        if not hr_lowest:
            hr_lowest = session.query(ouraSleepSummary.hr_lowest).order_by(
                ouraSleepSummary.report_date.asc()).first()

        # Resort to manually entered static athlete resting heartrate if no data source to pull from
        if not hr_lowest:
            hr_lowest = session.query(athlete.resting_hr).filter(
                athlete.athlete_id == self.athlete_id).first()

        engine.dispose()
        session.close()
        self.hr_lowest = hr_lowest[0]
Example #15
def generate_exercise_charts(timeframe, muscle_options):
    session, engine = db_connect()
    df = pd.read_sql(sql=session.query(fitbod).statement, con=engine)
    # Merge 'muscle' into exercise table for mapping
    df_muscle = pd.read_sql(sql=session.query(fitbod_muscles).statement,
                            con=engine)
    engine.dispose()
    session.close()
    df = df.merge(df_muscle,
                  how='left',
                  left_on='Exercise',
                  right_on='Exercise')

    # Filter on selected muscles
    df = df[df['Muscle'].isin(muscle_options)]

    # Calculate Volume and aggregate to the daily (workout) level
    df['Volume'] = df['Reps'].replace(0, 1) * df['Weight'].replace(
        0, 1) * df['Duration'].replace(0, 1)
    # TODO: Change this to sum all volume at workout level instead of taking max of 1 set
    # df = df.loc[df.groupby(['date_UTC', 'Exercise'])['Volume'].agg(pd.Series.idxmax)].reset_index()
    df = df.groupby(['date_UTC', 'Exercise'])['Volume'].sum().reset_index()

    if timeframe == 'ytd':
        df = df[df['date_UTC'].dt.date >= date(datetime.today().year, 1, 1)]
    elif timeframe == 'l6w':
        df = df[df['date_UTC'].dt.date >= (datetime.now().date() -
                                           timedelta(days=42))]

    widgets = []
    for exercise in df['Exercise'].unique():
        df_temp = df[df['Exercise'] == exercise]
        try:
            # Calculate overall start to end % change (1 number)
            percent_change = (
                (df_temp['Volume'].tail(1).values[0] -
                 df_temp['Volume'].head(1).values[0]) /
                df_temp['Volume'].head(1).values[0]) * 100
            backgroundColor = 'rgb(100,66,66)' if percent_change < 0 else \
                'rgb(66,100,66)' if percent_change > 0 else 'rgb(66,66,66)'
        except BaseException:
            backgroundColor = 'rgb(66,66,66)'

        # Only plot exercise if at least 2 different dates with that exercise
        if len(df_temp['date_UTC'].unique()) > 1:
            # Sort by date ascending
            df_temp = df_temp.sort_values(by=['date_UTC'])
            # Calculate trend of each data point vs starting point
            first_volume = df_temp['Volume'].iloc[0]
            df_temp['% Change'] = (df_temp['Volume'] -
                                   first_volume) / first_volume * 100
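            # e.g., volumes of 1000 then 1200 yield % Change values of 0.0 and +20.0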
            tooltip = [
                'Volume:<b>{:.0f} ({}{:.1f}%)'.format(x, '+' if y >= 0 else '',
                                                      y)
                for (x, y) in zip(df_temp['Volume'], df_temp['% Change'])
            ]

            widgets.append(
                html.Div(
                    className='two columns maincontainer height-10',
                    style={'backgroundColor': backgroundColor},
                    children=[
                        dcc.Graph(
                            id=exercise + '-trend',
                            className='twelve columns nospace',
                            style={'height': '100%'},
                            config={
                                'displayModeBar': False,
                                # 'showLink': True  # to edit in studio
                            },
                            figure={
                                'data': [
                                    go.Scatter(x=df_temp['date_UTC'],
                                               y=df_temp['% Change'],
                                               mode='lines+markers',
                                               text=tooltip,
                                               hoverinfo='x+text',
                                               opacity=0.7,
                                               line={'color': teal}),
                                ],
                                'layout':
                                go.Layout(
                                    # title = metricTitle[metric],
                                    plot_bgcolor=backgroundColor,  # plot bg color
                                    paper_bgcolor=backgroundColor,  # margin bg color
                                    height=150,
                                    font=dict(
                                        color='rgb(220,220,220)',
                                        size=10,
                                    ),

                                    # hoverlabel={'font': {'size': 10}},
                                    xaxis=dict(
                                        showline=True,
                                        color='rgb(220,220,220)',
                                        showgrid=False,
                                        showticklabels=True,
                                        tickformat='%b %d',
                                        # Specify range to get rid of auto x-axis padding when using scatter markers
                                        # range=[df.index.max() - timedelta(days=41),
                                        #        df.index.max()],
                                        # rangeselector=dict(
                                        #     bgcolor='rgb(66, 66, 66)',
                                        #     bordercolor='#d4d4d4',
                                        #     borderwidth=.5,
                                        #     buttons=buttons,
                                        #     xanchor='center',
                                        #     x=.5,
                                        #     y=1,
                                        # ),
                                    ),
                                    yaxis=dict(
                                        showgrid=False,
                                        showticklabels=False,
                                        gridcolor='rgb(73, 73, 73)',
                                        gridwidth=.5,
                                        # tickformat='%',
                                    ),
                                    margin={
                                        'l': 0,
                                        'b': 25,
                                        't': 20,
                                        'r': 0
                                    },
                                    showlegend=False,
                                    annotations=[
                                        go.layout.Annotation(
                                            font={'size': 14},
                                            x=df_temp.loc[
                                                df_temp['date_UTC'].idxmax()]
                                            ['date_UTC'],
                                            y=df_temp.loc[
                                                df_temp['date_UTC'].idxmax()]
                                            ['% Change'],
                                            xref="x",
                                            yref="y",
                                            text="{:.1f}%".format(df_temp.loc[
                                                df_temp['date_UTC'].idxmax(
                                                )]['% Change']),
                                            showarrow=True,
                                            arrowhead=0,
                                            arrowcolor=white,
                                            ax=5,
                                            ay=-20)
                                    ],
                                    hovermode='x',
                                    autosize=True,
                                    title=exercise)
                            })
                    ]))
    # Set up each div of 6 graphs to be placed in
    num_divs = math.ceil(len(widgets) / 6)
    div_layout = []
    for i in range(0, num_divs):
        children = widgets[i * 6:(i + 1) * 6]

        div_layout.append(
            html.Div(className='twelve columns', children=children))
        div_layout.append(
            html.Div(className='twelve columns',
                     style={
                         'backgroundColor': 'rgb(66, 66, 66)',
                         'paddingBottom': '1vh'
                     }))

    return div_layout
Example #16
def pull_activity_data(oura, days_back=7):
    # Activity data updates throughout day and score is generated based off current day (in data)
    # Do not need to generate 'report date'
    session, engine = db_connect()
    # Get latest date in db and pull everything after
    start = session.query(func.max(ouraActivitySummary.summary_date))[0][0]
    engine.dispose()
    session.close()

    start = '1999-01-01' if start is None else datetime.strftime(
        start - timedelta(days=days_back), '%Y-%m-%d')

    dash_app.server.logger.debug(
        'Pulling activity from max date in oura_activity_summary {}'.format(
            start))
    oura_data = oura.activity_summary(start=start)['activity']

    if len(oura_data) > 0:
        df_activity_summary = pd.DataFrame.from_dict(oura_data).set_index(
            'summary_date')
        df_activity_summary = df_activity_summary.set_index(
            pd.to_datetime(df_activity_summary.index))

        df_activity_summary['day_end_local'] = pd.to_datetime(
            df_activity_summary['day_end'].apply(lambda x: x[:-6]))
        df_activity_summary['day_start_local'] = pd.to_datetime(
            df_activity_summary['day_start'].apply(lambda x: x[:-6]))
        df_activity_summary = df_activity_summary.drop(
            columns=['met_1min', 'day_end', 'day_start'], axis=1)

        # Generate Activity Samples
        df_1min_list, df_5min_list = [], []
        for x in oura_data:
            # build 1 min metrics df
            df_1min = pd.Series(x['met_1min'], name='met_1min').to_frame()
            df_1min['timestamp_local'] = pd.to_datetime(
                x['day_start']) + pd.to_timedelta(df_1min.index, unit='m')
            df_1min = df_1min.set_index('timestamp_local')
            # Remove timezone info from date, we are just storing whatever the local time was, where the person was
            df_1min.index = df_1min.index.tz_localize(None)
            df_1min['summary_date'] = x['summary_date']
            df_1min_list.append(df_1min)

            # build 5 min metrics df
            df_5min = pd.Series([int(y) for y in x['class_5min']],
                                name='class_5min').to_frame()
            df_5min['class_5min_desc'] = df_5min['class_5min'].fillna(
                '5').astype('str').map({
                    '0': 'Rest',
                    '1': 'Inactive',
                    '2': 'Low',
                    '3': 'Medium',
                    '4': 'High',
                    '5': 'Non-Wear'
                })
            df_5min['timestamp_local'] = pd.to_datetime(
                x['day_start']) + pd.to_timedelta(df_5min.index * 5, unit='m')
            df_5min = df_5min.set_index('timestamp_local')
            # Remove timezone info from date, we are just storing whatever the local time was, where the person was
            df_5min.index = df_5min.index.tz_localize(None)
            df_5min['summary_date'] = x['summary_date']
            df_5min_list.append(df_5min)

        df_1min = pd.concat(df_1min_list)
        df_5min = pd.concat(df_5min_list)

        df_activity_samples = df_1min.merge(df_5min,
                                            how='left',
                                            left_index=True,
                                            right_index=True)
        df_activity_samples['summary_date'] = df_activity_samples[
            'summary_date_x']
        df_activity_samples = df_activity_samples.drop(
            columns=['summary_date_x', 'summary_date_y'], axis=1)

        return df_activity_summary, df_activity_samples
    else:
        return [], []
Example #17
def pull_sleep_data(oura, days_back=7):
    session, engine = db_connect()
    # Get latest date in db and pull everything after
    start = session.query(func.max(ouraSleepSummary.report_date))[0][0]
    engine.dispose()
    session.close()
    start = '1999-01-01' if start is None else datetime.strftime(
        start - timedelta(days=days_back), '%Y-%m-%d')

    dash_app.server.logger.debug(
        'Pulling sleep from max date in oura_sleep_summary {}'.format(start))
    oura_data = oura.sleep_summary(start=start)['sleep']

    if len(oura_data) > 0:
        # Sleep Summary
        df_sleep_summary = pd.DataFrame.from_dict(oura_data)
        # Sleep shows the 'summary' of the previous day.
        # To align with charts when filtering on date use sleep summary_date + 1 day
        df_sleep_summary['report_date'] = pd.to_datetime(
            df_sleep_summary['summary_date']) + timedelta(days=1)
        df_sleep_summary = df_sleep_summary.set_index('report_date')
        # Remove timezone offsets from bedtimes as we want whatever the time was locally
        df_sleep_summary['bedtime_end_local'] = pd.to_datetime(
            df_sleep_summary['bedtime_end'].apply(lambda x: x[:-6]))
        df_sleep_summary['bedtime_start_local'] = pd.to_datetime(
            df_sleep_summary['bedtime_start'].apply(lambda x: x[:-6]))

        df_sleep_summary = df_sleep_summary.drop(
            columns=['rmssd_5min', 'hr_5min', 'bedtime_end', 'bedtime_start'],
            axis=1)

        # Sleep Samples
        df_samples_list = []
        for x in oura_data:
            df = pd.concat([
                pd.Series(x['hr_5min'], name='hr_5min'),
                pd.Series(x['rmssd_5min'], name='rmssd_5min'),
                pd.Series([int(y) for y in x['hypnogram_5min']],
                          name='hypnogram_5min')
            ],
                           axis=1)
            df['hypnogram_5min_desc'] = df['hypnogram_5min'].map({
                1: 'Deep',
                2: 'Light',
                3: 'REM',
                4: 'Awake'
            })
            df['timestamp_local'] = pd.to_datetime(
                x['bedtime_start']) + pd.to_timedelta(df.index * 5, unit='m')

            df['summary_date'] = pd.to_datetime(x['summary_date'])
            df['report_date'] = df['summary_date'] + timedelta(days=1)
            df = df.set_index('timestamp_local')
            # Remove timezone info from date, we are just storing whatever the local time was, where the person was
            df.index = df.index.tz_localize(None)
            df_samples_list.append(df)

        df_sleep_samples = pd.concat(df_samples_list)

        return df_sleep_summary, df_sleep_samples
    else:
        return [], []
Example #18
def hrv_training_workflow(min_non_warmup_workout_time, athlete_id=1):
    '''
    Query db for oura hrv data, calculate rolling 7 day average, generate recommended workout and store in db.
    Once stored, continuously check if workout has been completed and fill in 'Completed' field
    '''

    # https://www.trainingpeaks.com/coach-blog/new-study-widens-hrv-evidence-for-more-athletes/

    session, engine = db_connect()

    # Check if entire table is empty; if so, the earliest hrv plan can start is after 30 days of hrv readings
    db_test = pd.read_sql(sql=session.query(hrvWorkoutStepLog).filter(
        hrvWorkoutStepLog.athlete_id == athlete_id).statement,
                          con=engine,
                          index_col='date')

    if len(db_test) == 0:
        min_oura_date = session.query(func.min(
            ouraSleepSummary.report_date))[0][0]
        first_plan_date = pd.to_datetime(min_oura_date + timedelta(29))
        db_test.at[first_plan_date, 'athlete_id'] = athlete_id
        db_test.at[first_plan_date, 'hrv_workout_step'] = 0
        db_test.at[first_plan_date, 'hrv_workout_step_desc'] = 'Low'
        db_test.at[first_plan_date, 'completed'] = 0
        db_test.at[
            first_plan_date,
            'rationale'] = 'This is the first date 30 day hrv thresholds could be calculated'
        db_insert(db_test, 'hrv_workout_step_log')

    # Check if a step has already been inserted for today and if so check if workout has been completed yet
    todays_plan = session.query(hrvWorkoutStepLog).filter(
        hrvWorkoutStepLog.athlete_id == athlete_id,
        hrvWorkoutStepLog.date == datetime.today().date()).first()

    if todays_plan:
        # If not yet "completed" keep checking throughout day
        if todays_plan.completed == 0:
            # If rest day, mark as completed
            if todays_plan.hrv_workout_step == 4 or todays_plan.hrv_workout_step == 5:
                todays_plan.completed = 1
                session.commit()
            else:
                workout = session.query(stravaSummary).filter(
                    stravaSummary.start_day_local == datetime.today().date(),
                    stravaSummary.elapsed_time >
                    min_non_warmup_workout_time).first()
                if workout:
                    todays_plan.completed = 1
                    session.commit()

    # If plan not yet created for today, create it
    else:
        hrv_df = pd.read_sql(
            sql=session.query(ouraSleepSummary.report_date,
                              ouraSleepSummary.rmssd).statement,
            con=engine,
            index_col='report_date').sort_index(ascending=True)

        # Wait for today's hrv to be loaded into cloud
        # (alternative gate: (datetime.now() - timedelta(hours=12)) > pd.to_datetime(datetime.today().date()))
        if hrv_df.index.max() == datetime.today().date():

            step_log_df = pd.read_sql(
                sql=session.query(
                    hrvWorkoutStepLog.date, hrvWorkoutStepLog.hrv_workout_step,
                    hrvWorkoutStepLog.completed).filter(
                        hrvWorkoutStepLog.athlete_id == 1).statement,
                con=engine,
                index_col='date').sort_index(ascending=False)

            step_log_df = step_log_df[step_log_df.index ==
                                      step_log_df.index.max()]

            # Store last step in variable for starting point in loop
            last_db_step = step_log_df['hrv_workout_step'].iloc[0]

            # Resample to today
            step_log_df.at[pd.to_datetime(datetime.today().date()),
                           'hrv_workout_step'] = None
            step_log_df = step_log_df.set_index(
                pd.to_datetime(step_log_df.index))
            step_log_df = step_log_df.resample('D').mean()
            # Remove first row from df so it does not get re-inserted into db
            step_log_df = step_log_df.iloc[1:]

            # We already know there is no step for today (todays_plan was empty above), so manually add today's date
            step_log_df.at[pd.to_datetime(datetime.today().date()),
                           'completed'] = 0

            # Check if gap between today and max date in step log, if so merge in all workouts for 'completed' flag
            if step_log_df['completed'].isnull().values.any():
                workouts = pd.read_sql(sql=session.query(
                    stravaSummary.start_day_local,
                    stravaSummary.activity_id).filter(
                        stravaSummary.elapsed_time >
                        min_non_warmup_workout_time).statement,
                                       con=engine,
                                       index_col='start_day_local')
                # Resample workouts to the per day level - just take max activity_id in case there was more than 1 workout for that day to avoid duplication of hrv data
                workouts = workouts.set_index(pd.to_datetime(workouts.index))
                workouts = workouts.resample('D').max()
                step_log_df = step_log_df.merge(workouts,
                                                how='left',
                                                left_index=True,
                                                right_index=True)
                # Completed = True if a workout (not just warmup) was done on that day or was a rest day
                for x in step_log_df.index:
                    step_log_df.at[x, 'completed'] = 0 if np.isnan(
                        step_log_df.at[x, 'activity_id']) else 1

            # Generate column with yesterday's plan completion status for looping below through workout cycle logic
            step_log_df['completed_yesterday'] = step_log_df[
                'completed'].shift(1)

            # Drop historical rows that were used for 'yesterday calcs' so we are only working with todays data
            # step_log_df = step_log_df.iloc[1:]

            # Calculate HRV metrics
            hrv_df.set_index(pd.to_datetime(hrv_df.index), inplace=True)
            hrv_df = hrv_df.resample('D').mean()

            hrv_df['rmssd_7'] = hrv_df['rmssd'].rolling(7,
                                                        min_periods=0).mean()
            hrv_df['rmssd_7_yesterday'] = hrv_df['rmssd_7'].shift(1)
            hrv_df['rmssd_30'] = hrv_df['rmssd'].rolling(30,
                                                         min_periods=0).mean()
            hrv_df['stdev_rmssd_30_threshold'] = hrv_df['rmssd'].rolling(
                30, min_periods=0).std() * .5
            hrv_df['swc_upper'] = hrv_df['rmssd_30'] + hrv_df[
                'stdev_rmssd_30_threshold']
            hrv_df['swc_lower'] = hrv_df['rmssd_30'] - hrv_df[
                'stdev_rmssd_30_threshold']
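            # e.g., a 30 day mean rmssd of 60 ms with a 30 day std of 10 ms
            # gives a smallest-worthwhile-change band of 55-65 ms (mean +/- 0.5 * std)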
            hrv_df['under_low_threshold'] = hrv_df['rmssd_7'] < hrv_df[
                'swc_lower']
            hrv_df['under_low_threshold_yesterday'] = hrv_df[
                'under_low_threshold'].shift(1)
            hrv_df['over_upper_threshold'] = hrv_df['rmssd_7'] > hrv_df[
                'swc_upper']
            hrv_df['over_upper_threshold_yesterday'] = hrv_df[
                'over_upper_threshold'].shift(1)
            for i in hrv_df.index:
                hrv_df.at[i, 'lower_threshold_crossed'] = (
                    hrv_df.at[i, 'under_low_threshold_yesterday'] == False
                    and hrv_df.at[i, 'under_low_threshold'] == True)
                hrv_df.at[i, 'upper_threshold_crossed'] = (
                    hrv_df.at[i, 'over_upper_threshold_yesterday'] == False
                    and hrv_df.at[i, 'over_upper_threshold'] == True)
            # Merge dfs
            df = pd.merge(step_log_df,
                          hrv_df,
                          how='left',
                          right_index=True,
                          left_index=True)

            last_step = last_db_step
            for i in df.index:
                # Completed / completed_yesterday could show erroneous data for rest days, as the 0 is brought in based on whether a workout is found in strava summary
                if last_step == 4 or last_step == 5:
                    df.at[i, 'completed_yesterday'] = 1

                hrv_increase = df.at[i, 'rmssd_7'] >= df.at[
                    i, 'rmssd_7_yesterday']

                ### Low Threshold Exceptions ###
                # If lower threshold is crossed, switch to low intensity track
                if df.at[i, 'lower_threshold_crossed'] == True:
                    current_step = 4
                    rationale = '7 day HRV average crossed the 30 day baseline lower threshold.'
                    dash_app.server.logger.debug(
                        'Lower threshold crossed. Setting current step = 4')
                # If we are below lower threshold, rest until back over threshold
                elif df.at[i, 'under_low_threshold'] == True:
                    current_step = 5
                    rationale = '7 day HRV average is under the 30 day baseline lower threshold.'
                    dash_app.server.logger.debug(
                        'HRV is under threshold. Setting current step = 5')

                ### Upper Threshold Exceptions ###
                # If upper threshold is crossed, switch to high intensity
                elif df.at[i, 'upper_threshold_crossed'] == True:
                    current_step = 1
                    rationale = '7 day HRV average crossed the 30 day baseline upper threshold.'
                    dash_app.server.logger.debug(
                        'Upper threshold crossed. Setting current step = 1')
                # If we are above upper threshold, load high intensity until back under threshold
                elif df.at[i, 'over_upper_threshold'] == True:
                    if hrv_increase:
                        current_step = 1
                        rationale = '7 day HRV average increased and is still over the 30 day baseline upper threshold.'
                    else:
                        current_step = 2
                        rationale = "7 day HRV average decreased but is still over the 30 day baseline upper threshold."
                    dash_app.server.logger.debug(
                        'HRV is above threshold. Setting current step = {}.'.
                        format(current_step))

                ### Missed Workout Exceptions ###
                # If workout was not completed yesterday but we are still within thresholds and current step is high/moderate go high if hrv increases, or stay on moderate if hrv decreases
                elif df.at[i, 'completed_yesterday'] == 0 and df.at[
                        i, 'under_low_threshold'] == False and df.at[
                            i, 'over_upper_threshold'] == False and (
                                last_step == 1 or last_step == 2):
                    if hrv_increase:
                        current_step = 1
                        rationale = "7 day HRV average increased and yesterday's workout was not completed."
                    else:
                        current_step = 2
                        rationale = "7 day HRV average decreased and yesterday's workout was not completed."
                    dash_app.server.logger.debug(
                        'No workout detected for previous day however still within thresholds. Maintaining last step = {}'
                        .format(current_step))
                else:
                    dash_app.server.logger.debug(
                        'No exceptions detected. Following the normal workout plan workflow.'
                    )
                    rationale = '7 day HRV average is within the thresholds. Following the normal workout plan workflow.'
                    # Workout workflow logic when no exceptions
                    if last_step == 0:
                        current_step = 1
                    elif last_step == 1:
                        current_step = 2 if hrv_increase else 6
                    elif last_step == 2:
                        current_step = 3
                    elif last_step == 3:
                        current_step = 1 if hrv_increase else 4
                    elif last_step == 4:
                        current_step = 6 if hrv_increase else 5
                    elif last_step == 5:
                        current_step = 6
                    elif last_step == 6:
                        current_step = 1 if hrv_increase else 4

                if current_step == 4 or current_step == 5:
                    df.at[i, 'completed'] = 1
                df.at[i, 'hrv_workout_step'] = current_step
                last_step = current_step

                df.at[i, 'rationale'] = rationale

            df['athlete_id'] = athlete_id
            df['hrv_workout_step_desc'] = df['hrv_workout_step'].map({
                0: 'Low',
                1: 'High',
                2: 'HIIT/MOD',
                3: 'Low',
                4: 'Rest',
                5: 'Rest',
                6: 'Low'
            })

            # Insert into db
            df = df[[
                'athlete_id', 'hrv_workout_step', 'hrv_workout_step_desc',
                'completed', 'rationale'
            ]]
            db_insert(df, 'hrv_workout_step_log')

    engine.dispose()
    session.close()
Example #19
    def wss_score(self):
        '''
        Loop through each workout, calculate 1rm for all exercises for the given date, save to fitbod table, and calculate wSS for summary table
        :return: (workout_tss, ri), or (None, None) when no workout data is found
        '''

        # Calculating inol at individual exercise level
        # https://www.powerliftingwatch.com/files/prelipins.pdf

        # Default inol assigned to bodyweight exercises where INOL cannot be calculated
        base_inol = .45
        # Set max inol an exercise can hit per workout (sum of sets)
        max_inol_per_exercise = 2

        # Convert pd timestamp to a date to compare in sqlalchemy queries
        date = self.start_date.date()
        # Query exercises within trailing 180 days of exercise date
        session, engine = db_connect()
        df = pd.read_sql(sql=session.query(fitbod).filter(
            (date - timedelta(days=180)) <= cast(fitbod.date_utc, Date),
            cast(fitbod.date_utc, Date) <= date).statement,
                         con=engine).sort_index(ascending=True)
        engine.dispose()
        session.close()

        # If no workout data found, return None as a WSS score cannot be generated
        if len(df) == 0:
            return None, None

        else:
            df['Volume'] = df['Reps'].replace(0, 1) * df['Weight'].replace(
                0, 1) * df['Duration'].replace(0, 1)
            # Get 'Best' sets on all exercises in the trailing window preceding the current workout being analyzed
            df_1rm = df.copy()
            # Don't include current workout to get 1RMs to compare against
            df_1rm = df_1rm[df_1rm['date_UTC'] != date]
            df_1rm = df_1rm.loc[df_1rm.groupby('Exercise')['Volume'].agg(
                pd.Series.idxmax)].reset_index()
            # Calculate Brzycki 1RM based off the trailing window of workouts
            df_1rm['one_rep_max'] = (df_1rm['Weight'] *
                                     (36 / (37 - df_1rm['Reps'])))
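            # e.g., a best set of 8 reps at 100 lbs estimates
            # 100 * (36 / (37 - 8)) ~= 124 lbs for the one rep max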

            # TODO: Update from just adding 30% to max to a more accurate 1 'rep' max formula
            # Calculate max weight_duration for intensity on timed exercises that could use weights (i.e. planks) (+30% on max weight_duration)
            df_1rm.at[df_1rm['Reps'] == 0,
                      'weight_duration_max'] = (df_1rm['Weight'].replace(
                          0, 1) * df_1rm['Duration'].replace(0, 1)) * 1.3
            # Calculate max reps (for bodyweight exercises) (+30% on max reps)
            df_1rm.at[df_1rm['Weight'] == 0,
                      'max_reps'] = df_1rm['Volume'] * 1.3

            # Filter main df back to current workout which we are assigning 1rms to

            df = df[df['date_UTC'] == date]
            # Merge in 1rms
            df = df.merge(
                df_1rm[['Exercise', 'one_rep_max', 'weight_duration_max']],
                how='left',
                left_on='Exercise',
                right_on='Exercise')

            # Replace table placeholders with max values
            df['one_rep_max'] = df['one_rep_max_y'].fillna(0.0)
            df['weight_duration_max'] = df['weight_duration_max_y'].fillna(0)

            # Save 1rms to fitbod table
            session, engine = db_connect()
            for exercise in df[~np.isnan(df['one_rep_max']
                                         )]['Exercise'].drop_duplicates():
                records = session.query(fitbod).filter(
                    cast(fitbod.date_utc, Date) == date,
                    fitbod.exercise == exercise).all()
                for record in records:
                    record.one_rep_max = float(df.loc[
                        df['Exercise'] == exercise]['one_rep_max'].values[0])
                    record.weight_duration_max = int(
                        df.loc[df['Exercise'] ==
                               exercise]['weight_duration_max'].values[0])
                    session.commit()
            engine.dispose()
            session.close()

            df['set_intensity'] = df['Weight'] / df['one_rep_max']
            # Restrict max intensity from being 1 or greater for INOL calc
            df.at[df['set_intensity'] >= 1, 'set_intensity'] = .99

            # Set all inol to base INOLs so score gets applied to bodyweight exercises
            df['inol'] = base_inol
            # Calculate INOL where one_rep_max's exist in last 6 weeks
            df.at[((df['Weight'] != 0) & (df['one_rep_max'] != 0)),
                  'inol'] = df['Reps'] / ((1 - (df['set_intensity'])) * 100)
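            # e.g., a set of 8 reps at 75% of 1RM scores 8 / ((1 - 0.75) * 100) = 0.32 INOL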

            # If one rep max was hit, set the exercise inol to max inol per exercise
            df = df.groupby(['date_UTC', 'Exercise']).sum().reset_index()
            df.at[(df['inol'] > max_inol_per_exercise),
                  'inol'] = max_inol_per_exercise

            # ## Doesn't Work well with INOL Formula since both reps and weight are really required for it to work
            # # For bodyweight exercise, there is no weight, so use reps/max reps for intensity
            # df.at[df['Weight'] == 0, 'inol'] = df['Reps'] / ((1 - (df['Reps'] / df['max_reps'])) * 100)
            # # For timed exercises i.e. plank, there are no reps, use max duration * weight for intensity, and '1' for the numerator
            # df['weight_duration'] = (df['Duration'].replace(0, 1) * df['Weight'].replace(0, 1))
            # df.at[((df['Reps'] == 0) & (df['Duration'] != 0)), 'inol'] = 1 / ((1 - (df['weight_duration'] / df['weight_duration_max'])) * 100)
            # ####

            # Convert INOLs to WSS
            # Get max amount of possible INOL from workout at a rate of 2 INOL per exercise
            max_inol_possible = df['Exercise'].nunique() * max_inol_per_exercise
            # Calculate relative intensity (how hard the workout was relative to the hardest it could have been, expressed in INOL)
            ri = df['inol'].sum() / max_inol_possible
            # Estimate TSS Based on Intensity Factor and Duration
            # https://www.trainingpeaks.com/blog/how-to-plan-your-season-with-training-stress-score/
            workout_tss = ri * ri * (self.df_samples['time'].max() /
                                     3600) * 100
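            # e.g., ri = 0.5 over a 2 hour workout estimates 0.5 * 0.5 * 2 * 100 = 50 TSS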

            # Get max amount of possible TSS based on TSS per sec
            # max_tss_per_sec = (100 / 60 / 60)

            # TODO: Update seconds with datediff once set timestamps are added to dataset, for now use entire length of workout
            # max_tss_possible = workout_seconds * max_tss_per_sec
            # workout_tss = max_tss_possible * relative_intensity
            # df['seconds'] = 60
            # max_tss_possible = df['seconds'].sum() * max_tss_per_sec

            # Calculate WSS
            # df['wSS'] = (max_tss_per_sec * df['seconds']) * (df['inol'] / max_inol_possible)
            # return df['wSS'].sum()
            return workout_tss, ri
Example #20
def pull_fitbod_data():
    # TODO: Update instead of truncate and load to only load/delete new data so as to preserve 1rm calculations
    dash_app.server.logger.debug('Logging into Nextcloud')
    oc = owncloud.Client(config.get('nextcloud', 'url'))
    # Login to NextCloud
    oc.login(config.get('nextcloud', 'username'),
             config.get('nextcloud', 'password'))
    # Get filename
    try:
        filepath = oc.list(config.get('fitbod', 'path'))[0].path
        dash_app.server.logger.debug('Fitbod file found')
    except BaseException:
        dash_app.server.logger.debug('No fitbod file found on nextcloud')
        filepath = None
    if filepath:
        filename = filepath.split('/')[-1]
        # Download file
        oc.get_file(filepath)
        # Convert file into df
        df = pd.read_csv(filename)

        # Remove non-lifting exercises
        df = df[df['Distance(m)'] == 0]

        # Drop cardio, mobility and stretching movements by name
        non_lifting = [
            'Running', 'Cycling', 'Rowing', 'Elliptical', 'Stair Stepper',
            'Foam', 'Cat Cow', "Child's Pose", 'Downward Dog', 'Up Dog',
            'Stretch', 'Butt Kick', 'Chest Expansion', 'Chin Drop',
            'Crab Pose', 'Dead Hang', 'Head Tilt', 'Pigeon Pose',
            'Reach Behind and Open', 'Seated Figure Four',
            'Seated Forward Bend', 'Standing Forward Bend',
            'Shin Box Hip Flexor', 'Shin Box Quad',
            'Single Leg Straight Forward Bend', 'Standing Hip Circle',
            'Walkout', 'Walkout to Push Up'
        ]
        df = df[~df['Exercise'].str.contains('|'.join(non_lifting))]

        # Create lbs column
        df['Weight'] = df['Weight(kg)'] * 2.20462
        # Modify columns in df as needed
        df['Date_UTC'] = pd.to_datetime(df['Date'])
        # Placeholder column for 1rm, max_reps
        df['one_rep_max'] = np.nan
        df['weight_duration_max'] = np.nan
        # Rename duration
        df = df.rename(columns={'Duration(s)': 'Duration'})
        # Remove unnecessary columns
        df = df[[
            'Date_UTC', 'Exercise', 'Reps', 'Weight', 'Duration', 'isWarmup',
            'Note', 'one_rep_max', 'weight_duration_max'
        ]]
        # TODO: Date currently is not unique to set - only unique to workout so should not be used as index
        # df = df.set_index('Date_UTC')
        df.index.name = 'id'
        # DB Operations
        session, engine = db_connect()
        # Delete current database table
        try:
            session.execute('DROP TABLE fitbod')
            session.commit()
        except BaseException:
            pass
        # Insert fitbod table into DB
        df.to_sql('fitbod', engine, if_exists='append', index=True)
        session.commit()
        engine.dispose()
        session.close()
        # Delete file in local folder
        os.remove(filename)
        # Empty the dir on nextcloud
        oc.delete(filepath)