Example #1
    def get(self, request):
        user = request.user
        sleep_activities = SleepLog.objects.filter(user=user)

        serializer = SleepActivityDataframeBuilder(sleep_activities)
        sleep_aggregate = serializer.get_sleep_history_series()

        # pandas uses a Timestamp index, which a plain json.dumps cannot serialize,
        # so serialize with to_json first and json.loads the result back into a
        # dict that DRF's Response can transmit
        result = sleep_aggregate.to_json(date_format='iso')
        result = json.loads(result)
        return Response(data=result)
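
The round trip in the comment above is easy to show in isolation. A minimal sketch, assuming only pandas and a datetime-indexed Series; the data is made up:

    import json

    import pandas as pd

    series = pd.Series([480, 420], index=pd.date_range('2017-09-01', periods=2))

    # Timestamp keys are not JSON serializable, so json.dumps(series.to_dict())
    # would raise; to_json stringifies the index, and json.loads turns the result
    # back into a plain dict that DRF's Response can serialize
    payload = json.loads(series.to_json(date_format='iso'))
    # -> {'2017-09-01T00:00:00.000Z': 480, ...} (the exact ISO string varies by pandas version)
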
Example #2
    def _get_sleep_series_last_year(user):
        """
        :param user:
        :return: Series data of how much sleep that person has gotten minutes
        """
        start_date = get_current_date_years_ago(1)
        sleep_events = SleepLog.objects.filter(user=user, start_time__date__gte=start_date)
        builder = SleepActivityDataframeBuilder(sleep_events)
        series = builder.get_sleep_history_series()

        # any day where sleep is recorded as zero should be treated as missing
        # (NaN) so it does not skew downstream averages
        series[series == 0] = np.nan

        return series
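
A minimal sketch of why the zeros are masked, relying on pandas' default NaN-skipping aggregations; the numbers are made up:

    import numpy as np
    import pandas as pd

    sleep = pd.Series([480.0, 0.0, 450.0])
    sleep.mean()                # 310.0 - the unlogged day drags the average down
    sleep[sleep == 0] = np.nan
    sleep.mean()                # 465.0 - pandas aggregations skip NaN by default
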
Example #3
    def get(self, request):
        try:
            window = int(request.query_params[LOOKBACK_PARAM_NAME])
        except MultiValueDictKeyError:
            # MultiValueDictKeyError means the lookback param wasn't supplied,
            # so fall back to a one-day window
            window = 1
        except ValueError:
            # ValueError means the supplied window couldn't be parsed as an integer
            return Response(status=400)

        user = request.user

        sleep_activities = SleepLog.objects.filter(user=user)
        builder = SleepActivityDataframeBuilder(sleep_activities)

        sleep_aggregate = builder.get_sleep_history_series()
        sleep_average = sleep_aggregate.rolling(window=window, min_periods=1).mean()

        result = sleep_average.to_json(date_format='iso')
        result = json.loads(result)
        return Response(data=result)
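
A minimal sketch of what the rolling average does with the lookback window; the data is made up:

    import pandas as pd

    sleep = pd.Series([480, 420, 510, 450],
                      index=pd.date_range('2017-09-01', periods=4))

    # window=2 averages each day with the previous one; min_periods=1 keeps the
    # first row (a partial window) instead of turning it into NaN
    sleep.rolling(window=2, min_periods=1).mean()
    # 2017-09-01    480.0
    # 2017-09-02    450.0
    # 2017-09-03    465.0
    # 2017-09-04    480.0
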
Example #4
    def get(self, request):
        user = request.user

        bytes_io = io.BytesIO()
        writer = pd.ExcelWriter(bytes_io,
                                engine='xlsxwriter',
                                options={'remove_timezone': True})

        # supplement events
        supplement_events_worksheet_name = 'SupplementEvents'
        supplement_events = SupplementLog.objects.filter(user=user)
        df_builder = SupplementEventsDataframeBuilder(supplement_events)
        supplement_events_df = df_builder.get_flat_daily_dataframe()
        self._write_to_workbook(writer, supplement_events_df,
                                supplement_events_worksheet_name)

        # sleep events
        sleep_activities_worksheet_name = 'SleepActivities'
        sleep_activities = SleepLog.objects.filter(user=user)
        df_builder = SleepActivityDataframeBuilder(sleep_activities)
        sleep_activities_series = df_builder.get_sleep_history_series()
        self._write_to_workbook(writer, sleep_activities_series,
                                sleep_activities_worksheet_name)

        # user activity events
        user_activity_events_sheet_name = 'UserActivityEvents'
        user_activity_events = UserActivityLog.objects.filter(user=user)
        df_builder = UserActivityEventDataframeBuilder(user_activity_events)
        user_activity_events_df = df_builder.get_flat_daily_dataframe()
        self._write_to_workbook(writer, user_activity_events_df,
                                user_activity_events_sheet_name)

        # productivity logs
        productivity_log_sheet_name = 'DailyProductivityLog'
        productivity_log = DailyProductivityLog.objects.filter(user=user)
        df_builder = ProductivityLogEventsDataframeBuilder(productivity_log)
        # oddly, this builder doesn't return rows sorted by date, so sort explicitly
        productivity_log_df = df_builder.get_flat_daily_dataframe().sort_index(
            ascending=True)
        self._write_to_workbook(writer, productivity_log_df,
                                productivity_log_sheet_name)

        all_dataframes = [
            productivity_log_df, supplement_events_df, user_activity_events_df
        ]
        concat_dataframe = pd.concat(all_dataframes, axis=1)

        # include sleep, which is a series rather than a dataframe
        cumulative_log_sheet_name = 'Aggregate Log'
        concat_dataframe[SLEEP_MINUTES_COLUMN] = sleep_activities_series
        self._write_to_workbook(writer, concat_dataframe,
                                cumulative_log_sheet_name)

        cumulative_14_day_dataframe_sheet_name = 'Aggregate 14 Log'
        cumulative_14_day_dataframe = concat_dataframe.rolling(
            window=14, min_periods=1).sum()[14:]
        self._write_to_workbook(writer, cumulative_14_day_dataframe,
                                cumulative_14_day_dataframe_sheet_name)

        cumulative_28_day_dataframe_sheet_name = 'Aggregate 28 Log'
        cumulative_28_day_dataframe = concat_dataframe.rolling(
            window=28, min_periods=1).sum()[28:]
        self._write_to_workbook(writer, cumulative_28_day_dataframe,
                                cumulative_28_day_dataframe_sheet_name)

        # close the writer so all output gets written to the BytesIO buffer
        writer.close()

        # plain HttpResponse since this returns a file payload rather than a
        # rendered template
        response = HttpResponse(
            bytes_io.getvalue(),
            content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
        response['Content-Disposition'] = 'attachment; filename=user_export_data.xlsx'
        return response
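
The BytesIO/ExcelWriter flow above reduces to a few lines. A minimal sketch, assuming xlsxwriter is installed; note that newer pandas releases spell the writer options as engine_kwargs={'options': {'remove_timezone': True}} rather than options=...:

    import io

    import pandas as pd

    bytes_io = io.BytesIO()
    # writing through the context manager closes the workbook, flushing every
    # sheet into the in-memory buffer
    with pd.ExcelWriter(bytes_io, engine='xlsxwriter') as writer:
        pd.DataFrame({'minutes': [480, 420]}).to_excel(writer, sheet_name='Sheet1')

    xlsx_bytes = bytes_io.getvalue()  # the raw .xlsx payload for the HttpResponse
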
Example #5
def get_sleep_activity_series(queryset):
    builder = SleepActivityDataframeBuilder(queryset)
    series = builder.get_sleep_history_series()
    return series
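
A usage sketch for the helper above, assuming a SleepLog queryset like the ones in the other examples:

    queryset = SleepLog.objects.filter(user=user)
    sleep_series = get_sleep_activity_series(queryset)  # datetime-indexed sleep minutes
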
Example #6
    def get(self, request, supplement_uuid):
        # TODO - Refactor this garbage; a smart Redis caching layer could be added here

        supplement = get_object_or_404(Supplement, uuid=supplement_uuid, user=request.user)
        user = request.user

        serializer = SupplementLogRequestParametersSerializer(data=request.query_params)
        serializer.is_valid(raise_exception=True)
        params = serializer.validated_data

        start_date = params['start_date']
        end_date = get_current_userdate(user)

        supplement_events = SupplementLog.objects.filter(
            user=user, supplement=supplement, time__date__gte=start_date, time__date__lte=end_date)

        # no point if nothing exists
        if not supplement_events.exists():
            return Response([])

        # lots of crappy templating here, sorry.
        supplement_builder = SupplementEventsDataframeBuilder(supplement_events)
        # TODO - Really feels like you should build a helper on the builder to do this since you do it so often
        supplement_series = supplement_builder.build_dataframe()['Quantity'].sort_index()

        # the builder's dataframe also includes columns like "source"; we only
        # care about quantity, so take that column and recast it as numeric
        supplement_series = pd.to_numeric(supplement_series)

        productivity_logs = DailyProductivityLog.objects.filter(
            user=user, date__gte=start_date, date__lte=end_date)
        productivity_builder = ProductivityLogEventsDataframeBuilder(productivity_logs)
        productivity_series = productivity_builder.get_productive_timeseries()

        sleep_logs = SleepLog.objects.filter(user=user, start_time__date__gte=start_date)
        sleep_builder = SleepActivityDataframeBuilder(sleep_logs, user)
        sleep_series = sleep_builder.get_sleep_history_series()

        dataframe_details = {
            'sleep_time': sleep_series,
            'productivity_time': productivity_series,
            'quantity': supplement_series,
        }

        dataframe = pd.DataFrame(dataframe_details)
        # converting to local time isn't strictly necessary, but it makes debugging easier
        dataframe_localized = dataframe.tz_convert(user.pytz_timezone)

        """
        because events are datetime based, but productivity and sleep are date-based
        this parts get a little hairy, but we want the nans for 8/30 and 9/01 to be filled
        however, we cant just pad fill because if a log for productivity and sleep was missing
        the wrong result would be filled. so ... the code below is slightly magical


                                    productivity_time       sleep_time  quantity
        2017-08-30 00:00:00-04:00               1336.0  647.013778         0.0
        2017-08-30 19:51:36.483443-04:00           NaN         NaN         1.0
        2017-08-31 00:00:00-04:00               1476.0  726.132314         0.0
        2017-09-01 00:00:00-04:00                730.0  513.894938         0.0
        2017-09-01 14:51:36.483443-04:00           NaN         NaN         1.0
        """
        if not params['frequency']:
            dataframe_localized_date_index = dataframe_localized.index.date
            dataframe_localized_date_index = pd.DatetimeIndex(dataframe_localized_date_index,
                                                              tz=request.user.pytz_timezone)

            productivity_series = dataframe_localized['productivity_time'].dropna()
            productivity_series_filled = productivity_series[dataframe_localized_date_index]

            sleep_series = dataframe_localized['sleep_time'].dropna()
            sleep_series_filled = sleep_series[dataframe_localized_date_index]

            dataframe_localized['productivity_time'] = productivity_series_filled.values
            dataframe_localized['sleep_time'] = sleep_series_filled.values

            valid_supplement_index = dataframe_localized['quantity'].dropna().index
            dataframe_localized = dataframe_localized.loc[valid_supplement_index]

        elif params['frequency'] == DAILY_FREQUENCY:
            dataframe_localized = dataframe_localized.resample('D').sum()

        elif params['frequency'] == MONTHLY_FREQUENCY:
            dataframe_localized = dataframe_localized.resample('M').sum()

        dataframe_localized = update_dataframe_to_be_none_instead_of_nan_for_api_responses(dataframe_localized)

        results = []
        for index, values in dataframe_localized.iterrows():
            time = index.isoformat()
            result = values.to_dict()
            result['time'] = time
            result['uniqueKey'] = '{}-{}'.format(time, result['quantity'])

            results.append(result)

        return Response(results)
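
The "slightly magical" date-based fill above can be shown in isolation. A minimal sketch, assuming a tz-aware index; the names and numbers are made up:

    import pandas as pd

    tz = 'US/Eastern'
    event_index = pd.DatetimeIndex(
        ['2017-08-30 00:00', '2017-08-30 19:51', '2017-08-31 00:00'], tz=tz)
    daily = pd.Series([1336.0, 1476.0],
                      index=pd.DatetimeIndex(['2017-08-30', '2017-08-31'], tz=tz))

    # floor every timestamp to its calendar date, then look each date up in the
    # daily series; intraday event rows inherit that day's value, and unlike a
    # pad fill, a missing day never inherits the previous day's value
    date_index = pd.DatetimeIndex(event_index.date, tz=tz)
    filled = daily[date_index].values  # array([1336., 1336., 1476.])
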