Exemplo n.º 1
0
def download_page():
    """Render the landing page for data downloads."""
    context = dict(
        is_admin=researcher_is_an_admin(),
        allowed_studies=get_researcher_allowed_studies(),
        domain_name=DOMAIN_NAME,
    )
    return render_template("download_landing_page.html", **context)
Exemplo n.º 2
0
def manage_credentials():
    """Render the credential-management page, listing the session researcher's API keys newest-first."""
    researcher = get_session_researcher()
    key_data = ApiKeySerializer(ApiKey.objects.filter(researcher=researcher), many=True).data
    # newest keys first, ordered by creation timestamp
    key_data = sorted(key_data, key=lambda entry: entry['created_on'], reverse=True)
    return render_template(
        'manage_credentials.html',
        is_admin=researcher_is_an_admin(),
        api_keys=key_data,
    )
Exemplo n.º 3
0
def inject_html_params():
    """Context processor: expose these values to every template rendered on this blueprint."""
    context = {}
    context["allowed_studies"] = get_researcher_allowed_studies()
    context["is_admin"] = researcher_is_an_admin()
    context["session_researcher"] = get_session_researcher()
    return context
def inject_html_params():
    """Context processor: make these values available to every template rendering on this blueprint."""
    return dict(
        allowed_studies=get_researcher_allowed_studies(),
        users_by_study=participants_by_study(),
        is_admin=researcher_is_an_admin(),
    )
Exemplo n.º 5
0
def choose_study():
    """Show the "Choose Study" page, or jump straight to the study when only one is permitted."""
    allowed_studies = get_researcher_allowed_studies_as_query_set()

    # A researcher authorized for exactly one study goes directly to that study's page.
    if allowed_studies.count() == 1:
        only_pk = allowed_studies.values_list('pk', flat=True).get()
        return redirect('/view_study/{:d}'.format(only_pk))

    unpacked_studies = [study.as_unpacked_native_python() for study in allowed_studies]
    return render_template(
        'choose_study.html',
        studies=unpacked_studies,
        is_admin=researcher_is_an_admin(),
    )
Exemplo n.º 6
0
def dashboard_page(study_id):
    """Render the general dashboard view for a single study."""
    study = Study.get_or_404(pk=study_id)
    # patient ids of every participant in the study, for the dashboard listing
    patient_ids = Participant.objects.filter(study=study_id).values_list("patient_id", flat=True)
    return render_template(
        'dashboard/dashboard.html',
        study=study,
        participants=list(patient_ids),
        study_id=study_id,
        data_stream_dict=COMPLETE_DATA_STREAM_DICT,
        allowed_studies=get_researcher_allowed_studies(),
        is_admin=researcher_is_an_admin(),
        page_location='dashboard_landing',
    )
Exemplo n.º 7
0
def render_edit_survey(survey_id=None):
    """Render the survey editor for the given survey id; 404 when it does not exist."""
    try:
        survey = Survey.objects.get(pk=survey_id)
    except Survey.DoesNotExist:
        return abort(404)

    # map intervention pk -> name for the editor's intervention dropdowns
    interventions = {
        intervention.id: intervention.name
        for intervention in survey.study.interventions.all()
    }
    return render_template(
        'edit_survey.html',
        survey=survey.as_unpacked_native_python(),
        study=survey.study,
        allowed_studies=get_researcher_allowed_studies(),
        is_admin=researcher_is_an_admin(),
        domain_name=DOMAIN_NAME,  # used in a Javascript alert, see survey-editor.js
        interventions_dict=interventions,
        weekly_timings=survey.weekly_timings(),
        relative_timings=survey.relative_timings(),
        absolute_timings=survey.absolute_timings(),
        push_notifications_enabled=PUSH_NOTIFICATIONS_ENABLED,
    )
Exemplo n.º 8
0
def render_edit_survey(survey_id=None):
    """Render the survey editor page, aborting with a 404 for an unknown survey id."""
    try:
        survey = Survey.objects.get(pk=survey_id)
    except Survey.DoesNotExist:
        return abort(404)

    # push notifications are enabled if a firebase instance exists for either platform
    push_enabled = (
        check_firebase_instance(require_android=True)
        or check_firebase_instance(require_ios=True)
    )
    # today's date in the study's local timezone
    study_today = localtime(timezone.now(), survey.study.timezone).strftime('%Y-%m-%d')
    return render_template(
        'edit_survey.html',
        survey=survey.as_unpacked_native_python(),
        study=survey.study,
        allowed_studies=get_researcher_allowed_studies(),
        is_admin=researcher_is_an_admin(),
        domain_name=DOMAIN_NAME,  # used in a Javascript alert, see survey-editor.js
        interventions_dict={
            intervention.id: intervention.name
            for intervention in survey.study.interventions.all()
        },
        weekly_timings=survey.weekly_timings(),
        relative_timings=survey.relative_timings(),
        absolute_timings=survey.absolute_timings(),
        push_notifications_enabled=push_enabled,
        today=study_today,
    )
Exemplo n.º 9
0
def _dashboard_stream_display_data(first_day, last_day, start, end, participant_objects,
                                   stream_data, match_fn):
    """ Build the per-participant display data for one data stream over a date range.

    Returns (unique_dates, next_url, past_url, byte_streams, data_exists), where
    byte_streams maps patient_id -> [match_fn(that participant's stream_data, date)
    for each unique date], and data_exists reports whether any entry is non-None. """
    unique_dates, _, _ = get_unique_dates(start, end, first_day, last_day)
    next_url, past_url = create_next_past_urls(first_day, last_day, start=start, end=end)
    # the byte streams per date for each patient for this specific data stream
    byte_streams = OrderedDict(
        (participant.patient_id,
         [match_fn(stream_data[participant.patient_id], date) for date in unique_dates])
        for participant in participant_objects
    )
    # whether there is any data at all to display
    data_exists = any(
        data is not None for patient in byte_streams for data in byte_streams[patient]
    )
    return unique_dates, next_url, past_url, byte_streams, data_exists


def get_data_for_dashboard_datastream_display(study_id, data_stream):
    """ Parses information for the data stream dashboard view.

    GET and POST are handled in the same function because the body of the GET
    rendering relies on the variables set while handling a POST -- a POST must not
    fall through to the full GET processing. """
    study = Study.get_or_404(pk=study_id)

    if request.method == "POST":
        color_low_range, color_high_range, all_flags_list = \
            set_default_settings_post_request(study, data_stream)
        show_color = "false" if color_low_range == 0 and color_high_range == 0 else "true"
    else:
        color_low_range, color_high_range, show_color = extract_range_args_from_request()
        all_flags_list = extract_flag_args_from_request()

    # one query via first() instead of the previous exists() + get() pair
    settings = DashboardColorSetting.objects.filter(data_type=data_stream, study=study).first()
    if settings is not None:
        default_filters = DashboardColorSetting.get_dashboard_color_settings(settings)
    else:
        default_filters = ""

    # -------------------------------- dealing with color settings ---------------------------------
    # If default settings are saved, use them unless the user has overridden them.
    if default_filters != "":
        inflection_info = default_filters["inflections"]
        if all_flags_list == [] and color_high_range is None and color_low_range is None:
            # since none of the filters are set, parse default filters to pass in the
            # default settings, and set the values for the gradient filter.
            # backend: color_range_min, color_range_max --> frontend: color_low_range,
            # color_high_range; the naming is consistent throughout the back and front ends
            if settings.gradient_exists():
                gradient_info = default_filters["gradient"]
                color_low_range = gradient_info["color_range_min"]
                color_high_range = gradient_info["color_range_max"]
                show_color = "true"
            else:
                color_high_range = 0
                color_low_range = 0
                show_color = "false"

            # set the values for the flag/inflection filter*s*
            # the html is expecting a list of lists for the flags [[operator, value], ... ]
            all_flags_list = [
                [flag_info["operator"], flag_info["inflection_point"]]
                for flag_info in inflection_info
            ]

    # change the url param from the jinja "true"/"false" strings to a python bool
    show_color = show_color == "true"

    # -----------------------------------  general data fetching -----------------------------------
    start, end = extract_date_args_from_request()
    participant_objects = Participant.objects.filter(study=study_id).order_by("patient_id")
    unique_dates = []
    next_url = ""
    past_url = ""
    byte_streams = {}
    data_exists = None

    # --------------------- decide whether data is in Processed DB or Bytes DB ---------------------
    if data_stream in ALL_DATA_STREAMS:
        first_day, last_day = dashboard_chunkregistry_date_query(study_id, data_stream)
        if first_day is not None:
            stream_data = OrderedDict(
                (participant.patient_id,
                 dashboard_chunkregistry_query(participant.id, data_stream=data_stream))
                for participant in participant_objects
            )
            unique_dates, next_url, past_url, byte_streams, data_exists = \
                _dashboard_stream_display_data(
                    first_day, last_day, start, end, participant_objects, stream_data,
                    get_bytes_participant_match,
                )
    else:
        # note: start/end are already extracted above; the former redundant
        # re-extraction that lived in this branch has been removed
        first_day, last_day, stream_data = parse_processed_data(
            study_id, participant_objects, data_stream)
        if first_day is not None:
            unique_dates, next_url, past_url, byte_streams, data_exists = \
                _dashboard_stream_display_data(
                    first_day, last_day, start, end, participant_objects, stream_data,
                    get_bytes_processed_data_match,
                )

    # ------------------------------ base case if there is no data ---------------------------------
    if first_day is None or (not data_exists and past_url == ""):
        unique_dates = []
        next_url = ""
        past_url = ""
        byte_streams = {}

    return render_template(
        'dashboard/data_stream_dashboard.html',
        study=study,
        data_stream=COMPLETE_DATA_STREAM_DICT.get(data_stream),
        times=unique_dates,
        byte_streams=byte_streams,
        base_next_url=next_url,
        base_past_url=past_url,
        study_id=study_id,
        data_stream_dict=COMPLETE_DATA_STREAM_DICT,
        color_low_range=color_low_range,
        color_high_range=color_high_range,
        first_day=first_day,
        last_day=last_day,
        show_color=show_color,
        all_flags_list=all_flags_list,
        allowed_studies=get_researcher_allowed_studies(),
        is_admin=researcher_is_an_admin(),
        page_location='dashboard_data',
    )
Exemplo n.º 10
0
def get_data_for_dashboard_patient_display(study_id, patient_id):
    """ Parses data to be displayed for the singular participant dashboard view.

    Pulls both "bytes" data (chunk-registry backed streams) and processed data for the
    participant, merges their date ranges, and renders the participant dashboard template. """
    study = Study.get_or_404(pk=study_id)
    participant = get_participant(patient_id, study_id)
    start, end = extract_date_args_from_request()
    chunks = dashboard_chunkregistry_query(participant.id)
    # every other participant in the study, used by the template (participant switcher)
    patient_ids = list(
        Participant.objects.filter(study=study_id).exclude(
            patient_id=patient_id).values_list("patient_id", flat=True))

    # ----------------- dates for bytes data streams -----------------------
    if chunks:
        first_day, last_day = dashboard_chunkregistry_date_query(study_id)
        _, first_date_data_entry, last_date_data_entry = \
            get_unique_dates(start, end, first_day, last_day, chunks)
    else:
        # no bytes data at all for this participant
        last_date_data_entry = first_date_data_entry = None
    # --------------- dates for  processed data streams -------------------
    # all_data is a list of dicts [{"time_bin": , "stream": , "processed_data": }...]
    processed_first_date_data_entry, processed_last_date_data_entry, all_data = parse_patient_processed_data(
        study_id, participant)

    # ------- decide the first date of data entry from processed AND bytes data as well as put the data together ------
    # but only if there are both processed and bytes data
    if chunks and all_data:
        # take the earlier of the two first-entry dates
        if (processed_first_date_data_entry - first_date_data_entry).days < 0:
            first_date_data_entry = processed_first_date_data_entry
        # NOTE(review): this takes the *earlier* of the two last-entry dates; for a combined
        # range the *later* one would normally win (`.days > 0`) -- confirm this is intentional.
        if (processed_last_date_data_entry - last_date_data_entry).days < 0:
            last_date_data_entry = processed_last_date_data_entry
    if all_data and not chunks:
        # only processed data exists, so its range is the whole range
        first_date_data_entry = processed_first_date_data_entry
        last_date_data_entry = processed_last_date_data_entry

    # ---------------------- get next/past urls and unique dates, as long as data has been entered -------------------
    if chunks or all_data:
        next_url, past_url = create_next_past_urls(first_date_data_entry,
                                                   last_date_data_entry,
                                                   start=start,
                                                   end=end)
        unique_dates, _, _ = get_unique_dates(start, end,
                                              first_date_data_entry,
                                              last_date_data_entry)
    else:
        next_url = past_url = unique_dates = None

    # --------------------- get all the data using the correct unique dates from both data sets ----------------------
    # get the byte data for the dates that have data collected in that week
    if all_data:
        # processed streams: stream name -> one matched value per displayed date
        processed_byte_streams = OrderedDict((stream, [
            get_bytes_patient_processed_match(all_data, date, stream)
            for date in unique_dates
        ]) for stream in PROCESSED_DATA_STREAM_DICT)
    else:
        processed_byte_streams = None

    if chunks:
        # bytes streams: stream name -> one matched value per displayed date
        byte_streams = OrderedDict((stream, [
            get_bytes_data_stream_match(chunks, date, stream)
            for date in unique_dates
        ]) for stream in ALL_DATA_STREAMS)
    else:
        byte_streams = None

    # merge the two stream tables; whichever side is missing is filled with None rows so
    # the template always sees every stream key
    if chunks and all_data:
        byte_streams.update(processed_byte_streams)
    elif all_data and not chunks:
        byte_streams = OrderedDict((stream, [None for date in unique_dates])
                                   for stream in ALL_DATA_STREAMS)
        byte_streams.update(processed_byte_streams)
    elif chunks and not all_data:
        processed_byte_streams = OrderedDict(
            (stream, [None for date in unique_dates])
            for stream in PROCESSED_DATA_STREAM_DICT)
        byte_streams.update(processed_byte_streams)
    # -------------------------  edge case if no data has been entered -----------------------------------
    else:
        byte_streams = {}
        unique_dates = []
        next_url = ""
        past_url = ""
        first_date_data_entry = ""
        last_date_data_entry = ""

    return render_template(
        'dashboard/participant_dashboard.html',
        study=study,
        patient_id=patient_id,
        participant=participant,
        times=unique_dates,
        byte_streams=byte_streams,
        next_url=next_url,
        past_url=past_url,
        patient_ids=patient_ids,
        study_id=study_id,
        first_date_data=first_date_data_entry,
        last_date_data=last_date_data_entry,
        data_stream_dict=COMPLETE_DATA_STREAM_DICT,
        allowed_studies=get_researcher_allowed_studies(),
        is_admin=researcher_is_an_admin(),
        page_location='dashboard_patient',
    )