def main():
  num_session = 7
  # batch process for each session
  for i in range(1,num_session+1):
    # process annotation files
    annotation_file = '/session' + str(i) + '.annotation.csv'
    puff_correction_file = '/session' + str(i) + '_puff.correction.csv'
    annotation_list = s_annotation.annotation_csv_importer(s_info.clean_dataset_folder + annotation_file)
    try:
      correction_list = pd.read_csv(s_info.clean_dataset_folder + puff_correction_file, parse_dates=['STARTTIME', 'ENDTIME'], index_col=0)
    except:
      print puff_correction_file + ' not processed'
      correction_list = None
    annotation_list = s_annotation.correct_puffs_and_add_prototypical_marks(annotation_list, correction_list)
    annotation_list.to_csv(s_info.puff_corrected_folder + annotation_file, index=False)
    print annotation_file + ' written'
    # process raw data files
    for j in s_info.sensor_codes:
      data_file = '/session' + str(i) + '_' + j + '.raw.csv'
      data_set = w_utils.raw_csv_importer(s_info.raw_dataset_folder + data_file)
      data_set = s_raw.preprocess_raw(data_set, annotation_list, grace_period=timedelta(seconds=0))
      w_utils.raw_csv_exporter(data_set, s_info.puff_corrected_folder + data_file)
      print data_file + ' written'
    # sys.exit(1)
  return
# --- Example #2 (scraped-sample delimiter; original marker "예제 #2" / "0") ---
def main():
    # batch process for each session
    for i in s_info.session_arr:
        i = 7
        # process annotation files
        annotation_file = 'session' + str(i) + '.annotation.csv'
        annotation_list = s_annotation.annotation_csv_importer(
            s_info.raw_dataset_folder + annotation_file)
        annotation_list = s_annotation.fix_annotation(
            annotation_list, time_offset=s_info.sys_offsets[i - 1])
        annotation_list.to_csv(s_info.clean_dataset_folder + annotation_file,
                               index=False)
        print annotation_file + ' written'
        # process raw data files
        for j in s_info.sensor_codes:
            data_file = 'session' + str(i) + '_' + j + '.raw.csv'
            data_set = w_utils.raw_csv_importer(s_info.raw_dataset_folder +
                                                data_file)
            data_set = s_raw.preprocess_raw(data_set,
                                            annotation_list,
                                            grace_period=timedelta(seconds=0))
            w_utils.raw_csv_exporter(data_set,
                                     s_info.clean_dataset_folder + data_file)
            print data_file + ' written'
        sys.exit(1)
    return
def generate_puff_correction_csv():
    """Write one session<i>_puff.correction.csv per session, seeding a
    manual-correction spreadsheet with every annotated puff interval.

    Each row gets default correction values (prototypical=1, zero offsets,
    no potential error, empty note) plus an absolute link to the
    pre-rendered raw plot of that puff.
    """
    # NOTE(review): removed debug leftovers (`i = 7` override and the
    # sys.exit(1) at the end of the loop body) so every session in
    # s_info.session_arr is processed.
    for i in s_info.session_arr:
        annotation_file = 'session' + str(i) + '.annotation.csv'
        annotation_data = s_annotation.annotation_csv_importer(
            s_info.clean_dataset_folder + annotation_file)
        # get all puff annotations: rows where a puff run starts / ends.
        # NOTE(review): both masks use shift(-1) inside pd.isnull(); the
        # start mask presumably intends shift(1) to catch a puff on the
        # first row -- confirm against the annotation layout before fixing.
        puff_st_markers = (
            annotation_data[s_annotation.puff_col] != 'no-puff') & (
                (annotation_data[s_annotation.puff_col].shift(1) == 'no-puff')
                | pd.isnull(annotation_data[s_annotation.puff_col].shift(-1)))
        puff_et_markers = (
            annotation_data[s_annotation.puff_col] != 'no-puff') & (
                (annotation_data[s_annotation.puff_col].shift(-1) == 'no-puff')
                | pd.isnull(annotation_data[s_annotation.puff_col].shift(-1)))
        c = 0  # per-session puff counter, used in the figure filenames
        puff_correction_item = {
            'STARTTIME': [],
            'ENDTIME': [],
            'prototypical': [],
            'offset-left': [],
            'offset-right': [],
            'potential error': [],
            'link': [],
            'note': []
        }
        for st, et in zip(
                annotation_data[puff_st_markers][s_annotation.st_col],
                annotation_data[puff_et_markers][s_annotation.et_col]):
            puff_correction_item['STARTTIME'].append(st)
            puff_correction_item['ENDTIME'].append(et)
            puff_correction_item['prototypical'].append(1)
            puff_correction_item['offset-left'].append(0)
            puff_correction_item['offset-right'].append(0)
            puff_correction_item['potential error'].append(0)
            puff_correction_item['note'].append('')
            # pick the figure's L/R suffix from the puff's handedness.
            # NOTE(review): this compares a (possibly multi-row) Series to a
            # string inside `if`; it only works when exactly one row matches
            # -- confirm start timestamps are unique.
            if annotation_data[annotation_data[s_annotation.st_col] == st][
                    s_annotation.puff_col] == 'left-puff':
                side = 'L'
            else:
                side = 'R'
            fname = s_info.puff_figure_folder + 'session' + str(
                i) + '_puff' + str(c) + '_' + side + '.rawplot.png'
            puff_correction_item['link'].append(os.path.abspath(fname))
            c += 1
        # save puff correction to csv for each session
        tosave = pd.DataFrame(puff_correction_item,
                              columns=[
                                  'STARTTIME', 'ENDTIME', 'link',
                                  'offset-left', 'offset-right',
                                  'prototypical', 'potential error', 'note'
                              ])
        csvname = s_info.clean_dataset_folder + 'session' + str(
            i) + '_puff.correction.csv'
        tosave.to_csv(csvname)
# --- Example #4 (scraped-sample delimiter; original marker "예제 #4" / "0") ---
def main():
    for i in s_info.session_arr:
        i = 2
        annotation_file = 'session' + str(i) + '.annotation.csv'
        annotation_list = s_annotation.annotation_csv_importer(
            s_info.clean_dataset_folder + annotation_file)

        raw_file = 'session' + str(i) + '_DAK.raw.csv'
        raw_ankle_data = w_utils.raw_csv_importer(s_info.clean_dataset_folder +
                                                  raw_file)

        selected_st, selected_et = get_walking_or_standing_annotation(
            annotation_list)
        n_subplots = len(selected_st) + 1
        #generate subplot grid
        ncols = np.ceil(np.sqrt(n_subplots))
        nrows = np.ceil(n_subplots / float(ncols))
        c = 1
        consolidate_figure = pyplot.figure(figsize=(20, 10))
        for st, et in zip(selected_st, selected_et):
            lbound = annotation_list.ix[st, s_annotation.st_col]
            rbound = annotation_list.ix[et, s_annotation.et_col]
            lbound = lbound - timedelta(seconds=5)
            rbound = rbound + timedelta(seconds=5)
            selected_raw = s_raw.select_raw_by_ts(raw_ankle_data, lbound,
                                                  rbound)
            selected_annotation = s_annotation.select_annotation_by_ts(
                annotation_list, lbound, rbound)
            selected_raw = s_viewer._prepare_raw_for_plot(selected_raw)
            print selected_annotation
            ax = consolidate_figure.add_subplot(nrows, ncols, c)
            s_viewer.get_singlesensor_raw_plot(selected_raw,
                                               selected_annotation,
                                               subplots=False,
                                               ax=ax,
                                               figsize=None)
            single_figure = pyplot.figure(figsize=(20, 10))
            s_viewer.get_singlesensor_raw_plot(selected_raw,
                                               selected_annotation,
                                               ax=single_figure.gca(),
                                               subplots=False)
            figfile = 'session' + str(i) + '_walking_episode' + str(
                c) + '.rawplot.png'
            single_figure.savefig(s_info.post_figure_folder + figfile)
            pyplot.close(single_figure)
            c += 1
        # add legend
        ax = consolidate_figure.add_subplot(nrows, ncols, c)
        s_viewer.get_legends_plot()
        figfile = 'session' + str(i) + '_walking_episodes.rawplot.png'
        consolidate_figure.savefig(s_info.post_figure_folder + figfile)
        pyplot.close(consolidate_figure)
        sys.exit(1)
def main():
    """Sanity-check peak-rate computation on hand-picked 30-second windows
    sampled from labelled segments of session 7 (puff-corrected dataset).
    """
    testfile_raw = "../puff_corrected_dataset/session7_DW.raw.csv"
    testfile_annotation = "../puff_corrected_dataset/session7.annotation.csv"
    raw_df = w_utils.raw_csv_importer(testfile_raw)
    annotation_df = s_annotation.annotation_csv_importer(testfile_annotation)
    # per-column statistics used to normalize the raw signal
    mean_raw = raw_df.mean()
    std_raw = raw_df.std()
    # label groups to sample, each paired with an RNG seed below
    label_groups = [
        ['right-puff'],
        ['right-puff'],
        ['walking'],
        ['eating-a-meal'],
        ['sitting', 'not-smoking', 'no-activity'],
    ]
    seeds = [0, 10, 2, 3, 10]
    for group, seed in zip(label_groups, seeds):
        # pick a random 30 s window inside a segment carrying these labels
        lbound, rbound = s_annotation.generate_bounds_by_labels(
            annotation_df,
            duration=timedelta(seconds=30),
            labels=group,
            seed=seed)
        selected_a = s_annotation.select_annotation_by_ts(
            annotation_df, lbound=lbound, rbound=rbound)
        selected_raw = s_raw.select_raw_by_ts(
            raw_df, lbound=lbound, rbound=rbound)
        pyplot.figure()
        pyplot.suptitle(group)
        test_peak_rate_computation(selected_raw, selected_a, mean_raw, std_raw)
    pyplot.show()
def main():
    """Show the legend figure, then (currently unreachable, see below)
    render a 30-second multi-sensor plot for every annotated puff in every
    session and save it under the puff figure folder.
    """
    # generate legend plot
    fhandle = s_viewer.get_legends_plot()
    pyplot.show()
    # fname = s_info.puff_figure_folder + 'legend.png'
    # fhandle.savefig(fname)
    # pyplot.close(fhandle)
    # NOTE(review): this exit makes everything below dead code.  It looks
    # like a debugging toggle left enabled (cf. the commented-out savefig
    # above) -- confirm intent before removing.
    sys.exit(1)
    for i in s_info.session_arr:
        # i=7
        # read in raw and annotation
        annotation_file = 'session' + str(i) + '.annotation.csv'
        raw_files = [
            'session' + str(i) + '_' + code + '.raw.csv'
            for code in s_info.sensor_codes
        ]
        raw_datas = [
            w_utils.raw_csv_importer(s_info.clean_dataset_folder + raw_file)
            for raw_file in raw_files
        ]
        # merge all sensors' raw streams into one frame tagged by sensor
        consolidate_raw = w_utils.raw_data_consolidator(
            raw_datas,
            sessions=[
                i,
            ] * len(raw_datas),
            sensors=s_info.sensor_codes)
        annotation_data = s_annotation.annotation_csv_importer(
            s_info.puff_corrected_folder +
            annotation_file)  # use puff corrected annotation
        # annotation_data = s_annotation.annotation_csv_importer(s_info.clean_dataset_folder + annotation_file) # use clean annotation without puff correction

        # get all puff annotations: rows where a puff run starts / ends.
        # NOTE(review): both masks use shift(-1) inside pd.isnull(); the
        # start mask presumably intends shift(1) -- confirm (same pattern as
        # generate_puff_correction_csv).
        puff_st_markers = (
            annotation_data[s_annotation.puff_col] != 'no-puff') & (
                (annotation_data[s_annotation.puff_col].shift(1) == 'no-puff')
                | pd.isnull(annotation_data[s_annotation.puff_col].shift(-1)))
        puff_et_markers = (
            annotation_data[s_annotation.puff_col] != 'no-puff') & (
                (annotation_data[s_annotation.puff_col].shift(-1) == 'no-puff')
                | pd.isnull(annotation_data[s_annotation.puff_col].shift(-1)))
        c = 0  # per-session puff counter, used in figure filenames
        for st, et in zip(
                annotation_data[puff_st_markers][s_annotation.st_col],
                annotation_data[puff_et_markers][s_annotation.et_col]):
            # get raw and annotation for current puff: a 30 s window
            # centered on the puff's midpoint
            middle_time = st + (et - st) / 2
            from datetime import timedelta
            lbound = middle_time - timedelta(seconds=15)
            rbound = middle_time + timedelta(seconds=15)
            selected_puff_raw = s_raw.select_raw_by_ts(consolidate_raw,
                                                       lbound=lbound,
                                                       rbound=rbound,
                                                       by='sensor')
            selected_annotation = s_annotation.select_annotation_by_ts(
                annotation_data, lbound=lbound, rbound=rbound)

            # replicate the annotation once per sensor so the consolidated
            # plot has a matching annotation track for each sensor panel
            selected_annotations = [
                selected_annotation.copy() for s in s_info.sensor_codes
            ]
            consolidate_selected_annotation = s_annotation.annotation_data_consolidator(
                selected_annotations, sensors=s_info.sensor_codes)

            # plot multisensor figure and save them
            # NOTE(review): compares a (possibly multi-row) Series to a
            # string inside `if`; only works when exactly one row matches
            # st -- confirm start timestamps are unique.
            if annotation_data[annotation_data[s_annotation.st_col] == st][
                    s_annotation.puff_col] == 'left-puff':
                side = 'L'
            else:
                side = 'R'
            fig = s_viewer.get_multisensor_raw_plot(
                selected_puff_raw,
                consolidate_selected_annotation,
                subplots=False)
            fname = s_info.puff_figure_folder + 'session' + str(
                i) + '_puff' + str(c) + '_' + side + '.rawplot.png'
            fig.savefig(fname)
            pyplot.close(fig)
            print fname + " written"
            c += 1
def main():
    """Aggregate per-session puff/smoking statistics from the puff-corrected
    annotations and export them all into a single Excel workbook
    (stat.xlsx), one sheet per statistic family.
    """
    # puff-count statistics, one list entry per session and metric
    npuff_names = [
        'left-puffs', 'prototypical-left', 'right-puffs', 'prototypical-right',
        'num-of-puffs', 'prototypical-total', 'hand-swap-count'
    ]
    npuff_statistics = {
        'left-puffs': [],
        'right-puffs': [],
        'num-of-puffs': [],
        'hand-swap-count': [],
        'prototypical-left': [],
        'prototypical-right': [],
        'prototypical-total': []
    }
    # statistics derived from the raw counts (rates and percentages)
    derived_names = [
        'hand-swap-rate', 'puff-speed', 'prototypical-left-percentage',
        'prototypical-right-percentage', 'prototypical-total-percentage'
    ]
    derived_statistics = {
        'hand-swap-rate': [],
        'puff-speed': [],
        'prototypical-left-percentage': [],
        'prototypical-right-percentage': [],
        'prototypical-total-percentage': []
    }
    # activity-complexity statistics
    complexity_names = ['superposition', 'ambiguity']
    complexity_statistics = {'superposition': [], 'ambiguity': []}
    # duration / interval accumulators (one entry per session)
    dsmoke_statistics = []
    dpuff_statistics = []
    ipuff_statistics = []
    interpuff_stats = []
    interpuff_lists = []
    puffduration_lists = []
    puffduration_stats = []
    # --- collect per-session statistics ---
    for i in s_info.session_arr:
        annotation_file = 'session' + str(i) + '.annotation.csv'
        # alist = s_annotation.annotation_csv_importer(s_info.clean_dataset_folder + annotation_file)
        # use puff corrected dataset
        alist = s_annotation.annotation_csv_importer(
            s_info.puff_corrected_folder + annotation_file)
        for name in npuff_names:
            npuff_statistics[name].append(
                get_num_of_puffs_statistics(alist, compute=name))
        dsmoke_statistics.append(get_smoke_duration_statistics(alist))
        # each helper returns (per-puff list, summary stats) pairs
        interpuff_list, interpuff_stat = get_interpuff_intervals_statistics(
            alist)
        interpuff_stats.append(interpuff_stat)
        interpuff_lists.append(interpuff_list)
        puffduration_list, puffduration_stat = get_puff_duration_statistics(
            alist)
        puffduration_stats.append(puffduration_stat)
        puffduration_lists.append(puffduration_list)
        for name in derived_names:
            derived_statistics[name].append(
                get_derived_statistics(alist, compute=name))
        for name in complexity_names:
            complexity_statistics[name].append(
                get_complexity_statistics(alist, compute=name))

    # --- concatenate per-session results column-wise, keyed by session ---
    dsmoke_statistics = pd.concat(dsmoke_statistics,
                                  keys=s_info.session_arr,
                                  axis=1)
    for name in npuff_names:
        npuff_statistics[name] = pd.concat(npuff_statistics[name],
                                           keys=s_info.session_arr,
                                           axis=1)
    npuff_statistics = pd.concat(npuff_statistics, keys=npuff_names)
    dpuff_statistics = pd.concat(puffduration_stats,
                                 keys=s_info.session_arr,
                                 axis=1)
    ipuff_statistics = pd.concat(interpuff_stats,
                                 keys=s_info.session_arr,
                                 axis=1)
    for name in derived_names:
        derived_statistics[name] = pd.concat(derived_statistics[name],
                                             keys=s_info.session_arr,
                                             axis=1)
    for name in complexity_names:
        complexity_statistics[name] = pd.concat(complexity_statistics[name],
                                                keys=s_info.session_arr,
                                                axis=1)
    # save: one workbook, one sheet per statistic family
    writer = pd.ExcelWriter(s_info.stat_dataset_folder + 'stat.xlsx')
    dsmoke_statistics.to_excel(writer, sheet_name='smoking duration')
    npuff_statistics.to_excel(writer, sheet_name='puff counts')
    dpuff_statistics.to_excel(writer, sheet_name='puff-duration')
    ipuff_statistics.to_excel(writer, sheet_name='interpuff-interval')
    for name in derived_names:
        derived_statistics[name].to_excel(writer, sheet_name=name)
    for name in complexity_names:
        complexity_statistics[name].to_excel(writer,
                                             sheet_name='activity-' + name)
    # per-session raw interval lists, one sheet each
    # NOTE(review): sheet index c counts 1..N positionally; it only matches
    # the real session numbers if session_arr is 1..N -- confirm.
    c = 1
    for interpuff_li, puffduration_li in zip(interpuff_lists,
                                             puffduration_lists):
        interpuff_li.to_excel(writer, sheet_name='interpuff-session' + str(c))
        puffduration_li.to_excel(writer,
                                 sheet_name='puffduration-session' + str(c))
        c += 1
    writer.save()
    return