Example #1
                                       p_value_threshold=0.05,
                                       n_top_features=50)

    #%%     K significant features

                k_sigfeat_dir = plot_dir / 'k_sig_feats'
                k_sigfeat_dir.mkdir(exist_ok=True, parents=True)

                # Infer feature set
                fset, (scores, pvalues), support = k_significant_feat(
                    feat=feat_df,
                    y_class=meta_df[GROUPING_VAR],
                    k=K_SIG_FEATS,
                    score_func='f_classif',
                    scale=None,
                    feat_names=None,
                    plot=True,
                    k_to_plot=None,
                    close_after_plotting=True,
                    saveto=None,
                    figsize=None,
                    title=None,
                    xlabel=None)
                # OPTIONAL: Plot cherry-picked features
                #fset = ['speed_50th','curvature_neck_abs_50th','major_axis_50th','angular_velocity_neck_abs_50th']
                #fset = pvalues_ttest.columns[np.where((pvalues_ttest < P_VALUE_THRESHOLD).any(axis=0))]
                boxplots_by_strain(df=meta_df.join(feat_df),
                                   group_by=GROUPING_VAR,
                                   test_pvalues_df=pvalues_ttest,
                                   control_strain=control_strain,
                                   features2plot=fset,
                                   saveDir=k_sigfeat_dir,
Example #2
#%% imports
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score
# NOTE: the tierpsytools import path for mRMR_feature_selection is assumed; adjust if your version differs
from tierpsytools.analysis.significant_features import (
    k_significant_feat, mRMR_feature_selection)

#%%
# data_file (path to a features CSV) and estimator (a scikit-learn classifier) are not
# defined in this excerpt; a sketch of possible definitions follows the example
data = pd.read_csv(data_file, index_col=None)

y = data['worm_strain'].values
data = data.drop(columns='worm_strain')

#%%
mrmr_feat_set, mrmr_scores, mrmr_support = mRMR_feature_selection(
        data, k=10, y_class=y,
        redundancy_func='pearson_corr', relevance_func='kruskal',
        n_bins=4, mrmr_criterion='MIQ',
        plot=True, k_to_plot=5, close_after_plotting=False,
        saveto=None, figsize=None
        )

cv_scores_mrmr = cross_val_score(estimator, data[mrmr_feat_set], y, cv=5)
print('MRMR')
print(np.mean(cv_scores_mrmr))

#%%
feat_set, scores, support = k_significant_feat(
        data, y, k=10, score_func='f_classif', scale=None,
        plot=True, k_to_plot=5, close_after_plotting=False,
        saveto=None, figsize=None, title=None, xlabel=None
        )

cv_scores = cross_val_score(estimator, data[feat_set], y, cv=5)
print('k-best')
print(np.mean(cv_scores))
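
Example #2 leaves two names undefined in the excerpt: data_file and estimator. A minimal sketch of definitions it could use is below; the path is a placeholder and the choice of a scaled logistic-regression pipeline is an assumption (it mirrors the pipeline used in the keio_stats example further down this page).

# hypothetical definitions for the example above (not part of the original excerpt)
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

data_file = 'path/to/feature_summaries.csv'   # placeholder: CSV of features plus a 'worm_strain' column
estimator = Pipeline([('scaler', StandardScaler()),
                      ('estimator', LogisticRegression())])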
Example #3
    featZ = pd.DataFrame(data=stats.zscore(feat_nonan, axis=0),
                         columns=feat.columns,
                         index=feat_nonan.index)
    
#%% sig features
    if k_feat:
        from tierpsytools.analysis.significant_features import k_significant_feat  
        (SAVETO / 'k_sig_feats').mkdir(exist_ok=True)
    
        window_groups = meta.groupby('window').groups
        for w in window_groups: 
            neuro_feats = {}
            for fset in featsets.keys():
                neuro_feats[fset], scores, support = k_significant_feat(
                    feat_nonan.loc[window_groups[w],
                                   featsets[fset]],
                    meta.loc[window_groups[w], 'worm_gene'],
                    k=10,
                    plot=False)
            
            # save some plots
            _window_type = str(meta.loc[window_groups[w], 'window_light'].unique()[0])
            (SAVETO / 'k_sig_feats' / 'window{}_{}'.format(w, _window_type)).mkdir(exist_ok=True)
            for f in neuro_feats:
                (SAVETO / 'k_sig_feats' / 'window{}_{}'.format(w, _window_type) / f).mkdir(exist_ok=True)
                for s in neuro_feats[f]:
                    plt.figure()
                    sns.boxplot(x=feat_nonan.loc[window_groups[w], s],
                                y=meta.loc[window_groups[w], 'worm_gene'],
                                order=neuro_genes,
                                orient='h',
                                showfliers=False,
Example #4

 else:
     stats_table['significance'] = sig_asterix(pvals.loc[stats_table.index, TEST_NAME].values)
     
 #%% K significant features
 
 # k_sigfeat_dir = plot_dir / 'k_sig_feats'
 # k_sigfeat_dir.mkdir(exist_ok=True, parents=True)      
 fset_ksig, (scores, pvalues_ksig), support = k_significant_feat(feat=feat_df, 
                                                 y_class=meta_df[GROUPING_VAR], 
                                                 k=max(len(fset), args.k_sig_features), 
                                                 score_func='f_classif', 
                                                 scale=None, 
                                                 feat_names=None, 
                                                 plot=False, 
                                                 k_to_plot=None, 
                                                 close_after_plotting=True,
                                                 saveto=None, #k_sigfeat_dir
                                                 figsize=None, 
                                                 title=None, 
                                                 xlabel=None)
 
 pvalues_ksig = pd.DataFrame(pd.Series(data=pvalues_ksig, 
                                       index=fset_ksig, 
                                       name='k_significant_features')).T
 # Save k most significant features
 pvalues_ksig.to_csv(stats_dir / 'k_significant_features.csv', header=True, index=False)   
 
 # col = 'Top{} k-significant p-value'.format(args.k_sig_features)
Example #5

                                                  featsets,
                                                  strain_lut,
                                                  feat_lut,
                                                  saveto=saveto / 'clustermaps')
            plt.close('all')
            N2clustered_features_copy = N2clustered_features.copy()
            #%% k significant features for each prestim, bluelight and poststim
            if exploratory:
                (saveto / 'ksig_feats').mkdir(exist_ok=True)
                sns.set_style('white')
                label_format = '{:.4f}'
                kfeats = {}
                for stim, fset in featsets.items():
                    kfeats[stim], scores, support = k_significant_feat(
                        feat_nonan[fset],
                        meta_df.worm_gene,
                        k=100,
                        plot=False)

                    for i in range(0,5):
                        fig, ax = plt.subplots(4, 5, sharex=True, figsize = [20,20])
                        for c, axis in enumerate(ax.flatten()):
                            counter = (20 * i) - (20 - c)  # == 20*i + c - 20 (negative for i=0, wrapping to the last 20 features)
                            sns.boxplot(x=meta_df['worm_gene'],
                                        y=feat_df[kfeats[stim][counter]],
                                        palette=strain_lut.values(),
                                        ax=axis)
                            axis.set_ylabel(fontsize=8,
                                            ylabel=kfeats[stim][counter])
                            axis.set_yticklabels(labels=[label_format.format(x) for x in axis.get_yticks()],
                                                 fontsize=6)
Example #6

    #%% fill NaNs and normalise - used later for clustering and PCA/LDA
    feat_nonan = feat.copy()
    feat_nonan.fillna(feat_nonan.mean(axis=0), inplace=True)

    featZ = pd.DataFrame(data=stats.zscore(feat_nonan, axis=0),
                         columns=feat.columns,
                         index=feat_nonan.index)

    #%% sig features
    if k_feat:
        from tierpsytools.analysis.significant_features import k_significant_feat
        (SAVETO / 'k_sig_feats').mkdir(exist_ok=True)
        neuro_feats = {}
        for fset in featsets.keys():
            neuro_feats[fset], scores, support = k_significant_feat(
                feat_nonan[featsets[fset]], meta.worm_gene, k=20, plot=False)

        for f in neuro_feats:
            (SAVETO / 'k_sig_feats' / f).mkdir(exist_ok=True)
            for s in neuro_feats[f]:
                plt.figure()
                sns.boxplot(x=feat[s],
                            y=meta['worm_gene'],
                            order=neuro_genes,
                            orient='h',
                            showfliers=False,
                            palette=cmap)
                plt.savefig(SAVETO / 'k_sig_feats' / f / '{}.png'.format(s),
                            dpi=200)
                plt.close('all')
Example #7
    set(prestim_nopath_feats) - set(bad_feats))

plt.figure()
for f in feat_stats_meta:
    sns.distplot(feat_stats_cov[feat_stats_cov['MOA_specific'] == f]
                 [prestim_nopath_minuscov_feats],
                 label=f,
                 bins=100)
plt.legend()

# now do statistical tests
y_classes = list(
    zip(subsample_meta.drug_type,
        subsample_meta.imaging_plate_drug_concentration))
y_classes = np.array(['{}, {}'.format(x[0], x[1]) for x in y_classes])
sig_feats = significant_features.k_significant_feat(
    subsample_feat[prestim_nopath_minuscov_feats], y_classes, k=50)

# initialise PCA
postprocessingPCA = PCA()
X2 = postprocessingPCA.fit_transform(subsample_featZ.values)
cumvar = np.cumsum(postprocessingPCA.explained_variance_ratio_)
thresh = cumvar <= 0.95  #set 95% variance threshold
cut_off = int(np.argwhere(thresh)[-1])

#make a plot
sns.set_style('whitegrid')
plt.figure()
plt.plot(range(0, len(cumvar)), cumvar * 100)
plt.plot([cut_off, cut_off], [0, 100], 'k')
plt.xlabel('Number of Principal Components', fontsize=16)
plt.ylabel('variance explained', fontsize=16)
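
In the excerpt above, cut_off (the index of the last principal component inside the 95% cumulative-variance threshold) is only used to draw the vertical line in the plot. A minimal follow-up sketch, reusing the names defined above, of how it could be used to truncate the projected data:

# keep only the components needed to reach ~95% explained variance (sketch, not in the original excerpt)
X2_reduced = X2[:, :cut_off + 1]
print('Reduced from {} to {} principal components'.format(X2.shape[1], X2_reduced.shape[1]))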
Example #8

def keio_stats(features, metadata, args):
    """ Perform statistical analyses on Keio screen results:
        - ANOVA tests for significant between strain variation among all strains for each feature
        - t-tests for significant differences between each strain and control for each feature
        - k-significant feature selection for agreement with ANOVA significant feature set
        
        Inputs
        ------
        features, metadata : pd.DataFrame
            Clean feature summaries and accompanying metadata
        
        args : Object
            Python object with the following attributes:
            - drop_size_features : bool
            - norm_features_only : bool
            - percentile_to_use : str
            - remove_outliers : bool
            - omit_strains : list
            - grouping_variable : str
            - control_dict : dict
            - collapse_control : bool
            - n_top_feats : int
            - tierpsy_top_feats_dir (if n_top_feats) : str
            - test : str
            - f_test : bool
            - pval_threshold : float
            - fdr_method : str
            - n_sig_features : int           
    """

    # categorical variable to investigate, eg.'gene_name'
    grouping_var = args.grouping_variable
    print("\nInvestigating '%s' variation" % grouping_var)    

    # assert there will be no errors due to case-sensitivity
    assert len(metadata[grouping_var].unique()) == len(metadata[grouping_var].str.upper().unique())
    
    # Subset results (rows) to omit selected strains
    if args.omit_strains is not None:
        features, metadata = subset_results(features, metadata, grouping_var, args.omit_strains)

    # Load Tierpsy Top feature set + subset (columns) for top feats only
    if args.n_top_feats is not None:
        top_feats_path = Path(args.tierpsy_top_feats_dir) / "tierpsy_{}.csv".format(str(args.n_top_feats))
        topfeats = load_topfeats(top_feats_path, add_bluelight=args.align_bluelight, 
                                 remove_path_curvature=True, header=None)
        
        # Drop features that are not in results
        top_feats_list = [feat for feat in list(topfeats) if feat in features.columns]
        features = features[top_feats_list]
    
    assert not features.isna().any().any()
    
    strain_list = list(metadata[grouping_var].unique())
    control = args.control_dict[grouping_var] # control strain to use
    assert control in strain_list
    
    if args.collapse_control:
        print("Collapsing control data (mean of each day)")
        features, metadata = average_plate_control_data(features, 
                                                        metadata, 
                                                        control=control, 
                                                        grouping_var=grouping_var, 
                                                        plate_var='imaging_plate_id')

    _ = df_summary_stats(metadata) # summary df # TODO: plot from?

    # Record mean sample size per group
    mean_sample_size = int(np.round(metadata.join(features).groupby([grouping_var], 
                                                                    as_index=False).size().mean()))
    print("Mean sample size: %d" % mean_sample_size)

    # construct save paths (args.save_dir / topfeats? etc)
    save_dir = get_save_dir(args)
    stats_dir =  save_dir / grouping_var / "Stats" / args.fdr_method
    plot_dir = save_dir / grouping_var / "Plots" / args.fdr_method              

#%% F-test for equal variances

    # Compare variance in samples with control (and correct for multiple comparisons)
    # Unequal variances are not a problem for a t-test when sample sizes are equal, so homogeneity
    # of variance only needs checking when group sizes differ; in that case, run F-tests (Levene)
    # first to check for equal variances before doing a t-test
    if args.f_test:
        levene_stats_path = stats_dir / 'levene_results.csv'
        levene_stats = levene_f_test(features, metadata, grouping_var, 
                                      p_value_threshold=args.pval_threshold, 
                                      multitest_method=args.fdr_method,
                                      saveto=levene_stats_path,
                                      del_if_exists=False)
        # if p < 0.05 then variances are not equal, and sample size matters
        prop_eqvar = (levene_stats['pval'] > args.pval_threshold).sum() / len(levene_stats['pval'])
        print("Percentage equal variance %.1f%%" % (prop_eqvar * 100))
          
#%% ANOVA / Kruskal-Wallis tests for significantly different features across groups

    test_path_unncorrected = stats_dir / '{}_results_uncorrected.csv'.format(args.test)
    test_path = stats_dir / '{}_results.csv'.format(args.test)
    
    if not (test_path.exists() and test_path_unncorrected.exists()):
        test_path.parent.mkdir(exist_ok=True, parents=True)
    
        if (args.test == "ANOVA" or args.test == "Kruskal"):
            if len(strain_list) > 2:   
                # perform ANOVA + record results before & after correcting for multiple comparisons               
                stats, pvals, reject = univariate_tests(X=features, 
                                                        y=metadata[grouping_var], 
                                                        control=control, 
                                                        test=args.test,
                                                        comparison_type='multiclass',
                                                        multitest_correction=None, # uncorrected
                                                        alpha=args.pval_threshold,
                                                        n_permutation_test='all')

                # get effect sizes
                effect_sizes = get_effect_sizes(X=features, 
                                                y=metadata[grouping_var],
                                                control=control,
                                                effect_type=None,
                                                linked_test=args.test)

                # compile + save results (uncorrected)
                test_results = pd.concat([stats, effect_sizes, pvals, reject], axis=1)
                test_results.columns = ['stats','effect_size','pvals','reject']     
                test_results['significance'] = sig_asterix(test_results['pvals'])
                test_results = test_results.sort_values(by=['pvals'], ascending=True) # rank pvals
                test_results.to_csv(test_path_unncorrected, header=True, index=True)

                # correct for multiple comparisons
                reject_corrected, pvals_corrected = _multitest_correct(pvals, 
                                                                       multitest_method=args.fdr_method,
                                                                       fdr=args.pval_threshold)
                                            
                # compile + save results (corrected)
                test_results = pd.concat([stats, effect_sizes, pvals_corrected, reject_corrected], axis=1)
                test_results.columns = ['stats','effect_size','pvals','reject']     
                test_results['significance'] = sig_asterix(test_results['pvals'])
                test_results = test_results.sort_values(by=['pvals'], ascending=True) # rank pvals
                test_results.to_csv(test_path, header=True, index=True)
        
                # use reject mask to find significant feature set
                fset = pvals.loc[reject[args.test]].sort_values(by=args.test, ascending=True).index.to_list()
                #assert set(fset) == set(anova_corrected['pvals'].index[np.where(anova_corrected['pvals'] < 
                #args.pval_threshold)[0]])

                if len(fset) > 0:
                    print("%d significant features found by %s for '%s' (P<%.2f, %s)" % (len(fset), 
                          args.test, grouping_var, args.pval_threshold, args.fdr_method))
                    anova_sigfeats_path = stats_dir / '{}_sigfeats.txt'.format(args.test)
                    write_list_to_file(fset, anova_sigfeats_path)
            else:
                fset = []
                print("\nWARNING: Not enough groups for %s for '%s' (n=%d groups)" %\
                      (args.test, grouping_var, len(strain_list)))
                    
#%% Linear Mixed Models (LMMs), accounting for day-to-day variation
        # NB: Ideally report:  parameter | beta | lower-95 | upper-95 | random effect (SD)
        elif args.test == 'LMM':
            with warnings.catch_warnings():
                # Filter warnings as parameter is often on the boundary
                warnings.filterwarnings("ignore")
                #warnings.simplefilter("ignore", ConvergenceWarning)
                (signif_effect, low_effect,  error, mask, pvals
                 ) = compounds_with_low_effect_univariate(feat=features, 
                                                          drug_name=metadata[grouping_var], 
                                                          drug_dose=None, 
                                                          random_effect=metadata[args.lmm_random_effect], 
                                                          control=control, 
                                                          test=args.test, 
                                                          comparison_type='multiclass',
                                                          multitest_method=args.fdr_method)
            assert len(error) == 0

            # save pvals
            pvals.to_csv(test_path_unncorrected, header=True, index=True)

            # save significant features -- if any strain significant for any feature
            fset = pvals.columns[(pvals < args.pval_threshold).any()].to_list()
            if len(fset) > 0:
                lmm_sigfeats_path = stats_dir / '{}_sigfeats.txt'.format(args.test)
                write_list_to_file(fset, lmm_sigfeats_path)

            # save significant effect strains
            if len(signif_effect) > 0:
                print("%d significant features found (%d significant %ss vs %s control, "
                      "after accounting for %s variation, %s, P<%.2f, %s)"
                      % (len(fset), len(signif_effect), grouping_var.replace('_', ' '), control,
                         args.lmm_random_effect.split('_yyyymmdd')[0], args.test,
                         args.pval_threshold, args.fdr_method))
                signif_effect_path = stats_dir / '{}_signif_effect_strains.txt'.format(args.test)
                write_list_to_file(signif_effect, signif_effect_path)
        
        else:
            raise IOError("Test '{}' not recognised".format(args.test))
    
#%% t-tests / Mann-Whitney tests
    
    # t-test to use        
    t_test = 't-test' if args.test == 'ANOVA' else 'Mann-Whitney' # aka. Wilcoxon rank-sum      
    ttest_path_uncorrected = stats_dir / '{}_results_uncorrected.csv'.format(t_test)
    ttest_path = stats_dir / '{}_results.csv'.format(t_test)               

    if not (ttest_path_uncorrected.exists() and ttest_path.exists()):    
        ttest_path.parent.mkdir(exist_ok=True, parents=True)

        if len(fset) > 0 or len(strain_list) == 2:
            # perform t-tests (without correction for multiple testing)
            stats_t, pvals_t, reject_t = univariate_tests(X=features, 
                                                          y=metadata[grouping_var], 
                                                          control=control, 
                                                          test=t_test,
                                                          comparison_type='binary_each_group',
                                                          multitest_correction=None, 
                                                          alpha=0.05)
            # get effect sizes for comparisons
            effect_sizes_t =  get_effect_sizes(X=features, 
                                               y=metadata[grouping_var], 
                                               control=control,
                                               effect_type=None,
                                               linked_test=t_test)
            
            # compile + save t-test results (uncorrected)
            stats_t.columns = ['stats_' + str(c) for c in stats_t.columns]
            pvals_t.columns = ['pvals_' + str(c) for c in pvals_t.columns]
            reject_t.columns = ['reject_' + str(c) for c in reject_t.columns]
            effect_sizes_t.columns = ['effect_size_' + str(c) for c in effect_sizes_t.columns]
            ttest_uncorrected = pd.concat([stats_t, effect_sizes_t, pvals_t, reject_t], axis=1)
            ttest_uncorrected.to_csv(ttest_path_uncorrected, header=True, index=True)
            
            # correct for multiple comparisons
            pvals_t.columns = [c.split("_")[-1] for c in pvals_t.columns]
            reject_t, pvals_t = _multitest_correct(pvals_t, 
                                                   multitest_method=args.fdr_method,
                                                   fdr=args.pval_threshold)

            # compile + save t-test results (corrected)
            pvals_t.columns = ['pvals_' + str(c) for c in pvals_t.columns]
            reject_t.columns = ['reject_' + str(c) for c in reject_t.columns]
            ttest_corrected = pd.concat([stats_t, effect_sizes_t, pvals_t, reject_t], axis=1)
            ttest_corrected.to_csv(ttest_path, header=True, index=True)

            # record t-test significant features (not ordered)
            fset_ttest = pvals_t[np.asmatrix(reject_t)].index.unique().to_list()
            #assert set(fset_ttest) == set(pvals_t.index[(pvals_t < args.pval_threshold).sum(axis=1) > 0])
            print("%d significant features found for any %s vs %s (%s, P<%.2f)" %\
                  (len(fset_ttest), grouping_var, control, t_test, args.pval_threshold))

            if len(fset_ttest) > 0:
                ttest_sigfeats_path = stats_dir / '{}_sigfeats.txt'.format(t_test)
                write_list_to_file(fset_ttest, ttest_sigfeats_path)
                                 
#%% K significant features
    
    ksig_uncorrected_path = stats_dir / 'k_significant_features_uncorrected.csv'
    ksig_corrected_path = stats_dir / 'k_significant_features.csv'
    if not (ksig_uncorrected_path.exists() and ksig_corrected_path.exists()):
        ksig_corrected_path.parent.mkdir(exist_ok=True, parents=True)      
        fset_ksig, (scores, pvalues_ksig), support = k_significant_feat(feat=features, 
                                                                        y_class=metadata[grouping_var], 
                                                                        k=len(fset),
                                                                        score_func='f_classif', 
                                                                        scale=None, 
                                                                        feat_names=None, 
                                                                        plot=False, 
                                                                        k_to_plot=None, 
                                                                        close_after_plotting=True,
                                                                        saveto=None, #k_sigfeat_dir
                                                                        figsize=None, 
                                                                        title=None, 
                                                                        xlabel=None)
        # compile + save k-significant features (uncorrected) 
        ksig_table = pd.concat([pd.Series(scores), pd.Series(pvalues_ksig)], axis=1)
        ksig_table.columns = ['scores','pvals']
        ksig_table.index = fset_ksig
        ksig_table.to_csv(ksig_uncorrected_path, header=True, index=True)   
        
        # Correct for multiple comparisons
        _, ksig_table['pvals'] = _multitest_correct(ksig_table['pvals'], 
                                                    multitest_method=args.fdr_method,
                                                    fdr=args.pval_threshold)
        
        # save k-significant features (corrected)
        ksig_table.to_csv(ksig_corrected_path, header=True, index=True)   

#%% mRMR feature selection: minimum Redundancy, Maximum Relevance #####
    
    mrmr_dir = plot_dir / 'mrmr'
    mrmr_dir.mkdir(exist_ok=True, parents=True)
    mrmr_results_path = mrmr_dir / "mrmr_results.csv"

    if not mrmr_results_path.exists():
        estimator = Pipeline([('scaler', StandardScaler()), ('estimator', LogisticRegression())])
        y = metadata[grouping_var].values
        (mrmr_feat_set, 
         mrmr_scores, 
         mrmr_support) = mRMR_feature_selection(features, y_class=y, k=10,
                                                redundancy_func='pearson_corr',
                                                relevance_func='kruskal',
                                                n_bins=10, mrmr_criterion='MID',
                                                plot=True, k_to_plot=5, 
                                                close_after_plotting=True,
                                                saveto=mrmr_dir, figsize=None)
        # save results                                        
        mrmr_table = pd.concat([pd.Series(mrmr_feat_set), pd.Series(mrmr_scores)], axis=1)
        mrmr_table.columns = ['feature','score']
        mrmr_table.to_csv(mrmr_results_path, header=True, index=False)
        
        n_cv = 5
        cv_scores_mrmr = cross_val_score(estimator, features[mrmr_feat_set], y, cv=n_cv)
        print('MRMR CV Score: %f (n=%d)' % (np.mean(cv_scores_mrmr), n_cv))
        cv_scores_mrmr = pd.DataFrame(cv_scores_mrmr, columns=['cv_score'])
        cv_scores_mrmr.to_csv(mrmr_dir / "cv_scores.csv", header=True, index=False)
    else:
        # load mrmr results
        mrmr_table = pd.read_csv(mrmr_results_path)
        
    mrmr_feat_set = mrmr_table['feature'].to_list()
    print("\nTop %d features found by MRMR:" % len(mrmr_feat_set))
    for feat in mrmr_feat_set:
        print(feat)
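
The docstring of keio_stats lists the attributes its args object must provide; helpers such as get_save_dir, subset_results and univariate_tests are imported elsewhere in the source project and are not shown in this excerpt. Below is a minimal sketch of a call: every attribute name comes from the docstring (plus align_bluelight, which the function body reads), but all values are illustrative placeholders.

from types import SimpleNamespace

# illustrative args object: attribute names follow the keio_stats docstring, values are placeholders
args = SimpleNamespace(drop_size_features=False,
                       norm_features_only=False,
                       percentile_to_use='50th',
                       remove_outliers=False,
                       omit_strains=None,
                       grouping_variable='gene_name',
                       control_dict={'gene_name': 'wild_type'},  # placeholder control strain
                       collapse_control=True,
                       n_top_feats=None,
                       tierpsy_top_feats_dir=None,
                       align_bluelight=True,   # also read by keio_stats when loading top features
                       test='ANOVA',
                       f_test=True,
                       pval_threshold=0.05,
                       fdr_method='fdr_bh',    # placeholder multiple-testing correction method
                       n_sig_features=100)

# features, metadata: cleaned feature summaries and accompanying metadata DataFrames, loaded elsewhere
keio_stats(features, metadata, args)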
Example #9

# coiled, control_drug, control_strain and prestim_feats are defined earlier
# in the source script (not shown in this excerpt)
coiled_meta = [meta[meta['MOA_specific'] == coiled]]
for control in control_drug:
    coiled_meta.append(meta[meta['drug_type'] == control])
coiled_meta = pd.concat(coiled_meta)

coiled_meta = coiled_meta[coiled_meta['worm_strain'] == control_strain]

# filter down the feat mat
coiled_feat = feat.loc[coiled_meta.index, prestim_feats]

# now do statistical tests
y_classes = list(
    zip(coiled_meta.drug_type, coiled_meta.imaging_plate_drug_concentration))
y_classes = np.array(['{}, {}'.format(x[0], x[1]) for x in y_classes])
sig_feats = significant_features.k_significant_feat(coiled_feat,
                                                    y_classes,
                                                    k=20)

speed_feats = [
    'speed_10th_prestim', 'speed_50th_prestim', 'speed_90th_prestim',
    'speed_IQR_prestim'
]
curvature_feats = [
    'curvature_midbody_abs_10th_prestim', 'curvature_midbody_abs_50th_prestim',
    'curvature_midbody_abs_90th_prestim', 'curvature_midbody_abs_IQR_prestim',
    'curvature_midbody_norm_abs_10th_prestim',
    'curvature_midbody_norm_abs_50th_prestim',
    'curvature_midbody_norm_abs_90th_prestim',
    'curvature_midbody_norm_abs_IQR_prestim',
    'curvature_midbody_w_backward_abs_10th_prestim',
    'curvature_midbody_w_backward_abs_50th_prestim',