def _extract_phenotype(data_phenotype, nuclei, wildcards, features):
    from lasagna.pipelines._20170914_endo import feature_table_stack
    from lasagna.process import feature_table, default_object_features

    # per-channel phenotype features measured over the nuclei mask
    df = feature_table_stack(data_phenotype, nuclei, features)

    # object features for the nuclei themselves, with 'label' renamed to 'cell'
    features = default_object_features.copy()
    features['cell'] = features.pop('label')
    df2 = feature_table(nuclei, nuclei, features)

    df = df.join(df2.set_index('cell'), on='cell')

    # annotate every row with the wildcard values
    for k, v in wildcards.items():
        df[k] = v

    return df
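# Hedged usage sketch; the wildcard keys and the feature dict below are
# illustrative assumptions, not taken from the pipeline itself:
#
#     df = _extract_phenotype(data_phenotype, nuclei,
#                             wildcards={'well': 'A1', 'tile': '12'},
#                             features={'dapi_median':
#                                       lambda r: np.median(r.intensity_image)})
#
# Each wildcard key becomes a constant column on the per-cell table, so tables
# from different wells/tiles can be concatenated and traced back to their source.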
def get_features(data, peaks):
    """Uses peaks from DO only."""
    DO_masks = peaks_to_DO_masks(peaks)

    arr = []
    # loop over peak sources
    for source, mask in DO_masks.items():
        # peak object features
        objects = feature_table(mask, mask, object_features)
        # DO/sequencing features
        table = build_feature_table(data, mask, peak_features, all_index)
        table = objects.join(table)
        table['source'] = source
        arr += [table]

    return pd.concat(arr)
def test_feature_table():
    features = {
        'area': lambda region: region.area,
        'bounds': lambda region: region.bbox,
        'label': lambda region: np.median(region.intensity_image[region.intensity_image > 0]),
    }

    data = read_stack(stack)
    mask = read_stack(nuclei)
    df = feature_table(data[0][0], mask, features)

    # compare against the pickled reference table
    df_ = pd.read_pickle(home('feature_table.pkl'))
    assert (df == df_).all().all()
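# Illustrative sketch only, not the lasagna.process implementation: one way to
# build a feature table like the one exercised above, assuming each feature
# function receives a skimage.measure regionprops object and the result is a
# pandas DataFrame with one row per labeled object.
import numpy as np
import pandas as pd
from skimage.measure import regionprops


def _feature_table_sketch(intensity_image, label_image, features):
    # measure every labeled region once, then evaluate each feature function on it
    regions = regionprops(label_image, intensity_image=intensity_image)
    records = [{name: func(region) for name, func in features.items()}
               for region in regions]
    return pd.DataFrame(records)


# Example with a tiny synthetic mask:
#     labels = np.zeros((4, 4), dtype=int)
#     labels[:2, :2] = 1
#     labels[2:, 2:] = 2
#     _feature_table_sketch(labels.astype(float), labels,
#                           {'area': lambda r: r.area, 'bounds': lambda r: r.bbox})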
def get_nuclear_features(dapi, nuclei):
    features = dict(object_features)
    features.update(peak_features)
    return feature_table(dapi, nuclei, features)