Example 1
# NB: these snippets assume the original module's imports, e.g. numpy as np,
# pandas as pd, django.db.models.Q, and the neuroelectro Django models as m.
def normalize_all_nedms():
    """Iterate through all neuroelectro NeuronEphysDataMap objects and normalize for differences in units"""
    nedms = m.NeuronEphysDataMap.objects.filter(neuron_concept_map__times_validated__gt = 0)
    nedms = nedms.exclude(source__data_table__irrelevant_flag = True)

    nedm_count = nedms.count()
    for i,nedm in enumerate(nedms):
        prog(i, nedm_count)
        norm_dict = normalize_nedm_val(nedm)
        # print norm_dict
        # print nedm.val_norm
        if nedm.val_norm is None:
            # if no existing normalized value for nedm in DB
            nedm.val_norm = norm_dict['value']
            nedm.err_norm = norm_dict['error']
            nedm.save()

        elif np.isclose(nedm.val, nedm.val_norm):
            # if there is a normalized value, but it's the same as the unnormalized value
            # so it may need to be updated
            norm_value = norm_dict['value']

            if norm_value is None:
                # can't normalize value but there's an existing one in the database
                # so keep it as long as it's in an appropriate range

                if not check_data_val_range(nedm.val_norm, nedm.ephys_concept_map.ephys_prop):
                    print('deleting pre-normalized value of %s because out of appropriate range for %s' % (nedm.val_norm, nedm.ephys_concept_map.ephys_prop))
                    nedm.val_norm = None
                    nedm.err_norm = None
                    nedm.save()
            elif np.isclose(norm_value, nedm.val_norm):
                # new normalized value same as old normalized value, so do nothing
                pass
                # nedm.err_norm = norm_dict['error']
                # nedm.save()
            else:
                # the stored normalized value equaled the raw value, but the algorithm
                # now produces a different one, so update the stored value
                nedm.val_norm = norm_value
                nedm.err_norm = norm_dict['error']
                nedm.save()
        else:
            # there's a normalized value but it's different from what the algorithm suggests, so it's likely manually added

            # if existing normalized value is out of range
            if not check_data_val_range(nedm.val_norm, nedm.ephys_concept_map.ephys_prop):
                print('deleting pre-normalized value of %s because out of appropriate range for %s' % (nedm.val_norm, nedm.ephys_concept_map.ephys_prop))
                nedm.val_norm = None
                nedm.err_norm = None
                nedm.save()

            else:
                nedm.err_norm = norm_dict['error']
                nedm.save()
        # after all the checks above, do a final check that the algorithmically normalized value is in the correct range
        annotate_misnormalized_nedm(nedm)
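
The example above leans on two helpers, normalize_nedm_val and check_data_val_range, whose implementations aren't shown on this page. A minimal sketch of their assumed contracts, for orientation only (the real neuroelectro versions handle unit parsing and more edge cases):

def normalize_nedm_val(nedm):
    """Assumed contract: returns a dict with 'value' and 'error' keys;
    'value' is None when normalization fails."""
    # ... unit lookup and conversion elided ...
    return {'value': None, 'error': None}

def check_data_val_range(data_val, ephys_prop):
    """Assumed contract: True iff data_val lies inside the property's
    [min_range, max_range] interval (consistent with the test in Example 2)."""
    if data_val is None:
        return False
    return ephys_prop.min_range <= data_val <= ephys_prop.max_range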
Example 2
    def test_check_data_val_range_out(self):
        ephys_prop = m.EphysProp.objects.create(name = 'input resistance', min_range = .1, max_range = 10000)

        data_val = -10
        output_bool = check_data_val_range(data_val, ephys_prop)
        expected_bool = False
        self.assertEqual(output_bool, expected_bool)
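
For contrast, a hypothetical companion test for an in-range value, written under the same assumed contract:

    def test_check_data_val_range_in(self):
        ephys_prop = m.EphysProp.objects.create(name = 'input resistance', min_range = .1, max_range = 10000)

        data_val = 100
        output_bool = check_data_val_range(data_val, ephys_prop)
        expected_bool = True
        self.assertEqual(output_bool, expected_bool)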
Example 3
def annotate_misnormalized_nedm(nedm):
    ''' If the nedm value can't be algorithmically normalized to something appropriate
    and the raw value is out of range, leave a note in the corresponding ecm in the table'''
    norm_dict = normalize_nedm_val(nedm)
    if norm_dict['value'] is None and check_data_val_range(nedm.val, nedm.ephys_concept_map.ephys_prop) is False:
        ecm = nedm.ephys_concept_map
        normalizing_failed_note = 'Parsing failed to normalize ephys data'
        if not ecm.note:
            ecm.note = normalizing_failed_note
            ecm.changed_by = m.get_robot_user()
            ecm.save()
            print('adding failed normalizing note to %s with data table id %d' % (ecm.ephys_prop, ecm.source.data_table.pk))
Example 4
def export_db_to_data_frame():
    """Returns a nicely formatted pandas data frame of the ephys data and metadata for each stored article"""

    ncms = m.NeuronConceptMap.objects.all()  # .order_by('-history__latest__history_date') # gets human-validated neuron mappings
    ncms = ncms.exclude(Q(source__data_table__irrelevant_flag = True) | Q(source__data_table__needs_expert = True))  # exclude irrelevant or expert-needed tables
    ncm_count = ncms.count()
    ephys_props = m.EphysProp.objects.all().order_by('-ephyspropsummary__num_neurons')
    ephys_names = []
    for e in ephys_props:
        ephys_names.append(e.short_name)
        ephys_names.append(e.short_name + '_err')
        ephys_names.append(e.short_name + '_n')
        ephys_names.append(e.short_name + '_sd')
        ephys_names.append(e.short_name + '_note')
    #ephys_names = [e.name for e in ephys_props]
    #ncms = ncms.sort('-changed_on')
    dict_list = []
    for kk, ncm in enumerate(ncms):
        prog(kk, ncm_count)

        # TODO: need to check whether nedms under the same ncm have different experimental factor concept maps
        #     # check if any nedms have any experimental factors assoc with them
        #     efcms = ne_db.ExpFactConceptMap.objects.filter(neuronephysdatamap__in = nedms)
        #     for efcm in efcms:
        #         nedms = ne_db.NeuronEphysDataMap.objects.filter(neuron_concept_map = ncm, exp_fact_concept_map = ).distinct()

        nedms = m.NeuronEphysDataMap.objects.filter(neuron_concept_map = ncm, expert_validated = True).distinct()
        if nedms.count() == 0:
            continue

        sd_errors = identify_stdev(nedms)  # True when reported errors are already standard deviations

        temp_dict = dict()
        temp_metadata_list = []
        for nedm in nedms:
            e = nedm.ephys_concept_map.ephys_prop
            # check data integrity - value MUST be in appropriate range for property
            data_val = nedm.val_norm
            err_val = nedm.err_norm
            n_val = nedm.n
            note_val = nedm.ephys_concept_map.note
            if check_data_val_range(data_val, e):
                output_ephys_name = e.short_name
                output_ephys_err_name = '%s_err' % output_ephys_name
                output_ephys_sd_name = '%s_sd' % output_ephys_name
                output_ephys_n_name = '%s_n' % output_ephys_name
                output_ephys_note_name = '%s_note' % output_ephys_name
                temp_dict[output_ephys_name] = data_val
                temp_dict[output_ephys_err_name] = err_val
                temp_dict[output_ephys_n_name] = n_val
                temp_dict[output_ephys_note_name] = note_val

                # convert standard error to standard deviation if needed (SD = SEM * sqrt(n))
                if sd_errors:
                    temp_dict[output_ephys_sd_name] = err_val
                else:
                    # need to calculate sd
                    if err_val and n_val:
                        sd_val = err_val * np.sqrt(n_val)
                        temp_dict[output_ephys_sd_name] = sd_val

            #temp_metadata_list.append(nedm.get_metadata())

        temp_dict['NeuronName'] = ncm.neuron.name
        temp_dict['NeuronLongName'] = ncm.neuron_long_name
        if ncm.neuron_long_name:
            temp_dict['NeuronPrefName'] = ncm.neuron_long_name
        else:
            temp_dict['NeuronPrefName'] = ncm.neuron.name
        article = ncm.get_article()

        brain_reg_dict = get_neuron_region(ncm.neuron)
        if brain_reg_dict:
            temp_dict['BrainRegion'] = brain_reg_dict['region_name']

        #article_metadata = normalize_metadata(article)

        # note: uses the last nedm from the loop above; article-level metadata
        # is assumed to be shared across all nedms in the table
        metadata_list = nedm.get_metadata()
        out_dict = dict()
        for metadata in metadata_list:
            #print metadata.name
            if not metadata.cont_value:
                if metadata.name in out_dict:
                    out_dict[metadata.name] = '%s, %s' % (out_dict[metadata.name], metadata.value)
                else:
                    out_dict[metadata.name] = metadata.value
            elif metadata.cont_value and 'Solution' in metadata.name:
                article = nedm.get_article()
                amdm = m.ArticleMetaDataMap.objects.filter(article = article, metadata__name = metadata.name)[0]
                ref_text = amdm.ref_text
                out_dict[metadata.name] = ref_text.text.encode('utf8', "replace")
                out_dict[metadata.name + '_conf'] = metadata.cont_value.mean
            elif metadata.cont_value and 'AnimalAge' in metadata.name:
                # use the geometric mean of the age range rather than the arithmetic mean
                if metadata.cont_value.min_range and metadata.cont_value.max_range:
                    min_range = metadata.cont_value.min_range
                    max_range = metadata.cont_value.max_range
                    if min_range <= 0:
                        # clamp to 1 so the geometric mean stays real-valued
                        min_range = 1
                    geom_mean = np.sqrt(min_range * max_range)
                    out_dict[metadata.name] = geom_mean
                else:
                    out_dict[metadata.name] = metadata.cont_value.mean
            else:
                out_dict[metadata.name] = metadata.cont_value.mean

        # has article metadata been curated by a human?
        afts = article.get_full_text_stat()
        if afts and afts.metadata_human_assigned:
            metadata_curated = True
            metadata_curation_note = afts.metadata_curation_note
        else:
            metadata_curated = False
            metadata_curation_note = None

        if ncm.source.data_table:
            data_table_note = ncm.source.data_table.note
        else:
            data_table_note = None

        temp_dict2 = temp_dict.copy()
        temp_dict2.update(out_dict)
        temp_dict = temp_dict2
        temp_dict['Title'] = article.title
        temp_dict['Pmid'] = article.pmid
        temp_dict['PubYear'] = article.pub_year
        temp_dict['LastAuthor'] = unicode(get_article_last_author(article))
        temp_dict['TableID'] = ncm.source.data_table_id
        temp_dict['TableNote'] = data_table_note
        temp_dict['ArticleID'] = article.pk
        temp_dict['MetadataCurated'] = metadata_curated
        temp_dict['MetadataNote'] = metadata_curation_note
        #print temp_dict
        dict_list.append(temp_dict)

    base_names = ['Title', 'Pmid', 'PubYear', 'LastAuthor', 'ArticleID', 'TableID',
                  'NeuronName', 'NeuronLongName', 'NeuronPrefName', 'BrainRegion']
    nom_vars = ['MetadataCurated', 'Species', 'Strain', 'ElectrodeType', 'PrepType', 'JxnPotential']
    cont_vars  = ['JxnOffset', 'RecTemp', 'AnimalAge', 'AnimalWeight', 'FlagSoln']
    annot_notes = ['MetadataNote', 'TableNote']

    for i in range(0, 1):  # loops once, with solution index i = 0
        cont_vars.extend([ 'ExternalSolution', 'ExternalSolution_conf', 'external_%s_Mg' % i, 'external_%s_Ca' % i, 'external_%s_Na' % i, 'external_%s_Cl' % i, 'external_%s_K' % i, 'external_%s_pH' % i, 'InternalSolution', 'InternalSolution_conf', 'internal_%s_Mg' % i, 'internal_%s_Ca' % i, 'internal_%s_Na' % i, 'internal_%s_Cl' % i, 'internal_%s_K' % i, 'internal_%s_pH' % i])
        #cont_var_headers.extend(['External_%s_Mg' % i, 'External_%s_Ca' % i, 'External_%s_Na' % i, 'External_%s_Cl' % i, 'External_%s_K' % i, 'External_%s_pH' % i, 'External_%s_text' % i, 'Internal_%s_Mg' % i, 'Internal_%s_Ca' % i, 'Internal_%s_Na' % i, 'Internal_%s_Cl' % i, 'Internal_%s_K' % i, 'Internal_%s_pH' % i, 'Internal_%s_text' % i])

    col_names = base_names + nom_vars + cont_vars + annot_notes + ephys_names

    # set up pandas data frame for export
    df = pd.DataFrame(dict_list, columns = col_names)

    # perform collapsing of rows about same neuron types but potentially across different tables
    cleaned_df = df
    # need to generate a random int for coercing NaN's to something - required for pandas grouping
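    # (pandas groupby silently drops rows whose grouping keys contain NaN,
    # hence the sentinel fill below and the restore afterwards)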
    rand_int = -abs(np.random.randint(20000))
    cleaned_df.loc[:, 'Pmid':'FlagSoln'] = df.loc[:, 'Pmid':'FlagSoln'].fillna(rand_int)
    grouping_fields = base_names + nom_vars + cont_vars
    grouping_fields.remove('TableID')
    cleaned_df = cleaned_df.groupby(by = grouping_fields).mean()
    cleaned_df.replace(to_replace = rand_int, value = np.nan, inplace=True)
    cleaned_df.reset_index(inplace=True)
    cleaned_df.sort_values(by = ['PubYear', 'Pmid', 'NeuronName'], ascending=[False, True, True], inplace=True)
    cleaned_df.index.name = "Index"

    # add in extra ephys data from columns based on known relationships, e.g., AP amp from AP peak and AP thr
    cleaned_df = add_ephys_props_by_conversion(cleaned_df)

    return cleaned_df
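
A hypothetical usage sketch (assumes a configured Django environment; the file name is illustrative):

df = export_db_to_data_frame()
df.to_csv('neuroelectro_export.csv', encoding='utf-8')
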
Example 5
def export_db_to_data_frame():
    """Returns a nicely formatted pandas data frame of the ephys data and metadata for each stored article"""

    ncms = (
        m.NeuronConceptMap.objects.all()
    )  # .order_by('-history__latest__history_date') # gets human-validated neuron mappings
    # ncms = ncms.exclude(Q(source__data_table__irrelevant_flag = True) | Q(source__data_table__needs_expert = True)) # exclude
    ncms = ncms.exclude(Q(source__data_table__irrelevant_flag=True))  # exclude

    ncm_count = ncms.count()
    ephys_props = m.EphysProp.objects.all().order_by("-ephyspropsummary__num_neurons")
    ephys_names = []
    for e in ephys_props:
        ephys_names.append(e.short_name)
        ephys_names.append(e.short_name + "_raw")
        ephys_names.append(e.short_name + "_err")
        ephys_names.append(e.short_name + "_n")
        ephys_names.append(e.short_name + "_sd")
        ephys_names.append(e.short_name + "_note")
    # ephys_names = [e.name for e in ephys_props]
    # ncms = ncms.sort('-changed_on')
    dict_list = []
    for kk, ncm in enumerate(ncms):
        prog(kk, ncm_count)

        # TODO: need to check whether nedms under the same ncm have different experimental factor concept maps
        #     # check if any nedms have any experimental factors assoc with them
        #     efcms = ne_db.ExpFactConceptMap.objects.filter(neuronephysdatamap__in = nedms)
        #     for efcm in efcms:
        #         nedms = ne_db.NeuronEphysDataMap.objects.filter(neuron_concept_map = ncm, exp_fact_concept_map = ).distinct()

        # only check whether ncms have been expertly validated, not the nedm itself
        nedms = m.NeuronEphysDataMap.objects.filter(
            neuron_concept_map=ncm, neuron_concept_map__expert_validated=True
        ).distinct()
        if nedms.count() == 0:
            continue

        temp_dict = dict()
        temp_metadata_list = []
        for nedm in nedms:
            e = nedm.ephys_concept_map.ephys_prop

            # get error type for nedm by db lookup
            error_type = nedm.get_error_type()

            # check data integrity - value MUST be in appropriate range for property
            data_val = nedm.val_norm
            data_raw_val = nedm.val
            err_val = nedm.err_norm
            n_val = nedm.n
            note_val = nedm.ephys_concept_map.note
            output_ephys_name = e.short_name
            output_ephys_raw_name = "%s_raw" % output_ephys_name
            output_ephys_err_name = "%s_err" % output_ephys_name
            output_ephys_sem_name = "%s_sem" % output_ephys_name
            output_ephys_sd_name = "%s_sd" % output_ephys_name
            output_ephys_n_name = "%s_n" % output_ephys_name
            output_ephys_note_name = "%s_note" % output_ephys_name

            # output raw vals and notes for all props
            temp_dict[output_ephys_raw_name] = data_raw_val
            temp_dict[output_ephys_note_name] = note_val

            if check_data_val_range(data_val, e):

                temp_dict[output_ephys_name] = data_val
                temp_dict[output_ephys_err_name] = err_val
                temp_dict[output_ephys_n_name] = n_val

                # convert standard error to standard deviation if needed (SD = SEM * sqrt(n))
                if error_type == "sd":
                    temp_dict[output_ephys_sd_name] = err_val
                else:
                    # need to calculate sd
                    if err_val and n_val:
                        sd_val = err_val * np.sqrt(n_val)
                        temp_dict[output_ephys_sd_name] = sd_val

            # temp_metadata_list.append(nedm.get_metadata())

        temp_dict["NeuronName"] = ncm.neuron.name
        temp_dict["NeuronLongName"] = ncm.neuron_long_name
        if ncm.neuron_long_name:
            temp_dict["NeuronPrefName"] = ncm.neuron_long_name
        else:
            temp_dict["NeuronPrefName"] = ncm.neuron.name
        temp_dict["NeuroNERAnnots"] = ncm.get_neuroner()
        article = ncm.get_article()

        brain_reg_dict = get_neuron_region(ncm.neuron)
        if brain_reg_dict:
            temp_dict["BrainRegion"] = brain_reg_dict["region_name"]

        # article_metadata = normalize_metadata(article)

        # note: uses the last nedm from the loop above; article-level metadata
        # is assumed to be shared across all nedms in the table
        metadata_list = nedm.get_metadata()
        out_dict = dict()
        for metadata in metadata_list:
            # print metadata.name
            if not metadata.cont_value:
                if metadata.name in out_dict:
                    out_dict[metadata.name] = "%s, %s" % (out_dict[metadata.name], metadata.value)
                else:
                    out_dict[metadata.name] = metadata.value
                if metadata.name == "Strain":
                    out_dict["StrainNote"] = metadata.note
                if metadata.name == "Species":
                    out_dict["SpeciesNote"] = metadata.note
            elif metadata.cont_value and "Solution" in metadata.name:
                article = nedm.get_article()
                if metadata.ref_text:
                    ref_text = metadata.ref_text
                else:
                    amdm = m.ArticleMetaDataMap.objects.filter(article=article, metadata__name=metadata.name)[0]
                    ref_text = amdm.ref_text
                out_dict[metadata.name] = ref_text.text.encode("utf8", "replace")
                out_dict[metadata.name + "_conf"] = metadata.cont_value.mean
            elif metadata.cont_value and "AnimalAge" in metadata.name:
                # use the geometric mean of the age range rather than the arithmetic mean
                if metadata.cont_value.min_range and metadata.cont_value.max_range:
                    min_range = metadata.cont_value.min_range
                    max_range = metadata.cont_value.max_range
                    if min_range <= 0:
                        # clamp to 1 so the geometric mean stays real-valued
                        min_range = 1
                    geom_mean = np.sqrt(min_range * max_range)
                    out_dict[metadata.name] = geom_mean
                else:
                    out_dict[metadata.name] = metadata.cont_value.mean
            else:
                out_dict[metadata.name] = metadata.cont_value.mean

        # has article metadata been curated by a human?
        afts = article.get_full_text_stat()
        if afts and afts.metadata_human_assigned:
            metadata_curated = True
            metadata_curation_note = afts.metadata_curation_note
        else:
            metadata_curated = False
            metadata_curation_note = None

        if ncm.source.data_table:
            data_table_note = ncm.source.data_table.note
        else:
            data_table_note = None

        temp_dict2 = temp_dict.copy()
        temp_dict2.update(out_dict)
        temp_dict = temp_dict2
        temp_dict["Title"] = article.title
        temp_dict["Pmid"] = article.pmid
        temp_dict["PubYear"] = article.pub_year
        temp_dict["LastAuthor"] = unicode(get_article_last_author(article))
        temp_dict["FirstAuthor"] = unicode(get_article_author(article, 0))
        temp_dict["TableID"] = ncm.source.data_table_id
        temp_dict["TableNote"] = data_table_note
        temp_dict["ArticleID"] = article.pk
        temp_dict["MetadataCurated"] = metadata_curated
        temp_dict["MetadataNote"] = metadata_curation_note
        # print temp_dict
        dict_list.append(temp_dict)

    base_names = [
        "Title",
        "Pmid",
        "PubYear",
        "FirstAuthor",
        "LastAuthor",
        "ArticleID",
        "TableID",
        "NeuronName",
        "NeuronLongName",
        "NeuronPrefName",
        "NeuroNERAnnots",
        "BrainRegion",
    ]
    nom_vars = [
        "MetadataCurated",
        "Species",
        "SpeciesNote",
        "Strain",
        "StrainNote",
        "ElectrodeType",
        "PrepType",
        "JxnPotential",
    ]
    cont_vars = ["JxnOffset", "RecTemp", "AnimalAge", "AnimalWeight", "FlagSoln"]
    annot_notes = ["MetadataNote", "TableNote"]

    grouping_fields = base_names + nom_vars + cont_vars

    for i in range(0, 1):  # loops once, with solution index i = 0
        cont_vars.extend(
            [
                "ExternalSolution",
                "ExternalSolution_conf",
                "external_%s_Mg" % i,
                "external_%s_Ca" % i,
                "external_%s_Na" % i,
                "external_%s_Cl" % i,
                "external_%s_K" % i,
                "external_%s_pH" % i,
                "external_%s_Cs" % i,
                "external_%s_glucose" % i,
                "external_%s_HEPES" % i,
                "external_%s_EDTA" % i,
                "external_%s_EGTA" % i,
                "external_%s_BAPTA" % i,
                "external_%s_ATP" % i,
                "external_%s_GTP" % i,
                "external_%s_CNQX" % i,
                "external_%s_DNQX" % i,
                "external_%s_NBQX" % i,
                "external_%s_MK801" % i,
                "external_%s_DAPV" % i,
                "external_%s_CPP" % i,
                "external_%s_kynur" % i,
                "external_%s_BIC" % i,
                "external_%s_picro" % i,
                "external_%s_gabazine" % i,
                "external_%s_CGP" % i,
                "external_%s_strychnine" % i,
                "InternalSolution",
                "InternalSolution_conf",
                "internal_%s_Mg" % i,
                "internal_%s_Ca" % i,
                "internal_%s_Na" % i,
                "internal_%s_Cl" % i,
                "internal_%s_K" % i,
                "internal_%s_pH" % i,
                "internal_%s_Cs" % i,
                "internal_%s_glucose" % i,
                "internal_%s_HEPES" % i,
                "internal_%s_EDTA" % i,
                "internal_%s_EGTA" % i,
                "internal_%s_BAPTA" % i,
                "internal_%s_ATP" % i,
                "internal_%s_GTP" % i,
            ]
        )

    col_names = base_names + nom_vars + cont_vars + annot_notes + ephys_names

    # not sure why but writing and reading data frame seems to fix a problem with ephys property pooling fxn
    df = pd.DataFrame(dict_list, columns=col_names)
    df.to_csv("temp.csv", sep="\t", encoding="utf-8")
    df = pd.read_csv("temp.csv", sep="\t", index_col=0, header=0)
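    # (the round-trip most likely normalizes column dtypes: read_csv re-infers
    # numeric dtypes for columns that were mixed/object-typed in memory)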

    # perform collapsing of rows about same neuron types but potentially across different tables
    # this should be optional if the goal is ephys recuration, not ephys reanalysis
    grouping_fields.remove("TableID")
    grouping_fields.remove("NeuroNERAnnots")
    cleaned_df = pool_ephys_props_across_tables(df, grouping_fields)

    # add in extra ephys data from columns based on known relationships, e.g., AP amp from AP peak and AP thr
    cleaned_df = add_ephys_props_by_conversion(cleaned_df)

    # return two data frames: one with properties pooled across tables and extended
    # via algebraic conversions, and one left unpooled
    return cleaned_df, df
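
As before, a hypothetical usage sketch (variable and file names are illustrative):

pooled_df, unpooled_df = export_db_to_data_frame()
pooled_df.to_csv('neuroelectro_pooled.csv', encoding='utf-8')
unpooled_df.to_csv('neuroelectro_unpooled.csv', encoding='utf-8')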