def get_reagent_columns(dataset_id):
    """
    Build an ordered mapping of reagent-type data columns for a dataset.

    For each reagent DataColumn of the dataset (ordered by display_order),
    returns {column_name: {<camelCase dwg meta attrs>, 'columns': {...}}},
    where 'columns' maps '<column_name>_<dwgName>' to the matching reagent
    FieldInformation metadata (reagentType/dwgName/description), or {} when
    no FieldInformation row exists for that dwg name.
    """
    # Index reagent/batch FieldInformation rows by their camelCase dwg name
    # so the per-column lookup below is O(1).
    col_field_info = {}
    for fi in FieldInformation.objects.filter(
            table__in=[
                'smallmolecule', 'protein', 'cell', 'antibody', 'otherreagent',
                'smallmoleculebatch', 'proteinbatch', 'cellbatch',
                'antibodybatch', 'otherreagentbatch']):
        col_field_info[fi.get_camel_case_dwg_name()] = {
            'reagentType': fi.table,
            'dwgName': fi.dwg_field_name,
            'description': fi.description}
    data_columns = (
        DataColumn.objects.filter(dataset_id=dataset_id)
        .filter(data_type__in=[
            'small_molecule', 'cell', 'protein', 'antibody', 'otherreagent']))
    reagent_fields = OrderedDict()
    meta_field_info = get_listing(DataColumn(), ['datacolumn'])
    for dc in data_columns.order_by('display_order'):
        specific_name = dc.name
        # meta_field_info maps attr name -> {'fieldinformation': fi, ...};
        # expose each attr of the DataColumn under its camelCase dwg name.
        field_schema = {
            info['fieldinformation'].get_camel_case_dwg_name(): getattr(dc, attr)
            for attr, info in meta_field_info.items()}
        # The datapoint columns this reagent type contributes, keyed as
        # '<column_name>_<dwgName>'.
        field_schema['columns'] = {
            '%s_%s' % (specific_name, dwg_name): col_field_info.get(dwg_name, {})
            for dwg_name in DataSetDataResource2.datapoint_cols[dc.data_type]}
        reagent_fields[specific_name] = field_schema
    return reagent_fields
def get_datapoint_fields(dataset_id):
    """
    Build an ordered mapping of non-reagent datapoint columns for a dataset.

    Excludes reagent-typed columns, omero image columns, and time-unit
    columns (day/hour/minute/second).  For each remaining DataColumn
    (ordered by display_order) returns
    {column_name: {<camelCase dwg meta attr>: value}}.
    """
    data_columns = (
        DataColumn.objects.filter(dataset_id=dataset_id)
        .exclude(data_type__in=[
            'small_molecule', 'cell', 'protein', 'antibody', 'otherreagent',
            'omero_image'])
        .exclude(unit__in=['day', 'hour', 'minute', 'second']))
    datapoint_fields = OrderedDict()
    meta_field_info = get_listing(DataColumn(), ['datacolumn'])
    for dc in data_columns.order_by('display_order'):
        # meta_field_info maps attr name -> {'fieldinformation': fi, ...};
        # expose each attr of the DataColumn under its camelCase dwg name.
        datapoint_fields[dc.name] = {
            info['fieldinformation'].get_camel_case_dwg_name(): getattr(dc, attr)
            for attr, info in meta_field_info.items()}
    return datapoint_fields
def build_schema(self):
    """
    Extend the base Tastypie schema for the DataSet resource.

    Replaces schema['fields'] with the dataset detail fields plus the SAF
    custom fields, and adds 'datasetDataFile' and 'datapointInformation'
    sections built from FieldInformation metadata.  All sections are sorted
    alphabetically by key (TODO: sort on fieldinformation order instead).
    """
    schema = super(DataSetResource, self).build_schema()
    # TODO: reincorporate the default DB-schema information that the base
    # class put in schema['fields'] (it is replaced below).
    fields = get_detail_schema(DataSet(), 'dataset', lambda x: x.show_in_detail)
    # Custom fields for SAF.  TODO: generate these names from the
    # fieldinformation instead of hard-coding them here.
    fields['datapointFile'] = get_schema_fieldinformation('datapoint_file', '')
    fields['safVersion'] = get_schema_fieldinformation('saf_version', '')
    fields['screeningFacility'] = get_schema_fieldinformation(
        'screening_facility', '')
    # sort alpha; TODO: sort on fi.order
    schema['fields'] = OrderedDict(sorted(fields.items(), key=lambda x: x[0]))

    ds_fieldinformation = (
        DataSetDataResource.get_datasetdata_column_fieldinformation())
    # Append the synthetic per-datapoint fields not covered by the
    # column fieldinformation.
    for extra in ('datapoint_value', 'timepoint', 'timepoint_unit',
                  'timepoint_description'):
        ds_fieldinformation.append((extra, get_fieldinformation(extra, [''])))

    meta_field_info = get_listing(FieldInformation(), ['fieldinformation'])

    def _field_schema(fi):
        # Expose each meta attribute of a FieldInformation row under its
        # camelCase dwg name.
        return {
            info['fieldinformation'].get_camel_case_dwg_name():
                getattr(fi, attr)
            for attr, info in meta_field_info.items()}

    fields = {}
    for _, fi in ds_fieldinformation:
        fields[fi.get_camel_case_dwg_name()] = _field_schema(fi)
    # sort alpha; TODO: sort on fi.order
    schema['datasetDataFile'] = OrderedDict(
        sorted(fields.items(), key=lambda x: x[0]))

    dc_fieldinformation = FieldInformation.objects.filter(
        table='datacolumn', show_in_detail=True)
    datapoint_fields = {}
    for fi in dc_fieldinformation:
        datapoint_fields[fi.get_camel_case_dwg_name()] = _field_schema(fi)
    # sort alpha; TODO: sort on fi.order
    schema['datapointInformation'] = OrderedDict(
        sorted(datapoint_fields.items(), key=lambda x: x[0]))
    return schema
def build_schema(self):
    """
    Extend the base Tastypie schema for the DataSetData resource.

    Replaces schema['fields'] with the dataset-data column fields plus the
    synthetic datapoint/timepoint fields, each described by its
    FieldInformation meta attributes keyed by camelCase dwg name, sorted
    alphabetically (TODO: use the fieldinformation order instead).
    """
    schema = super(DataSetDataResource, self).build_schema()
    # TODO: reincorporate the default DB-schema information that the base
    # class put in schema['fields'] (it is replaced below).
    ds_fieldinformation = (
        DataSetDataResource.get_datasetdata_column_fieldinformation())
    # Append the synthetic per-datapoint fields not covered by the
    # column fieldinformation.
    for extra in ('datapoint_value', 'timepoint', 'timepoint_unit',
                  'timepoint_description'):
        ds_fieldinformation.append((extra, get_fieldinformation(extra, [''])))
    meta_field_info = get_listing(FieldInformation(), ['fieldinformation'])
    fields = {}
    for _, fi in ds_fieldinformation:
        # Expose each meta attribute of the FieldInformation row under its
        # camelCase dwg name.
        fields[fi.get_camel_case_dwg_name()] = {
            info['fieldinformation'].get_camel_case_dwg_name():
                getattr(fi, attr)
            for attr, info in meta_field_info.items()}
    # sort alpha; TODO: use the fieldinformation order
    schema['fields'] = OrderedDict(sorted(fields.items(), key=lambda x: x[0]))
    return schema