def execute_analyzeReplicates(self,experiment_id_I,sample_name_abbreviations_I=[],sample_names_I=[],component_names_I=[]):
        '''calculate the broth replicate concentrations by subtracting out the average filtrate concentration
        NOTE: data_stage01_quantification_normalized must be populated
        Input:
        experiment_id_I
        sample_name_abbreviations_I
        sample_names_I
        component_names_I
        Output:
        sample_name_short
        component_group_name
        component_name
        calculated_concentration
        calculated_concentration_units
        '''
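        # Example call (hypothetical IDs; assumes `stage01` is an instance of this class):
        #   stage01.execute_analyzeReplicates('exp01',
        #       sample_name_abbreviations_I=['WT01'],
        #       component_names_I=['glc-D'])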
        print('execute_analyzeReplicates...')
        data_O = [];       
        #SPLIT 1:
        #1 query unique calculated_concentration_units/sample_name_abbreviations/component_names/component_group_names/time_points/sample_names/sample_ids/sample_description
        uniqueRows_all = self.getQueryResult_groupNormalizedAveragesSamples_experimentID_dataStage01QuantificationNormalizedAndAverages_limsSampleAndSampleID(
                experiment_id_I
            );
        #2 filter in broth samples
        uniqueRows = self.filter_groupNormalizedAveragesSamples_experimentID_dataStage01QuantificationNormalizedAndAverages_limsSampleAndSampleID(
                uniqueRows_all,
                calculated_concentration_units_I=[],
                component_names_I=component_names_I,
                component_group_names_I=[],
                sample_names_I=sample_names_I,
                sample_name_abbreviations_I=sample_name_abbreviations_I,
                time_points_I=[],
            );
        if isinstance(uniqueRows,listDict):
            uniqueRows.convert_dataFrame2ListDict()
            uniqueRows = uniqueRows.get_listDict();
        data_tmp = {};#reorganize the data into a dictionary for quick traversal of the replicates
        for uniqueRow_cnt,uniqueRow in enumerate(uniqueRows):
            unique = (uniqueRow['sample_name_abbreviation'],
                      uniqueRow['experiment_id'],
                      uniqueRow['time_point'],
                      uniqueRow['component_name'],
                      uniqueRow['calculated_concentration_units'])
            if unique not in data_tmp:
                data_tmp[unique] = [];
            data_tmp[unique].append(uniqueRow);
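        # data_tmp now maps (sample_name_abbreviation, experiment_id, time_point,
        # component_name, calculated_concentration_units) -> list of replicate rows,
        # so all broth and filtrate replicates for a condition can be traversed together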
        for unique,replicates in data_tmp.items():
            print('analyzing replicates for sample_name_abbreviation ' + replicates[0]['sample_name_abbreviation'] + ' and component_name ' + replicates[0]['component_name']);
            # extract filtrate data
            concs = [d['calculated_concentration'] for d in replicates if d['sample_desc']=='Filtrate'
                     and not d['calculated_concentration'] is None and d['calculated_concentration']!=0];
            conc_units = [d['calculated_concentration_units'] for d in replicates if d['sample_desc']=='Filtrate'
                     and not d['calculated_concentration'] is None and d['calculated_concentration']!=0];
            if conc_units: conc_units = conc_units[0];
            else: conc_units = None;
            n_replicates = len(concs);
            conc_average_filtrate = 0.0;
            conc_var_filtrate = 0.0;
            # calculate the average and variance of the filtrate concentrations
            if (not(concs)): conc_average_filtrate = 0;
            elif n_replicates<2: conc_average_filtrate = concs[0];
            else: 
                conc_average_filtrate = numpy.mean(numpy.array(concs));
                conc_var_filtrate = numpy.var(numpy.array(concs));
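            # NOTE: numpy.var defaults to the population variance (ddof=0);
            # pass ddof=1 if the sample variance is intended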

            # extract broth data
            for rep in replicates:
                if rep['sample_desc']!='Broth': continue;
                # NOTE: unmeasured broth replicates (None or 0 concentration) are skipped here,
                # even though all broth replicates may be needed for MI2 later on
                if rep['calculated_concentration'] is None or rep['calculated_concentration']==0: continue;
                conc = rep['calculated_concentration'];
                if rep['calculated_concentration_units']: conc_units = rep['calculated_concentration_units'];
                # subtract the filtrate average from each broth replicate
                conc_broth = conc-conc_average_filtrate;
                if (conc_broth < 0): conc_broth = None;
                # add data to the DB
                row = {'experiment_id':experiment_id_I,
                    'sample_name_short':rep['sample_name_short'],
                    'time_point':rep['time_point'],
                    'component_group_name':rep['component_group_name'],
                    'component_name':rep['component_name'],
                    'calculated_concentration':conc_broth,
                    'calculated_concentration_units':conc_units,
                    'used_':True,
                    'comment_':None,};
                data_O.append(row);

        self.add_rows_table('data_stage01_quantification_replicates',data_O);
    def export_dataStage01NormalizedAndAverages_js(self,
                analysis_id_I,
                sample_name_abbreviations_I=[],
                sample_names_I=[],
                component_names_I=[],
                cv_threshold_I=40,
                extracellular_threshold_I=80,
                data_dir_I='tmp'):
        '''export data_stage01_quantification_normalized and averages for visualization with ddt'''

        calc = calculate_interface();
        
        print('export_dataStage01NormalizedAndAverages_js...')
        data_norm_broth = [];
        data_norm_filtrate = [];
        data_norm_combined = [];
        data_ave = [];

        #SPLIT 1:
        #1 query unique calculated_concentration_units/sample_name_abbreviations/component_names/component_group_names/time_points/sample_names/sample_ids/sample_description
        uniqueRows_all = self.getQueryResult_groupNormalizedAveragesSamples_analysisID_dataStage01QuantificationNormalizedAndAverages(
                analysis_id_I
            );
        #2 filter in broth samples
        uniqueRows = self.filter_groupNormalizedAveragesSamples_experimentID_dataStage01QuantificationNormalizedAndAverages_limsSampleAndSampleID(
                uniqueRows_all,
                calculated_concentration_units_I=[],
                component_names_I=component_names_I,
                component_group_names_I=[],
                sample_names_I=sample_names_I,
                sample_name_abbreviations_I=sample_name_abbreviations_I,
                time_points_I=[],
            );
        if isinstance(uniqueRows,listDict):
            uniqueRows.convert_dataFrame2ListDict()
            uniqueRows = uniqueRows.get_listDict();
        replicates_tmp = {};#reorganize the data into a dictionary for quick traversal of the replicates
        for uniqueRow_cnt,uniqueRow in enumerate(uniqueRows):
            unique = (
                      uniqueRow['sample_name_abbreviation'],
                      uniqueRow['experiment_id'],
                      uniqueRow['time_point'],
                      uniqueRow['component_name'],
                      uniqueRow['calculated_concentration_units'])
            if unique not in replicates_tmp:
                replicates_tmp[unique] = [];
            replicates_tmp[unique].append(uniqueRow);
        for unique,replicates in replicates_tmp.items():
            #get data from averages once per sample_name_abbreviation/component_name
            #print('exporting sample_name_abbreviation ' + replicates[0]['sample_name_abbreviation'] + " and component_name " + replicates[0]['component_name']);
            # get the averages and %CV samples
            row_ave = {};
            row_ave = self.get_row_experimentIDAndSampleNameAbbreviationAndTimePointAndComponentNameAndCalculatedConcentrationCVAndExtracellularPercent_dataStage01Averages(
                    replicates[0]['experiment_id'],
                    replicates[0]['sample_name_abbreviation'],
                    replicates[0]['time_point'],
                    replicates[0]['component_name'],
                    cv_threshold_I=cv_threshold_I,
                    extracellular_threshold_I=extracellular_threshold_I);
            if row_ave:
                stdev = calc.convert_cv2StDev(row_ave['calculated_concentration_filtrate_average'],row_ave['calculated_concentration_filtrate_cv']);
                row_ave['calculated_concentration_filtrate_lb'] = row_ave['calculated_concentration_filtrate_average']-stdev;
                row_ave['calculated_concentration_filtrate_ub'] = row_ave['calculated_concentration_filtrate_average']+stdev;
                stdev = calc.convert_cv2StDev(row_ave['calculated_concentration_broth_average'],row_ave['calculated_concentration_broth_cv']);
                row_ave['calculated_concentration_broth_lb'] = row_ave['calculated_concentration_broth_average']-stdev;
                row_ave['calculated_concentration_broth_ub'] = row_ave['calculated_concentration_broth_average']+stdev;
                stdev = calc.convert_cv2StDev(row_ave['calculated_concentration_average'],row_ave['calculated_concentration_cv']);
                row_ave['calculated_concentration_lb'] = row_ave['calculated_concentration_average']-stdev;
                row_ave['calculated_concentration_ub'] = row_ave['calculated_concentration_average']+stdev;
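                # the lb/ub bounds above are mean +/- one standard deviation, recovered
                # from the stored %CV (assuming convert_cv2StDev computes stdev = cv/100*average)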
                row_ave['analysis_id'] = analysis_id_I;

                # get data from normalized
                filtrate_conc = [];
                broth_conc = [];
                for rep in replicates:
                    row = {};
                    row['analysis_id'] = analysis_id_I;
                    row['extracellular_percent'] = row_ave['extracellular_percent']
                    row['calculated_concentration_cv'] = row_ave['calculated_concentration_cv']
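                    # apply rep last so that replicate-level values win on any key collision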
                    row.update(rep)
                    if rep['sample_desc'] == 'Filtrate':
                        data_norm_filtrate.append(row);
                        if rep['calculated_concentration'] is not None:
                            filtrate_conc.append(rep['calculated_concentration']);
                    if rep['sample_desc'] == 'Broth':
                        data_norm_broth.append(row);
                        if rep['calculated_concentration'] is not None:
                            broth_conc.append(rep['calculated_concentration']);
                    data_norm_combined.append(row);

                #add data to aggregate and sample_name_abbreviations_all
                if not broth_conc: broth_conc = [0];
                if not filtrate_conc: filtrate_conc = [0];
                row_ave['calculated_concentration_min']=min(broth_conc+filtrate_conc)
                row_ave['calculated_concentration_max']=max(broth_conc+filtrate_conc)
                row_ave['calculated_concentration_broth_min']=min(broth_conc)
                row_ave['calculated_concentration_broth_max']=max(broth_conc)
                row_ave['calculated_concentration_filtrate_min']=min(filtrate_conc)
                row_ave['calculated_concentration_filtrate_max']=max(filtrate_conc)
                data_ave.append(row_ave);

        # dump chart parameters to a js files
        data1_keys = ['analysis_id',
                      'experiment_id',
                      'sample_name',
                      'sample_id',
                      'sample_name_abbreviation',
                      'component_group_name',
                      'component_name',
                      'calculated_concentration_units',
                      'extracellular_percent',
                      'calculated_concentration_cv'
                    ];
        data1_nestkeys = ['component_name'];
        data1_keymap = {'xdata':'component_name',
                        'ydata':'calculated_concentration',
                        #'ydatalb':'peakInfo_lb',
                        #'ydataub':'peakInfo_ub',
                        #'ydatamin':None,
                        #'ydatamax':None,
                        #'ydataiq1':None,
                        #'ydataiq3':None,
                        #'ydatamedian':None,
                        'serieslabel':'sample_name_abbreviation',
                        'featureslabel':'sample_name'};
        data2_keys = ['analysis_id',
                      'experiment_id',
                      'sample_name_abbreviation',
                      'time_point',
                      'component_group_name',
                      'component_name',
                      'calculated_concentration_units',
                      'extracellular_percent',
                      'calculated_concentration_broth_cv'
                    ];
        data2_nestkeys = ['component_name'];
        data2_keymap = {'xdata':'component_name',
                        'ydatamean':'calculated_concentration_broth_average',
                        'ydatalb':'calculated_concentration_broth_lb',
                        'ydataub':'calculated_concentration_broth_ub',
                        'ydatamin':'calculated_concentration_broth_min',
                        'ydatamax':'calculated_concentration_broth_max',
                        #'ydataiq1':None,
                        #'ydataiq3':None,
                        #'ydatamedian':None,
                        'serieslabel':'sample_name_abbreviation',
                        'featureslabel':'component_name'};
        data3_keys = ['analysis_id',
                      'experiment_id',
                      'sample_name_abbreviation',
                      'time_point',
                      'component_group_name',
                      'component_name',
                      'calculated_concentration_units',
                      'extracellular_percent',
                      'calculated_concentration_filtrate_cv',
                    ];
        data3_nestkeys = ['component_name'];
        data3_keymap = {'xdata':'component_name',
                        'ydatamean':'calculated_concentration_filtrate_average',
                        'ydatalb':'calculated_concentration_filtrate_lb',
                        'ydataub':'calculated_concentration_filtrate_ub',
                        'ydatamin':'calculated_concentration_filtrate_min',
                        'ydatamax':'calculated_concentration_filtrate_max',
                        #'ydataiq1':None,
                        #'ydataiq3':None,
                        #'ydatamedian':None,
                        'serieslabel':'sample_name_abbreviation',
                        'featureslabel':'component_name'};
        data4_keys = ['analysis_id',
                      'experiment_id',
                      'sample_name_abbreviation',
                      'time_point',
                      'component_group_name',
                      'component_name',
                      'calculated_concentration_units',
                      'extracellular_percent',
                      'calculated_concentration_cv'
                    ];
        data4_nestkeys = ['component_name'];
        data4_keymap = {'xdata':'component_name',
                        'ydata':'calculated_concentration_average',
                        'ydatamean':'calculated_concentration_average',
                        'ydatalb':'calculated_concentration_lb',
                        'ydataub':'calculated_concentration_ub',
                        #'ydatamin':'calculated_concentration_min',
                        #'ydatamax':'calculated_concentration_max',
                        #'ydataiq1':None,
                        #'ydataiq3':None,
                        #'ydatamedian':None,
                        'serieslabel':'sample_name_abbreviation',
                        'featureslabel':'component_name'};
        # make the data object
        dataobject_O = [{"data":data_norm_broth,"datakeys":data1_keys,"datanestkeys":data1_nestkeys},
                        {"data":data_norm_filtrate,"datakeys":data1_keys,"datanestkeys":data1_nestkeys},
                        {"data":data_norm_combined,"datakeys":data1_keys,"datanestkeys":data1_nestkeys},
                        {"data":data_ave,"datakeys":data2_keys,"datanestkeys":data2_nestkeys},
                        {"data":data_ave,"datakeys":data3_keys,"datanestkeys":data3_nestkeys},
                        {"data":data_ave,"datakeys":data4_keys,"datanestkeys":data4_nestkeys}];
        # make the tile parameter objects for the normalized and averages
        formtileparameters_averages_O = {'tileheader':'Filter menu averages','tiletype':'html','tileid':"filtermenu2",'rowid':"row1",'colid':"col1",
            'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-6"};
        formparameters_averages_O = {'htmlid':'filtermenuform2',"htmltype":'form_01',"formsubmitbuttonidtext":{'id':'submit2','text':'submit'},"formresetbuttonidtext":{'id':'reset2','text':'reset'},"formupdatebuttonidtext":{'id':'update2','text':'update'}};
        formtileparameters_averages_O.update(formparameters_averages_O);
        # make the svg objects for the averages data
        svgparameters_averages_broth_O = {"svgtype":'boxandwhiskersplot2d_02',"svgkeymap":[data2_keymap,data1_keymap],
                            'svgid':'svg4',
                            "svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
                            "svgwidth":250,"svgheight":250,
                            "svgx1axislabel":"component_name","svgy1axislabel":"concentration",
        		'svgformtileid':'filtermenu2','svgresetbuttonid':'reset2','svgsubmitbuttonid':'submit2'};
        svgtileparameters_averages_broth_O = {'tileheader':'Broth data','tiletype':'svg','tileid':"tile4",'rowid':"row2",'colid':"col1",
            'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
        svgtileparameters_averages_broth_O.update(svgparameters_averages_broth_O);
        if data_norm_filtrate:
            svgparameters_averages_filtrate_O = {"svgtype":'boxandwhiskersplot2d_02',"svgkeymap":[data3_keymap,data1_keymap],
                            'svgid':'svg5',
                            "svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
                            "svgwidth":250,"svgheight":250,
                            "svgx1axislabel":"component_name","svgy1axislabel":"concentration",
        		'svgformtileid':'filtermenu2','svgresetbuttonid':'reset2','svgsubmitbuttonid':'submit2'};
            svgtileparameters_averages_filtrate_O = {'tileheader':'Filtrate data','tiletype':'svg','tileid':"tile5",'rowid':"row2",'colid':"col2",
                'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
            svgtileparameters_averages_filtrate_O.update(svgparameters_averages_filtrate_O);
        else:
            svgparameters_averages_filtrate_O = {"svgtype":'boxandwhiskersplot2d_01',"svgkeymap":[data3_keymap],
                            'svgid':'svg5',
                            "svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
                            "svgwidth":250,"svgheight":250,
                            "svgx1axislabel":"component_name","svgy1axislabel":"concentration",
        		'svgformtileid':'filtermenu2','svgresetbuttonid':'reset2','svgsubmitbuttonid':'submit2'};
            svgtileparameters_averages_filtrate_O = {'tileheader':'Filtrate data','tiletype':'svg','tileid':"tile5",'rowid':"row2",'colid':"col2",
                'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
            svgtileparameters_averages_filtrate_O.update(svgparameters_averages_filtrate_O);
        svgparameters_averages_combined_O = {
                            #"svgtype":'boxandwhiskersplot2d_02',
                            "svgtype":'boxandwhiskersplot2d_01',
                            #"svgkeymap":[data4_keymap,data1_keymap],
                            "svgkeymap":[data4_keymap],
                            'svgid':'svg6',
                            "svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
                            "svgwidth":250,"svgheight":250,
                            "svgx1axislabel":"component_name","svgy1axislabel":"concentration",
    						'svgformtileid':'filtermenu2','svgresetbuttonid':'reset2','svgsubmitbuttonid':'submit2'};
        svgtileparameters_averages_combined_O = {'tileheader':'Broth-Filtrate data','tiletype':'svg','tileid':"tile6",'rowid':"row2",'colid':"col3",
            'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
        svgtileparameters_averages_combined_O.update(svgparameters_averages_combined_O);
        # make the tables for the normalized and averages data
        tableparameters_normalized_O = {"tabletype":'responsivetable_01',
                    'tableid':'table1',
                    "tablefilters":None,
                    "tableclass":"table  table-condensed table-hover",
    			    'tableformtileid':'filtermenu1','tableresetbuttonid':'reset1','tablesubmitbuttonid':'submit1'};
        tabletileparameters_normalized_O = {'tileheader':'normalized data','tiletype':'table','tileid':"tile7",'rowid':"row4",'colid':"col1",
            'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
        tabletileparameters_normalized_O.update(tableparameters_normalized_O);
        tableparameters_averages_O = {"tabletype":'responsivetable_01',
                    'tableid':'table2',
                    "tablefilters":None,
                    "tableclass":"table  table-condensed table-hover",
    			    'tableformtileid':'filtermenu2','tableresetbuttonid':'reset2','tablesubmitbuttonid':'submit2'};
        tabletileparameters_averages_O = {'tileheader':'averages data','tiletype':'table','tileid':"tile8",'rowid':"row5",'colid':"col1",
            'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
        tabletileparameters_averages_O.update(tableparameters_averages_O);
        parametersobject_O = [
            formtileparameters_averages_O,
            svgtileparameters_averages_broth_O,
            svgtileparameters_averages_filtrate_O,
            svgtileparameters_averages_combined_O,
            tabletileparameters_normalized_O,
            tabletileparameters_averages_O];
        tile2datamap_O = {
            "filtermenu2":[5],
            "tile4":[3,0],
            "tile5":[4,1],
            #"tile6":[5,2],
            "tile6":[5],
            "tile7":[2],
            "tile8":[5]
            };
        #if data_norm_filtrate: tile2datamap_O.update({"tile5":[4,1]})
        #else: tile2datamap_O.update({"tile5":[4]})
        filtermenuobject_O = [
            #{"filtermenuid":"filtermenu1","filtermenuhtmlid":"filtermenuform1",
            #"filtermenusubmitbuttonid":"submit1","filtermenuresetbuttonid":"reset1",
            #"filtermenuupdatebuttonid":"update1"},
            {"filtermenuid":"filtermenu2","filtermenuhtmlid":"filtermenuform2",
            "filtermenusubmitbuttonid":"submit2","filtermenuresetbuttonid":"reset2",
            "filtermenuupdatebuttonid":"update2"}
                              ];
        #
        ddtutilities = ddt_container(parameters_I = parametersobject_O,data_I = dataobject_O,tile2datamap_I = tile2datamap_O,filtermenu_I = filtermenuobject_O);
        if data_dir_I=='tmp':
            filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
            with open(filename_str,'w') as file:
                file.write(ddtutilities.get_allObjects());
        elif data_dir_I=='data_json':
            data_json_O = ddtutilities.get_allObjects_js();
            return data_json_O;
    def execute_analyzeAverages_blanks(self,experiment_id_I,
            sample_name_abbreviations_I=[],
            sample_names_I=[],
            component_names_I=[],
            blank_sample_names_I=[],
            blank_sample_name_abbreviations_I=[],
            ):
        '''calculate the averages as ave(broth) - ave(blank broth)
        NOTE: data_stage01_quantification_normalized must be populated
        Input:
        experiment_id_I
        sample_name_abbreviations_I
        sample_names_I
        component_names_I
        blank_sample_names_I = if specified, the named blank samples will be used as the filtrate instead of filtrate samples
        blank_sample_name_abbreviations_I = if specified, samples with these abbreviations will be used as the blanks
        Output:
        sample_name_abbreviation
        component_group_name
        component_name
        concentration average
        concentration CV
        concentration units
        % extracellular
        '''

        data_O=[];
        calc = calculate_interface();
        
        print('execute_analyzeAverages_blanks...')
        #SPLIT 1:
        #1 query unique calculated_concentration_units/sample_name_abbreviations/component_names/component_group_names/time_points/sample_names/sample_ids/sample_description
        uniqueRows_all = self.getQueryResult_groupNormalizedAveragesSamples_experimentID_dataStage01QuantificationNormalizedAndAverages_limsSampleAndSampleID(
                experiment_id_I
            );
        #2 filter in broth samples
        uniqueRows = self.filter_groupNormalizedAveragesSamples_experimentID_dataStage01QuantificationNormalizedAndAverages_limsSampleAndSampleID(
                uniqueRows_all,
                calculated_concentration_units_I=[],
                component_names_I=component_names_I,
                component_group_names_I=[],
                sample_names_I=sample_names_I,
                sample_name_abbreviations_I=sample_name_abbreviations_I,
                time_points_I=[],
            );
        if isinstance(uniqueRows,listDict):
            uniqueRows.convert_dataFrame2ListDict()
            uniqueRows = uniqueRows.get_listDict();
        data_tmp = {};#reorganize the data into a dictionary for quick traversal of the replicates
        for uniqueRow_cnt,uniqueRow in enumerate(uniqueRows):
            unique = (uniqueRow['sample_name_abbreviation'],
                      uniqueRow['experiment_id'],
                      uniqueRow['time_point'],
                      uniqueRow['component_name'],
                      uniqueRow['calculated_concentration_units'])
            if unique not in data_tmp:
                data_tmp[unique] = [];
            data_tmp[unique].append(uniqueRow);
            
        #3 filter in blank samples
        uniqueBlanks=[];
        if blank_sample_names_I or blank_sample_name_abbreviations_I:
            uniqueBlanks = self.filter_groupNormalizedAveragesSamples_experimentID_dataStage01QuantificationNormalizedAndAverages_limsSampleAndSampleID(
                uniqueRows_all,
                calculated_concentration_units_I=[],
                component_names_I=component_names_I,
                component_group_names_I=[],
                sample_names_I=blank_sample_names_I,
                sample_name_abbreviations_I=blank_sample_name_abbreviations_I,
                time_points_I=[],
                );
        if isinstance(uniqueBlanks,listDict):
            uniqueBlanks.convert_dataFrame2ListDict()
            uniqueBlanks = uniqueBlanks.get_listDict();
        data_blanks_tmp = {}; #reorganize the data for a quick traversal of the components
        for uniqueBlanks_cnt,uniqueBlank in enumerate(uniqueBlanks):
            unique = uniqueBlank['component_name']
            if unique not in data_blanks_tmp:
                data_blanks_tmp[unique] = [];
            data_blanks_tmp[unique].append(uniqueBlank);
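        # data_blanks_tmp now maps component_name -> list of blank rows for that component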

        #4 iterate through each unique unique calculated_concentration_units/sample_name_abbreviations/component_names/component_group_names/time_points
        # and determine the ave, cv, etc., after subtracting out the blanks
        for unique,replicates in data_tmp.items():
            print('analyzing averages for sample_name_abbreviation ' + replicates[0]['sample_name_abbreviation'] + ' and component_name ' + replicates[0]['component_name']);
            # get blank concentrations
            if data_blanks_tmp and replicates[0]['component_name'] in data_blanks_tmp.keys():
                concs = [d['calculated_concentration'] for d in data_blanks_tmp[replicates[0]['component_name']]
                         if not d['calculated_concentration'] is None and d['calculated_concentration']!=0];
                conc_units = [d['calculated_concentration_units'] for d in data_blanks_tmp[replicates[0]['component_name']]
                         if not d['calculated_concentration'] is None and d['calculated_concentration']!=0];
                if conc_units: conc_units = conc_units[0];
                else: conc_units = None;
            else:
                concs = [];
                conc_units = None;

            n_replicates_filtrate = len(concs);
            conc_average_filtrate = 0.0;
            conc_var_filtrate = 0.0;
            conc_cv_filtrate = 0.0;
            # calculate average and CV of concentrations
            if (not(concs)): 
                conc_average_filtrate = 0;
                conc_var_filtrate = 0;
            elif n_replicates_filtrate<2: 
                conc_average_filtrate = concs[0];
                conc_var_filtrate = 0;
            else: 
                #conc_average_filtrate, conc_var_filtrate = calc.calculate_ave_var_R(concs);
                conc_average_filtrate = numpy.mean(numpy.array(concs));
                conc_var_filtrate = numpy.var(numpy.array(concs));
                if (conc_average_filtrate <= 0): conc_cv_filtrate = 0;
                else: conc_cv_filtrate = sqrt(conc_var_filtrate)/conc_average_filtrate*100; 
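            # %CV is the relative standard deviation: sqrt(variance)/average*100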
            # extract broth concentrations and units
            concs = [d['calculated_concentration'] for d in replicates if d['sample_desc']=='Broth'
                     and not d['calculated_concentration'] is None and d['calculated_concentration']!=0];
            conc_units = [d['calculated_concentration_units'] for d in replicates if d['sample_desc']=='Broth'
                     and not d['calculated_concentration'] is None and d['calculated_concentration']!=0];
            if conc_units: conc_units = conc_units[0];
            else: conc_units = None;
            n_replicates = len(concs);
            conc_average_broth = 0.0;
            conc_var_broth = 0.0;
            conc_cv_broth = 0.0;
            # calculate average and CV of concentrations
            if (not(concs)): 
                continue
            elif n_replicates<2: 
                continue
            else: 
                #conc_average_broth, conc_var_broth = calc.calculate_ave_var_R(concs);
                conc_average_broth = numpy.mean(numpy.array(concs));
                conc_var_broth = numpy.var(numpy.array(concs));
                if (conc_average_broth <= 0): conc_cv_broth = 0;
                else: conc_cv_broth = sqrt(conc_var_broth)/conc_average_broth*100; 
            # calculate average and CV
            conc_average = 0.0;
            conc_var = 0.0;
            conc_cv = 0.0;
            conc_average = conc_average_broth-conc_average_filtrate;
            if (conc_average < 0): conc_average = 0;
            conc_var = conc_var_broth + conc_var_filtrate;
            if (conc_average <= 0): conc_cv = 0;
            else: conc_cv = sqrt(conc_var)/conc_average*100;
            # calculate the % extracellular
            extracellular_percent = conc_average_filtrate/conc_average_broth*100;
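            # extracellular_percent is the fraction of the broth concentration accounted
            # for by the filtrate/blank; it can exceed 100 when the filtrate average
            # is greater than the broth average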
            # add data to the DB
            row = {'experiment_id':experiment_id_I,
                    'sample_name_abbreviation':replicates[0]['sample_name_abbreviation'],
                    'time_point':replicates[0]['time_point'],
                    'component_group_name':replicates[0]['component_group_name'],
                    'component_name':replicates[0]['component_name'],
                    'n_replicates_broth':n_replicates,
                    'calculated_concentration_broth_average':conc_average_broth,
                    'calculated_concentration_broth_cv':conc_cv_broth,
                    'n_replicates_filtrate':n_replicates_filtrate,
                    'calculated_concentration_filtrate_average':conc_average_filtrate,
                    'calculated_concentration_filtrate_cv':conc_cv_filtrate,
                    'n_replicates':n_replicates,
                    'calculated_concentration_average':conc_average,
                    'calculated_concentration_cv':conc_cv,
                    'calculated_concentration_units':conc_units,
                    'extracellular_percent':extracellular_percent,
                    'used_':True,};
            data_O.append(row);

        self.add_rows_table('data_stage01_quantification_averages',data_O);
    def export_dataStage01RNASequencingGenesFpkmTracking_pairWisePlot_js(self,analysis_id_I,log2normalization_I=True,data_dir_I='tmp'):
        '''Export data for a pairwise scatter plot
        INPUT:
        analysis_id = String, analysis_id
        log2normalization_I = Boolean, apply a log2 normalization the FPKM values (default: True)
        data_dir_I = string, data directory
        OUTPUT:
        writes the ddt visualization data to ddt_data.js, or returns the data when data_dir_I='data_json'
        '''
        # get the analysis information
        experiment_ids,sample_names = [],[];
        experiment_ids,sample_names = self.get_experimentIDAndSampleName_analysisID_dataStage01RNASequencingAnalysis(analysis_id_I);
        data_O = [];
        for sample_name_cnt,sample_name in enumerate(sample_names):
            # query fpkm data:
            fpkms = [];
            fpkms = self.get_rows_experimentIDAndSampleName_dataStage01RNASequencingGenesFpkmTracking(experiment_ids[sample_name_cnt],sample_name);
            if log2normalization_I:
                for f in fpkms:
                    # leave zero FPKM values at 0.0 (log2(0) is undefined)
                    if f['FPKM'] != 0.0: f['FPKM'] = log2(f['FPKM']);
            data_O.extend(fpkms);
        # reorganize the data
        listdict = listDict(data_O);
        data_O,columnValueHeader_O = listdict.convert_listDict2ColumnGroupListDict(
                    #value_labels_I = ['FPKM','FPKM_conf_lo','FPKM_conf_hi'],
                    value_labels_I = ['FPKM',],
                    column_labels_I = ['experiment_id','sample_name'],
                    feature_labels_I = ['gene_id','gene_short_name'],
                    na_str_I=0.0,
                    columnValueConnector_str_I='_',
                    );
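        # data_O is now in wide format: one row per gene_id/gene_short_name and one
        # FPKM column per experiment_id/sample_name pair (joined with '_'),
        # with missing values filled with 0.0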
        # make the tile object
        #data1 = filtermenu/table
        data1_keymap_table = {
            'xdata':'svd_method',
            'ydata':'singular_value_index',
            'zdata':'d_vector',
            'rowslabel':'svd_method',
            'columnslabel':'singular_value_index',
            };     
        #data2 = svg
        #if single plot, data2 = filter menu, data2, and table
        data1_keys = ['gene_id','gene_short_name'
                    ];
        data1_nestkeys = ['gene_short_name'];
        data1_keymap_svg = [];
        svgtype = [];
        svgtile2datamap = [];
        data_svg_keymap = [];
        for cnt1,column1 in enumerate(columnValueHeader_O):
            for cnt2,column2 in enumerate(columnValueHeader_O[cnt1+1:]):
                keymap = {
                'xdata':column1,
                'ydata':column2,
                'serieslabel':'',
                'featureslabel':'gene_short_name',
                'tooltipdata':'gene_short_name',
                };
                data1_keymap_svg.append([keymap]);
                data_svg_keymap.append(keymap);
                svgtype.append('pcaplot2d_scores_01');
                svgtile2datamap.append([0]);
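        # one scatter panel is generated per unordered pair of sample columns,
        # i.e. n*(n-1)/2 panels for n columns, each drawing from the single data object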

        nsvgtable = ddt_container_filterMenuAndChart2dAndTable();
        nsvgtable.make_filterMenuAndChart2dAndTable(
                data_filtermenu=data_O,
                data_filtermenu_keys=data1_keys,
                data_filtermenu_nestkeys=data1_nestkeys,
                data_filtermenu_keymap=data1_keymap_table,
                data_svg_keys=data1_keys,
                data_svg_nestkeys=data1_nestkeys,
                data_svg_keymap=data_svg_keymap,
                data_table_keys=data1_keys,
                data_table_nestkeys=data1_nestkeys,
                data_table_keymap=data1_keymap_table,
                data_svg=None,
                data_table=None,
                svgtype=svgtype,
                tabletype='responsivetable_01',
                svgx1axislabel='',
                svgy1axislabel='',
                tablekeymap = [data1_keymap_table],
                svgkeymap = data1_keymap_svg,
                formtile2datamap=[0],
                tabletile2datamap=[0],
                svgtile2datamap=svgtile2datamap,
                svgfilters=None,
                svgtileheader='Pair-wise scatter plot',
                tablefilters=None,
                tableheaders=None
                );

        if data_dir_I=='tmp':
            filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
            with open(filename_str,'w') as file:
                file.write(nsvgtable.get_allObjects());
        elif data_dir_I=='data_json':
            data_json_O = nsvgtable.get_allObjects_js();
            return data_json_O;
    def execute_normalizeSamples2Biomass(self,experiment_id_I,biological_material_I=None,conversion_name_I=None,sample_names_I=[],component_names_I=[],use_height_I=False,sample_types_I=['Unknown']):
        '''Normalize calculated concentrations to measured biomass
         Input:
           experiment_id_I
           biological_material_I =  biological material (if None, no normalization is done)
           conversion_name_I = biomass conversion name (if None, no normalization is done)
           use_height_I = if True, use the ion count for peak height instead of the calculated_concentration or height/area ratio
         Output:
           sample_name
           sample_id
           component_group_name
           component_name
           calculated_concentration
           calculated_concentration_units
           used_
        '''

        data_O=[];
        calc = calculate_interface();
        
        print('execute_normalizeSamples2Biomass...')

        #SPLIT 1:
        # get the unique sample_names/sample_ids/sample_types/component_names/component_group_names/calculated_concentration_units
        groupJoin = self.getGroupJoin_experimentAndQuantitationMethodAndMQResultsTable_experimentID_dataStage01QuantificationMQResultsTable(
            experiment_id_I,
            sample_types_I=sample_types_I,
            sample_names_I=sample_names_I,
            component_names_I=component_names_I,
            sample_ids_I=[],
            );
        if isinstance(groupJoin,listDict):
            groupJoin.convert_dataFrame2ListDict()
            groupJoin = groupJoin.get_listDict();
        if (biological_material_I and conversion_name_I):
            # get the conversion units once
            conversion = None;
            conversion_units = None;
            conversion, conversion_units = self.get_conversionAndConversionUnits_biologicalMaterialAndConversionName(biological_material_I,conversion_name_I);
            for row_cnt,row in enumerate(groupJoin):
                print('normalizing samples2Biomass for component_name ' + row['component_name']);
                # get physiological parameters
                cvs = None;
                cvs_units = None;
                od600 = None;
                dil = None;
                dil_units = None;
                cvs, cvs_units, od600, dil,dil_units = self.get_CVSAndCVSUnitsAndODAndDilAndDilUnits_sampleName(row['sample_name']);
                if not(cvs and cvs_units and od600 and dil and dil_units):
                    print('cvs, cvs_units, or od600 are missing from the physiological parameters');
                    print('or dil and dil_units are missing from the sample description');
                    exit(-1);
                elif not(conversion and conversion_units):
                    print('biological_material or conversion name is incorrect');
                    exit(-1);  
                else:
                    #calculate the cell volume or biomass depending on the conversion units
                    #cell_volume, cell_volume_units = calc.calculate_cellVolume_CVSAndCVSUnitsAndODAndConversionAndConversionUnits(cvs,cvs_units,od600,conversion,conversion_units);
                    cell_volume, cell_volume_units = calc.calculate_biomass_CVSAndCVSUnitsAndODAndConversionAndConversionUnits(cvs,cvs_units,od600,conversion,conversion_units);
                # get the calculated concentration
                calc_conc = None;
                calc_conc_units = None;
                if use_height_I: 
                    calc_conc, calc_conc_units = row['height'],'height';
                elif row['use_calculated_concentration']:
                    calc_conc, calc_conc_units = row['calculated_concentration'],row['conc_units'];
                elif not row['use_calculated_concentration'] and row['use_area']:
                    calc_conc, calc_conc_units = row['area_ratio'],'area_ratio';
                elif not row['use_calculated_concentration'] and not row['use_area']:
                    calc_conc, calc_conc_units = row['height_ratio'],'height_ratio';
                # calculate the normalized concentration
                norm_conc = None;
                norm_conc_units = None;
                if calc_conc: 
                    norm_conc, norm_conc_units = calc.calculate_conc_concAndConcUnitsAndDilAndDilUnitsAndConversionAndConversionUnits(calc_conc,calc_conc_units,dil,dil_units,cell_volume, cell_volume_units);
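                # norm_conc is the dilution-corrected concentration per unit biomass
                # (the exact conversion is delegated to calculate_interface)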
                # update data_stage01_quantification_normalized
                if norm_conc:
                    row_O = {'experiment_id':experiment_id_I,
                            'sample_name':row['sample_name'],
                            'sample_id':row['sample_id'],
                            'component_group_name':row['component_group_name'],
                            'component_name':row['component_name'],
                            'calculated_concentration':norm_conc,
                            'calculated_concentration_units':norm_conc_units,
                            'used_':True,};
                    data_O.append(row_O);
        else:
            for row_cnt,row in enumerate(groupJoin):
                print('normalizing samples2Biomass for sample_name ' + row['sample_name'] + ' and component_name ' + row['component_name']);
                # get the calculated concentration
                calc_conc = None;
                calc_conc_units = None;
                if use_height_I: 
                    calc_conc, calc_conc_units = row['height'],'height';
                elif row['use_calculated_concentration']:
                    calc_conc, calc_conc_units = row['calculated_concentration'],row['conc_units'];
                elif not row['use_calculated_concentration'] and row['use_area']:
                    calc_conc, calc_conc_units = row['area_ratio'],'area_ratio';
                elif not row['use_calculated_concentration'] and not row['use_area']:
                    calc_conc, calc_conc_units = row['height_ratio'],'height_ratio';
                # add data to the DB
                if calc_conc: 
                    row_O = {'experiment_id':experiment_id_I,
                            'sample_name':row['sample_name'],
                            'sample_id':row['sample_id'],
                            'component_group_name':row['component_group_name'],
                            'component_name':row['component_name'],
                            'calculated_concentration':calc_conc,
                            'calculated_concentration_units':calc_conc_units,
                            'used_':True,};
                    data_O.append(row_O);

        self.add_rows_table('data_stage01_quantification_normalized',data_O);
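
        # Example usage (a sketch only; the method and class names shown here
        # are assumptions inferred from the log messages, not confirmed identifiers):
        #   norm = stage01_quantification_normalized(session_I=session);
        #   norm.execute_normalizeSamples2Biomass(
        #       'experiment_01',
        #       biological_material_I='cell_biomass',
        #       conversion_name_I='OD600_to_gDW',
        #       use_height_I=False,
        #       );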