Example #1
0
    def get_analysis_config_list(self):
        """Return the active avg/std column names from analysis_config_tbl.

        Reads 'avg_std_cols' for every row with active = 1.  NOTE(review):
        if uploading later fails, check that each listed column exists in
        analysis_config_tbl with active = 1 and also exists in both the
        proc data table and the integral table.
        """
        query = db.select_Query(['avg_std_cols'],
                                table='analysis_config_tbl',
                                condition_list=["active = 1"])
        rows = self.interface_proc.query(query)
        return [row['avg_std_cols'] for row in rows]
Example #2
0
    def get_analysis_config_list(self):
        """Return the active avg/std column names from rfmr_analysis_config_tbl."""
        query = db.select_Query(['avg_std_cols'],
                                table='rfmr_analysis_config_tbl',
                                condition_list=["active = 1"])
        rows = self.interface_proc.query(query)
        return [row['avg_std_cols'] for row in rows]
Example #3
0
    def _load_ts_timeseries_data(self):
        """Load raw gasifier time-series data and apply glossary names/units."""
        info = self.run_info.info
        self.ts = GasifierProcTS(start=info['ts_start'], end=info['ts_stop'])
        self.ts.SQL_load(self.interface_raw, 'gasifier_lv_GC_view')

        # Build tag-number -> simple-name and simple-name -> units lookups
        # from the glossary table.
        query = db.select_Query(objects=['tag_number', 'simple_name', 'units'],
                                table="tag_glossary_tbl")
        self.glossary = {}
        self.gl_units = {}
        for entry in self.interface_raw.query(query):
            self.glossary[entry['tag_number']] = entry['simple_name']
            self.gl_units[entry['simple_name']] = entry['units']

        # Rename columns to readable names, attach units, and replace None
        # with nan so numpy calculations behave.
        self.ts.glossary_replace(self.glossary)
        self.ts.set_units(self.gl_units)
        self.ts.replace_None_w_nan_all()
    def SQL_load_data(self, db_interface, table = "", conditions = None):
        """Load this dataframe directly from a MySQL table via db_interface.

        :param db_interface: an SQL.db_interface connection wrapper
        :param table: name of the table (or view) to select from
        :param conditions: optional list of SQL WHERE clauses
        :raises dfSQLError: if db_interface is not a db_toolbox interface
        """
        where = conditions if conditions is not None else []
        if not isinstance(db_interface, SQL.db_interface):
            raise dfSQLError("The passed interface is not a db_toolbox interface")

        query = SQL.select_Query(objects=['*'], table=table, condition_list=where)
        # The query returns a pandas DataFrame, from which we re-initialize
        # this object directly.
        frame = db_interface.query(query, return_type='pandas_dataframe')
        self.reinitialize_data(frame)
Example #5
0
 def load_SQL_time_series(self, con = None, table = ""):
     """Load time-series data for the configured time frame.

     :param con: database interface; defaults to self.db_interface
     :param table: name of the time-series table to load

     Bug fix: the original looped over self.glossary immediately after
     setting it to {}, so the glossary/units/log-status maps were never
     populated.  We now query the glossary table and iterate its rows.
     """
     if con is None:
         con = self.db_interface
     self.ts = lfl.ts_data(self.start, self.stop)
     q = db.select_Query(objects = ['tag_number'], table = "tag_glossary_tbl", condition_list = ["tag_number LIKE 'MFC_%_SP'"])
     self.log_tags = [i['tag_number'] for i in con.query(q)]
     self.ts.SQL_load(con, table = table, glossary = 'tag_glossary_tbl')
     self.log_status = {}
     self.glossary = {}
     self.gl_units = {}

     # Build the lookup maps from the glossary table.  NOTE(review): assumes
     # tag_glossary_tbl has simple_name/units/log_status columns -- confirm
     # against the schema.
     q = db.select_Query(objects = ['tag_number', 'simple_name', 'units', 'log_status'],
                         table = "tag_glossary_tbl")
     for row in con.query(q):
         self.glossary[row['tag_number']] = row['simple_name']
         self.gl_units[row['simple_name']] = row['units']
         self.log_status[row['log_status']] = row['log_status']

     self.ts.glossary_replace(self.glossary)
     self.ts.set_units(self.gl_units)
     self.ts.replace_None_w_nan_all()
    def _load_ts_timeseries_data(self):
        """Load raw gasifier time-series data; map tags to names and units."""
        start = self.run_info.info['ts_start']
        stop = self.run_info.info['ts_stop']
        self.ts = GasifierProcTS(start=start, end=stop)
        # TODO: SQL_load should eventually pull the units automatically.
        self.ts.SQL_load(self.interface_raw, 'gasifier_lv_GC_view')

        # Build the glossary lookups using the SQL tools.
        query = db.select_Query(objects=['tag_number', 'simple_name', 'units'],
                                table="tag_glossary_tbl")
        self.glossary = {}
        self.gl_units = {}
        for rec in self.interface_raw.query(query):
            self.glossary[rec['tag_number']] = rec['simple_name']
            self.gl_units[rec['simple_name']] = rec['units']

        self.ts.glossary_replace(self.glossary)
        self.ts.set_units(self.gl_units)
        self.ts.replace_None_w_nan_all()
        # Ensure the timestamp column holds datetime objects.
        self.ts['ts'] = self.ts['ts'].astype(datetime.datetime)
    def _load_timeseries_data(self):
        """Load the steady-state gasifier time series and apply the glossary."""
        self.gts = GasifierProcTS(start=self.run_info.info['ss_start'],
                                  end=self.run_info.info['ss_stop'])
        # TODO: SQL_load should eventually pull the units automatically.
        self.gts.SQL_load(self.interface_raw,
                          'analysis_view',
                          glossary='glossary_tbl')

        # Build the glossary lookups using the SQL tools.
        query = db.select_Query(objects=['tag_number', 'simple_name', 'units'],
                                table="glossary_tbl")
        self.glossary = {}
        self.gl_units = {}
        for rec in self.interface_raw.query(query):
            self.glossary[rec['tag_number']] = rec['simple_name']
            self.gl_units[rec['simple_name']] = rec['units']

        self.gts.glossary_replace(self.glossary)

        self.gts.set_units(self.gl_units)
        self.gts.replace_None_w_nan_all()
    def SQL_load_data(self, db_interface, table = "", conditions = None):
        """Load the dataframe directly from a MySQL table via db_interface.

        :param db_interface: database interface exposing .query()
        :param table: table (or view) name to select from
        :param conditions: optional list of SQL WHERE clauses

        Columns are typed from the first row; rows are appended one at a
        time, keyed by the row's timestamp when present, otherwise by its
        run_id.
        """
        if conditions is None:
            conditions = []
        query = SQL.select_Query(objects=['*'], table=table, condition_list=conditions)
        results = db_interface.query(query)
        if not results:
            # Nothing to load; avoid the IndexError on results[0] below.
            return

        # Initialize a numpy array per column, typed from the first row.
        # isinstance (not type ==) also accepts float/int subclasses.
        for key, value in results[0].items():
            if isinstance(value, float):
                # 64-bit implementation
                self.data[key] = np.ndarray(0, dtype='float64')
            elif isinstance(value, int):
                # 64-bit implementation -- could do arch check if necessary
                self.data[key] = np.ndarray(0, dtype='int64')
            else:
                # Catch-all for strings, dates, None, etc.
                self.data[key] = np.ndarray(0, dtype='object')

        # Build the arrays row by row.  The narrowed except replaces the
        # original bare except:, which hid unrelated errors.
        for row in results:
            try:
                row_name = datetime.datetime.strftime(row['timestamp'],
                                                      '%Y-%m-%d %H:%M:%S')
            except (KeyError, TypeError, AttributeError):
                # No usable timestamp -- fall back to the run identifier.
                row_name = row['run_id']
            self.append(row, row_name=row_name)
Example #9
0
    def _load_timeseries_data(self):
        """Load the gasifier time series and apply glossary names and units."""
        # Gasifier time series for the steady-state window.
        self.gts = GasifierProcTS(start=self.run_info.info['ss_start'],
                                  end=self.run_info.info['ss_stop'])
        self.gts.SQL_load(self.interface_raw, 'gasifier_lv_GC_view')

        # Glossary/units lookup; change the table as appropriate for new
        # systems.
        query = db.select_Query(objects=['tag_number', 'simple_name', 'units'],
                                table="tag_glossary_tbl")
        self.glossary = {}
        self.gl_units = {}
        for rec in self.interface_raw.query(query):
            self.glossary[rec['tag_number']] = rec['simple_name']
            self.gl_units[rec['simple_name']] = rec['units']

        # Rename columns to human-readable simple names from the glossary.
        self.gts.glossary_replace(self.glossary)

        # Set units from glossary information.
        self.gts.set_units(self.gl_units)
        # Replace None with nan to avoid limitations in numpy calculations.
        self.gts.replace_None_w_nan_all()
Example #10
0
    def _load_timeseries_data(self):
        """Load the reformer time series, apply the glossary, clean MFC noise."""
        self.gts = GasifierProcTS(start=self.run_info.info['ss_start'],
                                  end=self.run_info.info['ss_stop'])
        # TODO: SQL_load should eventually pull the units automatically.
        self.gts.SQL_load(self.interface_raw, 'rfmr_analysis_view')

        # Build tag-name -> simple-name and simple-name -> units lookups.
        query = db.select_Query(objects=['tag_name', 'simple_name', 'units'],
                                table="rfmr_glossary_tbl")
        self.glossary = {}
        self.gl_units = {}
        for rec in self.interface_raw.query(query):
            self.glossary[rec['tag_name']] = rec['simple_name']
            self.gl_units[rec['simple_name']] = rec['units']

        self.gts.glossary_replace(self.glossary)

        self.gts.set_units(self.gl_units)
        self.gts.replace_None_w_nan_all()

        # Zero out mass-flow-controller PVs below 0.5, which report false or
        # negative feed rates at zero flow.  Better than the approach in
        # gasifier_analysis.py: it also covers an operator mis-setting the
        # setpoint on the HMI.
        mfc_cols = [c for c in self.gts.columns if c.startswith('mass_flow_')]
        for col in mfc_cols:
            self.gts[col][self.gts[col] < 0.5] = 0

        # Remember the raw columns so calculated columns can be uploaded later.
        self.raw_cols = self.gts.columns
    def _build_treeview(self):
        """Build the run-plan Gtk.TreeView backed by run_plan_view.

        Loads every run/setpoint/biomass column into a pandas dataframe,
        mirrors it into a Gtk.ListStore (column 0 is the 'Analyze'
        checkbox), then wires up the toggle column, one editable text
        column per field, and the selection-changed callback.
        """
        # Gather the full column list used for both the query and the view.
        objects = []
        objects.extend(PlanApp.run_info)
        objects.extend(PlanApp.sp_info)
        objects.extend(PlanApp.bm_info)

        q = db.select_Query(objects=objects, table="run_plan_view")

        self.data = psql.read_frame(q.getQuery(),
                                    con=self.interface.connection)

        # Fix for the hard-coded type list the original flagged as awful:
        # size the store from the column list so the ListStore arity always
        # matches the rows appended below ([bool toggle] + one str per col).
        self.store = Gtk.ListStore(*([bool] + [str] * len(objects)))

        # Map each column name to its store index; index 0 is the toggle.
        self.col_lookup = {}
        for i, col in enumerate(objects, start=1):
            self.col_lookup[col] = i

        # Add all of the data rows to the store.
        for i in range(len(self.data.index)):
            row = [False] + [str(self.data.iloc[i][col]) for col in objects]
            self.iter = self.store.append(row)
            self.row_count = i

        # Attach the store to the view defined in the builder file.
        self.view = self.builder.get_object("run_plan_table")
        self.view.set_model(self.store)

        self.view_renderers = {}
        self.view_columns = {}

        # 'Analyze' checkbox column.
        self.analysis_renderer = Gtk.CellRendererToggle()
        self.analysis_col = Gtk.TreeViewColumn("Analyze",
                                               self.analysis_renderer,
                                               active=0)
        self.analysis_renderer.connect("toggled", self.on_analysis_toggled)
        self.analysis_renderer.set_sensitive(True)
        self.view.append_column(self.analysis_col)

        # One editable text column per data field.
        for col in objects:
            renderer = Gtk.CellRendererText()
            renderer.set_property("editable", True)
            renderer.connect("edited", self.text_edited, self.col_lookup[col])
            self.view_renderers[col] = renderer

            self.view_columns[col] = Gtk.TreeViewColumn(
                PlanApp.labels[col],
                renderer,
                text=self.col_lookup[col])
            self.view.append_column(self.view_columns[col])

        self.select = self.view.get_selection()
        self.select.connect("changed", self.on_tree_selection_changed)
Example #12
0
    def append_runsheet(self):
        """Upload setpoint, biomass, and run-info rows to the database.

        Setpoint and biomass rows are inserted first (their ids
        auto-increment); each run row is then linked to its setpoint and
        biomass by looking the ids back up.  A run row that already exists
        (insert raises DBToolboxError) is updated in place, keyed on run_id.

        NOTE(review): conditions are built by string interpolation, which is
        vulnerable to SQL injection / quoting bugs if field values ever
        contain quotes -- consider parameterized queries in db_toolbox.
        """
        # Upload the unique setpoint and biomass points.
        for setp in self.setpoints:
            self._insert_properties(setp.properties, "setpoint_tbl")
        for bm in self.biomasses:
            self._insert_properties(bm.properties, "biomass_tbl")

        run_info = [
            'run_id', 'exp_id', 'ts_start', 'ts_stop', 'ss_start', 'ss_stop',
            'operator', 'feeder_slope', 'feeder_intercept',
            'superheater_gas_type', 'quality'
        ]

        sp_info = [
            'temperature', 'pressure', 'biomass_rate', 'steam_flow',
            'steam_temp', 'ent_CO2', 'sweep_CO2', 'Ar_tracer',
            'superheater_purge', 'tube_diameter'
        ]

        bm_info = [
            'sample_name', 'moisture', 'w_c', 'w_n', 'w_h', 'd10', 'd50', 'd90'
        ]

        # Upload the run information row by row.
        for i in self.data.index:
            d = {}
            for k in run_info:
                d[k] = str(self.data[k][i])

            # Locate the matching setpoint/biomass ids via SQL lookups.
            d['setpoint_id'] = str(self._lookup_id('setpoint_id',
                                                   'setpoint_tbl', sp_info, i))
            d['biomass_id'] = str(self._lookup_id('biomass_id',
                                                  'biomass_tbl', bm_info, i))

            try:
                q = db.insert_Query(objects=d, table="run_info_tbl")
                self.interface.query(q)
                self.interface.query(db.commit_Query())
            except db.DBToolboxError:
                # Row already exists -- update it in place, keyed on run_id.
                try:
                    q = db.update_Query(
                        objects=d,
                        table="run_info_tbl",
                        conditions=["%s = '%s'" % ('run_id', d['run_id'])])
                    self.interface.query(q)
                    self.interface.query(db.commit_Query())
                except db.DBToolboxError:
                    raise Exception("Trouble loading the data, man.  How bout you go catch some tasty waves?")

    def _insert_properties(self, properties, table):
        """Insert one properties dict as a row of `table` and commit."""
        objects = {}
        for k in properties:
            objects[k] = str(properties[k])
        q = db.insert_Query(objects=objects, table=table)
        self.interface.query(q)
        self.interface.query(db.commit_Query())

    def _lookup_id(self, id_col, table, fields, i):
        """Return the id of the row in `table` matching data row i on `fields`."""
        cl = ["%s='%s'" % (f, self.data[f][i]) for f in fields]
        q = db.select_Query(objects=[id_col], condition_list=cl, table=table)
        return self.interface.query(q)[0][id_col]