def create_dummy_attrib(dum_attrib, session):
    """
    Creates a dummy record in the Attributes table when loading datasets and
    object types. It also contributes to creating links in the database.

    :param session: SQLAlchemy session used to query the database
    :param dum_attrib: Dummy attribute name (or an ObjectTypeID followed by a description)
    :return: An instance of the Attributes() table
    """
    # creating a dummy attribute record for dummy object type records
    dummy_attrib = SqlAlchemy.Attributes()
    if not isinstance(dum_attrib[0], int):
        dummy_attrib.AttributeName = dum_attrib[0]
        dummy_attrib.ObjectTypeID = session.query(
            SqlAlchemy.ObjectTypes).order_by(
            SqlAlchemy.ObjectTypes.ObjectTypeID.desc()).first().ObjectTypeID
    else:
        dummy_attrib.AttributeName = 'ResourceTypeAcronym'
        dummy_attrib.ObjectTypeID = dum_attrib[0]
    dummy_attrib.UnitNameCV = session.query(SqlAlchemy.CV_Units).filter(
        SqlAlchemy.CV_Units.Name == 'No unit').first().Name
    dummy_attrib.AttributeDataTypeCV = session.query(
        SqlAlchemy.CV_AttributeDataType).filter(
        SqlAlchemy.CV_AttributeDataType.Name == 'Dummy').first().Name
    dummy_attrib.AttributeNameCV = None
    dummy_attrib.AttributeCategoryID = None
    dummy_attrib.ModelInputOrOutput = None
    dummy_attrib.AttributeDescription = dum_attrib[1]
    return dummy_attrib
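# --- Illustrative sketch (not part of the wizard code) ---------------------
# The two accepted shapes of dum_attrib can be shown with a minimal standalone
# sketch using hypothetical values: if dum_attrib[0] is an int it is treated as
# an ObjectTypeID, otherwise as the attribute name (and the newest ObjectTypeID
# is looked up instead). The real function also fills the CV and description
# fields from the database.
def pick_name_and_object_type(dum_attrib, newest_object_type_id):
    if isinstance(dum_attrib[0], int):
        return 'ResourceTypeAcronym', dum_attrib[0]
    return dum_attrib[0], newest_object_type_id

print(pick_name_and_object_type(('DummyAttr', 'auto-created'), 42))  # ('DummyAttr', 42)
print(pick_name_and_object_type((7, 'auto-created'), 42))            # ('ResourceTypeAcronym', 7)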
def connect(self, dbpath, db_type, sql_string=None):
    self.__session = SqlAlchemy.connect(dbpath, db_type, sql_string)
    print dbpath, 'path'
    print db_type, 'type'
    print sql_string, 'string'
    self.__path = dbpath  # To be used later to back up the database.
    self.__type = db_type
def restore_db(self):
    """
    Restores the database after the user cancels data loading. It copies the
    backup database, renames the copy back to the current database name, and
    reconnects to it so the user can safely load more data.

    The worker thread does not exit immediately; it finishes the job at hand and
    then exits. This is safer because exiting in the middle of a job could leave
    the database in an inconsistent state or cause other problems.
    """
    self.close_db()
    src = self.__backupName
    dst = src.split('---')[0] + '.' + src.split('.')[1]
    copyfile(src, dst)
    self.__session = SqlAlchemy.connect(dst, self.__type)
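# --- Illustrative sketch (not part of the wizard code) ---------------------
# The restore step relies only on the backup file's naming convention: the part
# before '---' is the original base name and the extension follows the first
# dot. A standalone sketch with a hypothetical backup name (the real name comes
# from self.__backupName):
src = 'wamdam---backup.sqlite'
dst = src.split('---')[0] + '.' + src.split('.')[1]
print(dst)  # wamdam.sqlite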
def load_mapping(params, session):
    """
    Helper method that creates an instance of the Mappings table. It creates a
    connection between the Attributes, Instances, Scenarios, Sources, Methods,
    and DataValues tables.

    :param session: SQLAlchemy session used to query the database
    :param params: A list of data to fill the Mappings table
    :return: A filled instance of the Mappings() table (or None if the mapping
             already exists) and the dummy AttributeID
    """
    dummy_map = SqlAlchemy.Mappings()
    try:
        dummy_id = session.query(SqlAlchemy.Attributes).filter(
            SqlAlchemy.Attributes.ObjectTypeID == params[0]).first().AttributeID
    except Exception as e:
        raise Exception(e.message)
    dummy_map.AttributeID = dummy_id
    dummy_map.InstanceID = params[1]
    try:
        dummy_map.SourceID = session.query(SqlAlchemy.Sources).filter(
            SqlAlchemy.Sources.SourceName == params[2]).first().SourceID
    except:
        # raise an exception naming the Sources table and the missing value
        # when params[2] is not found in the Sources table
        msg = "Sources|{}".format(params[2])
        raise Exception(msg)
    try:
        dummy_map.MethodID = session.query(SqlAlchemy.Methods).filter(
            SqlAlchemy.Methods.MethodName == params[3]).first().MethodID
    except:
        # raise an exception naming the Methods table and the missing value
        # when params[3] is not found in the Methods table
        msg = "Methods|{}".format(params[3])
        raise Exception(msg)
    dummy_map.ValuesMapperID = params[4]
    try:
        test = session.query(SqlAlchemy.Mappings).filter(
            and_(SqlAlchemy.Mappings.AttributeID == dummy_map.AttributeID,
                 SqlAlchemy.Mappings.InstanceID == dummy_map.InstanceID,
                 SqlAlchemy.Mappings.SourceID == dummy_map.SourceID,
                 SqlAlchemy.Mappings.MethodID == dummy_map.MethodID)).first().MappingID
        return None, dummy_id
    except:
        return dummy_map, dummy_id
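# --- Illustrative sketch (not part of the wizard code) ---------------------
# The duplicate check at the end of load_mapping boils down to: if the
# (AttributeID, InstanceID, SourceID, MethodID) combination is already mapped,
# return nothing new; otherwise return a fresh record. A minimal in-memory
# sketch of that idea with hypothetical IDs (the real code queries the Mappings
# table instead of a set):
existing = {(1, 10, 3, 2), (1, 11, 3, 2)}

def mapping_needed(attribute_id, instance_id, source_id, method_id):
    key = (attribute_id, instance_id, source_id, method_id)
    return None if key in existing else key

print(mapping_needed(1, 10, 3, 2))  # None -> reuse the existing mapping
print(mapping_needed(1, 12, 3, 2))  # (1, 12, 3, 2) -> create a new Mappings row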
def load_data_values(session):
    """
    Creates an instance of the ValuesMapper table. It queries the table for the
    most recent ValuesMapperID; if the query returns None, the ValuesMapperID is
    set to 1, otherwise 1 is added to the highest existing ValuesMapperID.

    :return: An instance of ValuesMapper with its fields filled
    """
    dummy_dataval = SqlAlchemy.ValuesMapper()
    try:
        dummy_dataval.ValuesMapperID = int(
            session.query(SqlAlchemy.ValuesMapper).order_by(
                SqlAlchemy.ValuesMapper.ValuesMapperID.desc()).first().ValuesMapperID)
        dummy_dataval.ValuesMapperID += 1
    except:
        dummy_dataval.ValuesMapperID = 1
    return dummy_dataval
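# --- Illustrative sketch (not part of the wizard code) ---------------------
# The ID assignment above is a simple "highest existing ID plus one" scheme done
# in application code rather than by the database. The same rule as a standalone
# helper over a hypothetical list of IDs:
def next_values_mapper_id(existing_ids):
    return max(existing_ids) + 1 if existing_ids else 1

print(next_values_mapper_id([1, 2, 3]))  # 4
print(next_values_mapper_id([]))         # 1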
def load_scenario_mapping(params, session):
    """
    Helper method that creates an instance of the ScenarioMappings table and
    fills the appropriate fields with the values passed in params.

    :param params: A list of data to fill the ScenarioMappings table
    :return: A filled instance of the ScenarioMappings table, or None if the
             scenario-mapping association already exists
    """
    dummy_scen_map = SqlAlchemy.ScenarioMappings()
    dummy_scen_map.ScenarioID = params[0]
    try:
        dummy_scen_map.MappingID = session.query(
            SqlAlchemy.Mappings).filter(
            and_(
                SqlAlchemy.Mappings.AttributeID == params[1],
                SqlAlchemy.Mappings.InstanceID == params[2],
                SqlAlchemy.Mappings.SourceID == session.query(
                    SqlAlchemy.Sources).filter(
                    SqlAlchemy.Sources.SourceName == params[3]).first().SourceID,
                SqlAlchemy.Mappings.MethodID == session.query(
                    SqlAlchemy.Methods).filter(
                    SqlAlchemy.Methods.MethodName == params[4]).first().MethodID)).first().MappingID
    except:
        raise Exception('An error occurred when loading the nodes sheet')
    try:
        test = session.query(SqlAlchemy.ScenarioMappings).filter(
            and_(
                SqlAlchemy.ScenarioMappings.MappingID == dummy_scen_map.MappingID,
                SqlAlchemy.ScenarioMappings.ScenarioID == params[0])).first().ScenarioMappingID
        return None
    except:
        return dummy_scen_map
def btn_connectOnButtonClick(self, event):
    error = self.evaluator()
    if error:
        msg_somethigWrong(self.topframe, '\n\n' + error).Show()
        return
    else:
        setup = DB_Setup()
        # Test whether the user is already connected to a database; if so, ask them to disconnect
        if setup.get_session():
            msg_somethigWrong(
                None,
                msg='\n\nError: You are already connected to a database.\n\nTo use another'
                    ' database, you need to disconnect from the current one').Show()
            return
        # Connect to the MySQL database using the given sql_string
        setup.connect('', db_type='mysql', sql_string=self.sql_string)
        self.topframe.SetTitle(self.topframe.GetTitle() +
                               ' ::: You are connected to MySQL DB ' + self.db_name)
        from Messages_forms.msg_connSQLiteSuccs import msg_connSQLiteSuccs
        msgdlg = msg_connSQLiteSuccs(self.topframe)
        msgdlg.setMessage(u"\n\nSuccessfully connected to mysql db with name \'" +
                          self.db_name + u"\'")
        msgdlg.Show()
        # Create the WaMDaMVersion table and fill it with the version number declared in define.py
        obj_cat = SqlAlchemy.WaMDaMVersion()
        try:
            query = setup.get_session().query(SqlAlchemy.WaMDaMVersion).first().VersionNumber
        except:
            obj_cat.VersionNumber = define.version
            setup.push_data(obj_cat)
            setup.add_data()
        self.Close()
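# --- Illustrative sketch (not part of the wizard code) ---------------------
# The version bookkeeping follows a "write only if no row exists yet" pattern:
# the query in the try block succeeds when a version row is already present, so
# the except branch is the only place a new row is pushed. The same pattern as
# a standalone example on an in-memory sqlite3 database (table and column names
# here are illustrative, not the project's actual schema):
import sqlite3

def ensure_version(conn, version):
    row = conn.execute('SELECT VersionNumber FROM Version').fetchone()
    if row is None:                                  # no version recorded yet
        conn.execute('INSERT INTO Version VALUES (?)', (version,))
        conn.commit()
        return version
    return row[0]                                    # keep the existing version

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE Version (VersionNumber TEXT)')
print(ensure_version(conn, '1.07'))  # inserts and prints 1.07
print(ensure_version(conn, '2.00'))  # still prints 1.07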
def btn_connectOnButtonClick(self, event):
    topframe = wx.GetApp().GetTopWindow()
    dir_name = self.dirPicker_newDB.GetPath()
    db_name = self.m_textCtrl1.GetValue()

    # Check whether the chosen database name already exists in the selected directory.
    fileCheck = False
    for root, dirs, files in os.walk(dir_name):
        for file in files:
            if file.endswith(".sqlite"):
                if file.split('.')[0] == db_name:
                    from Messages_forms.generalMsgDlg import messageDlg
                    errMsgDlg = messageDlg(topframe)
                    errMsgDlg.SetTitle("Error")
                    errMsgDlg.setMessage(
                        "This database name already exists in this directory.\n"
                        "Please choose a different database name!")
                    errMsgDlg.ShowModal()
                    return

    # Report connecting to the db in the logfile
    define.logger = define.create_logger(db_name)
    define.logger.name = __name__
    define.logger.info("Start database connection.")

    setup = DB_Setup()
    if setup.get_session():
        define.logger.error(
            'Failed database connection.\n\nError: You are already connected to a database.\n\n'
            'To use another database, you need to disconnect from the current one')
        msg_somethigWrong(
            topframe,
            msg='\n\nError: You are already connected to a database.\n\n'
                'To use another database, you need to disconnect from the current one').Show()
        return
    if not dir_name:
        define.logger.error(
            'Failed database connection.\n\nError: Please select a directory.\n\n'
            'If you already did, try using the "other..." option in the Dir Dialog')
        msg_somethigWrong(
            topframe,
            msg='\n\nError: Please select a directory.\n\n'
                'If you already did, try using the "other..." option in the Dir Dialog').Show()
        return
    if not db_name:
        define.logger.error('Failed database connection.\n\nError: The database name is required.')
        msg_somethigWrong(topframe, msg='\n\n\nError: The database name is required.').Show()
        return

    # Normalize the file name so it always ends with a ".sqlite" extension
    if len(db_name.split('.')) > 1:
        if db_name.split('.')[-1] in ['sqlite']:
            pass
        else:
            db_name += '.sqlite'
    else:
        db_name += '.sqlite'

    # Get a connection to the database
    db_path = os.path.join(dir_name, db_name)
    db = DB_Setup()
    db.connect(db_path, db_type='sqlite')

    # Create the WaMDaMVersion table and fill it with the version number declared in define.py
    obj_cat = SqlAlchemy.WaMDaMVersion()
    obj_cat.VersionNumber = define.version
    db.push_data(obj_cat)
    db.add_data()

    topframe.SetTitle(topframe.GetTitle() + ' ::: You are connected to ' + os.path.basename(db_name))
    from Messages_forms.msg_connSQLiteSuccs import msg_connSQLiteSuccs
    msgdlg = msg_connSQLiteSuccs(topframe)
    msgdlg.setMessage(u"\n\n\n\n\n You are successfully connected to " + db_path.split('\\')[-1] + u".")
    msgdlg.Show()

    # Report the connected db name to the logfile
    define.logger.info("'" + db_path.split('\\')[-1] + "' was connected successfully.\n")
    define.dbName = db_path.split('\\')[-1]
    self.Close()
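# --- Illustrative sketch (not part of the wizard code) ---------------------
# The file-name handling above only guarantees a single ".sqlite" extension.
# The same rule as a small standalone helper with hypothetical names:
def ensure_sqlite_extension(db_name):
    parts = db_name.split('.')
    if len(parts) > 1 and parts[-1] == 'sqlite':
        return db_name
    return db_name + '.sqlite'

print(ensure_sqlite_extension('wamdam'))         # wamdam.sqlite
print(ensure_sqlite_extension('wamdam.sqlite'))  # wamdam.sqlite
print(ensure_sqlite_extension('wamdam.v2'))      # wamdam.v2.sqlite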
def load_data(self): """ This method is used to parse data from each sheet to its appropriate table in the database. Due to the structure of the excel file, some hard coding was done to get data accurately. It functions by iterating over the work_sheet dictionary and getting each corresponding sheet_name and sheetrows to load the table. It starts by querying for id's of the datavalue properties. then using some tests to load the data value appropriately to avoid data duplication and creating links between tables. :return: None """ for sheet_name, sheet_rows in self.work_sheet.items(): temp = sheet_rows[:] # Detecting start of data in the sheet for row_id, row in enumerate(temp): temp_row = [cell.value for cell in row] if 'SeasonalNumericValues_table' in temp_row: temp = sheet_rows[row_id + 4:] break if len(temp) < 1: continue # rows are stored for comparison to determine if a row is similar to others or different order_value = 0 temp_row = [ cell.value for cell in temp[0][:6] ] # stores from ojectInstance to Methodname for comparison order_row_test = temp_row[:6] stored_rows = [temp_row] scenario_name = temp[0][2] mappingID = '' for row_id, row in enumerate(temp): if all('' == cell.value for cell in row): break if any('' == cell.value for cell in row[:7]): raise Exception( "Some empty fields were found in SeasonalNumericValues.\nPlease fill all Required fields" ) if row[-1].value is None or row[-1].value == '': continue if row[0].value == "": raise Exception( 'Error in {} row of "SeasonalNumericValues_table" of sheet "{}"\nField named "ObjectType" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[1].value == "": raise Exception( 'Error in {} row of "SeasonalNumericValues_table" of sheet "{}"\nField named "InstancenName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[2].value == "": raise Exception( 'Error in {} row of "SeasonalNumericValues_table" of sheet "{}"\nField named "ScenarioName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[3].value == "": raise Exception( 'Error in {} row of "SeasonalNumericValues_table" of sheet "{}"\nField named "AttributeName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[4].value == "": raise Exception( 'Error in {} row of "SeasonalNumericValues_table" of sheet "{}"\nField named "SourceName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[5].value == "": raise Exception( 'Error in {} row of "SeasonalNumericValues_table" of sheet "{}"\nField named "MethodName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[6].value == "": raise Exception( 'Error in {} row of "SeasonalNumericValues_table" of sheet "{}"\nField named "SeasonName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) # if row[7].value == "": # raise Exception('Error in {} row of "SeasonalNumericValues_table" of sheet "{}"\nField named "SeasonNameCV" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' # .format(row_id, sheet_name)) # test for datatype if not self.data_type_test(self.__session, row, 'SeasonalNumericValues'): raise Exception( "'{}' attribute is not associated to {} Datatype". 
format(row[3].value, 'SeasonalNumericValues')) diff_scene = False temp_row = [cell.value for cell in row[:6]] order_current_row = temp_row[:6] # TODO: make sure to store only the block in stored rows for comparison so that we can use for loop for unordered data. # Make block comparison to see if next block is same or different if temp_row != stored_rows[-1]: diff_scene = True # escaping creation of different datavaluemapperID for same block stored_rows.append(temp_row) # Store new block in temp_row sparams = SqlAlchemy.SeasonalNumericValues() attrib_id, instance_id, scenario_id, source_id, method_id = self.get_ids( row, self.__session, sheet_name, row_id) # Test if attrib belongs to object type self.test_properties(self.__session, row, sheet_name) # getting datavaluemapper id using the above params from Mapping table # this will be used to a datavaluemapperid to be reused if it already exists datavalues = self.__session.query(SqlAlchemy.Mappings).filter( and_(SqlAlchemy.Mappings.AttributeID == attrib_id, SqlAlchemy.Mappings.InstanceID == instance_id, SqlAlchemy.Mappings.SourceID == source_id, SqlAlchemy.Mappings.MethodID == method_id)).order_by( SqlAlchemy.Mappings.MappingID.desc()).all() ''' The Idea in creating link used here is to query the datavalue table to get the DataValueMapperID for the current value. if the value already exists in the table, the Mapping id is selected and loaded with the scenario id else a new record is created in the Mapping table and link in the scenarioMapping table. ''' value = None datavalues_id = None found = False try: # skips searching datavaluemapperid for required field if its attribs are not found in mapping table # if new row's attribs are not mapped yet, then it is a new block, we skip datavaluemap search if not datavalues: diff_scene = False raise Exception # If the mapperID exists for the attribs in the current row, we then search if the value exists for # the row is stored in the db, if yes we then try to match with the mappingid to get datavaluemapper datavalues_id = self.__session.query( SqlAlchemy.SeasonalNumericValues).filter( and_( SqlAlchemy.SeasonalNumericValues. SeasonNumericValue == row[8].value, SqlAlchemy.SeasonalNumericValues.SeasonName == row[6].value)).all() # if block is different but there is a mapping and the value is not found in the db, we create a # new datavalue mapper ID (this is mostly to handle different case scenario if datavalues and diff_scene and not datavalues_id: diff_scene = False raise Exception result = [ datavaluemapper.ValuesMapperID for datavaluemapper in datavalues_id ] # check for mapping with same ValuesMapper as the data value. # if found, reuse of mapping id is emminent. 
if len(result) > 0: for mapping in datavalues: if found: break for each in result[:]: if mapping.ValuesMapperID == each: datavalues = mapping found = True break # if the current value datvaluesmapperID is not found matching # we use the most recent mapping id because they are from same block if not found: datavalues = datavalues[0] datavalues_id = None value = True except Exception as e: value = True # to ensure the first value is logged in datavalues = None datavalues_id = None # Creating New entry, datavaluemapperID and mappingID if not datavalues and not diff_scene: datavalmapper = self.load_data_values(self.__session) dataval_map = SqlAlchemy.Mappings() dataval_map.AttributeID = attrib_id dataval_map.InstanceID = instance_id dataval_map.SourceID = source_id dataval_map.MethodID = method_id # Creating new datavaluemapper if its the start of another block if datavalues_id is None and not diff_scene: self.setup.push_data(datavalmapper) datavalues_id = datavalmapper.ValuesMapperID dataval_map.ValuesMapperID = datavalmapper.ValuesMapperID elif not diff_scene: dataval_map.ValuesMapperID = datavalues_id self.setup.push_data(dataval_map) else: datavalues_id = datavalues.ValuesMapperID # Creating new scenariomapping if scenarioID-mappingID does not exists. # Starts by searchine for the mappingID in case its just been created, then tests to see if a # scenarioID-mappingID exists, if yes, it skips, if no, it creates an entry scenariomap = SqlAlchemy.ScenarioMappings() scenariomap.ScenarioID = scenario_id # try to get the mappingid for the scenario if an entry already exist # else we get the most recent MappingID if datavalues: scenariomap.MappingID = datavalues.MappingID else: scenariomap.MappingID = self.__session.query( SqlAlchemy.Mappings).filter( and_( SqlAlchemy.Mappings.AttributeID == attrib_id, SqlAlchemy.Mappings.InstanceID == instance_id, SqlAlchemy.Mappings.SourceID == source_id, SqlAlchemy.Mappings.MethodID == method_id, SqlAlchemy.Mappings.ValuesMapperID == datavalues_id)).first().MappingID # there is a problem in sharing/reusing the MappingID into the scenarioMapping table for different blocks. # therefore, there is a duplicat of values showing in the query result # test if the mappingid - scenarioid already exists in scenario table # if yes, then nothing is added, else, we add new entry based of diff_scene var. 
try: test = self.__session.query( SqlAlchemy.ScenarioMappings).filter( and_( SqlAlchemy.ScenarioMappings.MappingID == scenariomap.MappingID, SqlAlchemy.ScenarioMappings.ScenarioID == scenariomap.ScenarioID)).first( ).ScenarioMappingID except: self.setup.push_data(scenariomap) if row[8].value is not None: # Add new entry to the database if the value(depicts similar scenario) and the value is not found if value and not found: if row[7].value: try: sparams.SeasonNameCV = self.__session.query( SqlAlchemy.CV_SeasonName).filter( SqlAlchemy.CV_SeasonName.Name == row[7].value).first().Name except: raise Exception( "Error:\nCannot find '{}' in SeasonNameCV field of SeasonalNumericValues_table" .format(row[7].value)) sparams.SeasonNumericValue = row[8].value sparams.ValuesMapperID = datavalues_id sparams.SeasonName = row[6].value # sparams.SeasonDateFormate=row[9].value """1999/12/1 YYYY/MM/DD""" \ """ Year value must be 9999""" self.setup.push_data(sparams) # Adding order to the seasonalNumericValues according to blocks # If the current row is same as the previous row, we add one to the previous order_value # If it is different from the previous we create a new order_value # This will be inaccurate if the excel file is not arranged in blocks. # TODO: if the excel file at any point will not be arranged in block, this will have to modified # TODO: and we will have to be searching the db at each point to know if the row is in or not # Todo: if it is in we get the most recent value and add one else we create a new order_value if order_current_row == order_row_test: order_value = order_value + 1 sparams.SeasonOrder = order_value order_row_test = order_current_row[:] else: order_value = 1 sparams.SeasonOrder = order_value order_row_test = order_current_row[:] value = False else: raise Exception( 'Error in sheet "{}"\nField named "SeasonValue" is empty.\nThis field should not be empty.\nPlease fill this field to a value' .format(sheet_name))
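# --- Illustrative sketch (not part of the wizard code) ---------------------
# The SeasonOrder bookkeeping above assumes the sheet is arranged in contiguous
# blocks: while the (ObjectType, Instance, Scenario, Attribute, Source, Method)
# key repeats, the order counter increments; it resets to 1 when a new block
# starts. A standalone sketch of that counter over hypothetical block keys:
rows = ['blockA', 'blockA', 'blockA', 'blockB', 'blockB', 'blockA']

order_value = 0
previous_key = None
for key in rows:
    order_value = order_value + 1 if key == previous_key else 1
    previous_key = key
    print('%s -> SeasonOrder %d' % (key, order_value))
# blockA -> 1, 2, 3; blockB -> 1, 2; the trailing blockA starts over at 1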
def load_data(self): """ This method is used to parse data from each sheet to its appropriate table in the database. Due to the structure of the excel file, some hard coding was done to get data accurately. It functions by iterating over the work_sheet dictionary and getting each corresponding sheet_name and sheetrows to load the table. It starts by querying for id's of the datavalue properties. then using some tests to load the data value appropriately to avoid data duplication and creating links between tables. :return: None """ for sheet_name, sheet_rows in self.work_sheet.items(): temp = sheet_rows[:] # Detecting start of data in the sheet for row_id, row in enumerate(temp): temp_row = [cell.value for cell in row] if 'FreeText_table' in temp_row: temp = sheet_rows[row_id + 4:] break if len(temp) < 1: continue # rows are stored for comparison to determine if a row is similar to others or different temp_row = [cell.value for cell in temp[0]] temp_row.pop(2) stored_rows = [temp_row] scenario_name = temp[0][2] for row_id, row in enumerate(temp): if all('' == cell.value.strip() for cell in row): break if any('' == cell.value.strip() for cell in row): continue if row[0].value == "": raise Exception( 'Error in {} row of "FreeText_table" of sheet "{}"\nField named "ObjectType" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[1].value == "": raise Exception( 'Error in {} row of "FreeText_table" of sheet "{}"\nField named "InstancenName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[2].value == "": raise Exception( 'Error in {} row of "FreeText_table" of sheet "{}"\nField named "ScenarioName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[3].value == "": raise Exception( 'Error in {} row of "FreeText_table" of sheet "{}"\nField named "AttributeName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[4].value == "": raise Exception( 'Error in {} row of "FreeText_table" of sheet "{}"\nField named "SourceName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[5].value == "": raise Exception( 'Error in {} row of "FreeText_table" of sheet "{}"\nField named "MethodName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[6].value == "": raise Exception( 'Error in {} row of "FreeText_table" of sheet "{}"\nField named "FreeTextValue" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) # test for datatype if not self.data_type_test(self.__session, row, 'FreeText'): raise Exception( "'{}' attribute is not associated to {} Datatype". 
format(row[3].value, 'FreeText')) diff_scene = False temp_row = [cell.value for cell in row[:]] temp_row.pop(2) # Checking row against stored rows to determine if its different or similar for each in stored_rows: if temp_row == each: # checking if current row is same as any previous rows if row[2].value != scenario_name.value: # basing difference in terms of scenario name diff_scene = True break break stored_rows.append(temp_row) params = SqlAlchemy.FreeText() attrib_id, instance_id, scenario_id, source_id, method_id = self.get_ids( row, self.__session, sheet_name, row_id) # Test if the provided attribute name belongs to object type self.test_properties(self.__session, row, sheet_name) # getting datavaluemapper id using the above params fro Mapping table datavalues = self.__session.query(SqlAlchemy.Mappings).filter( and_(SqlAlchemy.Mappings.AttributeID == attrib_id, SqlAlchemy.Mappings.InstanceID == instance_id, SqlAlchemy.Mappings.SourceID == source_id, SqlAlchemy.Mappings.MethodID == method_id)).all() ''' The Idea in creating link between tables used here is to query the datavalue table to get the DataValueMapperID for the current value. if the value already exists in the table, the Mapping id is selected and loaded with the scenario id else a new record is created in the Mapping table and link in the scenarioMapping table. ''' value = None datavalues_id = None found = False try: # skips searching datavaluemapperid for required field if its attribs are not found in mapping table # this means a new datavaluemapperID will be created. this occurs when the ros is different if not datavalues: raise Exception # If the mapperID exists for the attribs in the current row, we then search if the value exists for # the row is stored in the db, if yes we then try to match with the mappingid to get datavaluemapper datavalues_id = self.__session.query( SqlAlchemy.FreeText).filter( SqlAlchemy.FreeText.FreeTextValue == row[6].value).all() result = [ datavaluemapper.ValuesMapperID for datavaluemapper in datavalues_id ] # check for mapping with same ValuesMapper as the data value. # if found, reuse of mapping id is emminent. for mapping in datavalues: if found: break for each in result[:]: if mapping.ValuesMapperID == each: datavalues = mapping found = True break # if the current value datvaluesmapperID is not found matching. # if its not found, A trigger is sent for new entry creation if not found: raise Exception value = True except Exception as e: datavalues = None datavalues_id = None # if the current row combination does not exists in mapping table, a new entry is created if not datavalues and not diff_scene: datavalmapper = self.load_data_values(self.__session) dataval_map = SqlAlchemy.Mappings() dataval_map.AttributeID = attrib_id dataval_map.InstanceID = instance_id dataval_map.SourceID = source_id dataval_map.MethodID = method_id # creating new datavaluemapper if the row is in a different block if datavalues_id is None and not diff_scene: self.setup.push_data(datavalmapper) datavalues_id = datavalmapper.ValuesMapperID dataval_map.ValuesMapperID = datavalmapper.ValuesMapperID elif not diff_scene: dataval_map.ValuesMapperID = datavalues_id self.setup.push_data(dataval_map) else: datavalues_id = datavalues.ValuesMapperID # Creating new scenariomapping if scenarioID-mappingID does not exists. 
# Starts by searchine for the mappingID in case its just been created, then tests to see if a # scenarioID-mappingID exists, if yes, it skips, if no, it creates an entry scenariomap = SqlAlchemy.ScenarioMappings() scenariomap.ScenarioID = scenario_id if datavalues: scenariomap.MappingID = datavalues.MappingID else: scenariomap.MappingID = self.__session.query( SqlAlchemy.Mappings).filter( and_( SqlAlchemy.Mappings.AttributeID == attrib_id, SqlAlchemy.Mappings.InstanceID == instance_id, SqlAlchemy.Mappings.SourceID == source_id, SqlAlchemy.Mappings.MethodID == method_id, SqlAlchemy.Mappings.ValuesMapperID == datavalues_id)).first().MappingID # test to see if the current mappingid - scenarioid association exists in scenariomapping table try: test = self.__session.query( SqlAlchemy.ScenarioMappings).filter( and_( SqlAlchemy.ScenarioMappings.MappingID == scenariomap.MappingID, SqlAlchemy.ScenarioMappings.ScenarioID == scenariomap.ScenarioID)).first( ).ScenarioMappingID except: self.setup.push_data(scenariomap) if row[6].value is not None: # value is a boolean variable which tells if a datavalue is already in the database, # if value if false, it is added to the db, also if row is different from previously # loaded rows, that current row is also added to the database. if not value or not diff_scene: params.FreeTextValue = row[6].value params.ValuesMapperID = datavalues_id self.setup.push_data(params) value = False else: raise Exception( 'Error in sheet "{}"\n field named "FreeTextValue" is empty.\nThis field should not be empty.' '\nPlease fill this field to a value'.format( sheet_name))
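# --- Illustrative sketch (not part of the wizard code) ---------------------
# The reuse test in the FreeText loader amounts to: does any existing Mappings
# row for this (attribute, instance, source, method) share a ValuesMapperID
# with a FreeText row that already stores the same value? A minimal in-memory
# sketch of that intersection with hypothetical IDs (the real code walks over
# query results instead of lists):
mapping_values_mapper_ids = [101, 102, 105]   # from the Mappings query
value_holder_ids = [105, 230]                 # FreeText rows holding this value

reused = next((m for m in mapping_values_mapper_ids if m in value_holder_ids), None)
print(reused)  # 105 -> reuse this mapping; None would mean "create a new entry"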
def load_data(self): """ This method is used to parse data from each sheet to its appropriate table in the database. Due to the structure of the excel file, some hard coding was done to get data accurately. It functions by iterating over the work_sheet dictionary and getting each corresponding sheet_name and sheetrows to load the table. It starts by querying for id's of the datavalue properties. then using some tests to load the data value appropriately to avoid data duplication and creating links between tables. It then concatenates the file name and append to the location provided. Using the os module, it opens the file and loads the data to the filebase table as a blob. :return: None """ for sheet_name, sheet_rows in self.work_sheet.items(): temp = sheet_rows[:] # Detecting start of data in the sheet for row_id, row in enumerate(temp): temp_row = [cell.value for cell in row] if 'ElectronicFiles_table' in temp_row: temp = sheet_rows[row_id + 4:] break for row_id, row in enumerate(temp): if all('' == cell.value for cell in row): break if any('' == cell.value for cell in row[:-1]): raise Exception( "Some empty fields where found in ElectronicFiles.\n Please fill all Required fields in " + sheet_name) if row[0].value == "": raise Exception( 'Error in {} row of "ElectronicFiles_table" of sheet "{}"\nField named "ObjectType" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[1].value == "": raise Exception( 'Error in {} row of "ElectronicFiles_table" of sheet "{}"\nField named "InstancenName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[2].value == "": raise Exception( 'Error in {} row of "ElectronicFiles_table" of sheet "{}"\nField named "ScenarioName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[3].value == "": raise Exception( 'Error in {} row of "ElectronicFiles_table" of sheet "{}"\nField named "AttributeName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[4].value == "": raise Exception( 'Error in {} row of "ElectronicFiles_table" of sheet "{}"\nField named "SourceName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[5].value == "": raise Exception( 'Error in {} row of "ElectronicFiles_table" of sheet "{}"\nField named "MethodName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[6].value == "": raise Exception( 'Error in {} row of "ElectronicFiles_table" of sheet "{}"\nField named "ElectronicFileName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[7].value == "": raise Exception( 'Error in {} row of "ElectronicFiles_table" of sheet "{}"\nField named "ElectronicFileFormatCV" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[8].value == "": raise Exception( 'Error in {} row of "ElectronicFiles_table" of sheet "{}"\nField named "FileLocationOnDesk" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) # test for datatype if not self.data_type_test(self.__session, row, 'File'): raise Exception( "'{}' attribute is not associated to {} Datatype". 
format(row[3].value, 'File')) file_base = SqlAlchemy.File() attrib_id, instance_id, scenario_id, source_id, method_id = self.get_ids( row, self.__session, sheet_name, row_id) # Test if attrib belongs to object type self.test_properties(self.__session, row, sheet_name) # getting datavaluemapper id using the above params fro Mapping table datavalues = self.__session.query(SqlAlchemy.Mappings).filter( and_(SqlAlchemy.Mappings.AttributeID == attrib_id, SqlAlchemy.Mappings.InstanceID == instance_id, SqlAlchemy.Mappings.SourceID == source_id, SqlAlchemy.Mappings.MethodID == method_id)).first() ''' The Idea in creating link used here is to query the datavalue table to get the DataValueMapperID using a combination of (aggregattionstatisticcvid, aggreationInterval, Internvaltimeunitcvid) for the current value combination. Only required fields are considered. if the value already exists in the table, the Mapping id is selected and loaded with the scenario id else a new record is created in the Mapping table and link in the scenarioMapping table. ''' value = None datavalues_id = None try: datavalues_id = self.__session.query( SqlAlchemy.TextFree).filter( and_( SqlAlchemy.File.FileName == row[6].value, SqlAlchemy.File.ElectronicFileFormatCV == self.__session.query( SqlAlchemy.CV_ElectronicFormat).filter( SqlAlchemy.CV_ElectronicFormat.Name == row[7].value).first().Name)).first( ).ValuesMapperID value = True except Exception as e: print e raise Exception(e.message) if not datavalues: datavalmapper = self.load_data_values(self.__session) dataval_map = SqlAlchemy.Mappings() dataval_map.AttributeID = attrib_id dataval_map.InstanceID = instance_id dataval_map.SourceID = source_id dataval_map.MethodID = method_id if datavalues_id is None: self.setup.push_data(datavalmapper) datavalues_id = datavalmapper.ValuesMapperID dataval_map.ValuesMapperID = datavalmapper.ValuesMapperID else: dataval_map.ValuesMapperID = datavalues_id self.setup.push_data(dataval_map) if not datavalues_id: datavalues_id = datavalues.ValuesMapperID if row[5].value and row[6].value and row[7].value: if not value: file_base.FileName = row[6].value file_base.ValuesMapperID = datavalues_id file_base.ElectronicFileFormatCV = self.__session.query( SqlAlchemy.CV_ElectronicFormat).filter( SqlAlchemy.CV_ElectronicFormat.Name == row[7].value).first().Name file_base.Description = row[9].value # getting file data and storing as blob in model filename = row[6].value + '.' + row[7].value file_path = os.path.join(row[8].value, filename) fp = open(file_path, mode='rb') data = fp.read() file_base.File = data self.setup.push_data(file_base) value = False scenariomap = SqlAlchemy.ScenarioMappings() scenariomap.ScenarioID = scenario_id if datavalues: scenariomap.MappingID = datavalues.MappingID else: scenariomap.MappingID = self.__session.query( SqlAlchemy.Mappings).filter( and_(SqlAlchemy.Mappings.AttributeID == attrib_id, SqlAlchemy.Mappings.InstanceID == instance_id, SqlAlchemy.Mappings.SourceID == source_id, SqlAlchemy.Mappings.MethodID == method_id)).first().MappingID try: test = self.__session.query( SqlAlchemy.ScenarioMappings).filter( and_( SqlAlchemy.ScenarioMappings.MappingID == scenariomap.MappingID, SqlAlchemy.ScenarioMappings.ScenarioID == scenariomap.ScenarioID)).first( ).ScenarioMappingID except: self.setup.push_data(scenariomap)
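# --- Illustrative sketch (not part of the wizard code) ---------------------
# Storing the electronic file itself is a plain read of the file's bytes into
# the blob column: the path is built from FileLocationOnDesk plus
# ElectronicFileName + '.' + ElectronicFileFormatCV. A standalone sketch that
# creates a hypothetical sample file first so the read-back is runnable:
import os
import tempfile

folder = tempfile.mkdtemp()                      # stands in for FileLocationOnDesk
filename = 'DiversionData' + '.' + 'csv'         # ElectronicFileName + '.' + format CV
file_path = os.path.join(folder, filename)
with open(file_path, 'wb') as out:
    out.write(b'Date,Flow\n1/1/1996,10\n')       # sample content

with open(file_path, mode='rb') as fp:
    blob = fp.read()                             # raw bytes, kept in the File.File column
print(len(blob))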
def load_data(self): """ This method is used to parse data from each sheet to its appropriate table in the database. Due to the structure of the excel file, some hard coding was done to get data accurately. It functions by iterating over the work_sheet dictionary and getting each corresponding sheet_name and sheetrows to load the table. It starts by querying for id's of the datavalue properties. then using some tests to load the data value appropriately to avoid data duplication and creating links between tables. :return: None """ for sheet_name, sheet_rows in self.work_sheet.items(): if sheet_name != datavalues_sheets_ordered[2]: continue temp = sheet_rows[:] # Detecting start of data in the sheet for row_id, row in enumerate(temp): temp_row = [cell.value for cell in row] if 'TimeSeries_table' in temp_row: temp = sheet_rows[row_id + 4:] break if len(temp) < 1: continue # testing is timeseriesvalues are different or similar # mapx is used to row (object, instance and attribute) to show if other rows with same parms are diff or sim mapx = dict() timeseries_rows = self.work_sheet[datavalues_sheets_ordered[4]][:] # Detecting start of data in the sheet for row_id, row in enumerate(timeseries_rows): temp_row = [cell.value for cell in row] if 'TimeSeriesValues_table' in temp_row: timeseries_rows = sheet_rows[row_id + 4:] break # if timeseries sheet is not empty, we get data which will be used for comparison. if len(timeseries_rows) > 0: temp_row = [cell.value for cell in timeseries_rows[0] ] # get all first row which will be a reference stored_rows = [ temp_row ] # stored rows will be used to determine if a row is different or similar mapx = {str(temp_row[:3]): False} scenario_name = temp[0][2] diff_scene = True row_id = 1 for row in timeseries_rows: if all('' == cell.value for cell in row): break if any('' == cell.value for cell in row[:4]): raise Exception( "Some Empty Fields where found in TimeSeriesValue.\nPlease fill all Required fields" ) if row[-1].value is None: continue if row[0].value == "": raise Exception( 'Error in {} row of "TimeSeriesValues_table" of sheet "{}"\nField named "ObjectType" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[1].value == "": raise Exception( 'Error in {} row of "TimeSeriesValues_table" of sheet "{}"\nField named "InstancenName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[2].value == "": raise Exception( 'Error in {} row of "TimeSeriesValues_table" of sheet "{}"\nField named "ScenarioName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[3].value == "": raise Exception( 'Error in {} row of "TimeSeriesValues_table" of sheet "{}"\nField named "AttributeName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[4].value == "": raise Exception( 'Error in {} row of "TimeSeriesValues_table" of sheet "{}"\nField named "SourceName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[5].value == "": raise Exception( 'Error in {} row of "TimeSeriesValues_table" of sheet "{}"\nField named "MethodName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' 
.format(row_id, sheet_name)) if row[6].value == "": raise Exception( 'Error in {} row of "TimeSeriesValues_table" of sheet "{}"\nField named "YearType" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[7].value == "": raise Exception( 'Error in {} row of "TimeSeriesValues_table" of sheet "{}"\nField named "AggregationStatisticCV" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[8].value == "": raise Exception( 'Error in {} row of "TimeSeriesValues_table" of sheet "{}"\nField named "AggregationInterval" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[9].value == "": raise Exception( 'Error in {} row of "TimeSeriesValues_table" of sheet "{}"\nField named "IntervalTimeUnit" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) temp_row = [cell.value for cell in row[:]] temp_row.pop(2) # determine if a row falls under the category of similar or different for each in stored_rows[:]: if temp_row[:3] == each[:3]: if row[2].value != scenario_name.value: if temp_row == each: # if rows are similar, they are set to true mapx[str(temp_row[:3])] = True break else: # store current row if not found else we store it in the map and assume they # are different if not (str(temp_row[:3]) in mapx.keys()): mapx[str(temp_row[:3])] = False stored_rows.append(temp_row) row_id += 1 for row_id, row in enumerate(temp): if all('' == cell.value for cell in row): break if any('' == cell.value for cell in row[:9]): raise Exception( "Some Empty Fields where found in TimeSeries.\n Please fill all Required fields" ) # test if row is valid for this datatype if not self.data_type_test(self.__session, row, 'TimeSeries'): raise Exception( "'{}' attribute is not associated to {} Datatype". format(row[3].value, 'TimeSeries')) timeseries = SqlAlchemy.TimeSeries() attrib_id, instance_id, scenario_id, source_id, method_id = self.get_ids( row, self.__session, sheet_name, row_id) # Test if attrib belongs to object type self.test_properties(self.__session, row, sheet_name) row_copy = [cell.value for cell in row] row_copy.pop(2) # getting value of diff_scene var based on the (object, instance, scenario and attribute) of the # timeseries values. if diff_scene is True, then the rows are similar and reuse will be implemented # else if diff_scene is False, then the rows are different and reuse will not be implemented try: diff_scene = mapx[str(row_copy[:3])] except: diff_scene = False # If the mapperID exists for the attribs in the current row, we then search if the value exists for # the row is stored in the db, if yes we then try to match with the mappingid to get datavaluemapper datavalues = self.__session.query(SqlAlchemy.Mappings).filter( and_(SqlAlchemy.Mappings.AttributeID == attrib_id, SqlAlchemy.Mappings.InstanceID == instance_id, SqlAlchemy.Mappings.SourceID == source_id, SqlAlchemy.Mappings.MethodID == method_id)).all() ''' The Idea in creating link used here is to query the datavalue table to get the DataValueMapperID using a combination of (aggregattionstatisticcvid, aggreationInterval, Internvaltimeunitcvid) for the current value combination. Only required fields are considered. if the value already exists in the table, the Mapping id is selected and loaded with the scenario id else a new record is created in the Mapping table and link in the scenarioMapping table. 
''' value = None datavalues_id = None found = False try: # skips searhing datavaluemapperid for required field if its attribs are not found in mapping table if not datavalues or not diff_scene: raise Exception # try getting the datavalue_id based on the required value to try getting the required mappingID datavalues_id = self.__session.query( SqlAlchemy.TimeSeries).filter( and_( SqlAlchemy.TimeSeries.AggregationStatisticCV == self.__session.query( SqlAlchemy.CV_AggregationStatistic).filter( SqlAlchemy.CV_AggregationStatistic.Name == row[7].value).first().Name, SqlAlchemy.TimeSeries.AggregationInterval == row[8].value, SqlAlchemy.TimeSeries.IntervalTimeUnitCV == self.__session.query( SqlAlchemy.CV_Units).filter( SqlAlchemy.CV_Units.Name == row[9].value).first().Name, )).all() result = [ datavaluemapper.ValuesMapperID for datavaluemapper in datavalues_id ] # check for mapping with same datavaluesmapper as the data value. # if found, reuse of mapping id is emminent. for mapping in datavalues: if found: break for each in result[:]: if mapping.ValuesMapperID == each: datavalues = mapping found = True break # if the current value datvaluesmapperID is not found matching. # if its not found, new entry is created if not found: raise Exception value = True except Exception as e: datavalues = None datavalues_id = None # if current row params are not already on the mapping table a new entry added # Creating New entry, datavaluemapperID and mappingID if not datavalues: datavalmapper = self.load_data_values(self.__session) dataval_map = SqlAlchemy.Mappings() dataval_map.AttributeID = attrib_id dataval_map.InstanceID = instance_id dataval_map.SourceID = source_id dataval_map.MethodID = method_id # Creating new datavaluemapper if its the start of another block if datavalues_id is None: self.setup.push_data(datavalmapper) datavalues_id = datavalmapper.ValuesMapperID dataval_map.ValuesMapperID = datavalmapper.ValuesMapperID else: dataval_map.ValuesMapperID = datavalues_id self.setup.push_data(dataval_map) else: datavalues_id = datavalues.ValuesMapperID # Creating new scenariomapping if scenarioID-mappingID does not exists. # Starts by searchine for the mappingID in case its just been created, then tests to see if a # scenarioID-mappingID exists, if yes, it skips, if no, it creates an entry scenariomap = SqlAlchemy.ScenarioMappings() scenariomap.ScenarioID = scenario_id # try to get the mappingid for the scenario if an entry already exist # else we get the most recent MappingID if datavalues: scenariomap.MappingID = datavalues.MappingID else: scenariomap.MappingID = self.__session.query( SqlAlchemy.Mappings).filter( and_( SqlAlchemy.Mappings.AttributeID == attrib_id, SqlAlchemy.Mappings.InstanceID == instance_id, SqlAlchemy.Mappings.SourceID == source_id, SqlAlchemy.Mappings.MethodID == method_id, SqlAlchemy.Mappings.ValuesMapperID == datavalues_id)).first().MappingID try: # test if the mappingid - scenarioid already exists in scenario table # if yes, then nothing is added, else, we add new entry based of diff_scene var. 
test = self.__session.query( SqlAlchemy.ScenarioMappings).filter( and_( SqlAlchemy.ScenarioMappings.MappingID == scenariomap.MappingID, SqlAlchemy.ScenarioMappings.ScenarioID == scenariomap.ScenarioID)).first( ).ScenarioMappingID except: self.setup.push_data(scenariomap) if row[10].value: timeseries.IsRegular = row[10].value if row[11].value: timeseries.NoDataValue = row[11].value if row[12].value: timeseries.Description = row[12].value timeseries.BeginDateTime = None timeseries.EndDateTime = None if row[6].value == "": raise Exception( 'Error in "TimeSeries_table" of sheet "{}"\nField named "YearType" is empty.\nThis field should not be empty.\nPlease fill this field to a value' .format(sheet_name)) if row[8].value == "": raise Exception( 'Error in "TimeSeries_table" of sheet "{}"\nField named "AggregationInterval" is empty.\nThis field should not be empty.\nPlease fill this field to a value' .format(sheet_name)) if row[5].value and row[6].value and row[7].value and row[ 8].value: # if the value does not already exist, we add it to the db else we skip if not value or not diff_scene: timeseries.YearType = row[6].value timeseries.AggregationStatisticCV = self.__session.query( SqlAlchemy.CV_AggregationStatistic).filter( SqlAlchemy.CV_AggregationStatistic.Name == row[7].value).first().Name timeseries.AggregationInterval = row[8].value timeseries.IntervalTimeUnitCV = self.__session.query( SqlAlchemy.CV_Units).filter( SqlAlchemy.CV_Units.Name == row[9].value).first().Name timeseries.ValuesMapperID = datavalues_id self.setup.push_data(timeseries) value = False
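# --- Illustrative sketch (not part of the wizard code) ---------------------
# The mapx dictionary above keys each TimeSeriesValues row by its
# (ObjectType, Instance, Attribute) triple and flips the flag to True when an
# identical row (scenario aside) shows up again under a different scenario,
# which is what later allows the mapping to be reused. A reduced standalone
# sketch of that flagging with hypothetical rows:
rows = [
    ('Reservoir', 'Hyrum', 'Inflow', 'base', 10),
    ('Reservoir', 'Hyrum', 'Inflow', 'wet', 10),    # same data, other scenario -> reusable
    ('Reservoir', 'Hyrum', 'Outflow', 'base', 5),
]

mapx, seen = {}, {}
for obj, inst, attr, scenario, value in rows:
    key = (obj, inst, attr)
    data = (obj, inst, attr, value)
    if key in seen and seen[key][0] != scenario and seen[key][1] == data:
        mapx[key] = True          # identical row already seen for another scenario
    else:
        mapx.setdefault(key, False)
        seen[key] = (scenario, data)
print(mapx)  # Inflow -> True (reuse), Outflow -> False (new)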
def load_data(self): """ This method is used to parse data from each sheet to its appropriate table in the database. Due to the structure of the excel file, some hard coding was done to get data accurately. It functions by iterating over the work_sheet dictionary and getting each corresponding sheet_name and sheetrows to load the table. It starts by querying for id's of the datavalue properties. then using a join query to get 'result' (timeseriesID and mappingID). if result is Null, then the timeseriesvalue does not exist and its loaded to the database, Else, a Datavaluemapperid is created for that timeseries. :return: None """ for sheet_name, sheet_rows in self.work_sheet.items(): temp = sheet_rows[:] # Detecting start of data in the sheet for row_id, row in enumerate(temp): temp_row = [cell.value for cell in row] if 'TimeSeriesValues_table' in temp_row: # print '******************** it works here **********************' temp = sheet_rows[row_id + 4:] break if len(temp) < 1: continue # test to see if timeseries has data loaded in it's table, if yes, we load the series values try: self.__session.query( SqlAlchemy.TimeSeries).first().TimeSeriesID except: continue # get data which will be used to determine if a row is similar or different. temp_row = [cell.value for cell in temp[0] ] # getting first row of the timeseriesvalue sheet temp_row.pop(2) stored_rows = [temp_row] scenario_name = temp[0][2] # init_instance is used in storing dates in the time series init_instance = temp[0][1].value if len(temp) > 0 else False dates = [] for row_id, row in enumerate(temp): if all('' == cell.value for cell in row): break if any('' == cell.value for cell in row[:4]): raise Exception( "Some Empty Fields where found in TimeSeriesValue.\nPlease fill all Required fields" ) if row[-1].value is None: continue if row[0].value == "": raise Exception( 'Error in {} row of "TimeSeriesValues_table" of sheet "{}"\nField named "ObjectType" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[1].value == "": raise Exception( 'Error in {} row of "TimeSeriesValues_table" of sheet "{}"\nField named "InstancenName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[2].value == "": raise Exception( 'Error in {} row of "TimeSeriesValues_table" of sheet "{}"\nField named "ScenarioName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[3].value == "": raise Exception( 'Error in {} row of "TimeSeriesValues_table" of sheet "{}"\nField named "AttributeName" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[4].value == "": raise Exception( 'Error in {} row of "TimeSeriesValues_table" of sheet "{}"\nField named "DateTimeStamp" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' .format(row_id, sheet_name)) if row[5].value == "": row[5].value = "-9999" # raise Exception('Error in {} row of "TimeSeriesValues_table" of sheet "{}"\nField named "Value" is empty.\nThis field should not be empty.\nPlease fill this field to a value.' 
# .format(row_id, sheet_name)) timeserieval = SqlAlchemy.TimeSeriesValues() # get attibute id based on attrib - objecttype association try: ResourceTypeID = self.__session.query( SqlAlchemy.ResourceTypes).filter( SqlAlchemy.ResourceTypes.ResourceTypeAcronym == define.datasetName).first().ResourceTypeID attrib_id = self.__session.query( SqlAlchemy.Attributes).filter( and_( SqlAlchemy.Attributes.AttributeName == row[3].value, SqlAlchemy.Attributes.ObjectTypeID == self. __session.query(SqlAlchemy.ObjectTypes).filter( and_( SqlAlchemy.ObjectTypes.ObjectType == row[0].value, SqlAlchemy.ObjectTypes.ResourceTypeID == ResourceTypeID)).first( ).ObjectTypeID)).first().AttributeID except Exception as e: print e raise Exception( "Could not find the combination of attribute and objectType '{}' / '{}' in the Attributess table for row '{}' of TimeSeriesValues sheet" .format(row[3].value, row[0].value), row_id) # Get InstanceID id based on InstanceName. Here, row[1].value--InstanceName try: instance_id = self.__session.query( SqlAlchemy.Instances).filter( SqlAlchemy.Instances.InstanceName == row[1].value).first().InstanceID except: # Get instance id based on ObjectType. instance_type = self.__session.query( SqlAlchemy.ObjectTypes).filter( SqlAlchemy.ObjectTypes.ObjectType == row[0].value).first().ObjectTypologyCV raise Exception( "In the '{}' table,\nCould not find '{}' that existing in row '{}' of '{}' sheet." .format(instance_type, row[1].value, row_id, sheet_name)) # raise Exception('Could not find "{}" in the {} table'.format(row[1].value, instance_type)) try: scenario_id = self.__session.query( SqlAlchemy.Scenarios).filter( SqlAlchemy.Scenarios.ScenarioName == row[2].value).first().ScenarioID except: raise Exception( 'Could not find "{}" in the Scenarios table'.format( row[2].value)) # check difference or similar in rows diff_scene = False temp_row = [cell.value for cell in row[:]] temp_row.pop(2) # Checking row against stored rows to determine if its different or similar for each in stored_rows[:]: if temp_row == each: if row[2].value != scenario_name.value: diff_scene = True break break stored_rows.append(temp_row) # using inner join with mapping, timeseries and scenariomapping to get the timeseries ID result = self.__session.query(SqlAlchemy.TimeSeries.TimeSeriesID, SqlAlchemy.Mappings.MappingID, SqlAlchemy.ScenarioMappings.ScenarioID). \ join(SqlAlchemy.Mappings, SqlAlchemy.Mappings.ValuesMapperID == SqlAlchemy.TimeSeries.ValuesMapperID). \ join(SqlAlchemy.ScenarioMappings, SqlAlchemy.ScenarioMappings.MappingID == SqlAlchemy.Mappings.MappingID). \ filter( and_( SqlAlchemy.Mappings.InstanceID == instance_id, SqlAlchemy.Mappings.AttributeID == attrib_id, SqlAlchemy.ScenarioMappings.ScenarioID == scenario_id )).all() if len(result) == 0: raise Exception( 'One or more parameters in row{} of TimeseriesValues \n' 'Are not found in TimeSeries Table. Please Check \n' 'Loading is Exiting due to this error.'.format(row_id)) # find mapping id which are mapped with the current scenario in the scenariomapping from result above # if it is found, we set the found var to True and reuse the mapping. 
for mapping in result: try: scene = self.__session.query( SqlAlchemy.ScenarioMappings).filter( and_( SqlAlchemy.ScenarioMappings.MappingID == mapping.MappingID, SqlAlchemy.ScenarioMappings.ScenarioID == scenario_id)).first().ScenarioMappingID found = True result = mapping break except Exception as e: print e if isinstance(result, list): raise Exception( "Timeseriesvalue Maps to multiple timeseries \n" "Wizard is confused :( ") # test if an entry already exists in timeseriesvalues ( TimeSeriesID, Value, DateTimeStamp) # if not a new timeseries value entry is added to the db try: if isinstance(row[4].value, float) or isinstance( row[4].value, int): timeserieval.DateTimeStamp = datetime.date.fromordinal( int(row[4].value) + 693594) test_query = self.__session.query( SqlAlchemy.TimeSeriesValues).filter( and_( SqlAlchemy.TimeSeriesValues.TimeSeriesID == result.TimeSeriesID, SqlAlchemy.TimeSeriesValues.DataValue == row[5].value, SqlAlchemy.TimeSeriesValues.DateTimeStamp == datetime.date.fromordinal( int(row[4].value) + 693594))).first().TimeSeriesValueID else: test_query = self.__session.query( SqlAlchemy.TimeSeriesValues).filter( and_( SqlAlchemy.TimeSeriesValues.TimeSeriesID == result.TimeSeriesID, SqlAlchemy.TimeSeriesValues.DataValue == row[5].value, SqlAlchemy.TimeSeriesValues.DateTimeStamp == datetime.date( int(row[4].value.split("/")[2]), int(row[4].value.split("/")[0]), int(row[4].value.split("/") [1])))).first().TimeSeriesValueID # print row except Exception as e: if result is None: raise Exception( 'Error, No TimeSeries was found with "{}"/"{}" ' 'attribue and instance combination'.format( row[3].value, row[1].value)) # Adding new entery for time series values timeserieval.TimeSeriesID = result.TimeSeriesID timeserieval.DataValue = row[5].value try: if isinstance(row[4].value, float) or isinstance( row[4].value, int): timeserieval.DateTimeStamp = datetime.date.fromordinal( int(row[4].value) + 693594) else: timeserieval.DateTimeStamp = datetime.date( int(row[4].value.split("/")[2]), int(row[4].value.split("/")[0]), int(row[4].value.split("/")[1])) except: msg = "Error: row {} in {}\n" \ "'{}' is not date type. (ex:1/1/1996)".format(row_id, sheet_name, row[4].value) raise Exception(msg) # timeserieval.DateTimeStamp = datetime.date.fromordinal(int(row[4].value) + 693594) self.setup.push_data(timeserieval) # adding start and end dates to timeseries. # TODO: add dates to the timeseries when the timeseries values for an instance has been loaded if row[1].value == init_instance: try: if isinstance(row[4].value, float) or isinstance( row[4].value, int): dates.append(int(row[4].value)) else: dates.append( datetime.date( int(row[4].value.split("/")[2]), int(row[4].value.split("/")[0]), int(row[4].value.split("/")[1]))) except: msg = "Error: row {} in {}\n" \ "'{}' is not date type. (ex:1/1/1996)".format(row_id, sheet_name, row[4].value) raise Exception(msg) else: init_instance = row[1] dates = []
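# --- Illustrative sketch (not part of the wizard code) ---------------------
# The "+ 693594" above converts an Excel serial day number into a datetime.date
# through Python's proleptic Gregorian ordinal; because Excel pretends
# 1900-02-29 exists, this offset lines the two calendars up for any date after
# February 1900. The string branch parses MM/DD/YYYY stamps instead. Both paths
# as a standalone check with hypothetical cell values:
import datetime

serial = 36526                                              # numeric DateTimeStamp cell
print(datetime.date.fromordinal(int(serial) + 693594))      # 2000-01-01

stamp = '1/1/1996'                                          # text DateTimeStamp cell
month, day, year = stamp.split('/')
print(datetime.date(int(year), int(month), int(day)))       # 1996-01-01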