def _get_query(self):
    """Return the cached Query for this mapper, rebuilding it when stale.

    The cached ``self._query`` is reused only while it still belongs to
    this mapper.  When it points at a different mapper, or no query has
    been built yet (attribute access raises AttributeError), a fresh
    ``query.Query(self)`` is created and cached.
    """
    try:
        stale = self._query.mapper is not self
    except AttributeError:
        # First access: no cached query exists yet.
        stale = True
    if stale:
        self._query = query.Query(self)
    # Single exit point; the original duplicated the rebuild-and-return
    # logic in both the try and except branches.
    return self._query
def _create_query(self):
    """Build a Query from the current filter text, seeding it with any
    filename context present in the options."""
    new_query = query.Query(self._filter_text)
    opts = self._options
    if opts.current_filename:
        new_query.current_filename = opts.current_filename
    if opts.open_filenames:
        new_query.open_filenames = opts.open_filenames
    return new_query
def assign(profile_fn, outfile=None):
    """Run the two-level scoring pipeline on a PSI-BLAST profile.

    profile_fn -- path to the PSI-BLAST output to parse
    outfile    -- optional destination forwarded to out.output()
    Returns the fully populated query object.
    """
    qry = query.Query("test")
    qry.set_input(bi.parse_psiblast(profile_fn))

    # Level 1: score profile + sequence features into a structure vector.
    lvl1_input = fg.ScorerInput(["profile", "sequence"])
    lvl1_input.set_data(qry)
    lvl1_scorers = [scorer.Scorer(para.lvl1_coef[i], lvl1_input)
                    for i in range(para.num_classes - 1)]
    qry.set_structure_vector(lvl1_scorers)

    # Level 2: re-score using the level-1 structure vector plus profile.
    lvl2_input = fg.ScorerInput(["structure", "profile"])
    lvl2_input.set_data(qry)
    lvl2_scorers = [scorer.Scorer(para.lvl2_coef[i], lvl2_input)
                    for i in range(para.num_classes - 1)]
    qry.set_scores(lvl2_scorers)

    qry.set_assignment(expected.get_expected(para.num_classes))
    qry.set_confidence()
    out.output(qry, outfile)
    return qry
def __init__(self):
    # Command registry: maps script names to handler instances.  Every
    # handler is constructed with this console object so it can call
    # back into it when executed.
    self.context = {
        "scripts": {
            "info": edition.Info(self),
            "load": edition.Load(self),
            "save": edition.Save(self),
            "set": edition.Set(self),
            "get": edition.Get(self),
            "remove": edition.Remove(self),
            "map": edition.Map(self),
            "clear": edition.Clear(self),
            "find": query.Find(self),
            "filter": query.Filter(self),
            "query": query.Query(self),
            "layout": layout.Layout(self),
            "play": player.Play(self),
            "stop": player.Stop(self),
            "topo": graph.Topology(self),
            "test": test.Test(self),
            "help": Help(self),
            "color": Color(self),
            "opendns": opendns.OpenDNS(self)
        }
    }
    # Query state store — presumably filled in by the query handlers;
    # confirm against their implementations.
    self.query = dict()
    # Handle to the OpenGraphiti API.
    self.api = OpenGraphiti()
def render_result(self, uuid, owner, tag, starred, query_string, runid=None, result_string=None):
    """Render the results page for one query run.

    uuid/owner/runid/tag/starred are passed straight through to the
    template; query_string is wrapped in a query.Query and result_string
    (when given) in a query.Result.  Template paths fall back to the
    system defaults when the setup object does not define them.
    """
    q = query.Query(query_string)
    # Only wrap the raw result string when one was actually supplied.
    # (The original used the fragile `cond and X or None` idiom, which
    # also yields None whenever query.Result(...) is falsy.)
    r = query.Result(result_string) if result_string is not None else None

    # Missing template attributes raise on access; fall back to defaults.
    try:
        param_table = self.setup.param_table_template.PCDATA
    except Exception:
        param_table = "system/results/param_table.genshi"
    param_table = cherrypy.engine.templateProcessor.template_filename(param_table)

    try:
        result_table = self.setup.result_table_template.PCDATA
    except Exception:
        result_table = "system/results/result_table.genshi"
    result_table = cherrypy.engine.templateProcessor.template_filename(result_table)

    # BUG FIX: the original bound `tmpl` only when result_template was
    # truthy or when an exception fired; a present-but-None
    # result_template left `tmpl` unbound and crashed with NameError.
    tmpl = "system/results/results.genshi"
    try:
        if self.setup.result_template is not None:
            tmpl = self.setup.result_template.PCDATA
    except Exception:
        pass

    return render(tmpl, app=self, uuid=uuid, owner=owner, runid=runid,
                  query=q, result=r, param_table=param_table,
                  result_table=result_table, tag=tag, starred=starred)
def query():
    """Interactive search loop.

    The Query object is created once so all indexes are read a single
    time; each user-entered line is then turned into a query.
    """
    search_client = q.Query()
    while True:
        search_client.makeQuery(raw_input("Enter your search query: "))
def get_species(self):
    """
    Queries all species from the database-node via TAP-XSAMS request and
    Query 'Select Species'.  The results are stored in self.Molecules and
    self.Atoms (whichever the node returns).

    Note: This does not work for all species!
    """
    # Some nodes do not understand the query SELECT SPECIES at all, so a
    # tautological InchiKey filter is used as a workaround that every
    # node accepts.
    query = q.Query("SELECT SPECIES WHERE ((InchiKey!='UGFAIRIUMAVXCW'))")
    query.set_node(self)
    result = r.Result()
    result.set_query(query)
    result.do_query()
    result.populate_model()
    # A node may return molecules, atoms, or neither; missing keys are
    # expected.  Narrowed from bare `except:` so KeyboardInterrupt /
    # SystemExit are no longer swallowed.
    try:
        self.Molecules = result.data['Molecules']
    except Exception:
        pass
    try:
        self.Atoms = result.data['Atoms']
    except Exception:
        pass
def set_child_record_method(wrapped_obj, new_value):
    # Setter for an indirect (through-join) child association: makes
    # +new_value+ the child of +wrapped_obj+ by rewriting the foreign
    # key on whichever side of the join actually holds it, and queues
    # the touched records on wrapped_obj._related_records for saving.
    _verify_type_match(new_value, self.child_name)
    child = model_from_name(self.child_name)
    table = repo.Repo.table_name(wrapped_obj.__class__)
    q = query.Query(child, record=wrapped_obj).joins(table).where(
        **{table: {'id': wrapped_obj.id}})
    # Get the previous value
    old_value = q.first()
    # Recall that join_args will have either 0 or 2 or more,
    # never 1 element
    joiner = q.join_args[-2]
    # Find the intermediate record that will connect +new_value+
    # to wrapped_obj
    next_up = model_from_name(joiner['table'])
    next_r = query.Query(next_up, record=wrapped_obj).joins(table).where(**{
        table: {'id': wrapped_obj.id}
    }).first()
    if not model_has_foreign_key_for_table(joiner['table'], child):
        # The intermediate record has the foreign key: set it
        # (None clears the association).
        if new_value is None:
            setattr(next_r, joiner['on'][0], None)
        else:
            setattr(next_r, joiner['on'][0],
                    getattr(new_value, joiner['on'][1]))
        wrapped_obj._related_records.append(next_r)
    else:
        # Set the foreign key on the new value
        if new_value is not None:
            # Associate new value
            setattr(
                new_value,
                joiner['on'][1],  # Foreign key
                # Lookup the id/foreign_key of the record
                getattr(next_r, joiner['on'][0]))
            wrapped_obj._related_records.append(new_value)
        # Disassociate the old value
        if old_value is not None:
            setattr(
                old_value,
                joiner['on'][1],  # Foreign key
                None)
            wrapped_obj._related_records.append(old_value)
def __init__(self, ID="", name="", description="", metrics=None, query=None):
    """Initialize from a deep copy of the module-level defaults.

    metrics and query fall back to fresh objects so that instances
    never share mutable state.
    """
    self.__data = copy.deepcopy(default)
    self.ID = ID
    self.name = name
    self.description = description
    self.metrics = metrics if metrics else []
    self.query = query if query else mquery.Query()
def main():
    """Poll the screen selection and print its translation in a loop."""
    selection = select_area.Selection(INTERVAL)
    translator = query.Query()
    while True:
        text = selection.get_str()
        print(translator.qr_gxlate(text))
        print(translator.get_gelapsed())
def query_twitter(self): """ Gets the followers of the current bot :param bot_id: bot_id to get followers of :return: up to 5000 of the most recent followers of the current bot """ # Maybe expand so can take in screen name or bot id q = query.Query(self.tokens) self.followers = q.query_api(self.bot_id)
def setUp(self):
    # Test fixture: a Query client plus a forked server thread listening
    # on a fixed local port.
    self.port = 27601
    self.timeout = 1  # presumably seconds — confirm against ServerQuery
    self.q = query.Query()
    # init server fork
    self.thread = ServerQuery(self.port)
    self.thread.start()
def child_record_method(wrapped_obj):
    """Fetch the single associated child record by joining back through
    the parent's table on its id."""
    child_model = model_from_name(self.child_name)
    parent_table = repo.Repo.table_name(wrapped_obj.__class__)
    child_query = query.Query(child_model, record=wrapped_obj)
    child_query = child_query.joins(parent_table).where(
        **{parent_table: {'id': wrapped_obj.id}})
    return child_query.first()
def check_for_updates(self, node):
    """
    Checks for each database entry if an update for the molecular or
    atomic specie is available in the specified VAMDC database node.

    :ivar nodes.Node node: VAMDC database node which will be checked for updates
    """
    count_updates = 0
    counter = 0
    #species_list = []
    cursor = self.conn.cursor()
    cursor.execute("SELECT PF_Name, PF_SpeciesID, PF_VamdcSpeciesID, datetime(PF_Timestamp) FROM Partitionfunctions ")
    rows = cursor.fetchall()
    num_rows = len(rows)
    query = q.Query()
    request = r.Request()
    for row in rows:
        counter += 1
        # Progress prefix; the per-row status is appended on the same line.
        print("%5d/%5d: Check specie %-55s (%-15s): " % (counter, num_rows, row[0], row[1]), end=' ')
        #id = row[1]
        vamdcspeciesid = row[2]
        # query_string = "SELECT ALL WHERE VAMDCSpeciesID='%s'" % vamdcspeciesid
        # Query by species id; row[1][6:] strips what is presumably a
        # 6-character node prefix from the stored id — confirm format.
        query_string = "SELECT ALL WHERE SpeciesID=%s" % row[1][6:]
        request.setquery(query_string)
        request.setnode(node)
        # Only the last-modified header is needed to detect staleness.
        try:
            changedate = request.getlastmodified()
        except r.TimeOutError:
            print("TIMEOUT")
            continue
        except r.NoContentError:
            print("ENTRY OUTDATED")
            changedate = None
            continue
        except Exception as e:
            print("Error in getlastmodified: %s " % str(e))
            print("Status - code: %s" % str(request.status))
            changedate = None
            continue
        # Stored timestamps are naive; treat them as GMT for comparison.
        tstamp = parser.parse(row[3] + " GMT")
        if changedate is None:
            print(" -- UNKNOWN (Could not retrieve information)")
            continue
        if tstamp < changedate:
            print(" -- UPDATE AVAILABLE ")
            count_updates += 1
        else:
            print(" -- up to date")
    if count_updates == 0:
        print("\r No updates for your entries available")
    print("Done")
def search(queryinput, key, provider, cutoff, n):
    """Run the full search pipeline and return the top answers.

    queryinput -- raw user query, expanded via query.Query
    key/provider -- search-engine credentials and backend
    cutoff, n  -- forwarded to Tile.getAnswers for answer selection
    """
    search_engine = engine.Engine(provider, key)
    expanded_queries = query.Query(queryinput).getQueries()
    grams = search_engine.searchAndGram(expanded_queries)
    # Reweight the grams in place before tiling them into answers.
    gram_filter = filter.Filter(expanded_queries, grams)
    gram_filter.reweightGrams()
    return tile.Tile(grams).getAnswers(cutoff, n)
def __init__(self, line):
    """Parse one comma-separated ground-truth line: query, entity, file,
    then run the query and measure every engine's result against it."""
    p = line.split(',')
    self.query = query.Query(p[0])
    self.entity = p[1]
    self.file = p[2]
    # -1 sentinel means the entity is absent from the document map.
    self.correct_idx = docmap.get(self.entity, -1)
    if self.correct_idx < 0:
        raise Exception(f'Ground truth not found in documents. Offending line {p}')
    # print(query.documents[self.correct_idx], "query: " + p[0])
    # execute returns 4 items, one for each engine
    self.results = [Measure(p[0], engine_result, self.correct_idx)
                    for engine_result in self.query.execute()]
def menu_mysqlquery(self, widget=None, event=None, data=None): print "self.database: ", self.database Query = query.Query(self.database, self.Users) fcands, spcands = Query.query_cands() if fcands: self.Plot.set_fcands(fcands.get_cands()) if spcands: self.Plot.set_spcands(spcands.get_cands()) # Update the main plot self.Plot.set_mode(self.mode) self.update_main_plot()
def __init__(self):
    # Entry point of the subdomain reconnaissance tool: if a previous
    # run left save.txt behind, resume information harvesting from it;
    # otherwise prompt for a domain and start a fresh subdomain query.
    print('[= =] 欢迎使用子域名一条龙信息收集工具')
    if os.path.exists('save.txt'):
        # Resume path: run both harvesters against the saved results.
        print('---已找到save.txt,开启暴躁的信息采集---')
        fd = found.Found()
        fd.djc()
        print('---暴躁的信息采集2---')
        rqts = reqts.Rgbtsqeury()
        rqts.djc()
    else:
        # Fresh path: ask for the target domain and query subdomains.
        print('[- -] 没有检测到save.txt,开启暴躁的子域名查询')
        users = input('[- -] 要查询的域名>')
        qt = query.Query('{}'.format(users))
        qt.request()


# NOTE(review): in the collapsed source `Main()` follows the method body;
# placed at module level here (presumably the script's kick-off call that
# instantiates the enclosing class) — confirm against the original layout.
Main()
def child_records_method(wrapped_obj):
    """Collect the associated child records, joined back through the
    parent's table and narrowed by the configured scope.

    Note: self.through need not be the last link in the chain — it may
    be one side of a many-to-many, or a through nested several levels
    down (e.g. Category has many Posts through Threads while the chain
    is Category -> Forum -> Thread -> Post).
    """
    child_model = model_from_name(self.child_name)
    parent_table = repo.Repo.table_name(wrapped_obj.__class__)
    unscoped = query.Query(child_model, record=wrapped_obj).joins(
        parent_table).where(**{parent_table: {'id': wrapped_obj.id}})
    return self.scoping(unscoped)
def load_params(self):
    """Load user configuration (prompting for a username on first run),
    then build the shared query and database helpers."""
    # initial config instance
    self.config = config.Config()
    # prompt user for name, needed for database object
    try:
        self.user = self.config.settings["user"]
    except KeyError:
        # NOTE(review): the original source was corrupted here
        # ('..."Input your chess.com username:"******"user"] = name'
        # — a redaction artifact).  Reconstructed as the standard
        # QInputDialog flow: store the name on accept, exit otherwise.
        # Confirm against upstream history.
        name, ok = QInputDialog.getText(self, "Configuration",
                                        "Input your chess.com username:")
        if ok:
            self.config.settings["user"] = name
            self.user = name
        else:
            self.exit()
    # initial query and db instances
    self.query = query.Query()
    self.db = database.Database(self.config.settings, default_query=self.query)
def setquery(self, query):
    """
    Sets the query which shall be defined on the database node.
    Query could either be a query.Query instance or a string.

    The query has to be specified before the request can be performed.
    """
    self.status = 0
    self.reason = "INIT"
    # isinstance instead of type() equality so Query subclasses and both
    # Python-2 string types are accepted.
    if isinstance(query, q.Query):
        self.query = query
        self.__setquerypath()
    elif isinstance(query, (str, unicode)):
        self.query = q.Query(Query=query)
        self.__setquerypath()
    else:
        # Unsupported types are silently ignored — kept from the
        # original behaviour; callers rely on this not raising.
        #print(type(query))
        #print("Warning: this is not a query object")
        pass
def check_for_updates(self, node):
    """
    Checks every Partitionfunctions entry against *node* and reports,
    per specie, whether a newer version is available (by comparing the
    stored timestamp with the node's change date).
    """
    count_updates = 0
    counter = 0
    #species_list = []
    cursor = self.conn.cursor()
    cursor.execute("SELECT PF_Name, PF_SpeciesID, PF_VamdcSpeciesID, datetime(PF_Timestamp) FROM Partitionfunctions ")
    rows = cursor.fetchall()
    num_rows = len(rows)
    query = q.Query()
    result = results.Result()
    for row in rows:
        counter += 1
        # Progress prefix; trailing comma keeps the status on this line.
        print "%5d/%5d: Check specie %-55s (%-15s): " % (counter, num_rows, row[0], row[1]),
        #id = row[1]
        vamdcspeciesid = row[2]
        # query_string = "SELECT ALL WHERE VAMDCSpeciesID='%s'" % vamdcspeciesid
        # row[1][6:] strips what is presumably a 6-character node prefix
        # from the species id — confirm the id format.
        query_string = "SELECT ALL WHERE SpeciesID=%s" % row[1][6:]
        query.set_query(query_string)
        query.set_node(node)
        result.set_query(query)
        # Any failure to fetch the change date is treated as "unknown".
        try:
            changedate = result.getChangeDate()
        except:
            changedate = None
        # Stored timestamps are naive; compare as GMT.
        tstamp = parser.parse(row[3] + " GMT")
        if changedate is None:
            print " -- UNKNOWN (Could not retrieve information)"
            continue
        if tstamp < changedate:
            print " -- UPDATE AVAILABLE "
            count_updates += 1
        else:
            print " -- up to date"
    if count_updates == 0:
        print "\r No updates for your entries available"
    print "Done"
def __init__(self, settings, auto_connect=True, default_query=None):
    """Set up database state from *settings* and optionally connect.

    A Query object is created here only when the caller did not supply
    one, so a single instance can be shared across a run instead of
    creating several.
    """
    self.user = settings["user"]
    self.settings = settings["database"]
    self.path = self.settings["db_dir"]
    self.connected = False
    self.connection = None
    self.cursor = None
    self.create = False
    self.query = default_query if default_query is not None else query.Query()
    if auto_connect:
        self.connect()
    # connect() flips self.create when it had to build a new database
    # file; in that case the schema must be created now.
    if self.create:
        print("Creating tables.")
        self.create_table()
def __init__(self, mainFrameObject):
    """Wire all widgets and behaviors around the given MainFrame.

    Raises TypeError when mainFrameObject is not a MainFrame.
    """
    if type(mainFrameObject) is not MainFrame:
        # BUG FIX: the original constructed the TypeError but never
        # raised it, so a wrong argument sailed straight through.
        raise TypeError("the input object is not MainFrame")
    self.query = query.Query()
    # Collect all widgets
    self.main = mainFrameObject
    self.dialogbehavior = DialogBehavior(self.main)
    self.searchPanelBehavior = SearchPanelBehavior(
        self.main.panelLeft.searchPanel,
        self.main.panelRight.bookList,
        self.query)
    self.panelL = self.main.panelLeft
    self.panelR = self.main.panelRight
    self.booklist = self.main.panelRight.bookList
    # (the original assigned self.statusbar twice; the duplicate was removed)
    self.statusbar = self.main.statusBar
    self.launchButton = self.main.panelLeft.launchButton
    self.infoBookTree = self.main.panelLeft.infoBookTree
    self.searchComboBox = self.main.panelLeft.searchPanel.searchComboBox
    self.searchEntry = self.main.panelLeft.searchPanel.entrySearch
    self.searchResult = self.main.panelLeft.searchPanel.resultSearch
    self.history = history.History()
    self.statusbar.SetStatusText("{} book(s) in the"
                                 " database".format(
                                     bdd_misc_queries.numberOfBooks()))
    # init list items
    self.query.setQuery(select_items.SelectItems.like('book', 'A'))
    self.booklist.fillList(self.query)
    self.initEvent()
# Arguments for the template arguments = { # Common Template Variables "wwwroot": config.wwwroot, "tree": config.trees[0], "trees": config.trees, "generated_date": config.generated_date, "config": config.template_parameters, # Error template Variables "error": "Tree '%s' is not a valid tree." % tree } template = "error.html" else: # Parse the search query qtext = querystring.get("q", "").decode('utf-8') q = query.Query(qtext) # Connect to database conn = utils.connect_db(tree) # Arguments for the template arguments = { # Common Template Variables "wwwroot": config.wwwroot, "tree": tree, "trees": config.trees, "config": config.template_parameters, "generated_date": config.generated_date } if conn: result = None if can_redirect: result = query.direct_result(conn, q)
def update_database(self, add_nodes = None, insert_only = False, update_only = False):
    """
    Checks if there are updates available for all entries. Updates will
    be retrieved from the resource specified in the database. All
    resources will be searched for new entries, which will be inserted
    if available. Additional resources can be specified via add_nodes.

    add_nodes: Single or List of node-instances (nodes.Node)
    insert_only: skip the update pass, only look for new species
    update_only: skip the insert pass, only update existing entries
    """
    # counter to identify which entry is currently processed
    counter = 0
    # counter to count available updates
    count_updates = 0
    # list of database - nodes which are currently in the local database
    dbnodes = []
    # create an instance with all available vamdc-nodes
    nl = nodes.Nodelist()
    # attach additional nodes to the list of dbnodes (for insert)
    if not functions.isiterable(add_nodes):
        add_nodes = [add_nodes]
    for node in add_nodes:
        if node is None:
            pass
        elif not isinstance(node, nodes.Node):
            print "Could not attach node. Wrong type, it should be type <nodes.Node>"
        else:
            dbnodes.append(node)
    #--------------------------------------------------------------------
    # Check if updates are available for entries
    # Get list of species in the database
    cursor = self.conn.cursor()
    cursor.execute("SELECT PF_Name, PF_SpeciesID, PF_VamdcSpeciesID, datetime(PF_Timestamp), PF_ResourceID FROM Partitionfunctions ")
    rows = cursor.fetchall()
    num_rows = len(rows)
    query = q.Query()
    result = results.Result()
    if not insert_only:
        print("----------------------------------------------------------")
        print "Looking for updates"
        print("----------------------------------------------------------")
        for row in rows:
            counter += 1
            print "%5d/%5d: Check specie %-55s (%-15s): " % (counter, num_rows, row[0], row[1]),
            # Resolve the resource that originally provided this entry;
            # any lookup failure means the resource is unavailable.
            try:
                node = nl.getnode(str(row[4]))
            except:
                node = None
            if node is None:
                print " -- RESOURCE NOT AVAILABLE"
                continue
            else:
                if node not in dbnodes:
                    dbnodes.append(node)
            vamdcspeciesid = row[2]
            # query_string = "SELECT ALL WHERE VAMDCSpeciesID='%s'" % vamdcspeciesid
            # row[1][6:] strips what is presumably a node prefix from the
            # species id — confirm the id format.
            query_string = "SELECT ALL WHERE SpeciesID=%s" % row[1][6:]
            query.set_query(query_string)
            query.set_node(node)
            result.set_query(query)
            try:
                changedate = result.getChangeDate()
            except:
                changedate = None
            # Stored timestamps are naive; compare as GMT.
            tstamp = parser.parse(row[3] + " GMT")
            if changedate is None:
                print " -- UNKNOWN (Could not retrieve information)"
                continue
            if tstamp < changedate:
                print " -- UPDATE AVAILABLE "
                count_updates += 1
                print " -- PERFORM UPDATE -- "
                # Re-query the full species record and replace the entry.
                query_string = "SELECT SPECIES WHERE SpeciesID=%s" % row[1][6:]
                query.set_query(query_string)
                query.set_node(node)
                result.set_query(query)
                result.do_query()
                result.populate_model()
                # NOTE(review): called without `self.` here, unlike the
                # insert pass below — confirm whether a module-level
                # function or the method is intended.
                insert_species_data(result.data['Molecules'], update = True)
                print " -- UPDATE DONE -- "
            else:
                print " -- up to date"
        if count_updates == 0:
            print "\r No updates for your entries available"
        print "Done"
    else:
        # insert_only: just resolve the distinct resources so the insert
        # pass below knows which nodes to scan.
        cursor.execute("SELECT distinct PF_ResourceID FROM Partitionfunctions ")
        rows = cursor.fetchall()
        for row in rows:
            try:
                node = nl.getnode(str(row[0]))
            except:
                node = None
            if node is None:
                print " -- RESOURCE NOT AVAILABLE"
                continue
            else:
                if node not in dbnodes:
                    dbnodes.append(node)
    if update_only:
        return
    # Check if there are new entries available
    #---------------------------------------------------------
    # Check all dbnodes for new species
    counter = 0
    insert_molecules_list = []
    for node in dbnodes:
        print("----------------------------------------------------------")
        print "Query '{dbname}' for new species ".format(dbname=node.name)
        print("----------------------------------------------------------")
        node.get_species()
        for id in node.Molecules:
            try:
                cursor.execute("SELECT PF_Name, PF_SpeciesID, PF_VamdcSpeciesID, PF_Timestamp FROM Partitionfunctions WHERE PF_SpeciesID=?", [(id)])
                exist = cursor.fetchone()
                if exist is None:
                    # Unknown specie: queue it for insertion.
                    print " %s" % node.Molecules[id]
                    insert_molecules_list.append(node.Molecules[id])
                    counter += 1
            except Exception, e:
                print e
                print id
    print "There are %d new species available" % counter
    print("----------------------------------------------------------")
    print "Start insert"
    print("----------------------------------------------------------")
    # NOTE(review): `node` here is the last node of the loop above —
    # confirm that all queued molecules belong to it.
    self.insert_species_data(insert_molecules_list, node)
    print("----------------------------------------------------------")
    print "Done"
def insert_species_data(self, species, node, update=False):
    """
    Inserts new species into the local database

    species: species which will be inserted
    node: vamdc-node / type: instance(nodes.node)
    update: if True then all entries in the local database with the
            same species-id will be deleted before the insert is
            performed.
    """
    # create a list of names. New names have not to be in that list
    names_black_list = []
    cursor = self.conn.cursor()
    cursor.execute("SELECT PF_Name FROM Partitionfunctions")
    rows = cursor.fetchall()
    for row in rows:
        names_black_list.append(row[0])
    #----------------------------------------------------------
    # Create a list of species for which transitions will be
    # retrieved and inserted in the database.
    # Species have to be in the Partitionfunctions - table
    if not functions.isiterable(species):
        species = [species]
    #--------------------------------------------------------------
    for specie in species:
        # per-name transition counters for this specie
        num_transitions = {}
        # will contain a list of names which belong to one specie
        species_names = {}
        # list will contain species whose insert-failed
        species_with_error = []
        # check if specie is of type Molecule
        if isinstance(specie, specmodel.Molecule):
            speciesid = specie.SpeciesID
            vamdcspeciesid = specie.VAMDCSpeciesID
            formula = specie.OrdinaryStructuralFormula
        else:
            # Otherwise a 27-character string is accepted as an InChIKey.
            try:
                if isinstance(specie, str) and len(specie) == 27:
                    vamdcspeciesid = specie
                    speciesid = None
            except:
                print "Specie is not of wrong type"
                print "Type Molecule or string (Inchikey) is allowed"
                continue
        if speciesid:
            print "Processing: {speciesid}".format(speciesid = speciesid)
        else:
            print "Processing: {vamdcspeciesid}".format(vamdcspeciesid = vamdcspeciesid)
        try:
            # Create query string
            query_string = "SELECT ALL WHERE VAMDCSpeciesID='%s'" % vamdcspeciesid
            query = q.Query()
            result = results.Result()
            # Get data from the database
            query.set_query(query_string)
            query.set_node(node)
            result.set_query(query)
            result.do_query()
            result.populate_model()
        except:
            print " -- Error: Could not fetch and process data"
            continue
        #---------------------------------------
        cursor = self.conn.cursor()
        cursor.execute('BEGIN TRANSACTION')
        #------------------------------------------------------------------
        # if update is allowed then all entries in the database for the
        # given species-id will be deleted, and thus replaced by the new
        # data
        if update:
            cursor.execute("SELECT PF_Name FROM Partitionfunctions WHERE PF_SpeciesID = ?", (speciesid, ))
            rows = cursor.fetchall()
            for row in rows:
                names_black_list.remove(row[0])
                cursor.execute("DELETE FROM Transitions WHERE T_Name = ?", (row[0], ))
                cursor.execute("DELETE FROM Partitionfunctions WHERE PF_Name = ?", (row[0], ))
        #------------------------------------------------------------------
        # Insert all transitions
        num_transitions_found = len(result.data['RadiativeTransitions'])
        counter_transitions = 0
        for trans in result.data['RadiativeTransitions']:
            counter_transitions += 1
            print "\r insert transition %d of %d" % (counter_transitions, num_transitions_found),
            # data might contain transitions for other species (if query
            # is based on ichikey/vamdcspeciesid).
            # Insert transitions only if they belong to the correct specie
            if result.data['RadiativeTransitions'][trans].SpeciesID == speciesid or speciesid is None:
                id = str(result.data['RadiativeTransitions'][trans].SpeciesID)
                # if an error has occured already then there will be no
                # further insert
                if id in species_with_error:
                    continue
                formula = str(result.data['Molecules'][id].OrdinaryStructuralFormula)
                # Get upper and lower state from the states table
                try:
                    upper_state = result.data['States']["%s" % result.data['RadiativeTransitions'][trans].UpperStateRef]
                    lower_state = result.data['States']["%s" % result.data['RadiativeTransitions'][trans].LowerStateRef]
                except (KeyError, AttributeError):
                    print " -- Error: State is missing"
                    species_with_error.append(id)
                    continue
                # Get string which identifies the vibrational states
                # involved in the transition
                t_state = self.getvibstatelabel(upper_state, lower_state)
                # Get hyperfinestructure info if hfsInfo is None
                # only then the hfsInfo has not been inserted in the
                # species name (there can be multiple values in the
                # complete dataset
                t_hfs = ''
                try:
                    for pc in result.data['RadiativeTransitions'][trans].ProcessClass:
                        if str(pc)[:3] == 'hyp':
                            t_hfs = str(pc)
                except Exception, e:
                    print "Error: %s", e
                t_name = "%s; %s; %s" % (formula, t_state, t_hfs)
                t_name = t_name.strip()
                # check if name is in the list of forbidden names and add
                # counter if so
                i = 1
                while t_name in names_black_list:
                    t_name = "%s#%d" % (t_name.split('#')[0], i)
                    i += 1
                # update list of distinct species names.
                if id in species_names:
                    if not t_name in species_names[id]:
                        species_names[id].append(t_name)
                        num_transitions[t_name] = 0
                else:
                    species_names[id] = [t_name]
                    num_transitions[t_name] = 0
                frequency = float(result.data['RadiativeTransitions'][trans].FrequencyValue)
                try:
                    uncertainty = "%lf" % float(result.data['RadiativeTransitions'][trans].FrequencyAccuracy)
                except TypeError:
                    print " -- Error uncertainty not available"
                    species_with_error.append(id)
                    continue
                # Get statistical weight if present
                try:
                    weight = int(upper_state.TotalStatisticalWeight)
                except:
                    print " -- Error statistical weight not available"
                    species_with_error.append(id)
                    continue
                # Get nuclear spin isomer (ortho/para) if present
                try:
                    nsiName = upper_state.NuclearSpinIsomerName
                except AttributeError:
                    nsiName = None
                # Insert transition into database
                try:
                    cursor.execute("""INSERT INTO Transitions (
                        T_Name, T_Frequency, T_EinsteinA, T_Uncertainty,
                        T_EnergyLower, T_UpperStateDegeneracy, T_HFS,
                        T_UpperStateQuantumNumbers,
                        T_LowerStateQuantumNumbers) VALUES
                        (?, ?,?,?,?, ?,?, ?,?)""",
                        (t_name,
                         "%lf" % frequency,
                         "%g" % float(result.data['RadiativeTransitions'][trans].TransitionProbabilityA),
                         uncertainty,
                         "%lf" % float(lower_state.StateEnergyValue),
                         weight,
                         #upper_state.QuantumNumbers.case,
                         t_hfs,
                         str(upper_state.QuantumNumbers.qn_string),
                         str(lower_state.QuantumNumbers.qn_string),
                         ))
                    num_transitions[t_name] += 1
                except Exception, e:
                    print "Transition has not been inserted:\n Error: %s" % e
def insert_radiativetransitions(self, species, node):
    """
    Re-fetches the radiative transitions for *species* from *node* and
    replaces the corresponding rows in the local Transitions table.
    Species may be given by SpeciesID or VAMDCSpeciesID; they must
    already exist in the Partitionfunctions table.
    """
    # will contain a list of names which belong to one specie
    species_names = {}
    #----------------------------------------------------------
    # Create a list of species for which transitions will be
    # retrieved and inserted in the database.
    # Species have to be in the Partitionfunctions - table
    if not functions.isiterable(species):
        species = [species]
    species_list = []
    cursor = self.conn.cursor()
    for specie in species:
        cursor.execute("SELECT PF_Name, PF_SpeciesID, PF_VamdcSpeciesID, PF_HFS FROM Partitionfunctions WHERE PF_SpeciesID=? or PF_VamdcSpeciesID=?", (specie, specie))
        rows = cursor.fetchall()
        for row in rows:
            species_list.append([row[0], row[1], row[2], row[3]])
    #--------------------------------------------------------------
    for specie in species_list:
        # per-name transition counters for this specie
        num_transitions = {}
        #------------------------------------
        # Retrieve data from the database
        id = specie[1]
        vamdcspeciesid = specie[2]
        hfs = specie[3]
        name = specie[0]
        # name should be formated like 'formula; state-info; hfs-info'
        name_array = name.split(';')
        formula = name_array[0].strip()
        try:
            stateInfo = name_array[1].strip()
        except:
            stateInfo = ''
        # get hfs-flag from the name.
        try:
            hfsInfo = name_array[2].strip()
        except:
            hfsInfo = ''
        # Create query string
        query_string = "SELECT ALL WHERE VAMDCSpeciesID='%s'" % vamdcspeciesid
        if hfs is not None and hfs.strip() != '':
            query_string += " and RadTransCode='%s'" % hfs
        query = q.Query()
        result = results.Result()
        # Get data from the database
        query.set_query(query_string)
        query.set_node(node)
        result.set_query(query)
        result.do_query()
        result.populate_model()
        #---------------------------------------
        # Replace all stored transitions for this name inside one
        # transaction.
        cursor = self.conn.cursor()
        cursor.execute('BEGIN TRANSACTION')
        cursor.execute("DELETE FROM Transitions WHERE T_Name = ?", (name,))
        for trans in result.data['RadiativeTransitions']:
            # data might contain transitions for other species (if query
            # is based on ichikey/vamdcspeciesid).
            # Insert transitions only if they belong to the correct specie
            if result.data['RadiativeTransitions'][trans].SpeciesID == id:
                # Get upper and lower state from the states table
                upper_state = result.data['States']["%s" % result.data['RadiativeTransitions'][trans].UpperStateRef]
                lower_state = result.data['States']["%s" % result.data['RadiativeTransitions'][trans].LowerStateRef]
                # Get string which identifies the vibrational states
                # involved in the transition
                try:
                    if upper_state.QuantumNumbers.vibstate == lower_state.QuantumNumbers.vibstate:
                        t_state = str(upper_state.QuantumNumbers.vibstate).strip()
                    else:
                        #vup = upper_state.QuantumNumbers.vibstate.split(",")
                        #vlow = lower_state.QuantumNumbers.vibstate.split(",")
                        # Collect every vibrational quantum-number label
                        # from either state (missing values default to 0).
                        v_dict = {}
                        for label in list(set(upper_state.QuantumNumbers.qn_dict.keys() + lower_state.QuantumNumbers.qn_dict.keys())):
                            if isVibrationalStateLabel(label):
                                try:
                                    value_up = upper_state.QuantumNumbers.qn_dict[label]
                                except:
                                    value_up = 0
                                try:
                                    value_low = lower_state.QuantumNumbers.qn_dict[label]
                                except:
                                    value_low = 0
                                v_dict[label] = [value_up, value_low]
                        v_string = ''
                        valup_string = ''
                        vallow_string = ''
                        for v in v_dict:
                            v_string += "%s," % v
                            valup_string += "%s," % v_dict[v][0]
                            vallow_string += "%s," % v_dict[v][1]
                        if len(v_dict) > 1:
                            t_state = "(%s)=(%s)-(%s)" % (v_string[:-1], valup_string[:-1], vallow_string[:-1])
                        else:
                            t_state = "%s=%s-%s" % (v_string[:-1], valup_string[:-1], vallow_string[:-1])
                        #t_state = '(%s)-(%s)' % (upper_state.QuantumNumbers.vibstate,lower_state.QuantumNumbers.vibstate)
                except:
                    t_state = ''
                # go to the next transition if state does not match
                if t_state != stateInfo and stateInfo is not None and stateInfo != '':
                    continue
                # Get hyperfinestructure info if hfsInfo is None
                # only then the hfsInfo has not been inserted in the
                # species name (there can be multiple values in the
                # complete dataset
                if hfsInfo == '':
                    t_hfs = ''
                    try:
                        for pc in result.data['RadiativeTransitions'][trans].ProcessClass:
                            if str(pc)[:3] == 'hyp':
                                t_hfs = str(pc)
                    except Exception, e:
                        print "Error: %s", e
                else:
                    t_hfs = hfsInfo
                # if hfs is not None and empty then only Transitions
                # without hfs-flag should be processed
                if hfs is not None and hfs != t_hfs:
                    continue
                t_name = "%s; %s; %s" % (formula, t_state, t_hfs)
                t_name = t_name.strip()
                # update list of distinct species names.
                if id in species_names:
                    if not t_name in species_names[id]:
                        species_names[id].append(t_name)
                        num_transitions[t_name] = 0
                else:
                    species_names[id] = [t_name]
                    num_transitions[t_name] = 0
                frequency = float(result.data['RadiativeTransitions'][trans].FrequencyValue)
                uncertainty = "%lf" % float(result.data['RadiativeTransitions'][trans].FrequencyAccuracy)
                # Get statistical weight if present
                if upper_state.TotalStatisticalWeight:
                    weight = int(upper_state.TotalStatisticalWeight)
                else:
                    weight = None
                # Get nuclear spin isomer (ortho/para) if present
                try:
                    nsiName = upper_state.NuclearSpinIsomerName
                except AttributeError:
                    nsiName = None
                # Insert transition into database
                try:
                    cursor.execute("""INSERT INTO Transitions (
                        T_Name, T_Frequency, T_EinsteinA, T_Uncertainty,
                        T_EnergyLower, T_UpperStateDegeneracy, T_HFS,
                        T_UpperStateQuantumNumbers,
                        T_LowerStateQuantumNumbers) VALUES
                        (?, ?,?,?,?, ?,?, ?,?)""",
                        (t_name,
                         "%lf" % frequency,
                         "%g" % float(result.data['RadiativeTransitions'][trans].TransitionProbabilityA),
                         uncertainty,
                         "%lf" % float(lower_state.StateEnergyValue),
                         weight,
                         #upper_state.QuantumNumbers.case,
                         t_hfs,
                         str(upper_state.QuantumNumbers.qn_string),
                         str(lower_state.QuantumNumbers.qn_string),
                         ))
                    num_transitions[t_name] += 1
                except Exception, e:
                    print "Transition has not been inserted:\n Error: %s" % e
def moveDBServer(filename):
    """Execute the DB-migration script contained in *filename*.

    The script is a ';'-separated list of statements.  Statements of the
    form ``name<-spec`` look something up — a SQLAlchemy engine (from the
    local ``Test.db`` registry), a reflected Table, a raw SQL query
    result, or a "simple" join/group-by result — and store it in
    ``save_name`` under ``name``.  Statements without ``<-`` perform
    insert/delete/update operations against objects stored earlier.

    NOTE(review): relies on module-level names (sqlite3, create_engine,
    inspect, MetaData, Table, declarative_base, sessionmaker,
    scoped_session, and the project modules query/simple_query/insert/
    delete/update) being imported elsewhere in this file.
    """
    f = open(filename, 'r')
    read = f.read()
    f.close()
    # Strip newlines and the trailing ';' from the text file, then split
    # the script into individual statements on ';'.
    input_list = read.replace('\n', '').rstrip(';').split(';')
    print(input_list)
    # Dictionary mapping the name on the left of '<-' to the looked-up
    # result (engine, Table object, list of row dicts, ...).
    save_name = {}
    # Process the statements read from the text file one at a time.
    for example in input_list:
        if '<-' in example:
            real = example.split('<-')[1]
            print(real)
            if '.' not in real:
                # Bare name: look up connection info in the local SQLite
                # registry and build a SQLAlchemy engine URL from it.
                conn = sqlite3.connect('Test.db')
                conn.row_factory = sqlite3.Row
                cur = conn.cursor()
                result = cur.execute("select * from db_list where name='%s'" % real)
                for r in result:
                    if r['db_type'] == 'oracle':
                        engine_url = 'oracle+cx_oracle://' + r['id'] + ":" + r['password'] + '@' + r['host_port'] + '/' + r['database']
                        print(engine_url)
                    elif r['db_type'] == 'mysql':
                        engine_url = 'mysql+pymysql://' + r['id'] + ":" + r['password'] + '@' + r['host_port'] + '/' + r['database']
                        print(engine_url)
                    else:
                        # Any other db_type falls through to MSSQL.
                        engine_url = 'mssql+pymssql://' + r['id'] + ":" + r['password'] + '@' + r['host_port'] + '/' + r['database']
                        print(engine_url)
                    engine = create_engine(engine_url, encoding='utf8', echo=True)
                conn.close()
                # NOTE(review): if the registry lookup returns no rows,
                # `engine` is unbound here — confirm inputs always match.
                save_name[example.split('<-')[0]] = engine
            elif 'SQL' in real:
                # Raw SQL query against a source DB.
                engine = save_name[real.split('.')[0]]
                Base = declarative_base()
                Base.metadata.create_all(engine)
                source_query = query.Query(save_name, example, engine)
                source_query.query_to_save_name()
            elif 'simple' in real:
                # Simple join / computed query against a source DB.
                engine = save_name[real.split('.simple.')[0]]
                from_table_name = real.split('.simple.')[1].split('?')[0].split('+')  # tables to join/compute over, '+'-separated
                condition = real.split('.simple.')[1].split('?')[1].split('->')[0].split(',')  # conditions after '?'
                to_column_name = real.split('.simple.')[1].split('?')[1].split('->')[1].split(',')  # column names to store
                inspector2 = inspect(engine)
                md2 = MetaData(bind=engine)
                DBSession = scoped_session(sessionmaker())
                DBSession.configure(bind=engine)
                from_table_list = dict()  # dictionary holding the source DB's Table objects
                # Reflect and map the requested table names from the DB.
                for table_name in inspector2.get_table_names():
                    for name in from_table_name:
                        if table_name == name:
                            tb = Table(table_name, md2, autoload_with=engine, extend_existing=True)
                            from_table_list[name] = tb
                print(from_table_list)
                new_simple = simple_query.Simple_Query(save_name, from_table_name, condition, to_column_name, from_table_list, DBSession)
                total_list = []  # list holding the raw query results
                real_list = []  # list of row dicts that go into the overall result
                if len(condition) == 1:
                    # Exactly one simple comparison condition.
                    if '<' in condition[0]:
                        if 'INT' in condition[0].split('<')[1].split('.')[0]:  # comparing against an INT
                            if '=' in condition[0].split('<')[1].split('.')[0]:  # <=
                                total_list = new_simple.one_condition_int_smaller_same()
                            else:  # <
                                total_list = new_simple.one_condition_int_smaller()
                    elif '>' in condition[0]:
                        # NOTE(review): this branch splits on '<' even though the
                        # condition contains '>' — looks copy-pasted; confirm intended.
                        if 'INT' in condition[0].split('<')[1].split('.')[0]:  # comparing against an INT
                            if '=' in condition[0].split('<')[1].split('.')[0]:  # >=
                                total_list = new_simple.one_condition_int_bigger_same()
                            else:  # >
                                total_list = new_simple.one_condition_int_bigger()
                    elif '=' in condition[0]:
                        # NOTE(review): splits on '<' here as well — verify.
                        if condition[0].split('<')[1].split('.')[0] == 'INT':
                            total_list = new_simple.one_condition_int_same()
                        elif condition[0].split('<')[1].split('.')[0] == 'STR':
                            # NOTE(review): method name 'one_conditon_str_same'
                            # appears misspelled — matches the project API as-is.
                            total_list = new_simple.one_conditon_str_same()
                    else:
                        # Not a comparison: treat as a join condition.
                        total_list = new_simple.one_condition_join()
                else:
                    # More than one condition part: includes a GROUP BY.
                    if 'GB' in condition[1]:
                        # Split on '.': 1. 'GB' marker 2. group-by base table
                        # 3. base column 4. simple aggregate (SUM, MIN, ...)
                        # 5. table to compute over 6. column to compute.
                        group_by_list = condition[1].split('.')
                        # GROUP BY supports count, sum, avg, min, max.
                        if group_by_list[3] == 'COUNT':
                            total_list = new_simple.two_condition_group_by_count(group_by_list)
                        elif group_by_list[3] == 'SUM':
                            total_list = new_simple.two_condition_group_by_sum(group_by_list)
                        elif group_by_list[3] == 'AVG':
                            total_list = new_simple.two_condition_group_by_avg(group_by_list)
                        elif group_by_list[3] == 'MAX':
                            total_list = new_simple.two_condition_group_by_max(group_by_list)
                        elif group_by_list[3] == 'MIN':
                            total_list = new_simple.two_condition_group_by_min(group_by_list)
                # Convert the result rows into plain dicts.
                for row in total_list:
                    real_list.append(row._asdict())
                # Store real_list in the overall result dictionary.
                save_name[example.split('<-')[0]] = real_list
                print(save_name)
            else:
                # Store the matching reflected table from a source DB.
                engine = save_name[real.split('.')[0]]
                inspector2 = inspect(engine)
                md = MetaData(bind=engine)
                for table_name in inspector2.get_table_names():
                    if table_name == real.split('.')[1]:
                        tb = Table(table_name, md, autoload_with=engine, extend_existing=True)
                        save_name[example.split('<-')[0]] = tb  # store the Table object in the overall result dictionary
                print(save_name)
        else:
            # Statements without '<-': the ones that actually execute DML.
            if 'insert' in example:  # insert
                # values() contents, '/ '-separated; each value is a name
                # stored in the result dict plus a column name.
                values = example.split('values')[1].lstrip('(').rstrip(');').split('/ ')
                # Target table name, '.'-separated at the front.
                to_table = example.split('values')[0].split('.')[1]
                # Column names inside insert(); mapped positionally to values.
                columns = example.split('values')[0].split('.')[2].split('insert')[1].lstrip('(').rstrip(')').split(', ')
                engine = save_name[example.split('values')[0].split('.')[0]]
                Base = declarative_base()
                Base.metadata.create_all(engine)
                md = MetaData(bind=engine)
                # NOTE(review): passes save_name[to_table] (the stored object)
                # as the Table name, unlike the lookups above — confirm.
                tb = Table(save_name[to_table], md, autoload_with=engine, extend_existing=True)
                print(values)
                print(to_table)
                print(columns)
                print(tb)
                new_insert = insert.Insert(save_name, engine, to_table, values, columns, tb)
                if len(set([v.split('.')[0] for v in values])) == 1:
                    # All values come from a single stored object.
                    from_table = values[0].split('.')[0]
                    if 4 in [len(v.split('.')) for v in values]:
                        # Some value is a query expression (dotted length 4).
                        new_insert.same_result_have_query(from_table)
                    else:
                        # Source object is either a list of dicts or a Table.
                        if str(type(save_name[from_table])) == "<class 'list'>":
                            new_insert.same_result_only_list(from_table)
                        elif str(type(save_name[from_table])) == "<class 'sqlalchemy.sql.schema.Table'>":
                            # Recover the engine the source Table is bound to.
                            for s in save_name.values():
                                if str(s) == str(save_name[from_table].metadata).lstrip('MetaData(bind=')[:-1]:
                                    engine2 = s
                            print(engine2)
                            DBSession = scoped_session(sessionmaker())
                            DBSession.configure(bind=engine2)
                            new_insert.same_result_only_table(from_table, DBSession)
                else:
                    # Values come from several different stored objects.
                    DBSession = scoped_session(sessionmaker())
                    DBSession.configure(bind=engine)
                    new_insert.different_result(DBSession)
            elif 'delete' in example:  # delete
                engine = save_name[example.split('.')[0]]
                Base = declarative_base()
                Base.metadata.create_all(engine)
                md = MetaData(bind=engine)
                conn = engine.connect()
                to_table = example.split('.')[1]  # table to delete from, '.'-separated at the front
                tb = Table(save_name[to_table], md, autoload_with=engine, extend_existing=True)
                Session = sessionmaker(bind=engine)
                session = Session()
                new_delete = delete.Delete(save_name, conn, to_table, example, session, tb)
                if 'where' in example:
                    # Delete specific rows matching the where clause.
                    if '>' in example.split('where')[1]:
                        if 'INT' in example.split('where')[1]:  # INT comparison
                            if '=' in example.split('where')[1]:  # >=
                                new_delete.int_biggerorsame_delete()
                            else:  # >
                                new_delete.int_bigger_delete()
                    elif '<' in example.split('where')[1]:
                        if 'INT' in example.split('where')[1]:  # INT comparison
                            if '=' in example.split('where')[1]:  # <=
                                new_delete.int_smallerorsame_delete()
                            else:  # <
                                new_delete.int_smaller_delete()
                    else:  # ==
                        if 'INT' in example.split('where')[1]:  # INT comparison
                            new_delete.int_same_delete()
                        else:  # STR comparison
                            new_delete.str_same_delete()
                else:
                    # No where clause: delete every row in the table.
                    new_delete.all_delete()
            elif 'update' in example:  # update
                engine = save_name[example.split('.')[0]]
                Base = declarative_base()
                Base.metadata.create_all(engine)
                md = MetaData(bind=engine)
                to_table = example.split('.')[1]  # table to update, '.'-separated at the front
                # New column/value to set, from set(...).
                set_condition = example.split('set')[1].lstrip('(').rstrip(')').split('=')
                # Condition locating the rows to update, from where(...).
                where = example.split('set')[0].split('where')[1].lstrip('(').rstrip(')').split('=')
                tb = Table(save_name[to_table], md, autoload_with=engine, extend_existing=True)
                Session = sessionmaker(bind=engine)
                session = Session()
                new_update = update.Update(save_name, to_table, set_condition, where, tb, session)
                if 'INT' in where[1]:  # where clause compares an INT
                    if 'INT' in set_condition[1]:  # new value is an INT
                        new_update.int_to_int_update()
                    elif 'STR' in set_condition[1]:  # new value is a STR
                        new_update.int_to_str_update()
                elif 'STR' in where[1]:  # where clause compares a STR
                    if 'INT' in set_condition[1]:  # new value is an INT
                        new_update.str_to_int_update()
                    elif 'STR' in set_condition[1]:  # new value is a STR
                        new_update.str_to_str_update()
def query(self, mapper_or_class, entity_name=None):
    """Build a new Query bound to this Session.

    Accepts either a mapper or a mapped class; a class is first resolved
    to its primary mapper (honoring ``entity_name``) before the Query is
    constructed.
    """
    target = mapper_or_class
    if isinstance(target, type):
        # A class was passed: resolve it to its primary mapper.
        target = class_mapper(target, entity_name=entity_name)
    return query.Query(target, self)