def drop_attr_col(db_cursor, table_name, col_name):
    query = "DELETE FROM `attributes` WHERE table_name = '" + table_name + \
            "' AND col_name = '" + col_name + "'"
    print('drop query in de:', query, '<br>')
    result = df.send_query(db_cursor, query)
    if type(result).__name__ == 'str':
        return result # don't drop the column if some fault in `attributes` occurs
    query = 'ALTER TABLE `' + table_name + '` DROP ' + col_name
    return df.send_query(db_cursor, query)

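# Usage sketch for drop_attr_col (the column name below is hypothetical; real
# names follow sf.get_col_name_prefix(table_name) + '_' + id, see get_max_col_id):
# result = drop_attr_col(db_cursor, 'enzymes', 'enz_3')
# if type(result).__name__ == 'str':
#     print('column drop failed:', result, '<br>')
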
def set_from_db(self, db_cursor, al_id, ref_id):
    wc = ' WHERE al_id = ' + str(al_id)
    al_info_row = df.send_query(db_cursor, 'SELECT * FROM `al_info` ' + wc)
    sd_rows = df.send_query(db_cursor, 'SELECT * FROM `al_su_data` ' + wc)
    ad_rows = df.send_query(db_cursor, 'SELECT * FROM `al_data` ' + wc + \
                            ' ORDER BY su_name')
    for query_result in [al_info_row, sd_rows, ad_rows]:
        if type(query_result).__name__ == 'str':
            return ['Can\'t get alignment from database: ' + query_result]
    if al_info_row and self.set_al_info(al_info_row[0]):
        self.set_su_data(sd_rows)
        self.set_aac_cols(ad_rows, ref_id)

def save_block_markup(self, db_cursor):
    for sn, bm in self.f_al.sn_to_bm.items():
        bm_str = fac.bm_to_bm_str(bm)
        query = 'UPDATE `al_su_data` SET block_markup = "' + bm_str + \
                '" WHERE al_id = ' + str(self.al_id) + ' AND su_name = "' + sn + '"'
        result = df.send_query(db_cursor, query)
    self.save_bm = False # avoid constant rewriting with each page load

def mk_attr_cols_from_db(db_cursor, attr_col_descr_list):
    attr_cols = []
    for acd in attr_col_descr_list.all_cols:
        ac = sc.AttrCol(acd)
        query_cols = ', '.join([sf.get_id_name(acd.table_name), acd.col_name])
        query = 'SELECT ' + query_cols + ' FROM `' + acd.table_name + '`'
        attr_val_rows = df.send_query(db_cursor, query)
        ac.fill_from_db_rows(attr_val_rows)
        attr_cols.append(deepcopy(ac))
    return attr_cols

def _init_min_max(self, db_cursor, table_name, col_name, where_case):
    query = self.get_query(table_name, col_name, where_case)
    db_rows = df.send_query(db_cursor, query)
    if type(db_rows).__name__ == 'str' or not db_rows:
        self.min_val = self.max_val = self.chosen_min = self.chosen_max = 0
        return # query error: fall back to zero limits
    # replace NULL values with 0 so the two limits can be sorted and rounded
    val_list = [i if i else 0 for i in db_rows[0].values()]
    min_max = [round(val, 2) for val in sorted(val_list)]
    # min_max = sorted(db_rows[0].values()) #ERR wrong db_rows
    # min_max = sorted(db_rows.values()) # ERR more than two values
    self.min_val, self.max_val = min_max
    self.chosen_min, self.chosen_max = min_max

def __init__(self, f_name, db_cursor, post_data, where_case=''):
    self.f_name = f_name
    query = "SELECT DISTINCT su_name FROM `subunits`" + where_case + \
            " ORDER BY su_name"
    db_rows = df.send_query(db_cursor, query)
    self.su_names = []
    if type(db_rows).__name__ != 'str':
        # print('fc db_rows:', db_rows, '<br>')
        self.su_names = [str(row['su_name']) for row in db_rows]
    self.use_filter = fh.get_use_filter(post_data, f_name)
    self.chosen_names = sorted(fh.get_chosen_su_names(post_data, f_name))

def _init_ranks(self, db_cursor, table_name, col_name, where_case):
    self.ranks, self.chosen_ranks = set(), set()
    query = self.get_query(table_name, col_name, where_case)
    db_rows = df.send_query(db_cursor, query)
    if type(db_rows).__name__ != 'str':
        # print('fc db_rows:', db_rows, '<br>')
        # self.ranks = []
        ranks = [str(row[col_name]) for row in db_rows]
        for rank in ranks:
            if rank:
                self.ranks.add(rank)

def set_filtered_ids(self, db_cursor):
    query = 'SELECT e.enz_id FROM `enzymes` e INNER JOIN organisms o ' + \
            'ON e.org_id = o.org_id'
    eid_wc = sf.get_where_id_case(self.all_ids, 'enzymes', True)
    f_wc = self.filter_set.get_db_query_filter()
    if eid_wc and f_wc:
        wc = ' WHERE ' + eid_wc + ' AND ' + f_wc
        enz_id_rows = df.send_query(db_cursor, query + wc)
        if type(enz_id_rows).__name__ == 'str':
            return # query error: leave filtered_ids empty
        for row in enz_id_rows:
            enz_id = sf.try_val_to_int(row['enz_id'])
            if enz_id:
                self.filtered_ids.append(enz_id)

def set_filtered_enz_ids(self, db_cursor):
    if type(self.filter_set).__name__ != 'FilterSet':
        return
    query = 'SELECT e.enz_id FROM `enzymes` e INNER JOIN organisms o ' + \
            'ON e.org_id = o.org_id'
    eid_wc = sf.get_where_id_case(self.f_al.enz_id_set, 'enzymes', True)
    f_wc = self.filter_set.get_db_query_filter()
    if eid_wc and f_wc:
        wc = ' WHERE ' + eid_wc + ' AND ' + f_wc
        enz_id_rows = df.send_query(db_cursor, query + wc)
        if type(enz_id_rows).__name__ != 'str':
            for row in enz_id_rows:
                self.filt_enz_id_set.add(row['enz_id'])

def get_max_col_id(db_cursor, table_name):
    prefix = sf.get_col_name_prefix(table_name) + '_'
    query = 'SELECT col_name FROM `attributes` WHERE col_name LIKE "' + \
            prefix + '%"'
    result = df.send_query(db_cursor, query)
    if type(result).__name__ == 'str':
        return ['cannot get the latest column id: ' + result]
    id_list = []
    for row in result: # get full col name, cut off the prefix, then get the numeric id
        col_id = sf.try_val_to_int(row.get('col_name', '')[len(prefix):])
        if col_id:
            id_list.append(col_id)
    return max(id_list) if id_list else 0

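# Sketch of how the returned id could be used to name the next attribute column
# (an assumption about the caller; the naming follows the prefix scheme above):
# max_id = get_max_col_id(db_cursor, 'organisms')
# if type(max_id).__name__ != 'list': # not an error message
#     next_col_name = sf.get_col_name_prefix('organisms') + '_' + str(max_id + 1)
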
def get_enz_id_to_enz_descr(db_cursor, enz_id_set=set()):
    enz_id_to_names = dict()
    where_case = get_where_id_case(enz_id_set, 'enzymes', True)
    query = "SELECT o.org_id, o.org_name, e.enz_id, e.enz_name FROM " + \
            "organisms o INNER JOIN enzymes e ON e.org_id = o.org_id "
    query += (' WHERE ' + where_case) if where_case else ''
    db_rows = df.send_query(db_cursor, query)
    if type(db_rows).__name__ != 'str':
        for row in db_rows:
            enz_descr = sc.EnzDescr(row.get('enz_id', 0), row.get('enz_name', ''),
                                    row.get('org_id', 0), row.get('org_name', ''))
            enz_id_to_names[enz_descr.enz_id] = enz_descr
    return enz_id_to_names

def write_attr_col_to_db(db_cursor, attr_col, upd_descr=False):
    # writes vals from attr_col to table_name.col_name where id = value id
    acd, err_msg = attr_col.attr_col_descr, []
    id_name = sf.get_id_name(acd.table_name)
    # if upd_descr, change the attr col description in `attributes`:
    if upd_descr and acd.var_to_val: # values are not empty
        query = "UPDATE `attributes` SET "
        for var, val in acd.var_to_val.items():
            query += var + " = '" + str(val) + "', "
        # strip the trailing comma left by the loop and send the UPDATE query
        query = query[:-2] + " WHERE table_name = '" + acd.table_name + \
                "' AND col_name = '" + acd.col_name + "'"
        # print('write attr col upd attr query:', query, '<br>')
        result = df.send_query(db_cursor, query)
        if type(result).__name__ == 'str':
            err_msg.append(result)
    for id, val in attr_col.id_to_val.items():
        db_field_val = ("'" + str(val) + "'") if val else 'NULL'
        query = "UPDATE `" + acd.table_name + "` SET " + acd.col_name + " = " + \
                db_field_val + " WHERE " + id_name + " = " + str(id)
        result = df.send_query(db_cursor, query)
        if type(result).__name__ == 'str':
            err_msg.append(result)
    return err_msg

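# Each (id, val) pair above becomes one UPDATE of the form (placeholders, not
# real table or column names):
# UPDATE `<acd.table_name>` SET <acd.col_name> = '<val>' WHERE <id_name> = <id>
# Empty values are written as NULL instead of a quoted string.
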
def add_attr_cols(db_cursor, attr_col_descr_list):
    err_msg = []
    #TODO: set col types according to data types! now just 'text' type
    for table in sv.main_tables:
        alter_query = 'ALTER TABLE `' + table + '` '
        for attr_col_descr in attr_col_descr_list.get_table_col_list(table):
            # insert attribute col info into 'attributes':
            result = add_attr_col_descr(db_cursor, attr_col_descr)
            if type(result).__name__ == 'str':
                err_msg.append(result)
                continue
            # if the attribute description is written, add the attribute col to the 'ALTER' query:
            alter_query += ' ADD ' + attr_col_descr.col_name + ' ' + \
                           attr_col_descr.get_db_col_type() + ','
        if ' ADD ' not in alter_query:
            continue # no columns to add for this table
        query = alter_query[:-1] # fix 'ALTER' query: erase last ','
        # alter the current table with the new attribute columns:
        result = df.send_query(db_cursor, query)
        if type(result).__name__ == 'str':
            err_msg.append(result)
    return err_msg

def get_id_to_name(db_cursor, table_name):
    #TODO*: usage for every main table! (now - only for 'organisms') - ready?
    query, id_to_name, id_name = 'SELECT', dict(), get_id_name(table_name)
    if table_name == 'organisms':
        query += ' org_id, org_name FROM `organisms`'
    if table_name == 'enzymes':
        query += ' enz_id, org_name, enz_name FROM `organisms` o INNER JOIN ' + \
                 '`enzymes` e on o.org_id = e.org_id ORDER BY o.org_name'
    if table_name == 'subunits':
        query += ' su_id, org_name, enz_name, su_name FROM `organisms` o ' + \
                 'INNER JOIN `enzymes` e on o.org_id = e.org_id INNER JOIN ' + \
                 '`subunits` s on e.enz_id = s.enz_id ORDER BY o.org_name'
    db_rows = df.send_query(db_cursor, query)
    if type(db_rows).__name__ == 'str':
        return id_to_name # query error: return an empty mapping
    for row in db_rows:
        if id_name not in row.keys():
            continue
        name = row['org_name']
        if 'enz_name' in row.keys():
            name += '##' + row['enz_name']
        if 'su_name' in row.keys():
            name += '##' + row['su_name']
        id_to_name[row[id_name]] = name
    return id_to_name

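# The names returned by get_id_to_name join the hierarchy with '##', e.g. for
# 'subunits' (illustrative values only):
# {su_id: 'Organism name##Enzyme name##Subunit name', ...}
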
def get_al_rows(db_cursor):
    query = "SELECT * FROM `al_info` ORDER BY al_id"
    return df.send_query(db_cursor, query)

def add_attr_col_to_table(db_cursor, table_name, col_name):
    query = 'ALTER TABLE `' + table_name + '` ADD ' + col_name + ' TEXT'
    return df.send_query(db_cursor, query)

def add_attr_col_descr(db_cursor, attr_col_descr):
    attr_vals = sf.quoted_vals(attr_col_descr.all_vars_vals)
    query = 'INSERT INTO `attributes`(' + ', '.join(attr_col_descr.vars) + \
            ') VALUES (' + attr_vals + ')'
    return df.send_query(db_cursor, query) #ERR can be here

def create_fuseblan_db(db_name, db_descr, db_obj=None):
    query_list = ['CREATE DATABASE IF NOT EXISTS `' + db_name + \
                  '` CHARACTER SET utf8 COLLATE utf8_unicode_ci']
    query_list += ['USE `' + db_name + '`'] # use created db to make tables
    table_enum = sf.quoted_vals(sv.main_tables)
    query_list += ['CREATE TABLE IF NOT EXISTS `attributes`' + \
                   '(col_id MEDIUMINT UNSIGNED NOT NULL AUTO_INCREMENT, ' + \
                   'table_name ENUM(' + table_enum + ') NOT NULL, ' + \
                   'col_name VARCHAR(20) NOT NULL UNIQUE, ' + \
                   'col_group VARCHAR(60), ' + \
                   'label VARCHAR(200), ' + \
                   'data_type VARCHAR(30) NOT NULL, ' + \
                   'PRIMARY KEY(col_id))'] # attributes table
    query_list += ['CREATE TABLE IF NOT EXISTS `organisms`' + \
                   '(org_id MEDIUMINT UNSIGNED NOT NULL AUTO_INCREMENT, ' + \
                   'org_name VARCHAR(100) NOT NULL, ' + \
                   'PRIMARY KEY(org_id))'] # organisms table
    query_list += ['CREATE TABLE IF NOT EXISTS `enzymes`' + \
                   '(org_id MEDIUMINT UNSIGNED NOT NULL, ' + \
                   'enz_id MEDIUMINT UNSIGNED NOT NULL AUTO_INCREMENT, ' + \
                   'enz_name VARCHAR(60) NOT NULL, ' + \
                   'PRIMARY KEY(enz_id), FOREIGN KEY (org_id) ' + \
                   'REFERENCES organisms(org_id) ' + \
                   'ON UPDATE CASCADE ON DELETE CASCADE)'] # enzymes table
    query_list += ['CREATE TABLE IF NOT EXISTS `subunits`' + \
                   '(enz_id MEDIUMINT UNSIGNED NOT NULL, ' + \
                   'su_id MEDIUMINT UNSIGNED NOT NULL AUTO_INCREMENT, ' + \
                   'su_name VARCHAR(60) NOT NULL, ' + \
                   'su_seq TEXT NOT NULL, ' + \
                   'PRIMARY KEY(su_id), FOREIGN KEY (enz_id) ' + \
                   'REFERENCES enzymes(enz_id) ' + \
                   'ON UPDATE CASCADE ON DELETE CASCADE)'] # subunits table
    query_list += ['CREATE TABLE IF NOT EXISTS `al_info`' + \
                   '(al_id MEDIUMINT UNSIGNED NOT NULL AUTO_INCREMENT, ' + \
                   'ctime FLOAT UNSIGNED NOT NULL, ' + \
                   'enz_id_set TEXT NOT NULL, su_names TEXT NOT NULL, ' + \
                   'filter_set TEXT, al_descr TEXT, ' + \
                   'PRIMARY KEY(al_id))'] # al_info table
    query_list += ['CREATE TABLE IF NOT EXISTS `al_su_data`' + \
                   '(al_id MEDIUMINT UNSIGNED NOT NULL, ' + \
                   'su_name VARCHAR(60) NOT NULL, ' + \
                   'al_length MEDIUMINT UNSIGNED NOT NULL, ' + \
                   'block_markup TEXT, ' + \
                   'PRIMARY KEY(al_id, su_name), ' + \
                   'FOREIGN KEY (al_id) REFERENCES al_info(al_id) ' + \
                   'ON UPDATE CASCADE ON DELETE CASCADE)'] # al_su_data table
    query_list += ['CREATE TABLE IF NOT EXISTS `al_data`' + \
                   '(al_id MEDIUMINT UNSIGNED NOT NULL, ' + \
                   'su_name VARCHAR(60) NOT NULL, ' + \
                   'enz_id MEDIUMINT UNSIGNED NOT NULL, ' + \
                   'al_seq TEXT NOT NULL, ' + \
                   'PRIMARY KEY(al_id, su_name, enz_id), ' + \
                   'FOREIGN KEY (al_id) REFERENCES al_info(al_id) ' + \
                   'ON UPDATE CASCADE ON DELETE CASCADE, ' + \
                   'FOREIGN KEY (al_id, su_name) ' + \
                   'REFERENCES al_su_data(al_id, su_name) ' + \
                   'ON UPDATE CASCADE ON DELETE CASCADE)'] # al_data table
    # print('<br>'.join(query_list))
    # try to create the database:
    db_link, db_cursor = df.get_link_and_cursor()
    if type(db_link).__name__ == 'str' or type(db_cursor).__name__ == 'str':
        return ['Error creating database'] + [db_link] + [db_cursor]
    for query in query_list:
        result = df.send_query(db_cursor, query)
        if type(result).__name__ == 'str':
            # some error: delete the db and report it:
            # err_msg += ['Deleting new database due to SQL query error: ' + str(result)]
            # del_result = delete_db(db_cursor, db_id)
            # if del_result:
            #     err_msg += ['Got database deletion error:'] + del_result
            db_link.close()
            return [result]
    # if writing to the db is ok, there could still be a folder error; not critical for now
    status = ['New database "' + db_descr + '" has been created with name "' + \
              db_name + '"']
    if db_obj:
        if type(db_obj).__name__ != 'DbObj':
            status += ['Database object obtained from xml is incorrect']
        else:
            status += fill_db_from_db_obj(db_cursor, db_obj)
    db_link.commit()
    db_link.close()
    return status # + ['List of errors occurred during creation:'] + err_msg
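
# Usage sketch for create_fuseblan_db (the database name and description are
# hypothetical; db_obj, if given, should be a DbObj parsed from xml, see
# fill_db_from_db_obj):
# status = create_fuseblan_db('fuseblan_test', 'Test FuseBlAn database')
# for line in status:
#     print(line, '<br>')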