def find_DB_agg_table_columns(dbconf, tblname, AggSQL, cols):
    """Probe the column layout that AggSQL would produce for `tblname`.

    Creates a zero-row temporary table from AggSQL and DESCRIBEs it.
    Returns a pair:
      - dict field -> {'column type': ..., 'order': ...} (or the caller's
        `cols[field]` entry when the probed type is `binary(0)`, i.e. the
        type could not be inferred from an empty result)
      - dict 1-based position -> field name
    """
    described, order_by_pos = dict(), dict()
    probe_sql = 'create temporary table `tmp_{table}` {SQL} limit 0; describe `tmp_{table}`'.format(
        table=tblname, SQL=AggSQL)
    conn = DB.get_connection(dbconf[HOST], dbconf[PORT], dbconf[USER],
                             dbconf[PSWD], dbconf[DATABASE],
                             dbconf[CHARSET], __name__)
    with DB.run_sql(conn, probe_sql) as cur:
        # skip the CREATE TEMPORARY TABLE result set; move to DESCRIBE rows
        cur.nextset()
        position = 0
        for field, dbtype, _isnull, _key, _default, _extra in cur:
            position += 1
            order_by_pos[position] = field
            if dbtype == 'binary(0)':
                # empty probe could not infer a type; fall back to caller's info
                described[field] = cols[field]
            else:
                described[field] = {'column type': dbtype, 'order': position}
    conn.rollback()
    conn.close()
    return described, order_by_pos
def make_sure_DB(dbconf, dbname, conn=None):
    """Ensure the database(s) named by `dbname` exist, creating any missing one.

    dbname: a single database name (str) or a list of names.
    conn:   optional open connection to reuse; when None a connection is
            opened here and closed before returning (the caller's
            connection is never closed).
    """
    own_connection = conn is None
    if own_connection:
        conn = DB.get_connection(dbconf[HOST], dbconf[PORT], dbconf[USER],
                                 dbconf[PSWD], dbconf[DATABASE],
                                 dbconf[CHARSET], __name__)
    # try/finally so an unexpected dbname type or an SQL error cannot leak
    # the connection we opened ourselves (the original only closed it on
    # the str/list happy paths)
    try:
        if isinstance(dbname, str):
            with conn.cursor() as cur:
                cur.execute(
                    "show databases like '{database}'".format(database=dbname))
                if cur.fetchone() is None:
                    cur.execute(
                        "create database if not exists `{database}` charset='utf8' collate='utf8_general_ci'"
                        .format(database=dbname))
                    conn.commit()
        elif isinstance(dbname, list):
            for dbname1 in dbname:
                make_sure_DB(dbconf, dbname1, conn)
    finally:
        if own_connection:
            conn.close()
def get_ym_of_latest(dbconf, tblname, dbname=None):
    """Return (year, month) of the TIME value in `<tblname>_latest`.

    Returns None when the table does not exist, is empty, or its TIME
    value cannot be parsed as a date.  dbname overrides the database
    from dbconf when given.
    """
    conn = DB.get_connection(dbconf[HOST], dbconf[PORT], dbconf[USER],
                             dbconf[PSWD], dbconf[DATABASE],
                             dbconf[CHARSET], __name__)
    # try/finally: the original never closed this connection (leak)
    try:
        with conn.cursor() as cur:
            cur.execute(
                "show tables in `{database}` like '{table}_latest'".format(
                    database=dbconf[DATABASE] if dbname is None else dbname,
                    table=tblname))
            if cur.fetchone() is None:
                # no `<tblname>_latest` table at all
                return None
            sql = 'select `TIME` from {dbpart}`{table}_latest` limit 1'.format(
                table=tblname,
                dbpart='' if dbname is None else '`{database}`.'.format(
                    database=dbname))
            cur.execute(sql)
            row = cur.fetchone()
            if row is not None:
                d = Util.extract_date(row[0])
                if d is not None:
                    return (d.year, d.month)
            return None
    finally:
        conn.close()
def just_run_sql(cusid, tech, CAT, SQL, mod_name=__name__):
    """Open a connection for (cusid, tech, CAT), execute SQL, and close.

    Convenience wrapper: the connection lives only for this one statement
    batch.
    """
    dbconf = ETLDB.get_computed_config(cusid, tech, mod_name)[CAT]
    conn = DB.get_connection(dbconf[RESTRICT.HOST], dbconf[RESTRICT.PORT],
                             dbconf[RESTRICT.USER], dbconf[RESTRICT.PSWD],
                             dbconf[RESTRICT.DB], dbconf[RESTRICT.CHARSET],
                             mod_name)
    # try/finally: the original leaked the connection if run_sql raised
    try:
        DB.run_sql(conn, SQL)
    finally:
        conn.close()
def _get_agg_rules(cusid, tech, owner, tblname, mod_name=__name__):
    """Fetch object-aggregation rules for (owner, tblname) from the CORE DB.

    Returns a list of dicts, one per rule row, with keys:
    colname, obj_level, select, left_join, where, group, ID,
    ta_func, oa_func.
    """
    dbconf = customer.get_default_DB_config(cusid, tech)[RESTRICT.CORE]
    conn = DB.get_connection(dbconf[RESTRICT.HOST], dbconf[RESTRICT.PORT],
                             dbconf[RESTRICT.USER], dbconf[RESTRICT.PSWD],
                             dbconf[RESTRICT.DB], dbconf[RESTRICT.CHARSET],
                             __name__)
    SQL = """
    select upper(nc.counter_name_in_view), oam.obj_level, oam._select,
           oam._left_join, oam._where, oam._group_by, ndt.obj_gid_mapping,
           time_aggregation, object_aggregation
    from NOKIA_DB_TABLES as ndt, OBJ_AGG_MAPPING as oam, NOKIA_COUNTERS as nc
    where owner = '{owner}' and table_name = '{table}'
      and instr(upper(ndt.obj_agg_level), upper(oam.obj_level))
      and instr(upper(nc.raw_measurement_view), upper(ndt.table_name))
      and instr(upper(ndt.owner), upper(nc.adaptation))
    order by obj_level;
    """.format(owner=owner.upper(), table=tblname.upper())
    # column names in SELECT order; zip replaces the old index->name dict
    col_names = ('colname', 'obj_level', 'select', 'left_join', 'where',
                 'group', 'ID', 'ta_func', 'oa_func')
    # try/finally: the original leaked the connection if run_sql raised
    try:
        with DB.run_sql(conn, SQL) as cur:
            result = [dict(zip(col_names, row)) for row in cur]
    finally:
        conn.close()
    return result
_build_DDL_my_alter(dbconf[RESTRICT.DB], dbtblname_latest, dbcols_latest, cols_order_latest, dbcols1_latest)) if len(ddls) > 0: ddl = ';\n'.join(['start transaction'] + ddls + ['commit']) logger(__name__).debug(ddl) with open( str(workpath.joinpath('{table}.sql'.format(table=t))), 'w') as fo: fo.write(ddl) if load == True: conn = DB.get_connection(dbconf[HOST], dbconf[PORT], dbconf[USER], dbconf[PSWD], dbconf[DATABASE], dbconf[CHARSET], __name__) DB.run_sql(conn, ddl) conn.close() # def get_synthesized_columns(cusid, tech, date, CAT, mod_name=__name__): s = [(l, z, f) for l, z, f in Common.extract_info(cusid, tech, date, CAT, __name__)] LRCs = set([l for _, (l, _, _) in enumerate(s)]) zfs = set([(z, f, _get_owner(CAT, z, dsconf[RESTRICT.ZIP_FLT][CAT]), _get_table_name(f, dsconf[RESTRICT.CSV_FLT][CAT])) for _, (_, z, f) in enumerate(s)]) LRC_ow_ta_columns = DSColumn.get_all_columns(cusid, tech, date, CAT, __name__)
def load(DDL_proc, SQL_gen_proc, date, cusid, tech, CAT, mod_name):
    """
    ## DDL_proc: lambda (owner, tblname) -> SQL
    ##- tblname: table name in database
    ## SQL_gen_proc: lambda (sqlformat, lines) -> SQL
    ## - sqlformat: 'insert into some_table ({columns}) values {values};'
    ## - lines: a list of { column: value }
    """
    # guard: both callbacks must be functions of the expected arity
    if not (Util.is_function(DDL_proc) and Util.get_arity(DDL_proc) == 2 and
            Util.is_function(SQL_gen_proc) and Util.get_arity(SQL_gen_proc) == 4):
        return
    cfgbase, dpbase = _get_config(cusid, tech, date, mod_name)
    dbconf_base = ETLDB.get_computed_config(cusid, tech, mod_name)
    dbconf1 = dbconf_base[CAT]
    # per-owner DB configs; owners not listed fall back to dbconf1
    dbconfs = {owner: dbconf_base[owner]
               for owner in (PCOFNSRAW, PCOFNGRAW, IMSCSFRAW, IMSHSSRAW,
                             MADNHRRAW, MADODCRAW, IMSDRARAW, XMLNSSRAW,
                             NOKOBWRAW, NOKOMWRAW, NOKIUMRAW)}
    hsformat = str(cfgbase.joinpath('history/{cat}.{tblname}.{filestem}.upload'))
    fdrpath = dpbase.joinpath(CAT)
    for p in fdrpath.glob('*/*.txt'):
        owner = _get_owner(p.parent.parent.name)
        tblname = p.parent.name
        hspath = pathlib.Path(hsformat.format(cat=CAT, tblname=tblname,
                                              filestem=p.stem))
        if _has_symbol(hspath):
            # already uploaded; skip this file
            continue
        logger(__name__).debug('symbol create: "{}"'.format(hspath))
        # NOTE(review): symbol creation is disabled, so files are NOT marked
        # as uploaded — confirm whether _put_symbol should be re-enabled
        #_put_symbol(hspath)
        dbconf = dbconfs.get(owner, dbconf1)
        ddl = DDL_proc(owner, tblname)
        conn = DB.get_connection(dbconf[RESTRICT.HOST], dbconf[RESTRICT.PORT],
                                 dbconf[RESTRICT.USER], dbconf[RESTRICT.PSWD],
                                 dbconf[RESTRICT.DB], dbconf[RESTRICT.CHARSET],
                                 mod_name)
        # max_allowed_packet bounds the byte size of each generated SQL batch
        block_size = DB.get_variable(conn, 'max_allowed_packet')
        DB.run_sql(conn, ddl)
        # cursor created BEFORE the try: the original created it inside the
        # try, so a failure in conn.cursor() raised NameError in `finally`
        cursor = conn.cursor()
        try:
            cursor.execute('start transaction')
            for sql in _gen_SQL_list(SQL_gen_proc, owner, tblname, p,
                                     block_size):
                sql1 = re.sub(r'(\r\n|\n)', ' ', sql).strip()
                sql2 = sql1[:512]
                logger(__name__).debug('run: length {}; "{}{}"'.format(
                    len(sql1), sql2, '...' if len(sql1) > len(sql2) else ''))
                cursor.execute(sql)
            cursor.connection.commit()
        except Exception:
            # best-effort per-file load: log the failure, continue with the
            # next file rather than aborting the whole run
            traceback.print_exc()
        finally:
            cursor.close()
            conn.close()