"""
Created on Tue Feb 14 21:01:21 2017

@author: megahertz
http://mp.weixin.qq.com/s?__biz=MzAwOTgzMDk5Ng==&mid=2650833972&idx=1&sn=4de9f9ee81bc8bf85d1e0a4a8f79b0de&chksm=80adb30fb7da3a19817c72ff6f715ee91d6e342eb0402e860e171993bb0293bc4097e2dc4fe9&mpshare=1&scene=1&srcid=1106BPAdPiPCnj6m2Xyt5p2M#wechat_redirect
"""
import tushare as ts
from sqlalchemy import create_engine
from sqlalchemy import types
from multiprocessing import Pool
import pandas as pd

df_type_dic = {'date': types.VARCHAR(20)}
THREAD_POOL_SIZE = 33


def syncStock(code):
    # Retry the download up to 10 times; tushare returns None on failure.
    df = None
    for i in range(0, 10):
        df = ts.get_k_data(code, start='1980-01-01')
        if df is not None:
            break
        print('retry', i, code)
    if df is None:
        return
    # print(df)
    df.to_csv('./output/kdata/' + code + '.csv')
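
# The module imports Pool and defines THREAD_POOL_SIZE but the snippet above
# never uses them. A minimal sketch of how the downloads could be fanned out
# across worker processes; the code list below is a placeholder, not taken
# from the original source.
if __name__ == '__main__':
    codes = ['600000', '000001']  # placeholder stock codes
    pool = Pool(THREAD_POOL_SIZE)
    pool.map(syncStock, codes)
    pool.close()
    pool.join()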
# Note: despite the "mysql" name, this helper writes to Oracle via cx_Oracle.
# ORACLE_PATH1 / ORACLE_PATH2 are module-level connection strings defined
# elsewhere, and chunks() is a list-slicing helper (see the sketch below).
import cx_Oracle


def QA_util_sql_store_mysql(data, table_name, host="localhost", user="******",
                            passwd="123456", db="quantaxis", if_exists="fail"):
    engine = create_engine(ORACLE_PATH1)
    columns = list(data.columns)
    for i in range(len(columns)):
        if columns[i].isdigit():
            columns[i] = "column_%s" % (columns[i]).strip(" ").replace(' ', '').upper()
        elif columns[i] == 'INDEX' or columns[i] == 'index':
            columns[i] = 'INDEX_COL'
        else:
            if columns[i] == 'date' or columns[i] == 'DATE':
                columns[i] = 'order_date'
            columns[i] = columns[i].strip(" ").replace(' ', '').upper()
    columns = ",".join(columns).replace('-', '_').replace('/', '_').replace(';', '')
    data.columns = columns.split(",")
    columns = ",".join(data.columns)
    dtyp = {
        c: types.VARCHAR(126)
        for c in data.columns[data.dtypes == 'object'].tolist()
    }
    try:
        data[:0].to_sql(table_name, engine, if_exists=if_exists, dtype=dtyp)
    except Exception as e:
        print("Table '%s' already exists." % table_name)
        print(e)
    sql_end = ",".join([":" + str(i) for i in range(1, data.shape[1] + 1)])
    sql = "insert into {table_name} ({columns}) values({sql_end})".format(
        table_name=table_name, columns=columns, sql_end=sql_end)
    conn = cx_Oracle.connect(ORACLE_PATH2)
    cursor = conn.cursor()
    # Wide tables get smaller executemany batches.
    break_num = 100000 if data.shape[1] > 30 else 1000000
    try:
        for batch in chunks(
                [tuple(x) for x in data.where(pd.notnull(data), None).values],
                break_num):
            cursor.executemany(sql, batch)
        print("Table {} has been stored into the database".format(table_name))
    except Exception as e:
        conn.rollback()
        print("Error while executing SQL %s: %s" % (sql, e))
    finally:
        cursor.close()
        conn.commit()
        conn.close()
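
# The loader above calls a chunks() helper that is not shown. A common
# definition (an assumption, not recovered from the original source) slices a
# list into fixed-size batches for executemany():
def chunks(lst, n):
    """Yield successive n-sized slices of lst."""
    for i in range(0, len(lst), n):
        yield lst[i:i + n]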
), ( "TIMESTAMP_NTZ", types.TIMESTAMP(timezone=False), ), ( "TIMESTAMP_TZ", types.TIMESTAMP(timezone=True), ), ( "TEXT", types.TEXT(), ), ( "VARCHAR", types.VARCHAR(), ), ) unquoted_types = ( ( "BINARY", types.LargeBinary(), ), ( "BOOLEAN", types.Boolean(), ), ( "DECIMAL", types.DECIMAL(),
def broadcastNewBlock(blockchain):
    print('\tFunction "broadcastNewBlock" executed')
    # newBlock = getLatestBlock(blockchain)  # get the latest block
    importedNodes = readNodes(db_nodeListTableName,
                              db_nodeListTableColumns)  # get server node ip and port
    reqHeader = {'Content-Type': 'application/json; charset=utf-8'}
    reqBody = []
    nodeDataList = []
    connectInfo = 'oracle+cx_oracle://%s:%s@%s:%s/%s' % (
        db_id, db_pw, db_ip, db_port, db_serviceName)
    engine = create_engine(connectInfo)
    for i in blockchain:
        reqBody.append(i.__dict__)
    if len(importedNodes) > 0:
        for node in importedNodes:
            try:
                URL = ("http://" + node[0] + ":" + node[1] +
                       g_receiveNewBlock)  # http://ip:port/node/receiveNewBlock
                res = requests.post(URL, headers=reqHeader,
                                    data=json.dumps(reqBody))
                if res.status_code == 200:
                    print(URL + " sent ok.")
                    print("Response Message " + res.text)
                else:
                    # status_code is an int, so convert before concatenating
                    print(URL + " responding error " + str(res.status_code))
            except Exception:
                print(URL + " is not responding.")
                # write responding results
                nodeData = selectTable(db_nodeListTableName,
                                       db_nodeListTableColumns, engine)
                for i in range(len(nodeData)):
                    row = nodeData.loc[i]
                    if row[0] == node[0] and row[1] == node[1]:
                        print("connection failed " + row[0] + ":" + row[1] +
                              ", number of fails " + str(row[2]))
                        tmp = row[2]
                        # too many failures: drop the node from the list
                        if int(tmp) > g_maximumTry:
                            print(row[0] + ":" + row[1] +
                                  " deleted from node list because of "
                                  "exceeding the request limit")
                        else:
                            row[2] = int(tmp) + 1
                            nodeDataList.append(row)
                    else:
                        nodeDataList.append(row)
                if len(nodeData) > 0:
                    nodeDataFrame = pd.DataFrame(nodeDataList,
                                                 columns=db_nodeListTableColumns)
                    replaceTable(db_nodeListTableName, db_nodeListTableColumns)
                    try:
                        to_varchar = {
                            c: types.VARCHAR(nodeDataFrame[c].str.len().max())
                            for c in nodeDataFrame.columns[
                                nodeDataFrame.dtypes == 'object'].tolist()
                        }
                        nodeDataFrame.to_sql(db_nodeListTableName, engine,
                                             if_exists='append', index=False,
                                             dtype=to_varchar)
                    except Exception:
                        print('Data save error; it looks like an integrity '
                              'or type problem.')
                        to_varchar = {
                            c: types.VARCHAR(nodeData[c].str.len().max())
                            for c in nodeData.columns[
                                nodeData.dtypes == 'object'].tolist()
                        }
                        nodeData.to_sql(db_nodeListTableName, engine,
                                        if_exists='append', index=False,
                                        dtype=to_varchar)
                else:
                    print("caught exception while updating node list")
def init_model(engine, name):
    """Call me before using any of the tables or classes in the model"""
    ## Reflected tables must be defined and mapped here
    # global reflected_table
    # reflected_table = sa.Table("Reflected", meta.metadata, autoload=True,
    #                            autoload_with=engine)
    # orm.mapper(Reflected, reflected_table)
    metas[name] = metaObj()
    metas[name].Session.configure(bind=engine)
    metas[name].engine = engine

    ip_banned_table = schema.Table(
        'ip_banned', metas['realm'].metadata,
        schema.Column('ip', types.VARCHAR(), primary_key=True),
        schema.Column('bandate', MSBigInteger(), primary_key=True),
        schema.Column('unbandate', MSBigInteger()),
        schema.Column('bannedby', types.VARCHAR()),
        schema.Column('banreason', types.VARCHAR()))

    class IpBanned(object):
        pass

    orm.mapper(IpBanned, ip_banned_table)
def run(self):
    stock_df = self.get_codes_by_date(self.date_end)
    # Skip codes whose realtime name starts with 'XD' (ex-dividend).
    df = pd.read_sql(
        "select code,code_name from dm_baostock"
        " where code not in ("
        " select code from realtimestockprice where substr(name,1,2)='XD')",
        con=engine)
    # for index, row in stock_df.iterrows():
    for index, row in df.iterrows():
        print(f'processing {row["code"]} {row["code_name"]}')
        start_time = time.time()
        code8 = row["code"]
        code = code8[0:2] + '.' + code8[2:8]  # e.g. 'sh600000' -> 'sh.600000'
        print(code)
        # adjustflag: price adjustment type; default 3 = no adjustment,
        # 1 = post-adjusted, 2 = pre-adjusted.
        df_code = bs.query_history_k_data_plus(
            code, self.fields,
            start_date=self.date_start, end_date=self.date_end,
            frequency="d", adjustflag="3").get_data()
        df_code.columns = [
            'RQ', 'CODE', 'OPEN', 'HIGH', 'LOW', 'CLOSE', 'VOLUME', 'AMOUNT',
            'ADJUSTFLAG', 'TURN', 'TRADESTATUS', 'PCTCHG', 'PETTM', 'PBMRQ',
            'PSTTM', 'PCFNCFTTM', 'ISST'
        ]
        df_code['RQ'] = pd.to_datetime(df_code['RQ'], format='%Y-%m-%d')
        df_code['CODE'] = code8
        # baostock returns empty strings for missing values; replace them
        # before casting to numeric types.
        for col in ['VOLUME', 'AMOUNT', 'TURN', 'PCTCHG', 'PETTM', 'PBMRQ',
                    'PSTTM', 'PCFNCFTTM', 'ISST']:
            df_code[col].replace('', '0', inplace=True)
        convert_dict = {
            'CODE': str, 'OPEN': float, 'HIGH': float, 'LOW': float,
            'CLOSE': float, 'VOLUME': int, 'AMOUNT': float, 'ADJUSTFLAG': int,
            'TURN': float, 'TRADESTATUS': int, 'PCTCHG': float,
            'PETTM': float, 'PBMRQ': float, 'PSTTM': float,
            'PCFNCFTTM': float, 'ISST': int
        }
        print(df_code.head())
        df_code = df_code.astype(convert_dict)
        print(df_code.tail())
        df_code.to_sql('hq_baostock', engine, index=False, if_exists='append',
                       dtype={
                           'CODE': types.VARCHAR(length=8),
                           'ISST': types.INTEGER()
                       })
        end_time = time.time()
        print('elapsed ' + str(end_time - start_time))
        # Fuller dtype mapping kept for reference in the original source:
        '''
        , dtype={
            'DATE': types.DateTime(),
            'CODE': types.VARCHAR(length=9),
            'OPEN': types.Float(precision=4, asdecimal=True),
            'HIGH': types.Float(precision=4, asdecimal=True),
            'LOW': types.Float(precision=4, asdecimal=True),
            'CLOSE': types.Float(precision=4, asdecimal=True),
            'VOLUME': types.INTEGER(),
            'AMOUNT': types.Float(precision=4, asdecimal=True),
            'ADJUSTFLAG': types.INTEGER(),
            'TURN': types.Float(precision=6, asdecimal=True),
            'TRADESTATUS': types.INTEGER(),
            'PCTCHG': types.Float(precision=6, asdecimal=True),
            'PETTM': types.Float(precision=6, asdecimal=True),
            'PBMRQ': types.Float(precision=6, asdecimal=True),
            'PSTTM': types.Float(precision=6, asdecimal=True),
            'PCFNCFTTM': types.Float(precision=6, asdecimal=True),
            'ISST': types.INTEGER()})
        '''
    self.exit()
'Sending table "{}" to oracle, mode "{}", size "{}", and chunksize "{}".' .format(name_tb, if_exist, len(df), chunksize)) df.to_sql( name=name_tb, con=connection, if_exists=if_exist, dtype=types, index=False, chunksize=chunksize ) # dtype necessary to avoid infering leading to CLOB types (limit comparison with other strings and very slow). # TODO: check df.to_sql above for long integers. Noticed long numbers where rounded. logger.info( "Copied table to oracle '{}', using connection profile '{}'".format( name_tb, connection_profile)) logger = log.setup_logging('Oracle') if __name__ == '__main__': from sqlalchemy import types import pandas as pd data = [['aaa', 10], ['bbb', 12], ['ccc', 3]] df = pd.DataFrame(data, columns=['session_id', 'count_events']) types = { 'session_id': types.VARCHAR(16), 'count_events': types.Integer(), } connection_profile = 'some_connection_profile' name_tb = 'test_table' create_table(df, connection_profile, name_tb, types)
def engine1():
    """
    Take all files from file_path into a DataFrame,
    take the last date DD,
    aggregate objects and KPIs with mean/sum,
    save to csv (storage),
    save to Oracle (tablename).
    """
    print('dwhdb_conn=', dwhdb_conn, ', server=', server, ', interval=', interval)
    tablename = "oper_er_3g_" + server + '_' + interval_name.lower()
    storage = 'temp/oper_er_3g_' + server + '_' + interval_name + '.csv'
    file_path = "Green_log/er_3g_15min/" + dwhdb_conn + "/*.res"
    print(file_path)
    datatype_dict = {
        'LAC': 'uint16', 'CELL_ID': 'uint16',
        'CDR_Attempts': 'uint16', 'CDR_Drops': 'uint16',
        'CunSR_Attempts': 'uint16', 'CunSR_Drops': 'uint16',
        'CunSSR_Attempts': 'uint16', 'CunSSR_Drops': 'uint16',
        'RAB_DR_PS_Attempts': 'uint16', 'RAB_DR_PS_Drops': 'uint16',
        'RAB_FR_PS_Attempts': 'uint16', 'RAB_FR_PS_Drops': 'uint16',
        'RAB_Setup_FR_CS_Attempts': 'uint16', 'RAB_Setup_FR_CS_Drops': 'uint16',
        'RRC_CSetup_FR_CS_Attempts': 'uint16', 'RRC_CSetup_FR_CS_Drops': 'uint16',
        'RRC_CSetup_FR_PS_Attempts': 'uint16', 'RRC_CSetup_FR_PS_Drops': 'uint16',
        'U_CunSR_PS_Attempts': 'uint16', 'U_CunSR_PS_Drops': 'uint16',
        'U_CunSSR_PS_Attempts': 'uint16', 'U_CunSSR_PS_Drops': 'uint16'
    }
    # Take the <interval> most recently modified files.
    filelist = glob.glob(file_path)
    filelist.sort(key=os.path.getmtime)
    filelist = filelist[-interval:]
    df_from_each_file = (pd.read_csv(f, delimiter=';', decimal='.', header=0,
                                     index_col=["LAC", "CELL_ID", "CELL_NAME",
                                                "BeelineObject", 'Element',
                                                'Server'],
                                     parse_dates=["DATETIME_ID"],
                                     dtype=datatype_dict)
                         for f in filelist)
    df100 = pd.concat(df_from_each_file, ignore_index=False)
    df100['DD'] = df100['DATETIME_ID'].dt.date
    del df100['DATETIME_ID']
    # keep only each cell's latest date
    idx = df100.groupby(["LAC", "CELL_ID"])['DD'].transform(max) == df100['DD']
    df200 = df100[idx]
    del df100
    agglist = {
        'DD': ['count'],
        'CDR': 'mean', 'CDR_Attempts': 'sum', 'CDR_Drops': 'sum',
        'CunSR': 'mean', 'CunSR_Attempts': 'sum', 'CunSR_Drops': 'sum',
        'CunSSR': 'mean', 'CunSSR_Attempts': 'sum', 'CunSSR_Drops': 'sum',
        'RAB_DR_PS': 'mean', 'RAB_DR_PS_Attempts': 'sum', 'RAB_DR_PS_Drops': 'sum',
        'RAB_FR_PS': 'mean', 'RAB_FR_PS_Attempts': 'sum', 'RAB_FR_PS_Drops': 'sum',
        'RAB_Setup_FR_CS': 'mean', 'RAB_Setup_FR_CS_Attempts': 'sum',
        'RAB_Setup_FR_CS_Drops': 'sum',
        'RRC_CSetup_FR_CS': 'mean', 'RRC_CSetup_FR_CS_Attempts': 'sum',
        'RRC_CSetup_FR_CS_Drops': 'sum',
        'RRC_CSetup_FR_PS': 'mean', 'RRC_CSetup_FR_PS_Attempts': 'sum',
        'RRC_CSetup_FR_PS_Drops': 'sum',
        'U_CunSR_PS': 'mean', 'U_CunSR_PS_Attempts': 'sum', 'U_CunSR_PS_Drops': 'sum',
        'U_CunSSR_PS': 'mean', 'U_CunSSR_PS_Attempts': 'sum', 'U_CunSSR_PS_Drops': 'sum'
    }
    df400 = df200.groupby(["LAC", "CELL_ID", "CELL_NAME", "BeelineObject",
                           'Element', 'Server', 'DD']).agg(agglist)
    # flatten the column MultiIndex to one level
    df400.columns = ['_'.join(col) for col in df400.columns]
    del df200
    print('df400 saving...')
    print('df400 to file...', storage)
    df400 = df400.round(decimals=2)
    df400.reset_index().to_csv(storage, index=False, header=True, decimal=',',
                               sep='\t', float_format='%.1f')
    print('df400 to Oracle...', tablename)
    from sqlalchemy import create_engine
    from sqlalchemy import types
    from sqlalchemy.types import Date, Numeric  # String, DateTime
    # engine = create_engine('oracle://*****:*****@RAN_dcn')
    engine = create_engine('oracle://*****:*****@10.136.147.37:1521/ran')
    df400 = df400.reset_index(drop=False, inplace=False)
    df400.columns = [str(x).upper() for x in df400.columns]
    df401 = df400
    # VARCHAR(50) for all string (object) columns, Date for DD
    dtyp = {
        c: types.VARCHAR(50)
        for c in df401.columns[df401.dtypes == 'object'].tolist()
    }
    dtyp["DD"] = Date  # overwrite the VARCHAR guess for the date column
    df401.to_sql(tablename, engine, if_exists='replace', index=False, dtype=dtyp)
    df401_counter = len(df401.index)
    del df401
    print('update Oracle log table')
    import datetime
    dt_now = datetime.datetime.now()
    dict_ = {'ADAM_TABLENAME': tablename, 'RAW_QTY': df401_counter, 'DT': dt_now}
    df = pd.DataFrame([dict_])
    dtyp = {
        'ADAM_TABLENAME': types.VARCHAR(50),
        'RAW_QTY': Numeric,
        'DT': types.DateTime
    }
    df.to_sql('oper_log_2', engine, if_exists='append', index=False, dtype=dtyp)
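
# The object-columns-to-VARCHAR dict above is a pattern that recurs in nearly
# every snippet here. A self-contained version of the idiom, assuming only
# pandas and SQLAlchemy (function and column names are illustrative):
import pandas as pd
from sqlalchemy import types

def varchar_dtypes(frame, width=50):
    # Map every object-dtype column to VARCHAR(width) so to_sql does not fall
    # back to CLOB/TEXT on Oracle.
    return {c: types.VARCHAR(width)
            for c in frame.columns[frame.dtypes == 'object'].tolist()}

print(varchar_dtypes(pd.DataFrame({'name': ['a', 'bb'], 'n': [1, 2]})))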
def week_FromAppMetrika(d_start, d_stop):
    wNum = int((datetime.strptime(d_start, '%Y-%m-%d').date()).strftime("%Y%V"))
    d = []
    for i, j in apps.items():
        params_sess = {
            'lang': 'ru',
            'request_domain': 'ru',
            'filters': "exists ym:d:device with (appID=='{}')".format(i),
            'id': 516000,
            'date1': d_start,
            'date2': d_stop,
            'metrics': 'ym:s:sessions',
            'dimensions': 'ym:s:date',
            'sort': '-ym:s:date',
            'offset': 1,
            'limit': 10,
            'accuracy': 1,
            'proposedAccuracy': 'true'
        }
        response_ses = requests.get(
            'https://api.appmetrica.yandex.ru/stat/v1/data',
            params=params_sess,
            headers={'Authorization': 'OAuth ' + token}).json()
        params_users = {
            'lang': 'ru',
            'request_domain': 'ru',
            'filters': "exists ym:d:device with (appID=='{}')".format(i),
            'id': 516000,
            'date1': d_start,
            'date2': d_stop,
            'metrics': 'ym:u:activeUsers',
            'dimensions': 'ym:u:date',
            'sort': '-ym:u:date',
            'include_undefined': 'true',
            'offset': 1,
            'limit': 10,
            'accuracy': 1,
            'proposedAccuracy': 'true'
        }
        response_user = requests.get(
            'https://api.appmetrica.yandex.ru/stat/v1/data',
            params=params_users,
            headers={'Authorization': 'OAuth ' + token}).json()
        d.append([
            wNum, j,
            int(*response_ses['totals']),
            int(*response_user['totals'])
        ])
    # assert len(d) > 1
    df_app_weekly = pd.DataFrame(d, columns=['year_week', 'app', 'sessions', 'users'])
    dtyp = {
        c: types.VARCHAR(df_app_weekly[c].str.len().max())
        for c in df_app_weekly.columns[df_app_weekly.dtypes == 'object'].tolist()
    }
    df_app_weekly.to_sql('app_sess_user_week', conn, schema='dwh_stage',
                         if_exists='append', index=False, dtype=dtyp)
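
# str.len().max() returns a float (and NaN for an all-empty column), which can
# produce VARCHAR(57.0) or a length-less VARCHAR in the generated DDL. A
# defensive variant (an assumption, not present in the original source):
def max_len_or(series, default=32):
    # Longest observed string, with a fallback width for empty columns.
    n = series.str.len().max()
    return default if pd.isna(n) else int(n)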
def store(self, df, table, **kwargs):
    assert isinstance(df, pd.DataFrame), "Invalid data type"
    if_exists = kwargs.get('if_exists', 'fail')
    chunksize = kwargs.get('chunksize', 10000)
    pkey = kwargs.get('pkey', None)
    indexes = kwargs.get('indexes', [])
    checkpoint_column = kwargs.get('checkpoint_column', None)
    checkpoint = kwargs.get('checkpoint')
    last_checkpoint = kwargs.get('last_checkpoint')

    _conn = self.open()
    try:
        if if_exists == 'append' or if_exists == 'update':
            target_table = Table(table, MetaData(), autoload=True,
                                 autoload_with=_conn)
            assert checkpoint_column is not None, \
                "checkpoint_column is required in update mode!"
            assert (isinstance(checkpoint_column, tuple)
                    and len(checkpoint_column) == 2) or \
                isinstance(checkpoint_column, str), \
                "checkpoint_column can only be str or 2-tuple!"
            if isinstance(checkpoint_column, tuple):
                (create_time_column, update_time_column) = checkpoint_column
            else:
                create_time_column = checkpoint_column
                update_time_column = checkpoint_column

            # delete extra records over last checkpoint in append/update mode
            clear_ins = target_table.delete().where(
                Column(update_time_column) >= last_checkpoint)
            _conn.execute(clear_ins)

            if if_exists == 'update':
                assert pkey is not None, "primary key is required in update mode!"
                assert isinstance(pkey, str), \
                    "update mode only supports a single primary key"
                update_df = df[df[create_time_column] < last_checkpoint]
                if not update_df.empty:
                    logger.info(table + ": found {} records to update".format(
                        len(update_df)))
                    update_keys = list(update_df[pkey])
                    delete_ins = target_table.delete().where(
                        Column(pkey).in_(update_keys))
                    _conn.execute(delete_ins)
                if_exists = 'append'
    except NoSuchTableError:
        if_exists = 'replace'

    schema = None
    if table.find('.') >= 0:
        toks = table.split('.', 1)
        schema = toks[0]
        table = toks[1]

    float_columns = list(df.select_dtypes(include=['float64', 'float']).keys())
    if len(float_columns) > 0:
        logger.warn(
            table +
            ": Detected columns with float types {}; you should check whether "
            "this is caused by the NAN-integer column issue of pandas!".format(
                list(float_columns)))

    typehints = dict()
    obj_columns = list(df.select_dtypes(include=['object']).keys())
    if len(obj_columns) > 0:
        logger.warn(
            table +
            ": Detected columns with object types {}, which are automatically "
            "converted to *VARCHAR(256)*; you can override this by specifying "
            "type hints!".format(list(obj_columns)))
    import sqlalchemy.types as sqltypes
    typehints.update(dict((k, sqltypes.VARCHAR(256)) for k in obj_columns))

    # TODO: update typehints with the user-specified ones
    _typehints = kwargs.get('typehints', {})
    from parade.type import stdtype_to_sqltype
    for col, stdtype in _typehints.items():
        logger.info(table + ": Column [{}] is set to type [{}]".format(
            col, str(stdtype)))
        typehints[col] = stdtype_to_sqltype(stdtype)

    def _chunks(_df, _chunksize):
        """Yield successive chunksize-sized slices of the frame."""
        for i in range(0, len(_df), _chunksize):
            yield _df[i:i + _chunksize]  # fixed: slice _df, not the outer df

    # still write to the database for an empty dataframe
    if df.empty:
        df.to_sql(name=table, con=_conn, index=False, schema=schema,
                  if_exists=if_exists, dtype=typehints)
        logger.warn(table + ": Write to {}: empty dataframe".format(table))
    else:
        for idx, chunk in enumerate(_chunks(df, chunksize)):
            if_exists_ = 'append' if idx > 0 else if_exists
            chunk.to_sql(name=table, con=_conn, index=False, schema=schema,
                         if_exists=if_exists_, dtype=typehints)
            logger.info(table + ": Write to {}: rows #{}-#{}".format(
                table, idx * chunksize, (idx + 1) * chunksize))

    if if_exists == 'replace':
        if pkey:
            pkeys = pkey if isinstance(pkey, str) else ','.join(pkey)
            _conn.execute('ALTER TABLE {} ADD PRIMARY KEY ({})'.format(
                table, pkeys))
        for index in indexes:
            index_str = index if isinstance(index, str) else ','.join(index)
            index_name = index if isinstance(index, str) else '_'.join(index)
            _conn.execute('ALTER TABLE {} ADD INDEX idx_{} ({})'.format(
                table, index_name, index_str))
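
# A hypothetical call sketch; the connector instance, table name, column
# names and checkpoint value below are placeholders, not part of any
# documented API for this store method.
connector.store(df, 'warehouse.events',
                if_exists='update',
                pkey='event_id',
                checkpoint_column=('created_at', 'updated_at'),
                last_checkpoint=last_run_ts,
                indexes=['event_id'])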
def native_type(self, registry):
    # Encrypted values are stored as opaque strings, so force a wide VARCHAR;
    # otherwise fall through to the column's natural SQLAlchemy type.
    if self.encrypt_key:
        return types.VARCHAR(1024)
    return self.sqlalchemy_type
from sqlalchemy.sql import operators
from sqlalchemy.sql.expression import BindParameter

from sqlalchemy_solr.solr_type_compiler import SolrTypeCompiler
from sqlalchemy_solr.solrdbapi.array import ARRAY

logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.ERROR)

_type_map = {
    "binary": types.LargeBinary(),
    "boolean": types.Boolean(),
    "pdate": types.DateTime(),
    "pint": types.Integer(),
    "plong": types.BigInteger(),
    "pfloat": types.Float(),
    "pdouble": types.REAL(),
    "string": types.VARCHAR(),
    "text_general": types.Text(),
    "booleans": ARRAY(types.BOOLEAN()),
    "pints": ARRAY(types.Integer()),
    "plongs": ARRAY(types.BigInteger()),
    "pfloats": ARRAY(types.Float()),
    "pdoubles": ARRAY(types.REAL()),
    "strings": ARRAY(types.VARCHAR()),
}


class SolrCompiler(compiler.SQLCompiler):

    merge_ops = (operators.ge, operators.gt, operators.le, operators.lt)
    bounds = {
        operators.ge: "[",
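
# A small lookup sketch over the map above; the VARCHAR fallback for unmapped
# Solr field types is an assumption, not taken from sqlalchemy_solr itself.
def resolve_type(solr_type):
    return _type_map.get(solr_type, types.VARCHAR())

print(resolve_type('pint'))     # Integer()
print(resolve_type('unknown'))  # VARCHAR()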
def initSvr():
    print('\tFunction "initSvr" executed')
    connectInfo = 'oracle+cx_oracle://%s:%s@%s:%s/%s' % (
        db_id, db_pw, db_ip, db_port, db_serviceName)
    engine = create_engine(connectInfo)
    last_line_number = len(
        selectTable(db_nodeListTableName, db_nodeListTableColumns, engine))
    # 1. check if we have a node list; if we don't, request one
    if last_line_number == 0:
        # get nodes...
        for key, value in g_nodeList.items():
            URL = 'http://' + key + ':' + value + '/node/getNode'
            try:
                res = requests.get(URL)
            except requests.exceptions.ConnectionError:
                continue
            if res.status_code == 200:
                print(res.text)
                tmpNodeLists = json.loads(res.text)
                for node in tmpNodeLists:
                    addNode(node)
    # 2. check if we have blockchain data
    last_line_number = len(
        selectTable(db_blockTableName, db_blockTableColumns, engine))
    blockchainList = []
    if last_line_number == 0:
        # get block data...
        for key, value in g_nodeList.items():
            URL = 'http://' + key + ':' + value + '/block/getBlockData'
            try:
                res = requests.get(URL)
            except requests.exceptions.ConnectionError:
                continue
            if res.status_code == 200:
                print(res.text)
                tmpbcData = json.loads(res.text)
                for line in tmpbcData:
                    # index, previousHash, timestamp, data, currentHash, proof
                    block = [
                        line['index'], line['previousHash'], line['timestamp'],
                        line['data'], line['currentHash'], line['proof']
                    ]
                    blockchainList.append(block)
                blockchainData = pd.DataFrame(blockchainList,
                                              columns=db_blockTableColumns)
                replaceTable(db_blockTableName, db_blockTableColumns)
                try:
                    to_varchar = {
                        c: types.VARCHAR(blockchainData[c].str.len().max())
                        for c in blockchainData.columns[
                            blockchainData.dtypes == 'object'].tolist()
                    }
                    blockchainData.to_sql(db_blockTableName, engine,
                                          if_exists='append', index=False,
                                          dtype=to_varchar)
                except Exception as e:
                    print('Data save error in initSvr()', e)
    return 1
def compareMerge(bcDict):
    print('\tFunction "compareMerge" executed')
    heldBlock = []
    bcToValidateForBlock = []

    # Read the held chain (starting from the genesis block) from the database.
    connectInfo = 'oracle+cx_oracle://%s:%s@%s:%s/%s' % (
        db_id, db_pw, db_ip, db_port, db_serviceName)
    engine = create_engine(connectInfo)
    blockReader = selectTable(db_blockTableName, db_blockTableColumns, engine)
    for i in range(len(blockReader)):
        line = blockReader.loc[i]
        block = Block(line[0], line[1], line[2], line[3], line[4], line[5])
        heldBlock.append(block)
    if len(blockReader) == 0:
        print("file open error in compareMerge or no database exists")
        print("call initSvr if this server has just been installed")
        return -1
    # if it fails to read block data from the db (csv)
    if len(heldBlock) == 0:
        print("fail to read")
        return -2

    # transform the given data into Block objects
    for line in bcDict:
        # index, previousHash, timestamp, data, currentHash, proof
        block = Block(line['index'], line['previousHash'], line['timestamp'],
                      line['data'], line['currentHash'], line['proof'])
        bcToValidateForBlock.append(block)

    # compare the given data with the genesis block
    if not isSameBlock(bcToValidateForBlock[0], heldBlock[0]):
        print('Genesis Block Incorrect')
        return -1

    # check whether the broadcast chain is exactly one block ahead of ours
    if not isValidNewBlock(bcToValidateForBlock[-1], heldBlock[-1]):
        # latest block == broadcasted last block
        if isSameBlock(heldBlock[-1], bcToValidateForBlock[-1]):
            print('latest block == broadcasted last block, already updated')
            return 2
        # select the longest chain
        elif len(bcToValidateForBlock) > len(heldBlock):
            # validation
            if not isSameBlock(heldBlock[0], bcToValidateForBlock[0]):
                print("Block Information Incorrect #1")
                return -1
            tempBlocks = [bcToValidateForBlock[0]]
            for i in range(1, len(bcToValidateForBlock)):
                if isValidNewBlock(bcToValidateForBlock[i], tempBlocks[i - 1]):
                    tempBlocks.append(bcToValidateForBlock[i])
                else:
                    return -1
            # [START] save it to the database
            blockchainList = []
            for block in bcToValidateForBlock:
                blockList = [
                    block.index, block.previousHash,
                    str(block.timestamp), block.data, block.currentHash,
                    block.proof
                ]
                blockchainList.append(blockList)
            blockchainData = pd.DataFrame(blockchainList,
                                          columns=db_blockTableColumns)
            blockWriter = pd.concat([blockReader, blockchainData],
                                    axis=0).reset_index(drop=True)
            replaceTable(db_blockTableName, db_blockTableColumns)
            try:
                to_varchar = {
                    c: types.VARCHAR(blockWriter[c].str.len().max())
                    for c in blockWriter.columns[
                        blockWriter.dtypes == 'object'].tolist()
                }
                blockWriter.to_sql(db_blockTableName, engine,
                                   if_exists='append', index=False,
                                   dtype=to_varchar)
            except Exception:
                print('Data save error; it looks like an integrity or type '
                      'problem.')
                to_varchar = {
                    c: types.VARCHAR(blockReader[c].str.len().max())
                    for c in blockReader.columns[
                        blockReader.dtypes == 'object'].tolist()
                }
                blockReader.to_sql(db_blockTableName, engine,
                                   if_exists='append', index=False,
                                   dtype=to_varchar)
            # [END] save it to the database
            return 1
        elif len(bcToValidateForBlock) < len(heldBlock):
            # validation
            # for i in range(0, len(bcToValidateForBlock)):
            #     if isSameBlock(heldBlock[i], bcToValidateForBlock[i]) == False:
            #         print("Block Information Incorrect #1")
            #         return -1
            tempBlocks = [bcToValidateForBlock[0]]
            for i in range(1, len(bcToValidateForBlock)):
                if isValidNewBlock(bcToValidateForBlock[i], tempBlocks[i - 1]):
                    tempBlocks.append(bcToValidateForBlock[i])
                else:
                    return -1
            print("We have a longer chain")
            return 3
        else:
            print("Block Information Incorrect #2")
            return -1
    else:
        # the normal case (e.g. we hold index 100 and receive index 101)
        tempBlocks = [bcToValidateForBlock[0]]
        for i in range(1, len(bcToValidateForBlock)):
            if isValidNewBlock(bcToValidateForBlock[i], tempBlocks[i - 1]):
                tempBlocks.append(bcToValidateForBlock[i])
            else:
                # fixed: lists have no __dict__; print the list itself
                print("Block Information Incorrect #2 " + str(tempBlocks))
                return -1
        print("new block good")
        # validation
        for i in range(0, len(heldBlock)):
            if not isSameBlock(heldBlock[i], bcToValidateForBlock[i]):
                print("Block Information Incorrect #1")
                return -1
        # [START] save it to the database
        blockchainList = []
        for block in bcToValidateForBlock:
            blockList = [
                block.index, block.previousHash,
                str(block.timestamp), block.data, block.currentHash, block.proof
            ]
            blockchainList.append(blockList)
        blockchainData = pd.DataFrame(blockchainList,
                                      columns=db_blockTableColumns)
        blockWriter = pd.concat([blockReader, blockchainData],
                                axis=0).reset_index(drop=True)
        replaceTable(db_blockTableName, db_blockTableColumns)
        try:
            to_varchar = {
                c: types.VARCHAR(blockWriter[c].str.len().max())
                for c in blockWriter.columns[
                    blockWriter.dtypes == 'object'].tolist()
            }
            blockWriter.to_sql(db_blockTableName, engine, if_exists='append',
                               index=False, dtype=to_varchar)
        except Exception:
            print('Data save error; it looks like an integrity or type '
                  'problem.')
            to_varchar = {
                c: types.VARCHAR(blockReader[c].str.len().max())
                for c in blockReader.columns[
                    blockReader.dtypes == 'object'].tolist()
            }
            blockReader.to_sql(db_blockTableName, engine, if_exists='append',
                               index=False, dtype=to_varchar)
        # [END] save it to the database
        return 1
class Book(Base):
    __tablename__ = 'book'

    id = sa.Column(
        postgresql_types.UUID(),
        default=GET_DEFAULT('books.models.Book.id'),
        primary_key=True,
        unique=True,
        nullable=False,
        doc="testtest",
    )
    price = sa.Column(
        postgresql_types.JSONB(),
        primary_key=False,
        unique=False,
        nullable=False,
        doc="testtest",
    )
    title = sa.Column(
        default_types.VARCHAR(length=255, collation="utf8"),
        primary_key=False,
        unique=False,
        nullable=False,
        doc="testtest",
        server_default="val",
    )
    description = sa.Column(
        postgresql_types.TEXT(),
        primary_key=False,
        unique=False,
        nullable=True,
        doc="testtest",
    )
    author_id = sa.Column(
        postgresql_types.INTEGER(),
        sa.ForeignKey(column="author.id", ondelete="SET_NULL"),
        primary_key=False,
        unique=False,
        nullable=True,
        autoincrement=True,
        doc="testtest",
    )
    content = sa.Column(
        postgresql_types.BYTEA(),
        primary_key=False,
        unique=False,
        nullable=False,
        doc="testtest",
    )
    tags = sa.Column(
        postgresql_types.ARRAY(item_type=postgresql_types.VARCHAR, dimensions=1),
        primary_key=False,
        unique=False,
        nullable=False,
        doc="testtest",
    )

    author = sa.orm.relationship(
        'Author',
        foreign_keys="[book.c.author_id]",
        remote_side=None,
        backref="books",
    )
    category = sa.orm.relationship(
        'Category',
        secondary="book_category",
        foreign_keys="[book_category.c.book_id, book_category.c.category_id]",
        remote_side=None,
        backref="books",
        lazy="joined",
    )
# options.add_argument("download.default_directory=\\Users\\KLD300\\Documents\\Python Scripts\\Web_Scrap_EV")
# down_load_dir = 'C:\\Users\\KLD300\\Documents\\Python Scripts\\Web_Scrap_EV'
# chromeOptions = webdriver.ChromeOptions()
# prefs1 = {"download.default_directory": down_load_dir}
# chromeOptions.add_experimental_option("prefs", prefs1)
# browser = webdriver.Chrome(chrome_options=chromeOptions)
# browser.get(a)

MoT = pd.read_csv('C:\\Users\\KLD300\\Downloads\\Towns_by_reg_quarter_data.csv')

# VARCHAR sized to the longest observed string; the int() cast is needed
# because str.len().max() returns a float.
dtyp = {
    c: types.VARCHAR(int(MoT[c].str.len().max()))
    for c in MoT.columns[MoT.dtypes == 'object'].tolist()
}

engine = sqlalchemy.create_engine(
    "mssql+pyodbc://tstrndsql01.jsds1.test/rawZone"
    "?driver=SQL+Server+Native+Client+11.0")
destination_table_name = 'List_MoT_DATA'  # table name
engine.connect()
MoT.to_sql(name=destination_table_name, con=engine, schema='srcDG',
           if_exists='replace', dtype=dtyp, index=False)
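
# Row-by-row inserts through mssql+pyodbc can be slow; SQLAlchemy exposes
# pyodbc's fast_executemany flag (SQLAlchemy 1.3+) to batch them. A sketch
# using the same placeholder DSN as above:
engine = sqlalchemy.create_engine(
    "mssql+pyodbc://tstrndsql01.jsds1.test/rawZone"
    "?driver=SQL+Server+Native+Client+11.0",
    fast_executemany=True)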
def load_dialect_impl(self, dialect):
    # Whatever the declared type, store the value as a 4096-char VARCHAR.
    return dialect.type_descriptor(types.VARCHAR(4096))
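
# load_dialect_impl is the TypeDecorator hook that chooses the concrete
# column type per dialect. A self-contained sketch (an illustration, not the
# surrounding project's code) that stores JSON payloads as a 4096-char string:
import json
from sqlalchemy import types

class JSONText(types.TypeDecorator):
    impl = types.VARCHAR

    def load_dialect_impl(self, dialect):
        # every dialect gets a plain VARCHAR(4096) column
        return dialect.type_descriptor(types.VARCHAR(4096))

    def process_bind_param(self, value, dialect):
        return None if value is None else json.dumps(value)

    def process_result_value(self, value, dialect):
        return None if value is None else json.loads(value)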
    datos[sno_id].columns = columns
    # Push the DataFrame to the DB (the try pairs with the except below).
    try:
        datos[sno_id].to_sql(
            'snotel', engine, if_exists='append', index=False,
            dtype={
                'date': types.DATE,
                'snow_water_equivalent_in_start_of_day_values': types.REAL,
                'precipitation_accumulation_in_start_of_day_values': types.REAL,
                'air_temperature_maximum_deg': types.REAL,
                'air_temperature_minimum_degf': types.REAL,
                'air_temperature_average_degf': types.REAL,
                'precipitation_increment_in': types.REAL,
                'site_name': types.VARCHAR(50),
                'state': types.VARCHAR(3),
                'site_id': types.INTEGER
            })
    except Exception:
        print('Unable to push to DB')

df_snotelids = pd.DataFrame.from_dict(snotelID_to_name, orient='index',
                                      columns=['Snotel Name']).reset_index()
df_snotelids.columns = ['Snotel ID', 'Snotel Name']
df_snotelids.to_sql('SnotelIDs', engine, if_exists='replace', index=False)
class PrestoEngineSpec(BaseEngineSpec): engine = "presto" engine_name = "Presto" _time_grain_expressions = { None: "{col}", "PT1S": "date_trunc('second', CAST({col} AS TIMESTAMP))", "PT1M": "date_trunc('minute', CAST({col} AS TIMESTAMP))", "PT1H": "date_trunc('hour', CAST({col} AS TIMESTAMP))", "P1D": "date_trunc('day', CAST({col} AS TIMESTAMP))", "P1W": "date_trunc('week', CAST({col} AS TIMESTAMP))", "P1M": "date_trunc('month', CAST({col} AS TIMESTAMP))", "P0.25Y": "date_trunc('quarter', CAST({col} AS TIMESTAMP))", "P1Y": "date_trunc('year', CAST({col} AS TIMESTAMP))", "P1W/1970-01-03T00:00:00Z": "date_add('day', 5, date_trunc('week', " "date_add('day', 1, CAST({col} AS TIMESTAMP))))", "1969-12-28T00:00:00Z/P1W": "date_add('day', -1, date_trunc('week', " "date_add('day', 1, CAST({col} AS TIMESTAMP))))", } @classmethod def get_allow_cost_estimate(cls, version: Optional[str] = None) -> bool: return version is not None and StrictVersion(version) >= StrictVersion( "0.319") @classmethod def get_table_names(cls, database: "Database", inspector: Inspector, schema: Optional[str]) -> List[str]: tables = super().get_table_names(database, inspector, schema) if not is_feature_enabled("PRESTO_SPLIT_VIEWS_FROM_TABLES"): return tables views = set(cls.get_view_names(database, inspector, schema)) actual_tables = set(tables) - views return list(actual_tables) @classmethod def get_view_names(cls, database: "Database", inspector: Inspector, schema: Optional[str]) -> List[str]: """Returns an empty list get_table_names() function returns all table names and view names, and get_view_names() is not implemented in sqlalchemy_presto.py https://github.com/dropbox/PyHive/blob/e25fc8440a0686bbb7a5db5de7cb1a77bdb4167a/pyhive/sqlalchemy_presto.py """ if not is_feature_enabled("PRESTO_SPLIT_VIEWS_FROM_TABLES"): return [] if schema: sql = ("SELECT table_name FROM information_schema.views " "WHERE table_schema=%(schema)s") params = {"schema": schema} else: sql = "SELECT table_name FROM information_schema.views" params = {} engine = cls.get_engine(database, schema=schema) with closing(engine.raw_connection()) as conn: with closing(conn.cursor()) as cursor: cursor.execute(sql, params) results = cursor.fetchall() return [row[0] for row in results] @classmethod def _create_column_info(cls, name: str, data_type: types.TypeEngine) -> Dict[str, Any]: """ Create column info object :param name: column name :param data_type: column data type :return: column info object """ return {"name": name, "type": f"{data_type}"} @classmethod def _get_full_name(cls, names: List[Tuple[str, str]]) -> str: """ Get the full column name :param names: list of all individual column names :return: full column name """ return ".".join(column[0] for column in names if column[0]) @classmethod def _has_nested_data_types(cls, component_type: str) -> bool: """ Check if string contains a data type. We determine if there is a data type by whitespace or multiple data types by commas :param component_type: data type :return: boolean """ comma_regex = r",(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)" white_space_regex = r"\s(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)" return (re.search(comma_regex, component_type) is not None or re.search(white_space_regex, component_type) is not None) @classmethod def _split_data_type(cls, data_type: str, delimiter: str) -> List[str]: """ Split data type based on given delimiter. Do not split the string if the delimiter is enclosed in quotes :param data_type: data type :param delimiter: string separator (i.e. 
open parenthesis, closed parenthesis, comma, whitespace) :return: list of strings after breaking it by the delimiter """ return re.split( r"{}(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)".format(delimiter), data_type) @classmethod def _parse_structural_column( # pylint: disable=too-many-locals,too-many-branches cls, parent_column_name: str, parent_data_type: str, result: List[Dict[str, Any]], ) -> None: """ Parse a row or array column :param result: list tracking the results """ formatted_parent_column_name = parent_column_name # Quote the column name if there is a space if " " in parent_column_name: formatted_parent_column_name = f'"{parent_column_name}"' full_data_type = f"{formatted_parent_column_name} {parent_data_type}" original_result_len = len(result) # split on open parenthesis ( to get the structural # data type and its component types data_types = cls._split_data_type(full_data_type, r"\(") stack: List[Tuple[str, str]] = [] for data_type in data_types: # split on closed parenthesis ) to track which component # types belong to what structural data type inner_types = cls._split_data_type(data_type, r"\)") for inner_type in inner_types: # We have finished parsing multiple structural data types if not inner_type and stack: stack.pop() elif cls._has_nested_data_types(inner_type): # split on comma , to get individual data types single_fields = cls._split_data_type(inner_type, ",") for single_field in single_fields: single_field = single_field.strip() # If component type starts with a comma, the first single field # will be an empty string. Disregard this empty string. if not single_field: continue # split on whitespace to get field name and data type field_info = cls._split_data_type(single_field, r"\s") # check if there is a structural data type within # overall structural data type column_type = cls.get_sqla_column_type(field_info[1]) if column_type is None: column_type = types.String() logger.info( "Did not recognize type %s of column %s", field_info[1], field_info[0], ) if field_info[1] == "array" or field_info[1] == "row": stack.append((field_info[0], field_info[1])) full_parent_path = cls._get_full_name(stack) result.append( cls._create_column_info( full_parent_path, column_type)) else: # otherwise this field is a basic data type full_parent_path = cls._get_full_name(stack) column_name = "{}.{}".format( full_parent_path, field_info[0]) result.append( cls._create_column_info( column_name, column_type)) # If the component type ends with a structural data type, do not pop # the stack. We have run across a structural data type within the # overall structural data type. Otherwise, we have completely parsed # through the entire structural data type and can move on. if not (inner_type.endswith("array") or inner_type.endswith("row")): stack.pop() # We have an array of row objects (i.e. array(row(...))) elif inner_type in ("array", "row"): # Push a dummy object to represent the structural data type stack.append(("", inner_type)) # We have an array of a basic data types(i.e. array(varchar)). elif stack: # Because it is an array of a basic data type. We have finished # parsing the structural data type and can move on. 
stack.pop() # Unquote the column name if necessary if formatted_parent_column_name != parent_column_name: for index in range(original_result_len, len(result)): result[index]["name"] = result[index]["name"].replace( formatted_parent_column_name, parent_column_name) @classmethod def _show_columns(cls, inspector: Inspector, table_name: str, schema: Optional[str]) -> List[RowProxy]: """ Show presto column names :param inspector: object that performs database schema inspection :param table_name: table name :param schema: schema name :return: list of column objects """ quote = inspector.engine.dialect.identifier_preparer.quote_identifier full_table = quote(table_name) if schema: full_table = "{}.{}".format(quote(schema), full_table) columns = inspector.bind.execute( "SHOW COLUMNS FROM {}".format(full_table)) return columns column_type_mappings = ( (re.compile(r"^boolean.*", re.IGNORECASE), types.Boolean()), (re.compile(r"^tinyint.*", re.IGNORECASE), TinyInteger()), (re.compile(r"^smallint.*", re.IGNORECASE), types.SmallInteger()), (re.compile(r"^integer.*", re.IGNORECASE), types.Integer()), (re.compile(r"^bigint.*", re.IGNORECASE), types.BigInteger()), (re.compile(r"^real.*", re.IGNORECASE), types.Float()), (re.compile(r"^double.*", re.IGNORECASE), types.Float()), (re.compile(r"^decimal.*", re.IGNORECASE), types.DECIMAL()), ( re.compile(r"^varchar(\((\d+)\))*$", re.IGNORECASE), lambda match: types.VARCHAR(int(match[2])) if match[2] else types.String(), ), ( re.compile(r"^char(\((\d+)\))*$", re.IGNORECASE), lambda match: types.CHAR(int(match[2])) if match[2] else types.CHAR(), ), (re.compile(r"^varbinary.*", re.IGNORECASE), types.VARBINARY()), (re.compile(r"^json.*", re.IGNORECASE), types.JSON()), (re.compile(r"^date.*", re.IGNORECASE), types.DATE()), (re.compile(r"^time.*", re.IGNORECASE), types.Time()), (re.compile(r"^timestamp.*", re.IGNORECASE), types.TIMESTAMP()), (re.compile(r"^interval.*", re.IGNORECASE), Interval()), (re.compile(r"^array.*", re.IGNORECASE), Array()), (re.compile(r"^map.*", re.IGNORECASE), Map()), (re.compile(r"^row.*", re.IGNORECASE), Row()), ) @classmethod def get_columns(cls, inspector: Inspector, table_name: str, schema: Optional[str]) -> List[Dict[str, Any]]: """ Get columns from a Presto data source. This includes handling row and array data types :param inspector: object that performs database schema inspection :param table_name: table name :param schema: schema name :return: a list of results that contain column info (i.e. 
column name and data type) """ columns = cls._show_columns(inspector, table_name, schema) result: List[Dict[str, Any]] = [] for column in columns: # parse column if it is a row or array if is_feature_enabled("PRESTO_EXPAND_DATA") and ( "array" in column.Type or "row" in column.Type): structural_column_index = len(result) cls._parse_structural_column(column.Column, column.Type, result) result[structural_column_index]["nullable"] = getattr( column, "Null", True) result[structural_column_index]["default"] = None continue # otherwise column is a basic data type column_type = cls.get_sqla_column_type(column.Type) if column_type is None: column_type = types.String() logger.info( "Did not recognize type %s of column %s", str(column.Type), str(column.Column), ) column_info = cls._create_column_info(column.Column, column_type) column_info["nullable"] = getattr(column, "Null", True) column_info["default"] = None result.append(column_info) return result @classmethod def _is_column_name_quoted(cls, column_name: str) -> bool: """ Check if column name is in quotes :param column_name: column name :return: boolean """ return column_name.startswith('"') and column_name.endswith('"') @classmethod def _get_fields(cls, cols: List[Dict[str, Any]]) -> List[ColumnClause]: """ Format column clauses where names are in quotes and labels are specified :param cols: columns :return: column clauses """ column_clauses = [] # Column names are separated by periods. This regex will find periods in a # string if they are not enclosed in quotes because if a period is enclosed in # quotes, then that period is part of a column name. dot_pattern = r"""\. # split on period (?= # look ahead (?: # create non-capture group [^\"]*\"[^\"]*\" # two quotes )*[^\"]*$) # end regex""" dot_regex = re.compile(dot_pattern, re.VERBOSE) for col in cols: # get individual column names col_names = re.split(dot_regex, col["name"]) # quote each column name if it is not already quoted for index, col_name in enumerate(col_names): if not cls._is_column_name_quoted(col_name): col_names[index] = '"{}"'.format(col_name) quoted_col_name = ".".join( col_name if cls._is_column_name_quoted(col_name ) else f'"{col_name}"' for col_name in col_names) # create column clause in the format "name"."name" AS "name.name" column_clause = literal_column(quoted_col_name).label(col["name"]) column_clauses.append(column_clause) return column_clauses @classmethod def select_star( # pylint: disable=too-many-arguments cls, database: "Database", table_name: str, engine: Engine, schema: Optional[str] = None, limit: int = 100, show_cols: bool = False, indent: bool = True, latest_partition: bool = True, cols: Optional[List[Dict[str, Any]]] = None, ) -> str: """ Include selecting properties of row objects. We cannot easily break arrays into rows, so render the whole array in its own row and skip columns that correspond to an array's contents. """ cols = cols or [] presto_cols = cols if is_feature_enabled("PRESTO_EXPAND_DATA") and show_cols: dot_regex = r"\.(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)" presto_cols = [ col for col in presto_cols if not re.search(dot_regex, col["name"]) ] return super().select_star( database, table_name, engine, schema, limit, show_cols, indent, latest_partition, presto_cols, ) @classmethod def estimate_statement_cost( # pylint: disable=too-many-locals cls, statement: str, database: "Database", cursor: Any, user_name: str) -> Dict[str, Any]: """ Run a SQL query that estimates the cost of a given statement. 
:param statement: A single SQL statement :param database: Database instance :param cursor: Cursor instance :param username: Effective username :return: JSON response from Presto """ parsed_query = ParsedQuery(statement) sql = parsed_query.stripped() sql_query_mutator = config["SQL_QUERY_MUTATOR"] if sql_query_mutator: sql = sql_query_mutator(sql, user_name, security_manager, database) sql = f"EXPLAIN (TYPE IO, FORMAT JSON) {sql}" cursor.execute(sql) # the output from Presto is a single column and a single row containing # JSON: # # { # ... # "estimate" : { # "outputRowCount" : 8.73265878E8, # "outputSizeInBytes" : 3.41425774958E11, # "cpuCost" : 3.41425774958E11, # "maxMemory" : 0.0, # "networkCost" : 3.41425774958E11 # } # } result = json.loads(cursor.fetchone()[0]) return result @classmethod def query_cost_formatter( cls, raw_cost: List[Dict[str, Any]]) -> List[Dict[str, str]]: """ Format cost estimate. :param raw_cost: JSON estimate from Presto :return: Human readable cost estimate """ def humanize(value: Any, suffix: str) -> str: try: value = int(value) except ValueError: return str(value) prefixes = ["K", "M", "G", "T", "P", "E", "Z", "Y"] prefix = "" to_next_prefix = 1000 while value > to_next_prefix and prefixes: prefix = prefixes.pop(0) value //= to_next_prefix return f"{value} {prefix}{suffix}" cost = [] columns = [ ("outputRowCount", "Output count", " rows"), ("outputSizeInBytes", "Output size", "B"), ("cpuCost", "CPU cost", ""), ("maxMemory", "Max memory", "B"), ("networkCost", "Network cost", ""), ] for row in raw_cost: estimate: Dict[str, float] = row.get("estimate", {}) statement_cost = {} for key, label, suffix in columns: if key in estimate: statement_cost[label] = humanize(estimate[key], suffix).strip() cost.append(statement_cost) return cost @classmethod def adjust_database_uri(cls, uri: URL, selected_schema: Optional[str] = None) -> None: database = uri.database if selected_schema and database: selected_schema = parse.quote(selected_schema, safe="") if "/" in database: database = database.split("/")[0] + "/" + selected_schema else: database += "/" + selected_schema uri.database = database @classmethod def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]: tt = target_type.upper() if tt == utils.TemporalType.DATE: return f"""from_iso8601_date('{dttm.date().isoformat()}')""" if tt == utils.TemporalType.TIMESTAMP: return f"""from_iso8601_timestamp('{dttm.isoformat(timespec="microseconds")}')""" # pylint: disable=line-too-long return None @classmethod def epoch_to_dttm(cls) -> str: return "from_unixtime({col})" @classmethod def get_all_datasource_names( cls, database: "Database", datasource_type: str) -> List[utils.DatasourceName]: datasource_df = database.get_df( "SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S " "ORDER BY concat(table_schema, '.', table_name)".format( datasource_type.upper()), None, ) datasource_names: List[utils.DatasourceName] = [] for _unused, row in datasource_df.iterrows(): datasource_names.append( utils.DatasourceName(schema=row["table_schema"], table=row["table_name"])) return datasource_names @classmethod def expand_data( # pylint: disable=too-many-locals,too-many-branches cls, columns: List[Dict[Any, Any]], data: List[Dict[Any, Any]]) -> Tuple[List[Dict[Any, Any]], List[Dict[ Any, Any]], List[Dict[Any, Any]]]: """ We do not immediately display rows and arrays clearly in the data grid. This method separates out nested fields and data values to help clearly display structural columns. 
Example: ColumnA is a row(nested_obj varchar) and ColumnB is an array(int) Original data set = [ {'ColumnA': ['a1'], 'ColumnB': [1, 2]}, {'ColumnA': ['a2'], 'ColumnB': [3, 4]}, ] Expanded data set = [ {'ColumnA': ['a1'], 'ColumnA.nested_obj': 'a1', 'ColumnB': 1}, {'ColumnA': '', 'ColumnA.nested_obj': '', 'ColumnB': 2}, {'ColumnA': ['a2'], 'ColumnA.nested_obj': 'a2', 'ColumnB': 3}, {'ColumnA': '', 'ColumnA.nested_obj': '', 'ColumnB': 4}, ] :param columns: columns selected in the query :param data: original data set :return: list of all columns(selected columns and their nested fields), expanded data set, listed of nested fields """ if not is_feature_enabled("PRESTO_EXPAND_DATA"): return columns, data, [] # process each column, unnesting ARRAY types and # expanding ROW types into new columns to_process = deque((column, 0) for column in columns) all_columns: List[Dict[str, Any]] = [] expanded_columns = [] current_array_level = None while to_process: column, level = to_process.popleft() if column["name"] not in [ column["name"] for column in all_columns ]: all_columns.append(column) # When unnesting arrays we need to keep track of how many extra rows # were added, for each original row. This is necessary when we expand # multiple arrays, so that the arrays after the first reuse the rows # added by the first. every time we change a level in the nested arrays # we reinitialize this. if level != current_array_level: unnested_rows: Dict[int, int] = defaultdict(int) current_array_level = level name = column["name"] values: Optional[Union[str, List[Any]]] if column["type"].startswith("ARRAY("): # keep processing array children; we append to the right so that # multiple nested arrays are processed breadth-first to_process.append((get_children(column)[0], level + 1)) # unnest array objects data into new rows i = 0 while i < len(data): row = data[i] values = row.get(name) if isinstance(values, str): row[name] = values = destringify(values) if values: # how many extra rows we need to unnest the data? extra_rows = len(values) - 1 # how many rows were already added for this row? current_unnested_rows = unnested_rows[i] # add any necessary rows missing = extra_rows - current_unnested_rows for _ in range(missing): data.insert(i + current_unnested_rows + 1, {}) unnested_rows[i] += 1 # unnest array into rows for j, value in enumerate(values): data[i + j][name] = value # skip newly unnested rows i += unnested_rows[i] i += 1 if column["type"].startswith("ROW("): # expand columns; we append them to the left so they are added # immediately after the parent expanded = get_children(column) to_process.extendleft( (column, level) for column in expanded[::-1]) expanded_columns.extend(expanded) # expand row objects into new columns for row in data: values = row.get(name) or [] if isinstance(values, str): row[name] = values = cast(List[Any], destringify(values)) for value, col in zip(values, expanded): row[col["name"]] = value data = [{k["name"]: row.get(k["name"], "") for k in all_columns} for row in data] return all_columns, data, expanded_columns @classmethod def extra_table_metadata(cls, database: "Database", table_name: str, schema_name: str) -> Dict[str, Any]: metadata = {} indexes = database.get_indexes(table_name, schema_name) if indexes: cols = indexes[0].get("column_names", []) full_table_name = table_name if schema_name and "." 
not in table_name: full_table_name = "{}.{}".format(schema_name, table_name) pql = cls._partition_query(full_table_name, database) col_names, latest_parts = cls.latest_partition(table_name, schema_name, database, show_first=True) if not latest_parts: latest_parts = tuple([None] * len(col_names)) metadata["partitions"] = { "cols": cols, "latest": dict(zip(col_names, latest_parts)), "partitionQuery": pql, } # flake8 is not matching `Optional[str]` to `Any` for some reason... metadata["view"] = cast( Any, cls.get_create_view(database, schema_name, table_name)) return metadata @classmethod def get_create_view(cls, database: "Database", schema: str, table: str) -> Optional[str]: """ Return a CREATE VIEW statement, or `None` if not a view. :param database: Database instance :param schema: Schema name :param table: Table (view) name """ from pyhive.exc import DatabaseError engine = cls.get_engine(database, schema) with closing(engine.raw_connection()) as conn: with closing(conn.cursor()) as cursor: sql = f"SHOW CREATE VIEW {schema}.{table}" try: cls.execute(cursor, sql) polled = cursor.poll() while polled: time.sleep(0.2) polled = cursor.poll() except DatabaseError: # not a VIEW return None rows = cls.fetch_data(cursor, 1) return rows[0][0] @classmethod def handle_cursor(cls, cursor: Any, query: Query, session: Session) -> None: """Updates progress information""" query_id = query.id poll_interval = query.database.connect_args.get( "poll_interval", config["PRESTO_POLL_INTERVAL"]) logger.info("Query %i: Polling the cursor for progress", query_id) polled = cursor.poll() # poll returns dict -- JSON status information or ``None`` # if the query is done # https://github.com/dropbox/PyHive/blob/ # b34bdbf51378b3979eaf5eca9e956f06ddc36ca0/pyhive/presto.py#L178 while polled: # Update the object and wait for the kill signal. 
stats = polled.get("stats", {}) query = session.query(type(query)).filter_by(id=query_id).one() if query.status in [QueryStatus.STOPPED, QueryStatus.TIMED_OUT]: cursor.cancel() break if stats: state = stats.get("state") # if already finished, then stop polling if state == "FINISHED": break completed_splits = float(stats.get("completedSplits")) total_splits = float(stats.get("totalSplits")) if total_splits and completed_splits: progress = 100 * (completed_splits / total_splits) logger.info("Query {} progress: {} / {} " # pylint: disable=logging-format-interpolation "splits".format(query_id, completed_splits, total_splits)) if progress > query.progress: query.progress = progress session.commit() time.sleep(poll_interval) logger.info("Query %i: Polling the cursor for progress", query_id) polled = cursor.poll() @classmethod def _extract_error_message(cls, ex: Exception) -> str: if (hasattr(ex, "orig") and type(ex.orig).__name__ == "DatabaseError" # type: ignore and isinstance(ex.orig[0], dict) # type: ignore ): error_dict = ex.orig[0] # type: ignore return "{} at {}: {}".format( error_dict.get("errorName"), error_dict.get("errorLocation"), error_dict.get("message"), ) if type(ex).__name__ == "DatabaseError" and hasattr( ex, "args") and ex.args: error_dict = ex.args[0] return error_dict.get("message", _("Unknown Presto Error")) return utils.error_msg_from_exception(ex) @classmethod def _partition_query( # pylint: disable=too-many-arguments,too-many-locals cls, table_name: str, database: "Database", limit: int = 0, order_by: Optional[List[Tuple[str, bool]]] = None, filters: Optional[Dict[Any, Any]] = None, ) -> str: """Returns a partition query :param table_name: the name of the table to get partitions from :type table_name: str :param limit: the number of partitions to be returned :type limit: int :param order_by: a list of tuples of field name and a boolean that determines if that field should be sorted in descending order :type order_by: list of (str, bool) tuples :param filters: dict of field name and filter value combinations """ limit_clause = "LIMIT {}".format(limit) if limit else "" order_by_clause = "" if order_by: l = [] for field, desc in order_by: l.append(field + " DESC" if desc else "") order_by_clause = "ORDER BY " + ", ".join(l) where_clause = "" if filters: l = [] for field, value in filters.items(): l.append(f"{field} = '{value}'") where_clause = "WHERE " + " AND ".join(l) presto_version = database.get_extra().get("version") # Partition select syntax changed in v0.199, so check here. # Default to the new syntax if version is unset. 
partition_select_clause = ( f'SELECT * FROM "{table_name}$partitions"' if not presto_version or StrictVersion(presto_version) >= StrictVersion("0.199") else f"SHOW PARTITIONS FROM {table_name}") sql = textwrap.dedent(f"""\ {partition_select_clause} {where_clause} {order_by_clause} {limit_clause} """) return sql @classmethod def where_latest_partition( # pylint: disable=too-many-arguments cls, table_name: str, schema: Optional[str], database: "Database", query: Select, columns: Optional[List[Dict[str, str]]] = None, ) -> Optional[Select]: try: col_names, values = cls.latest_partition(table_name, schema, database, show_first=True) except Exception: # pylint: disable=broad-except # table is not partitioned return None if values is None: return None column_names = {column.get("name") for column in columns or []} for col_name, value in zip(col_names, values): if col_name in column_names: query = query.where(Column(col_name) == value) return query @classmethod def _latest_partition_from_df(cls, df: pd.DataFrame) -> Optional[List[str]]: if not df.empty: return df.to_records(index=False)[0].item() return None @classmethod @cache.memoize(timeout=60) def latest_partition( cls, table_name: str, schema: Optional[str], database: "Database", show_first: bool = False, ) -> Tuple[List[str], Optional[List[str]]]: """Returns col name and the latest (max) partition value for a table :param table_name: the name of the table :param schema: schema / database / namespace :param database: database query will be run against :type database: models.Database :param show_first: displays the value for the first partitioning key if there are many partitioning keys :type show_first: bool >>> latest_partition('foo_table') (['ds'], ('2018-01-01',)) """ indexes = database.get_indexes(table_name, schema) if not indexes: raise SupersetTemplateException( f"Error getting partition for {schema}.{table_name}. " "Verify that this table has a partition.") if len(indexes[0]["column_names"]) < 1: raise SupersetTemplateException( "The table should have one partitioned field") if not show_first and len(indexes[0]["column_names"]) > 1: raise SupersetTemplateException( "The table should have a single partitioned field " "to use this function. You may want to use " "`presto.latest_sub_partition`") column_names = indexes[0]["column_names"] part_fields = [(column_name, True) for column_name in column_names] sql = cls._partition_query(table_name, database, 1, part_fields) df = database.get_df(sql, schema) return column_names, cls._latest_partition_from_df(df) @classmethod def latest_sub_partition(cls, table_name: str, schema: Optional[str], database: "Database", **kwargs: Any) -> Any: """Returns the latest (max) partition value for a table A filtering criteria should be passed for all fields that are partitioned except for the field to be returned. For example, if a table is partitioned by (``ds``, ``event_type`` and ``event_category``) and you want the latest ``ds``, you'll want to provide a filter as keyword arguments for both ``event_type`` and ``event_category`` as in ``latest_sub_partition('my_table', event_category='page', event_type='click')`` :param table_name: the name of the table, can be just the table name or a fully qualified table name as ``schema_name.table_name`` :type table_name: str :param schema: schema / database / namespace :type schema: str :param database: database query will be run against :type database: models.Database :param kwargs: keyword arguments define the filtering criteria on the partition list. 
            There can be many of these.
        :type kwargs: str

        >>> latest_sub_partition('sub_partition_table', event_type='click')
        '2018-01-01'
        """
        indexes = database.get_indexes(table_name, schema)
        part_fields = indexes[0]["column_names"]
        for k in kwargs.keys():  # pylint: disable=consider-iterating-dictionary
            if k not in part_fields:
                msg = f"Field [{k}] is not part of the partitioning key"
                raise SupersetTemplateException(msg)
        if len(kwargs.keys()) != len(part_fields) - 1:
            msg = ("A filter needs to be specified for {} out of the "
                   "{} fields.").format(len(part_fields) - 1, len(part_fields))
            raise SupersetTemplateException(msg)

        # the one partition field without a filter is the field to return
        field_to_return = ""
        for field in part_fields:
            if field not in kwargs.keys():
                field_to_return = field

        sql = cls._partition_query(table_name, database, 1,
                                   [(field_to_return, True)], kwargs)
        df = database.get_df(sql, schema)
        if df.empty:
            return ""
        return df.to_dict()[field_to_return][0]

    @classmethod
    @cache.memoize()
    def get_function_names(cls, database: "Database") -> List[str]:
        """
        Get a list of function names that are able to be called on the database.
        Used for SQL Lab autocomplete.

        :param database: The database to get functions for
        :return: A list of function names usable in the database
        """
        return database.get_df("SHOW FUNCTIONS")["Function"].tolist()

    @classmethod
    def extract_errors(cls, ex: Exception) -> List[Dict[str, Any]]:
        raw_message = cls._extract_error_message(ex)

        column_match = re.search(COLUMN_NOT_RESOLVED_ERROR_REGEX, raw_message)
        if column_match:
            return [
                dataclasses.asdict(
                    SupersetError(
                        error_type=SupersetErrorType.COLUMN_DOES_NOT_EXIST_ERROR,
                        message=__(
                            'We can\'t seem to resolve the column "%(column_name)s" at '
                            "line %(location)s.",
                            column_name=column_match.group(2),
                            location=column_match.group(1),
                        ),
                        level=ErrorLevel.ERROR,
                        extra={"engine_name": cls.engine_name},
                    ))
            ]

        table_match = re.search(TABLE_DOES_NOT_EXIST_ERROR_REGEX, raw_message)
        if table_match:
            return [
                dataclasses.asdict(
                    SupersetError(
                        error_type=SupersetErrorType.TABLE_DOES_NOT_EXIST_ERROR,
                        message=__(
                            'The table "%(table_name)s" does not exist. '
                            "A valid table must be used to run this query.",
                            table_name=table_match.group(1),
                        ),
                        level=ErrorLevel.ERROR,
                        extra={"engine_name": cls.engine_name},
                    ))
            ]

        return [
            dataclasses.asdict(
                SupersetError(
                    error_type=SupersetErrorType.GENERIC_DB_ENGINE_ERROR,
                    message=raw_message,
                    level=ErrorLevel.ERROR,
                    extra={"engine_name": cls.engine_name},
                ))
        ]
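

# The clause assembly in _partition_query above is easier to follow with
# concrete inputs. Below is a minimal standalone sketch (not Superset's API):
# the "logs" table and its "ds" partition column are hypothetical examples,
# and the real method additionally consults database.get_extra() for the
# Presto version before choosing the select syntax.
def build_partition_query(table_name, limit=0, order_by=None, filters=None,
                          new_syntax=True):
    limit_clause = "LIMIT {}".format(limit) if limit else ""
    order_by_clause = ""
    if order_by:
        fields = [field + (" DESC" if desc else "") for field, desc in order_by]
        order_by_clause = "ORDER BY " + ", ".join(fields)
    where_clause = ""
    if filters:
        conditions = [f"{field} = '{value}'" for field, value in filters.items()]
        where_clause = "WHERE " + " AND ".join(conditions)
    select_clause = (f'SELECT * FROM "{table_name}$partitions"'
                     if new_syntax else f"SHOW PARTITIONS FROM {table_name}")
    # join only the non-empty clauses, mirroring the dedented template above
    return " ".join(p for p in (select_clause, where_clause,
                                order_by_clause, limit_clause) if p)


print(build_partition_query("logs", limit=1, order_by=[("ds", True)]))
# SELECT * FROM "logs$partitions" ORDER BY ds DESC LIMIT 1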
def init_db():
    """Define the ``user`` tables and their ORM mappings."""
    # Database definition
    from sqlalchemy import types
    from sqlalchemy.schema import Column, Table, Sequence, ForeignKey
    from sqlalchemy.orm import relationship, backref, mapper
    # Dependencies
    from Planning import Planning
    from Class import Class
    from Cursus import Cursus
    from Campus import Campus

    t_user = Table(
        'user', db.metadata,
        Column('id', types.Integer,
               Sequence('user_seq_id', optional=True),
               nullable=False, primary_key=True),
        Column('name', types.VARCHAR(255), nullable=False),
        Column('firstname', types.VARCHAR(255), nullable=False),
        Column('login', types.VARCHAR(64), nullable=False, unique=True),
        Column('password', types.VARCHAR(255), nullable=False),
        Column('email', types.VARCHAR(255), nullable=False),
        Column('type', types.Enum('admin', 'manager', 'teacher', 'student'),
               nullable=False),
        Column('id_planning', types.Integer, ForeignKey('planning.id'),
               nullable=False),
        Column('id_class', types.Integer, ForeignKey('class.id')),
    )

    # association tables backing the many-to-many relationships mapped below
    t_user_cursus = Table(
        'user_cursus', db.metadata,
        Column('id_user', types.Integer, ForeignKey('user.id'), nullable=False),
        Column('id_cursus', types.Integer, ForeignKey('cursus.id'), nullable=False),
    )
    t_user_campus = Table(
        'user_campus', db.metadata,
        Column('id_user', types.Integer, ForeignKey('user.id'), nullable=False),
        Column('id_campus', types.Integer, ForeignKey('campus.id'), nullable=False),
    )
    t_teacher_campus = Table(
        'teacher_campus', db.metadata,
        Column('id_user', types.Integer, ForeignKey('user.id'), nullable=False),
        Column('id_campus', types.Integer, ForeignKey('campus.id'), nullable=False),
    )
    t_user_class = Table(
        'user_class', db.metadata,
        Column('id_user', types.Integer, ForeignKey('user.id'), nullable=False),
        Column('id_class', types.Integer, ForeignKey('class.id'), nullable=False),
    )

    mapper(User, t_user, properties={
        'planning': relationship(Planning,
                                 backref=backref('type_user', uselist=False)),
        'student_class': relationship(Class, backref="students"),
        'cursus': relationship(Cursus, secondary=t_user_cursus,
                               backref='managers'),
        'campus': relationship(Campus, secondary=t_user_campus,
                               backref='managers'),
        'manager_class': relationship(Class, secondary=t_user_class,
                                      backref='managers'),
        'teacher_campus': relationship(Campus, secondary=t_teacher_campus,
                                       backref='teachers'),
    })
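

# init_db above uses SQLAlchemy's classical (imperative) mapping: the table is
# declared separately from the plain class and the two are tied together by
# mapper(). Below is a self-contained sketch of the same pattern against an
# in-memory SQLite database; the minimal User class and its columns here are
# hypothetical stand-ins. mapper() is the legacy 1.x spelling; this sketch
# uses registry.map_imperatively(), its SQLAlchemy 1.4+ equivalent.
from sqlalchemy import create_engine, types
from sqlalchemy.schema import Column, MetaData, Table
from sqlalchemy.orm import Session, registry

demo_metadata = MetaData()
t_demo_user = Table(
    'user', demo_metadata,
    Column('id', types.Integer, primary_key=True),
    Column('login', types.VARCHAR(64), nullable=False, unique=True),
    Column('email', types.VARCHAR(255), nullable=False),
)


class DemoUser:
    """Plain class; the mapping instruments the columns as attributes."""


registry().map_imperatively(DemoUser, t_demo_user)

demo_engine = create_engine('sqlite://')
demo_metadata.create_all(demo_engine)
with Session(demo_engine) as session:
    u = DemoUser()
    u.login, u.email = 'jdoe', 'jdoe@example.com'
    session.add(u)
    session.commit()
    print(session.query(DemoUser).filter_by(login='jdoe').one().email)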
    def get_columns(self, connection, table_name, schema=None, **kw):
        schema = schema or connection.engine.url.database
        if schema is None:
            schema = connection.execute(
                "select CURRENT_SCHEMA from dual").scalar()
        table_name = self.denormalize_name(table_name)
        schema = self.denormalize_name(schema)
        columns = []
        for row in self._get_all_columns(connection, schema,
                                         info_cache=kw.get("info_cache")):
            if table_name is not None and row[9] != table_name:
                continue
            (colname, coltype, length, precision, scale, nullable, default,
             identity, is_distribution_key) = \
                (row[0], row[1], row[2], row[3], row[4], row[5], row[6],
                 row[7], row[8])

            # FIXME: Missing type support: INTERVAL DAY [(p)] TO SECOND [(fp)],
            # INTERVAL YEAR [(p)] TO MONTH

            # remove ASCII, UTF8 and spaces from char-like types
            coltype = re.sub(r'ASCII|UTF8| ', '', coltype)
            # remove precision and scale addition from numeric types
            coltype = re.sub(r'\(\d+(,\d+)?\)', '', coltype)
            try:
                if coltype == 'VARCHAR':
                    coltype = sqltypes.VARCHAR(length)
                elif coltype == 'CHAR':
                    coltype = sqltypes.CHAR(length)
                elif coltype == 'DECIMAL':
                    # this dialect forces INTTYPESINRESULTSIFPOSSIBLE=y on the
                    # ODBC level, so convert DECIMAL(<=18,0) back to INTEGER
                    # and DECIMAL(36,0) back to BIGINT
                    if scale == 0 and precision <= 18:
                        coltype = sqltypes.INTEGER()
                    elif scale == 0 and precision == 36:
                        coltype = sqltypes.BIGINT()
                    else:
                        coltype = sqltypes.DECIMAL(precision, scale)
                else:
                    coltype = self.ischema_names[coltype]
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'"
                          % (coltype, colname))
                coltype = sqltypes.NULLTYPE

            cdict = {
                'name': self.normalize_name(colname),
                'type': coltype,
                'nullable': nullable,
                'default': default,
                'is_distribution_key': is_distribution_key,
            }
            if identity:
                identity = int(identity)
            # if we have a positive identity value, add a sequence
            if identity is not None and identity >= 0:
                cdict['sequence'] = {'name': ''}
                # TODO: we could encode the current identity value into the
                # column metadata, but it would then also be used as the start
                # value in CREATE statements. For now the current value is
                # ignored. To add it, change the dict to
                # {'name': '', 'start': int(identity)}
            columns.append(cdict)
        return columns
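

# The subtle parts of get_columns above are the regex normalization of the
# raw Exasol type strings and the folding of small DECIMALs back into integer
# types. Below is a standalone sketch of just those steps; the sample type
# strings are hypothetical inputs, not live catalog output.
import re
from sqlalchemy import types as sqltypes


def normalize_exasol_type(coltype, length=None, precision=None, scale=None):
    # strip charset qualifiers and spaces: 'VARCHAR(100) UTF8' -> 'VARCHAR(100)'
    coltype = re.sub(r'ASCII|UTF8| ', '', coltype)
    # strip precision/scale: 'DECIMAL(18,0)' -> 'DECIMAL'
    coltype = re.sub(r'\(\d+(,\d+)?\)', '', coltype)
    if coltype == 'VARCHAR':
        return sqltypes.VARCHAR(length)
    if coltype == 'CHAR':
        return sqltypes.CHAR(length)
    if coltype == 'DECIMAL':
        # INTTYPESINRESULTSIFPOSSIBLE=y folds small scale-0 DECIMALs
        # into integer types, so undo that here
        if scale == 0 and precision <= 18:
            return sqltypes.INTEGER()
        if scale == 0 and precision == 36:
            return sqltypes.BIGINT()
        return sqltypes.DECIMAL(precision, scale)
    return sqltypes.NULLTYPE


print(normalize_exasol_type('VARCHAR(100) UTF8', length=100))          # VARCHAR(100)
print(normalize_exasol_type('DECIMAL(18,0)', precision=18, scale=0))   # INTEGER
print(normalize_exasol_type('DECIMAL(20,2)', precision=20, scale=2))   # DECIMAL(20, 2)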
def writeBlockchain(blockchain):
    print('\tFunction "writeBlockchain" executed')
    blockchainList = []

    for block in blockchain:
        blockList = [
            block.index,
            str(block.previousHash),
            str(block.timestamp),
            str(block.data),
            str(block.currentHash),
            block.proof,
        ]
        blockchainList.append(blockList)

    connectInfo = 'oracle+cx_oracle://%s:%s@%s:%s/%s' % (
        db_id, db_pw, db_ip, db_port, db_serviceName)
    engine = create_engine(connectInfo)

    # read the last block currently stored in the db
    blockReader = selectTable(db_blockTableName, db_blockTableColumns, engine)
    lastLineNumber = len(blockReader)
    for i in range(lastLineNumber):
        lineNumber = i + 1
        if lineNumber == lastLineNumber:
            line = blockReader.loc[i]
            lastBlock = Block(line[0], line[1], line[2],
                              line[3], line[4], line[5])
    try:
        if (lastBlock.index + 1 != int(blockchainList[-1][0])
                or lastLineNumber + 1 != len(blockchainList)):
            print("Index sequence mismatch")
        if lastBlock.index == int(blockchainList[-1][0]):
            print("DB has already been updated")
            return
    except Exception:
        print('Index lookup failed: the table is empty or malformed. '
              'It will be replaced with the full chain.')

    blockWriter = pd.DataFrame(blockchainList, columns=db_blockTableColumns)
    replaceTable(db_blockTableName, db_blockTableColumns)
    try:
        # map object-dtype columns to VARCHAR sized to their longest value
        to_varchar = {
            c: types.VARCHAR(blockWriter[c].str.len().max())
            for c in blockWriter.columns[blockWriter.dtypes == 'object'].tolist()
        }
        blockWriter.to_sql(db_blockTableName, engine, if_exists='append',
                           index=False, dtype=to_varchar)
        print('Blockchain written to db')
    except Exception:
        # saving failed (likely an integrity or type problem);
        # restore the previously read rows instead
        print('Data save error; it seems to be an integrity or type problem.')
        to_varchar = {
            c: types.VARCHAR(blockReader[c].str.len().max())
            for c in blockReader.columns[blockReader.dtypes == 'object'].tolist()
        }
        blockReader.to_sql(db_blockTableName, engine, if_exists='append',
                           index=False, dtype=to_varchar)

    # update txData because it has been mined
    for block in blockchain:
        updateTx(block)

    print('Broadcasting new block to other nodes')
    broadcastNewBlock(blockchain)
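

# The to_varchar pattern in writeBlockchain above sizes each object-dtype
# column as VARCHAR(n), where n is the longest string in that column, before
# handing the frame to DataFrame.to_sql. A standalone sketch of just that
# pattern, using an in-memory SQLite database and hypothetical frame contents:
import pandas as pd
from sqlalchemy import create_engine, inspect, types

demo_df = pd.DataFrame({
    'currentHash': ['ab12cd', 'ef34'],  # object dtype -> VARCHAR(6)
    'proof': [7, 11],                   # int64 dtype  -> left to pandas
})
to_varchar = {
    c: types.VARCHAR(demo_df[c].str.len().max())
    for c in demo_df.columns[demo_df.dtypes == 'object'].tolist()
}
sqlite_engine = create_engine('sqlite://')
demo_df.to_sql('blocks', sqlite_engine, index=False, dtype=to_varchar)

# inspect the DDL that to_sql produced for each column
for col in inspect(sqlite_engine).get_columns('blocks'):
    print(col['name'], col['type'])  # currentHash VARCHAR(6) / proof BIGINT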