def test_db_function_negative_volumn(self):
    """Roll back every failed sub-trade record (flagged for negative volume)
    for all shares known to the database, printing per-symbol timings.

    For each share symbol, fetches its failed sub-trade rows; each row is
    rolled back with error code 9000. Prints one progress line per symbol
    and the total runtime at the end.
    """
    total_start = get_now_time_second()
    symbols = self.db_obj.get_all_share_info()
    print('total symbol: {}'.format(len(symbols)))
    for idx, symbol in enumerate(symbols, start=1):
        loop_start = get_now_time_second()
        failed_rows = self.db_obj.get_all_fail_sub_trade(
            en_symbol_12_digit_code=symbol[0])
        # get_all_fail_sub_trade signals failure by returning False
        if failed_rows is False:
            print(
                '{}: en_symbol_12_digit_code: {} day_count: {} runtime: {}'
                .format(idx, symbol[0], 'fail',
                        get_now_time_second() - loop_start))
            continue
        for row in failed_rows:
            self.db_obj.collect_all_share_data_rollback(
                en_symbol_12_digit_code=row[0],
                tsetmc_id=1,
                date_m=row[1],
                error_msg='for negative volume',
                error_code=9000)
        print(
            '{}: en_symbol_12_digit_code: {} day_count: {} runtime: {}'
            .format(idx, symbol[0], len(failed_rows),
                    get_now_time_second() - loop_start))
    print(get_now_time_second() - total_start)
def run(self, force=False):
    """Run one synchronisation pass from the source DB to the destination DB.

    Returns a 3-tuple ``(total, done, error)``: the counts reported by
    ``run_sync_rule()`` plus the first error encountered (``None`` when the
    pass succeeds or is skipped). When ``force`` is True, the sync-period,
    source-freshness, and destination-freshness checks are all bypassed.
    """
    print('start sync database: {}'.format(self.database_name))
    # 0 acts as "unknown"; the force path never reads the destination time,
    # so the post-sync `current_destination_update_time < ...` check below
    # always refreshes the destination timestamp in that case.
    current_destination_update_time = 0
    if force is True:
        # get source update time
        current_source_update_time, error = self.get_source_update_time()
        if error is not None:
            return 0, 0, error
    else:
        # skip entirely if we are still inside the sync sleep period
        if self.last_sync_time + self.sync_period_time > get_now_time_second():
            print('sync in sleep time')
            self.last_sync_time = get_now_time_second()
            return 0, 0, None
        # get source update time
        current_source_update_time, error = self.get_source_update_time()
        if error is not None:
            return 0, 0, error
        # check whether the source has new data since the last sync
        if current_source_update_time <= self.source_last_sync_update_time:
            print('no new data')
            self.last_sync_time = get_now_time_second()
            return 0, 0, None
        # get destination update time
        current_destination_update_time, error = self.get_destination_update_time()
        if error is not None:
            return 0, 0, error
        # destination already at (or past) the source: nothing to do
        if current_destination_update_time >= current_source_update_time:
            print('no need sync')
            self.last_sync_time = get_now_time_second()
            return 0, 0, None
    total, done, error = self.run_sync_rule()
    if error is not None:
        return total, done, error
    # records were copied: stamp the destination with the source's time
    if done > 0:
        error = self.set_destination_update_time(update_time=current_source_update_time)
        if error is not None:
            return total, done, error
    # even with done == 0, advance a stale destination timestamp
    # (always true on the force path, where it was left at 0)
    if current_destination_update_time < current_source_update_time:
        error = self.set_destination_update_time(update_time=current_source_update_time)
        if error is not None:
            return total, done, error
    self.last_sync_time = get_now_time_second()
    self.source_last_sync_update_time = current_source_update_time
    return total, done, error
def __init__(self, process_id, db_info, lock, lock_acquire_wait, wait_list,
             complete_list, running_list, fail_list, all_process_status,
             api_key, api_secret, timestamp_base_time, is_test, public_ip,
             process_auto_cropped_time, log_file_name=None,
             log_table_name=None, logging_mod=None, log_obj=None):
    """Initialise a worker-process object.

    Wires the shared work queues (wait/complete/running/fail lists), the
    shared lock, a per-process status dict registered into
    ``all_process_status`` under ``str(process_id)``, a logger (created
    here unless ``log_obj`` is supplied), and the ``LoadData`` client used
    to collect candle data.

    ``log_file_name`` / ``log_table_name`` / ``logging_mod`` default to
    'Logging.log' / 'bot_log' / ``Log_Mod.console`` when not provided.
    """
    self.p = colored_print(default_text_color='cyan')
    self.process_id = process_id
    self.wait_list = wait_list
    self.complete_list = complete_list
    self.running_list = running_list
    self.fail_list = fail_list
    self.lock = lock
    self.lock_acquire_wait = lock_acquire_wait
    self.process_auto_cropped_time = process_auto_cropped_time
    # ---- define status dict and register it under this process id ----
    self.obj_status = dict()
    self.all_process_status = all_process_status
    self.all_process_status[str(self.process_id)] = self.obj_status
    self.set_status('stop_flag', False)
    self.set_status('last_run_time', get_now_time_second())
    self.set_status('current_running_share', None)
    self.set_status('state', 'initialing')
    # ------------------------------
    # log parameter defaults
    default_log_file_name = 'Logging.log'
    default_log_table_name = 'bot_log'
    default_logging_mod = Log_Mod.console
    self.log_obj = None
    # ----- create log object -------------------------
    if log_obj is None:
        # BUG FIX: fall back to the defaults only when the caller did NOT
        # supply a value. The original tested `is not None`, which clobbered
        # caller-supplied values with the defaults and left omitted values
        # as None.
        if log_file_name is None:
            log_file_name = default_log_file_name
        if log_table_name is None:
            log_table_name = default_log_table_name
        if logging_mod is None:
            logging_mod = default_logging_mod
        self.log_obj = Logging()
        self.log_obj.logConfig(log_file_name=log_file_name,
                               log_table_name=log_table_name,
                               logging_mod=logging_mod,
                               db_obj=None)
    else:
        self.log_obj = log_obj
    # ----- create data-loading client -------------------------
    self.client_obj = LoadData(api_key=api_key,
                               api_secret=api_secret,
                               db_info=db_info,
                               timestamp_base_time=timestamp_base_time,
                               is_test=is_test,
                               log_obj=self.log_obj,
                               public_ip=public_ip)
def test_db_function_transfer(self, source_table, destination_table):
    """Copy the sub-trade rows of every known share from ``source_table``
    to ``destination_table``, printing per-symbol and total runtimes.
    """
    total_start = get_now_time_second()
    share_rows = self.db_obj.get_all_share_info()
    print('total symbol: {}'.format(len(share_rows)))
    for idx, row in enumerate(share_rows, start=1):
        loop_start = get_now_time_second()
        self.db_obj.transfer_share_sub_trad_data(
            source_table=source_table,
            destination_table=destination_table,
            en_symbol_12_digit_code=row[0])
        print('{}: en_symbol_12_digit_code: {} runtime: {}'.format(
            idx, row[0], get_now_time_second() - loop_start))
    print(get_now_time_second() - total_start)
def _first_with_status(self, status):
    # Return the first managed process currently reporting `status`, or None.
    for p in self.process_list:
        if p.status() == status:
            return p
    return None

def _start_first_with_status(self, status):
    # Restart the first process found in `status`, logging the transition.
    p = self._first_with_status(status)
    if p is not None:
        self.print_c('{0}: change status: {1} --> {2}'.format(
            p.name, p.status(), 'running'), 'green')
        p.server_start()

def run(self):
    """Supervisor loop: keep the pool of back-test server processes sized.

    Each cycle: log the pool, tally process states, then either shrink an
    over-sized pool (retiring the most-finished process first), stop an
    idle waiter when too many are waiting, revive or create a process when
    nothing is waiting, and finally sleep out the remainder of the cycle.
    Runs forever.
    """
    while True:
        start = get_now_time_second()
        self.print_c('server count: {}'.format(len(self.process_list)))
        for p in self.process_list:
            self.print_c('{0}: {1}'.format(p.name, p.status()), 'yellow')
        # ---- tally current process states ----
        counts = {}
        for process in self.process_list:
            s = process.status()
            counts[s] = counts.get(s, 0) + 1
        wait_process_count = counts.get(server_status_waiting, 0)
        sleeping_process_count = counts.get(server_status_sleeping, 0)
        stop_process_count = counts.get(server_status_stop, 0)
        stopping_process_count = counts.get(server_status_stopping, 0)
        shutting_down_process_count = counts.get(server_status_shutting_down, 0)
        shutdown_process_count = counts.get(server_status_shutdown, 0)
        if len(self.process_list) > self.max_process_count:
            # Pool too large: retire one process per cycle, preferring the
            # one furthest along the shutdown path.
            if shutdown_process_count > 0:
                p = self._first_with_status(server_status_shutdown)
                if p is not None:
                    self.process_list.remove(p)
            elif shutting_down_process_count > 0:
                # one is already shutting down; just wait for it
                sleep(1)
            elif stop_process_count > 0:
                p = self._first_with_status(server_status_stop)
                if p is not None:
                    p.server_shutdown()
                    sleep(1)
            elif sleeping_process_count > 0:
                p = self._first_with_status(server_status_sleeping)
                if p is not None:
                    p.server_shutdown()
                    sleep(1)
            elif wait_process_count > 0:
                p = self._first_with_status(server_status_waiting)
                if p is not None:
                    p.server_shutdown()
                    sleep(1)
            elif stopping_process_count > 0:
                p = self._first_with_status(server_status_stopping)
                if p is not None:
                    p.server_shutdown()
                    sleep(1)
            else:
                # no candidate by state: shut down the oldest process
                self.process_list[0].server_shutdown()
                sleep(1)
        elif wait_process_count > (1 + self.max_process_count * 0.2):
            # More than ~20% of capacity is idle-waiting: stop one waiter.
            p = self._first_with_status(server_status_waiting)
            if p is not None:
                self.print_c('{0}: change status: {1} --> {2}'.format(
                    p.name, p.status(), 'stopping'), 'green')
                p.server_stop()
        elif wait_process_count == 0:
            # Nothing is waiting for work: revive a stopped/stopping
            # process, or grow the pool if below capacity.
            if stopping_process_count > 0:
                self._start_first_with_status(server_status_stopping)
            elif stop_process_count > 0:
                self._start_first_with_status(server_status_stop)
            elif shutting_down_process_count > 0:
                self._start_first_with_status(server_status_shutting_down)
            elif shutdown_process_count > 0:
                self._start_first_with_status(server_status_shutdown)
            elif len(self.process_list) < self.max_process_count:
                self.process_sub_name += 1
                process_name = self.process_name + str(self.process_sub_name)
                self.print_c('create new process: {0}'.format(process_name), 'green')
                self.process_list.append(BackTestSingleProcessServerObj(
                    process_name=process_name,
                    max_thread=self.process_max_thread,
                    web_order_db_info=self.web_order_db_info,
                    order_avg_run_time=self.order_avg_run_time))
                self.process_list[-1].start()
        self.print_c('-------------')
        cycle_run_time = get_now_time_second() - start
        if cycle_run_time < self.cycle_time:
            # BUG FIX: the original computed
            #   t = self.cycle_time - start + get_now_time_second()
            # which equals cycle_time + elapsed (an ever-growing over-sleep,
            # since `start` is an absolute timestamp). Sleep only the
            # remaining cycle budget instead.
            t = self.cycle_time - cycle_run_time
            self.print_c('sleep in: {}'.format(t), 'blue')
            sleep(t)
            self.print_c('sleep out', 'blue')
"date_m > {}".format(where_start_date_m) ], [share_info, share_info, None], [ share_second_data, share_second_data, "date_time > {}".format(where_start_date_m * 1000000) ], [share_status, share_status, None], [ share_sub_trad_data, share_sub_trad_data, "date_m > {}".format(where_start_date_m) ], ] obj_2.set_rull_table(obj_2_rule_table_where) start_cycle_time = get_now_time_second() #obj_1.select_query(force=force) obj_2.run(force=force) #obj_3.select_query(force=True) cycle_runtime = get_now_time_second() - start_cycle_time print('cycle_runtime: {}'.format(cycle_runtime)) sleep_time = start_cycle_time + cycle_period_time - get_now_time_second() if sleep_time > 0: print('sleep sync select_process. start sleep: {0} sleep_time: {1}'. format(get_now_time_string(), sleep_time)) sleep(sleep_time)
def run_find_shares_fail_source_data(self, latest_day):
    """Main supervisor loop for `Find_shares_fail_source_data_process_obj`
    worker processes.

    Repeatedly: reads the stop flag and max-process setting from the DB,
    spawns/stops workers to match, kills workers that exceed their max
    runtime or hang, reaps dead workers, and prints a status line. Returns
    True on a clean exit (stop requested or wait list drained), False when
    the loop aborts via the exception handler.
    """
    self.first_start_flag = True
    self.lock_status = False
    # a process is considered hanged after 3x the max runtime per worker slot
    self.hang_time = self.max_run_time * 3 * self.max_process
    while True:
        try:
            self.print_c('get setting')
            main_stop_flag = self.db.get_main_stop_flag(self.client_id)
            if main_stop_flag is False:
                raise Exception('fail to get main_stop_flag')
            self.max_process = self.db.get_max_process_from_db(
                self.client_id)
            if self.max_process is False:
                raise Exception('fail to get max_process')
            # check exit condition (skipped on the very first cycle so
            # workers get a chance to start)
            self.print_c('check exit condition')
            if self.first_start_flag is True:
                self.first_start_flag = False
            else:
                if len(self.process_list) == 0 and (
                        main_stop_flag > 0 or len(self.wait_list) == 0):
                    self.print_c(
                        'exit function from condition: 1')  # end of try
                    result = True
                    break
            self.print_c('check stop flag')
            if main_stop_flag > 0:
                self.print_c('stop all process from user')
                self.stop_all_process()
            else:
                if len(self.process_list) < self.max_process and (
                        len(self.wait_list) > 0 or
                        (len(self.complete_list) + len(self.running_list) +
                         len(self.fail_list)) == 0):
                    # spawn enough workers to reach max_process minus 2;
                    # one extra worker is then always spawned below
                    need_process = self.max_process - len(
                        self.process_list) - 2
                    if need_process > 0:
                        for i in range(need_process):
                            if len(self.wait_list) <= 1:
                                print('empty wait list')
                                break
                            # create a new worker process
                            self.last_process_id += 1
                            self.print_c(
                                'create new process id:{0}'.format(
                                    self.last_process_id))
                            self.data['latest_day'] = latest_day
                            self.set_data()
                            p = Find_shares_fail_source_data_process_obj(
                                self.data)
                            time.sleep(0.5)
                            p.start()
                            self.process_list.append(p)
                    # create one additional worker process each cycle
                    # while below max_process
                    self.last_process_id += 1
                    self.print_c('create new process id:{0}'.format(
                        self.last_process_id))
                    self.data['latest_day'] = latest_day
                    self.set_data()
                    p = Find_shares_fail_source_data_process_obj(self.data)
                    time.sleep(0.5)
                    p.start()
                    self.process_list.append(p)
                elif len(self.process_list) > self.max_process:
                    self.print_c(
                        'stop all process because upper than max process')
                    self.stop_all_process()
            # ------------- check process runtime
            if self.lock.acquire(timeout=self.lock_acquire_wait) is True:
                self.lock_status = True
                self.print_c('check process runtime')
                for p in self.process_list:
                    # only consider workers that have registered status
                    if p.name in self.status:
                        if 'last_run_time' in self.status[p.name]:
                            try:
                                # over max runtime: ask it to stop cleanly
                                if (get_now_time_second() -
                                        self.status[p.name]
                                        ['last_run_time']) > self.max_run_time:
                                    self.print_c(
                                        'stop process {0} because max runtime'
                                        .format(p.name))
                                    self.set_status(
                                        p.name, 'stop_flag', True)
                                # over hang time: force-terminate it
                                if (get_now_time_second() -
                                        self.status[p.name]
                                        ['last_run_time']) > self.hang_time:
                                    # NOTE(review): missing .format(p.name) —
                                    # this prints the literal '{0}' placeholder
                                    self.print_c(
                                        'terminate process {0} because hanged'
                                    )
                                    hang_item = self.status[
                                        p.name]['current_running_share_id']
                                    if hang_item not in self.hang_list:
                                        self.hang_list.append(hang_item)
                                        self.print_c(
                                            'terminate process: {0} ; current_running_share_id: {1} ; process: {2}'
                                            .format(p.name, hang_item, p))
                                        if self.terminate_process_tree(
                                                process=p,
                                                include_parent=True,
                                                timeout=10) is True:
                                            self.running_list.remove(
                                                hang_item)
                            except Exception as e:
                                self.print_c(
                                    'except: {0} ; error: {1} ; process: {2}'
                                    .format('cant chack process runtime',
                                            str(e), p))
                self.lock.release()
                self.lock_status = False
            self.print_c('check not alive process')
            # reap workers that exited on their own
            # NOTE(review): removes from the list being iterated — items
            # after a removed element may be skipped this cycle
            for p in self.process_list:
                if p.is_alive() is False:
                    self.print_c('terminate process: {}'.format(p.name))
                    p.terminate()
                    p.join()
                    self.process_list.remove(p)
            # print status
            self.lock.acquire(timeout=self.lock_acquire_wait)
            self.lock_status = True
            process_symbols = list()
            for p in self.process_list:
                try:
                    a = self.status[p.name]['current_running_share_id']
                    process_symbols.append('{0}'.format(a))
                except:
                    # worker may not have published its status yet
                    pass
            running_list_symbol = list()
            for p in self.running_list:
                running_list_symbol.append('{0}'.format(p))
            hang_symbol = list()
            for p in self.hang_list:
                hang_symbol.append('{0}'.format(p))
            color = 'magenta'
            self.print_c(
                'wait_list:{0} complete_list:{1} running_list:{2} fail_list:{3} '
                'alive_process:{4} hang_symbol:{5} symbols:{6}, hang_symbol:{7}'
                .format(len(self.wait_list), len(self.complete_list),
                        len(self.running_list), len(self.fail_list),
                        (len(self.process_list) - len(self.hang_list)),
                        len(hang_symbol), process_symbols, hang_symbol),
                color)
            self.lock.release()
            self.lock_status = False
            time.sleep(5)
        except Exception as e:
            # abort path: signal all workers to stop, then wait them out
            if self.lock_status is True:
                self.print_c('main except: lock status: True :' + str(3) +
                             ' : ' + str(e))
                for p in self.process_list:
                    try:
                        self.print_c('terminate thread: ' + p.name)
                        self.set_status(p.name, 'stop_flag', True)
                    except Exception as e:
                        # NOTE(review): inner `e` shadows the outer exception
                        self.print_c('main except: ' + str(4) + ' : ' +
                                     str(e))
                self.lock.release()
            else:
                self.print_c('main except: lock status: False :' + str(5) +
                             ' : ' + str(e))
                self.lock.acquire()
                for p in self.process_list:
                    try:
                        self.print_c('terminate thread: ' + p.name)
                        self.set_status(p.name, 'stop_flag', True)
                    except Exception as e:
                        self.print_c('main except: ' + str(6) + ' : ' +
                                     str(e))
                self.lock.release()
            self.print_c('wait to exit all thread')
            while len(self.process_list) > 0:
                try:
                    time.sleep(5)
                    for p in self.process_list:
                        self.print_c('terminate process: {}'.format(
                            p.name))
                        p.terminate()
                    for p in self.process_list:
                        self.print_c('wait to exit thread: {}'.format(
                            p.name))
                        p.join()
                except:
                    pass
            self.print_c('exit main: 2')
            result = False
            break
    return result
def run_collect_all_share_data(self):
    """Main supervisor loop for `collect_all_share_data_process_obj`
    worker processes.

    Repeatedly: reads the stop flag and max-process setting from the DB,
    spawns/stops workers to match, kills workers that exceed their max
    runtime or hang (rolling back the hung share's data on successful
    termination), reaps dead workers, and prints a status line. Returns
    True on a clean exit (stop requested or wait list drained), False
    when the loop aborts via the exception handler.
    """
    self.first_start_flag = True
    self.lock_status = False
    # a process is considered hanged after 3x the max runtime per worker slot
    self.hang_time = self.max_run_time * 3 * self.max_process
    while True:
        try:
            self.print_c('get setting')
            main_stop_flag = self.db.get_main_stop_flag(self.client_id)
            if main_stop_flag is False:
                raise Exception('fail to get main_stop_flag')
            self.max_process = self.db.get_max_process_from_db(
                self.client_id)
            if self.max_process is False:
                raise Exception('fail to get max_process')
            # check exit condition (skipped on the very first cycle so
            # workers get a chance to start)
            self.print_c('check exit condition')
            if self.first_start_flag is True:
                self.first_start_flag = False
            else:
                if len(self.process_list) == 0 and (
                        main_stop_flag > 0 or len(self.wait_list) == 0):
                    self.print_c(
                        'exit function from condition: 1')  # end of try
                    result = True
                    break
            self.print_c('check stop flag')
            if main_stop_flag > 0:
                self.print_c('stop all process from user')
                self.stop_all_process()
            else:
                if len(self.process_list) < self.max_process and (
                        len(self.wait_list) > 0 or
                        (len(self.complete_list) + len(self.running_list) +
                         len(self.fail_list)) == 0):
                    # spawn enough workers to reach max_process minus 2;
                    # one extra worker is then always spawned below
                    need_process = self.max_process - len(
                        self.process_list) - 2
                    if need_process > 0:
                        for i in range(need_process):
                            if len(self.wait_list) <= 1:
                                print('empty wait list')
                                break
                            # create a new worker process
                            self.last_process_id += 1
                            self.print_c(
                                'create new process id:{0}'.format(
                                    self.last_process_id))
                            self.set_data()
                            p = collect_all_share_data_process_obj(
                                self.data)
                            time.sleep(0.5)
                            p.start()
                            self.process_list.append(p)
                    # create one additional worker process each cycle
                    # while below max_process
                    self.last_process_id += 1
                    self.print_c('create new process id:{0}'.format(
                        self.last_process_id))
                    self.set_data()
                    p = collect_all_share_data_process_obj(self.data)
                    time.sleep(0.5)
                    p.start()
                    self.process_list.append(p)
                elif len(self.process_list) > self.max_process:
                    self.print_c(
                        'stop all process because upper than max process')
                    self.stop_all_process()
            # ------------- check process runtime
            if self.lock.acquire(timeout=self.lock_acquire_wait) is True:
                self.lock_status = True
                self.print_c('check process runtime')
                for p in self.process_list:
                    # only consider workers that have registered status
                    if p.name in self.status:
                        if 'last_run_time' in self.status[p.name]:
                            try:
                                # over max runtime: ask it to stop cleanly
                                if (get_now_time_second() -
                                        self.status[p.name]
                                        ['last_run_time']) > self.max_run_time:
                                    self.print_c(
                                        'stop process {0} because max runtime'
                                        .format(p.name))
                                    self.set_status(
                                        p.name, 'stop_flag', True)
                                # over hang time: force-terminate it
                                if (get_now_time_second() -
                                        self.status[p.name]
                                        ['last_run_time']) > self.hang_time:
                                    # NOTE(review): missing .format(p.name) —
                                    # this prints the literal '{0}' placeholder
                                    self.print_c(
                                        'terminate process {0} because hanged'
                                    )
                                    hang_item = self.status[
                                        p.name]['current_running_share']
                                    if hang_item not in self.hang_list:
                                        self.hang_list.append(hang_item)
                                        self.print_c(
                                            'terminate process: {0} ; en_symbol_12_digit_code: {1} ; tsetmc_id: {2} ; date_m: {3} ; process: {4}'
                                            .format(
                                                p.name, hang_item[0],
                                                hang_item[1], hang_item[2],
                                                p))
                                        if self.terminate_process_tree(
                                                process=p,
                                                include_parent=True,
                                                timeout=10) is True:
                                            # terminated: roll back the hung
                                            # share's partial data
                                            self.db.collect_all_share_data_rollback(
                                                en_symbol_12_digit_code=hang_item[0],
                                                tsetmc_id=str(hang_item[1]),
                                                date_m=hang_item[2],
                                                error_msg='hang process',
                                                error_code=9000)
                                            self.running_list.remove(
                                                hang_item)
                                        else:
                                            # could not terminate: record the
                                            # share as failed/hung
                                            # NOTE(review): passes hang_item[1]
                                            # (tsetmc_id?) as date_m, unlike the
                                            # rollback above — confirm intent
                                            self.db.add_share_to_fail_hang_share(
                                                en_symbol_12_digit_code=hang_item[0],
                                                date_m=hang_item[1])
                            except Exception as e:
                                self.print_c(
                                    'except: {0} ; error: {1} ; process: {2}'
                                    .format('cant chack process runtime',
                                            str(e), p))
                self.lock.release()
                self.lock_status = False
            self.print_c('check not alive process')
            # reap workers that exited on their own
            # NOTE(review): removes from the list being iterated — items
            # after a removed element may be skipped this cycle
            for p in self.process_list:
                if p.is_alive() is False:
                    self.print_c('terminate process: {}'.format(p.name))
                    p.terminate()
                    p.join()
                    self.process_list.remove(p)
            # print status
            self.lock.acquire(timeout=self.lock_acquire_wait)
            self.lock_status = True
            process_symbols = list()
            for p in self.process_list:
                try:
                    a = self.status[p.name]['current_running_share']
                    process_symbols.append('{0}:{1}:{2}'.format(
                        a[0], a[1], a[2]))
                except:
                    # worker may not have published its status yet
                    pass
            running_list_symbol = list()
            for p in self.running_list:
                running_list_symbol.append('{0}:{1}:{2}'.format(
                    p[0], p[1], p[2]))
            hang_symbol = list()
            for p in self.hang_list:
                hang_symbol.append('{0}:{1}:{2}'.format(p[0], p[1], p[2]))
            color = 'magenta'
            self.print_c(
                'wait_list:{0} complete_list:{1} running_list:{2} fail_list:{3} '
                'alive_process:{4} hang_symbol:{5} symbols:{6}, hang_symbol:{7}'
                .format(len(self.wait_list), len(self.complete_list),
                        len(self.running_list), len(self.fail_list),
                        (len(self.process_list) - len(self.hang_list)),
                        len(hang_symbol), process_symbols, hang_symbol),
                color)
            self.lock.release()
            self.lock_status = False
            time.sleep(5)
        except Exception as e:
            # abort path: signal all workers to stop, then wait them out
            if self.lock_status is True:
                self.print_c('main except: lock status: True :' + str(3) +
                             ' : ' + str(e))
                for p in self.process_list:
                    try:
                        self.print_c('terminate thread: ' + p.name)
                        self.set_status(p.name, 'stop_flag', True)
                    except Exception as e:
                        # NOTE(review): inner `e` shadows the outer exception
                        self.print_c('main except: ' + str(4) + ' : ' +
                                     str(e))
                self.lock.release()
            else:
                self.print_c('main except: lock status: False :' + str(5) +
                             ' : ' + str(e))
                self.lock.acquire()
                for p in self.process_list:
                    try:
                        self.print_c('terminate thread: ' + p.name)
                        self.set_status(p.name, 'stop_flag', True)
                    except Exception as e:
                        self.print_c('main except: ' + str(6) + ' : ' +
                                     str(e))
                self.lock.release()
            self.print_c('wait to exit all thread')
            while len(self.process_list) > 0:
                try:
                    for p in self.process_list:
                        self.print_c('terminate process: {}'.format(
                            p.name))
                        p.terminate()
                    for p in self.process_list:
                        self.print_c('wait to exit thread: {}'.format(
                            p.name))
                        p.join()
                except:
                    pass
            self.print_c('exit main: 2')
            result = False
            break
    return result
def run(self):
    """Worker loop: pop crypto-candle jobs from the shared wait list and
    collect their historical data until the stop flag is set or the wait
    list is empty.

    Each job is a 6-tuple: (symbol, interval, start_datetime, end_datetime,
    add_to_database, earlier_valid_timestamp). Jobs move wait -> running ->
    complete (or fail). `lock_status` mirrors whether this worker currently
    holds the shared lock, so the except path knows whether to release it.
    Always returns True.
    """
    lock_status = False
    self.p.print('worker: {0} :{1}'.format(self.process_id,
                                           'start run function'))
    start_time = get_now_time_second()
    while self.get_status('stop_flag') is False:
        # heartbeat for the supervisor's hang detection
        self.set_status('last_run_time', get_now_time_second())
        self.lock.acquire()
        lock_status = True
        # check exit condition: nothing left to do
        if len(self.wait_list) <= 0:
            self.lock.release()
            lock_status = False
            self.p.print('worker: {0} :{1}'.format(self.process_id,
                                                   'wait list empty'))
            break
        # get a new item under the lock
        try:
            self.current_running_crypto = None
            self.current_running_crypto = self.wait_list.pop()
            self.running_list.append(self.current_running_crypto)
            self.lock.release()
            lock_status = False
        except Exception as e:
            if self.current_running_crypto is not None:
                # pop succeeded but running_list append failed:
                # push the item back so it is not lost
                if self.current_running_crypto not in self.wait_list:
                    self.wait_list.append(self.current_running_crypto)
                self.current_running_crypto = None
            self.lock.release()
            lock_status = False
            self.p.print('worker: {0} :{1} ;{2}'.format(
                self.process_id, 'get new crypto : fail', str(e)))
            continue
        self.set_status('current_running_share', self.current_running_crypto)
        # unpack the job tuple
        symbol = self.current_running_crypto[0]
        interval = self.current_running_crypto[1]
        start_datetime = self.current_running_crypto[2]
        end_datetime = self.current_running_crypto[3]
        add_to_database = self.current_running_crypto[4]
        earlier_valid_timestamp = self.current_running_crypto[5]
        try:
            self.p.print('worker: {0} :{1}'.format(self.process_id,
                                                   'start collect data'))
            self.set_status('state', 'running')
            # both client calls return True on success, otherwise an
            # error value (presumably a message string — see the
            # comparison below)
            if self.process_auto_cropped_time is True:
                error = self.client_obj.load_and_set_complete_candle_historical_auto_cropped_time(
                    symbol=symbol,
                    interval=interval,
                    start_datetime=start_datetime,
                    end_datetime=end_datetime,
                    add_to_database=add_to_database,
                    earlier_valid_timestamp=earlier_valid_timestamp)
            else:
                error = self.client_obj.load_and_set_complete_candle_historical_do_not_cropped_time(
                    symbol=symbol,
                    interval=interval,
                    start_datetime=start_datetime,
                    end_datetime=end_datetime,
                    add_to_database=add_to_database,
                    earlier_valid_timestamp=earlier_valid_timestamp)
            if error is not True:
                self.p.print('--- error: {}'.format(error))
                # an empty time range is treated as success ("no eny data"
                # [sic] matches the client's message spelling)
                if error != 'no eny data in time range':
                    raise Exception(error)
            self.lock.acquire()
            lock_status = True
            self.running_list.remove(self.current_running_crypto)
            self.complete_list.append(self.current_running_crypto)
            self.lock.release()
            lock_status = False
            self.p.print('worker: {0} :{1}'.format(self.process_id,
                                                   'end collect data'))
        except Exception as e:
            # release the lock if this worker still holds it, then move
            # the job to the fail list
            if lock_status is True:
                self.p.print(
                    'worker {0} except: lock status: {1} ; error: {2}'.format(
                        str(self.process_id), True, str(e)))
                self.set_status('state', 'failing')
                self.lock.release()
            else:
                self.p.print(
                    'worker {0} except: lock status: {1} ; error: {2}'.format(
                        str(self.process_id), False, str(e)))
                self.set_status('state', 'failing')
            self.lock.acquire()
            self.running_list.remove(self.current_running_crypto)
            self.fail_list.append(self.current_running_crypto)
            self.lock.release()
            self.p.print('worker: {0} :{1}'.format(str(self.process_id),
                                                   'fail'))
    self.p.print('worker: {0} :{1}'.format(str(self.process_id), 'quit'))
    end_time = get_now_time_second()
    self.p.print('runtime:{0}'.format(end_time - start_time), color='red')
    return True
def transfer_new_record(source_db_info, destination_db_info,
                        source_table_name, destination_table_name,
                        select_max_thread, insert_max_thread,
                        select_max_offset, insert_max_offset, lock,
                        where_str=None):
    """Stream rows from a source table into a destination table using a
    producer (select) thread and a consumer (insert) thread that share the
    `transfer_record` dict.

    Returns (new_record_count, total_transfer_record, error, run_time).
    The shared-dict protocol (which keys `select_process` /
    `command_process` populate, including 'error' and
    'select_query_status') is defined by those workers — not visible here.
    """
    start = get_now_time_second()
    # shared state between this thread and the select/insert workers
    transfer_record = dict()
    transfer_record['result'] = list()
    transfer_record['select_query_status'] = list()
    transfer_record['command_query_status'] = list()
    transfer_record['sum_select_record'] = list()
    transfer_record['sum_insert_record'] = list()
    transfer_record_count = 0
    print('get source record count')
    db = DataBase(db_info=source_db_info)
    if where_str is None:
        query = 'select count(*) from {0}'.format(source_table_name)
    else:
        # NOTE(review): where_str is interpolated directly into SQL —
        # callers must only pass trusted input
        query = 'select count(*) from {0} where {1}'.format(
            source_table_name, where_str)
    res, err = db.select_query(query=query, args=(), mod=1)
    if err is None:
        transfer_record_count = res[0][0]
    transfer_record['transfer_record_count'] = [transfer_record_count]
    print('transfer_record_count: {}'.format(transfer_record_count))
    # ---------- start the select (producer) thread ----------
    if where_str is None:
        select_query = 'select * from {0}'.format(source_table_name)
    else:
        select_query = 'select * from {0} where {1}'.format(
            source_table_name, where_str)
    args = ()
    select_thread = threading.Thread(
        name='select_thread',
        target=select_process,
        args=(transfer_record, source_db_info, select_max_thread,
              transfer_record_count, select_max_offset, lock, select_query,
              args))
    print('start select thread')
    select_thread.start()
    while not select_thread.is_alive():
        sleep(0.1)
    # NOTE(review): getName() is deprecated (use .name); value is unused
    select_thread_name = select_thread.getName()
    # mutable flag the insert worker polls to know when to stop
    insert_status_flag = [True]
    # ---------- wait for the first batch of rows ----------
    # NOTE(review): indexes select_query_status[0] — assumes the select
    # worker populates it before this poll; verify against select_process
    while len(
            transfer_record['result']
    ) < 1 and transfer_record['select_query_status'][0] != 'stopping':
        sleep(0.1)
    if len(transfer_record['result']) < 1:
        # NOTE(review): assumes select_process set transfer_record['error']
        return 0, 0, transfer_record['error'], get_now_time_second() - start
    # build a '%s, %s, ...' placeholder list sized to the row width
    v = '%s, ' * len(transfer_record['result'][0])
    v = v.strip(', ')
    insert_query = 'insert ignore into {} values({})'.format(
        destination_table_name, v)
    args = transfer_record['result']
    # ---------- start the insert (consumer) thread ----------
    insert_thread = threading.Thread(
        name='insert_thread',
        target=command_process,
        args=(transfer_record, destination_db_info, insert_max_thread,
              insert_max_offset, lock, insert_query, args,
              insert_status_flag))
    print('start insert thread')
    insert_thread.start()
    while not insert_thread.is_alive():
        sleep(0.1)
    # ---------- wait for the select worker to finish ----------
    while transfer_record['select_query_status'][0] != 'stopping':
        sleep(0.1)
    # tell the insert worker no more data is coming
    insert_status_flag[0] = False
    # join every worker thread spawned above (and any they spawned)
    for t in threading.enumerate():
        if t != threading.main_thread():
            t.join()
    new_record_count = transfer_record_count
    total_transfer_record = transfer_record['sum_insert_record'][0]
    error = transfer_record['error']
    run_time = get_now_time_second() - start
    return new_record_count, total_transfer_record, error, run_time
for t in threading.enumerate(): if t != threading.main_thread(): #print('--------{}----------'.format(t.getName())) t.join() # sleep(1) new_record_count = transfer_record_count total_transfer_record = transfer_record['sum_insert_record'][0] error = transfer_record['error'] run_time = get_now_time_second() - start return new_record_count, total_transfer_record, error, run_time if __name__ == '__main__': start = get_now_time_second() source_db_info = get_database_info( pc_name=vps1_remote_access, database_name=tsetmc_and_analyze_data) #'tsetmc_raw_data') destination_db_info = get_database_info( pc_name=laptop_local_access, database_name=tsetmc_and_analyze_data) source_table_name = 'shareholders_data' destination_table_name = 'shareholders_data' select_max_thread = 5 insert_max_thread = 1 select_max_offset = 10000 insert_max_offset = 1000 lock = threading.Lock() res = transfer_new_record(source_db_info, destination_db_info,
def start(self):
    """Main collector daemon loop: never returns.

    Each cycle (self.cycle_period_time seconds) it tries three collection
    steps in order — share info, index daily data, trade data — and, when
    any of them produced new records, stamps the database update time via
    self.cli.set_database_update_time().

    State per step is a `last_collect_*_time` variable that starts as the
    int sentinel 0 ("never collected") and becomes a datetime after the
    first successful collection; the two branches below key off that.
    """
    last_collect_share_info_time = 0   # 0 until first success, then datetime
    last_collect_index_data_time = 0   # 0 until first success, then datetime
    last_collect_trade_data_time = 0   # 0 until first success, then datetime
    have_new_data = False
    # Log which steps are enabled for this run.
    print(self.step_collect_share_info)
    print(self.step_collect_index_data)
    print(self.step_collect_share_all_data)
    while True:
        start_cycle_time = get_now_time_second()
        # ---------------------------------------------
        # Step 1: update symbol (share) info.
        now_time = get_now_time_datetime()
        if last_collect_share_info_time == 0:
            # Never collected yet in this process: collect once if enabled.
            if self.step_collect_share_info is True:
                res = self.cli.collect_share_info()
            else:
                res = 0
            # collect_share_info() success is "number of records > 0".
            if res > 0:
                last_collect_share_info_time = get_now_time_datetime()
                have_new_data = True
        else:
            # Already collected before: refresh only on days 1/11/21 of the
            # month, and at most once per calendar day.
            if now_time.day in [
                    1, 11, 21
            ] and (last_collect_share_info_time.year != now_time.year
                   or last_collect_share_info_time.month != now_time.month
                   or last_collect_share_info_time.day != now_time.day):
                res = self.cli.collect_share_info()
                if res > 0:
                    last_collect_share_info_time = get_now_time_datetime()
                    have_new_data = True
        # ---------------------------------------------
        # Step 2: update index daily data.
        # Clock encoded as an int HHMMSS for comparison with
        # self.tsetmc_update_time (the market's daily publish time).
        int_now_time = now_time.hour * 10000 + now_time.minute * 100 + now_time.second
        if last_collect_index_data_time == 0:
            if self.step_collect_index_data is True:
                res = self.cli.collect_index_data(mod=1)
            else:
                res = 0
            # NOTE(review): unlike the other steps, collect_index_data()
            # appears to signal success by returning None — confirm against
            # the Client implementation.
            if res is None:
                last_collect_index_data_time = get_now_time_datetime()
                have_new_data = True
        else:
            # Refresh once per day, only after the market's update time.
            if int_now_time > self.tsetmc_update_time and (
                    now_time.year != last_collect_index_data_time.year
                    or now_time.month != last_collect_index_data_time.month
                    or now_time.day != last_collect_index_data_time.day):
                res = self.cli.collect_index_data(mod=1)
                if res is None:
                    last_collect_index_data_time = get_now_time_datetime()
                    have_new_data = True
        # ---------------------------------------------
        # Step 3: update trade data.
        int_now_time = now_time.hour * 10000 + now_time.minute * 100 + now_time.second
        if last_collect_trade_data_time == 0:
            if self.step_collect_share_all_data is True:
                res = self.cli.run()
            else:
                res = 0
            # cli.run() success is "number of records > 0".
            if res > 0:
                last_collect_trade_data_time = get_now_time_datetime()
                have_new_data = True
        else:
            # Refresh once per day, only after the market's update time.
            if int_now_time > self.tsetmc_update_time and (
                    now_time.year != last_collect_trade_data_time.year
                    or now_time.month != last_collect_trade_data_time.month
                    or now_time.day != last_collect_trade_data_time.day):
                res = self.cli.run()
                if res > 0:
                    last_collect_trade_data_time = get_now_time_datetime()
                    have_new_data = True
        # ---------------------------------------------
        if have_new_data is True:
            # At least one step produced new records this cycle:
            # persist the database update timestamp.
            self.cli.set_database_update_time(start_cycle_time)
            have_new_data = False
        # Sleep away the remainder of the cycle (skip if the cycle overran).
        sleep_time = start_cycle_time + self.cycle_period_time - get_now_time_second(
        )
        if sleep_time > 0:
            print('sleep sync process. start sleep: {0} sleep_time: {1}'.
                  format(get_now_time_string(), sleep_time))
            sleep(sleep_time)
def temp():
    """Scratch/standalone variant of the collector daemon loop.

    Builds a Client from client_setting and then runs the same three-step
    collect cycle as the class-based start() method (share info, index
    daily data, trade data), stamping the database update time when any
    step produced new records. Never returns.
    """
    cycle_period_time = 60 * 1  # one cycle per minute
    client_id = client_setting.client_id
    cli = Client(client_id=client_id,
                 local_db_info=client_setting.local_db_info,
                 server_db_info=client_setting.server_db_info,
                 mod='')
    # Daily market update time encoded as int HHMMSS (18:00:00).
    hour = 18
    minute = 0
    sec = 0
    tsetmc_update_time = hour * 10000 + minute * 100 + sec
    # 0 means "never collected"; replaced by a datetime after first success.
    last_collect_share_info_time = 0
    last_collect_index_data_time = 0
    last_collect_trade_data_time = 0
    have_new_data = False
    while True:
        start_cycle_time = get_now_time_second()
        # ---------------------------------------------
        # Step 1: update symbol (share) info.
        now_time = get_now_time_datetime()
        if last_collect_share_info_time == 0:
            # Never collected in this process: collect once.
            res = cli.collect_share_info()
            # collect_share_info() success is "number of records > 0".
            if res > 0:
                last_collect_share_info_time = get_now_time_datetime()
                have_new_data = True
        else:
            # Refresh only on days 1/11/21, at most once per calendar day.
            if now_time.day in [
                    1, 11, 21
            ] and (last_collect_share_info_time.year != now_time.year
                   or last_collect_share_info_time.month != now_time.month
                   or last_collect_share_info_time.day != now_time.day):
                res = cli.collect_share_info()
                if res > 0:
                    last_collect_share_info_time = get_now_time_datetime()
                    have_new_data = True
        # ---------------------------------------------
        # Step 2: update index daily data.
        int_now_time = now_time.hour * 10000 + now_time.minute * 100 + now_time.second
        if last_collect_index_data_time == 0:
            res = cli.collect_index_data(mod=1)
            # NOTE(review): this step treats a None return as success,
            # unlike the ">0" convention of the other steps — confirm
            # against the Client implementation.
            if res is None:
                last_collect_index_data_time = get_now_time_datetime()
                have_new_data = True
        else:
            # Refresh once per day, only after the market's update time.
            if int_now_time > tsetmc_update_time and (
                    now_time.year != last_collect_index_data_time.year
                    or now_time.month != last_collect_index_data_time.month
                    or now_time.day != last_collect_index_data_time.day):
                res = cli.collect_index_data(mod=1)
                if res is None:
                    last_collect_index_data_time = get_now_time_datetime()
                    have_new_data = True
        # ---------------------------------------------
        # Step 3: update trade data.
        int_now_time = now_time.hour * 10000 + now_time.minute * 100 + now_time.second
        if last_collect_trade_data_time == 0:
            res = cli.run()
            # cli.run() success is "number of records > 0".
            if res > 0:
                last_collect_trade_data_time = get_now_time_datetime()
                have_new_data = True
        else:
            # Refresh once per day, only after the market's update time.
            if int_now_time > tsetmc_update_time and (
                    now_time.year != last_collect_trade_data_time.year
                    or now_time.month != last_collect_trade_data_time.month
                    or now_time.day != last_collect_trade_data_time.day):
                res = cli.run()
                if res > 0:
                    last_collect_trade_data_time = get_now_time_datetime()
                    have_new_data = True
        # ---------------------------------------------
        if have_new_data is True:
            # At least one step produced new records this cycle:
            # persist the database update timestamp.
            cli.set_database_update_time(start_cycle_time)
            have_new_data = False
        # Sleep away the remainder of the cycle (skip if the cycle overran).
        sleep_time = start_cycle_time + cycle_period_time - get_now_time_second(
        )
        if sleep_time > 0:
            print(
                'sleep sync process. start sleep: {0} sleep_time: {1}'.format(
                    get_now_time_string(), sleep_time))
            sleep(sleep_time)
# ---------------------------------------------------------------------------
# Standalone sync daemon: builds the two sync-rule objects and runs them
# once per cycle, forever.
# NOTE(review): relies on module-level source_db_info / destination_db_info
# being defined earlier in this file — verify they are in scope here.
# ---------------------------------------------------------------------------
transaction_mod = True
max_packet_size = 1000000   # max rows/bytes moved per packet — TODO confirm unit
sync_period_time = 1000000  # minimum seconds between syncs of the same rule
cycle_period_time = 60 * 1  # one daemon cycle per minute
obj1 = SyncSiteAppDataBase(source_db_info=source_db_info,
                           destination_db_info=destination_db_info,
                           max_packet_size=max_packet_size,
                           transaction_mod=transaction_mod,
                           sync_period_time=sync_period_time)
obj2 = SyncBackTestAppDataBase(source_db_info=source_db_info,
                               destination_db_info=destination_db_info,
                               max_packet_size=max_packet_size,
                               transaction_mod=transaction_mod,
                               sync_period_time=sync_period_time)
while True:
    start_cycle_time = get_now_time_second()
    obj1.run(autocommit_each_table=True)
    obj2.run(autocommit_each_table=True)
    # Sleep away the remainder of the cycle (skip if the cycle overran).
    sleep_time = start_cycle_time + cycle_period_time - get_now_time_second()
    if sleep_time > 0:
        print('sleep sync select_process. start sleep: {0} sleep_time: {1}'.
              format(get_now_time_string(), sleep_time))
        sleep(sleep_time)