def _delete(instance, client, status, force_delete=False):
    """Delete a trove instance, optionally via the force-delete API.

    :param instance: instance id (or object) resolvable by ``instances.get``
    :param client: trove client
    :param status: status the instance is moved into before deletion
    :param force_delete: when True, use ``force_delete`` instead of ``delete``
    """
    instance = client.instances.get(instance)
    change_status(instance, client, status)
    # Pick the deletion call once, then invoke it.
    remove = client.instances.force_delete if force_delete else client.instances.delete
    remove(instance)
def _attach_as_admin(self, status):
    """Move the admin instance into *status* and attach the admin configuration.

    Reads ``admin_instance`` and ``admin_configuration`` from ``self.context``.
    """
    client = self.clients("trove")
    manager = client.instances
    instance = manager.get(self.context['admin_instance'])
    conf = self.context['admin_configuration']
    change_status(instance, client, status)
    manager.modify(instance, conf)
def predict_func(args, log_dir):
    """Run prediction with a saved model and write the results to *log_dir*.

    Required ``args`` attributes: ``testx``, ``model``, ``weight``,
    ``header``, ``id``.  Writes two files into *log_dir*:
    ``type`` (inferred problem kind) and ``result`` (CSV rows of predictions).
    """
    job_id = args.id  # renamed: `id` shadowed the builtin
    csv_header = args.header
    change_status('loading', job_id)
    test_data = load_file(args.testx, csv_header)
    model = load_model(args.weight)
    change_status('executing', job_id)
    result = model.predict(test_data)
    model_path = os.path.join(args.model, 'preprocessed/result.json')
    with open(model_path) as f:
        model_parser = json.load(f)
    # Infer the problem type from the loss of the Output layer (last one wins,
    # as in the original).  BUG FIX: default to 'regression' so `problem` is
    # never unbound when no Output layer is present (was a NameError).
    problem = 'regression'
    for value in model_parser['layers'].values():
        if value['type'] == 'Output':
            loss = value['params']['loss']
            problem = 'classification' if 'entropy' in loss else 'regression'
    with open(os.path.join(log_dir, 'type'), 'w') as f:
        f.write(str(problem))
    with open(os.path.join(log_dir, 'result'), 'w') as f:
        for row in result:
            f.write(','.join(map(str, row)) + '\n')
def _resize(instance, client, status, size):
    """Resize the volume of a trove instance and wait until it is ready.

    :param size: new volume size passed to ``resize_volume``
    """
    manager = client.instances
    instance = manager.get(instance)
    change_status(instance, client, status)
    # NOTE(review): this subscripts the instance (`instance['id']`) while the
    # sibling resize helper passes the object itself — confirm what
    # `instances.get` returns here.
    manager.resize_volume(instance['id'], size)
    utils.wait_for(instance, ready_statuse=["RESIZE"], update_resource=manager.get)
    check_ready(instance)
def _resize(instance, client, status, flavor_id):
    """Resize a trove instance to a new flavor and wait until it is ready.

    NOTE(review): shares its name with the volume-resize helper above —
    if both live in one module the later definition shadows the earlier.
    """
    manager = client.instances
    instance = manager.get(instance)
    change_status(instance, client, status)
    manager.resize_instance(instance, flavor_id)
    utils.wait_for(instance, ready_statuse=["RESIZE"], update_resource=manager.get)
    check_ready(instance)
def _restart(instance, client, status):
    """Restart a trove instance and wait for it to come back up.

    When *status* is ``RESTART_REQUIRED`` the configurations manager is
    handed to ``change_status`` as well.
    """
    instance = client.instances.get(instance)
    change_status(
        instance, status,
        client.configurations if status == "RESTART_REQUIRED" else None)
    # BUG FIX: the original read `instance.restart` without calling it — the
    # bound method was never invoked, so the instance was never restarted.
    instance.restart()
    utils.wait_for(instance, ready_statuse=["REBOOT"],
                   update_resource=client.instances.get)
    check_ready(instance)
def validate_func(args, log_dir):
    """Evaluate a saved model on labelled test data and write the results.

    Required ``args`` attributes: ``testx``, ``testy``, ``model``,
    ``weight``, ``header``, ``id``.  Writes ``type`` and ``result`` into
    *log_dir*; for classification also writes ``evaluate``, a normalized
    confusion matrix (rows = true class, columns = predicted class).
    """
    job_id = args.id  # renamed: `id` shadowed the builtin
    csv_header = args.header
    change_status('loading', job_id)
    test_data = load_file(args.testx, csv_header)
    test_target = load_file(args.testy, csv_header)
    model = load_model(args.weight)
    change_status('executing', job_id)
    result = model.predict(test_data)
    model_path = os.path.join(args.model, 'preprocessed/result.json')
    with open(model_path) as f:
        model_parser = json.load(f)
    # Infer the problem type from the loss of the Output layer (last one wins).
    # BUG FIX: default so `problem` is never unbound when no Output layer exists.
    problem = 'regression'
    for value in model_parser['layers'].values():
        if value['type'] == 'Output':
            loss = value['params']['loss']
            problem = 'classification' if 'entropy' in loss else 'regression'
    with open(os.path.join(log_dir, 'type'), 'w') as f:
        f.write(str(problem))
    with open(os.path.join(log_dir, 'result'), 'w') as f:
        for row in result:
            f.write(','.join(map(str, row)) + '\n')
    if problem == 'classification':
        classes = len(result[0])
        # BUG FIX: the original built the matrix as [[0.0]*classes]*classes,
        # which makes every row the SAME list object, so each increment was
        # applied to all rows at once.  Build independent rows instead.
        conf_matrix = [[0.0] * classes for _ in range(classes)]
        score = 1.0 / len(result)  # each sample contributes 1/N
        for i in range(len(result)):
            conf_matrix[test_target[i].argmax()][result[i].argmax()] += score
        with open(os.path.join(log_dir, 'evaluate'), 'w') as f:
            for row in conf_matrix:
                f.write(','.join(map(str, row)) + '\n')
def get_data_allocate(strategy, data_type):
    """Poll the clock on a business day and run the daily fetch script at
    each scheduled minute-of-day for the given kline *strategy*.

    :param strategy: one of 'min5', 'min15', 'min30', 'min60', 'day'
    :param data_type: picks the ``daily_<data_type>_<strategy>.py`` script
    """
    today = Calendar.today()
    in_biz = Calendar.in_business(today, day=True)
    if in_biz == False:  # explicit comparison kept: non-False falsy returns unknown
        print('非工作日!')
        exit(0)
    # BUG FIX: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for interval timing.
    t1 = time.perf_counter()
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
    # Raw string keeps the command bytes identical while silencing the
    # invalid "\d" escape the original relied on.
    command = r"python {}\daily_{}_{}.py".format(project_dir, data_type, strategy)
    # Minutes-of-day at which the script should run (renamed from `list`,
    # which shadowed the builtin).
    schedule_getters = {
        'min5': get_min5_time,
        'min15': get_min15_time,
        'min30': get_min30_time,
        'min60': get_min60_time,
        'day': get_day_time,
    }
    schedule = schedule_getters[strategy]() if strategy in schedule_getters else []
    print(strategy)
    while True:
        now = datetime.now()
        x = now.hour * 60 + now.minute
        if x > schedule[-1]:
            print(schedule[-1])
            print('收盘!')
            exit(0)
        if x == 11 * 60 + 45:
            print('上午结束')
            exit(0)
        if x in schedule:
            print('finded')
            t1 = time.perf_counter()
            # NOTE(review): shell=True with a formatted string — safe only
            # because strategy/data_type come from internal callers.
            subprocess.call(command, shell=True)
            t3 = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            print(t3)
            dit = {'date': t3, 'status': 1, 'kline': strategy, 'other': ''}
            model_list['kline_data_update_mark'].insert(dit)
            t2 = time.perf_counter()
            # Sleep until the next slot, minus the minutes the run consumed.
            a = (change_status(strategy)['time']) - ((t2 - t1) / 60)
            print(a)
            if a > 0:
                time.sleep(a * 60)
                print('睡眠结束')
        else:
            print('finding:')
            time.sleep(10)
def train_func(args, log_dir):
    """Build a model from *args*, train it, then persist history and weights.

    Writes ``train_log`` into *log_dir* and ``weights.h5`` into its parent.
    """
    change_status('loading', args.id)
    model, model_dir, (trainx, trainy, testx, testy) = build_model(args)
    # Training callbacks: per-batch history, state-file updates, realtime log.
    history_cb = Batch_History()
    state_cb = Model_state(os.path.join(model_dir, 'state.json'), model.config)
    #state_file = "/home/plash/petpen/state.json"
    realtime_cb = RealtimeLogger(os.path.join(log_dir, 'realtime_logging.txt'))
    change_status('running', args.id)
    history = model.train(callbacks=[history_cb, state_cb, realtime_cb])
    save_history(os.path.join(log_dir, 'train_log'), history, history_cb)
    result_dir = os.path.dirname(log_dir)
    model.save(os.path.join(result_dir, 'weights.h5'))
def _create(instance, client, status, **kwargs):
    """Create a backup of a trove instance, wait for readiness, then delete it.

    :param kwargs: extra arguments forwarded to ``backups.create``
    """
    backups = client.backups
    instances = client.instances
    change_status(instance, client, status)
    backup = backups.create(instance=instance['id'], **kwargs)
    check_ready(instance['id'], instances)
    backups.delete(backup)
# NOTE(review): fragment of a larger run/dispatch routine — `args`,
# `model_result_path` and `model_dir` are bound before this chunk.
if not os.path.exists(model_result_path):
    os.mkdir(model_result_path)
log_dir = os.path.join(model_result_path, 'logs')
os.mkdir(log_dir)
error_log_file = os.path.join(log_dir, 'error_log')
id = args.id  # NOTE(review): shadows the builtin `id`
try:
    # Train-style entry points receive the log dir; others the result dir.
    if 'train' in args.func.__name__:
        args.func(args, log_dir)
    else:
        args.func(args, model_result_path)
    change_status('finish', id)
except:  # NOTE(review): bare except — also swallows SystemExit/KeyboardInterrupt
    # Dump the full traceback for inspection and flag the model state as errored.
    exc_type, exc_value, exc_traceback = sys.exc_info()
    lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
    with open(error_log_file, 'w') as error_log:
        for line in lines:
            error_log.write(line)
    with open(os.path.join(model_dir, 'state.json'), 'w') as state_file:
        info = {'status': 'error', 'error_log_file': error_log_file}
        json.dump(info, state_file)
    change_status('error', id)