def __init__(self, fielddata):
    BaseModel.__init__(self)
    self.footholds = []
    self.footholds_count = 0
    self.background_img = pygame.image.load(
        get_file_path(fielddata['background_img'])).convert()
    self.tiles_img = pygame.image.load(
        get_file_path(fielddata['tiles_img'])).convert_alpha()
    for fh in fielddata['footholds']:
        tmp = {
            'pos': fh['pos'],
            'width': fh['size'][0],
            'image': pygame.Surface(fh['size'], SRCALPHA)}
        x = 0
        for t in fh['tiles']:
            repeat, sub_pos, rand_tile = 1, (0, 0), False
            if 'repeat' in t:
                repeat = t['repeat']
            # a list of sub positions means: pick one tile at random per repeat
            if isinstance(t['sub_pos'], list):
                rand_tile = True
            for _ in range(repeat):
                if rand_tile:
                    sub_pos = t['sub_pos'][randindex(len(t['sub_pos']))]
                else:
                    sub_pos = t['sub_pos']
                x = self._build_image(tmp['image'], x, sub_pos, t['size'])
        self.footholds.append(tmp)
        self.footholds_count += 1
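# --- Added sketch: `_build_image` is called above but not shown in this
# snippet. A minimal hypothetical implementation, assuming it blits one tile
# from the shared tile sheet onto the foothold surface at horizontal offset
# `x` and returns the advanced offset; the real method may differ.
def _build_image(self, surface, x, sub_pos, size):
    tile = self.tiles_img.subsurface((sub_pos, size))
    surface.blit(tile, (x, 0))
    return x + size[0]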
def test_model(): """Test the model""" # 1. Get Testing Options cfg = TestOptions() # 2. Load train and val Dataset test_loader = load_database(cfg) # 3. Create a Model model = BaseModel(cfg) # 4. Outer loop for one batch test sample loss = acc = 0.0 for per_step, (images, labels, images_names) in enumerate(test_loader.flow()): results = [] model.test(images=images, labels=labels, mode='test') loss += model.loss acc += model.metric if cfg.opts.test_label == 'None': predict = model.out.argmax(1) for i in range(len(predict)): results.append([images_names[i], predict[i].item()]) wrote_csv_file(model.result_path, results, mode='a', show=False) print_test_info(cfg, [per_step, len(test_loader)], loss, acc)
def __init__(self):
    BaseModel.__init__(self)
    # load save file
    self.level = 10
    self.critical_chance = 10
    self.damage = 10
    self.hit_num = 5
    self.critical_power = 50
def __init__(self, env, handle, *args, **kwargs):
    BaseModel.__init__(self, env, handle)
    self.env = env
    self.handle = handle
    self.n_action = env.get_action_space(handle)
    self.view_size = env.get_view_space(handle)
    self.attack_base, self.view2attack = env.get_view2attack(handle)
def __init__(self, env, handle, attack_handle, *args, **kwargs):
    BaseModel.__init__(self, env, handle)
    self.attack_channel = env.get_channel(attack_handle)
    self.attack_base, self.view2attack = env.get_view2attack(handle)
    print("attack_channel", self.attack_channel)
    print("view2attack", self.view2attack)
def __init__(self, env, handle, away_handle, *args, **kwargs):
    BaseModel.__init__(self, env, handle)
    self.away_channel = env.get_channel(away_handle)
    self.attack_base, _ = env.get_view2attack(handle)
    self.move_back = 4
    print("attack base", self.attack_base, "away", self.away_channel)
def __init__(self, env, handle, name, subclass_name):
    """Init a model.

    Parameters
    ----------
    env: magent.Environment
    handle: handle (ctypes.c_int32)
    name: str
    subclass_name: str
        name of subclass
    """
    BaseModel.__init__(self, env, handle)
    self.name = name
    self.subclass_name = subclass_name
def train_model():
    """Train the model"""
    # 1. Get Training Options
    cfg = TrainOptions()
    # 2. Load train and val Dataset
    train_loader, val_loader = load_database(cfg)
    # 3. Create a Model
    model = BaseModel(cfg)
    # 4. Training
    for per_epoch in range(model.start_epoch + 1, cfg.opts.epoch + 1):
        val_flag = False
        save_metrics = {"LOSS": 0.0, "ACC": 0.0}
        # inner loop for one batch
        for per_step, (images, labels, _) in enumerate(train_loader.flow()):
            model.train(images=images, labels=labels)
            save_metrics["LOSS"] += model.loss
            save_metrics["ACC"] += model.metric
            val_flag = print_train_info(
                val_flag, cfg,
                [model.start_epoch + 1, per_epoch, cfg.opts.epoch],
                [per_step + 1, len(train_loader)],
                model.lr, model.loss, model.metric)
            if cfg.opts.is_val:
                val_model(cfg, model, val_loader, val_flag, per_epoch)
        if per_epoch % cfg.opts.save_list == 0:
            model.save_model(per_epoch, [
                "train" + cfg.opts.save_metric,
                save_metrics[cfg.opts.save_metric] / len(train_loader)
            ])
        model.update_lr(per_epoch + 1)
def test(station, test_dataset, max_values, type):
    # type == 0: Beijing stations (6 features, incl. O3); otherwise London (3 features)
    if type == 0:
        feature_dim = 6
    else:
        feature_dim = 3
    model = BaseModel(feature_dim, use_length, rnn_hid_dim)
    model.load_state_dict(torch.load('./files/params_' + station + '.pkl'))
    for start in range(0, len(test_dataset) - predict_time):
        x, t = test_dataset[start]
        acc = []
        PM25, PM10, O3 = [], [], []
        PM25_actual, PM10_actual, O3_actual = [], [], []
        for i in range(0, predict_time):
            _, t = test_dataset[start + i]
            x = Variable(x, volatile=True)  # inference only, no gradients needed
            x = x.unsqueeze(0)
            output = model(x)
            output = output.squeeze(0)
            output = output.data
            x = x.squeeze(0)
            # roll the window: drop the oldest step, append the new prediction
            x = np.vstack((x[1:], output[-1]))
            x = torch.from_numpy(x)
            out = output[-1].numpy()
            tar = t[-1].numpy()
            # clamp negative concentrations to zero before de-normalising
            out[:3] = np.maximum(out[:3], 0)
            out = np.multiply(out, max_values)
            tar = np.multiply(tar, max_values)
            PM25.append(out[0])
            PM10.append(out[1])
            O3.append(out[2] if type == 0 else 0)
            PM25_actual.append(tar[0])
            PM10_actual.append(tar[1])
            O3_actual.append(tar[2])
        acc.append(smape(PM25, PM25_actual))
        acc.append(smape(PM10, PM10_actual))
        acc.append(smape(O3, O3_actual))
        print(acc)
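# --- Added sketch: `smape` is used above but not defined in these snippets.
# A minimal version under the standard SMAPE definition (range [0, 2]); the
# project's own implementation may differ, e.g. in zero-denominator handling.
import numpy as np

def smape(predicted, actual):
    p = np.asarray(predicted, dtype=float)
    a = np.asarray(actual, dtype=float)
    diff = np.abs(p - a)
    denom = (np.abs(p) + np.abs(a)) / 2.0
    # terms where both values are zero contribute zero error
    ratio = np.divide(diff, denom, out=np.zeros_like(diff), where=denom != 0)
    return np.mean(ratio)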
def read_price_file_ctp(ctp_file_path):
    fr = open(ctp_file_path)
    instrument_array = []
    market_array = []
    for line in fr.readlines():
        base_model = BaseModel()
        if len(line.strip()) == 0:
            continue
        # each line: CALLBACK_NAME|Key1:Val1,Key2:Val2,...
        for tempStr in line.split('|')[1].split(','):
            temp_array = tempStr.replace('\n', '').split(':', 1)
            setattr(base_model, temp_array[0].strip(), temp_array[1])
        if 'OnRspQryInstrument' in line:
            exchange_id = getattr(base_model, 'ExchangeID', '')
            product_id = getattr(base_model, 'ProductID', '')
            instrument_array.append(base_model)
        elif 'OnRspQryDepthMarketData' in line:
            market_array.append(base_model)
    field_list = [
        'VolumeMultiple', 'MaxMarketOrderVolume', 'MinMarketOrderVolume',
        'MaxLimitOrderVolume', 'MinLimitOrderVolume', 'LongMarginRatio',
        'ShortMarginRatio'
    ]
    print('update_instrument_info, update record num: %d; fields: %s'
          % (len(instrument_array), field_list))
    update_instrument_info(instrument_array)
    print('update market_info, update record num: %d' % len(market_array))
    update_market_info(market_array)  # update prev_close from md market data
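# --- Added example: the parsers in this family all assume the same flat-file
# layout, a callback name, a '|' separator, then comma-separated 'Key:Value'
# pairs. The field values below are illustrative, not taken from a real file.
line = "OnRspQryInstrument|InstrumentID:IF2012,ExchangeID:CFFEX,ProductID:IF\n"
fields = {}
for pair in line.split('|')[1].split(','):
    key, value = pair.replace('\n', '').split(':', 1)
    fields[key.strip()] = value
print(fields['ExchangeID'])  # -> CFFEX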
def index():
    return render_template('template.html',
                           weibo_data=BaseModel().select()[:10],
                           baidu_data=BaiduModal().select()[:10],
                           zhihu_data=ZhihuModal().select()[:10],
                           weixin_data=WeixinModal().select()[:10],
                           hotword=fenci())
def read_price_file_femas(femas_file_path):
    print('Start read file: ' + femas_file_path)
    fr = open(femas_file_path)
    option_array = []
    future_array = []
    instrument_cff_array = []
    market_array = []
    for line in fr.readlines():
        base_model = BaseModel()
        for tempStr in line.split('|')[1].split(','):
            temp_array = tempStr.replace('\n', '').split(':', 1)
            setattr(base_model, temp_array[0].strip(), temp_array[1])
        if 'OnRspQryInstrument' in line:
            product_id = getattr(base_model, 'ProductID', '')
            options_type = getattr(base_model, 'OptionsType', '')
            if product_id in ('IC', 'IF', 'IH', 'TF', 'T'):
                future_array.append(base_model)
                instrument_cff_array.append(base_model)
            elif options_type in ('1', '2'):
                option_array.append(base_model)
                # original appended option_array (the list itself), which
                # looks like a bug; append the record instead
                instrument_cff_array.append(base_model)
        elif 'OnRtnDepthMarketData' in line:
            market_array.append(base_model)
    update_instrument_cff(future_array)
    update_market_data(market_array)
def read_price_file_ctp(ctp_file_path):
    fr = open(ctp_file_path)
    future_list = []
    option_list = []
    for line in fr.readlines():
        base_model = BaseModel()
        if len(line.strip()) == 0:
            continue
        for tempStr in line.split('|')[1].split(','):
            temp_array = tempStr.replace('\n', '').split(':', 1)
            setattr(base_model, temp_array[0].strip(), temp_array[1])
        if 'OnRspQryInstrument' in line:
            # ProductClass '1' = future, '2' = option
            product_class = getattr(base_model, 'ProductClass', '')
            if product_class == '1':
                future_list.append(base_model)
            if product_class == '2':
                option_list.append(base_model)
    print('pre_add_future, add record num: %d' % len(future_list))
    pre_add_future(future_list)  # add new futures
    print('pre_add_option, add record num: %d' % len(option_list))
    pre_add_option(option_list)  # add new options
def predict(station, test_data, max_values, type):
    if type == 0:
        feature_dim = 6
    else:
        feature_dim = 3
    model = BaseModel(feature_dim, use_length, rnn_hid_dim)
    model.load_state_dict(torch.load('./files/params_' + station + '.pkl'))
    x = test_data
    # x, t = x.unsqueeze(0), t.unsqueeze(0)
    PM25, PM10, O3 = [], [], []
    for i in range(0, predict_time):
        x = Variable(x, volatile=True)  # inference only, no gradients needed
        x = x.unsqueeze(0)
        output = model(x)
        output = output.squeeze(0)
        output = output.data
        x = x.squeeze(0)
        # roll the window: drop the oldest step, append the new prediction
        x = np.vstack((x[1:], output[-1]))
        x = torch.from_numpy(x)
        out = output[-1].numpy()
        out = np.multiply(out, max_values)
        # clamp negative concentrations to zero
        out[:3] = np.maximum(out[:3], 0)
        PM25.append(out[0])
        PM10.append(out[1])
        O3.append(out[2] if type == 0 else 0)
    output_dict = {
        'station_id': station,
        'PM2.5': PM25,
        'PM10': PM10,
        'O3': O3,
    }
    if type == 0:
        beijing_output.append(output_dict)
    else:
        london_output.append(output_dict)
class BaseModelAPI(ModelAPI):

    def _set_model(self):
        self.model = BaseModel(argv=self.argv,
                               emb=self.emb,
                               n_vocab=self.vocab_word.size(),
                               n_labels=self.vocab_label.size())
        self.model.compile(self._get_input_tensor_variables())

    def _get_input_tensor_variables(self):
        # x_w: 1D: batch, 2D: n_words, 3D: 5 + window; word id
        # x_p: 1D: batch, 2D: n_words; posit id
        # y: 1D: batch, 2D: n_words; label id
        if self.argv.mark_phi:
            return [T.itensor3('x_w'), T.imatrix('x_p'), T.imatrix('y')]
        return [T.itensor3('x_w'), T.imatrix('y')]

    def _format_inputs(self, sample):
        return sample.x
def main():
    from true_causal_model import TrueCausalModel
    from model import BaseModel
    logging.basicConfig(filename='logs/causalAgent.log',
                        filemode='w', level=logging.INFO)
    model = BaseModel('configs/model_parameters.json')
    nature = TrueCausalModel(model)
    rounds = 100
    target_value = 1
    half_blind_agent = HalfBlindAgent(nature, model)
    half_blind_agent.training(rounds, target_value)
def __init__(self):
    Group.__init__(self)
    BaseModel.__init__(self)
    # load image
    _money_image = pygame.image.load(
        get_file_path('img/item/money.png')).convert_alpha()
    _subsurface_data = [(25, 24), (25, 24), (33, 30), (32, 31)]
    _y = 0
    self.money_images = []
    for _sub_data in _subsurface_data:
        _tmp_list = [_money_image.subsurface(
            (i * _sub_data[0], _y), _sub_data) for i in range(4)]
        _y += _sub_data[1]
        self.money_images.append(_tmp_list)
    _item_rare_image = pygame.image.load(
        get_file_path('img/item/rare_42x44.png')).convert_alpha()
    self.item_rare_images = [_item_rare_image.subsurface(
        (i * ITEM_RARE_SIZE[0], 0), ITEM_RARE_SIZE) for i in range(6)]
    # load icons, but for now only load one image
    self.item_icons = pygame.image.load(
        get_file_path('img/item/04000019.png')).convert_alpha()
def test_model(): """Test the model""" # 1. Get Testing Options cfg = TestOptions() # 2. Load train and val Dataset test_loader = load_database(cfg) # 3. Create a Model model = BaseModel(cfg) # 4, Create multi class metrics metrics = MultiClassMetrics(cfg.class_name) # 5. Outer loop for one batch test sample for per_step, (images, labels, images_names) in enumerate(test_loader): results = [] model.input(images=images, labels=labels) model.test() if cfg.opts.test_label != 'None': metrics.eval(labels, model.out.cpu(), indicators="ACC, F1, FPR", step=len(test_loader)) else: predict = model.out.cpu().argmax(1) for i in range(len(predict)): results.append([images_names[i], predict[i].item()]) wrote_csv_file(model.result_path, results, mode='a', show=False) print_test_info(cfg, [per_step, len(test_loader)], metrics.metrics)
def test_model(seed_everything, data_dir, feat_extractor, crop_size, hidden_size):
    dm = SemanticDataModule(data_dir, imgs_per_item=5, crop_size=crop_size,
                            seed=42, batch_size=2, num_workers=0)
    dm.prepare_data()
    model = BaseModel(feat_extractor, crop_size, hidden_size, lr=0.01)
    trainer = pl.Trainer(fast_dev_run=True)
    trainer.fit(model, dm)
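# --- Added sketch: the arguments above are pytest fixtures. A hypothetical
# conftest.py that would provide them; the real values, paths, and backbone
# name in the project may differ.
import pytest
import pytorch_lightning as pl

@pytest.fixture
def seed_everything():
    pl.seed_everything(42)

@pytest.fixture
def data_dir(tmp_path):
    return str(tmp_path)  # placeholder; real tests point at a dataset checkout

@pytest.fixture
def feat_extractor():
    return "resnet18"  # placeholder backbone name

@pytest.fixture
def crop_size():
    return 64

@pytest.fixture
def hidden_size():
    return 128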
def demo_test():
    feature_dim = 6
    model = BaseModel(feature_dim, use_length, rnn_hid_dim)
    model.load_state_dict(torch.load('params.pkl'))
    train_dataset, valid_dataset, max_values = make_train_valid_dataset(
        'bj_aq.npy', use_length)
    # sample
    x, t = valid_dataset[0]
    # x = Variable(x, volatile=True).cuda()
    # t = Variable(t, volatile=True).cuda()
    x = Variable(x, volatile=True)
    t = Variable(t, volatile=True)
    x, t = x.unsqueeze(0), t.unsqueeze(0)
    output = model(x)
    output = output.squeeze(0)
    output = output.data
    x, t = x.squeeze(0), t.squeeze(0)
    # undo the max-value normalisation before printing
    current = np.multiply(x, max_values)
    output = np.multiply(output, max_values)
    target = np.multiply(t, max_values)
    print(current)
    print(target)
    print(output)
def run_simulation(config_filename, graphfilename, n):
    model = BaseModel(config_filename)
    tcm = TrueCausalModel(model)
    tcm.model.save_pgm_as_img(graphfilename)
    intervention_vars = model.get_intervention_variables()
    target_value = 1
    target = {
        "variable": model.get_target_variable(),
        "value": target_value
    }
    local_data = dict()
    for i in range(n):
        # pick a random intervention variable and a random binary value for it
        idx_intervention_var = np.random.randint(len(intervention_vars))
        action = (intervention_vars[idx_intervention_var], np.random.randint(2))
        nature_response = tcm.action_simulator([action[0]], [action[1]])
        for k in nature_response:
            if k not in local_data:
                local_data[k] = []
            local_data[k].append(nature_response[k])
    df = pd.DataFrame.from_dict(local_data)
    model_from_data = generate_approx_model_from_graph(
        ebunch=model.get_ebunch(), nodes=model.nodes, df=df)
    for cpd in model_from_data.get_cpds():
        logging.info(cpd)
def read_position_file_ctp(ctp_file_path):
    print('Start read file: ' + ctp_file_path)
    fr = open(ctp_file_path)
    order_array = []
    trade_array = []
    trading_account_array = []
    investor_position_array = []
    commission_rate_array = []
    for line in fr.readlines():
        if 'Account_ID' in line:
            account_id = line.replace('\n', '').split(':')[1]
        else:
            base_model = BaseModel()
            for tempStr in line.split('|')[1].split(','):
                temp_array = tempStr.replace('\n', '').split(':', 1)
                setattr(base_model, temp_array[0].strip(), temp_array[1].strip())
            if 'OnRspQryOrder' in line:
                order_array.append(base_model)
            elif 'OnRspQryTrade' in line:
                trade_array.append(base_model)
            elif 'OnRspQryTradingAccount' in line:
                trading_account_array.append(base_model)
            elif 'OnRspQryInvestorPosition' in line:
                investor_position_array.append(base_model)
            elif 'OnRspQryInstrumentCommissionRate' in line:
                commission_rate_array.append(base_model)
    print('AccountID: ' + account_id)
    # delete today's records for this account
    del_account_position_by_id(account_id)
    del_order_trader_by_id(account_id)
    commission_rate_list = __get_commission_rate_list(commission_rate_array)
    (order_list, order_dict) = __get_order_list(account_id, order_array)
    trade_list = __get__trade_list(account_id, trade_array, order_dict)
    (position_dict, position_db_list) = __build_account_position(
        account_id, investor_position_array)
    # position_db_list = __calculation_position(trade_list, position_dict)
    if len(trading_account_array) > 0:
        cny_position_db = __get_account_cny(account_id, trading_account_array)
        position_db_list.append(cny_position_db)
    update_db(commission_rate_list, order_list, trade_list, position_db_list)
    update_account_trade_restrictions(account_id)
def main():
    parser = argparse.ArgumentParser(
        description='Run experiments with causal agents.')
    parser.add_argument("--config-file", type=str,
                        default="configs/model_parameters.json",
                        help="Path to the configuration files.")
    parser.add_argument("--experiments", type=int, default=10,
                        help="# of experiments.")
    parser.add_argument("--rounds", type=int, default=20,
                        help="# of rounds per experiment.")
    parser.add_argument("--target-value", type=int, default=1,
                        help="Desired value for target variable.")
    parser.add_argument("--log-file", type=str, default="logs/experiments.log",
                        help="Path to the log files.")
    args = parser.parse_args()
    logs_path = args.log_file
    model_path = args.config_file
    rounds = args.rounds
    target_value = args.target_value
    n_experiments = args.experiments
    if logs_path:
        logging.basicConfig(filename=logs_path, filemode='w',
                            level=logging.INFO)
    model = BaseModel(model_path)
    nature = TrueCausalModel(model)
    names = ["Qlearning", "HalfBlindAgent", "FullyInformedAgent", "Random"]
    agents = [QLearning, HalfBlindAgent, FullyInformedAgent, RandomAgent]
    measures = [None, None, None, None]
    for i in range(len(agents)):
        measures[i] = run_experiments(n_experiments, rounds, agents[i],
                                      nature, model, names[i], target_value)
    vis_utils.plot_rewards_comparison(
        measures, rounds, names,
        "{}experiments_{}rounds".format(n_experiments, rounds))
def read_position_file_lts(lts_file_path):
    fr = codecs.open(lts_file_path, 'r', 'gbk')
    sf_instrument_array = []
    option_array = []
    stock_array = []
    structured_fund_array = []
    mmf_fund_array = []  # money market funds
    for line in fr:
        base_model = BaseModel()
        for tempStr in line.split('|')[1].split(','):
            temp_array = tempStr.replace('\n', '').split(':', 1)
            setattr(base_model, temp_array[0].strip(), temp_array[1])
        if 'OnRspQryInstrument' in line:
            product_id = getattr(base_model, 'ProductID', '')
            # productClass = getattr(baseModel, 'ProductClass', '')
            if product_id in ('SHEOP', 'SHAOP'):
                option_array.append(base_model)
            elif product_id in ('SZA', 'SHA', 'HKA', 'CY'):
                stock_array.append(base_model)
            elif product_id in ('SZOF', 'SHOF'):
                structured_fund_array.append(base_model)
            elif product_id == 'SHFUNDETF':
                mmf_fund_array.append(base_model)
        elif 'OnRspQrySFInstrument' in line:
            sf_instrument_array.append(base_model)
    structured_fund_undl_ticker(sf_instrument_array)
    add_structured_fund(structured_fund_array)
    print('add_option, update record num: %d' % len(option_array))
    add_option(option_array)  # add new options
    print('add_stock, update record num: %d' % len(stock_array))
    add_stock(stock_array)  # update stock suspension dates and add new stocks
    print('add_mmf_fund, update record num: %d' % len(mmf_fund_array))
    add_mmf_fund(mmf_fund_array)  # add new money market funds
def read_position_file_lts(lts_file_path):
    fr = codecs.open(lts_file_path, 'r', 'gbk')
    sf_instrument_array = []
    of_instrument_array = []
    instrument_array = []
    market_array = []
    index_array = []
    for line in fr:
        base_model = BaseModel()
        for temp_str in line.split('|')[1].split(','):
            temp_array = temp_str.replace('\n', '').split(':', 1)
            setattr(base_model, temp_array[0].strip(), temp_array[1])
        if 'OnRspQrySFInstrument' in line:
            sf_instrument_array.append(base_model)
        if 'OnRspQryOFInstrument' in line:
            of_instrument_array.append(base_model)
        elif 'OnRspQryInstrument' in line:
            instrument_array.append(base_model)
        elif 'OnRtnDepthMarketData' in line:
            market_array.append(base_model)
        elif 'OnRtnL2Index' in line:
            index_array.append(base_model)
    print('update_fund, sf_instrument record num: %d' % len(sf_instrument_array))
    print('update_fund, of_instrument record num: %d' % len(of_instrument_array))
    update_fund(sf_instrument_array, of_instrument_array)  # update prev_nav of structured funds
    print('update_instrument_base_info, update record num: %d' % len(instrument_array))
    update_instrument_base_info(instrument_array)  # different time, different function
    print('update_market, update record num: %d' % len(market_array))
    update_market(market_array)  # update prev_close from md market data
    print('update_market_index, update record num: %d' % len(index_array))
    update_market_index(index_array)  # update index prev_close from md L2 data
def read_position_file_hs(hs_file_path, add_flag):
    print('Start read file: ' + hs_file_path)
    fr = open(hs_file_path)
    order_array = []
    trade_array = []
    trading_account_array = []
    investor_position_array = []
    for line in fr.readlines():
        if 'Account_ID' in line:
            account_id = line.replace('\n', '').split(':')[1]
        else:
            base_model = BaseModel()
            for tempStr in line.split('|')[1].split(','):
                temp_array = tempStr.replace('\n', '').split(':', 1)
                if len(temp_array) != 2:
                    continue
                setattr(base_model, temp_array[0].strip(), temp_array[1].strip())
            if 'QryOrder' in line:
                order_array.append(base_model)
            elif 'QryTrade' in line:
                trade_array.append(base_model)
            elif 'ReqQryCash' in line:
                trading_account_array.append(base_model)
            elif 'QryPosition' in line:
                investor_position_array.append(base_model)
    print('AccountID: ' + account_id)
    # delete today's records for this account
    del_account_position_by_id(account_id)
    (order_list, order_dict) = __get_order_list(account_id, order_array)
    trade_list = __get__trade_list(account_id, trade_array, order_dict)
    (position_dict, position_db_list) = __build_account_position(
        account_id, investor_position_array)
    cny_position_db = __get_account_cny(account_id, trading_account_array)
    position_db_list.append(cny_position_db)
    update_db(order_list, trade_list, position_db_list)
def create_baseline_model(task):
    if task in ['mnist', 'kmnist', 'fashion']:
        img_dim = 28
        nb_channels = 1
        out_dim = 10
        res_channels = 16
        nb_res_blocks = 5
        mlp_dim = 32
    elif task in ['cifar10']:
        img_dim = 32
        nb_channels = 3
        out_dim = 10
        res_channels = 32
        nb_res_blocks = 10
        mlp_dim = 32
    else:
        print(f"! Unknown task '{task}'!")
        exit()
    return BaseModel(img_dim, nb_channels, out_dim,
                     res_channels, nb_res_blocks, mlp_dim)
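# --- Added usage note: the factory fixes all hyperparameters per task, so
# callers only name the dataset.
model = create_baseline_model('cifar10')  # 3x32x32 inputs, 10 residual blocks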
def __init__(self, device, mtype=0, in_dim=1, out_dim=31,
             model_file=None, tf_learning=None, name_model=None):
    self.device = device[0]
    self.type = mtype
    self.num_gpu = len(device)
    pretrained = tf_learning is not None
    if mtype == 0:
        self.model = BaseModel(in_dim, out_dim, name_model=name_model,
                               pretrained=pretrained, tf_learning=tf_learning)
    elif mtype == 1:
        self.model = TwoInputBaseModel(in_dim, out_dim)
    elif mtype == 2:
        self.model = VaeModel(nm_ch=in_dim, dm_lat=out_dim)
    elif mtype == 3:
        self.model = ExtdModel(in_dim=in_dim, out_dim=out_dim,
                               name_model=name_model, pretrained=pretrained,
                               tf_learning=tf_learning)
    else:
        raise RuntimeError('unknown model type: %d' % mtype)
    if model_file is not None:
        self.load_model(model_file)
    # if len(device) > 1:
    #     print("Let's use", len(device), "GPUs!")
    #     self.model = nn.DataParallel(self.model, device_ids=device)
    self.model = nn.DataParallel(self.model, device_ids=device)
    self.model.to(self.device)
def generate_model_from_env(env, lights_off=False):
    aj_mat = env.aj
    aj_list, parents = aj_matrix_to_aj_list(aj_mat)
    nodes = list(aj_list.keys())
    ebunch = aj_list_to_ebunch(aj_list)
    if lights_off:
        cpdtables = generate_cpdtables_from_aj_list(parents, invert=True)
    else:
        cpdtables = generate_cpdtables_from_aj_list(parents)
    targets = sorted(parents.keys())
    interventions = ["cause_{}".format(i) for i in range(env.num + 1)]
    target_vals = env.goal
    data = dict()
    data["digraph"] = ebunch
    data["cpdtables"] = cpdtables
    data["target"] = targets
    data["nature_variables"] = []
    data["interventions"] = interventions
    data["target_vals"] = target_vals
    data["nodes"] = nodes
    return BaseModel(data=data)
def main(unused_argv):
    hparams = create_hparams(FLAGS)
    print(hparams)
    if hparams.mode == "train":
        _mode = tf.contrib.learn.ModeKeys.TRAIN
    elif hparams.mode == "eval":
        _mode = tf.contrib.learn.ModeKeys.EVAL
    elif hparams.mode == "infer":
        _mode = tf.contrib.learn.ModeKeys.INFER
    else:
        # original raised a bare string, which is itself a TypeError
        raise ValueError("Unknown Mode!!!")
    model = BaseModel(hparams, mode=_mode)
    print("num_gpus = %d, batch size = %d"
          % (model.num_gpus, hparams.batch_size * model.num_gpus))
    config = make_config()
    if _mode == tf.contrib.learn.ModeKeys.TRAIN:
        train(model, config, hparams)
    if _mode == tf.contrib.learn.ModeKeys.EVAL:
        eval(model, config, hparams)
    if _mode == tf.contrib.learn.ModeKeys.INFER:
        infer(model, config, hparams)
def read_position_file_lts(lts_file_path):
    print('Start read file: ' + lts_file_path)
    fr = open(lts_file_path)
    order_array = []
    trade_array = []
    trading_account_array = []
    investor_position_array = []
    for line in fr.readlines():
        if 'Account_ID' in line:
            account_id = line.replace('\n', '').split(':')[1]
        else:
            base_model = BaseModel()
            for tempStr in line.split('|')[1].split(','):
                temp_array = tempStr.replace('\n', '').split(':', 1)
                setattr(base_model, temp_array[0].strip(), temp_array[1])
            if 'OnRspQryOrder' in line:
                order_array.append(base_model)
            elif 'OnRspQryTrade' in line:
                trade_array.append(base_model)
            elif 'OnRspQryTradingAccount' in line:
                trading_account_array.append(base_model)
            elif 'OnRspQryInvestorPosition' in line:
                investor_position_array.append(base_model)
    print('AccountID: ' + account_id)
    account_info = get_account_info(account_id)
    # delete today's records for this account
    del_account_position_by_id(account_id)
    del_order_trader_by_id(account_id)
    save_order(account_id, order_array)
    save_trade(account_info, trade_array)
    save_account_cny(account_info, trading_account_array)
    save_account_position(account_info, investor_position_array)
""" # Model hyperparameters input_size = args.vocab_size output_size = 4 # num of classes embedding_dim = 100 # embedding dimension hidden_dim = 64 # hidden size of RNN num_layers = 1 # Make Train Loader train_dataset = TextDataset(args.data_dir, 'train', args.vocab_size) args.pad_idx = train_dataset.sentences_vocab.wtoi['<PAD>'] train_loader = make_data_loader(train_dataset, args.batch_size, args.batch_first, shuffle=True) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") args.device = device print("device : ", device) # instantiate model model = BaseModel(input_size, output_size, embedding_dim, hidden_dim, num_layers, batch_first=args.batch_first) model = model.to(device) # Training The Model train(args, train_loader, model)
def rootKey(self, _id='root'):
    # get-or-create: materialise the root entity once, then reuse its key
    if not ndb.Key(BaseModel, _id).get():
        _root = BaseModel(id=_id)
        _root.put()
        return _root.key
    return ndb.Key(BaseModel, _id)
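# --- Added usage sketch (hypothetical): child entities anchored under the
# shared root key live in one entity group, so they can be read and written
# inside a single ndb transaction. `store` stands for an instance of the
# class that defines rootKey(); the child-entity id is illustrative.
root = store.rootKey()                        # ndb.Key(BaseModel, 'root')
item = BaseModel(parent=root, id='settings')  # illustrative child entity
item.put()
fetched = ndb.Key(BaseModel, 'root', BaseModel, 'settings').get()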
def get_data():
    weibo = BaseModel().select()[:10]
    baidu = BaiduModal().select()[:10]
    zhihu = ZhihuModal().select()[:10]
    weixin = WeixinModal().select()[:10]
    return "".join([x.get('title') for x in weibo + baidu + zhihu + weixin])
XE_loss = CrossEntropyLoss()
train_metrics = init_metrics()
test_metrics = init_metrics()

print('--- SETTINGS ---')
print('Number of sentiments to classify:', args.num_sentiments)
print('Learning rate:', INITIAL_LR)
print('Num of epochs per fold:', NUM_EPOCHS)
print('Use gaze features:', args.use_gaze)

print('\n> Starting 10-fold CV.')
for k, (train_loader, test_loader) in enumerate(dataset.split_cross_val(10)):
    # initialize model and optimizer every fold
    model = BaseModel(lstm_units, dataset.max_sentence_length,
                      args.num_sentiments, initial_word_embedding.clone(),
                      args.use_gaze)
    optimizer = SGD(model.parameters(), lr=INITIAL_LR,
                    momentum=0.95, nesterov=True)
    # optimizer_scheduler = lr_scheduler.StepLR(
    #     optimizer, step_size=halve_lr_every_passes, gamma=0.5)
    if USE_CUDA:
        model = model.cuda()

    for e in range(NUM_EPOCHS):
        train_loss, train_results = iterate(train_loader)
        # save the training metrics of the last epoch
def _set_model(self):
    self.model = BaseModel(argv=self.argv,
                           emb=self.emb,
                           n_vocab=self.vocab_word.size(),
                           n_labels=self.vocab_label.size())
    self.model.compile(self._get_input_tensor_variables())