Example #1
def get_model(train_file,
              model_file,
              ndim=784,
              nout=10,
              val_fraction=0.3,
              islearning=True):
    '''Build an MLP or CNN symbol according to the parsed arguments, train it
    with models.fit, and return the trained model (loaded back from
    model_file) together with the network name.'''
    args = parse_args(train_file, model_file, islearning, val_fraction)
    if args.network == 'mlp':
        data_shape = (ndim, )
        net = get_mlp_spice(nout)
    elif args.network == 'cnn':
        data_shape = (15, 30)  # sentence_size, num_embed
        net = get_cnn_spice(num_label=nout, batch_size=args.batch_size)
    else:
        raise ValueError('unsupported network: %s' % args.network)

    # Train the network, then reload the persisted model from model_file.
    model = models.fit(args, net, get_iterator_spice(data_shape))
    model = load_obj(model_file)
    return model, args.network
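A minimal usage sketch for get_model above; the file names and dimensions are placeholders, and the helper is assumed to be importable from its defining module:

model, network = get_model('data/train.csv', 'spice_mlp.params',
                           ndim=784, nout=10, islearning=True)
print('trained a %s model' % network)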
Example #2
def fit(store=None, data=None, **kwargs):
    '''Thin wrapper: bundle the keyword arguments into a Configuration and the
    store/data pair into a DataContext, then delegate to models.fit.'''
    return models.fit(Configuration(**kwargs), DataContext(store, data))
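This wrapper lets callers drive models.fit entirely through keyword arguments. A hedged usage sketch; the keyword names forwarded to Configuration (e.g. epochs) are assumptions, not part of the snippet:

result = fit(store='runs/local', data='train.csv', epochs=10)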
Example #3
    out_dir = os.path.join(path, 'output')
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    os.chdir(out_dir)
    # Write the CSV, then compress it into a same-named .zip archive.
    result.to_csv(filename, index=False)
    with zipfile.ZipFile(
            os.path.splitext(filename)[0] + '.zip', 'w',
            zipfile.ZIP_DEFLATED) as zf:
        zf.write(filename)


if __name__ == "__main__":
    '''Repayment records for the first installment of loans originated between
    2018-01-01 and 2018-12-31 are provided as the training set; the task is to
    predict first-installment repayment behaviour for loans originated between
    2019-02-01 and 2019-03-31.'''

    # Possible framings: time series, regression, or classification (exclude
    # overdue users and report the remaining users' repayment day directly),
    # or binary / multi-class classification (overdue; repaid 1, 2 or 3 days
    # before the due date).
    args = parse_command_params()

    # analysis_data(args['path'])

    train, test = gen_data(args)
    print(train.shape, test.shape)
    cols = list(
        set(train.columns) - set([
            'user_id', 'listing_id', 'auditing_date', 'due_date', 'insertdate',
            'label'
        ]))
    model = models.fit(train, args, cols)
    result = gen_result(model, test, cols)
    save_zip(result, args)
    print('done !')
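For illustration, a sketch of what the gen_result step might look like, assuming the fitted model exposes a scikit-learn style predict() and that predictions are keyed by listing_id; the column names are hypothetical, not the competition schema:

def gen_result_sketch(model, test, cols):
    # Score the held-out listings and keep only the id plus the prediction.
    out = test[['listing_id']].copy()
    out['prediction'] = model.predict(test[cols])
    return out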
Example #4
    # For songs sampling
    "TEMPERATURE": 1,
    "TAKE_MAX_PROBABLE": False,
    "LIMIT_LEN": 300
}
print(config)

# model = VanillaRNN(config["VOCAB_SIZE"], config["HIDDEN"], config["VOCAB_SIZE"]).to(get_device())
model = LSTMSimple(config["VOCAB_SIZE"], config["HIDDEN"],
                   config["VOCAB_SIZE"]).to(get_device())

criterion = CrossEntropyLoss()

# Fit Model
fit(model, train_encoded, val_encoded, config)

# Report NLL for validation and test
nll_val = negative_log_likelihood(model, val_encoded, criterion, config)
nll_test = negative_log_likelihood(model, test_encoded, criterion, config)
print("NLL Validation: {}".format(nll_val))
print("NLL Test: {}".format(nll_test))

# Save error plot to file
save_loss_graph(model)

# Save model to file
print("Saving model...")
now = datetime.now().strftime('%Y-%m-%d-%H-%M')
torch.save(model.state_dict(), "model" + now + ".pth")
print("Saved!")
Example #5
def get_reward(actions, dataset):
    reward = models.fit(actions, dataset)
    return reward
Example #6
    def get_reward(self, actions):
        reward = models.fit(actions, self.dataset)
        return reward
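The last two examples treat models.fit as a black-box scorer for a candidate action vector. A hedged sketch of how a controller could use that reward, with purely illustrative random sampling and a hypothetical env object exposing the get_reward method above:

import random

def random_search(env, num_trials=10, num_actions=4):
    # Sample random binary action vectors and keep the best-scoring one.
    best_actions, best_reward = None, float('-inf')
    for _ in range(num_trials):
        actions = [random.randint(0, 1) for _ in range(num_actions)]
        reward = env.get_reward(actions)
        if reward > best_reward:
            best_actions, best_reward = actions, reward
    return best_actions, best_reward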