Example #1
import torch
import torch.optim as optim

def train(epochs, layer, lr, lambd):
    # `FNN`, `feature`, `weight`, `weight_init`, `args`, `out`, `idx_train`,
    # `idx_val`, `Loss` and `train_epoch` are module-level objects defined
    # elsewhere in the source project.
    model = FNN(feature.shape[2], 1, layer, 128)
    model.apply(weight_init)
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=lambd)

    if args.cuda:
        model = model.cuda()

    print("Training FNN for %d layers, %f learning rate, %f lambda" % (layer, lr, lambd))

    for epoch in range(epochs):
        train_epoch(epoch, model, optimizer, lambd)

    # Run the trained model on each slice along dimension 1 and concatenate
    # the per-slice predictions along that dimension.
    output = model(feature[:, 0, :])
    output = output.unsqueeze(1)
    for i in range(1, feature.shape[1]):
        output = torch.cat((output, model(feature[:, i, :]).unsqueeze(1)), 1)

    # t_weight = torch.stack((weight, weight), 2)
    t_weight = weight
    output = output.squeeze()

    # Combine the per-slice predictions as a weighted sum.
    output = torch.mul(t_weight, output)
    output = torch.sum(output, 1)

    # Training loss is computed but only the validation loss is reported.
    loss_train = Loss(output[idx_train], out[idx_train])
    loss_val = Loss(output[idx_val], out[idx_val])

    print("Result for %d layers, %f learning rate, %f lambda" % (layer, lr, lambd))
    print('loss_val: {:.4f}'.format(loss_val.item()))

    return model, output, loss_val
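
A minimal usage sketch for this variant follows; the epoch count and the hyperparameter grid are illustrative assumptions, not values taken from the source project.

# Hypothetical grid search over the train() hyperparameters; the value ranges
# below are assumptions for illustration only.
best_loss, best_model, best_output = float('inf'), None, None
for layer in (2, 3, 4):
    for lr in (1e-2, 1e-3):
        for lambd in (0.0, 1e-4):
            model, output, loss_val = train(epochs=200, layer=layer, lr=lr, lambd=lambd)
            if loss_val.item() < best_loss:
                best_loss = loss_val.item()
                best_model, best_output = model, output
print('best validation loss: {:.4f}'.format(best_loss))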
Example #2
import torch.optim as optim
import torch.nn.functional as F

def train(epochs, layer, lr, lambd, idx_train, idx_val):
    # `FNN`, `feature`, `weight_init`, `args`, `out` and `train_epoch` are
    # module-level objects defined elsewhere in the source project.
    # Unlike the first example, the whole 2-D feature matrix is fed through
    # the network in one pass and the validation loss is plain MSE.
    model = FNN(feature.shape[1], out.shape[1], layer, 128)
    model.apply(weight_init)
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=lambd)

    if args.cuda:
        model = model.cuda()

    print("Training FNN for %d layers, %f learning rate, %f lambda" % (layer, lr, lambd))

    for epoch in range(epochs):
        train_epoch(epoch, model, optimizer, lambd, idx_train, idx_val)

    # Evaluate on the full feature matrix and report the validation MSE.
    output = model(feature)
    loss_val = F.mse_loss(output[idx_val], out[idx_val])

    print("Result for %d layers, %f learning rate, %f lambda" % (layer, lr, lambd))
    print('loss_val: {:.4f}'.format(loss_val.item()))

    return output, loss_val
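
A minimal calling sketch for this second variant, assuming the same module-level `feature` and `out` tensors; the 80/20 split and the hyperparameter values are illustrative assumptions, not part of the original project.

import torch

# Hypothetical random train/validation split over the rows of `feature`;
# the split ratio and hyperparameters below are assumptions for illustration.
n = feature.shape[0]
perm = torch.randperm(n)
idx_train, idx_val = perm[:int(0.8 * n)], perm[int(0.8 * n):]

output, loss_val = train(epochs=200, layer=3, lr=1e-3, lambd=1e-4,
                         idx_train=idx_train, idx_val=idx_val)
print('validation MSE: {:.4f}'.format(loss_val.item()))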