Example #1
File: train.py Project: RitiP/ZSL
import numpy
import torch
import torch.nn as nn
from torch.autograd import Variable

import Model


def trainAE(learning_rate, loss_lambda):

    # create the autoencoder model: 85-d attribute input, 2048-d hidden feature space
    model = Model.AEModel(85, 2048).cuda()
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)
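    # num_epochs, loader_tr and save_path are assumed to be defined elsewhere
    # in train.py (number of training epochs, training DataLoader, checkpoint path)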

    for epoch in range(num_epochs):
        for data in loader_tr:
            feature_vec, _, attr_vec, _ = data
            input = Variable(torch.from_numpy(numpy.array(attr_vec))).cuda()
            target_h = Variable(torch.from_numpy(numpy.array(feature_vec))).cuda()
            target = Variable(torch.from_numpy(numpy.array(attr_vec))).cuda()

            output_h, output = model(input)
            # the hidden representation should match the visual features and the
            # reconstruction should match the input attributes
            loss = criterion(output_h, target_h) + (loss_lambda * criterion(output, target))

            # zero the gradient
            optimizer.zero_grad()

            # perform a backward pass
            loss.backward()

            # update the parameters
            optimizer.step()

        # save a checkpoint every 50 epochs
        if (epoch % 50) == 0:
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': loss,
            }, save_path)

    # save the final model once training completes
    torch.save(model, save_path + '/AEModel.pth')
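
The Model.AEModel class is not part of this snippet. As a rough sketch only, an autoencoder consistent with the call Model.AEModel(85, 2048) and with `output_h, output = model(input)` above could look like the following; the single-layer encoder/decoder and the parameter names are assumptions, not the project's actual implementation.

import torch.nn as nn

class AEModel(nn.Module):
    # hypothetical sketch: map 85-d attribute vectors into a 2048-d feature
    # space (compared against feature_vec) and decode back to 85-d attributes
    # (compared against attr_vec)
    def __init__(self, attr_dim, feat_dim):
        super(AEModel, self).__init__()
        self.encoder = nn.Linear(attr_dim, feat_dim)
        self.decoder = nn.Linear(feat_dim, attr_dim)

    def forward(self, x):
        hidden = self.encoder(x)
        reconstruction = self.decoder(hidden)
        return hidden, reconstruction

A call such as trainAE(1e-3, 0.1) would then run the training loop above; these hyperparameter values are illustrative only.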