print '<Main> Vocabulary size: objects %d | text %d' % (obj_vocab_size, text_vocab_size)
sys.stdout.flush()

######## Instruction model ########

## load pickled goal observations, instruction indices, and regression targets
goal_obs = pickle.load(
    open(os.path.join(args.load_path, 'goal_obs' + str(args.num_worlds) + '.p'), 'r'))
indices_obs = pickle.load(
    open(os.path.join(args.load_path, 'indices_obs' + str(args.num_worlds) + '.p'), 'r'))
targets = pickle.load(
    open(os.path.join(args.load_path, 'targets' + str(args.num_worlds) + '.p'), 'r'))
rank = targets.size(1)

## instruction (LSTM) model and object embedding model, combined in the Psi model
text_model = models.TextModel(text_vocab_size, args.lstm_inp, args.lstm_hid,
                              args.lstm_layers, args.lstm_out)
object_model = models.ObjectModel(obj_vocab_size, args.obj_embed,
                                  goal_obs[0].size(), args.lstm_out)
psi = models.Psi(text_model, object_model, args.lstm_out, args.goal_hid, rank).cuda()

## wrap psi in a trainer and fit it to the goal observations
psi = pipeline.Trainer(psi, args.lr, args.batch_size)
print '\n<Main> Training psi: (', goal_obs.size(), 'x', indices_obs.size(), ') -->', targets.size()
psi.train((goal_obs, indices_obs), targets, iters=args.psi_iters)
## training and validation datasets of intrinsic images
## (the opening arguments of the train_set call, including `args.train_sets`,
## are assumed to mirror the val_set construction below)
train_set = pipeline.IntrinsicDataset(args.data_path, args.train_sets, args.intrinsics,
                                      size_per_dataset=args.num_train)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=32,
                                           num_workers=4, shuffle=True)

val_set = pipeline.IntrinsicDataset(args.data_path, args.val_sets, args.intrinsics,
                                    size_per_dataset=10)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=32,
                                         num_workers=4, shuffle=False)

trainer = pipeline.Trainer(shader, train_loader, args.lr)

for epoch in range(args.num_epochs):
    print("<Main> Epoch {}".format(epoch))

    ## save model and state
    torch.save(shader, open(os.path.join(args.save_path, "model.t7"), "wb"))
    torch.save(shader.state_dict(), open(os.path.join(args.save_path, "state.pth"), "wb"))

    ## visualize predictions of shader
    save_path = os.path.join(args.save_path, str(epoch) + ".png")
    pipeline.visualize_shader(shader, val_loader, save_path)

    ## one sweep through the dataset
    trainer.train()
print '<Main> Training: (', train_layouts.size(), 'x', train_objects.size(), 'x', train_indices.size(), ') -->', train_values.size()
print '<Main> Test    : (', test_layouts.size(), 'x', test_objects.size(), 'x', test_indices.size(), ') -->', test_values.size()

#################################
############ Training ###########
#################################

print '\n<Main> Initializing model: {}'.format(args.model)
model = models.init(args, layout_vocab_size, object_vocab_size, text_vocab_size)

train_inputs = (train_layouts, train_objects, train_indices)
test_inputs = (test_layouts, test_objects, test_indices)

print '<Main> Training model'
trainer = pipeline.Trainer(model, args.lr, args.batch_size)
trainer.train(train_inputs, train_values, test_inputs, test_values, iters=args.iters)

#################################
######## Save predictions #######
#################################

## make logging directories
pickle_path = os.path.join(args.save_path, 'pickle')
utils.mkdir(args.save_path)
utils.mkdir(pickle_path)

print '\n<Main> Saving model to {}'.format(args.save_path)

## save model
torch.save(model, os.path.join(args.save_path, 'model.pth'))
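## Example (a hedged sketch, not part of the original script): the saved model
## can be reloaded later with torch.load for evaluation. Calling it directly on
## the test tuple assumes its forward signature matches the training inputs.
# reloaded = torch.load(os.path.join(args.save_path, 'model.pth'))
# reloaded.eval()
# test_predictions = reloaded(*test_inputs)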
def train(model, inputs, targets, lr, batch_size, iters):
    ## wrap the model in a Trainer, optimize for the given number of iterations,
    ## and return the Trainer wrapper (which holds the trained model)
    trainer = pipeline.Trainer(model, lr, batch_size)
    trainer.train(inputs, targets, iters=iters)
    return trainer
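## Example usage of the helper above (a sketch; `model`, `train_inputs`,
## `train_values`, and `args` mirror the names in the training script and are
## assumed to be in scope here):
# trainer = train(model, train_inputs, train_values, args.lr, args.batch_size, args.iters)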