def main():
    """Build a model and document reader for the selected corpus, then train or eval.

    The corpus is currently hard-wired via the `target` constant below.
    """
    # NOTE(review): target is hard-coded; consider promoting to a FLAG.
    target = "PICOSentence"  # one of: "PICO", "PICOSentence", "NYT"

    # Per-target settings: encoder, task list, and document reader.
    # The PICO target keeps the encoder configurable via FLAGS; the other
    # two pin it to "CNN", matching the original behavior.
    if target == "PICO":
        encoder = FLAGS.encoder
        task_names = ["Classification"]
        document_reader = pico_reader.PICOReader(annotype="Outcome")
    elif target == "PICOSentence":
        encoder = "CNN"
        task_names = ["Participants", "Intervention", "Outcome"]
        document_reader = pico_sentence_reader.PICOSentenceReader(
            annotype="multitask")
    elif target == "NYT":
        encoder = "CNN"
        task_names = ["Business"]
        document_reader = nyt_reader.NYTReader(genre="Business")
    else:
        raise ValueError("Unknown target: %r" % target)

    model = NNModel(
        mode=FLAGS.mode,
        is_classifier=True,
        encoder=encoder,
        num_tasks=len(task_names),  # 1 for PICO/NYT, 3 for PICOSentence
        task_names=task_names,
        max_document_length=FLAGS.max_document_length,
        cnn_filter_sizes=list(map(int, FLAGS.cnn_filter_sizes.split(","))),
        cnn_num_filters=FLAGS.cnn_num_filters,
        rnn_bidirectional=FLAGS.rnn_bidirectional,
        rnn_cell_type=FLAGS.rnn_cell_type,
        rnn_num_layers=FLAGS.rnn_num_layers)

    if FLAGS.mode == MODE_TRAIN:
        nn_utils.train(model, document_reader, FLAGS)
    elif FLAGS.mode == MODE_EVAL:
        # NOTE(review): checkpoint path is hard-coded; parameterize via FLAGS.
        checkpoint = "./test/train/model-1000"
        nn_utils.eval(model, document_reader, checkpoint, FLAGS)
def _add_to_training_data(chosen, choice):
    """Append a training record (rejected candidates ; choice) and retrain.

    Args:
        chosen: mapping of the candidates that were offered to the user.
        choice: the candidate the user actually picked.

    Returns:
        True if the model retrained and saved successfully, False otherwise.
    """
    # Persist the example first so it is not lost even if training fails.
    rejected = [c for c in chosen if c != choice]
    with open(CHOICES_PATH, 'a+') as f:
        f.write(','.join(rejected) + ';' + choice + '\n')
    try:
        # Only feed candidates whose first element is known to the embedding.
        # NOTE(review): filter is on c[0] (first char/element of the key) —
        # confirm this is intended rather than `c in ingr2vec`.
        nn_utils.train(suggestor, ingr2vec,
                       [c for c in chosen if c[0] in ingr2vec], choice)
        suggestor.save(MODEL_PATH)
        return True
    except ValueError:
        print('Failed to train model.')
        return False
def main(args, digitizeInput, net, _run):
    """Train a model on MNIST and log metrics/duration to the sacred `_run`.

    Args:
        args: namespace of hyperparameters (seed, batch sizes, epochs, ...).
        digitizeInput: if truthy, quantize the train/test loaders in place.
        net: key into the model registry ('ref_net' or 'manhattan_net').
        _run: sacred run object used for scalar logging.
    """
    # NOTE(review): attribute is spelled `noCude` (sic) in the flags — kept
    # as-is because callers define it under that name.
    use_cuda = not args.noCude and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    # Standard MNIST normalization constants (dataset mean/std); shared by
    # both loaders instead of being duplicated inline.
    mnist_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=True, download=True,
                       transform=mnist_transform),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=False, transform=mnist_transform),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)

    if digitizeInput:
        digitize_input(train_loader, args)
        digitize_input(test_loader, args)

    models = {'ref_net': nn_modules.ref_net,
              'manhattan_net': nn_modules.manhattan_net}
    model = models[net](args)
    model.to(device)

    test_iterator = iter(test_loader)
    start = time.time()
    # BUG FIX: `range(1, args.epochs)` skipped the final epoch (and ran no
    # epochs at all when args.epochs == 1); run exactly args.epochs epochs.
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, test_loader, test_iterator,
              model.criterion, epoch, _run)
        test(args, model, device, test_loader, model.criterion, _run)
        model.optimizer_step(epoch)
    end = time.time()
    _run.log_scalar('Duration', end - start)

    if args.save_model:
        torch.save(model.state_dict(), "mnist_cnn.pt")
def main():
    """Build a single-task NYT genre classifier and dispatch on FLAGS.mode."""
    model = NNModel(
        mode=FLAGS.mode,
        is_classifier=True,
        encoder=FLAGS.encoder,
        num_tasks=1,
        task_names=[FLAGS.genre],
        max_document_length=FLAGS.max_document_length,
        cnn_filter_sizes=list(map(int, FLAGS.cnn_filter_sizes.split(","))),
        cnn_num_filters=FLAGS.cnn_num_filters,
        rnn_cell_type=FLAGS.rnn_cell_type,
        rnn_bidirectional=FLAGS.rnn_bidirectional,
        rnn_num_layers=FLAGS.rnn_num_layers)
    document_reader = nyt_reader.NYTReader(genre=FLAGS.genre)

    if FLAGS.mode == MODE_TRAIN:
        nn_utils.train(model, document_reader, FLAGS)
    # BUG FIX: previously compared `FLAGS.model` (no such flag); the mode
    # flag is what the TRAIN branch above checks.
    elif FLAGS.mode == MODEL_INFER:
        # NOTE(review): constant is named MODEL_INFER here — confirm it
        # shouldn't be MODE_INFER to match MODE_TRAIN/MODE_EVAL elsewhere.
        pass
if validation:
    # Held-out evaluation data; NOTE(review): reuses test_dataset_path —
    # confirm validation and test are meant to share the same file.
    validation_dataset = nn_utils.MyDataset(test_dataset_path)
    validation_loader = DataLoader(dataset=validation_dataset,
                                   batch_size=val_batch_size,
                                   shuffle=True, num_workers=0)
# --------------------------------------
val_losses = []
val_accuracies = []
for epoch in range(start, n_epochs):
    start_time = time.time()
    train_losses, train_accuracies, vis_i = nn_utils.train(
        net, training_loader, criterion, optimizer, epoch, save,
        win_name + '_loss_iter', vis_i)
    # BUG FIX: np.asscalar was deprecated in NumPy 1.16 and removed in
    # 1.23; ndarray.item() is the documented replacement.
    print('[%d] train_loss: %.3f' % (epoch + 1, np.mean(train_losses).item())
          + ' train_acc: %.3f' % (np.mean(train_accuracies).item()),
          end=' ')
    if validation:
        val_losses, val_accuracies = nn_utils.validate(
            net, validation_loader, criterion)
        print(' val_loss: %.3f' % (np.mean(val_losses).item())
              + ' val_acc: %.3f' % (np.mean(val_accuracies).item()),
              end=' ')