(train, dev, test), batch_size=args.batch_size, device=args.gpu) config = args config.n_embed = len(inputs.vocab) config.d_out = len(answers.vocab) config.n_cells = config.n_layers if config.birnn: config.n_cells *= 2 if args.resume_snapshot: model = torch.load(args.resume_snapshot, map_location=lambda storage, locatoin: storage.cuda(args.gpu)) else: model = SNLIClassifier(config) if args.word_vectors: model.embed.weight.data = inputs.vocab.vectors model.cuda() criterion = nn.CrossEntropyLoss() opt = O.Adam(model.parameters(), lr=args.lr) iterations = 0 start = time.time() best_dev_acc = -1 train_iter.repeat = False header = ' Time Epoch Iteration Progress (%Epoch) Loss Dev/Loss Accuracy Dev/Accuracy' dev_log_template = ' '.join('{:>6.0f},{:>5.0f},{:>9.0f},{:>5.0f}/{:<5.0f} {:>7.0f}%,{:>8.6f},{:8.6f},{:12.4f},{:12.4f}'.split(',')) log_template = ' '.join('{:>6.0f},{:>5.0f},{:>9.0f},{:>5.0f}/{:<5.0f} {:>7.0f}%,{:>8.6f},{},{:12.4f},{}'.split(',')) os.makedirs(args.save_path, exist_ok=True) print(header) for epoch in range(args.epochs):
# NOTE(review): this chunk begins mid-sequence — `config` is created
# (presumably aliased from the parsed args) outside this view.
config.d_out = len(answers.vocab)  # number of output classes
config.n_cells = config.n_layers
# double the number of cells for bidirectional networks
if config.birnn:
    config.n_cells *= 2

if args.resume_snapshot:
    # Resume from a snapshot, remapping saved tensors onto the --gpu
    # device.  Fix: lambda parameter was misspelled `locatoin` (never
    # referenced, but confusing to readers).
    model = torch.load(
        args.resume_snapshot,
        map_location=lambda storage, location: storage.cuda(args.gpu))
else:
    model = SNLIClassifier(config)
    if args.word_vectors:
        # In-place copy keeps the embedding Parameter's storage intact.
        model.embed.weight.data.copy_(inputs.vocab.vectors)
    model.cuda(args.gpu)

criterion = nn.CrossEntropyLoss()
opt = O.Adam(model.parameters(), lr=args.lr)

# Bookkeeping for the training loop.
iterations = 0
start = time.time()
best_dev_acc = -1
train_iter.repeat = False
header = ' Time Epoch Iteration Progress (%Epoch) Loss Dev/Loss Accuracy Dev/Accuracy'
dev_log_template = ' '.join(
    '{:>6.0f},{:>5.0f},{:>9.0f},{:>5.0f}/{:<5.0f} {:>7.0f}%,{:>8.6f},{:8.6f},{:12.4f},{:12.4f}'
    .split(','))
log_template = ' '.join(
    '{:>6.0f},{:>5.0f},{:>9.0f},{:>5.0f}/{:<5.0f} {:>7.0f}%,{:>8.6f},{},{:12.4f},{}'
    .split(','))
config.n_cells *= 2 if args.resume_snapshot: model = torch.load( args.resume_snapshot, map_location=lambda storage, location: storage.cuda(args.gpu)) else: model = SNLIClassifier(config) if args.word_vectors: # Load word vectors word_vectors = word_to_vector.aliases[args.word_vectors]() for i, token in enumerate(sentence_encoder.vocab): model.embed.weight.data[i] = word_vectors[token] if args.gpu >= 0: model.cuda() criterion = nn.CrossEntropyLoss() opt = optim.Adam(model.parameters(), lr=args.lr) iterations = 0 start = time.time() best_dev_acc = -1 header = ' Time Epoch Iteration Progress (%Epoch) Loss Dev/Loss Accuracy Dev/Accuracy' dev_log_template = ' '.join( '{:>6.0f},{:>5.0f},{:>9.0f},{:>5.0f}/{:<5.0f} {:>7.0f}%,{:>8.6f},{:8.6f},{:12.4f},{:12.4f}' .split(',')) log_template = ' '.join( '{:>6.0f},{:>5.0f},{:>9.0f},{:>5.0f}/{:<5.0f} {:>7.0f}%,{:>8.6f},{},{:12.4f},{}' .split(',')) makedirs(args.save_path)