# Size the embedding layer from the training vocabulary.
config.words_num = len(train_iter.dataset.TEXT_FIELD.vocab)

# Summarize the dataset splits before training starts.
print('Dataset {} Mode {}'.format(args.dataset, args.mode))
print('VOCAB num', len(train_iter.dataset.TEXT_FIELD.vocab))
print('LABEL.target_class:', train_iter.dataset.NUM_CLASSES)
print('Train instance', len(train_iter.dataset))
print('Dev instance', len(dev_iter.dataset))
print('Test instance', len(test_iter.dataset))

if args.resume_snapshot:
    # Resume from a saved snapshot, remapping storages onto the requested device.
    # NOTE(review): torch.load unpickles arbitrary objects — only load trusted snapshots.
    if args.cuda:
        model = torch.load(args.resume_snapshot,
                           map_location=lambda storage, location: storage.cuda(args.gpu))
    else:
        model = torch.load(args.resume_snapshot,
                           map_location=lambda storage, location: storage)
else:
    # Fresh model; move it to the GPU when requested.
    # NOTE(review): original line was collapsed — reading the cuda shift as part
    # of the fresh-model branch, matching the conventional layout; confirm.
    model = KimCNN(config)
    if args.cuda:
        model.cuda()
        print('Shift model to GPU')

# Optimize only trainable parameters (e.g. frozen embeddings are excluded).
parameter = filter(lambda p: p.requires_grad, model.parameters())
optimizer = torch.optim.Adadelta(parameter, lr=args.lr, weight_decay=args.weight_decay)

# FIX: the original if/elif chain over 'SST-1'/'SST-2' silently fell through for
# any other dataset name, leaving the evaluators undefined and causing a
# NameError far from the cause. Dispatch through a dict and fail fast instead.
_evaluator_dataset_classes = {'SST-1': SST1, 'SST-2': SST2}
if args.dataset not in _evaluator_dataset_classes:
    raise ValueError('Unrecognized dataset: {}'.format(args.dataset))
_dataset_cls = _evaluator_dataset_classes[args.dataset]
train_evaluator = EvaluatorFactory.get_evaluator(_dataset_cls, model, None, train_iter, args.batch_size, args.gpu)
test_evaluator = EvaluatorFactory.get_evaluator(_dataset_cls, model, None, test_iter, args.batch_size, args.gpu)
dev_evaluator = EvaluatorFactory.get_evaluator(_dataset_cls, model, None, dev_iter, args.batch_size, args.gpu)
# Report split statistics so the run log records the data actually used.
print('VOCAB num', len(train_iter.dataset.TEXT_FIELD.vocab))
print('LABEL.target_class:', train_iter.dataset.NUM_CLASSES)
print('Train instance', len(train_iter.dataset))
print('Dev instance', len(dev_iter.dataset))
print('Test instance', len(test_iter.dataset))

# Either restore a snapshot (remapping tensor storages to the chosen device)
# or build a fresh KimCNN from the config.
# NOTE(review): original line was collapsed — reading the cuda shift as part
# of the fresh-model branch, matching the conventional layout; confirm.
if not args.resume_snapshot:
    model = KimCNN(config)
    if args.cuda:
        model.cuda()
        print('Shift model to GPU')
elif args.cuda:
    model = torch.load(
        args.resume_snapshot,
        map_location=lambda storage, location: storage.cuda(args.gpu))
else:
    model = torch.load(args.resume_snapshot,
                       map_location=lambda storage, location: storage)

# Adam over the trainable parameters only.
parameter = filter(lambda p: p.requires_grad, model.parameters())
optimizer = torch.optim.Adam(parameter, lr=args.lr,
                             weight_decay=args.weight_decay)

# Resolve the evaluator's dataset class via the registry; unknown names abort.
if args.dataset not in dataset_map:
    raise ValueError('Unrecognized dataset')
train_evaluator = EvaluatorFactory.get_evaluator(
    dataset_map[args.dataset], model, None, train_iter, args.batch_size, args.gpu)