# Dataset-dependent model configuration
dataset_class = dataset_map[args.dataset]
config.target_class = train_iter.dataset.NUM_CLASSES
config.words_num = len(train_iter.dataset.TEXT_FIELD.vocab)

print('Dataset:', args.dataset)
print('No. of target classes:', train_iter.dataset.NUM_CLASSES)
print('No. of train instances', len(train_iter.dataset))
print('No. of dev instances', len(dev_iter.dataset))
print('No. of test instances', len(test_iter.dataset))

# Resume from a saved snapshot if one is given; otherwise build a fresh model.
# The map_location lambda remaps GPU-saved tensors onto the right device.
if args.resume_snapshot:
    if args.cuda:
        model = torch.load(args.resume_snapshot,
                           map_location=lambda storage, location: storage.cuda(args.gpu))
    else:
        model = torch.load(args.resume_snapshot,
                           map_location=lambda storage, location: storage)
else:
    model = XmlCNN(config)
model.to(device)

# Ensure the snapshot directory exists before training starts
if not args.trained_model:
    save_path = os.path.join(args.save_path, dataset_class.NAME)
    os.makedirs(save_path, exist_ok=True)

# Optimize only the parameters that require gradients
parameter = filter(lambda p: p.requires_grad, model.parameters())
optimizer = torch.optim.Adam(parameter, lr=args.lr, weight_decay=args.weight_decay)

train_evaluator = EvaluatorFactory.get_evaluator(dataset_class, model, None, train_iter, args.batch_size, args.gpu)
test_evaluator = EvaluatorFactory.get_evaluator(dataset_class, model, None, test_iter, args.batch_size, args.gpu)
dev_evaluator = EvaluatorFactory.get_evaluator(dataset_class, model, None, dev_iter, args.batch_size, args.gpu)

# Multi-label datasets need the evaluator switched into multi-label mode
if hasattr(train_evaluator, 'is_multilabel'):
    train_evaluator.is_multilabel = dataset_class.IS_MULTILABEL
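# A minimal sketch of reloading a snapshot saved under save_path for
# inference only ('snapshot_path' is a hypothetical placeholder; the
# map_location lambda mirrors the CPU-loading branch above so GPU-saved
# models can be opened on a CPU-only machine):
#
#     model = torch.load(snapshot_path, map_location=lambda storage, location: storage)
#     model.eval()  # disable dropout for deterministic evaluation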