Example #1
    print('Dataset:', args.dataset)
    print('No. of target classes:', train_iter.dataset.NUM_CLASSES)
    print('No. of train instances:', len(train_iter.dataset))
    print('No. of dev instances:', len(dev_iter.dataset))
    print('No. of test instances:', len(test_iter.dataset))

    if args.resume_snapshot:
        if args.cuda:
            model = torch.load(
                args.resume_snapshot,
                map_location=lambda storage, location: storage.cuda(args.gpu))
        else:
            model = torch.load(args.resume_snapshot,
                               map_location=lambda storage, location: storage)
    else:
        model = KimCNN(config)
        if args.cuda:
            model.cuda()

    if not args.trained_model:
        save_path = os.path.join(args.save_path,
                                 dataset_map[args.dataset].NAME)
        os.makedirs(save_path, exist_ok=True)

    parameter = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = torch.optim.Adam(parameter,
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    train_evaluator = EvaluatorFactory.get_evaluator(dataset_map[args.dataset],
                                                     model, None, train_iter,
                                                     args.batch_size, args.gpu)
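The resume branch above uses torch.load's map_location hook to remap saved tensors onto the right device. A minimal stand-alone sketch of the same pattern, assuming a checkpoint written with torch.save(model, path); the string form of map_location is equivalent to the lambda used in the example:

import torch

def load_snapshot(path, cuda=False, gpu=0):
    # 'cuda:N' / 'cpu' strings perform the same remapping as the
    # lambda storage, location: ... form used in the snippet above
    if cuda:
        return torch.load(path, map_location='cuda:{}'.format(gpu))
    return torch.load(path, map_location='cpu')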
Example #2
    print('Dataset:', args.dataset)
    print('No. of target classes:', train_iter.dataset.NUM_CLASSES)
    print('No. of train instances:', len(train_iter.dataset))
    print('No. of dev instances:', len(dev_iter.dataset))
    print('No. of test instances:', len(test_iter.dataset))

    if args.resume_snapshot:
        if args.cuda:
            model = torch.load(
                args.resume_snapshot,
                map_location=lambda storage, location: storage.cuda(args.gpu))
        else:
            model = torch.load(args.resume_snapshot,
                               map_location=lambda storage, location: storage)
    else:
        model = KimCNN(config)
        if args.cuda:
            model.to(args.device)

    if not args.trained_model:
        save_path = os.path.join(args.save_path,
                                 dataset_map[args.dataset].NAME)
        os.makedirs(save_path, exist_ok=True)

    parameter = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = torch.optim.Adam(parameter,
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    train_evaluator = EvaluatorFactory.get_evaluator(dataset_map[args.dataset],
                                                     model, None, train_iter,
                                                     args.batch_size, args.gpu)
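The filter over requires_grad matters when parts of the model (e.g., pretrained embeddings) are frozen: Adam then only updates the trainable subset. A self-contained sketch of the same pattern with illustrative hyperparameters (the Linear layer stands in for KimCNN):

import torch

model = torch.nn.Linear(300, 20)
model.weight.requires_grad = False  # pretend this part is frozen

trainable = [p for p in model.parameters() if p.requires_grad]
print('Num trainable parameters:', sum(p.numel() for p in trainable))
optimizer = torch.optim.Adam(trainable, lr=1e-3, weight_decay=1e-5)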
Example #3
    config.target_class = train_iter.dataset.NUM_CLASSES
    config.words_num = len(train_iter.dataset.TEXT_FIELD.vocab)

    print('Dataset:', args.dataset)
    print('No. of target classes:', train_iter.dataset.NUM_CLASSES)
    print('No. of train instances:', len(train_iter.dataset))
    print('No. of dev instances:', len(dev_iter.dataset))
    print('No. of test instances:', len(test_iter.dataset))

    if args.resume_snapshot:
        if args.cuda:
            model = torch.load(args.resume_snapshot, map_location=lambda storage, location: storage.cuda(args.gpu))
        else:
            model = torch.load(args.resume_snapshot, map_location=lambda storage, location: storage)
    else:
        model = KimCNN(config)
        if args.cuda:
            model.cuda()

    if not args.trained_model:
        save_path = os.path.join(args.save_path, dataset_map[args.dataset].NAME)
        os.makedirs(save_path, exist_ok=True)

    print("Num Parameters: " + str(sum(p.numel() for p in model.parameters() if p.requires_grad)))

    parameter = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = torch.optim.Adam(parameter, lr=args.lr, weight_decay=args.weight_decay)

    train_evaluator = EvaluatorFactory.get_evaluator(dataset_map[args.dataset], model, None, train_iter, args.batch_size, args.gpu)
    test_evaluator = EvaluatorFactory.get_evaluator(dataset_map[args.dataset], model, None, test_iter, args.batch_size, args.gpu)
    dev_evaluator = EvaluatorFactory.get_evaluator(dataset_map[args.dataset], model, None, dev_iter, args.batch_size, args.gpu)
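These examples pickle the entire model object with torch.save(model, path), which is why torch.load returns a ready-to-use KimCNN. A minimal sketch of the state_dict alternative, which is the more commonly recommended checkpoint form (TinyNet is a hypothetical stand-in for KimCNN, used only to keep the sketch self-contained):

import torch
import torch.nn as nn

class TinyNet(nn.Module):  # stand-in for KimCNN
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(8, 2)

    def forward(self, x):
        return self.fc(x)

def save_checkpoint(model, path):
    torch.save(model.state_dict(), path)  # weights only, not the pickled class

def resume(path, device='cpu'):
    model = TinyNet()  # rebuild the architecture first
    model.load_state_dict(torch.load(path, map_location=device))
    return model.to(device)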
Example #4
def run_main(args):
    print('Args: ', args)

    metrics_dev_json = args.metrics_json + '_dev'
    metrics_test_json = args.metrics_json + '_test'

    # Set random seed for reproducibility
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True
    device = torch.device(
        "cuda" if torch.cuda.is_available() and args.cuda else "cpu")
    if not args.cuda:
        args.gpu = -1
    if torch.cuda.is_available() and args.cuda:
        print('Note: You are using GPU for training')
        torch.cuda.manual_seed(args.seed)
    if torch.cuda.is_available() and not args.cuda:
        print('Warning: Using CPU for training')
    np.random.seed(args.seed)
    random.seed(args.seed)
    logger = get_logger()

    dataset_map = {
        'CongressionalHearing': CongressionalHearing,
    }

    if args.dataset not in dataset_map:
        raise ValueError('Unrecognized dataset')
    else:
        dataset_class = dataset_map[args.dataset]
        if args.fold_num >= 0:
            dataset_name = os.path.join(dataset_class.NAME + 'Folds',
                                        'fold' + str(args.fold_num))
        else:
            dataset_name = dataset_class.NAME
        # At least one of evaluate_dev / evaluate_test must be set here,
        # otherwise train_iter below is never bound.
        if args.evaluate_dev:
            train_iter, dev_iter = dataset_map[args.dataset].iters_dev(
                args.data_dir,
                dataset_name,
                vectors_name=args.word_vectors_file,
                vectors_cache=args.word_vectors_dir,
                batch_size=args.batch_size,
                device=device,
                unk_init=UnknownWordVecCache.unk)
        if args.evaluate_test:
            train_iter, test_iter = dataset_map[args.dataset].iters_test(
                args.data_dir,
                dataset_name,
                vectors_name=args.word_vectors_file,
                vectors_cache=args.word_vectors_dir,
                batch_size=args.batch_size,
                device=device,
                unk_init=UnknownWordVecCache.unk)

    config = deepcopy(args)
    config.dataset = train_iter.dataset
    config.target_class = train_iter.dataset.NUM_CLASSES
    config.words_num = len(train_iter.dataset.TEXT_FIELD.vocab)

    print('Dataset:', args.dataset)
    print('No. of target classes:', train_iter.dataset.NUM_CLASSES)
    print('No. of train instances:', len(train_iter.dataset))
    if args.evaluate_dev:
        print('No. of dev instances:', len(dev_iter.dataset))
    if args.evaluate_test:
        print('No. of test instances:', len(test_iter.dataset))

    if args.resume_snapshot:
        if args.cuda:
            model = torch.load(
                args.resume_snapshot,
                map_location=lambda storage, location: storage.cuda(args.gpu))
        else:
            model = torch.load(args.resume_snapshot,
                               map_location=lambda storage, location: storage)
    else:
        model = KimCNN(config)
        model.to(device)

    if not args.trained_model:
        save_path = os.path.join(args.save_path, dataset_name)
        os.makedirs(save_path, exist_ok=True)

    parameter = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = torch.optim.Adam(parameter,
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    train_evaluator = EvaluatorFactory.get_evaluator(dataset_map[args.dataset],
                                                     model, None, train_iter,
                                                     args.batch_size, args.gpu)
    if args.evaluate_dev:
        dev_evaluator = EvaluatorFactory.get_evaluator(
            dataset_map[args.dataset], model, None, dev_iter, args.batch_size,
            args.gpu)
        if hasattr(dev_evaluator, 'is_multilabel'):
            dev_evaluator.is_multilabel = dataset_class.IS_MULTILABEL

    if args.evaluate_test:
        test_evaluator = EvaluatorFactory.get_evaluator(
            dataset_map[args.dataset], model, None, test_iter, args.batch_size,
            args.gpu)
        if hasattr(test_evaluator, 'is_multilabel'):
            test_evaluator.is_multilabel = dataset_class.IS_MULTILABEL

    if hasattr(train_evaluator, 'is_multilabel'):
        train_evaluator.is_multilabel = dataset_class.IS_MULTILABEL

    trainer_config = {
        'optimizer': optimizer,
        'batch_size': args.batch_size,
        'log_interval': args.log_every,
        'patience': args.patience,
        'model_outfile': args.save_path,
        'logger': logger,
        'is_multilabel': dataset_class.IS_MULTILABEL
    }
    # Note: if both flags are set, the test trainer below overwrites the dev trainer.
    if args.evaluate_dev:
        trainer = TrainerFactory.get_trainer_dev(args.dataset, model, None,
                                                 train_iter, trainer_config,
                                                 train_evaluator,
                                                 dev_evaluator, args)
    if args.evaluate_test:
        trainer = TrainerFactory.get_trainer_test(args.dataset, model, None,
                                                  train_iter, trainer_config,
                                                  train_evaluator,
                                                  test_evaluator, args)

    if not args.trained_model:
        trainer.train(args.epochs)
    else:
        if args.cuda:
            model = torch.load(
                args.trained_model,
                map_location=lambda storage, location: storage.cuda(args.gpu))
        else:
            model = torch.load(args.trained_model,
                               map_location=lambda storage, location: storage)

    # Calculate dev and test metrics on the best snapshot kept by the trainer
    if hasattr(trainer, 'snapshot_path'):
        model = torch.load(trainer.snapshot_path)

    if args.evaluate_dev:
        evaluate_dataset('dev',
                         dataset_map[args.dataset],
                         model,
                         None,
                         dev_iter,
                         args.batch_size,
                         is_multilabel=dataset_class.IS_MULTILABEL,
                         device=device,
                         save_file=metrics_dev_json)
    if args.evaluate_test:
        evaluate_dataset('test',
                         dataset_map[args.dataset],
                         model,
                         None,
                         test_iter,
                         args.batch_size,
                         is_multilabel=dataset_class.IS_MULTILABEL,
                         device=device,
                         save_file=metrics_test_json)
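A hypothetical driver for run_main, reconstructed only from the attributes the function actually reads; the project's real CLI surely defines these flags and their defaults elsewhere, so treat every flag name and default below as an assumption:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', default='CongressionalHearing')
    parser.add_argument('--data-dir', dest='data_dir', default='data')
    parser.add_argument('--batch-size', dest='batch_size', type=int, default=32)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--weight-decay', dest='weight_decay', type=float, default=0.0)
    parser.add_argument('--seed', type=int, default=1234)
    parser.add_argument('--cuda', action='store_true')
    parser.add_argument('--evaluate-dev', dest='evaluate_dev', action='store_true')
    parser.add_argument('--evaluate-test', dest='evaluate_test', action='store_true')
    # ... plus the remaining attributes run_main touches (gpu, epochs, patience,
    # log_every, save_path, metrics_json, fold_num, resume_snapshot, ...)
    args = parser.parse_args()
    run_main(args)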