Example 1
    logging.info("Loading the datasets...")

    # load data
    data_loader = DataLoader(params)
    train_data_path = os.path.join(args.data_dir, 'train', 'train_data.json')
    data_loader.load_data(train_data_path,
                          split='train',
                          size_limit=args.dataset_size_limit)
    # data_loader.split_data(split_ratio=params.split_ratio)
    val_data_path = os.path.join(args.data_dir, 'val', 'val_data.json')
    data_loader.load_data(val_data_path,
                          split='val',
                          size_limit=args.dataset_size_limit)

    # specify the train and val dataset sizes (as reported by data_loader)
    params.train_size = data_loader.get_dataset_size('train')
    params.val_size = data_loader.get_dataset_size('val')

    logging.info("- done.")

    # Define the model and optimizer
    model = net.Model(params).cuda() if params.cuda else net.Model(params)
    # optimizer = optim.Adam(model.parameters(), lr=params.learning_rate)
    optimizer = optim.Adadelta(model.parameters(), lr=params.learning_rate)
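    # LambdaLR multiplies the base learning rate by whatever the lambda returns,
    # so a lambda that always returns 1. keeps the rate constant while leaving
    # a hook in place for a real decay schedule later.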
    scheduler = sched.LambdaLR(optimizer, lambda s: 1.)  # Constant LR

    # fetch loss function and metrics
    loss_fn = model.loss_fn
    metrics = {'EM': model.exact_match_score, 'f1': model.f1_score}

    # Train the model
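The snippet ends where the training loop would start. As a rough sketch only: one epoch might look like the following, where train_epoch is a hypothetical helper not present in the source, and the assumption that the iterator yields (inputs, labels) batches is also unconfirmed.

    import logging

    def train_epoch(model, optimizer, scheduler, loss_fn, data_iterator, num_steps):
        """Run one epoch of forward, loss, backward, and parameter updates."""
        model.train()
        for _ in range(num_steps):
            # assumption: the iterator yields (inputs, labels) batches
            inputs, labels = next(data_iterator)
            optimizer.zero_grad()
            loss = loss_fn(model(inputs), labels)
            loss.backward()
            optimizer.step()
            scheduler.step()  # with the constant lambda above, the LR is unchanged
        logging.info("- last batch loss: %.4f", loss.item())

A full run would presumably wrap this in a loop over the configured number of epochs, re-creating the iterator from data_loader each time.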
Example 2
    torch.manual_seed(230)
    if params.cuda:
        torch.cuda.manual_seed(230)

    # Get the logger
    utils.set_logger(os.path.join(args.model_dir, 'evaluate.log'))

    # Create the input data pipeline
    logging.info("Creating the dataset...")

    # load data
    print(params)
    data_loader = DataLoader(params)
    data = data_loader.load_data(args.test_data_path)  # stored under the 'all' split used below

    # specify the test set size
    params.test_size = data_loader.get_dataset_size('all')
    test_data_iterator = data_loader.data_iterator(split='all', batch_size=params.batch_size)

    logging.info("- done.")

    # Define the model
    model = net.Model(params).cuda() if params.cuda else net.Model(params)

    loss_fn = model.loss_fn
    metrics = {
        'EM': model.exact_match_score,
        'f1': model.f1_score
    }

    logging.info("Starting evaluation")