def main():
    """Run a trained fact detector over every dataset split and save its
    top-10 facts per question to disk for later testing.

    CLI: --name (detector checkpoint filename under detector/logs/),
         --gpu (CUDA device id exported via CUDA_VISIBLE_DEVICES).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--name',
                        default='2019-03-16_10:28:52{}.pth',
                        help='the name of detector')
    parser.add_argument('--gpu', default='6', help='the chosen gpu id')
    args = parser.parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    cudnn.benchmark = True

    ########################################## MODEL PREPARATION ########################################
    # Filtered GloVe vectors feed the detector's word-embedding layer.
    embedding = bcolz.open(config.glove_path_filtered)[:]
    detector_path = os.path.join('detector/logs/', args.name)
    detector_logs = torch.load(detector_path)
    detector_net = detector.Net(embedding).cuda()
    detector_net.load_state_dict(detector_logs['weights'])
    detector_net.eval()

    ######################################### DATASET PREPARATION #######################################
    splits = ['test-dev2015', 'train2014', 'val2014', 'test2015']

    tracker = utils.Tracker()
    # Renamed loop variable from `set` -- it shadowed the builtin type.
    for split in splits:
        if split == 'train2014':
            val_loader = data.get_loader(train=True)
        elif split == 'val2014':
            val_loader = data.get_loader(val=True)
        elif split == 'trainval':
            val_loader = data.get_loader(train=True, val=True)
        else:
            # Any test-style split goes through the test loader.
            val_loader = data.get_loader(test=True, test_split=split)

        facts = run(detector_net, val_loader, tracker, prefix=split, top=10)
        saved_for_test(val_loader, facts, split)
def main():
    """Extract top-10 supporting facts with a trained detector for every
    dataset split and save them to disk.

    CLI: --name (path to the detector checkpoint, required),
         --gpu (CUDA device id exported via CUDA_VISIBLE_DEVICES).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--name',
        required=True,
        help='the name of the detector model, e.g., /logs/rvqa.pth')
    parser.add_argument('--gpu', default='0', help='the chosen gpu id')
    args = parser.parse_args()

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    cudnn.benchmark = True

    # The loaders need the question vocabulary to tokenize questions.
    with open(config.vocab_path, 'r') as fd:
        vocabs = json.load(fd)
    question_vocab = vocabs['question']

    # Print before the (slow) load instead of after it, so the message is
    # accurate while the checkpoint is being read.
    print("loading model parameters...")
    embedding = bcolz.open(config.glove_path_filtered)[:]
    detector_logs = torch.load(args.name)
    detector_net = detector.Net(embedding).cuda()
    detector_net.load_state_dict(detector_logs['model_state'])
    detector_net.eval()

    if config.cp_data:
        splits = ['train', 'test']
    else:
        splits = ['train2014', 'val2014', 'test-dev2015', 'test2015']

    tracker = utils.Tracker()
    for split in splits:
        # Only non-CP 'test*' splits lack ground-truth answers.
        # (was the convoluted `False if 'test' not in split or config.cp_data
        # else True`, which is equivalent.)
        test = 'test' in split and not config.cp_data
        val_loader = data.get_loader(split, test=test, vocabs=question_vocab)
        facts = run(detector_net, val_loader, tracker, prefix=split, top=10)
        save_to_disk(val_loader, facts, split, vocabs)
# Example 3
def main():
    """Train (or, with --test, only evaluate) the RelAtt VQA model.

    Checkpoints are written to logs/<name>.pth. --resume restarts from a
    saved checkpoint; --test implies --resume and requires --name.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--name', type=str, help='saved and resumed file name')
    parser.add_argument('--resume', action='store_true', help='resumed flag')
    parser.add_argument('--test',
                        dest='test_only',
                        default=False,
                        action='store_true')
    # '--detctor' is a historical typo kept for backward compatibility;
    # '--detector' is accepted as an alias (attribute stays args.detctor).
    parser.add_argument('--detctor',
                        '--detector',
                        default='2019-03-16_10:28:52{}.pth',
                        help='the name of detector')
    parser.add_argument('--gpu', default='3', help='the chosen gpu id')
    args = parser.parse_args()

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    cudnn.benchmark = True

    ########################################## ARGUMENT SETTING	 ########################################
    if args.test_only:
        args.resume = True
    if args.resume and not args.name:
        raise ValueError('Resuming requires file name!')
    name = args.name if args.name else datetime.now().strftime(
        "%Y-%m-%d_%H:%M:%S")
    if args.resume:
        target_name = name
        logs = torch.load(target_name)
        # hacky way to tell the VQA classes that they should use the vocab without passing more params around
        data.preloaded_vocab = logs['vocab']
    else:
        target_name = os.path.join('logs', '{}'.format(name))
    if not args.test_only:
        print('will save to {}'.format(target_name))

    ######################################### DATASET PREPARATION #######################################
    if config.train_set == 'train':
        train_loader = data.get_loader(train=True)
        val_loader = data.get_loader(val=True)
    elif args.test_only:
        val_loader = data.get_loader(test=True)
    else:
        # train+val training: evaluate on the test split.
        train_loader = data.get_loader(train=True, val=True)
        val_loader = data.get_loader(test=True)

    ########################################## MODEL PREPARATION ########################################
    embedding = bcolz.open(config.glove_path_filtered)[:]
    net = model.RelAtt(embedding)
    net = nn.DataParallel(net).cuda()

    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad],
                           lr=config.initial_lr,
                           weight_decay=1e-8)
    # Continuous exponential decay: learning rate halves every 50k steps.
    scheduler = lr_scheduler.ExponentialLR(optimizer, 0.5**(1 / 50000))

    acc_val_best = 0.0
    start_epoch = 0
    if args.resume:
        net.load_state_dict(logs['model_state'])
        optimizer.load_state_dict(logs['optim_state'])
        scheduler.load_state_dict(logs['scheduler_state'])
        start_epoch = logs['epoch']
        acc_val_best = logs['acc_val_best']

    tracker = utils.Tracker()
    r = np.zeros(3)
    for i in range(start_epoch, config.epochs):
        if not args.test_only:
            run(net,
                train_loader,
                optimizer,
                scheduler,
                tracker,
                train=True,
                prefix='train',
                epoch=i)
        # For train+val training, only evaluate during the final 5 epochs.
        if not (config.train_set == 'train+val'
                and i in range(config.epochs - 5)):
            r = run(net,
                    val_loader,
                    optimizer,
                    scheduler,
                    tracker,
                    train=False,
                    prefix='val',
                    epoch=i,
                    has_answers=(config.train_set == 'train'))

        if not args.test_only:
            results = {
                'epoch': i,
                'acc_val_best': acc_val_best,
                'name': name,
                'model_state': net.state_dict(),
                'optim_state': optimizer.state_dict(),
                'scheduler_state': scheduler.state_dict(),
                'eval': {
                    'answers': r[0],
                    'accuracies': r[1],
                    'idx': r[2]
                },
                'vocab': val_loader.dataset.vocab,
            }
            if config.train_set == 'train' and r[1].mean() > acc_val_best:
                acc_val_best = r[1].mean()
                torch.save(results, target_name + '.pth')
            if config.train_set == 'train+val':
                # BUGFIX: the epoch index was never substituted into the
                # filename ("target_name + '{}.pth'"), so every epoch
                # overwrote the literal file '<name>{}.pth'.
                torch.save(results, target_name + '{}.pth'.format(i))
                if i in range(config.epochs - 5, config.epochs):
                    saved_for_test(val_loader, r, i)

        else:
            saved_for_test(val_loader, r)
            break
# Example 4
    # Optimizer: Adamax with library-default hyperparameters.
    # NOTE(review): `model`, `args`, and (when resuming) `logs` are defined
    # earlier in this function -- its header lies outside this view.
    optim = torch.optim.Adamax(model.parameters())

    # Select the training loss by name; LMH/LM are bias-mitigation losses
    # that take the classifier hidden size.
    if args.loss_fn == 'Plain':
        loss_fn = Plain()
    elif args.loss_fn == 'LMH':
        loss_fn = LearnedMixinH(hid_size=args.num_hid).cuda()
    elif args.loss_fn == 'LM':
        loss_fn = LearnedMixin(hid_size=args.num_hid).cuda()
    elif args.loss_fn == 'Focal':
        loss_fn = FocalLoss()
    else:
        raise RuntimeError('not implement for {}'.format(args.loss_fn))

    # ------------------------STATE CREATION------------------------
    eval_score, best_val_score, start_epoch, best_epoch = 0.0, 0.0, 0, 0
    tracker = utils.Tracker()
    if args.resume:
        # Restore model/optimizer (and the loss state, if the checkpoint
        # carries one -- older checkpoints may not).
        model.load_state_dict(logs['model_state'])
        optim.load_state_dict(logs['optim_state'])
        if 'loss_state' in logs:
            loss_fn.load_state_dict(logs['loss_state'])
        start_epoch = logs['epoch']
        best_epoch = logs['epoch']
        best_val_score = logs['best_val_score']
        if args.fine_tune:
            print('best accuracy is {:.2f} in baseline'.format(100 *
                                                               best_val_score))
            # Fine-tuning: extend training by a fixed budget and drop the
            # learning rate to the fine-tune value from the config.
            args.epochs = start_epoch + 10  # 10 more epochs
            for params in optim.param_groups:
                params['lr'] = config.ft_lr
# Example 5
def main():
    """Train/evaluate a VQA model that embeds answers with GloVe vectors.

    CLI: --name (checkpoint base name), --resume, --test (eval only,
    implies --resume), --lambd (loss trade-off weight, read globally via
    `args`), --gpu (CUDA device id).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--name', type=str, help='saved and resumed file name')
    parser.add_argument('--resume', action='store_true', help='resumed flag')
    parser.add_argument('--test', dest='test_only', action='store_true')
    parser.add_argument(
        '--lambd',
        default=1,
        type=float,
        help='trade-off hyperparameters between two types of losses')
    parser.add_argument('--gpu', default='0', help='the chosen gpu id')
    # `args` is made global so other functions (presumably the loss in
    # run()) can read --lambd without parameter plumbing.
    global args
    args = parser.parse_args()

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    cudnn.benchmark = True

    ########################################## ARGUMENT SETTING  ###############################
    if args.test_only:
        args.resume = True
    if args.resume and not args.name:
        raise ValueError('Resuming requires file name!')
    # Fall back to a timestamp when no name is given.
    name = args.name if args.name else datetime.now().strftime(
        "%Y-%m-%d_%H:%M:%S")
    if args.resume:
        target_name = name
        logs = torch.load(target_name)
    else:
        target_name = os.path.join('logs', '{}'.format(name))
    if not args.test_only:
        print('will save to {}'.format(target_name))

    ######################################### DATASET PREPARATION ###############################
    if config.train_set == 'train':
        train_loader = data.get_loader(train=True)
        val_loader = data.get_loader(val=True)
    elif args.test_only:
        val_loader = data.get_loader(test=True)
    else:
        # train+val training: evaluate on the test split.
        train_loader = data.get_loader(train=True, val=True)
        val_loader = data.get_loader(test=True)

    # load pre-trained word embedding (glove) for embedding answers
    vocabs = val_loader.dataset.vocab
    answer_vocab = vocabs['answer']
    embedding, _, _ = word2vec.filter_glove_embedding(answer_vocab)
    embedding = nn.Embedding.from_pretrained(embedding)

    # Map answer indices (JSON string keys) back to answer strings.
    answer_idx_vocab = vocabs['answer_idx']
    answer_idx_vocab = {int(a_idx): a for a_idx, a in answer_idx_vocab.items()}

    ########################################## MODEL PREPARATION #################################
    net = model.Net(val_loader.dataset._num_tokens)
    net = nn.DataParallel(net).cuda()
    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad],
                           lr=config.initial_lr)
    # Step decay: multiply the learning rate by 0.4 every 50k steps.
    scheduler = lr_scheduler.StepLR(optimizer, step_size=50000, gamma=0.4)

    acc_val_best = 0.0
    start_epoch = 0
    if args.resume:
        net.load_state_dict(logs['model_state'])
        optimizer.load_state_dict(logs['optim_state'])
        scheduler.load_state_dict(logs['scheduler_state'])
        start_epoch = logs['epoch']
        acc_val_best = logs['acc_val_best']

    tracker = utils.Tracker()

    # r holds the latest eval triple (answers, accuracies, extra results).
    r = np.zeros(3)
    result_dict = {}
    for i in range(start_epoch, config.epochs):
        if not args.test_only:
            run(net,
                train_loader,
                optimizer,
                scheduler,
                tracker,
                train=True,
                prefix='train',
                embedding=embedding,
                epoch=i)
        # For train+val training, only evaluate during the final 5 epochs.
        if not (config.train_set == 'train+val'
                and i in range(config.epochs - 5)):
            r = run(net,
                    val_loader,
                    optimizer,
                    scheduler,
                    tracker,
                    train=False,
                    prefix='val',
                    epoch=i,
                    embedding=embedding,
                    has_answers=(config.train_set == 'train'),
                    answer_idx_vocab=answer_idx_vocab)

        if not args.test_only:
            results = {
                'epoch': i,
                'name': name,
                'model_state': net.state_dict(),
                'optim_state': optimizer.state_dict(),
                'scheduler_state': scheduler.state_dict(),
                'eval': {
                    'answers': r[0],
                    'accuracies': r[1],
                },
            }
            # Keep the per-epoch extra eval output for the final JSON dump.
            result_dict[i] = r[2]

            if config.train_set == 'train' and r[1].mean() > acc_val_best:
                acc_val_best = r[1].mean()
                results['acc_val_best'] = acc_val_best
                torch.save(results, target_name + '.pth')
            if config.train_set == 'train+val':
                # Save both a rolling checkpoint and a per-epoch one.
                torch.save(results, target_name + '.pth')
                if i in range(config.epochs - 5, config.epochs):
                    saved_for_test(val_loader, r, i)
                    torch.save(results, target_name + '{}.pth'.format(i))
        else:
            saved_for_test(val_loader, r)
            break
    if config.train_set == 'train':
        # NOTE(review): assumes r[2] entries are JSON-serializable -- confirm.
        with open('./results.json', 'w') as fd:
            json.dump(result_dict, fd)
def main():
    """Train a retrieval network, keeping the checkpoint with the best
    recall@10 on the held-out split, and report the final recall scores.

    CLI: --name (checkpoint base name), --resume, --gpu.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--name', type=str, help='saved and resumed file name')
    parser.add_argument('--resume', action='store_true', help='resumed flag')
    parser.add_argument('--gpu', default='0', help='the chosen gpu id')
    args = parser.parse_args()

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    cudnn.benchmark = True

    ########################################## ARGUMENT SETTING	 #################################
    if args.resume and not args.name:
        raise ValueError('Resuming requires file name!')
    name = args.name if args.name else datetime.now().strftime(
        "%Y-%m-%d_%H:%M:%S")
    if args.resume:
        target_name = name
        logs = torch.load(target_name)
    else:
        target_name = os.path.join('logs', '{}'.format(name))
    print('will save to {}'.format(target_name))

    ######################################### DATASET PREPARATION #################################
    with open(config.meta_data_path, 'r') as fd:
        meta_data = json.load(fd)
    train_loader = data.get_loader('train', meta_data)
    if config.train_set == 'train':
        val_loader = data.get_loader('val', meta_data)
    if config.train_set == 'train+val':
        val_loader = data.get_loader('test', meta_data)

    ########################################## MODEL PREPARATION ##################################
    embeddings = bcolz.open(config.glove_path_filtered)[:]
    net = model.Net(embeddings).cuda()
    loss = nn.CrossEntropyLoss()
    optimizer = optim.Adam(
        [p for p in net.parameters() if p.requires_grad],
        lr=config.initial_lr,
    )

    start_epoch = 0
    recall_10_val_best = 0.0
    # BUGFIX: these were only assigned inside the improvement branch, so the
    # final report raised NameError whenever recall@10 never improved.
    recall_1_val_best = 0.0
    recall_5_val_best = 0.0
    # Guard for the final save when the epoch loop does not run at all.
    results = {}
    if args.resume:
        net.load_state_dict(logs['model_state'])
        optimizer.load_state_dict(logs['optim_state'])
        start_epoch = logs['epoch']
        recall_10_val_best = logs['recall_10_val_best']

    tracker = utils.Tracker()
    best_epoch = start_epoch
    state = 'Valid' if config.train_set == 'train' else 'Test'
    for i in range(start_epoch, config.epochs):
        run(net,
            train_loader,
            tracker,
            optimizer,
            loss_criterion=loss,
            train=True,
            prefix='train',
            epoch=i)

        results = {
            'epoch': i,
            'name': name,
            'model_state': net.state_dict(),
            'optim_state': optimizer.state_dict(),
        }

        if not config.train_set == 'all':
            # r = (recall@1, recall@5, recall@10) on the held-out split.
            r = run(net,
                    val_loader,
                    tracker,
                    optimizer,
                    loss_criterion=loss,
                    train=False,
                    prefix='val',
                    epoch=i)
            print("{} epoch {}: recall@1 is {:.4f}".format(state, i, r[0]),
                  end=", ")
            print("recall@5 is {:.4f}, recall@10 is {:.4f}".format(r[1], r[2]))

            # Checkpoint only when recall@10 improves.
            if r[2] > recall_10_val_best:
                recall_10_val_best = r[2]
                results['recall_10_val_best'] = recall_10_val_best
                best_epoch = i
                recall_1_val_best = r[0]
                recall_5_val_best = r[1]
                torch.save(results, target_name + '.pth')
    if not config.train_set == 'all':
        print("The best performance of {} is on epoch {}".format(
            state, best_epoch),
              end=": ")
        print("recall@1 is {:.4f}, recall@5 is {:.4f}, recall@10 is {:.4f}".
              format(recall_1_val_best, recall_5_val_best, recall_10_val_best))
    else:
        # Training on everything: no held-out metric, save the last epoch.
        torch.save(results, target_name + '.pth')