Code Example #1
File: tracking.py Project: rodsnjr/motion_tracking
def create_tracker(pos, current_tracker):
    positions = utils.tracker_positions(next_frame, pos)
    n_tracker = utils.Tracker(positions, current_tracker.index,
                              current_tracker.tracking)
    if not n_tracker.noise():
        new_trackers.append(n_tracker)
        return True
    return False
Code Example #2
def main():
    if len(sys.argv) > 1:
        name = ' '.join(sys.argv[1:])
    else:
        from datetime import datetime
        name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    target_name = os.path.join('logs', '{}.pth'.format(name))
    print('will save to {}'.format(target_name))

    cudnn.benchmark = True

    # train_loader = data.get_loader(config.train_path, train=True)
    test_loader = data.get_loader(config.test_path, test=True)
    cap_vcb = test_loader.dataset.token_to_index
    hash_vcb = test_loader.dataset.answer_to_index
    inv_hash_dict = {v: k for k, v in hash_vcb.items()}
    inv_cap_dict = {v: k for k, v in cap_vcb.items()}

    net = model.Net(test_loader.dataset.num_tokens[0],
                    test_loader.dataset.num_tokens[1], [], []).to(device)
    # net = model.Net(train_loader.dataset.num_tokens[0],train_loader.dataset.num_tokens[1]).to(device)
    # optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad])
    # print(torch.load('logs/' + args.model_path)['weights'])
    net.load_state_dict(torch.load('logs/' + args.model_path)['weights'])

    tracker = utils.Tracker()
    # for k,v in vars(config).items():
    #     print(k)
    # BRING THIS BACK
    # criterion = nn.CrossEntropyLoss(ignore_index = PAD_IDX)
    criterion = nn.CrossEntropyLoss(
        ignore_index=test_loader.dataset.answer_to_index['<pad>'])
    config_as_dict = {
        k: v
        for k, v in vars(config).items()
        if not k.startswith('__') and not k.startswith('os')
        and not k.startswith('expanduser') and not k.startswith('platform')
    }

    r = run(net,
            test_loader,
            tracker,
            criterion,
            cap_vcb,
            hash_vcb,
            train=False,
            prefix='test',
            epoch=0)
    with open('output/hashtags.csv', 'w') as f:
        for key in r[0].keys():
            f.write("%s,%s\n" % (key, r[0][key]))
    with open('output/captions.csv', 'w') as f:
        for key in r[1].keys():
            f.write("%s,%s\n" % (key, r[1][key]))
    with open('output/predictions.csv', 'w') as f:
        for key in r[2].keys():
            f.write("%s,%s\n" % (key, r[2][key]))
Code Example #3
def main(name=None):
    print("running on", "cuda:0" if torch.cuda.is_available() else "cpu")
    if len(sys.argv) > 1:
        name = ' '.join(sys.argv[1:])
    else:
        from datetime import datetime
        if name is None:
            name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
        else:
            name = name + datetime.now().strftime("-%Y-%m-%d_%H:%M:%S")
    target_name = os.path.join('logs_big', '{}.pth'.format(name))
    print('will save to {}'.format(target_name))

    cudnn.benchmark = True

    train_loader = data.get_loader(train=True)
    val_loader = data.get_loader(val=True)

    config_as_dict = {
        k: v
        for k, v in vars(config).items() if not k.startswith('__')
    }
    net = nn.DataParallel(Net(train_loader.dataset.num_tokens)).cuda()
    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad],
                           weight_decay=config_as_dict["weight_decay"])

    tracker = utils.Tracker()

    for i in range(config.epochs):
        _ = run(net,
                train_loader,
                optimizer,
                tracker,
                train=True,
                prefix='train',
                epoch=i)
        r = run(net,
                val_loader,
                optimizer,
                tracker,
                train=False,
                prefix='val',
                epoch=i)

        results = {
            'name': name,
            'tracker': tracker.to_dict(),
            'config': config_as_dict,
            'weights': net.state_dict(),
            'eval': {
                'answers': r[0],
                'accuracies': r[1],
                'idx': r[2],
            },
            'vocab': train_loader.dataset.vocab,
        }
        torch.save(results, target_name)
Code Example #4
def main():
    if len(sys.argv) > 1:
        name = ' '.join(sys.argv[1:])
    else:
        from datetime import datetime
        name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    target_name = os.path.join('logs', '{}.pth'.format(name))
    print('will save to {}'.format(target_name))

    cudnn.benchmark = True

    train_loader = data.get_loader(train=True)
    val_loader = data.get_loader(val=True)
    with open('embedding/word_embedding.p', "rb") as f:
        embedding_model = cPickle.load(f)
    net = model.Net(embedding_model).to(device)
    # net = model.Net(train_loader.dataset.num_tokens).to(device)
    net = nn.DataParallel(net)
    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad])

    tracker = utils.Tracker()
    config_as_dict = {
        k: v
        for k, v in vars(config).items()
        if (not k.startswith('__')) and (type(v) is not ModuleType)
    }

    for i in range(config.epochs):
        _ = run(net,
                train_loader,
                optimizer,
                tracker,
                train=True,
                prefix='train',
                epoch=i)
        r = run(net,
                val_loader,
                optimizer,
                tracker,
                train=False,
                prefix='val',
                epoch=i)

        results = {
            'name': name,
            'tracker': tracker.to_dict(),
            'config': config_as_dict,
            'weights': net.state_dict(),
            'eval': {
                'answers': r[0],
                'accuracies': r[1],
                'idx': r[2],
            },
            'vocab': train_loader.dataset.vocab,
        }
        torch.save(results, target_name)
Code Example #5
def main():
    if len(sys.argv) > 1:
        name = ' '.join(sys.argv[1:])
    else:
        from datetime import datetime
        name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    target_name = os.path.join('logs', '{}.pth'.format(name))
    print('will save to {}'.format(target_name))

    cudnn.benchmark = True

    print("Loading training set")
    train_loader = data.get_loader(train=True)

    print("Loading testing set:")
    val_loader = data.get_loader(val=True)

    net = nn.DataParallel(model.Net(train_loader.dataset.num_tokens)).cuda()
    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad])

    tracker = utils.Tracker()
    config_as_dict = {
        k: v
        for k, v in vars(config).items() if not k.startswith('__')
    }

    print("Training....")
    for i in range(config.epochs):
        _ = run(net,
                train_loader,
                optimizer,
                tracker,
                train=True,
                prefix='train',
                epoch=i)
        r = run(net,
                val_loader,
                optimizer,
                tracker,
                train=False,
                prefix='val',
                epoch=i)

        results = {
            'name': name,
            'tracker': tracker.to_dict(),
            'config': config_as_dict,
            'weights': net.state_dict(),
            'eval': {
                'answers': r[0],
                'accuracies': r[1],
                'idx': r[2],
            },
            'vocab': train_loader.dataset.vocab,
        }
        torch.save(results, target_name)
Code Example #6
def main():
    if len(sys.argv) > 1:
        name = ' '.join(sys.argv[1:])
    else:
        from datetime import datetime
        name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    target_name = os.path.join('logs', '{}.pth'.format(name))
    model_name = os.path.join('logs', '{}_model.pth'.format(name))
    print('will save to {}'.format(target_name))

    cudnn.benchmark = True

    train_loader = data.get_loader(train=True)
    val_loader = data.get_loader(val=True)

    net = nn.DataParallel(model.Net(train_loader.dataset.num_tokens)).cuda()
    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad])

    tracker = utils.Tracker()
    config_as_dict = {k: v for k, v in vars(config).items() if not k.startswith('__')}

    for i in range(config.epochs):
        _ = run(net, train_loader, optimizer, tracker, train=True, prefix='train', epoch=i)
        r = run(net, val_loader, optimizer, tracker, train=False, prefix='val', epoch=i)

        results = {
            'name': name,
            'epoch': i,
            'tracker': tracker.to_dict(),
            'config': config_as_dict,
            'weights': net.state_dict(),
            'eval': {
                'answers': r[0],
                'accuracies': r[1],
                'idx': r[2],
            },
            'optimizer' : optimizer.state_dict(),
            'vocab': train_loader.dataset.vocab,
        }
        torch.save(results, target_name)

        # save best model so far
        val_acc = torch.FloatTensor(tracker.to_dict()['val_acc'])
        val_acc = val_acc.mean(dim=1).numpy()

        is_best = True
        for j in range(len(val_acc) - 1):
            if val_acc[-1] <= val_acc[j]:
                is_best = False
        if is_best:
            save_model = {
                'epoch': i,
                'weights': net.state_dict()
            }
            torch.save(save_model, model_name)
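Most examples on this page construct the tracker as utils.Tracker() and later read tracker.to_dict(); the Tracker class itself is never shown. Purely as an illustration of the interface these snippets appear to assume (not the actual utils.Tracker implementation), a minimal sketch that makes the best-model check above work could look like this:

from collections import defaultdict

class Tracker:
    """Hypothetical minimal Tracker: one list of per-epoch value lists per metric name."""

    def __init__(self):
        self.data = defaultdict(list)

    def track(self, name):
        # start a new per-epoch list for `name` (e.g. 'val_acc') and return it
        # so the epoch loop can append one value per batch
        epoch_values = []
        self.data[name].append(epoch_values)
        return epoch_values

    def to_dict(self):
        return dict(self.data)

With such a structure, torch.FloatTensor(tracker.to_dict()['val_acc']) above yields an epochs-by-batches tensor, and .mean(dim=1) gives one mean accuracy per epoch, provided every epoch records the same number of batches.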
Code Example #7
def main():
    if len(sys.argv) > 1:  # argument list received from the command line: ['/home/users2/xcm09/VQA/train.py']
        name = ' '.join(sys.argv[1:])
    else:
        from datetime import datetime
        name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    target_name = os.path.join('logs', '{}.pth'.format(name))
    print('model will save to {}'.format(target_name))

    cudnn.benchmark = True

    train_loader = data.get_loader(train=True)
    val_loader = data.get_loader(val=True)
    net = model.Net(train_loader.dataset.num_tokens).cuda()  # 15193
    # net = nn.DataParallel(model.Net(train_loader.dataset.num_tokens), device_ids=config.device_ids).cuda()
    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad])

    tracker = utils.Tracker()
    config_as_dict = {
        k: v
        for k, v in vars(config).items() if not k.startswith('__')
    }

    for i in range(config.epochs):
        a = time()
        _ = run(net,
                train_loader,
                optimizer,
                tracker,
                train=True,
                prefix='train',
                epoch=i)
        r = run(net,
                val_loader,
                optimizer,
                tracker,
                train=False,
                prefix='val',
                epoch=i)

        results = {
            'name': name,
            'tracker': tracker.to_dict(),
            'config': config_as_dict,
            'weights': net.state_dict(),
            'eval': {
                'answers': r[0],
                'accuracies': r[1],
                'idx': r[2],
            },
            'vocab': train_loader.dataset.vocab,
        }
        torch.save(results, target_name)
        b = time() - a
    print('this epoch took %d:%d' % (b // 60, b % 60))
Code Example #8
def main():
    start_time = datetime.datetime.now()
    start_time_str = start_time.strftime("%Y-%m-%d_%H-%M-%S")
    progress_file = start_time_str + '_progress.csv'

    cudnn.benchmark = True

    train_loader = data.get_loader(train=True)
    val_loader = data.get_loader(val=True)

    net = nn.DataParallel(model.Net(train_loader.dataset.num_tokens)).cuda()
    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad])

    tracker = utils.Tracker()
    config_as_dict = {
        k: v
        for k, v in vars(config).items() if not k.startswith('__')
    }
    train_loss_list = []
    train_acc_list = []
    val_loss_list = []
    val_acc_list = []
    max_acc = 0

    for i in range(config.epochs):
        print('epoch %d' % i)
        print('train')
        train_loss, train_acc = run(net,
                                    train_loader,
                                    optimizer,
                                    tracker,
                                    train=True,
                                    prefix='train',
                                    epoch=i)
        print('validation')
        val_loss, val_acc = run(net,
                                val_loader,
                                optimizer,
                                tracker,
                                train=False,
                                prefix='val',
                                epoch=i)
        train_loss_list.append(train_loss)
        train_acc_list.append(train_acc)
        val_loss_list.append(val_loss)
        val_acc_list.append(val_acc)

        write_progress(train_acc_list, train_loss_list, val_acc_list,
                       val_loss_list, progress_file)
        if val_acc > max_acc and val_acc > 0.45:
            print('model saved')
            torch.save(net.state_dict(), start_time_str + '_best_model.pkl')
            max_acc = val_acc
Code Example #9
File: test.py Project: kxiao1/pytorch-vqa
def main():
    assert len(sys.argv) == 3

    log = torch.load(sys.argv[2], map_location=torch.device('cpu'))
    num_tokens = len(log['vocab']['question']) + 1
    answer_map = {v: k for k, v in log['vocab']['answer'].items()}

    test_loader = data.get_loader(test=True)

    if sys.argv[1] == 'baseline':
        import model_baseline as model
    elif sys.argv[1] == 'modified_attention':
        import model_modified_attention as model
    elif sys.argv[1] == 'big':
        import model_big as model
    elif sys.argv[1] == 'combined':
        import model_combined as model
    elif sys.argv[1] == 'naive':
        import model_degenerate as model
        test_loader = data.get_loader(test=True, include_original_images=True)
    else:
        print("Re-enter the name of the model!")
        return
    net = nn.DataParallel(model.Net(num_tokens)).cuda()
    net.load_state_dict(log['weights'])

    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad],
                           weight_decay=config.weight_decay)
    tracker = utils.Tracker()

    results = model.run(net,
                        test_loader,
                        optimizer,
                        tracker,
                        train=False,
                        prefix='val',
                        epoch=-1)

    anss, accs, idxs = results
    answers = [answer_map[ans.item()] for ans in anss]
    image_names = [f"VizWiz_test_{idx:08d}.jpg" for idx in idxs]
    results = [{
        "image": image_name,
        "answer": ans
    } for image_name, ans in zip(image_names, answers)]

    assert sum(accs) == 0

    log_name = os.path.basename(sys.argv[2])
    log_name = log_name[:log_name.index('-')]

    with open(os.path.join("test_results", f"{log_name}.json"), 'w') as fd:
        json.dump(results, fd)
Code Example #10
def main():
    global total_iterations
    total_iterations = 1
 
    if len(sys.argv) > 1:
        name = 'lang_attacker_' + (' '.join(sys.argv[1:]))
    else:
        from datetime import datetime
        name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    target_name = os.path.join('logs', 'lang_attacker_{}.pth'.format(name))
    print('will save to {}'.format(target_name))

    cudnn.benchmark = True
    print("Initialize Data Loaders")
    train_loader = data.get_loader(train=True, batch_size=config.batch_size)
    val_loader = data.get_loader(val=True, batch_size=config.batch_size)

    #net = nn.DataParallel(model.Net(train_loader.dataset.num_tokens)).cuda()
    # Define and loadup the models
    print("Initialize VQA model")
    vqa_model = model.VQANet()

    # Get question and vocab answer
    vocab = vqa_model.get_vocab()

    # Define attacker
    #print("Load Attacker model")
    #Uncomment this for AttendAndAttackNet
    #attacker = Attacker(vqa_model)

    #Uncomment this for Carlini
    #attacker = CarliniAttacker(vqa_model)

    #optimizer = optim.Adam([p for p in attacker.attack_model.parameters() if p.requires_grad])
    #scheduler = ReduceLROnPlateau(optimizer, 'min')

    tracker = utils.Tracker()
    config_as_dict = {k: v for k, v in vars(config).items() if not k.startswith('__')}
    print("Begin Inference")
    eval_after_epochs = 1 #Run eval after these many epochs
    for i in range(1): #Only 1 epoch needed for inference. Keep this to later generalize to training to protect against attacks
        #Run a train epoch
        #acc = run(vqa_model, train_loader, tracker, train=True, prefix='train', epoch=i)

        #Run inference
        acc = run(vqa_model, val_loader, tracker, train=False, prefix='val', epoch=i)

        print("Epoch " + str(i) +" : Inference Results: Accuracy: "+ str(acc)) 

Code Example #11
def main():
    from datetime import datetime

    # this has been changed to run jupyter
    #
    # non jupyter ##############################################################
    if len(sys.argv) > 1:
        name = ' '.join(sys.argv[1:])
    else:
        name = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    ############################################################################


    # remove line below if not running on jupyter
    name = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

    target_name = os.path.join('logs', '{}.pth'.format(name))
    print('will save to {}'.format(target_name))

    cudnn.benchmark = True

    train_loader = data.get_loader(train=True)
    val_loader = data.get_loader(val=True)

    net = nn.DataParallel(modelNoAttention.Net(train_loader.dataset.num_tokens)).cuda() #change made here
    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad], weight_decay=0.01)

    tracker = utils.Tracker()
    config_as_dict = {k: v for k, v in vars(config).items() if not k.startswith('__')}

    for i in range(config.epochs):
        _ = run(net, train_loader, optimizer, tracker, train=True, prefix='train', epoch=i)
        r = run(net, val_loader, optimizer, tracker, train=False, prefix='val', epoch=i)

        results = {
            'name': name,
            'tracker': tracker.to_dict(),
            'config': config_as_dict,
            'weights': net.state_dict(),
            'eval': {
                'answers': r[0],
                'accuracies': r[1],
                'idx': r[2],
            },
            'vocab': train_loader.dataset.vocab,
        }
        torch.save(results, target_name)
Code Example #12
File: model_is_color.py Project: kxiao1/pytorch-vqa
def main():
    from datetime import datetime
    name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    target_name = os.path.join('logs_is_color', '{}.pth'.format(name))
    print('will save to {}'.format(target_name))

    train_loader = data.get_loader(train=True, check_pertains_to_color=True)
    val_loader = data.get_loader(val=True, check_pertains_to_color=True)
    net = nn.DataParallel(ColorNet(train_loader.dataset.num_tokens)).cuda()
    tracker = utils.Tracker()

    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad],
                           weight_decay=0.01)
    config_as_dict = {
        k: v
        for k, v in vars(config).items() if not k.startswith('__')
    }

    for i in range(config.epochs):
        _ = run(net,
                train_loader,
                optimizer,
                tracker,
                train=True,
                prefix='train',
                epoch=i)
        r = run(net,
                val_loader,
                optimizer,
                tracker,
                train=False,
                prefix='val',
                epoch=i)

        results = {
            'name': name,
            'tracker': tracker.to_dict(),
            'config': config_as_dict,
            'weights': net.state_dict(),
            'eval': {
                'answers': r[0],
                'accuracies': r[1],
                'idx': r[2],
            },
            'vocab': train_loader.dataset.vocab,
        }
        torch.save(results, target_name)
Code Example #13
def test():
    log = torch.load('results/baseline.pt')
    tokens = len(log['vocab']['question']) + 1

    net = torch.nn.DataParallel(model.Net(tokens))
    net.load_state_dict(log['weights'])
    net.cuda()

    print('model loaded')

    train_loader = data.get_loader(train=True)
    val_loader = data.get_loader(val=True)

    tracker = utils.Tracker()

    r = run(net, val_loader, None, tracker, train=False, prefix='val')
    print(r[1])
Code Example #14
def main():
    print("running on", "cuda:0" if torch.cuda.is_available() else "cpu")
    if len(sys.argv) > 1:
        name = ' '.join(sys.argv[1:])
    else:
        from datetime import datetime
        name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    target_name = os.path.join('logs_karl', '{}.pth'.format(name))
    print('will save to {}'.format(target_name))

    cudnn.benchmark = True

    train_loader = data.get_loader(train=True)
    val_loader = data.get_loader(val=True)

    # net = nn.DataParallel(model_modified_attention.Net(train_loader.dataset.num_tokens)).cuda()
    net = model_modified_attention.Net(train_loader.dataset.num_tokens)
    print(net)
    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad])
    # use a separate loop variable so the run name above is not overwritten
    for module_name, layer in net.named_modules():
        if isinstance(layer, torch.nn.Conv2d):
            print(module_name, layer)
    net.attention.register_forward_hook(get_activation('attention'))

    tracker = utils.Tracker()
    config_as_dict = {k: v for k, v in vars(config).items() if not k.startswith('__')}

    for i in range(1):
        _ = run(net, train_loader, optimizer, tracker, train=True, prefix='train', epoch=i)
        r = run(net, val_loader, optimizer, tracker, train=False, prefix='val', epoch=i)

        results = {
            'name': name,
            'tracker': tracker.to_dict(),
            'config': config_as_dict,
            'weights': net.state_dict(),
            'eval': {
                'answers': r[0],
                'accuracies': r[1],
                'idx': r[2],
            },
            'vocab': train_loader.dataset.vocab,
        }
        torch.save(results, target_name)
Code Example #15
def evaluate_hw3():
    # all data files should be inside "coco" folder in the project directory
    preprocess_images.run(is_evaluate=True)
    preprocess_vocab.run()
    val_loader = data.get_loader(val=True)

    net = nn.DataParallel(model.Net(val_loader.dataset.num_tokens)).cuda()
    net.load_state_dict(
        torch.load('model.pkl', map_location=lambda storage, loc: storage))

    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad])
    tracker = utils.Tracker()
    answers, accuracies, idx = start(net,
                                     val_loader,
                                     optimizer,
                                     tracker,
                                     train=False,
                                     prefix='val')
    acc = calc_accuracy(accuracies)
    print('%.2f' % acc)
Code Example #16
def evaluate_hw3():
    print('preprocessing images')
    preprocess_images(train=False)
    print('preprocessing text')
    preprocess_text()
    print('loading data')
    val_loader = data.get_loader(val=True)
    net = nn.DataParallel(model.Net(config.default_num_tokens)).cuda()
    net.load_state_dict(
        torch.load('model.pkl', map_location=lambda storage, loc: storage))
    net = to_gpu(net)
    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad])
    tracker = utils.Tracker()
    print('running validation')
    val_loss, val_acc = run(net,
                            val_loader,
                            optimizer,
                            tracker,
                            train=False,
                            prefix='val',
                            epoch=0)
    print('validation acc: %.4f' % val_acc)
Code Example #17
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('name', nargs='*')
    parser.add_argument('--eval', dest='eval_only', action='store_true')
    parser.add_argument('--test', action='store_true')
    parser.add_argument('--trainval', action='store_true')
    parser.add_argument('--resume', nargs='*')
    parser.add_argument('--describe',
                        type=str,
                        default='describe your setting')
    args = parser.parse_args()

    print('-' * 50)
    print(args)
    config.print_param()

    # set manual seed
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed(config.seed)

    if args.test:
        args.eval_only = True
    src = open(config.model_type + '_model.py').read()
    if args.name:
        name = ' '.join(args.name)
    else:
        from datetime import datetime
        name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    target_name = os.path.join('logs', '{}.pth'.format(name))
    if not args.test:
        # target_name won't be used in test mode
        print('will save to {}'.format(target_name))
    if args.resume:
        logs = torch.load(' '.join(args.resume))
        # hacky way to tell the VQA classes that they should use the vocab without passing more params around
        data.preloaded_vocab = logs['vocab']

    cudnn.benchmark = True

    if args.trainval:
        train_loader = data.get_loader(trainval=True)
    elif not args.eval_only:
        train_loader = data.get_loader(train=True)

    if args.trainval:
        pass  # since we use the entire train val splits, we don't need val during training
    elif not args.test:
        val_loader = data.get_loader(val=True)
    else:
        val_loader = data.get_loader(test=True)

    question_keys = (train_loader.dataset.vocab['question'].keys()
                     if args.trainval else val_loader.dataset.vocab['question'].keys())
    net = model.Net(question_keys)
    net = nn.DataParallel(net).cuda()  # Support multiple GPUS
    select_optim = optim.Adamax if (config.optim_method
                                    == 'Adamax') else optim.Adam
    optimizer = select_optim([p for p in net.parameters() if p.requires_grad],
                             lr=config.initial_lr,
                             weight_decay=config.weight_decay)
    scheduler = lr_scheduler.ExponentialLR(optimizer,
                                           0.5**(1 / config.lr_halflife))
    if args.resume:
        net.module.load_state_dict(logs['weights'])
    print(net)
    tracker = utils.Tracker()
    config_as_dict = {
        k: v
        for k, v in vars(config).items() if not k.startswith('__')
    }

    for i in range(config.epochs):
        if not args.eval_only:
            run(net,
                train_loader,
                optimizer,
                scheduler,
                tracker,
                train=True,
                prefix='train',
                epoch=i)
        if not args.trainval:
            r = run(net,
                    val_loader,
                    optimizer,
                    scheduler,
                    tracker,
                    train=False,
                    prefix='val',
                    epoch=i,
                    has_answers=not args.test)
        else:
            r = [[-1], [-1], [-1]]  # dummy results

        if not args.test:
            results = {
                'name': name,
                'tracker': tracker.to_dict(),
                'config': config_as_dict,
                'weights': net.module.state_dict(),
                'eval': {
                    'answers': r[0],
                    'accuracies': r[1],
                    'idx': r[2],
                },
                'vocab': (val_loader.dataset.vocab
                          if not args.trainval else train_loader.dataset.vocab),
                'src': src,
            }
            torch.save(results, target_name)
        else:
            # in test mode, save a results file in the format accepted by the submission server
            answer_index_to_string = {
                a: s
                for s, a in val_loader.dataset.answer_to_index.items()
            }
            results = []
            for answer, index in zip(r[0], r[2]):
                answer = answer_index_to_string[answer.item()]
                qid = val_loader.dataset.question_ids[index]
                entry = {
                    'question_id': qid,
                    'answer': answer,
                }
                results.append(entry)
            with open(config.result_json_path, 'w') as fd:
                json.dump(results, fd)

        if args.eval_only:
            break
Code Example #18
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('name', nargs='*')
    parser.add_argument('--eval', dest='eval_only', action='store_true')
    parser.add_argument('--test', action='store_true')
    parser.add_argument('--resume', nargs='*')
    # parser.add_argument('--image_url', type = str, default='')
    # parser.add_argument('--question_id', type = int)
    # parser.add_argument('--embedding', default='kor2vec', type='str', help='embedding method')
    args = parser.parse_args()

    if args.test:
        args.eval_only = True

    # if args.image_

    # if args.embedding == 'kor2vec':
    #     src = open('model_kor2vec.py').read()
    # if args.embedding == '':
    # src = open('model_original.py').read()
    src = open('model.py').read()
    if args.name:
        name = ' '.join(args.name)
    else:
        from datetime import datetime
        name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    target_name = os.path.join('logs', '{}.pth'.format(name))
    # target_name = '/mnt/crawl/counting_logs/{}.pth'.format(name)
    if not args.test:
        # target_name won't be used in test mode
        print('will save to {}'.format(target_name))
    if args.resume:
        logs = torch.load(' '.join(args.resume))

        # hacky way to tell the VQA classes that they should use the vocab without passing more params around
        data.preloaded_vocab = logs['vocab']

    cudnn.benchmark = True

    print('start loader...')
    if not args.eval_only:
        train_loader = data.get_loader(train=True)
    if not args.test:
        val_loader = data.get_loader(val=True)
    else:
        val_loader = data.get_loader(test=True)

    print('start net / optimizer / scheduler...')

    # model = nn.DataParallel(models.Model(config, train_loader.dataset.num_tokens)).cuda()

    # if args.embedding == 'kor2vec':
    #     net = nn.DataParallel(model_kor2vec.Net(val_loader.dataset.num_tokens)).cuda()
    # if args.embedding == '':

    # if you want to use 2 gpus
    # net = nn.DataParallel(model_original.Net(val_loader.dataset.num_tokens)).cuda()
    net = nn.DataParallel(model.Net(val_loader.dataset.num_tokens)).cuda()
    # if you want to use 1 gpus
    # torch.cuda.set_device(0)
    # net = model.Net(val_loader.dataset.num_tokens).cuda()

    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad],
                           lr=config.initial_lr)
    scheduler = lr_scheduler.ExponentialLR(optimizer,
                                           0.5**(1 / config.lr_halflife))

    # epoch = config.epochs

    if args.resume:
        net.load_state_dict(logs['weights'])
        # checkpoint = torch.load(logs)
        # print("cp", checkpoint)
        # model.load_state_dict(checkpoint['model_state_dict'])
        # optimizer.load_state_dict(logs['optimizer_state_dict'])
        # epoch = logs['epoch']
        # loss = logs['loss']
        # model.train()

    tracker = utils.Tracker()
    config_as_dict = {
        k: v
        for k, v in vars(config).items() if not k.startswith('__')
    }

    # if not args.test and not args.eval:

    print('start training...')

    for i in range(config.epochs):
        if not args.eval_only:
            print('train.. not args.eval_only')
            print('start train!!!!...')
            run(net,
                train_loader,
                optimizer,
                scheduler,
                tracker,
                train=True,
                prefix='train',
                epoch=i)

        print('start val!!!!...')
        r = run(net,
                val_loader,
                optimizer,
                scheduler,
                tracker,
                train=False,
                prefix='val',
                epoch=i,
                has_answers=not args.test)

        # print("r3", r[3])

        if not args.test:
            print('train.. if not args.test')
            if i % 5 == 0:
                now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
                save_name = '/mnt/backup/ran/vqa/vqa/save/ko100_{0}_{1}.pth'.format(
                    now, i)  # as of 2019-10-28, lr1 is the main run
                # target_name = os.path.join('logs', '{}.pth'.format(name))
                results = {
                    'name': name,
                    'tracker': tracker.to_dict(),
                    'config': config_as_dict,
                    'weights': net.state_dict(),
                    'eval': {
                        'answers': r[0],
                        'accuracies': r[1],
                        'idx': r[2],
                    },
                    'vocab': val_loader.dataset.vocab,
                    'src': src,
                    'epoch': i,
                    # 'model_state_dict': net.state_dict(), #weight
                    'optimizer_state_dict':
                    optimizer.state_dict(),  # optimizer weight
                    # 'loss': loss,
                }

                torch.save(results, save_name)

            if i == 99:
                now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
                save_name = '/mnt/crawl/counting_logs/eng_{0}_final.pth'.format(
                    now)
                # target_name = os.path.join('logs', '{}.pth'.format(name))
                results = {
                    'name': name,
                    'tracker': tracker.to_dict(),
                    'config': config_as_dict,
                    'weights': net.state_dict(),
                    'eval': {
                        'answers': r[0],
                        'accuracies': r[1],
                        'idx': r[2],
                    },
                    'vocab': val_loader.dataset.vocab,
                    'src': src,
                }
                torch.save(results, save_name)
        else:
            # in test mode, save a results file in the format accepted by the submission server
            print('train.. else -> yes args.test')
            # torch.load("")
            answer_index_to_string = {
                a: s
                for s, a in val_loader.dataset.answer_to_index.items()
            }
            # with open('answer_index_to_string.txt', 'w') as f:
            #    json.dump(answer_index_to_string, f)
            results = []

            for answer, index in zip(r[0], r[2]):  # r3
                try:
                    answer = answer_index_to_string[answer.item()]
                except KeyError:
                    continue
                # answer = answer_index_to_string[answer.item()]

                qid = val_loader.dataset.question_ids[index]
                entry = {
                    'question_id': qid,
                    'answer': answer
                    # 'all_answer': rr
                }
                results.append(entry)
            with open('results_test.json', 'w') as fd:
                json.dump(results, fd)

        if args.eval_only:
            # else:
            # in test mode, save a results file in the format accepted by the submission server
            print('eval+pnp')
            # torch.load("")
            answer_index_to_string = {
                a: s
                for s, a in val_loader.dataset.answer_to_index.items()
            }
            # with open('answer_index_to_string.txt', 'w') as f:
            #    json.dump(answer_index_to_string, fd)
            results = []

            for answer, index in zip(r[0], r[2]):
                try:
                    answer = answer_index_to_string[answer.item()]
                except KeyError:
                    continue

                qid = val_loader.dataset.question_ids[index]
                entry = {
                    'question_id': qid,
                    'answer': answer,
                    # 'all_answer':rr
                }
                results.append(entry)
            with open('results_test.json', 'w') as fd:
                json.dump(results, fd)

            break
Code Example #19
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('name', nargs='*')
    parser.add_argument('--eval', dest='eval_only', action='store_true')
    parser.add_argument('--test', action='store_true')
    parser.add_argument('--resume', nargs='*')
    args = parser.parse_args()

    if args.test:
        args.eval_only = True
    src = open('model.py').read()
    if args.name:  # use the positional name argument when given (sys.argv may also contain flags)
        name = ' '.join(args.name)
    else:
        from datetime import datetime
        name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    target_name = os.path.join('logs', '{}.pth'.format(name))
    if not args.test:
        # target_name won't be used in test mode
        print('will save to {}'.format(target_name))

    cudnn.benchmark = True

    if not args.eval_only:
        train_loader = data.get_loader(train=True)
    if not args.test:
        val_loader = data.get_loader(val=True)
    else:
        val_loader = data.get_loader(test=True)

    net = model.Net(val_loader.dataset.num_tokens).cuda()
    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad],
                           lr=config.initial_lr)
    scheduler = lr_scheduler.ExponentialLR(optimizer,
                                           0.5**(1 / config.lr_halflife))
    if args.resume:
        net.load_state_dict(torch.load(' '.join(args.resume))['weights'])

    tracker = utils.Tracker()
    config_as_dict = {
        k: v
        for k, v in vars(config).items() if not k.startswith('__')
    }

    for i in range(config.epochs):
        if not args.eval_only:
            run(net,
                train_loader,
                optimizer,
                scheduler,
                tracker,
                train=True,
                prefix='train',
                epoch=i)
        r = run(net,
                val_loader,
                optimizer,
                scheduler,
                tracker,
                train=False,
                prefix='val',
                epoch=i,
                has_answers=not args.test)

        if not args.test:
            results = {
                'name': name,
                'tracker': tracker.to_dict(),
                'config': config_as_dict,
                'weights': net.state_dict(),
                'eval': {
                    'answers': r[0],
                    'accuracies': r[1],
                    'idx': r[2],
                },
                'vocab': val_loader.dataset.vocab,
                'src': src,
            }
            torch.save(results, target_name)
        else:
            # in test mode, save a results file in the format accepted by the submission server
            answer_index_to_string = {
                a: s
                for s, a in val_loader.dataset.answer_to_index.items()
            }
            results = []
            for answer, index in zip(r[0], r[2]):
                answer = answer_index_to_string[answer]
                qid = val_loader.dataset.question_ids[index]
                entry = {
                    'question_id': qid,
                    'answer': answer,
                }
                results.append(entry)
            with open('results.json', 'w') as fd:
                json.dump(results, fd)

        if args.eval_only:
            break
Code Example #20
def main():
    # set if you want to retrain
    retrain = False
    retrain_path = "logs/DropOut0"

    if len(sys.argv) > 1:
        name = ''.join(sys.argv[1:])
    else:
        from datetime import datetime
        name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    target_name = os.path.join('logs', '{}.pth'.format(name))
    print('will save to {}'.format(target_name))

    cudnn.benchmark = True

    train_loader = data.get_loader(train=True)
    val_loader = data.get_loader(val=True)

    if retrain:
        print("Retraining from" + retrain_path)
        log = torch.load(retrain_path + '.pth')
        tokens = len(log['vocab']['question']) + 1

        net = torch.nn.DataParallel(model.Net(tokens)).cuda()
        net.load_state_dict(log['weights'])

    else:
        print("Training new net")
        net = nn.DataParallel(model.Net(
            train_loader.dataset.num_tokens)).cuda()

    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad])

    tracker = utils.Tracker()
    config_as_dict = {
        k: v
        for k, v in vars(config).items() if not k.startswith('__')
    }

    train_acc = []
    train_loss = []
    val_acc = []
    for i in range(config.epochs):
        accs, losses = run(net,
                           train_loader,
                           optimizer,
                           tracker,
                           train=True,
                           prefix='train',
                           epoch=i)
        r = run(net,
                val_loader,
                optimizer,
                tracker,
                train=False,
                prefix='val',
                epoch=i)

        train_acc.append(sum(accs) / len(accs))
        train_loss.append(sum(losses) / len(losses))
        val_acc.append(sum(r[1]) / len(r[1]))

        results = {
            'name': name,
            'tracker': tracker.to_dict(),
            'config': config_as_dict,
            'weights': net.state_dict(),
            'eval': {
                'answers': r[0],
                'accuracies': r[1],
                'idx': r[2],
            },
            'vocab': train_loader.dataset.vocab,
        }
        torch.save(results, target_name)

        with open(name + '_results.pkl',
                  'wb') as f:  # Python 3: open(..., 'wb')
            pickle.dump([train_acc, train_loss, val_acc], f)
Code Example #21
def main():
    # Load config yaml file
    parser = argparse.ArgumentParser()
    parser.add_argument('--path_config',
                        default='config/default.yaml',
                        type=str,
                        help='path to a yaml config file')
    args = parser.parse_args()

    if args.path_config is not None:
        with open(args.path_config, 'r') as handle:
            config = yaml.safe_load(handle)

    # generate log directory
    dir_name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    path_log_dir = os.path.join(config['logs']['dir_logs'], dir_name)

    if not os.path.exists(path_log_dir):
        os.makedirs(path_log_dir)

    print('Model logs will be saved in {}'.format(path_log_dir))

    cudnn.benchmark = True

    # Generate datasets and loaders
    train_loader = vqa_dataset.get_loader(config, split='train')
    val_loader = vqa_dataset.get_loader(config, split='val')

    model = models.Model(config, train_loader.dataset.num_tokens).to(dev)

    optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, model.parameters()),
        config['training']['lr'])

    # Load model weights if necessary
    if config['model']['pretrained_model'] is not None:
        print("Loading Model from %s" % config['model']['pretrained_model'])
        log = torch.load(config['model']['pretrained_model'])
        dict_weights = log['weights']
        model.load_state_dict(dict_weights)

    tracker = utils.Tracker()

    min_loss = 10
    max_accuracy = 0

    path_best_accuracy = os.path.join(path_log_dir, 'best_accuracy_log.pth')
    path_best_loss = os.path.join(path_log_dir, 'best_loss_log.pth')

    for i in range(config['training']['epochs']):

        train(model,
              train_loader,
              optimizer,
              tracker,
              epoch=i,
              split=config['training']['train_split'])
        # If we are training on the train split (and not on train+val) we can evaluate on val
        if config['training']['train_split'] == 'train':
            eval_results = evaluate(model,
                                    val_loader,
                                    tracker,
                                    epoch=i,
                                    split='val')

            # save all the information in the log file
            log_data = {
                'epoch': i,
                'tracker': tracker.to_dict(),
                'config': config,
                'weights': model.state_dict(),
                'eval_results': eval_results,
                'vocabs': train_loader.dataset.vocabs,
            }

            # save logs for min validation loss and max validation accuracy
            if eval_results['avg_loss'] < min_loss:
                #                 torch.save(log_data, path_best_loss)  # save model
                min_loss = eval_results['avg_loss']  # update min loss value

            if eval_results['avg_accuracy'] > max_accuracy:
                #                 torch.save(log_data, path_best_accuracy)  # save model
                max_accuracy = eval_results[
                    'avg_accuracy']  # update max accuracy value

    # Save final model
    log_data = {
        'tracker': tracker.to_dict(),
        'config': config,
        'weights': model.state_dict(),
        'vocabs': train_loader.dataset.vocabs,
    }

    path_final_log = os.path.join(path_log_dir, 'final_log.pth')
    torch.save(log_data, path_final_log)
Code Example #22
def main():
    print('-' * 50)
    config.print_param()

    # set manual seed
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed(config.seed)

    train_loader = data.get_loader(train=True)
    val_loader = data.get_loader(val=True)

    cudnn.benchmark = True

    question_keys = val_loader.dataset.vocab['question'].keys()
    net = Net(question_keys)
    net = nn.DataParallel(net).cuda()  # Support multiple GPUS
    select_optim = optim.Adamax if (config.optim_method
                                    == 'Adamax') else optim.Adam
    optimizer = select_optim([p for p in net.parameters() if p.requires_grad],
                             lr=config.initial_lr,
                             weight_decay=config.weight_decay)
    scheduler = lr_scheduler.ExponentialLR(optimizer,
                                           0.5**(1 / config.lr_halflife))
    print(net)

    train_errors_list = []
    train_losses_list = []

    val_errors_list = []
    val_losses_list = []
    tracker = utils.Tracker()
    for i in range(1, config.epochs + 1):
        # Train:
        train_acc, train_loss = run(net,
                                    train_loader,
                                    optimizer,
                                    scheduler,
                                    tracker,
                                    train=True,
                                    prefix='train',
                                    epoch=i)
        # Val:
        val_acc, val_loss = run(net,
                                val_loader,
                                optimizer,
                                scheduler,
                                tracker,
                                train=False,
                                prefix='val',
                                epoch=i)

        train_errors_list.append(1 - train_acc)
        train_losses_list.append(train_loss)

        val_errors_list.append(1 - val_acc)
        val_losses_list.append(val_loss)

    torch.save(net.module.state_dict(), 'model.pkl')

    sn.set()
    ind = list(range(1, config.epochs + 1))
    # Error
    plt.plot(ind, train_errors_list, label='Train')
    plt.plot(ind, val_errors_list, label='Validation')
    plt.title('Error-rate during epochs')
    plt.xlabel('Epochs')
    plt.ylabel('Error-rate')
    plt.legend()
    # plt.show()
    plt.savefig('error_rate.png')

    plt.clf()

    # Loss
    plt.plot(ind, train_losses_list, label='Train')
    plt.plot(ind, val_losses_list, label='Validation')
    plt.title('Loss during epochs')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    # plt.show()
    plt.savefig('loss_rate.png')
Code Example #23
File: train.py Project: AgarwalVedika/pytorch-vqa
def main():
    start_time = time.time()

    cudnn.benchmark = True

    train_dataset, train_loader = data.get_loader(train=True)
    _, val_loader = data.get_loader(val=True)
    #test_loader = data.get_loader(test=True)

    if config.model_type == 'no_attn':
        net = nn.DataParallel(model2.Net(
            train_loader.dataset.num_tokens)).cuda()
        target_name = os.path.join(config.model_path_no_attn)

    elif config.model_type == 'with_attn':
        net = nn.DataParallel(model.Net(
            train_loader.dataset.num_tokens)).cuda()
        target_name = os.path.join(config.model_path_show_ask_attend_answer)

    elif 'finetuning_CNN_LSTM' in config.model_type:
        #ipdb.set_trace()
        net = nn.DataParallel(model2.Net(
            train_loader.dataset.num_tokens)).cuda()
        model_path = os.path.join(config.model_path_no_attn)
        net.load_state_dict(
            torch.load(model_path)["weights"]
        )  ## SO LOAD  THE MODEL HERE- WE WANT TO START FINETUNING FROM THE BEST WE HAVE
        target_name = os.path.join(
            config.trained_model_save_folder)  # so this will store the models
        os.makedirs(target_name, exist_ok=True)

    elif 'data_aug_CNN_LSTM' in config.model_type:
        #ipdb.set_trace()
        net = nn.DataParallel(model2.Net(
            train_loader.dataset.num_tokens)).cuda()
        target_name = os.path.join(
            config.trained_model_save_folder)  # so this will store the models
        os.makedirs(target_name, exist_ok=True)

    elif 'data_aug_SAAA' in config.model_type:
        #ipdb.set_trace()
        net = nn.DataParallel(model.Net(
            train_loader.dataset.num_tokens)).cuda()
        target_name = os.path.join(
            config.trained_model_save_folder)  # so this will store the models
        os.makedirs(target_name, exist_ok=True)

    elif 'finetuning_SAAA' in config.model_type:
        net = nn.DataParallel(model.Net(
            train_loader.dataset.num_tokens)).cuda()
        model_path = os.path.join(config.model_path_show_ask_attend_answer)
        net.load_state_dict(
            torch.load(model_path)["weights"]
        )  ## SO LOAD  THE MODEL HERE- WE WANT TO START FINETUNING FROM THE BEST WE HAVE
        target_name = os.path.join(
            config.trained_model_save_folder)  # so this will store the models
        os.makedirs(target_name, exist_ok=True)

        # os.makedirs(target_name, exist_ok=True)
    print('will save to {}'.format(target_name))

    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad])
    tracker = utils.Tracker()
    config_as_dict = {
        k: v
        for k, v in vars(config).items() if not k.startswith('__')
    }

    for i in range(config.epochs):
        _ = run(
            net,
            train_loader,
            optimizer,
            tracker,
            train=True,
            prefix='train',
            epoch=i,
            dataset=train_dataset
        )  ## prefix needed as this is passed to tracker, which stores the train acc/loss
        _ = run(
            net,
            val_loader,
            optimizer,
            tracker,
            train=False,
            prefix='val',
            epoch=i
        )  ## prefix needed as this is passed to tracker, which stores the val acc/loss

        results = {
            'tracker': tracker.to_dict(
            ),  ## tracker saves acc/loss for all 50 epochs- since it appends the values ( lines 91..)
            'config': config_as_dict,
            'weights': net.state_dict(),
            # 'eval': {          ## # edit_vedika FT101 you are saving the results here - you don't need this!
            #     'answers': r[0],
            #     'accuracies': r[1],
            #     'idx': r[2],
            # },
            'vocab': train_loader.dataset.vocab,
        }
        saving_target_name = 'epoch_{}.pth'.format(
            i
        )  ## you want to have all finetuned models, so save a model at every epoch
        torch.save(
            results, os.path.join(target_name, saving_target_name)
        )  ## keys:  "name", "tracker", "config", "weights", "eval", "vocab"

    #checkpoint_file = os.path.join('./models/epoch_{}.pth'.format(i))
    #torch.save(net, checkpoint_file)
    #print('saving model to '+ checkpoint_file)

    print('time_taken:', time.time() - start_time)
    print(config.model_type)
Code Example #24
def main():
    if len(sys.argv) > 1:
        name = ' '.join(sys.argv[1:])
    else:
        from datetime import datetime
        name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    target_name = os.path.join('logs', '{}.pth'.format(name))
    print('will save to {}'.format(target_name))

    cudnn.benchmark = True

    train_loader = data.get_loader(train=True)
    val_loader = data.get_loader(val=True)

    # Build Model
    vocab_size = train_loader.dataset.num_tokens
    model = rDAN(num_embeddings=vocab_size,
                 embedding_dim=config.embedding_dim,
                 hidden_size=config.hidden_size,
                 answer_size=config.max_answers)
    if config.pretrained:
        model.textencoder.load_pretrained(
            train_loader.dataset.vocab['question'])
    net = nn.DataParallel(model).cuda()

    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad])

    tracker = utils.Tracker()
    config_as_dict = {
        k: v
        for k, v in vars(config).items() if not k.startswith('__')
    }

    for i in range(config.epochs):
        _ = run(net,
                train_loader,
                optimizer,
                tracker,
                train=True,
                prefix='train',
                epoch=i)
        r = run(net,
                val_loader,
                optimizer,
                tracker,
                train=False,
                prefix='val',
                epoch=i)

        results = {
            'name': name,
            'tracker': tracker.to_dict(),
            'config': config_as_dict,
            'weights': net.state_dict(),
            'eval': {
                'answers': r[0],
                'accuracies': r[1],
                'idx': r[2],
            },
            'vocab': train_loader.dataset.vocab,
        }
        torch.save(results, target_name)
Code Example #25
def main():
    if len(sys.argv) > 1:
        name = ' '.join(sys.argv[1:])
    else:
        from datetime import datetime
        name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    target_name = os.path.join('logs', '{}.pth'.format(name))
    print('will save to {}'.format(target_name))

    cudnn.benchmark = True

    train_loader = data.get_loader(train=True, batch_size=config.batch_size)
    val_loader = data.get_loader(val=True, batch_size=config.batch_size)

    #log = torch.load(config.vqa_model_path)
    #tokens = len(log['vocab']['question']) + 1

    #net = torch.nn.DataParallel(vqa_model.Net(tokens)).cuda()
    #net = vqa_model.Net(tokens).cuda()
    #net.load_state_dict(log['weights'], strict=False)

    vqa_model = model.VQANet()
    #vocab = vqa_model.get_vocab()

    optimizer = optim.Adam(
        [p for p in vqa_model.vqa_net.parameters() if p.requires_grad])

    tracker = utils.Tracker()
    config_as_dict = {
        k: v
        for k, v in vars(config).items() if not k.startswith('__')
    }

    for i in range(config.epochs):
        _ = run(vqa_model,
                train_loader,
                optimizer,
                tracker,
                train=True,
                prefix='train',
                epoch=i)
        r = run(vqa_model,
                val_loader,
                optimizer,
                tracker,
                train=False,
                prefix='val',
                epoch=i)

        #pdb.set_trace()

        results = {
            'name': name,
            'tracker': tracker.to_dict(),
            'config': config_as_dict,
            'weights': vqa_model.vqa_net.state_dict(),
            'eval': {
                'answers': r[0],
                'accuracies': r[1],
                'idx': r[2],
            },
            'vocab': train_loader.dataset.vocab,
        }
        torch.save(results, target_name)
Code Example #26
File: tracking.py Project: rodsnjr/motion_tracking
def create_tracker(current_tracker, square, positions):
    n_tracker = utils.Tracker(positions, current_tracker.index,
                              current_tracker.tracking, square)
    if not n_tracker.noise():
        new_trackers.append(n_tracker)
Code Example #27
G_opt = optim.Adam(list(netG.parameters()), lr=args.lrG)
D_opt = optim.Adam(list(netD.parameters()), lr=args.lrD)
GS_opt = optim.Adam(list(netGS.parameters()), lr=args.lrG)

# loss criteria
logsigmoid = nn.LogSigmoid()
mse = nn.MSELoss(reduce=False)
LOG2 = Variable(torch.from_numpy(np.ones(1) * np.log(2)).float())
print(LOG2)
if torch.cuda.is_available():
    LOG2 = LOG2.cuda()

#=========== LOGGING INITIALIZATION ================

vis = utils.init_visdom(args.env)
tracker = utils.Tracker()
tracker_plot = None
scale_plot = None

#============================================================
#============ MAIN TRAINING LOOP ============================
#============================================================

for epoch in range(args.max_iter):

    for it, (s_inputs, t_inputs) in enumerate(loader):

        s_inputs, t_inputs = Variable(s_inputs), Variable(t_inputs)
        if torch.cuda.is_available():
            s_inputs, t_inputs = s_inputs.cuda(), t_inputs.cuda()
Code Example #28
def main():
    if len(sys.argv) > 1:
        name = ' '.join(sys.argv[1:])
    else:
        from datetime import datetime
        name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    target_name = os.path.join('logs', '{}.pth'.format(name))
    print('will save to {}'.format(target_name))

    cudnn.benchmark = True

    train_loader = data.get_loader(train=True)
    val_loader = data.get_loader(val=True)

    net = nn.DataParallel(model.Net()).cuda()
    optim_params = [{
        'params': net.module.text.parameters(),
        'lr': config.initial_lr
    }, {
        'params': net.module.attention.parameters(),
        'lr': config.initial_lr
    }, {
        'params': net.module.classifier.parameters(),
        'lr': config.initial_lr
    }]
    optimizer = optim.Adam(optim_params)

    tracker = utils.Tracker()
    config_as_dict = {
        k: v
        for k, v in vars(config).items() if not k.startswith('__')
    }

    for i in range(config.epochs):
        l = run(net,
                train_loader,
                optimizer,
                tracker,
                train=True,
                prefix='train',
                epoch=i)
        r = run(net,
                val_loader,
                optimizer,
                tracker,
                train=False,
                prefix='val',
                epoch=i)

        results = {
            'name': name,
            'tracker': tracker.to_dict(),
            'config': config_as_dict,
            'weights': net.state_dict(),
            'eval': {
                'answers': r[0],
                'accuracies': r[1],
                'idx': r[2],
            },
            'train': {
                'answers': l[0],
                'accuracies': l[1],
                'idx': l[2],
            },
            'vocab': train_loader.dataset.vocab,
        }
        torch.save(results, target_name)
Code Example #29
import os

import numpy as np
import torch
from ddpg import DDPG
from naf import NAF  # used below when args.algo == "NAF"
from normalized_actions import NormalizedActions
from ounoise import OUNoise
from replay_memory import ReplayMemory, Transition
from rover_domain import Task_Rovers
import utils as utils
from torch.autograd import Variable
#import calculate_rewards
from visualizer import visualize
from D_VAE_Parameters import Parameters
from calculate_rewards import *

args = Parameters()
tracker = utils.Tracker(args, ['rewards'], '')
env = Task_Rovers(args)

torch.manual_seed(args.seed)
np.random.seed(args.seed)

poi_vals = env.set_poi_values()
if not os.path.exists(args.save_foldername): os.makedirs(args.save_foldername)

if args.algo == "NAF":
    agent = NAF(args.gamma, args.tau, args.num_hnodes,
                env.observation_space.shape[0], env.action_space, args)
else:
    agent = DDPG(args.gamma, args.tau, args.num_hnodes,
                 env.observation_space.shape[0], env.action_space, args)
Code Example #30
def main(name=None, config2=None):
    print("running on", "cuda:0" if torch.cuda.is_available() else "cpu")
    if len(sys.argv) > 1:
        name = ' '.join(sys.argv[1:])
    else:
        from datetime import datetime
        if name is None:
            name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
        else:
            name = name + datetime.now().strftime("-%Y-%m-%d_%H:%M:%S")
    target_name = os.path.join('logs_baseline', '{}.pth'.format(name))
    print('will save to {}'.format(target_name))

    cudnn.benchmark = True

    train_loader = data.get_loader(train=True)
    val_loader = data.get_loader(val=True)

    net = nn.DataParallel(Net(train_loader.dataset.num_tokens)).cuda()
    # def get_activation(name):
    #     def hook(model, input, output):
    #         activation[name] = output.detach()
    #     return hook
    # for name, layer in net.named_modules():
    #     if name == "module.attention.x_conv":
    #         layer.register_forward_hook(get_activation('attention'))
    tracker = utils.Tracker()

    global config_as_dict
    if config2 is None:
        config_as_dict = {
            k: v
            for k, v in vars(config).items() if not k.startswith('__')
        }
    else:
        config_as_dict = config2

    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad],
                           weight_decay=config_as_dict["weight_decay"])

    for i in range(config_as_dict["epochs"]):
        _ = run(net,
                train_loader,
                optimizer,
                tracker,
                train=True,
                prefix='train',
                epoch=i)
        r = run(net,
                val_loader,
                optimizer,
                tracker,
                train=False,
                prefix='val',
                epoch=i)

        results = {
            'name': name,
            'tracker': tracker.to_dict(),
            'config': config_as_dict,
            'weights': net.state_dict(),
            'eval': {
                'answers': r[0],
                'accuracies': r[1],
                'idx': r[2],
            },
            'vocab': train_loader.dataset.vocab,
        }
        torch.save(results, target_name)
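None of the examples above includes the run() function they all call, and its signature varies between projects (some pass a scheduler, some a has_answers flag). As an assumption-labeled sketch only, a single train/eval epoch matching the most common call run(net, loader, optimizer, tracker, train=..., prefix=..., epoch=...) might look like the following; the batch layout, loss, and accuracy computation are placeholders rather than any project's original code:

import torch
import torch.nn as nn

def run(net, loader, optimizer, tracker, train=False, prefix='', epoch=0):
    # Hypothetical epoch loop; batch unpacking and metrics are placeholders.
    if train:
        net.train()
    else:
        net.eval()
    loss_log = tracker.track('{}_loss'.format(prefix))  # assumed Tracker.track API (see sketch above)
    acc_log = tracker.track('{}_acc'.format(prefix))
    criterion = nn.CrossEntropyLoss()

    answers, accuracies, idxs = [], [], []
    with torch.set_grad_enabled(train):
        for v, q, a, idx, q_len in loader:  # placeholder batch layout
            out = net(v, q, q_len)
            loss = criterion(out, a)
            preds = out.argmax(dim=1)
            batch_acc = (preds == a).float()

            if train:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            else:
                answers.extend(preds.cpu())
                accuracies.extend(batch_acc.cpu())
                idxs.extend(idx)

            loss_log.append(loss.item())
            acc_log.append(batch_acc.mean().item())

    print('{} epoch {}: loss {:.4f}, acc {:.4f}'.format(
        prefix, epoch, sum(loss_log) / max(len(loss_log), 1),
        sum(acc_log) / max(len(acc_log), 1)))
    return answers, accuracies, idxs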