Example #1
def main():
    global opt
    # train data loader
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=opt.batchSize,
                                               shuffle=True,
                                               num_workers=int(opt.workers))

    # create model
    if opt.model == 'VAMetric':
        model = models.VAMetric()
    elif opt.model == 'VAMetric2':
        model = models.VAMetric2()
    elif opt.model == 'VAMetric3':
        model = models.VAMetric3()
    else:
        model = models.VA_Linear()
        opt.model = 'VA_Linear'

    if opt.init_model != '':
        print('loading pretrained model from {0}'.format(opt.init_model))
        model.load_state_dict(torch.load(opt.init_model))

    # Contrastive Loss
    criterion = models.ContrastiveLoss()
    #criterion = nn.BCELoss()

    if opt.cuda:
        print('shift model and criterion to GPU .. ')
        model = model.cuda()
        criterion = criterion.cuda()

    # optimizer
    #optimizer = optim.SGD(model.parameters(), opt.lr,
    #momentum=opt.momentum,
    #weight_decay=opt.weight_decay)
    optimizer = optim.Adam(model.parameters())
    # adjust learning rate every lr_decay_epoch
    #lambda_lr = lambda epoch: opt.lr_decay ** ((epoch + 1) // opt.lr_decay_epoch)   #poly policy

    for epoch in range(opt.max_epochs):
        #################################
        # train for one epoch
        #################################
        train(train_loader, model, criterion, optimizer, epoch, opt)
        #LR_Policy(optimizer, opt.lr, lambda_lr(epoch))      # adjust learning rate through poly policy

        ##################################
        # save checkpoints
        ##################################

        # save model every opt.epoch_save epochs
        if ((epoch + 1) % opt.epoch_save) == 0:
            path_checkpoint = '{0}/{1}_state_epoch{2}.pth'.format(
                opt.checkpoint_folder, opt.model, epoch + 1)
            utils.save_checkpoint(model, path_checkpoint)
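
This example (and the ones that follow) relies on a module-level opt namespace that is parsed elsewhere in the script, plus the usual imports (torch, torch.optim, numpy, torch.autograd.Variable, and the repository-local models and utils modules). A minimal sketch of how opt might be built with argparse; the option names are taken from the code above, while the defaults here are assumptions:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--batchSize', type=int, default=64)
parser.add_argument('--workers', type=int, default=4)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=5e-4)
parser.add_argument('--lr_decay', type=float, default=0.1)
parser.add_argument('--lr_decay_epoch', type=int, default=30)
parser.add_argument('--max_epochs', type=int, default=100)
parser.add_argument('--epoch_save', type=int, default=10)
parser.add_argument('--checkpoint_folder', default='checkpoints')
parser.add_argument('--init_model', default='')
parser.add_argument('--model', default='VAMetric')
parser.add_argument('--topk', type=int, default=5)
opt = parser.parse_args()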
Example #2
def test(video_loader, audio_loader, model, opt):
    """
    train for one epoch on the training set
    """
    criterion = models.ContrastiveLoss()
    # evaluation mode
    model.eval()

    right = 0
    loss = 0
    for vfeat in video_loader:
        for afeat in audio_loader:
            # transpose feats
            vfeat = vfeat.transpose(2, 1)
            afeat = afeat.transpose(2, 1)

            # shuffling the index orders
            bz = vfeat.size()[0]
            for k in np.arange(bz):
                cur_vfeat = vfeat[k].clone()
                cur_vfeats = cur_vfeat.repeat(bz, 1, 1)

                vfeat_var = Variable(cur_vfeats)
                afeat_var = Variable(afeat)

                if opt.cuda:
                    vfeat_var = vfeat_var.cuda()
                    afeat_var = afeat_var.cuda()

                # one label per batch item; 0 marks the ground-truth pair
                target = torch.ones(bz)
                target[k] = 0
                cur_sim = model(vfeat_var, afeat_var).unsqueeze(1)

                if opt.cuda:
                    loss += criterion(cur_sim, Variable(target).cuda())
                else:
                    loss += criterion(cur_sim, Variable(target))
                if k == 0:
                    simmat = cur_sim.clone()
                else:
                    simmat = torch.cat((simmat, cur_sim), 1)
    _, indices = torch.sort(simmat, 0)
    np_indices = indices.cpu().data.numpy()
    topk = np_indices[:opt.topk, :]
    print(topk)
    print(loss / bz)
    for k in np.arange(bz):
        order = topk[:, k]
        if k in order:
            right += 1
        else:
            print(k)
            print(simmat[k, k], simmat[topk[0, k], k])
    print('The similarity matrix: \n {}'.format(simmat))
    print('Testing accuracy (top{}): {:.3f}'.format(opt.topk, float(right) / bz))
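
Both the training and testing examples depend on models.ContrastiveLoss without showing it. A common margin-based formulation (Hadsell et al., 2006) that matches the convention in the test code above, where label 0 marks the matching pair and the model output is treated as a distance; this is an assumption about the repository's definition, not its actual code:

import torch
import torch.nn as nn

class ContrastiveLoss(nn.Module):
    # Margin-based contrastive loss. Label 0: pull the pair's distance
    # toward 0. Label 1: push the distance beyond the margin.
    def __init__(self, margin=1.0):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin

    def forward(self, dist, label):
        dist = dist.view(-1)
        label = label.view(-1)
        pos = (1 - label) * dist.pow(2)
        neg = label * torch.clamp(self.margin - dist, min=0).pow(2)
        return (pos + neg).mean()

Under this convention the matching pairs have the smallest outputs, which is why the test code sorts ascending and takes the first opt.topk rows.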
Example #3
def main():
    global opt
    best_prec1 = 0
    # only used when resuming training from a checkpoint
    resume_epoch = 0
    # train data loader
    # for the loader, drop_last defaults to False
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=opt.batchSize,
                                               shuffle=True,
                                               num_workers=int(opt.workers))

    # create model
    model = models.VAMetric()

    if not opt.train and opt.init_model != '':
        print('loading pretrained model from {0}'.format(opt.init_model))
        model.load_state_dict(torch.load(opt.init_model))

    # Contrastive Loss
    criterion = models.ContrastiveLoss()

    if opt.cuda:
        print('shift model and criterion to GPU .. ')
        model = model.cuda()
        criterion = criterion.cuda()

    # optimizer
    optimizer = optim.SGD(model.parameters(),
                          opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    # adjust learning rate every lr_decay_epoch (poly policy)
    lambda_lr = lambda epoch: opt.lr_decay**(
        (epoch + 1) // opt.lr_decay_epoch)
    scheduler = LR_Policy(optimizer, lambda_lr)

    for epoch in range(resume_epoch, opt.max_epochs):
        #################################
        # train for one epoch
        #################################
        train(train_loader, model, criterion, optimizer, epoch, opt)
        scheduler.step()

        ##################################
        # save checkpoints
        ##################################

        # save model every opt.epoch_save epochs
        if ((epoch + 1) % opt.epoch_save) == 0:
            path_checkpoint = '{0}/{1}_state_epoch{2}.pth'.format(
                opt.checkpoint_folder, opt.prefix, epoch + 1)
            utils.save_checkpoint(model.state_dict(), path_checkpoint)
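
In this example LR_Policy is constructed once and stepped each epoch, i.e. it behaves like torch.optim.lr_scheduler.LambdaLR. A minimal stand-in under that assumption (the repository's helper may differ):

class LR_Policy(object):
    # LambdaLR-style scheduler: lr = base_lr * lr_lambda(epoch)
    def __init__(self, optimizer, lr_lambda):
        self.optimizer = optimizer
        self.lr_lambda = lr_lambda
        self.base_lrs = [group['lr'] for group in optimizer.param_groups]
        self.epoch = 0

    def step(self):
        self.epoch += 1
        for group, base_lr in zip(self.optimizer.param_groups, self.base_lrs):
            group['lr'] = base_lr * self.lr_lambda(self.epoch)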
Example #4
def main():
    global opt
    # model = models.Test2()
    # model = model.double()
    # train_dataset = dset(opt.data_dir, flist=opt.flist, pca=10)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=opt.batchSize,
                                               shuffle=True,
                                               num_workers=int(opt.workers))

    # create model
    model = models.Test4()
    if opt.init_model != '':
        print('loading pretrained model from {0}'.format(opt.init_model))
        model.load_state_dict(torch.load(opt.init_model))

    # Contrastive Loss
    criterion = models.ContrastiveLoss()

    if opt.cuda:
        print('shift model and criterion to GPU .. ')
        model = model.cuda()
        criterion = criterion.cuda()

    # optimizer = optim.SGD(model.parameters(), opt.lr,
    #                             momentum=opt.momentum,
    #                             weight_decay=opt.weight_decay)
    optimizer = optim.Adam(model.parameters(),
                           opt.lr,
                           weight_decay=opt.weight_decay)

    # adjust learning rate every lr_decay_epoch (poly policy)
    lambda_lr = lambda epoch: opt.lr_decay**(
        (epoch + 1) // opt.lr_decay_epoch)

    for epoch in range(opt.max_epochs):
        #################################
        # train for one epoch
        #################################
        train(train_loader, model, criterion, optimizer, epoch, opt)
        LR_Policy(optimizer, opt.lr,
                  lambda_lr(epoch))  # adjust learning rate through poly policy

        ##################################
        # save checkpoint every opt.epoch_save epochs
        ##################################
        if ((epoch + 1) % opt.epoch_save) == 0:
            path_checkpoint = '{0}/{1}_state.pth'.format(
                opt.checkpoint_folder, type(model).__name__)
            utils.save_checkpoint(model.state_dict(), path_checkpoint)
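
Examples #3 and #4 hand utils.save_checkpoint a state_dict, while example #1 passes the whole model; torch.save serializes either, so a helper along these lines would cover both. A sketch, not the repository's actual utility:

import os
import torch

def save_checkpoint(state, path_checkpoint):
    # 'state' may be a state_dict (examples #3, #4) or a full model (example #1)
    folder = os.path.dirname(path_checkpoint)
    if folder and not os.path.isdir(folder):
        os.makedirs(folder)
    torch.save(state, path_checkpoint)
    print('checkpoint saved to {0}'.format(path_checkpoint))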