Example #1
def main():
    global opt
    best_prec1 = 0
    # only used when we resume training from some checkpoint model
    resume_epoch = 0
    # train data loader
    # note: DataLoader's drop_last defaults to False, so the final
    # incomplete batch is kept
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=opt.batchSize,
                                               shuffle=True,
                                               num_workers=int(opt.workers))

    # create model
    model = models.VAMetric()

    if not opt.train and opt.init_model != '':
        print('loading pretrained model from {0}'.format(opt.init_model))
        model.load_state_dict(torch.load(opt.init_model))

    # Contrastive Loss
    criterion = models.ContrastiveLoss()

    if opt.cuda:
        print('moving model and criterion to GPU ...')
        model = model.cuda()
        criterion = criterion.cuda()

    # optimizer
    optimizer = optim.SGD(model.parameters(),
                          opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    # step decay: multiply the learning rate by opt.lr_decay every
    # opt.lr_decay_epoch epochs
    lambda_lr = lambda epoch: opt.lr_decay**(
        (epoch + 1) // opt.lr_decay_epoch)
    scheduler = LR_Policy(optimizer, lambda_lr)

    for epoch in range(resume_epoch, opt.max_epochs):
        #################################
        # train for one epoch
        #################################
        train(train_loader, model, criterion, optimizer, epoch, opt)
        scheduler.step()

        ##################################
        # save checkpoints
        ##################################

        # save the model every opt.epoch_save epochs
        if ((epoch + 1) % opt.epoch_save) == 0:
            path_checkpoint = '{0}/{1}_state_epoch{2}.pth'.format(
                opt.checkpoint_folder, opt.prefix, epoch + 1)
            utils.save_checkpoint(model.state_dict(), path_checkpoint)
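In this example `LR_Policy` is constructed once and then stepped each epoch, which matches the interface of `torch.optim.lr_scheduler.LambdaLR`. A minimal sketch of such a wrapper, assuming that is all it does (the repository's actual definition is not shown and may differ):

from torch.optim.lr_scheduler import LambdaLR

def LR_Policy(optimizer, lr_lambda):
    # scale each param group's base lr by lr_lambda(epoch) on every step()
    return LambdaLR(optimizer, lr_lambda=lr_lambda)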
Example #2
def main():
    global opt
    # model = models.Test2()
    # model = model.double()
    # train_dataset = dset(opt.data_dir, flist=opt.flist, pca=10)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=opt.batchSize,
                                               shuffle=True,
                                               num_workers=int(opt.workers))

    # create model
    model = models.Test4()
    if opt.init_model != '':
        print('loading pretrained model from {0}'.format(opt.init_model))
        model.load_state_dict(torch.load(opt.init_model))

    # Contrastive Loss
    criterion = models.ContrastiveLoss()

    if opt.cuda:
        print('moving model and criterion to GPU ...')
        model = model.cuda()
        criterion = criterion.cuda()

    # optimizer = optim.SGD(model.parameters(), opt.lr,
    #                             momentum=opt.momentum,
    #                             weight_decay=opt.weight_decay)
    optimizer = optim.Adam(model.parameters(),
                           opt.lr,
                           weight_decay=opt.weight_decay)

    # step decay: multiply the learning rate by opt.lr_decay every
    # opt.lr_decay_epoch epochs
    lambda_lr = lambda epoch: opt.lr_decay**(
        (epoch + 1) // opt.lr_decay_epoch)

    for epoch in range(opt.max_epochs):
        #################################
        # train for one epoch
        #################################
        train(train_loader, model, criterion, optimizer, epoch, opt)
        LR_Policy(optimizer, opt.lr,
                  lambda_lr(epoch))  # apply this epoch's step-decay multiplier

        ##################################
        # save a checkpoint every opt.epoch_save epochs (same file, overwritten)
        ##################################
        if ((epoch + 1) % opt.epoch_save) == 0:
            path_checkpoint = '{0}/{1}_state.pth'.format(
                opt.checkpoint_folder, type(model).__name__)
            utils.save_checkpoint(model.state_dict(), path_checkpoint)
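Note that `LR_Policy` is used differently here than in Example #1: it is called as a plain function every epoch instead of being stepped as a scheduler object. A minimal sketch consistent with this three-argument call (an assumption, not the project's actual code):

def LR_Policy(optimizer, base_lr, multiplier):
    # set every param group to the base lr scaled by this epoch's multiplier
    for param_group in optimizer.param_groups:
        param_group['lr'] = base_lr * multiplier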
Example #3
def main():
    global opt
    # train data loader
    tl_ls = []
    for tds in tds_ls:
        tl_ls.append(
            torch.utils.data.DataLoader(tds,
                                        batch_size=opt.batchSize,
                                        shuffle=True,
                                        num_workers=int(opt.workers)))

    # create model
    model_ls = []
    for i in range(opt.model_number):
        m = models.VA_lstm()
        # m = models.VAMetric_conv()
        model_ls.append(m)

    if opt.init_model_epoch != '':
        for i in range(opt.model_number):
            path = '{0}/{1}_state_epoch{2}_model{3}.pth'.format(
                opt.checkpoint_folder, opt.prefix, opt.init_model_epoch, i + 1)
            print('loading pretrained model from {0}'.format(path))
            model_ls[i].load_state_dict(torch.load(path))

    # loss: models.lstm_loss (other criteria kept as commented alternatives)
    # criterion = models.conv_loss_dqy()
    # criterion = models.N_pair_loss()
    # criterion = models.Topk_loss()
    criterion = models.lstm_loss()

    if opt.cuda:
        print('moving model and criterion to GPU ...')
        for i in range(opt.model_number):
            model_ls[i] = model_ls[i].cuda()
        criterion = criterion.cuda()

    # optimizer
    # optimizer = optim.SGD(model.parameters(), lr=opt.lr,
    #                      momentum=opt.momentum,
    #                      weight_decay=opt.weight_decay)

    opt_ls = []
    for m in model_ls:
        op = optim.Adam(m.parameters(), lr=opt.lr)
        # op = optim.SGD(m.parameters(), lr=opt.lr,
        #                momentum=opt.momentum,
        #                weight_decay=opt.weight_decay)
        opt_ls.append(op)

    # optimizer = optim.SGD(model.parameters(), lr=opt.lr, weight_decay=opt.weight_decay, momentum=opt.momentum)
    # optimizer = optim.Adadelta(params=model.parameters(), lr=opt.lr)
    # step decay: multiply the learning rate by opt.lr_decay every
    # opt.lr_decay_epoch epochs
    lambda_lr = lambda epoch: opt.lr_decay**(
        (epoch + 1) // opt.lr_decay_epoch)
    scheduler_ls = []
    for op in opt_ls:
        scheduler_ls.append(LR_Policy(op, lambda_lr))

    resume_epoch = 0

    global positive_rec
    global negative_rec
    global loss_rec

    loss_rec = []
    positive_rec = []
    negative_rec = []

    ######### build test loaders to evaluate after each epoch #########
    parser = OptionParser()
    parser.add_option('--config',
                      type=str,
                      help="evaluation configuration",
                      default="./configs/test_config.yaml")

    (opts_test, args) = parser.parse_args()
    opts_test = Config(opts_test.config)
    test_video_dataset = VideoFeatDataset(root=opts_test.data_dir,
                                          flist=opts_test.video_flist,
                                          which_feat='vfeat',
                                          creat_test=0)
    test_audio_dataset = VideoFeatDataset(root=opts_test.data_dir,
                                          flist=opts_test.audio_flist,
                                          which_feat='afeat',
                                          creat_test=0)
    test_video_loader = torch.utils.data.DataLoader(
        test_video_dataset,
        batch_size=opts_test.batchSize,
        shuffle=False,
        num_workers=int(opts_test.workers))
    test_audio_loader = torch.utils.data.DataLoader(
        test_audio_dataset,
        batch_size=opts_test.batchSize,
        shuffle=False,
        num_workers=int(opts_test.workers))

    ########

    for epoch in range(resume_epoch, opt.max_epochs):
        #################################
        # train for one epoch
        #################################
        for i in range(opt.model_number):
            train(train_loader=tl_ls[i],
                  model=model_ls[i],
                  criterion=criterion,
                  optimizer=opt_ls[i],
                  epoch=epoch + 1,
                  opt=opt,
                  num=i + 1)
            scheduler_ls[i].step()
        ##################################
        # save checkpoints
        ##################################

        # save each model every opt.epoch_save epochs
        if ((epoch + 1) % opt.epoch_save) == 0:
            for i in range(opt.model_number):
                path_checkpoint = '{0}/{1}_state_epoch{2}_model{3}.pth'.format(
                    opt.checkpoint_folder, opt.prefix, epoch + 1, i + 1)
                utils.save_checkpoint(model_ls[i].state_dict(),
                                      path_checkpoint)

        if ((epoch + 1) % opt.epoch_plot) == 0:
            plt.figure(1)
            plt.subplot(1, 2, 1)
            plt.plot(loss_rec)
            plt.legend(['loss'])
            plt.subplot(1, 2, 2)
            plt.plot(positive_rec)
            plt.plot(negative_rec)
            plt.legend(
                ('similarity of positives', 'similarity of negatives'))
            # save before show: show() can clear the figure, leaving savefig blank
            plt.savefig('./figures/result{0}.jpg'.format(epoch + 1))
            plt.show()
            plt.close()
        if ((epoch + 1) % opt.epoch_test) == 0:
            evaluate.test(test_video_loader, test_audio_loader, model_ls,
                          opts_test)
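For concreteness, the decay lambda used throughout these examples is a staircase. With hypothetical values lr_decay = 0.1 and lr_decay_epoch = 10, the multiplier is 1.0 for epochs 0-8, 0.1 for epochs 9-18, 0.01 for epochs 19-28, and so on:

import math

lr_decay, lr_decay_epoch = 0.1, 10  # hypothetical values, not from the source
lambda_lr = lambda epoch: lr_decay ** ((epoch + 1) // lr_decay_epoch)
assert lambda_lr(0) == 1.0
assert lambda_lr(9) == 0.1
assert math.isclose(lambda_lr(19), 0.01)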
Example #4
def main():
    global opt
    loss_rec = np.zeros((opt.folds, 100))  # records train accuracy per epoch
    acc_rec = np.zeros((opt.folds, 100))  # records test accuracy per epoch
    #loss_rec = np.load('acc_train.npy')
    #acc_rec = np.load('acc.npy')
    for iteration in range(opt.folds):
        train_dataset = mnist_Dataset(num_of_cross=iteration)

        print('number of train samples is: {0}'.format(len(train_dataset)))
        print('finished loading data')

        if opt.manualSeed is None:
            opt.manualSeed = random.randint(1, 10000)

        if torch.cuda.is_available() and not opt.cuda:
            print(
                "WARNING: You have a CUDA device, so you should probably run with \"cuda: True\""
            )
            torch.manual_seed(opt.manualSeed)
        else:
            if int(opt.ngpu) == 1:
                print('using 1 GPU for training')
                print('setting gpu on gpuid {0}'.format(opt.gpu_id))

                if opt.cuda:
                    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_id
                    torch.cuda.manual_seed(opt.manualSeed)
                    cudnn.benchmark = True
        print('Random Seed: {0}'.format(opt.manualSeed))
        # train data loader
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=opt.batchSize,
                                                   shuffle=True,
                                                   num_workers=int(
                                                       opt.workers))

        # create model
        model = mnist_model.cat_and_dog_resnet()

        if opt.init_model != '':
            print('loading pretrained model from {0}'.format(opt.init_model))
            model.load_state_dict(torch.load(opt.init_model))

        # loss: cross-entropy (StableBCELoss kept as a commented-out alternative)
        #criterion = mnist_model.StableBCELoss()
        criterion = nn.CrossEntropyLoss()

        if opt.cuda:
            print('moving model and criterion to GPU ...')
            model = model.cuda()
            criterion = criterion.cuda()

        # optimizer
        # optimizer = optim.SGD(model.parameters(), lr=opt.lr,
        #                      momentum=opt.momentum,
        #                      weight_decay=opt.weight_decay)

        optimizer = optim.Adam(model.parameters(), lr=opt.lr)
        # optimizer = optim.SGD(model.parameters(), lr=opt.lr, weight_decay=opt.weight_decay, momentum=opt.momentum)
        # optimizer = optim.Adadelta(params=model.parameters(), lr=opt.lr)
        # step decay: multiply the learning rate by opt.lr_decay every
        # opt.lr_decay_epoch epochs
        lambda_lr = lambda epoch: opt.lr_decay**(
            (epoch + 1) // opt.lr_decay_epoch)
        scheduler = LR_Policy(optimizer, lambda_lr)

        resume_epoch = 0
        acc = test(model, opt, iteration)
        acc_rec[iteration][0] = acc
        acc = test(model, opt, iteration, Training=True)
        loss_rec[iteration][0] = acc
        for epoch in range(resume_epoch, opt.max_epochs):
            #################################
            # train for one epoch
            #################################
            #accuracy = test(model, opt, epoch)
            train(train_loader, model, criterion, optimizer, iteration, opt,
                  epoch)
            scheduler.step()

            ##################################
            # save checkpoints
            ##################################

            # evaluate after each epoch; record test and train accuracy
            accuracy = test(model, opt, iteration)
            acc_rec[iteration][epoch + 1] = accuracy
            np.save('acc.npy', acc_rec)
            accuracy = test(model, opt, iteration, Training=True)
            loss_rec[iteration][epoch + 1] = accuracy
            np.save('acc_train.npy', loss_rec)

            if ((epoch + 1) % opt.epoch_save) == 0:
                path_checkpoint = '{0}/{1}_{2}_epoch{3}.pth'.format(
                    opt.checkpoint_folder, opt.prefix, iteration, epoch + 1)
                utils.save_checkpoint(model.state_dict(), path_checkpoint)
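`utils.save_checkpoint` is not shown in any of these examples; given that it is always called with a state_dict and a path, a minimal sketch might look like the following (an assumption about the helper, not its actual body):

import os
import torch

def save_checkpoint(state, filename):
    # create the checkpoint folder on first use, then serialize the state dict
    os.makedirs(os.path.dirname(filename) or '.', exist_ok=True)
    torch.save(state, filename)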
Example #5
def main():
    global opt
    # train data loader
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=opt.batchSize,
                                               shuffle=True,
                                               num_workers=int(opt.workers))

    # create model
    model = models.VAMetric_conv()

    if opt.init_model != '':
        print('loading pretrained model from {0}'.format(opt.init_model))
        model.load_state_dict(torch.load(opt.init_model))

    # loss: models.conv_loss_dqy
    criterion = models.conv_loss_dqy()

    if opt.cuda:
        print('moving model and criterion to GPU ...')
        model = model.cuda()
        criterion = criterion.cuda()

    # optimizer
    # optimizer = optim.SGD(model.parameters(), lr=opt.lr,
    #                      momentum=opt.momentum,
    #                      weight_decay=opt.weight_decay)

    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    # optimizer = optim.SGD(model.parameters(), lr=opt.lr, weight_decay=opt.weight_decay, momentum=opt.momentum)
    # optimizer = optim.Adadelta(params=model.parameters(), lr=opt.lr)
    # step decay: multiply the learning rate by opt.lr_decay every
    # opt.lr_decay_epoch epochs
    lambda_lr = lambda epoch: opt.lr_decay**(
        (epoch + 1) // opt.lr_decay_epoch)
    scheduler = LR_Policy(optimizer, lambda_lr)

    resume_epoch = 0

    global dis1_rec
    global dis2_rec
    global loss_rec

    loss_rec = []
    dis1_rec = []
    dis2_rec = []

    ######### build test loaders to evaluate after each epoch #########
    parser = OptionParser()
    parser.add_option('--config',
                      type=str,
                      help="evaluation configuration",
                      default="./configs/test_config.yaml")

    (opts_test, args) = parser.parse_args()
    opts_test = Config(opts_test.config)
    test_video_dataset = VideoFeatDataset(opts_test.data_dir,
                                          opts_test.video_flist,
                                          which_feat='vfeat')
    test_audio_dataset = VideoFeatDataset(opts_test.data_dir,
                                          opts_test.audio_flist,
                                          which_feat='afeat')
    test_video_loader = torch.utils.data.DataLoader(
        test_video_dataset,
        batch_size=opts_test.batchSize,
        shuffle=False,
        num_workers=int(opts_test.workers))
    test_audio_loader = torch.utils.data.DataLoader(
        test_audio_dataset,
        batch_size=opts_test.batchSize,
        shuffle=False,
        num_workers=int(opts_test.workers))

    ########

    for epoch in range(resume_epoch, opt.max_epochs):
        #################################
        # train for one epoch
        #################################
        train(train_loader, model, criterion, optimizer, epoch, opt,
              test_video_loader, test_audio_loader, opts_test)
        scheduler.step()

        ##################################
        # save checkpoints
        ##################################

        # save the model every opt.epoch_save epochs
        if ((epoch + 1) % opt.epoch_save) == 0:
            path_checkpoint = '{0}/{1}_state_epoch{2}.pth'.format(
                opt.checkpoint_folder, opt.prefix, epoch + 1)
            utils.save_checkpoint(model.state_dict(), path_checkpoint)

    plt.figure(1)
    plt.subplot(1, 2, 1)
    plt.plot(loss_rec)
    plt.legend(['loss'])
    plt.subplot(1, 2, 2)
    plt.plot(dis1_rec)
    plt.plot(dis2_rec)
    plt.legend(('distance between positives', 'distance between negatives'))
    # save before show: show() can clear the figure, leaving savefig blank
    plt.savefig("./figures/conv.jpg")
    plt.show()
Example #6
def main():
    global opt
    # train data loader
    tl_ls = []
    for tds in tds_ls:
        tl_ls.append(
            torch.utils.data.DataLoader(tds,
                                        batch_size=opt.batchSize,
                                        shuffle=True,
                                        num_workers=int(opt.workers)))

    # create model
    model_ls = []
    for i in range(opt.model_number):
        encoder = models.Encoder()
        decoder = models.AttnDecoder()
        model_ls.append([encoder, decoder])

    # if opt.init_model_epoch != '':
    #     for i in range(opt.model_number):
    #         path = '{0}/{1}_state_epoch{2}_model{3}.pth'.format(opt.checkpoint_folder, opt.prefix,
    #                                                             opt.init_model_epoch, i + 1)
    #         print('loading pretrained model from {0}'.format(path))
    #         model_ls[i].load_state_dict(torch.load(path))

    criterion = models.pairwise_loss()

    if opt.cuda:
        print('moving model and criterion to GPU ...')
        for i in range(opt.model_number):
            cp = model_ls[i]
            cp[0] = cp[0].cuda()
            cp[1] = cp[1].cuda()
        criterion = criterion.cuda()

    opt_ls = []
    for m in model_ls:
        encoder = m[0]
        decoder = m[1]
        encoder_optim = optim.Adam(encoder.parameters(), lr=opt.lr)
        decoder_optim = optim.Adam(decoder.parameters(), lr=opt.lr)
        # encoder_optim = optim.SGD(encoder.parameters(), lr=opt.lr, weight_decay=opt.weight_decay, momentum=opt.momentum)
        # decoder_optim = optim.SGD(decoder.parameters(), lr=opt.lr, weight_decay=opt.weight_decay, momentum=opt.momentum)
        op = [encoder_optim, decoder_optim]
        opt_ls.append(op)

    # step decay: multiply the learning rate by opt.lr_decay every
    # opt.lr_decay_epoch epochs
    lambda_lr = lambda epoch: opt.lr_decay**(
        (epoch + 1) // opt.lr_decay_epoch)
    scheduler_ls = []
    for op in opt_ls:
        en = LR_Policy(op[0], lambda_lr)
        de = LR_Policy(op[1], lambda_lr)
        scheduler_ls.append([en, de])

    resume_epoch = 0

    global positive_rec
    global negative_rec
    global loss_rec

    loss_rec = []
    positive_rec = []
    negative_rec = []

    ######### build test loaders to evaluate after each epoch ##################################
    parser = OptionParser()
    parser.add_option('--config',
                      type=str,
                      help="evaluation configuration",
                      default="./configs/test_config.yaml")

    (opts_test, args) = parser.parse_args()
    opts_test = Config(opts_test.config)
    test_video_dataset = VideoFeatDataset(root=opts_test.data_dir,
                                          flist=opts_test.video_flist,
                                          which_feat='vfeat',
                                          creat_test=0)
    test_audio_dataset = VideoFeatDataset(root=opts_test.data_dir,
                                          flist=opts_test.audio_flist,
                                          which_feat='afeat',
                                          creat_test=0)
    test_video_loader = torch.utils.data.DataLoader(
        test_video_dataset,
        batch_size=opts_test.batchSize,
        shuffle=False,
        num_workers=int(opts_test.workers))
    test_audio_loader = torch.utils.data.DataLoader(
        test_audio_dataset,
        batch_size=opts_test.batchSize,
        shuffle=False,
        num_workers=int(opts_test.workers))

    ############################################################################################

    for epoch in range(resume_epoch, opt.max_epochs):
        #################################
        # train for one epoch
        #################################
        for i in range(opt.model_number):
            m = model_ls[i]
            op = opt_ls[i]
            train(train_loader=tl_ls[i],
                  encoder=m[0],
                  decoder=m[1],
                  criterion=criterion,
                  encoder_optim=op[0],
                  decoder_optim=op[1],
                  epoch=epoch + 1,
                  opt=opt,
                  num=i + 1)
            s = scheduler_ls[i]
            s[0].step()
            s[1].step()

        ##################################
        # save checkpoints
        ##################################
        if ((epoch + 1) % opt.epoch_save) == 0:
            for i in range(opt.model_number):
                m = model_ls[i]
                encoder_path_checkpoint = '{0}/{1}_state_epoch{2}_encoder_model_{3}.pth'.format(
                    opt.checkpoint_folder, opt.prefix, epoch + 1, i + 1)
                utils.save_checkpoint(m[0].state_dict(),
                                      encoder_path_checkpoint)

                decoder_path_checkpoint = '{0}/{1}_state_epoch{2}_decoder_model_{3}.pth'.format(
                    opt.checkpoint_folder, opt.prefix, epoch + 1, i + 1)
                utils.save_checkpoint(m[1].state_dict(),
                                      decoder_path_checkpoint)

                print('Save encoder model to {0}'.format(
                    encoder_path_checkpoint))
                print('Save decoder model to {0}'.format(
                    decoder_path_checkpoint))

        if ((epoch + 1) % opt.epoch_plot) == 0:
            plt.figure(1)
            # plt.subplot(1, 2, 1)
            plt.plot(loss_rec)
            plt.legend(['loss'])
            # plt.subplot(1, 2, 2)
            # plt.plot(positive_rec)
            # plt.plot(negative_rec)
            # plt.legend(('similarity of positives', 'similarity of negatives'))
            # save before show: show() can clear the figure, leaving savefig blank
            plt.savefig('./figures/lstm_result{0}.jpg'.format(epoch + 1))
            plt.show()
            plt.close()
        if ((epoch + 1) % opt.epoch_test) == 0:
            evaluate.test(test_video_loader, test_audio_loader, model_ls,
                          opts_test)
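Restoring a saved encoder/decoder pair later would mirror the checkpoint paths written above. A hedged sketch, assuming `utils.save_checkpoint` serializes the state_dict with `torch.save` (`epoch` and `idx` are placeholders):

import torch

def load_pair(encoder, decoder, folder, prefix, epoch, idx):
    # paths mirror the save format used in the checkpoint loop above
    enc_path = '{0}/{1}_state_epoch{2}_encoder_model_{3}.pth'.format(
        folder, prefix, epoch, idx)
    dec_path = '{0}/{1}_state_epoch{2}_decoder_model_{3}.pth'.format(
        folder, prefix, epoch, idx)
    encoder.load_state_dict(torch.load(enc_path))
    decoder.load_state_dict(torch.load(dec_path))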
Example #7
def main():
    global opt
    # train data loader
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=opt.batchSize,
                                               shuffle=True,
                                               num_workers=int(opt.workers))

    # create model
    model = models.VAMetric_conv()

    if opt.init_model != '':
        print('loading pretrained model from {0}'.format(opt.init_model))
        model.load_state_dict(torch.load(opt.init_model))

    # loss: cross-entropy (StableBCELoss kept as a commented-out alternative)
    #criterion = models.StableBCELoss()
    criterion = nn.CrossEntropyLoss()

    if opt.cuda:
        print('moving model and criterion to GPU ...')
        model = model.cuda()
        criterion = criterion.cuda()

    # optimizer
    # optimizer = optim.SGD(model.parameters(), lr=opt.lr,
    #                      momentum=opt.momentum,
    #                      weight_decay=opt.weight_decay)

    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    # optimizer = optim.SGD(model.parameters(), lr=opt.lr, weight_decay=opt.weight_decay, momentum=opt.momentum)
    # optimizer = optim.Adadelta(params=model.parameters(), lr=opt.lr)
    # step decay: multiply the learning rate by opt.lr_decay every
    # opt.lr_decay_epoch epochs
    lambda_lr = lambda epoch: opt.lr_decay**(
        (epoch + 1) // opt.lr_decay_epoch)
    scheduler = LR_Policy(optimizer, lambda_lr)

    resume_epoch = 0

    global dis1_rec
    global dis2_rec
    global loss_rec

    loss_rec = []
    dis1_rec = []
    dis2_rec = []
    for epoch in range(resume_epoch, opt.max_epochs):
        #################################
        # train for one epoch
        #################################
        train(train_loader, model, criterion, optimizer, epoch, opt)
        scheduler.step()

        ##################################
        # save checkpoints
        ##################################

        # save the model every opt.epoch_save epochs
        if ((epoch + 1) % opt.epoch_save) == 0:
            path_checkpoint = '{0}/{1}_state_epoch{2}.pth'.format(
                opt.checkpoint_folder, opt.prefix, epoch + 1)
            utils.save_checkpoint(model.state_dict(), path_checkpoint)

    plt.figure(1)
    plt.subplot(1, 2, 1)
    plt.plot(loss_rec)
    plt.legend(['loss'])
    plt.subplot(1, 2, 2)
    plt.plot(dis1_rec)
    plt.plot(dis2_rec)
    plt.legend(('distance between positives', 'distance between negatives'))
    # save before show: show() can clear the figure, leaving savefig blank
    plt.savefig("./figures/conv.jpg")
    plt.show()