Example #1
File: eval.py  Project: zhuhd15/aima
def main():
    global opt
    parser = OptionParser()
    parser.add_option('--config',
                      type=str,
                      help="evaluation configuration",
                      default="./configs/test_config.yaml")

    (opts, args) = parser.parse_args()
    assert isinstance(opts, object)
    opt = Config(opts.config)
    #print(opt)
    if torch.cuda.is_available():
        if not opt.cuda:
            print("WARNING: You have a CUDA device, so you should probably run with \"cuda: True\"")
        else:
            # enable the cudnn autotuner only when actually running on the GPU
            cudnn.benchmark = True

    # loading test datasets (video and audio features)
    test_video_dataset = dset(opt.data_dir, opt.video_flist, which_feat='vfeat')
    test_audio_dataset = dset(opt.data_dir, opt.audio_flist, which_feat='afeat')
    print('number of test samples is: {0}'.format(len(test_video_dataset)))
    print('finished loading data')

    # test data loaders
    test_video_loader = torch.utils.data.DataLoader(test_video_dataset, batch_size=opt.batchSize,
                                                    shuffle=False, num_workers=int(opt.workers))
    test_audio_loader = torch.utils.data.DataLoader(test_audio_dataset, batch_size=opt.batchSize,
                                                    shuffle=False, num_workers=int(opt.workers))
    # create model
    model = mnist_model.mnistModel()
    if opt.cuda:
        print('shift model to GPU .. ')
        model = model.cuda()

    test(test_video_loader, test_audio_loader, model, opt)
Example #2
def main():
    global opt
    # train data loader
    tl_ls = []
    for tds in tds_ls:
        tl_ls.append(
            torch.utils.data.DataLoader(tds,
                                        batch_size=opt.batchSize,
                                        shuffle=True,
                                        num_workers=int(opt.workers)))

    # create model
    model_ls = []
    for i in range(opt.model_number):
        m = models.VA_lstm()
        # m = models.VAMetric_conv()
        model_ls.append(m)

    if opt.init_model_epoch != '':
        for i in range(opt.model_number):
            path = '{0}/{1}_state_epoch{2}_model{3}.pth'.format(
                opt.checkpoint_folder, opt.prefix, opt.init_model_epoch, i + 1)
            print('loading pretrained model from {0}'.format(path))
            model_ls[i].load_state_dict(torch.load(path))

    # Contrastive Loss
    # criterion = models.conv_loss_dqy()
    # criterion = models.N_pair_loss()
    # criterion = models.Topk_loss()
    criterion = models.lstm_loss()

    if opt.cuda:
        print('shift model and criterion to GPU .. ')
        for i in range(opt.model_number):
            model_ls[i] = model_ls[i].cuda()
        criterion = criterion.cuda()

    # optimizer
    # optimizer = optim.SGD(model.parameters(), lr=opt.lr,
    #                      momentum=opt.momentum,
    #                      weight_decay=opt.weight_decay)

    opt_ls = []
    for m in model_ls:
        op = optim.Adam(m.parameters(), lr=opt.lr)
        # op = optim.SGD(m.parameters(), lr=opt.lr,
        #                momentum=opt.momentum,
        #                weight_decay=opt.weight_decay)
        opt_ls.append(op)

    # optimizer = optim.SGD(model.parameters(), lr=opt.lr, weight_decay=opt.weight_decay, momentum=opt.momentum)
    # optimizer = optim.Adadelta(params=model.parameters(), lr=opt.lr)
    # adjust learning rate every lr_decay_epoch
    lambda_lr = lambda epoch: opt.lr_decay**(
        (epoch + 1) // opt.lr_decay_epoch)  # step decay every lr_decay_epoch epochs
    scheduler_ls = []
    for op in opt_ls:
        scheduler_ls.append(LR_Policy(op, lambda_lr))

    resume_epoch = 0

    global positive_rec
    global negative_rec
    global loss_rec

    loss_rec = []
    positive_rec = []
    negative_rec = []

    ######### to test each epoch
    parser = OptionParser()
    parser.add_option('--config',
                      type=str,
                      help="evaluation configuration",
                      default="./configs/test_config.yaml")

    (opts_test, args) = parser.parse_args()
    opts_test = Config(opts_test.config)
    test_video_dataset = VideoFeatDataset(root=opts_test.data_dir,
                                          flist=opts_test.video_flist,
                                          which_feat='vfeat',
                                          creat_test=0)
    test_audio_dataset = VideoFeatDataset(root=opts_test.data_dir,
                                          flist=opts_test.audio_flist,
                                          which_feat='afeat',
                                          creat_test=0)
    test_video_loader = torch.utils.data.DataLoader(
        test_video_dataset,
        batch_size=opts_test.batchSize,
        shuffle=False,
        num_workers=int(opts_test.workers))
    test_audio_loader = torch.utils.data.DataLoader(
        test_audio_dataset,
        batch_size=opts_test.batchSize,
        shuffle=False,
        num_workers=int(opts_test.workers))

    ########

    # another test for git
    for epoch in range(resume_epoch, opt.max_epochs):
        #################################
        # train for one epoch
        #################################
        for i in range(opt.model_number):
            train(train_loader=tl_ls[i],
                  model=model_ls[i],
                  criterion=criterion,
                  optimizer=opt_ls[i],
                  epoch=epoch + 1,
                  opt=opt,
                  num=i + 1)
            scheduler_ls[i].step()
        ##################################
        # save checkpoints
        ##################################

        # save model every 10 epochs
        if ((epoch + 1) % opt.epoch_save) == 0:
            for i in range(opt.model_number):
                path_checkpoint = '{0}/{1}_state_epoch{2}_model{3}.pth'.format(
                    opt.checkpoint_folder, opt.prefix, epoch + 1, i + 1)
                utils.save_checkpoint(model_ls[i].state_dict(),
                                      path_checkpoint)

        if ((epoch + 1) % opt.epoch_plot) == 0:
            plt.figure(1)
            plt.subplot(1, 2, 1)
            plt.plot(loss_rec)
            plt.legend(['loss'])
            plt.subplot(1, 2, 2)
            plt.plot(positive_rec)
            plt.plot(negative_rec)
            plt.legend(
                ('similarity of positives', 'similarity of negatives'))
            # save before show: with a non-interactive backend show() is a no-op
            plt.savefig('./figures/result{0}.jpg'.format(epoch + 1))
            plt.show()
            plt.close()
        if ((epoch + 1) % opt.epoch_test) == 0:
            evaluate.test(test_video_loader, test_audio_loader, model_ls,
                          opts_test)
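Note: LR_Policy is not defined in any of these snippets. From the way it is used above (constructed from an optimizer and a per-epoch lambda, then stepped once per epoch), it appears to behave like torch.optim.lr_scheduler.LambdaLR; the stand-in below is a minimal sketch under that assumption, not the project's actual class.

class LR_Policy(object):
    # Hypothetical LR_Policy: each step() rescales every param group's lr to
    # base_lr * lambda_fn(epoch), mirroring LambdaLR's behaviour.
    def __init__(self, optimizer, lambda_fn):
        self.optimizer = optimizer
        self.lambda_fn = lambda_fn
        self.base_lrs = [group['lr'] for group in optimizer.param_groups]
        self.epoch = 0

    def step(self):
        self.epoch += 1
        factor = self.lambda_fn(self.epoch)
        for group, base_lr in zip(self.optimizer.param_groups, self.base_lrs):
            group['lr'] = base_lr * factor

With, for example, lr_decay = 0.5 and lr_decay_epoch = 10, the factor lr_decay ** ((epoch + 1) // lr_decay_epoch) halves the learning rate every 10 epochs, i.e. a step decay rather than a true polynomial schedule.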
Example #3
import os
from optparse import OptionParser

from sklearn.decomposition import PCA
import matplotlib as mpl

mpl.use('Agg')

from matplotlib import pyplot as plt

from dataset import VideoFeatDataset
from tools.config_tools import Config

parser = OptionParser()
parser.add_option('--config',
                  type=str,
                  help="training configuration",
                  default="./configs/train_config.yaml")

(opts, args) = parser.parse_args()
assert isinstance(opts, object)
opt = Config(opts.config)
print(opt)

if opt.checkpoint_folder is None:
    opt.checkpoint_folder = 'checkpoints'

# make dir
if not os.path.exists(opt.checkpoint_folder):
    os.system('mkdir {0}'.format(opt.checkpoint_folder))

tds_ls = []
for i in range(opt.model_number):
    if i == 0:
        tds_ls.append(
            VideoFeatDataset(root=opt.data_dir,
                             flist=opt.flist,
Example #4
import os
import pdb
from optparse import OptionParser

from dataset import VideoFeatDataset as dset
from tools.glog_tools import get_logger
from tools.config_tools import Config
from tools import utils

parser = OptionParser()
parser.add_option('--config',
                  type=str,
                  help="training configuration",
                  default="./configs/train_config.yaml")

(opts, args) = parser.parse_args()
assert isinstance(opts, object)
opt = Config(opts.config)

mylog, logfile = get_logger(fileName=opt.log_name)
print(opt)
os.popen('cat {0} >> {1}'.format(opts.config, logfile))

if opt.checkpoint_folder is None:
    opt.checkpoint_folder = 'models_checkpoint'

# make dir
if not os.path.exists(opt.checkpoint_folder):
    os.system('mkdir {0}'.format(opt.checkpoint_folder))

train_dataset = dset(opt.data_dir, flist=opt.flist)

mylog.info('number of train samples is: {0}'.format(len(train_dataset)))
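get_logger comes from tools.glog_tools, which is not included in these excerpts. Given that it returns both a logger and the log file path and is used as mylog.info(...), a minimal sketch on top of the standard logging module might look like the following (an assumption, not the project's actual implementation):

import logging


def get_logger(fileName='train.log'):
    # Hypothetical stand-in: log to both the given file and the console,
    # and return the logger together with the log file path.
    logger = logging.getLogger('train')
    logger.setLevel(logging.INFO)
    fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')

    file_handler = logging.FileHandler(fileName)
    file_handler.setFormatter(fmt)
    logger.addHandler(file_handler)

    console_handler = logging.StreamHandler()
    console_handler.setFormatter(fmt)
    logger.addHandler(console_handler)

    return logger, fileName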
Example #5
from optparse import OptionParser

import numpy as np
import torch
from torch.autograd import Variable

from dataset import VideoFeatDataset as dset
from tools.config_tools import Config

# load opts
parser = OptionParser()
parser.add_option('--config',
                  type=str,
                  help="training configuration",
                  default="./configs/bf_train_config.yaml")

(bf_opts, args) = parser.parse_args()
assert isinstance(bf_opts, object)
bf_opt = Config(bf_opts.config)
print(bf_opt)


def extract_feats(data_loader, deep_model):
    # inference: switch the model to evaluation mode
    deep_model.eval()

    X, y = [], []

    cum_sample = 0
    num_sample = len(data_loader)
    for vfeat0, afeat0 in data_loader:
        # shuffling the index orders
        bz = vfeat0.size()[0]
        orders = np.arange(bz).astype('int32')
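The function is cut off after shuffling the batch indices. Since Example #3 imports sklearn's PCA, the collected features are presumably projected for visualization afterwards; the sketch below is purely illustrative and uses a synthetic feature matrix in place of the real X:

import numpy as np
from sklearn.decomposition import PCA

# Synthetic stand-in for the feature matrix gathered by extract_feats.
X = np.random.randn(256, 128).astype('float32')

pca = PCA(n_components=2)       # keep the two leading principal components
X_2d = pca.fit_transform(X)     # shape: (256, 2)
print(X_2d.shape)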
Example #6
from __future__ import print_function
from __future__ import division
import os
from optparse import OptionParser
from tools.config_tools import Config

# ----------------------------------- loading parameters ------------------------------------------#
parser = OptionParser()
parser.add_option('--config',
                  type=str,
                  help="evaluation configuration",
                  default="./configs/test_config.yaml")

(opts, args) = parser.parse_args()
assert isinstance(opts, object)
opt = Config(opts.config)
print(opt)
# --------------------------------------------------------------------------------------------------#

# ------------------ environment variable should be set before import torch  -----------------------#
if opt.cuda:
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu_id)  # str() in case the YAML value is an int
    print('setting gpu on gpuid {0}'.format(opt.gpu_id))
# --------------------------------------------------------------------------------------------------#

import torch
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import numpy as np
Example #7
File: evaluation.py  Project: zhuhd15/aima
import os
import pickle
from optparse import OptionParser

import matplotlib as mpl

mpl.use('Agg')

from matplotlib import pyplot as plt

from eval import test
from tools.config_tools import Config

parser = OptionParser()
parser.add_option('--config',
                  type=str,
                  help="training configuration",
                  default="./configs/test_config.yaml")

(opts, args) = parser.parse_args()
assert isinstance(opts, object)
opt = Config(opts.config)
#print(opt)

if opt.checkpoint_folder is None:
    opt.checkpoint_folder = 'checkpoints'

# make dir
if not os.path.exists(opt.checkpoint_folder):
    os.system('mkdir {0}'.format(opt.checkpoint_folder))




def main():
    global opt
    train_dataset = mnist_Dataset(num_of_cross=0, cross=1)
Example #8
def main():
    global opt
    # train data loader
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=opt.batchSize,
                                               shuffle=True,
                                               num_workers=int(opt.workers))

    # create model
    model = models.VAMetric_conv()

    if opt.init_model != '':
        print('loading pretrained model from {0}'.format(opt.init_model))
        model.load_state_dict(torch.load(opt.init_model))

    # Contrastive Loss
    criterion = models.conv_loss_dqy()

    if opt.cuda:
        print('shift model and criterion to GPU .. ')
        model = model.cuda()
        criterion = criterion.cuda()

    # optimizer
    # optimizer = optim.SGD(model.parameters(), lr=opt.lr,
    #                      momentum=opt.momentum,
    #                      weight_decay=opt.weight_decay)

    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    # optimizer = optim.SGD(model.parameters(), lr=opt.lr, weight_decay=opt.weight_decay, momentum=opt.momentum)
    # optimizer = optim.Adadelta(params=model.parameters(), lr=opt.lr)
    # adjust learning rate every lr_decay_epoch
    lambda_lr = lambda epoch: opt.lr_decay**(
        (epoch + 1) // opt.lr_decay_epoch)  # step decay every lr_decay_epoch epochs
    scheduler = LR_Policy(optimizer, lambda_lr)

    resume_epoch = 0

    global dis1_rec
    global dis2_rec
    global loss_rec

    loss_rec = []
    dis1_rec = []
    dis2_rec = []

    ######### to test each epoch
    parser = OptionParser()
    parser.add_option('--config',
                      type=str,
                      help="evaluation configuration",
                      default="./configs/test_config.yaml")

    (opts_test, args) = parser.parse_args()
    opts_test = Config(opts_test.config)
    test_video_dataset = VideoFeatDataset(opts_test.data_dir,
                                          opts_test.video_flist,
                                          which_feat='vfeat')
    test_audio_dataset = VideoFeatDataset(opts_test.data_dir,
                                          opts_test.audio_flist,
                                          which_feat='afeat')
    test_video_loader = torch.utils.data.DataLoader(
        test_video_dataset,
        batch_size=opts_test.batchSize,
        shuffle=False,
        num_workers=int(opts_test.workers))
    test_audio_loader = torch.utils.data.DataLoader(
        test_audio_dataset,
        batch_size=opts_test.batchSize,
        shuffle=False,
        num_workers=int(opts_test.workers))

    ########

    # another test for git
    for epoch in range(resume_epoch, opt.max_epochs):
        #################################
        # train for one epoch
        #################################
        train(train_loader, model, criterion, optimizer, epoch, opt,
              test_video_loader, test_audio_loader, opts_test)
        scheduler.step()

        ##################################
        # save checkpoints
        ##################################

        # save model every 10 epochs
        if ((epoch + 1) % opt.epoch_save) == 0:
            path_checkpoint = '{0}/{1}_state_epoch{2}.pth'.format(
                opt.checkpoint_folder, opt.prefix, epoch + 1)
            utils.save_checkpoint(model.state_dict(), path_checkpoint)

    plt.figure(1)
    plt.subplot(1, 2, 1)
    plt.plot(loss_rec)
    plt.legend(['loss'])
    plt.subplot(1, 2, 2)
    plt.plot(dis1_rec)
    plt.plot(dis2_rec)
    plt.legend(('distance between positives', 'distance between negatives'))
    # save before show so the figure is not lost once a window is closed
    plt.savefig("./figures/conv.jpg")
    plt.show()
Example #9
def main():
    global opt
    # train data loader
    tl_ls = []
    for tds in tds_ls:
        tl_ls.append(
            torch.utils.data.DataLoader(tds,
                                        batch_size=opt.batchSize,
                                        shuffle=True,
                                        num_workers=int(opt.workers)))

    # create model
    model_ls = []
    for i in range(opt.model_number):
        encoder = models.Encoder()
        decoder = models.AttnDecoder()
        model_ls.append([encoder, decoder])

    # if opt.init_model_epoch != '':
    #     for i in range(opt.model_number):
    #         path = '{0}/{1}_state_epoch{2}_model{3}.pth'.format(opt.checkpoint_folder, opt.prefix,
    #                                                             opt.init_model_epoch, i + 1)
    #         print('loading pretrained model from {0}'.format(path))
    #         model_ls[i].load_state_dict(torch.load(path))

    criterion = models.pairwise_loss()

    if opt.cuda:
        print('shift model and criterion to GPU .. ')
        for i in range(opt.model_number):
            cp = model_ls[i]
            cp[0] = cp[0].cuda()
            cp[1] = cp[1].cuda()
        criterion = criterion.cuda()

    opt_ls = []
    for m in model_ls:
        encoder = m[0]
        decoder = m[1]
        encoder_optim = optim.Adam(encoder.parameters(), lr=opt.lr)
        decoder_optim = optim.Adam(decoder.parameters(), lr=opt.lr)
        # encoder_optim = optim.SGD(encoder.parameters(), lr=opt.lr, weight_decay=opt.weight_decay, momentum=opt.momentum)
        # decoder_optim = optim.SGD(decoder.parameters(), lr=opt.lr, weight_decay=opt.weight_decay, momentum=opt.momentum)
        op = [encoder_optim, decoder_optim]
        opt_ls.append(op)

    # adjust learning rate every lr_decay_epoch
    lambda_lr = lambda epoch: opt.lr_decay**(
        (epoch + 1) // opt.lr_decay_epoch)  # step decay every lr_decay_epoch epochs
    scheduler_ls = []
    for op in opt_ls:
        en = LR_Policy(op[0], lambda_lr)
        de = LR_Policy(op[1], lambda_lr)
        scheduler_ls.append([en, de])

    resume_epoch = 0

    global positive_rec
    global negative_rec
    global loss_rec

    loss_rec = []
    positive_rec = []
    negative_rec = []

    ######### to test each epoch ###############################################################
    parser = OptionParser()
    parser.add_option('--config',
                      type=str,
                      help="evaluation configuration",
                      default="./configs/test_config.yaml")

    (opts_test, args) = parser.parse_args()
    opts_test = Config(opts_test.config)
    test_video_dataset = VideoFeatDataset(root=opts_test.data_dir,
                                          flist=opts_test.video_flist,
                                          which_feat='vfeat',
                                          creat_test=0)
    test_audio_dataset = VideoFeatDataset(root=opts_test.data_dir,
                                          flist=opts_test.audio_flist,
                                          which_feat='afeat',
                                          creat_test=0)
    test_video_loader = torch.utils.data.DataLoader(
        test_video_dataset,
        batch_size=opts_test.batchSize,
        shuffle=False,
        num_workers=int(opts_test.workers))
    test_audio_loader = torch.utils.data.DataLoader(
        test_audio_dataset,
        batch_size=opts_test.batchSize,
        shuffle=False,
        num_workers=int(opts_test.workers))

    ############################################################################################

    # another test for git
    for epoch in range(resume_epoch, opt.max_epochs):
        #################################
        # train for one epoch
        #################################
        for i in range(opt.model_number):
            m = model_ls[i]
            op = opt_ls[i]
            train(train_loader=tl_ls[i],
                  encoder=m[0],
                  decoder=m[1],
                  criterion=criterion,
                  encoder_optim=op[0],
                  decoder_optim=op[1],
                  epoch=epoch + 1,
                  opt=opt,
                  num=i + 1)
            s = scheduler_ls[i]
            s[0].step()
            s[1].step()

        ##################################
        # save checkpoints
        ##################################
        if ((epoch + 1) % opt.epoch_save) == 0:
            for i in range(opt.model_number):
                m = model_ls[i]
                encoder_path_checkpoint = '{0}/{1}_state_epoch{2}_encoder_model_{3}.pth'.format(
                    opt.checkpoint_folder, opt.prefix, epoch + 1, i + 1)
                utils.save_checkpoint(m[0].state_dict(),
                                      encoder_path_checkpoint)

                decoder_path_checkpoint = '{0}/{1}_state_epoch{2}_decoder_model_{3}.pth'.format(
                    opt.checkpoint_folder, opt.prefix, epoch + 1, i + 1)
                utils.save_checkpoint(m[1].state_dict(),
                                      decoder_path_checkpoint)

                print('Save encoder model to {0}'.format(
                    encoder_path_checkpoint))
                print('Save decoder model to {0}'.format(
                    decoder_path_checkpoint))

        if ((epoch + 1) % opt.epoch_plot) == 0:
            plt.figure(1)
            # plt.subplot(1, 2, 1)
            plt.plot(loss_rec)
            plt.legend(['loss'])
            # plt.subplot(1, 2, 2)
            # plt.plot(positive_rec)
            # plt.plot(negative_rec)
            # plt.legend(('similarity of positives', 'similarity of negatives'))
            # save before show: with a non-interactive backend show() is a no-op
            plt.savefig('./figures/lstm_result{0}.jpg'.format(epoch + 1))
            plt.show()
            plt.close()
        if ((epoch + 1) % opt.epoch_test) == 0:
            evaluate.test(test_video_loader, test_audio_loader, model_ls,
                          opts_test)
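utils.save_checkpoint is called throughout with a state dict and a target path. Assuming it is a thin wrapper around torch.save that also makes sure the checkpoint folder exists, a minimal sketch would be:

import os
import torch


def save_checkpoint(state, path_checkpoint):
    # Hypothetical stand-in for tools.utils.save_checkpoint: create the
    # target folder if needed, then serialize the state dict with torch.save.
    folder = os.path.dirname(path_checkpoint)
    if folder and not os.path.exists(folder):
        os.makedirs(folder)
    torch.save(state, path_checkpoint)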
Example #10
import time
from optparse import OptionParser

import models
from dataset import VideoFeatDataset as dset
from tools.config_tools import Config
from tools import utils

parser = OptionParser()
parser.add_option('--config',
                  type=str,
                  help="evaluation configuration",
                  default="./configs/test_config.yaml")

(test_opts, args) = parser.parse_args()
assert isinstance(test_opts, object)
test_opt = Config(test_opts.config)
print(test_opt)

if test_opt.checkpoint_folder is None:
    test_opt.checkpoint_folder = 'checkpoints'

test_video_dataset = dset(test_opt.data_dir,
                          test_opt.video_flist,
                          which_feat='vfeat')
test_audio_dataset = dset(test_opt.data_dir,
                          test_opt.audio_flist,
                          which_feat='afeat')

print('number of test samples is: {0}'.format(len(test_video_dataset)))
print('finished loading data')
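Every example builds its options with Config(opts.config) and then reads attributes such as opt.data_dir, opt.batchSize or opt.cuda. The class itself is never shown; a minimal, assumed implementation of tools.config_tools.Config as a YAML-to-attribute wrapper (the real class may differ) would be:

import yaml


class Config(object):
    # Hypothetical minimal Config: load a YAML file and expose its
    # top-level keys as attributes (opt.data_dir, opt.batchSize, ...).
    def __init__(self, path):
        with open(path, 'r') as f:
            cfg = yaml.safe_load(f) or {}
        for key, value in cfg.items():
            setattr(self, key, value)

    def __repr__(self):
        return '\n'.join('{0}: {1}'.format(k, v)
                         for k, v in sorted(self.__dict__.items()))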