Example #1
(opts, args) = parser.parse_args()
assert isinstance(opts, object)
opt = Config(opts.config)

mylog, logfile = get_logger(fileName=opt.log_name)
print(opt)
# append the config file to the log so the run settings are recorded
os.popen('cat {0} >> {1}'.format(opts.config, logfile))

if opt.checkpoint_folder is None:
    opt.checkpoint_folder = 'models_checkpoint'

# create the checkpoint directory if it does not exist yet
if not os.path.exists(opt.checkpoint_folder):
    os.makedirs(opt.checkpoint_folder)

train_dataset = dset(opt.data_dir, flist=opt.flist)

mylog.info('number of train samples is: {0}'.format(len(train_dataset)))
mylog.info('finished loading data')

os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_id

ngpu = int(opt.ngpu)
opt.manualSeed = random.randint(1, 10000)  # pick a random seed for this run (or hard-code one below)
# opt.manualSeed = 123456

if torch.cuda.is_available() and not opt.cuda:
    mylog.info("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
    if ngpu == 1:
        mylog.info('using 1 GPU for training')
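
Example #1 draws opt.manualSeed but never shows it being applied. A minimal sketch of the usual reproducibility setup; the set_seed helper below is illustrative and not part of the original snippet:

import random
import numpy as np
import torch

def set_seed(seed):
    # seed the Python, NumPy and PyTorch RNGs so runs are repeatable
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

# e.g. set_seed(opt.manualSeed) right after the seed is chosen
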
Example #2
        # forward pass to extract deep features (no backward pass is needed here)
        feat = deep_model(vfeat_var, afeat_var)
        X.append(feat.cpu().data.numpy())

        cum_sample += 1
        print('extract deep features: {} / {}'.format(cum_sample, num_sample))

    X = np.concatenate(X)
    y = np.concatenate(y)
    print('extract deep features complete.')
    return X, y


if __name__ == '__main__':
    # load dataset
    train_dataset = dset(bf_opt.data_dir, flist=bf_opt.flist)
    print('number of train samples is: {0}'.format(len(train_dataset)))
    print('finished loading data')

    # train data loader
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=bf_opt.batchSize,
                                               shuffle=True,
                                               num_workers=int(bf_opt.workers))

    # create neural network deep_model
    deep_model = extract_models.VAMetric()

    if bf_opt.init_deep_model != '':
        print('loading pretrained deep_model from {0}'.format(
            bf_opt.init_deep_model))
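
Example #2 is cut off right after the 'loading pretrained deep_model' message. A minimal sketch of how a checkpoint path such as bf_opt.init_deep_model is typically loaded in PyTorch; this is an assumption, not the original code, and the checkpoint may be stored in a different format:

    if bf_opt.init_deep_model != '':
        print('loading pretrained deep_model from {0}'.format(bf_opt.init_deep_model))
        # assume the file holds a plain state_dict saved with torch.save
        state_dict = torch.load(bf_opt.init_deep_model, map_location='cpu')
        deep_model.load_state_dict(state_dict)
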
Example #3
import setproctitle

setproctitle.setproctitle('train@changshuhao')

# remind the user about the CUDA option
if torch.cuda.is_available():
    if not opt.cuda:
        print(
            "WARNING: You have a CUDA device, so you should probably run with \"cuda: True\""
        )
    else:
        cudnn.benchmark = True

# loading test dataset
test_video_dataset = dset(opt.data_dir, opt.video_flist, which_feat='vfeat')
test_audio_dataset = dset(opt.data_dir, opt.audio_flist, which_feat='afeat')
print('number of test samples is: {0}'.format(len(test_video_dataset)))
print('finished loading data')


# test function for metric learning
def test(video_loader, audio_loader, model, opt):
    """
    train for one epoch on the training set
    """
    # evaluation mode: only useful for the models with batchnorm or dropout
    model.eval()

    right = 0  # correct sample number
    sample_num = 0  # total sample number
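
The corresponding test loaders are not shown in this fragment. A minimal sketch of how they would typically be built from the two datasets above, mirroring the train loader in Example #2; the option names batchSize and workers are assumptions carried over from that example:

test_video_loader = torch.utils.data.DataLoader(test_video_dataset,
                                                batch_size=opt.batchSize,
                                                shuffle=False,
                                                num_workers=int(opt.workers))
test_audio_loader = torch.utils.data.DataLoader(test_audio_dataset,
                                                batch_size=opt.batchSize,
                                                shuffle=False,
                                                num_workers=int(opt.workers))
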
Example #4
from torch.autograd import Variable
import numpy as np

import models_v3 as models
from dataset import VideoFeatDataset as dset
from tools import utils

# remind the user about the CUDA option
if torch.cuda.is_available():
    if not opt.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with \"cuda: True\"")
    else:
        cudnn.benchmark = True

# loading test dataset
test_video_dataset = dset(root=opt.data_dir, flist=opt.video_flist, which_feat='vfeat', creat_test=0)
test_audio_dataset = dset(root=opt.data_dir, flist=opt.audio_flist, which_feat='afeat', creat_test=0)
print('number of test samples is: {0}'.format(len(test_video_dataset)))
print('finished loading data')


# test function for metric learning
def test(video_loader, audio_loader, model, opt):
    """
    train for one epoch on the training set
    """
    # evaluation mode: only useful for the models with batchnorm or dropout
    model.eval()

    right = 0  # correct sample number
    sample_num = 0  # total sample number
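
Both test functions above stop right after the right/sample_num counters are initialised. A minimal sketch of how such counters are usually accumulated in an evaluation loop; the pairing scheme (the i-th audio clip in a batch matches the i-th video clip) and the shape of the model output are assumptions, not taken from the original code:

import torch

def evaluate(video_loader, audio_loader, model):
    model.eval()
    right = 0        # correctly matched samples
    sample_num = 0   # total samples seen
    with torch.no_grad():  # no gradients are needed at test time
        for vfeat, afeat in zip(video_loader, audio_loader):
            scores = model(vfeat, afeat)             # assumed shape: (batch, batch) similarity matrix
            pred = scores.argmax(dim=1)              # best-matching audio index per video
            target = torch.arange(scores.size(0))    # ground truth: matching pairs share the index
            right += (pred == target).sum().item()
            sample_num += scores.size(0)
    return right / max(sample_num, 1)
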
    parser.add_argument("-tl", dest='transfer', help='transfer_learning', default=False, type=bool)

    parser.add_argument("-c", dest='cfgfile', help="Config file",
                        default="cfg/yolov3.cfg", type=str)
    parser.add_argument("-t", dest="use_tensorboard", help="Disable tensorboard", default=True, type=bool)

    return parser.parse_args()


if __name__ == '__main__':

    args = arg_parse()

    # Use LMDB custom dataset or VOC-style
    if cfg.lmdb:
        dataset = dset(cfg.target_file, cfg.root_dir, cfg.multi_scale_inp_size)  # , cfg.transforms)
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch, shuffle=True, num_workers=args.workers)

    else:

        image_data = torchvision.datasets.ImageFolder(args.path)
        # note: DataLoader has no multiscale argument; multi-scale input sizes must be handled by the dataset or its transforms
        data_loader = torch.utils.data.DataLoader(image_data, batch_size=args.batch, shuffle=True, num_workers=args.workers)

    # 20 classes when doing transfer learning (e.g. Pascal VOC); otherwise set cfg.num_classes to the number of classes in your custom dataset
    classes = 20 if args.transfer else cfg.num_classes

    # create the network
    net = Darknet(classes)

    # Load weights
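
A side note on the type=bool arguments in arg_parse above: argparse applies bool() to the raw command-line string, so any non-empty value, including "False", is parsed as True. A minimal sketch of the common workaround; the str2bool helper is illustrative and not part of the original code:

import argparse

def str2bool(v):
    # map common textual spellings onto a real boolean
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')

# e.g. parser.add_argument("-tl", dest='transfer', type=str2bool, default=False)
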