Example 1
import os

import torch
import torch.utils.data as data
import torchvision.transforms as transforms

# Project-specific video-frame dataset class (import path assumed from the example below).
from model.utils import DataLoader

# `args` is assumed to come from an argparse parser defined earlier in the script (not shown).
if args.gpus is None:
    gpus = "0"  # default to GPU 0 when no --gpus argument is given
    os.environ["CUDA_VISIBLE_DEVICES"] = gpus
else:
    gpus = ""
    for i in range(len(args.gpus)):
        gpus = gpus + args.gpus[i] + ","  # build a comma-separated device list
    os.environ["CUDA_VISIBLE_DEVICES"] = gpus[:-1]  # drop the trailing comma

torch.backends.cudnn.enabled = True  # make sure to use cudnn for computational performance

test_folder = args.dataset_path + args.dataset_type + "/testing/frames"

# Loading dataset
test_dataset = DataLoader(test_folder,
                          transforms.Compose([
                              transforms.ToTensor(),
                          ]),
                          resize_height=args.h,
                          resize_width=args.w,
                          time_step=args.t_length - 1)

test_size = len(test_dataset)

test_batch = data.DataLoader(test_dataset,
                             batch_size=args.test_batch_size,
                             shuffle=False,
                             num_workers=args.num_workers_test,
                             drop_last=False)
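# Note: shuffle=False and drop_last=False preserve frame order and keep the final
# (possibly smaller) batch, so every test sample is evaluated exactly once.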

# Loading the trained model
model = torch.load(args.model_dir)
model.cuda()
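
The snippet stops after the trained model is loaded. Below is a minimal sketch of how test_batch and model would typically be consumed, assuming each batch is a single tensor of stacked frames and that the loaded object is a plain torch.nn.Module; the model in the source project may expect additional inputs.

model.eval()
with torch.no_grad():
    for imgs in test_batch:
        imgs = imgs.cuda()      # move the stacked frames to the GPU
        output = model(imgs)    # forward pass; the exact signature is an assumption
        # evaluation scores (e.g. reconstruction error) would be computed from output here
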
Example 2
# coding:utf-8

import tensorflow as tf
from model.mrcnn import MRCNN
from model.utils import DataLoader
from model.config import Config

config = Config()

with tf.Graph().as_default():
    session_conf = tf.ConfigProto(
        allow_soft_placement=config.allow_soft_placement,
        log_device_placement=config.log_device_placement)
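    # session_conf is typically passed to tf.Session(config=session_conf) later in the script.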
    # prepare data set
    dataloader = DataLoader()
    num_classes = dataloader.numclass

    word_embeddings1 = dataloader.word_embeddings1
    word_embeddings2 = dataloader.word_embeddings2
    # train_data = dataloader.trainset
    # test_data = dataloader.testset
    # testset_size = dataloader.testset_size
    train_data_iter, train_size, train_num_batches = dataloader.batch_iter(
        is_train=True,
        batch_size=config.batch_size,
        num_epochs=config.num_epochs,
        oversample=False,
        shuffle=True)
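    # batch_iter apparently returns (batch iterator, number of examples, number of batches per epoch).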
    test_data_iter, test_size, test_num_batches = dataloader.batch_iter(
        is_train=False,
        batch_size=20,