def test_mx_data_loader_nopython():
    import mxnet as mx
    import numpy as np
    from mxnet.gluon.data.dataloader import DataLoader
    from mxnet.gluon.data.vision.transforms import ToTensor

    dataset = mx.gluon.data.vision.MNIST(train=False)
    dl1 = DataLoader(dataset=dataset.transform_first(ToTensor()), batch_size=32, try_nopython=True, shuffle=False)
    dl2 = DataLoader(dataset=dataset.transform_first(ToTensor()), batch_size=32, try_nopython=False, shuffle=False)
    # The no-Python loader must match the default loader batch-for-batch.
    assert len(dl1) == len(dl2)
    assert np.all(next(iter(dl1))[1].asnumpy() == next(iter(dl2))[1].asnumpy())
    # Make sure a full pass over the no-Python loader completes.
    for _ in dl1:
        pass
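Note: `try_nopython=True` asks the Gluon DataLoader to attempt a pure MXNet (no Python worker) pipeline; the test above checks that it produces the same number of batches and identical labels as the default Python path.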
Example #2
def official_transform(x, args):
    # Assumes torchvision-style transforms; in the original these names are
    # imported elsewhere, e.g.
    # from torchvision.transforms import CenterCrop, Compose, Normalize, ToTensor.
    transformer = Compose([
        CenterCrop(args.CenterCropSize),
        ToTensor(),
        Normalize(mean=args.mean, std=args.std),
    ])
    x = transformer(x)
    return x
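
# Usage sketch (not in the original source): `args` only needs the three
# attributes official_transform reads; the values here are illustrative.
from types import SimpleNamespace
from PIL import Image

demo_args = SimpleNamespace(CenterCropSize=224,
                            mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225])
img = Image.open('example.jpg')              # hypothetical input image
tensor = official_transform(img, demo_args)  # cropped, normalized tensor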
Example #3
def get_transform(transform_params):
    # Build a pipeline from a dict: optional Resize, then ToTensor, then
    # optional Normalize when both MEAN and STD are given.
    transform = []
    if 'RESIZE' in transform_params:
        transform.append(Resize(transform_params['RESIZE']))

    transform.append(ToTensor())
    if 'MEAN' in transform_params and 'STD' in transform_params:
        transform.append(Normalize(transform_params['MEAN'], transform_params['STD']))

    transform = Compose(transform)
    return transform
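
# Usage sketch (illustrative values, not from the original source):
demo_params = {'RESIZE': (256, 256),
               'MEAN': [0.485, 0.456, 0.406],
               'STD': [0.229, 0.224, 0.225]}
transform = get_transform(demo_params)  # Resize -> ToTensor -> Normalize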
def get_loader(json_path,
               data_dir,
               mask_dir,
               inp_size,
               feat_stride,
               batch_size,
               params_transform,
               training=True,
               shuffle=True,
               num_workers=3):
    """ Build a COCO dataloader
    :param json_path: string, path to jso file
    :param datadir: string, path to coco data
    :returns : the data_loader
    """
    with open(json_path) as data_file:
        data_this = json.load(data_file)
        data = data_this['root']

    num_samples = len(data)
    train_indexes = []
    val_indexes = []
    for count in range(num_samples):
        # isValidation is stored as a float flag in the json annotations
        if data[count]['isValidation'] != 0.:
            val_indexes.append(count)
        else:
            train_indexes.append(count)

    coco_data = Cocokeypoints(
        root=data_dir,
        mask_dir=mask_dir,
        index_list=train_indexes if training else val_indexes,
        data=data,
        inp_size=inp_size,
        feat_stride=feat_stride,
        transform=ToTensor(),
        params_transform=params_transform)

    data_loader = DataLoader(coco_data,
                             batch_size=batch_size,
                             shuffle=shuffle,
                             num_workers=num_workers)

    return data_loader
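
# Usage sketch (hypothetical paths and values, not from the original source):
demo_loader = get_loader('annotations.json',
                         data_dir='coco/images',
                         mask_dir='coco/masks',
                         inp_size=368,
                         feat_stride=8,
                         batch_size=16,
                         params_transform={},  # assumed transform-parameter dict
                         training=True)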
    # Evaluation fragment: the enclosing function definition is truncated in the source.
    net = MVRNN(cnn_arch='vgg11_bn',
                cnn_feature_length=4096,
                num_views=args.num_views,
                num_class=args.num_classes,
                pretrained=True,
                pretrained_cnn=None,
                ctx=ctx)
    net.load_parameters(args.checkpoint, ctx=ctx)

    net.hybridize()
    metric = mxnet.metric.Accuracy()
    test_ds = MultiViewImageDataset(os.path.join(args.dataset_path, 'test'),
                                    args.num_views,
                                    transform=Compose([
                                        ToTensor(),
                                        Normalize(mean=(0.485, 0.456, 0.406),
                                                  std=(0.229, 0.224, 0.225))
                                    ]))
    loader = gluon.data.DataLoader
    test_data = loader(test_ds,
                       args.batch_size,
                       shuffle=False,
                       last_batch='keep')
    print('test on dataset %s, acc %s ' %
          (args.dataset_path,
           utils.test(metric,
                      ctx,
                      net,
                      test_data,
                      num_views=args.num_views)))
Example #6
# Assemble the RNN head and initialize its weights.
RNN_Block_net = nn.HybridSequential()
RNN_Block_net.add(RNN_Block(n_class=num_class))
RNN_Block_net.collect_params().initialize(mx.init.Normal(0.02), ctx=ctx)


GE_trainer = gluon.Trainer(Generator_Encorder_net.collect_params(), 'adam', {'learning_rate': lr, 'beta1': 0.9, 'beta2': 0.999})
GD_trainer = gluon.Trainer(Generator_Decorder_net.collect_params(), 'adam', {'learning_rate': lr, 'beta1': 0.9, 'beta2': 0.999})
D_trainer = gluon.Trainer(Discriminator_net.collect_params(), 'adam', {'learning_rate': lr, 'beta1': 0.9, 'beta2': 0.999})
RNN_trainer = gluon.Trainer(RNN_Block_net.collect_params(), 'adam', {'learning_rate': lr, 'beta1': 0.9, 'beta2': 0.999})


#######################################################
######    dataset path    ######
dataset = ImageDataset('/home/cumt306/zhouyi/dataset/Train.txt', (32, 128), 3, 32, alphabet)
data_loader = DataLoader(dataset.transform_first(ToTensor()), batch_size=batch_size, shuffle=True, num_workers=12)
val_dataset = ImageDataset('/home/cumt306/zhouyi/dataset/Val.txt', (32, 128), 3, 32, alphabet)
val_data_loader = DataLoader(val_dataset.transform_first(ToTensor()), batch_size=batch_size, shuffle=True, num_workers=12)  # was loading the training set by mistake
test_dataset = ImageDataset('/home/cumt306/zhouyi/dataset/Test.txt', (32, 128), 3, 32, alphabet)
test_data_loader = DataLoader(test_dataset.transform_first(ToTensor()), batch_size=batch_size, shuffle=True, num_workers=12)
#######################################################



stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
logging.basicConfig(level=logging.DEBUG)
GAN_loss = gluon.loss.SigmoidBinaryCrossEntropyLoss()
ctc_loss = gluon.loss.CTCLoss(weight=0.2)
L1_loss = gluon.loss.L1Loss()
sw = SummaryWriter(log_dir)
global_step = 0
Example #7
def sliding_window(array, depth, target):
    # Reconstructed header: the source snippet is truncated here. The body and
    # the call site below imply this signature; `size` is assumed to be a
    # module-level setting like `depth` and `target`.
    data, label = [], []
    length = len(array) - depth - target  # assumed number of complete windows
    trans = Resize(size)
    for i in range(length):
        data.append(trans(mx.nd.stack(*array[i:i + depth]).astype("float32")))
        label.append(
            trans(
                mx.nd.expand_dims(array[i + depth + target].astype("float32"),
                                  0)))
    return gluon.data.ArrayDataset(data, label)


# Load the grayscale frames (flag=0) in filename order.
images = []
for filename in sorted(os.listdir("handmade")):
    images.append(mx.image.imread("handmade/" + filename, flag=0))

dataset = sliding_window(images, depth, target).transform_first(ToTensor())
dataloader = gluon.data.DataLoader(dataset,
                                   batch_size=batch_size,
                                   last_batch="discard",
                                   shuffle=True)
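
# Quick sanity check on the loader (not in the original source); batch shapes
# depend on the module-level `size`, `depth`, and `batch_size` settings.
for data, label in dataloader:
    print(data.shape, label.shape)
    break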
# Data processing #
###################

###################
# Declare metrics #
###################
# `net`, `optimizer`, and `lr` are defined earlier in the original script
# (that part of the example is truncated here).
loss = gluon.loss.L2Loss()
trainer = gluon.Trainer(net.collect_params(), optimizer, {
    'learning_rate': lr,
    'momentum': 0.9
})
def main(args):
    '''create dir'''
    experiment_dir = Path('./experiment/')
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = Path('./experiment/checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = Path('./experiment/logs/')
    log_dir.mkdir(exist_ok=True)

    ctx = [mxnet.gpu(gpu_id) for gpu_id in args.gpu]
    '''initialize the network'''
    net = MVRNN(cnn_arch='vgg11_bn',
                cnn_feature_length=4096,
                num_views=args.num_views,
                num_class=args.num_classes,
                pretrained=True,
                pretrained_cnn=args.pretrained_cnn,
                ctx=ctx)
    if args.checkpoint:
        net.load_parameters(args.checkpoint, ctx=ctx)
    else:
        net.initialize(init=init.MSRAPrelu(), ctx=ctx)
    net.hybridize()
    '''set grad_req to 'add' to manually aggregate gradients'''
    net.collect_params().setattr('grad_req', 'add')
    net._cnn2.collect_params().setattr('lr_mult', args.output_lr_mult)
    '''Setup loss function'''
    loss_fun = gluon.loss.SoftmaxCrossEntropyLoss(
        sparse_label=not args.label_smoothing)
    '''Loading dataset'''
    train_ds = MultiViewImageDataset(os.path.join(args.dataset_path, 'train'),
                                     args.num_views,
                                     transform=Compose([
                                         ToTensor(),
                                         Normalize(mean=(0.485, 0.456, 0.406),
                                                   std=(0.229, 0.224, 0.225))
                                     ]))
    test_ds = MultiViewImageDataset(os.path.join(args.dataset_path, 'test'),
                                    args.num_views,
                                    transform=Compose([
                                        ToTensor(),
                                        Normalize(mean=(0.485, 0.456, 0.406),
                                                  std=(0.229, 0.224, 0.225))
                                    ]))
    loader = gluon.data.DataLoader
    train_data = loader(train_ds,
                        args.batch_size,
                        shuffle=True,
                        last_batch='keep',
                        num_workers=4)
    test_data = loader(test_ds,
                       args.batch_size,
                       shuffle=False,
                       last_batch='keep',
                       num_workers=4)

    current_time = datetime.datetime.now()
    time_str = '%d-%d-%d--%d-%d-%d' % (
        current_time.year, current_time.month, current_time.day,
        current_time.hour, current_time.minute, current_time.second)
    log_filename = time_str + '.txt'
    checkpoint_name = 'checkpoint_' + time_str
    checkpoint_dir = Path(os.path.join(checkpoints_dir, checkpoint_name))
    checkpoint_dir.mkdir(exist_ok=True)

    with open(os.path.join(log_dir, log_filename), 'w') as log_out:
        try:
            kv = mxnet.kv.create('device')
            utils.log_string(log_out, sys.argv[0])
            utils.train(net, train_data, test_data, loss_fun, kv, log_out,
                        str(checkpoint_dir), args)
        except Exception:
            raise  # bare raise preserves the original traceback
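
# Hedged invocation sketch (not in the original source): the flag names mirror
# the attributes main() reads from `args`; defaults are illustrative.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, nargs='+', default=[0])
    parser.add_argument('--num_views', type=int, default=12)
    parser.add_argument('--num_classes', type=int, default=40)
    parser.add_argument('--pretrained_cnn', default=None)
    parser.add_argument('--checkpoint', default=None)
    parser.add_argument('--label_smoothing', action='store_true')
    parser.add_argument('--output_lr_mult', type=float, default=1.0)
    parser.add_argument('--dataset_path', required=True)
    parser.add_argument('--batch_size', type=int, default=8)
    main(parser.parse_args())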