コード例 #1
0
def test_detection_dataloader():
    """Smoke-test DetectionDataLoader over worker/shuffle/last_batch configs.

    Drains every combination of ``num_workers``, ``shuffle`` and
    ``last_batch`` policy.  ``mx.nd.waitall()`` forces MXNet's asynchronous
    engine to finish each batch, so any error raised lazily inside a worker
    surfaces here instead of being silently dropped.
    """
    dataset = DummyDetectionDataset(8)
    for num_workers in [0, 1, 2, 4]:
        for shuffle in (True, False):
            for last_batch in ('keep', 'discard', 'rollover'):
                dataloader = DetectionDataLoader(dataset,
                                                 batch_size=2,
                                                 shuffle=shuffle,
                                                 last_batch=last_batch,
                                                 num_workers=num_workers)
                # Batch contents are irrelevant; we only check the loader
                # iterates cleanly, so the loop variable is discarded.
                for _ in dataloader:
                    mx.nd.waitall()
コード例 #2
0
def test_detection_dataloader():
    """Smoke-test detection data loading over worker/shuffle/last_batch configs.

    Exercises the legacy ``DetectionDataLoader`` and the newer generic
    ``mx.gluon.data.DataLoader`` driven by explicit batchify functions
    (``Stack``/``Pad`` and ``Append``), for every combination of
    ``num_workers``, ``shuffle`` and ``last_batch`` policy.
    """
    def _drain(loader):
        # Consume every batch; waitall() synchronizes MXNet's async engine
        # so worker-side failures are raised inside this test.
        for _ in loader:
            mx.nd.waitall()

    dataset = DummyDetectionDataset(8)
    for num_workers in [0, 1, 2, 4]:
        for shuffle in (True, False):
            for last_batch in ('keep', 'discard', 'rollover'):
                _drain(DetectionDataLoader(dataset,
                                           batch_size=2,
                                           shuffle=shuffle,
                                           last_batch=last_batch,
                                           num_workers=num_workers))

                # New dataloader methods: generic gluon DataLoader with an
                # explicit batchify function replacing DetectionDataLoader.
                for batchify_fn in (Tuple(Stack(), Pad(pad_val=-1)),
                                    Tuple(Append(), Append())):
                    _drain(mx.gluon.data.DataLoader(dataset,
                                                    batch_size=2,
                                                    shuffle=shuffle,
                                                    last_batch=last_batch,
                                                    batchify_fn=batchify_fn,
                                                    num_workers=num_workers))
コード例 #3
0
                   class_names=train_dataset.classes)
plt.show()

#%% Test the transform function: shrink the image down to the standard input size
train_transform = SSDDefaultTrainTransform(width, height)
train_image2, train_label2 = train_transform(train_image, train_label)
print('tensor shape:', train_image2.shape)
print('label shape:', train_label2.shape)

# Undo the transform's normalization for display: CHW -> HWC, multiply by the
# per-channel std and add the mean (these constants are the standard ImageNet
# statistics), then map back to 0-255 pixel values.
train_image2 = train_image2.transpose((1, 2, 0)) * nd.array(
    (0.229, 0.224, 0.225)) + nd.array((0.485, 0.456, 0.406))
train_image2 = (train_image2 * 255).clip(0, 255)
# Transformed label layout: columns 0-3 are the bbox corners, column 4 the
# class id (hence the :4 / 4:5 slices below).
ax = viz.plot_bbox(train_image2.asnumpy(),
                   train_label2[:, :4],
                   labels=train_label2[:, 4:5],
                   class_names=train_dataset.classes)
plt.show()

batch_size = 2  # for tutorial, we use smaller batch-size
num_workers = 0  # you can make it larger(if your CPU has more cores) to accelerate data loading

# Loader over the transformed dataset; 'rollover' defers an incomplete final
# batch into the next epoch instead of dropping or padding it.
train_loader = DetectionDataLoader(train_dataset.transform(train_transform),
                                   batch_size,
                                   shuffle=True,
                                   last_batch='rollover',
                                   num_workers=num_workers)
# Peek at the first few batches just to verify data/label shapes.
for ib, batch in enumerate(train_loader):
    if ib > 3:
        break
    print('data:', batch[0].shape, 'label:', batch[1].shape)
コード例 #4
0
batch_size = 20
# 1e20 effectively removes the cap on training iterations per epoch.
maxTrain_inOneEpoch = 1e20
epoch_num = 10
lambd = 1 / 4
Models_tmp_Dir = 'D:/Temps/Models_tmp/'
#CPU_percentage = 0.2

# Devices used for training: every visible GPU (uncomment below for CPU-only).
ctx = [mx.gpu(i) for i in range(mx.context.num_gpus())]
#ctx = [mx.cpu()]

val_transform = presets.ssd.SSDDefaultValTransform(width, height)
# 'keep' retains the final partial batch so every validation sample is scored;
# no shuffling and no worker processes for deterministic evaluation.
val_loader = DetectionDataLoader(val_dataset.transform(val_transform),
                                 batch_size,
                                 shuffle=False,
                                 last_batch='keep',
                                 num_workers=0)

#%% Network
classes = data.VOCDetection.CLASSES

# SSD detector configuration over a resnet50_v1 backbone at 512x512 input.
name = 'resnet50_v1'
base_size = 512
features = ['stage3_activation5', 'stage4_activation2']  # backbone layers tapped as feature maps
filters = [512, 512, 256, 256]  # channels of the extra conv layers appended after the taps
# NOTE(review): sizes/ratios/steps look like per-scale anchor sizes, aspect
# ratios and strides for 6 feature-map scales — confirm against the SSD API.
sizes = [51.2, 102.4, 189.4, 276.4, 363.52, 450.6, 492]
ratios = [[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0 / 3]] * 3 + [[1, 2, 0.5]] * 2
steps = [16, 32, 64, 128, 256, 512]

pretrained = True