def Model(self, model_name, use_pretrained=True, use_gpu=True, gpu_devices=[0]):
        self.system_dict["model_name"] = model_name;
        self.system_dict["use_pretrained"] = use_pretrained;
        if(self.system_dict["model_name"] in self.system_dict["model_set_1"]):
            self.system_dict["local"]["net"] = gcv.model_zoo.get_model(self.system_dict["model_name"], 
                pretrained=self.system_dict["use_pretrained"]);
            self.system_dict["local"]["net"].reset_class(self.system_dict["classes"])
            self.system_dict["img_shape"] = (300, 300); 

            width, height = self.system_dict["img_shape"][0], self.system_dict["img_shape"][1]
            with autograd.train_mode():
                _, _, anchors = self.system_dict["local"]["net"](mx.nd.zeros((1, 3, height, width)))

            batchify_fn = Tuple(Stack(), Stack(), Stack())
            self.system_dict["local"]["train_loader"] = gluon.data.DataLoader(
                self.system_dict["local"]["train_dataset"].transform(SSDDefaultTrainTransform(width, height, anchors)),
                self.system_dict["batch_size"], True, batchify_fn=batchify_fn, last_batch='rollover', 
                num_workers=self.system_dict["num_workers"])

            self.set_device(use_gpu=use_gpu, gpu_devices=gpu_devices)
            self.system_dict["local"]["net"].collect_params().reset_ctx(self.system_dict["local"]["ctx"])

        elif((self.system_dict["model_name"] in self.system_dict["model_set_2"]) or (self.system_dict["model_name"] in self.system_dict["model_set_3"])
            or (self.system_dict["model_name"] in self.system_dict["model_set_4"])):
            self.system_dict["local"]["net"] = gcv.model_zoo.get_model(self.system_dict["model_name"], 
                pretrained=self.system_dict["use_pretrained"]);
            self.system_dict["local"]["net"].reset_class(self.system_dict["classes"])
            self.system_dict["img_shape"] = (512, 512); 

            width, height = self.system_dict["img_shape"][0], self.system_dict["img_shape"][1]
            with autograd.train_mode():
                _, _, anchors = self.system_dict["local"]["net"](mx.nd.zeros((1, 3, height, width)))

            batchify_fn = Tuple(Stack(), Stack(), Stack())
            self.system_dict["local"]["train_loader"] = gluon.data.DataLoader(
                self.system_dict["local"]["train_dataset"].transform(SSDDefaultTrainTransform(width, height, anchors)),
                self.system_dict["batch_size"], True, batchify_fn=batchify_fn, last_batch='rollover', 
                num_workers=self.system_dict["num_workers"])

            self.set_device(use_gpu=use_gpu, gpu_devices=gpu_devices)
            self.system_dict["local"]["net"].collect_params().reset_ctx(self.system_dict["local"]["ctx"])

        elif((self.system_dict["model_name"] in self.system_dict["model_set_5"]) or (self.system_dict["model_name"] in self.system_dict["model_set_6"])) :
            self.system_dict["local"]["net"] = gcv.model_zoo.get_model(self.system_dict["model_name"], 
                pretrained=self.system_dict["use_pretrained"]);
            self.system_dict["local"]["net"].reset_class(self.system_dict["classes"])
            self.system_dict["img_shape"] = (416, 416); 

            width, height = self.system_dict["img_shape"][0], self.system_dict["img_shape"][1]

            train_transform = YOLO3DefaultTrainTransform(width, height, self.system_dict["local"]["net"])
            batchify_fn = Tuple(*([Stack() for _ in range(6)] + [Pad(axis=0, pad_val=-1)]))  # 6 stacked target arrays + padded gt boxes

            self.system_dict["local"]["train_loader"] = gluon.data.DataLoader(
                self.system_dict["local"]["train_dataset"].transform(train_transform),
                self.system_dict["batch_size"], True, batchify_fn=batchify_fn, last_batch='rollover', 
                num_workers=self.system_dict["num_workers"])

            self.set_device(use_gpu=use_gpu, gpu_devices=gpu_devices)
            self.system_dict["local"]["net"].collect_params().reset_ctx(self.system_dict["local"]["ctx"])
def get_dataloader(model, train_dataset, validation_dataset, height, width,
                   batch_size, num_workers):
    """Data pre-processing. Returns mini batches of dataset with transformations

    Args:
        model (SSD model): Object detection model
        train_dataset (Dataset): Training images and labels
        validation_dataset (Dataset): Validation images and labels
        height (int): Height of the training image
        width (int): Width of training image
        batch_size (int): Number of images in a mini batch
        num_workers (int): Number of multiprocessing workers

    Returns:
        Dataloader : Mini batches of data
    """

    with autograd.train_mode():
        _, _, anchors = model(mx.nd.zeros((1, 3, height, width)))
    batchify_fn = Tuple(Stack(), Stack(), Stack())

    train_loader = gluon.data.DataLoader(train_dataset.transform(
        SSDDefaultTrainTransform(width, height, anchors)),
                                         batch_size,
                                         True,
                                         batchify_fn=batchify_fn,
                                         last_batch='rollover',
                                         num_workers=num_workers)

    return train_loader
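
A minimal usage sketch for the loader above, assuming an SSD model from the GluonCV model zoo and a local VOC dataset; the model name and hyper-parameters are illustrative:

import gluoncv as gcv

model = gcv.model_zoo.get_model('ssd_512_resnet50_v1_voc', pretrained=True)
train_dataset = gcv.data.VOCDetection(splits=[(2007, 'trainval')])
train_loader = get_dataloader(model, train_dataset, None, 512, 512,
                              batch_size=8, num_workers=0)
image, cls_targets, box_targets = next(iter(train_loader))
print(image.shape)  # (8, 3, 512, 512)
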
def validate(epoch,
             val_loader,
             model,
             crit_cls,
             crit_reg,
             opt,
             ctx,
             gen_shape=False):
    """
    One validation
    """
    generated_shapes = []
    original_shapes = []
    sample_prob = opt.inner_sample_prob
    loss_cls_sum, loss_reg_sum, n = 0.0, 0.0, 0

    for idx, data in enumerate(val_loader):
        start = time.time()

        shapes, labels, masks, params, param_masks = (
            data[0], data[1], data[2], data[3], data[4])
        gt = shapes
        shapes = nd.expand_dims(shapes, axis=1)

        shapes = shapes.as_in_context(ctx)
        labels = labels.as_in_context(ctx)
        masks = masks.as_in_context(ctx)
        params = params.as_in_context(ctx)
        param_masks = param_masks.as_in_context(ctx)
        with autograd.train_mode():
            out = model.decode(shapes)
        #out = model(shapes, labels, sample_prob)
        bsz, n_block, n_step = labels.shape
        labels = labels.reshape(bsz, n_block * n_step)
        masks = masks.reshape(bsz, n_block * n_step)
        out_pgm = out[0].reshape(bsz, n_block * n_step, opt.program_size + 1)

        bsz, n_block, n_step, n_param = params.shape
        params = params.reshape(bsz, n_block * n_step, n_param)
        param_masks = param_masks.reshape(bsz, n_block * n_step, n_param)
        out_param = out[1].reshape(bsz, n_block * n_step, n_param)
        loss_cls, acc = crit_cls(out_pgm, labels, masks)
        loss_reg = crit_reg(out_param, params, param_masks)

        end = time.time()

        loss_cls = loss_cls.mean().asscalar()
        loss_reg = loss_reg.mean().asscalar()

        if idx % opt.info_interval == 0:
            out_1 = nd.round(out[0]).astype('int64')
            out_2 = nd.round(out[1]).astype('int64')
            pred = nd.from_numpy(decode_multiple_block(
                out_1, out_2)).astype("float32").as_in_context(mx.cpu())
            IoU = BatchIoU(pred, gt)
            print(
                "Test: epoch {} batch {}/{}, loss_cls = {:.3f}, loss_reg = {:.3f}, acc = {:.3f}, IoU = {:.3f} time = {:.3f}"
                .format(epoch, idx, len(val_loader), loss_cls, loss_reg,
                        acc[0].asscalar(), IoU.mean(), end - start))
            sys.stdout.flush()
def get_dataloader(net, train_dataset, val_dataset, data_shape, batch_size,
                   num_workers):
    """Get dataloader."""
    width, height = data_shape, data_shape
    # use fake data to generate fixed anchors for target generation
    with autograd.train_mode():
        anchors, _, _, _, _, _, _ = net(mx.nd.zeros((1, 3, height, width)))

    # stack image, anchor_cls_targets, anchor_box_targets
    # pad real_targets(xmin, ymin, xmax, ymax, label). will return length
    batchify_fn = Tuple(Stack(), Stack(), Stack(),
                        Pad(axis=0, pad_val=-1, ret_length=True))
    train_loader = gluon.data.DataLoader(train_dataset.transform(
        RefineDetDefaultTrainTransform(width, height, anchors)),
                                         batch_size,
                                         True,
                                         batchify_fn=batchify_fn,
                                         last_batch='rollover',
                                         num_workers=num_workers)
    # return: img (B, H, W, C); anchor_cls_targets (B, N); anchor_box_targets (B, N, 4);
    # targets (B, P, 5) and target_len (B,), where P is the padded number of objects per image.

    val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
    val_loader = gluon.data.DataLoader(val_dataset.transform(
        SSDDefaultValTransform(width, height)),
                                       batch_size,
                                       False,
                                       batchify_fn=val_batchify_fn,
                                       last_batch='keep',
                                       num_workers=num_workers)
    return train_loader, val_loader
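
A hedged call sketch; RefineDet is not in the stock GluonCV model zoo, so `net` is assumed to be a RefineDet-style network whose train-mode forward returns the anchors first:

train_loader, val_loader = get_dataloader(net, train_dataset, val_dataset,
                                          data_shape=512, batch_size=8,
                                          num_workers=0)
for batch in train_loader:
    img, anchor_cls_targets, anchor_box_targets = batch[0], batch[1], batch[2]
    break  # the remaining entry holds the padded targets plus their lengths
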
Example 5
def get_dataloader(net, data_shape, batch_size, num_workers, ctx):
    """Get dataloader."""
    import os

    os.system('pip3 install gluoncv --pre')

    from gluoncv import data as gdata
    from gluoncv.data.batchify import Tuple, Stack, Pad
    from gluoncv.data.transforms.presets.ssd import SSDDefaultTrainTransform

    width, height = data_shape, data_shape
    # use fake data to generate fixed anchors for target generation
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width), ctx))
    anchors = anchors.as_in_context(mx.cpu())
    batchify_fn = Tuple(Stack(), Stack(),
                        Stack())  # stack image, cls_targets, box_targets
    train_dataset = gdata.RecordFileDetection(
        os.path.join(os.environ['SM_CHANNEL_TRAIN'], 'train.rec'))
    train_loader = gluon.data.DataLoader(train_dataset.transform(
        SSDDefaultTrainTransform(width, height, anchors)),
                                         batch_size,
                                         True,
                                         batchify_fn=batchify_fn,
                                         last_batch='rollover',
                                         num_workers=num_workers)
    return train_loader
Example 6
def get_dataloader(net, train_dataset, valid_dataset, data_shape, batch_size,
                   num_workers):
    from gluoncv.data.batchify import Tuple, Stack, Pad
    from gluoncv.data.transforms.presets.ssd import SSDDefaultTrainTransform
    width, height = data_shape, data_shape
    # use fake data to generate fixed anchors for target generation
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width)))
    batchify_fn = Tuple(Stack(), Stack(),
                        Stack())  # stack image, cls_targets, box_targets
    train_loader = gluon.data.DataLoader(train_dataset.transform(
        SSDDefaultTrainTransform(width, height, anchors)),
                                         batch_size,
                                         True,
                                         batchify_fn=batchify_fn,
                                         last_batch='rollover',
                                         num_workers=num_workers)

    val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))

    val_loader = gluon.data.DataLoader(valid_dataset.transform(
        SSDDefaultValTransform(width, height)),
                                       batch_size,
                                       False,
                                       batchify_fn=val_batchify_fn,
                                       last_batch='keep',
                                       num_workers=num_workers)

    # `classes` is a free variable; it must be defined in the enclosing scope
    eval_metric = VOC07MApMetric(iou_thresh=0.5, class_names=classes)
    return train_loader, val_loader, eval_metric
Example 7
def get_dataloader(net, train_dataset, val_dataset, data_shape, batch_size,
                   num_workers):
    """Get dataloader."""
    width, height = data_shape, data_shape
    # use fake data to generate fixed anchors for target generation
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width)))
    batchify_fn = Tuple(Stack(), Stack(),
                        Stack())  # stack image, cls_targets, box_targets
    train_loader = gluon.data.DataLoader(train_dataset.transform(
        SSDDefaultTrainTransform(width, height, anchors)),
                                         batch_size,
                                         True,
                                         batchify_fn=batchify_fn,
                                         last_batch='rollover',
                                         num_workers=num_workers)
    val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
    val_loader = gluon.data.DataLoader(val_dataset.transform(
        SSDDefaultValTransform(width, height)),
                                       batch_size,
                                       False,
                                       batchify_fn=val_batchify_fn,
                                       last_batch='keep',
                                       num_workers=num_workers)
    return train_loader, val_loader
Example 8
def get_dataloader(net, train_dataset, val_dataset, data_shape, batch_size,
                   num_workers):
    """Get dataloader."""
    width, height = data_shape, data_shape

    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width)))
    batchify_fn_train = Tuple(Stack(), Stack(), Stack())
    train_loader = gluon.data.DataLoader(train_dataset.transform(
        TrainTransform(width, height, anchors)),
                                         batch_size,
                                         shuffle=True,
                                         batchify_fn=batchify_fn_train,
                                         last_batch='rollover',
                                         num_workers=num_workers)

    batchify_fn = Tuple(Stack(), Stack())
    val_loader = gluon.data.DataLoader(val_dataset.transform(
        SSDDefaultValTransform(width, height)),
                                       batch_size,
                                       shuffle=False,
                                       batchify_fn=batchify_fn,
                                       last_batch='keep',
                                       num_workers=num_workers)
    return train_loader, val_loader
Example 9
 def getDataloader(self, train_dataset, val_dataset):
     width, height = self.args.data_shape, self.args.data_shape
     # use fake data to generate fixed anchors for target generation
     with autograd.train_mode():
          _, _, anchors = self.net(
              mx.nd.zeros((1, 3, height, width), self.ctx[0]))
     anchors = anchors.as_in_context(mx.cpu())
     batchify_fn = Tuple(Stack(), Stack(),
                         Stack())  # stack image, cls_targets, box_targets
     train_loader = gluon.data.DataLoader(train_dataset.transform(
         SSDDefaultTrainTransform(width, height, anchors)),
                                          self.args.batch_size,
                                          True,
                                          batchify_fn=batchify_fn,
                                          last_batch='rollover',
                                          num_workers=self.args.num_workers)
     val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
     val_loader = None
     if val_dataset is not None:
         val_loader = gluon.data.DataLoader(
             val_dataset.transform(SSDDefaultValTransform(width, height)),
             self.args.batch_size,
             False,
             batchify_fn=val_batchify_fn,
             last_batch='keep',
             num_workers=self.args.num_workers)
     return train_loader, val_loader
Example 10
def get_dataloader(model, train_dataset, validation_dataset, height, width,
                   batch_size, num_workers):
    """
    Get dataloader.
    """

    import gluoncv as gcv
    from gluoncv.data.batchify import Tuple, Stack, Pad
    from gluoncv.data.transforms.presets.ssd import SSDDefaultTrainTransform

    # In training mode, SSD returns three intermediate values:
    # cls_preds are the class predictions prior to softmax,
    # box_preds are bounding box offsets with one-to-one correspondence to anchors
    with autograd.train_mode():
        _, _, anchors = model(mx.nd.zeros((1, 3, height, width)))
    batchify_fn = Tuple(Stack(), Stack(), Stack())

    # SSDDefaultTrainTransform: data augmentation and preprocessing:
    # random color jittering, random expansion with prob 0.5, random cropping,
    # resize with random interpolation, random horizontal flip,
    # normalize (subtract mean and divide by std)
    train_loader = gluon.data.DataLoader(train_dataset.transform(
        SSDDefaultTrainTransform(width, height, anchors)),
                                         batch_size,
                                         True,
                                         batchify_fn=batchify_fn,
                                         last_batch='rollover',
                                         num_workers=num_workers)

    return train_loader
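
A hedged sketch of one training step consuming this loader with GluonCV's SSDMultiBoxLoss; the model name, dataset, and optimizer settings are assumptions:

import gluoncv as gcv
from mxnet import gluon
from gluoncv.loss import SSDMultiBoxLoss

model = gcv.model_zoo.get_model('ssd_512_resnet50_v1_voc', pretrained=True)
train_dataset = gcv.data.VOCDetection(splits=[(2007, 'trainval')])
train_loader = get_dataloader(model, train_dataset, None, 512, 512, 8, 0)
trainer = gluon.Trainer(model.collect_params(), 'sgd', {'learning_rate': 1e-3})
mbox_loss = SSDMultiBoxLoss()
for image, cls_targets, box_targets in train_loader:
    with autograd.record():
        cls_preds, box_preds, _ = model(image)
        sum_loss, cls_loss, box_loss = mbox_loss(
            cls_preds, box_preds, cls_targets, box_targets)
    autograd.backward(sum_loss)
    trainer.step(image.shape[0])
    break  # one illustrative step
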
Example 11
    def __init__(self,
                 width,
                 height,
                 net=None,
                 mean=(0.485, 0.456, 0.406),
                 std=(0.229, 0.224, 0.225),
                 mixup=False,
                 deg=8,
                 **kwargs):
        self._width = width
        self._height = height
        self._mean = mean
        self._std = std
        self._mixup = mixup
        self._target_generator = None
        self._deg = deg
        if net is None:
            return

        # in case network has reset_ctx to gpu
        self._fake_x = mx.nd.zeros((1, 3, height, width))
        net = copy.deepcopy(net)
        net.collect_params().reset_ctx(None)
        with autograd.train_mode():
            _, self._anchors, self._offsets, self._feat_maps, _, _, _, _, _, _ = net(
                self._fake_x)
        from ....model_zoo.yolo.yolo_target import YOLOV3PrefetchTargetGenerator
        self._target_generator = YOLOV3PrefetchTargetGenerator(num_class=len(
            net.classes),
                                                               deg=self._deg,
                                                               **kwargs)
def test_transforms_presets_ssd():
    im_fname = gcv.utils.download('https://github.com/dmlc/web-data/blob/master/' +
                                  'gluoncv/detection/biking.jpg?raw=true', path='biking.jpg')
    x, orig_img = ssd.load_test(im_fname, short=512)
    x1, orig_img1 = ssd.transform_test(mx.image.imread(im_fname), short=512)
    np.testing.assert_allclose(x.asnumpy(), x1.asnumpy())
    np.testing.assert_allclose(orig_img, orig_img1)
    if not osp.isdir(osp.expanduser('~/.mxnet/datasets/voc')):
        return
    train_dataset = gcv.data.VOCDetection(splits=((2007, 'trainval'), (2012, 'trainval')))
    val_dataset = gcv.data.VOCDetection(splits=[(2007, 'test')])
    width, height = (512, 512)
    net = gcv.model_zoo.get_model('ssd_512_resnet50_v1_voc', pretrained=False, pretrained_base=False)
    net.initialize()
    num_workers = 0
    batch_size = 4
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width)))
    batchify_fn = Tuple(Stack(), Stack(), Stack())  # stack image, cls_targets, box_targets
    train_loader = gluon.data.DataLoader(
        train_dataset.transform(ssd.SSDDefaultTrainTransform(width, height, anchors)),
        batch_size, True, batchify_fn=batchify_fn, last_batch='rollover', num_workers=num_workers)
    val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
    val_loader = gluon.data.DataLoader(
        val_dataset.transform(ssd.SSDDefaultValTransform(width, height)),
        batch_size, False, batchify_fn=val_batchify_fn, last_batch='keep', num_workers=num_workers)
    train_loader2 = gluon.data.DataLoader(
        train_dataset.transform(ssd.SSDDefaultTrainTransform(width, height)),
        batch_size, True, batchify_fn=val_batchify_fn, last_batch='rollover', num_workers=num_workers)

    for loader in [train_loader, val_loader, train_loader2]:
        for i, batch in enumerate(loader):
            if i > 1:
                break
Example 13
def get_dataloader(net, train_dataset, val_dataset, data_shape, batch_size, num_workers, args):
    """Get dataloader: transform and batchify."""
    height, width = data_shape, data_shape
    # use fake data to generate fixed anchors for target generation
    with autograd.train_mode():
        _, _, face_anchors, \
        _, _, head_anchors, \
        _, _, body_anchors = net(mx.nd.zeros((1, 3, height, width)))
    anchors = [face_anchors, head_anchors, body_anchors]
    # stack image, cls_targets, box_targets
    train_batchify_fn = Tuple(Stack(),  # source img
                              Stack(), Stack(), Stack(),  # face_cls_targets, head_cls_targets, body_cls_targets
                              Stack(), Stack(), Stack())  # face_box_targets, head_box_targets, body_box_targets
    # train_batchify_fn = Tuple(Stack(),  # source img
    #                           Pad(), Pad(), Pad(),  # face_cls_targets, head_cls_targets, body_cls_targets
    #                           Pad(), Pad(), Pad())  # face_box_targets, head_box_targets, body_box_targets
    # get dataloader
    train_loader = gluon.data.DataLoader(train_dataset.transform(
        PyramidBoxTrainTransform(width, height, anchors)),
        batch_size=batch_size, shuffle=True,
        batchify_fn=train_batchify_fn, num_workers=num_workers, last_batch='rollover')
    val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
    val_loader = gluon.data.DataLoader(
        val_dataset.transform(PyramidBoxValTransform()),
        batch_size=batch_size, shuffle=False, batchify_fn=val_batchify_fn, last_batch='keep', num_workers=num_workers)
    return train_loader, val_loader
Example 14
    def __init__(self,
                 k,
                 width,
                 height,
                 net=None,
                 mean=(0.485, 0.456, 0.406),
                 std=(0.229, 0.224, 0.225),
                 mixup=False,
                 **kwargs):
        self._k = k
        self._width = width
        self._height = height
        self._mean = mean
        self._std = std
        self._mixup = mixup
        self._target_generator = None
        if net is None:
            return

        # in case network has reset_ctx to gpu
        self._fake_x = (mx.nd.zeros((1, 256, int(height / 8), int(width / 8))),
                        mx.nd.zeros(
                            (1, 512, int(height / 16), int(width / 16))),
                        mx.nd.zeros(
                            (1, 1024, int(height / 32), int(width / 32))))
        net = copy.deepcopy(net)
        net.collect_params().reset_ctx(None)
        with autograd.train_mode():
            _, self._anchors, self._offsets, self._feat_maps, _, _, _, _ = net(
                *self._fake_x)
        # from gluoncv.model_zoo.yolo.yolo_target import YOLOV3PrefetchTargetGenerator
        self._target_generator = YOLOV3PrefetchTargetGenerator(num_class=len(
            net.classes),
                                                               **kwargs)
Example 15
def test_transforms_presets_ssd():
    im_fname = gcv.utils.download('https://github.com/dmlc/web-data/blob/master/' +
                                  'gluoncv/detection/biking.jpg?raw=true', path='biking.jpg')
    x, orig_img = ssd.load_test(im_fname, short=512)
    x1, orig_img1 = ssd.transform_test(mx.image.imread(im_fname), short=512)
    np.testing.assert_allclose(x.asnumpy(), x1.asnumpy())
    np.testing.assert_allclose(orig_img, orig_img1)
    if not osp.isdir(osp.expanduser('~/.mxnet/datasets/voc')):
        return
    train_dataset = VOCDetectionTiny()
    val_dataset = VOCDetectionTiny(splits=[('tiny_motorbike', 'test')])
    width, height = (512, 512)
    net = gcv.model_zoo.get_model('ssd_512_resnet50_v1_voc', pretrained=False, pretrained_base=False)
    net.initialize()
    num_workers = 0
    batch_size = 4
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width)))
    batchify_fn = Tuple(Stack(), Stack(), Stack())  # stack image, cls_targets, box_targets
    train_loader = gluon.data.DataLoader(
        train_dataset.transform(ssd.SSDDefaultTrainTransform(width, height, anchors)),
        batch_size, True, batchify_fn=batchify_fn, last_batch='rollover', num_workers=num_workers)
    val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
    val_loader = gluon.data.DataLoader(
        val_dataset.transform(ssd.SSDDefaultValTransform(width, height)),
        batch_size, False, batchify_fn=val_batchify_fn, last_batch='keep', num_workers=num_workers)
    train_loader2 = gluon.data.DataLoader(
        train_dataset.transform(ssd.SSDDefaultTrainTransform(width, height)),
        batch_size, True, batchify_fn=val_batchify_fn, last_batch='rollover', num_workers=num_workers)

    for loader in [train_loader, val_loader, train_loader2]:
        for i, batch in enumerate(loader):
            if i > 1:
                break
Example 16
    def __init__(self,
                 width,
                 height,
                 net=None,
                 mean=(0.485, 0.456, 0.406),
                 std=(0.229, 0.224, 0.225),
                 mixup=False,
                 **kwargs):
        self._width = width
        self._height = height
        self._mean = mean
        self._std = std
        self._mixup = mixup
        self._internal_target_generator = None
        self._net_none = False
        if net is None:
            self._net_none = True
            return
        self._num_classes = len(net.classes)
        self._kwargs = kwargs

        # in case network has reset_ctx to gpu
        self._fake_x = mx.nd.zeros((1, 3, height, width))
        old_ctx = list(net.collect_params().values())[0].list_ctx()
        net.collect_params().reset_ctx(mx.cpu())
        with autograd.train_mode():
            _, self._anchors, self._offsets, self._feat_maps, _, _, _, _ = net(
                self._fake_x)
        net.collect_params().reset_ctx(old_ctx)
Example 17
    def __init__(self, width, height, net=None, mean=(0.485, 0.456, 0.406),
                 std=(0.229, 0.224, 0.225), mixup=False, **kwargs):
        self._width = width
        self._height = height
        self._mean = mean
        self._std = std
        self._mixup = mixup
        self._target_generator = None
        if net is None:
            return

        # in case network has reset_ctx to gpu
        # prepare fake data for generating the anchors
        # TODO: does this also help with the model's deferred initialization?
        self._fake_x = mx.nd.zeros((1, 3, height, width))
        net = copy.deepcopy(net)
        net.collect_params().reset_ctx(None)
        with autograd.train_mode():
            # run in train_mode to obtain the anchors and other needed info
            # returned info:
            # _anchors: a list holding the sizes of the 3 anchors at each output level
            # _offsets: the top-left coordinates of each cell
            # _feat_maps: used to get the feature-map size of each level
            _, self._anchors, self._offsets, self._feat_maps, _, _, _, _ = net(self._fake_x)
        from ....model_zoo.yolo.yolo_target import YOLOV3PrefetchTargetGenerator
        self._target_generator = YOLOV3PrefetchTargetGenerator(
            num_class=len(net.classes), **kwargs)
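
This __init__ matches GluonCV's YOLO3DefaultTrainTransform; a hedged construction sketch (the model and dataset names are illustrative):

import gluoncv as gcv
from gluoncv.data.transforms.presets.yolo import YOLO3DefaultTrainTransform

net = gcv.model_zoo.get_model('yolo3_darknet53_voc', pretrained=True)
transform = YOLO3DefaultTrainTransform(416, 416, net)
train_dataset = gcv.data.VOCDetection(splits=[(2007, 'trainval')])
sample = train_dataset.transform(transform)[0]  # image plus prefetched YOLO targets
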
Example 18
def get_dataloader(net, data_shape, batch_size, num_workers, ctx):
    """Get dataloader."""

    width, height = data_shape, data_shape
    # use fake data to generate fixed anchors for target generation
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width), ctx))
    anchors = anchors.as_in_context(mx.cpu())
    batchify_fn = Tuple(Stack(), Stack(),
                        Stack())  # stack image, cls_targets, box_targets

    # can I point that to a bundle of png files instead?
    train_dataset = gdata.RecordFileDetection(
        os.path.join(os.environ['SM_CHANNEL_TRAIN'], 'train.rec'))

    # this is the folder with all the training images
    train_folder = os.environ['SM_CHANNEL_TRAIN']

    train_loader = gluon.data.DataLoader(train_dataset.transform(
        SSDDefaultTrainTransform(width, height, anchors)),
                                         batch_size,
                                         True,
                                         batchify_fn=batchify_fn,
                                         last_batch='rollover',
                                         num_workers=num_workers)
    return train_loader
Example 19
def ssd_train_dataloader(net,
                         train_dataset,
                         data_shape=512,
                         batch_size=10,
                         num_workers=0):
    '''
    Returns the train loader from gluoncv.
    '''
    from gluoncv.data.batchify import Tuple, Stack, Pad
    from gluoncv.data.transforms.presets.ssd import SSDDefaultTrainTransform

    width, height = data_shape, data_shape
    # use fake data to generate fixed anchors for target generation
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width)))
    batchify_fn = Tuple(Stack(), Stack(),
                        Stack())  # stack image, cls_targets, box_targets
    new_SSDDefaultTrainTransform = SSDDefaultTrainTransform(
        width, height, anchors)
    # note: assigning __call__ on an instance is bypassed by Python's
    # special-method lookup, so this patch has no effect when the transform
    # is invoked; patch the class instead if new_trainloader_call must apply
    new_SSDDefaultTrainTransform.__call__ = new_trainloader_call
    train_loader = gluon.data.DataLoader(
        train_dataset.transform(new_SSDDefaultTrainTransform),
        batch_size,
        True,
        batchify_fn=batchify_fn,
        last_batch="rollover",
        num_workers=num_workers,
    )
    return train_loader
Example 20
def get_coco_data_loaders(net, train_data, val_data, in_size, bs, n_workers,
                          ctx):
    with autograd.train_mode():
        fake_in_size = 1
        channels = 3
        _, _, anchors = net(
            nd.zeros((fake_in_size, channels, in_size, in_size), ctx))
    anchors = anchors.as_in_context(mx.cpu())

    img_train_s = Stack()
    class_targets = Stack()
    box_targets = Stack()
    train_batchify_fn = Tuple(img_train_s, class_targets, box_targets)
    train_data_transformed = train_data.transform(
        SSDDefaultTrainTransform(in_size, in_size, anchors))
    train_data_loader = gluon.data.DataLoader(train_data_transformed,
                                              batch_size=bs,
                                              shuffle=True,
                                              batchify_fn=train_batchify_fn,
                                              last_batch='rollover',
                                              num_workers=n_workers)

    img_val_s = Stack()
    padding = Pad(pad_val=-1)
    val_batchify_fn = Tuple(img_val_s, padding)
    val_data_transformed = val_data.transform(
        SSDDefaultValTransform(in_size, in_size))
    val_data_loader = gluon.data.DataLoader(val_data_transformed,
                                            batch_size=bs,
                                            shuffle=False,
                                            batchify_fn=val_batchify_fn,
                                            last_batch='keep',
                                            num_workers=n_workers)

    return train_data_loader, val_data_loader
Example 21
def validate(epoch, val_loader, generator, opt, ctx, gen_shape=False):
    """
    evaluate program generator, in terms of IoU
    """
    generated_shapes = []
    original_shapes = []
    for idx, data in enumerate(val_loader):
        start = time.time()
        shapes = data.as_in_context(ctx)
        shapes = nd.expand_dims(shapes, axis=1)
        with autograd.train_mode():
            out = generator.decode(shapes)

        end = time.time()

        if gen_shape:
            out_1 = nd.round(out[0]).astype('int64')
            out_2 = nd.round(out[1]).astype('int64')
            generated_shapes.append(
                decode_multiple_block(out_1, out_2).astype("float32"))
            original_shapes.append(data.asnumpy())

        if idx % opt.info_interval == 0:
            print("Test: epoch {} batch {}/{}, time={:.3f}".format(
                epoch, idx, len(val_loader), end - start))

    if gen_shape:
        generated_shapes = np.concatenate(generated_shapes, axis=0)
        original_shapes = np.concatenate(original_shapes, axis=0)

    return generated_shapes, original_shapes
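
A hedged call sketch for this validation loop; `val_loader`, `generator`, and `opt` are assumed to come from the surrounding training script:

generated, original = validate(epoch=0, val_loader=val_loader,
                               generator=generator, opt=opt,
                               ctx=mx.gpu(0), gen_shape=True)
print(generated.shape, original.shape)  # stacked shapes as numpy arrays
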
Example 22
def test_graph_conv(idtype, out_dim):
    g = dgl.from_networkx(nx.path_graph(3))
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    adj = g.adjacency_matrix(transpose=True, ctx=ctx)

    conv = nn.GraphConv(5, out_dim, norm='none', bias=True)
    conv.initialize(ctx=ctx)
    # test#1: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    check_close(h1, _AXWb(adj, h0, conv.weight, conv.bias))
    # test#2: more-dim
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    check_close(h1, _AXWb(adj, h0, conv.weight, conv.bias))

    conv = nn.GraphConv(5, out_dim)
    conv.initialize(ctx=ctx)

    # test#3: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # test#4: basic
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    conv = nn.GraphConv(5, out_dim)
    conv.initialize(ctx=ctx)

    with autograd.train_mode():
        # test#3: basic
        h0 = F.ones((3, 5))
        h1 = conv(g, h0)
        assert len(g.ndata) == 0
        assert len(g.edata) == 0
        # test#4: basic
        h0 = F.ones((3, 5, 5))
        h1 = conv(g, h0)
        assert len(g.ndata) == 0
        assert len(g.edata) == 0

    # test not override features
    g.ndata["h"] = 2 * F.ones((3, 1))
    h1 = conv(g, h0)
    assert len(g.ndata) == 1
    assert len(g.edata) == 0
    assert "h" in g.ndata
    check_close(g.ndata['h'], 2 * F.ones((3, 1)))
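
A minimal hedged usage of DGL's GraphConv outside the test harness, assuming the MXNet backend is active:

import dgl
import networkx as nx
import mxnet as mx
from dgl.nn.mxnet import GraphConv

g = dgl.from_networkx(nx.path_graph(3))
conv = GraphConv(5, 2)
conv.initialize()
h = conv(g, mx.nd.ones((3, 5)))
print(h.shape)  # (3, 2)
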
Example 23
 def get_anchors(net):
     if net is None: return None
     for v in net.collect_params().values():
         ctx = v.data().context
         break
     x = nd.zeros(shape=(1, 3, height, width)).as_in_context(ctx)
     with autograd.train_mode():
         cls_preds, box_preds, anchors = net(x)
     return anchors
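
A hedged usage sketch; note that `height` and `width` are free variables inside get_anchors, so they must already exist in the enclosing scope (the values here are assumptions):

import gluoncv as gcv

height, width = 512, 512
net = gcv.model_zoo.get_model('ssd_512_resnet50_v1_voc', pretrained=True)
anchors = get_anchors(net)
print(anchors.shape)  # (1, num_anchors, 4)
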
Example 24
File: test_nn.py  Project: zwcdp/dgl
def test_graph_conv():
    g = dgl.DGLGraph(nx.path_graph(3))
    ctx = F.ctx()
    adj = g.adjacency_matrix(ctx=ctx)

    conv = nn.GraphConv(5, 2, norm=False, bias=True)
    conv.initialize(ctx=ctx)
    # test#1: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    check_close(h1, _AXWb(adj, h0, conv.weight, conv.bias))
    # test#2: more-dim
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    check_close(h1, _AXWb(adj, h0, conv.weight, conv.bias))

    conv = nn.GraphConv(5, 2)
    conv.initialize(ctx=ctx)

    # test#3: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # test#4: basic
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    conv = nn.GraphConv(5, 2)
    conv.initialize(ctx=ctx)

    with autograd.train_mode():
        # test#3: basic
        h0 = F.ones((3, 5))
        h1 = conv(g, h0)
        assert len(g.ndata) == 0
        assert len(g.edata) == 0
        # test#4: basic
        h0 = F.ones((3, 5, 5))
        h1 = conv(g, h0)
        assert len(g.ndata) == 0
        assert len(g.edata) == 0

    # test not override features
    g.ndata["h"] = 2 * F.ones((3, 1))
    h1 = conv(g, h0)
    assert len(g.ndata) == 1
    assert len(g.edata) == 0
    assert "h" in g.ndata
    check_close(g.ndata['h'], 2 * F.ones((3, 1)))
Example 25
def _get_dali_dataloader(net, train_dataset, val_dataset, data_shape,
                         global_batch_size, num_workers, devices, ctx,
                         horovod):
    width, height = data_shape, data_shape
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width), ctx=ctx))
    anchors = anchors.as_in_context(mx.cpu())

    if horovod:
        batch_size = global_batch_size // hvd.size()
        pipelines = [
            SSDDALIPipeline(device_id=hvd.local_rank(),
                            batch_size=batch_size,
                            data_shape=data_shape,
                            anchors=anchors,
                            num_workers=num_workers,
                            dataset_reader=train_dataset[0])
        ]
    else:
        num_devices = len(devices)
        batch_size = global_batch_size // num_devices
        pipelines = [
            SSDDALIPipeline(device_id=device_id,
                            batch_size=batch_size,
                            data_shape=data_shape,
                            anchors=anchors,
                            num_workers=num_workers,
                            dataset_reader=train_dataset[i])
            for i, device_id in enumerate(devices)
        ]

    epoch_size = train_dataset[0].size()
    if horovod:
        epoch_size //= hvd.size()
    train_loader = DALIGenericIterator(
        pipelines, [('data', DALIGenericIterator.DATA_TAG),
                    ('bboxes', DALIGenericIterator.LABEL_TAG),
                    ('label', DALIGenericIterator.LABEL_TAG)],
        epoch_size,
        auto_reset=True)

    # validation
    if not horovod or hvd.rank() == 0:
        val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
        val_loader = gluon.data.DataLoader(val_dataset.transform(
            SSDDefaultValTransform(width, height)),
                                           global_batch_size,
                                           False,
                                           batchify_fn=val_batchify_fn,
                                           last_batch='keep',
                                           num_workers=num_workers)
    else:
        val_loader = None

    return train_loader, val_loader
Example 26
def get_dataloader(net, train_dataset, data_shape, batch_size, num_workers):
    from gluoncv.data.batchify import Tuple, Stack, Pad
    from gluoncv.data.transforms.presets.ssd import SSDDefaultTrainTransform
    width, height = data_shape, data_shape
    # use fake data to generate fixed anchors for target generation
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width)))
    batchify_fn = Tuple(Stack(), Stack(), Stack())  # stack image, cls_targets, box_targets
    train_loader = gluon.data.DataLoader(
        train_dataset.transform(SSDDefaultTrainTransform(width, height, anchors)),
        batch_size, True, batchify_fn=batchify_fn, last_batch='rollover', num_workers=num_workers)
    return train_loader
Example 27
def get_dataloader(net, train_dataset, data_shape, batch_size, num_workers):
    from gluoncv.data.batchify import Tuple, Stack, Pad
    from gluoncv.data.transforms.presets.ssd import SSDDefaultTrainTransform
    width, height = data_shape, data_shape
    # use fake data to generate fixed anchors for target generation
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width)))
    batchify_fn = Tuple(Stack(), Stack(), Stack())  # stack image, cls_targets, box_targets
    train_loader = gluon.data.DataLoader(
        train_dataset.transform(SSDDefaultTrainTransform(width, height, anchors)),
        batch_size, True, batchify_fn=batchify_fn, last_batch='rollover', num_workers=num_workers)
    return train_loader
Example 28
def get_traindataloader(net, train_dataset, data_shape, batch_size, num_workers, is_shuffle=True):
    width, height = data_shape, data_shape
    # use fake data to generate fixed anchors for target generation
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width)))
    #print(anchors)  # mxnet ndarray, shape: 1 * 6132 * 4
    batchify_fn = Tuple(Stack(), Stack(), Stack())  # stack image, cls_targets, box_targets
    train_loader = gluon.data.DataLoader(
        train_dataset.transform(SSDDefaultTrainTransform(width, height, anchors)),
        batch_size, is_shuffle, batchify_fn=batchify_fn, last_batch='rollover', num_workers=num_workers)

    return train_loader
Example 29
def get_dataloader(net, train_dataset, val_dataset, data_shape, batch_size,
                   num_workers, ctx):
    """Loads data from a dataset and returns mini-batches of data, for both the training
    and the validation set.

    Arguments:
        net: the Gluon model you will train, used to generate fake anchors for target generation.
        train_dataset: Training dataset. Note that numpy and mxnet arrays can be directly used as a Dataset.
        val_dataset: Validation dataset. Note that numpy and mxnet arrays can be directly used as a Dataset.
        data_shape: Tuple, the input_shape of the model
        batch_size: Size of mini-batch.
        num_workers: The number of multiprocessing workers to use for data preprocessing.
        ctx: Context on which the fake forward pass runs (e.g. a GPU).
    Returns:
        train_loader: Gluon training dataloader
        val_loader: Gluon testing dataloader
    """
    width, height = data_shape

    # use fake data to generate fixed anchors for target generation
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width), ctx))

    anchors = anchors.as_in_context(mx.cpu())

    batchify_fn = Tuple(Stack(), Stack(),
                        Stack())  # stack image, cls_targets, box_targets

    train_loader = gluon.data.DataLoader(
        train_dataset.transform(
            SSDDefaultTrainTransform(width, height, anchors)),
        batch_size,
        True,
        batchify_fn=batchify_fn,
        last_batch="rollover",
        num_workers=num_workers,
    )

    val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))

    val_loader = gluon.data.DataLoader(
        val_dataset.transform(SSDDefaultValTransform(width, height)),
        batch_size,
        False,
        batchify_fn=val_batchify_fn,
        last_batch="keep",
        num_workers=num_workers,
    )

    return train_loader, val_loader
Example 30
def get_dataloader(net, train_dataset, val_dataset, data_shape, batch_size, num_workers):
    """Get dataloader."""
    width, height = data_shape, data_shape
    # use fake data to generate fixed anchors for target generation
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width)))
    train_loader = gdata.DetectionDataLoader(
        train_dataset.transform(SSDDefaultTrainTransform(width, height, anchors)),
        batch_size, True, last_batch='rollover', num_workers=num_workers)
    val_loader = gdata.DetectionDataLoader(
        val_dataset.transform(SSDDefaultValTransform(width, height)),
        batch_size, False, last_batch='keep', num_workers=num_workers)
    return train_loader, val_loader
Example 31
def get_dataloader(net, train_dataset, val_dataset, data_shape, batch_size, num_workers):
    """Get dataloader."""
    width, height = data_shape, data_shape
    # use fake data to generate fixed anchors for target generation
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width)))
    batchify_fn = Tuple(Stack(), Stack(), Stack())  # stack image, cls_targets, box_targets
    train_loader = gluon.data.DataLoader(
        train_dataset.transform(SSDDefaultTrainTransform(width, height, anchors)),
        batch_size, True, batchify_fn=batchify_fn, last_batch='rollover', num_workers=num_workers)
    val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
    val_loader = gluon.data.DataLoader(
        val_dataset.transform(SSDDefaultValTransform(width, height)),
        batch_size, False, batchify_fn=val_batchify_fn, last_batch='keep', num_workers=num_workers)
    return train_loader, val_loader
Example 32
 def hybrid_forward(self, F, x, gamma, beta):
     # normalization
     with autograd.train_mode():
         y = x.expand_dims(0).reshape(0, 0, self.ngroups, -1)
         y = y.reshape(1, -3, -1)
         batch = x.shape[0]
         y = F.BatchNorm(y,
                         F.ones(batch * self.ngroups, ctx=x.context),
                         F.zeros(batch * self.ngroups, ctx=x.context),
                         F.zeros(batch * self.ngroups, ctx=x.context),
                         F.ones(batch * self.ngroups, ctx=x.context),
                         name='fwd',
                         **self._kwargs)
     # scale and shift
     y = y.reshape_like(x).reshape(0, 0, -1)
     y = y * gamma.reshape(1, -1, 1) + beta.reshape(1, -1, 1)
     return y.reshape_like(x)
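
This block implements group normalization by reshaping into groups and reusing BatchNorm; for comparison, a hedged sketch with MXNet's built-in GroupNorm layer (available in recent MXNet releases):

import mxnet as mx
from mxnet.gluon import nn

gn = nn.GroupNorm(num_groups=4)
gn.initialize()
x = mx.nd.random.uniform(shape=(2, 8, 16, 16))
print(gn(x).shape)  # (2, 8, 16, 16): normalization preserves the input shape
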
Example 33
def get_dataloader(net, train_dataset, val_dataset, data_shape, batch_size, num_workers, args):
    """Get dataloader: transform and batchify."""
    width, height = data_shape, data_shape
    # use fake data to generate fixed anchors for target generation
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width)))
    # stack image, cls_targets, box_targets
    batchify_fn = Tuple(Stack(), Stack(), Stack())
    train_loader = gluon.data.DataLoader(
        train_dataset.transform(
            SFDTrainTransform(width, height, anchors,
                              (args.match_high_thresh, args.match_low_thresh),
                              args.match_topk)),
        batch_size, shuffle=True, batchify_fn=batchify_fn,
        last_batch='rollover', num_workers=num_workers)
    val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
    val_loader = gluon.data.DataLoader(
        val_dataset.transform(SFDValTransform()),
        batch_size, False, batchify_fn=val_batchify_fn, last_batch='keep', num_workers=num_workers)
    return train_loader, val_loader
Example 34
    def __init__(self, width, height, net=None, mean=(0.485, 0.456, 0.406),
                 std=(0.229, 0.224, 0.225), mixup=False, **kwargs):
        self._width = width
        self._height = height
        self._mean = mean
        self._std = std
        self._mixup = mixup
        self._target_generator = None
        if net is None:
            return

        # in case network has reset_ctx to gpu
        self._fake_x = mx.nd.zeros((1, 3, height, width))
        net = copy.deepcopy(net)
        net.collect_params().reset_ctx(None)
        with autograd.train_mode():
            _, self._anchors, self._offsets, self._feat_maps, _, _, _, _ = net(self._fake_x)
        from ....model_zoo.yolo.yolo_target import YOLOV3PrefetchTargetGenerator
        self._target_generator = YOLOV3PrefetchTargetGenerator(
            num_class=len(net.classes), **kwargs)
##############################################################################
# Faster-RCNN network is callable with image tensor
import mxnet as mx
x = mx.nd.zeros(shape=(1, 3, 600, 800))
net.initialize()
cids, scores, bboxes = net(x)

##############################################################################
# Faster-RCNN returns three values, where ``cids`` are the class labels,
# ``scores`` are confidence scores of each prediction,
# and ``bboxes`` are absolute coordinates of corresponding bounding boxes.

##############################################################################
# The Faster-RCNN network behaves differently in training mode:
from mxnet import autograd
with autograd.train_mode():
    # this time we need ground-truth to generate high quality roi proposals during training
    gt_box = mx.nd.zeros(shape=(1, 1, 4))
    cls_preds, box_preds, roi, samples, matches, rpn_score, rpn_box, anchors = net(x, gt_box)

##############################################################################
# In training mode, Faster-RCNN returns many intermediate values that are
# required for end-to-end training:
# ``cls_preds`` are the class predictions prior to softmax,
# ``box_preds`` are bounding box offsets with one-to-one correspondence to proposals,
# ``roi`` holds the proposal candidates, ``samples`` and ``matches`` are the
# sampling/matching results of the RPN anchors,
# ``rpn_score`` and ``rpn_box`` are the raw outputs from the RPN's convolutional layers,
# and ``anchors`` are the absolute coordinates of the corresponding anchor boxes.
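
##############################################################################
# A hedged sketch for inspecting these training-mode outputs; the printed
# shapes depend on the image size and the network, so treat them as examples.
for name, out in zip(['cls_preds', 'box_preds', 'roi', 'samples', 'matches',
                      'rpn_score', 'rpn_box', 'anchors'],
                     [cls_preds, box_preds, roi, samples, matches,
                      rpn_score, rpn_box, anchors]):
    print(name, getattr(out, 'shape', type(out)))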


##########################################################
# Training losses