Example #1
0
 def test_normalize_numpy_float(self):
     """Normalize with scalar mean/std and a float max_pixel_value on a raw numpy array."""
     normalize_op = Normalize(
         inputs="image", outputs="image", mean=0.482, std=0.289, max_pixel_value=27.0)
     result = normalize_op.forward(data=self.numpy_array, state={})
     # Compare to the precomputed fixture to 2 decimal places.
     np.testing.assert_array_almost_equal(result, self.expected_result, 2)
Example #2
0
 def test_normalize_torch_multi(self):
     """Normalize with per-channel mean/std applied to a torch tensor input."""
     normalize_op = Normalize(inputs="image",
                              outputs="image",
                              mean=(0.44, 0.48, 0.52),
                              std=(0.287, 0.287, 0.287),
                              max_pixel_value=27)
     tensor_data = to_tensor(self.numpy_array, "torch")
     result = normalize_op.forward(data=tensor_data, state={})
     # Convert back to numpy for comparison against the multi-channel fixture.
     np.testing.assert_array_almost_equal(result.numpy(), self.expected_result_multi, 2)
Example #3
0
 def test_normalize_tf_int(self):
     """Normalize with scalar mean/std and an integer max_pixel_value on a tf tensor."""
     normalize_op = Normalize(inputs="image",
                              outputs="image",
                              mean=0.482,
                              std=0.289,
                              max_pixel_value=27)
     tensor_data = tf.convert_to_tensor(self.numpy_array)
     result = normalize_op.forward(data=tensor_data, state={})
     # Pull the values out of the tf tensor before comparing with the fixture.
     np.testing.assert_array_almost_equal(result.numpy(), self.expected_result, 2)
Example #4
0
def get_estimator(data_dir=None,
                  epochs=12,
                  batch_size_per_gpu=4,
                  im_size=1344,
                  model_dir=None,
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None):
    """Build a SoloV2 instance-segmentation Estimator on MSCOCO.

    Args:
        data_dir: Root directory of the MSCOCO dataset (downloaded if None,
            per ``mscoco.load_data`` behavior — confirm against its docs).
        epochs: Total number of training epochs.
        batch_size_per_gpu: Per-device batch size; the global batch size is
            this value times the number of available devices.
        im_size: Square input resolution; must be a multiple of 32 so the
            feature pyramid strides divide evenly.
        model_dir: Directory for best-model checkpoints. Defaults to a fresh
            temporary directory created per call. (Using ``None`` as the
            sentinel avoids the bug where ``tempfile.mkdtemp()`` in the
            signature is evaluated once at import time, making every call
            share one directory.)
        train_steps_per_epoch: Optional cap on training steps per epoch.
        eval_steps_per_epoch: Optional cap on eval steps per epoch.

    Returns:
        A configured ``fe.Estimator``.
    """
    assert im_size % 32 == 0, "im_size must be a multiple of 32"
    if model_dir is None:
        # Create the checkpoint directory lazily so each call gets its own.
        model_dir = tempfile.mkdtemp()
    num_device = get_num_devices()
    train_ds, val_ds = mscoco.load_data(root_dir=data_dir, load_masks=True)
    batch_size = num_device * batch_size_per_gpu
    pipeline = fe.Pipeline(
        train_data=train_ds,
        eval_data=val_ds,
        test_data=val_ds,
        ops=[
            ReadImage(inputs="image", outputs="image"),
            MergeMask(inputs="mask", outputs="mask"),
            # Original image size is only needed for test-time mAP rescaling.
            GetImageSize(inputs="image", outputs="imsize", mode="test"),
            LongestMaxSize(max_size=im_size,
                           image_in="image",
                           mask_in="mask",
                           bbox_in="bbox",
                           bbox_params="coco"),
            # Drop samples whose boxes were all removed by the resize.
            RemoveIf(fn=lambda x: len(x) == 0, inputs="bbox"),
            PadIfNeeded(min_height=im_size,
                        min_width=im_size,
                        image_in="image",
                        mask_in="mask",
                        bbox_in="bbox",
                        bbox_params="coco",
                        border_mode=cv2.BORDER_CONSTANT,
                        value=0),
            Sometimes(
                HorizontalFlip(image_in="image",
                               mask_in="mask",
                               bbox_in="bbox",
                               bbox_params="coco",
                               mode="train")),
            Resize(height=im_size // 4, width=im_size // 4,
                   image_in='mask'),  # downscale mask for memory efficiency
            Gt2Target(inputs=("mask", "bbox"),
                      outputs=("gt_match", "mask", "classes")),
            Delete(keys="bbox"),
            Delete(keys="image_id", mode="!test"),
            Batch(batch_size=batch_size, pad_value=0)
        ],
        num_process=8 * num_device)
    # Linear-scaling rule: base LR of 1e-2 is tuned for batch size 16.
    init_lr = 1e-2 / 16 * batch_size
    model = fe.build(
        model_fn=SoloV2,
        optimizer_fn=lambda x: torch.optim.SGD(x, lr=init_lr, momentum=0.9))
    network = fe.Network(ops=[
        Normalize(inputs="image",
                  outputs="image",
                  mean=(0.485, 0.456, 0.406),
                  std=(0.229, 0.224, 0.225)),
        Permute(inputs="image", outputs='image'),
        ModelOp(model=model,
                inputs="image",
                outputs=("feat_seg", "feat_cls_list", "feat_kernel_list")),
        # Identity LambdaOps fan the 5-level pyramid lists out into named keys.
        LambdaOp(fn=lambda x: x,
                 inputs="feat_cls_list",
                 outputs=("cls1", "cls2", "cls3", "cls4", "cls5")),
        LambdaOp(fn=lambda x: x,
                 inputs="feat_kernel_list",
                 outputs=("k1", "k2", "k3", "k4", "k5")),
        # One loss per pyramid level; second argument is the level's grid size.
        Solov2Loss(0,
                   40,
                   inputs=("mask", "classes", "gt_match", "feat_seg", "cls1",
                           "k1"),
                   outputs=("l_c1", "l_s1")),
        Solov2Loss(1,
                   36,
                   inputs=("mask", "classes", "gt_match", "feat_seg", "cls2",
                           "k2"),
                   outputs=("l_c2", "l_s2")),
        Solov2Loss(2,
                   24,
                   inputs=("mask", "classes", "gt_match", "feat_seg", "cls3",
                           "k3"),
                   outputs=("l_c3", "l_s3")),
        Solov2Loss(3,
                   16,
                   inputs=("mask", "classes", "gt_match", "feat_seg", "cls4",
                           "k4"),
                   outputs=("l_c4", "l_s4")),
        Solov2Loss(4,
                   12,
                   inputs=("mask", "classes", "gt_match", "feat_seg", "cls5",
                           "k5"),
                   outputs=("l_c5", "l_s5")),
        CombineLoss(inputs=("l_c1", "l_s1", "l_c2", "l_s2", "l_c3", "l_s3",
                            "l_c4", "l_s4", "l_c5", "l_s5"),
                    outputs=("total_loss", "cls_loss", "seg_loss")),
        L2Regularizaton(inputs="total_loss",
                        outputs="total_loss_l2",
                        model=model,
                        beta=1e-5,
                        mode="train"),
        UpdateOp(model=model, loss_name="total_loss_l2"),
        PointsNMS(inputs="feat_cls_list", outputs="feat_cls_list",
                  mode="test"),
        Predict(inputs=("feat_seg", "feat_cls_list", "feat_kernel_list"),
                outputs=("seg_preds", "cate_scores", "cate_labels"),
                mode="test")
    ])
    train_steps_epoch = int(np.ceil(len(train_ds) / batch_size))
    # Epoch 1: LR warmup; epochs 2+: cosine decay down to init_lr / 100.
    lr_schedule = {
        1:
        LRScheduler(
            model=model,
            lr_fn=lambda step: lr_schedule_warmup(step, init_lr=init_lr)),
        2:
        LRScheduler(
            model=model,
            lr_fn=lambda step: cosine_decay(step,
                                            cycle_length=train_steps_epoch *
                                            (epochs - 1),
                                            init_lr=init_lr,
                                            min_lr=init_lr / 100,
                                            start=train_steps_epoch))
    }
    traces = [
        EpochScheduler(lr_schedule),
        COCOMaskmAP(data_dir=val_ds.root_dir,
                    inputs=("seg_preds", "cate_scores", "cate_labels",
                            "image_id", "imsize"),
                    mode="test"),
        BestModelSaver(model=model, save_dir=model_dir, metric="total_loss")
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             monitor_names=("cls_loss", "seg_loss"),
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch)
    return estimator