def test_single_input(self):
    cc = CenterCrop(image_in='x', height=10, width=10)
    output = cc.forward(data=self.single_input, state={})
    with self.subTest('Check output type'):
        self.assertEqual(type(output), list)
    with self.subTest('Check output image shape'):
        self.assertEqual(output[0].shape, self.single_output_shape)

def test_multi_input(self):
    cc = CenterCrop(height=10, width=10, image_in='x', mask_in='x_mask')
    output = cc.forward(data=self.input_image_and_mask, state={})
    with self.subTest('Check output type'):
        self.assertEqual(type(output), list)
    with self.subTest('Check output image shape'):
        self.assertEqual(output[0].shape, self.image_and_mask_output_shape)
    with self.subTest('Check output mask shape'):
        self.assertEqual(output[1].shape, self.image_and_mask_output_shape)
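
# The fixtures referenced above (self.single_input, self.single_output_shape, etc.) are defined outside
# this excerpt. A minimal sketch of what they could look like, e.g. in a setUpClass classmethod, assuming
# 28x28x3 random inputs (the real test class may use different shapes or dtypes); CenterCrop with
# height=10, width=10 then yields a 10x10x3 output:
#
#     cls.single_input = [np.random.rand(28, 28, 3)]
#     cls.single_output_shape = (10, 10, 3)
#     cls.input_image_and_mask = [np.random.rand(28, 28, 3), np.random.rand(28, 28, 3)]
#     cls.image_and_mask_output_shape = (10, 10, 3)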

def get_estimator(data_dir=None,
                  model_dir=tempfile.mkdtemp(),
                  epochs=200,
                  batch_size_per_gpu=32,
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None):
    num_device = get_num_devices()
    # Load COCO; wrap the training split so that each sample carries four images for mosaic augmentation.
    train_ds, val_ds = mscoco.load_data(root_dir=data_dir)
    train_ds = PreMosaicDataset(mscoco_ds=train_ds)
    batch_size = num_device * batch_size_per_gpu
    # Preprocessing: resize/pad to 640x640, build mosaics, random flip, HSV jitter, and ground-truth boxes.
    pipeline = fe.Pipeline(
        train_data=train_ds,
        eval_data=val_ds,
        ops=[
            ReadImage(inputs=("image1", "image2", "image3", "image4"),
                      outputs=("image1", "image2", "image3", "image4"),
                      mode="train"),
            ReadImage(inputs="image", outputs="image", mode="eval"),
            LongestMaxSize(max_size=640,
                           image_in="image1",
                           bbox_in="bbox1",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="train"),
            LongestMaxSize(max_size=640,
                           image_in="image2",
                           bbox_in="bbox2",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="train"),
            LongestMaxSize(max_size=640,
                           image_in="image3",
                           bbox_in="bbox3",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="train"),
            LongestMaxSize(max_size=640,
                           image_in="image4",
                           bbox_in="bbox4",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="train"),
            LongestMaxSize(max_size=640,
                           image_in="image",
                           bbox_in="bbox",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="eval"),
            PadIfNeeded(min_height=640,
                        min_width=640,
                        image_in="image",
                        bbox_in="bbox",
                        bbox_params=BboxParams("coco", min_area=1.0),
                        mode="eval",
                        border_mode=cv2.BORDER_CONSTANT,
                        value=(114, 114, 114)),
            CombineMosaic(inputs=("image1", "image2", "image3", "image4", "bbox1", "bbox2", "bbox3", "bbox4"),
                          outputs=("image", "bbox"),
                          mode="train"),
            CenterCrop(height=640,
                       width=640,
                       image_in="image",
                       bbox_in="bbox",
                       bbox_params=BboxParams("coco", min_area=1.0),
                       mode="train"),
            Sometimes(
                HorizontalFlip(image_in="image",
                               bbox_in="bbox",
                               bbox_params=BboxParams("coco", min_area=1.0),
                               mode="train")),
            HSVAugment(inputs="image", outputs="image", mode="train"),
            ToArray(inputs="bbox", outputs="bbox", dtype="float32"),
            CategoryID2ClassID(inputs="bbox", outputs="bbox"),
            GTBox(inputs="bbox", outputs=("gt_sbbox", "gt_mbbox", "gt_lbbox"), image_size=640),
            Delete(keys=("image1", "image2", "image3", "image4", "bbox1", "bbox2", "bbox3", "bbox4", "bbox"),
                   mode="train"),
            Delete(keys="image_id", mode="eval"),
            Batch(batch_size=batch_size, pad_value=0)
        ])
    # Scale the initial learning rate linearly with the total batch size.
    init_lr = 1e-2 / 64 * batch_size
    model = fe.build(
        lambda: YoloV5(w=640, h=640, c=3),
        optimizer_fn=lambda x: torch.optim.SGD(x, lr=init_lr, momentum=0.937, weight_decay=0.0005, nesterov=True),
        mixed_precision=True)
    # Network: forward pass on the three detection scales, per-scale losses, and box decoding for eval.
    network = fe.Network(ops=[
        RescaleTranspose(inputs="image", outputs="image"),
        ModelOp(model=model, inputs="image", outputs=("pred_s", "pred_m", "pred_l")),
        DecodePred(inputs=("pred_s", "pred_m", "pred_l"), outputs=("pred_s", "pred_m", "pred_l")),
        ComputeLoss(inputs=("pred_s", "gt_sbbox"), outputs=("sbbox_loss", "sconf_loss", "scls_loss")),
        ComputeLoss(inputs=("pred_m", "gt_mbbox"), outputs=("mbbox_loss", "mconf_loss", "mcls_loss")),
        ComputeLoss(inputs=("pred_l", "gt_lbbox"), outputs=("lbbox_loss", "lconf_loss", "lcls_loss")),
        Average(inputs=("sbbox_loss", "mbbox_loss", "lbbox_loss"), outputs="bbox_loss"),
        Average(inputs=("sconf_loss", "mconf_loss", "lconf_loss"), outputs="conf_loss"),
        Average(inputs=("scls_loss", "mcls_loss", "lcls_loss"), outputs="cls_loss"),
        Average(inputs=("bbox_loss", "conf_loss", "cls_loss"), outputs="total_loss"),
        PredictBox(width=640, height=640, inputs=("pred_s", "pred_m", "pred_l"), outputs="box_pred", mode="eval"),
        UpdateOp(model=model, loss_name="total_loss")
    ])
    traces = [
        MeanAveragePrecision(num_classes=80, true_key='bbox', pred_key='box_pred', mode="eval"),
        BestModelSaver(model=model, save_dir=model_dir, metric='mAP', save_best_mode="max")
    ]
    # Warm up the learning rate for the first 3 epochs, then switch to cosine decay from epoch 4 onward.
    lr_schedule = {
        1: LRScheduler(
            model=model,
            lr_fn=lambda step: lr_schedule_warmup(
                step, train_steps_epoch=np.ceil(len(train_ds) / batch_size), init_lr=init_lr)),
        4: LRScheduler(
            model=model,
            lr_fn=lambda epoch: cosine_decay(
                epoch, cycle_length=epochs - 3, init_lr=init_lr, min_lr=init_lr / 100, start=4))
    }
    traces.append(EpochScheduler(lr_schedule))
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             monitor_names=["bbox_loss", "conf_loss", "cls_loss"],
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch)
    return estimator