def _build_estimator(model: Union[tf.keras.Model, torch.nn.Module], trace: Traceability, axis: int = -1):
    """Assemble a minimal 1-epoch MNIST estimator for exercising a Traceability trace.

    Args:
        model: The model to wire into the network (TensorFlow or PyTorch).
        trace: The Traceability trace to attach alongside the standard traces.
        axis: Channel axis for ExpandDims; -1 also selects an NHWC fake batch
            (tf), while any other value selects an NCHW fake batch (torch).

    Returns:
        A configured fe.Estimator limited to a single training step per epoch.
    """
    batch_size = 32
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    preprocessing = [ExpandDims(inputs="x", outputs="x", axis=axis), Minmax(inputs="x", outputs="x")]
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=preprocessing)
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        LRScheduler(model=model, lr_fn=lambda step: cosine_decay(step, cycle_length=3750, init_lr=1e-3)),
        trace
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=1,
                             traces=traces,
                             max_train_steps_per_epoch=1,
                             max_eval_steps_per_epoch=None)
    # Attach a fake input batch matching the model's expected layout.
    # NOTE(review): FeInputSpec presumably records the input signature for
    # later re-instantiation of the model — confirm against its definition.
    if axis == -1:
        dummy_batch = tf.ones(shape=(batch_size, 28, 28, 1))
    else:
        dummy_batch = torch.ones(size=(batch_size, 1, 28, 28))
    model.fe_input_spec = FeInputSpec(dummy_batch, model)
    return estimator
def get_estimator(epochs=2, batch_size=32, save_dir=None):
    """Build the MNIST LeNet training estimator.

    Args:
        epochs: Number of epochs to train for.
        batch_size: Batch size used for all modes.
        save_dir: Directory where BestModelSaver stores the best model.
            Defaults to a fresh temporary directory per call.

    Returns:
        A configured fe.Estimator.
    """
    # Fix: tempfile.mkdtemp() must not be a default argument — defaults are
    # evaluated once at import time, so the directory would be created as an
    # import side effect and shared by every call. Resolve it per call instead.
    if save_dir is None:
        save_dir = tempfile.mkdtemp()
    # step 1: pipeline — hold out half of eval as a test split, scale pixels to [0, 1]
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[ExpandDims(inputs="x", outputs="x"), Minmax(inputs="x", outputs="x")])
    # step 2: network — standard cross-entropy classification
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3: traces — track accuracy, keep the best model, cosine-decay the LR
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        BestModelSaver(model=model, save_dir=save_dir, metric="accuracy", save_best_mode="max"),
        LRScheduler(model=model, lr_fn=lambda step: cosine_decay(step, cycle_length=3750, init_lr=1e-3))
    ]
    estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=epochs, traces=traces)
    return estimator
def get_estimator(epochs=2,
                  batch_size=32,
                  max_train_steps_per_epoch=None,
                  max_eval_steps_per_epoch=None,
                  save_dir=None):
    """Build an MNIST LeNet estimator instrumented for inspection.

    In addition to standard training, this variant watches the input tensor,
    exposes an intermediate 'dense' layer as "embedding", computes gradients of
    that embedding w.r.t. the input, and logs embeddings to TensorBoard.

    Args:
        epochs: Number of epochs to train for.
        batch_size: Batch size used for all modes.
        max_train_steps_per_epoch: Optional cap on training steps per epoch.
        max_eval_steps_per_epoch: Optional cap on eval steps per epoch.
        save_dir: Directory where BestModelSaver stores the best model.
            Defaults to a fresh temporary directory per call.

    Returns:
        A configured fe.Estimator.
    """
    # Fix: tempfile.mkdtemp() must not be a default argument — it would run
    # once at import time and every call would share one directory.
    if save_dir is None:
        save_dir = tempfile.mkdtemp()
    # step 1: num_process=0 keeps preprocessing in-process (useful when debugging)
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[
                               ExpandDims(inputs="x", outputs="x"),
                               Minmax(inputs="x", outputs="x")
                           ],
                           num_process=0)
    # step 2
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    # List the submodules so the 'intermediate_layers' name below can be checked.
    print([f"{idx}: {x.name}" for idx, x in enumerate(model.submodules)])
    network = fe.Network(ops=[
        Watch(inputs="x"),
        ModelOp(model=model, inputs="x", outputs=["y_pred", "embedding"], intermediate_layers='dense'),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        GradientOp(finals="embedding", inputs="x", outputs="grads"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        Inspector(),
        BestModelSaver(model=model, save_dir=save_dir, metric="accuracy", save_best_mode="max"),
        LRScheduler(model=model,
                    lr_fn=lambda step: cosine_decay(step, cycle_length=3750, init_lr=1e-3)),
        TensorBoard(log_dir="tf_logs", write_embeddings="embedding", embedding_labels="y")
    ]
    estimator = fe.Estimator(
        pipeline=pipeline,
        network=network,
        epochs=epochs,
        traces=traces,
        max_train_steps_per_epoch=max_train_steps_per_epoch,
        max_eval_steps_per_epoch=max_eval_steps_per_epoch)
    return estimator
def get_estimator(data_dir=None,
                  model_dir=None,
                  epochs=200,
                  batch_size_per_gpu=32,
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None):
    """Build a YoloV5 object-detection estimator on MSCOCO with mosaic augmentation.

    Args:
        data_dir: Root directory of the MSCOCO dataset (None lets the loader decide).
        model_dir: Directory where BestModelSaver stores the best model.
            Defaults to a fresh temporary directory per call.
        epochs: Number of epochs to train for.
        batch_size_per_gpu: Per-device batch size; total batch is this times device count.
        train_steps_per_epoch: Optional cap on training steps per epoch.
        eval_steps_per_epoch: Optional cap on eval steps per epoch.

    Returns:
        A configured fe.Estimator.
    """
    # Fix: tempfile.mkdtemp() must not be a default argument — it would be
    # evaluated once at import time and shared by every call.
    if model_dir is None:
        model_dir = tempfile.mkdtemp()
    num_device = get_num_devices()
    train_ds, val_ds = mscoco.load_data(root_dir=data_dir)
    train_ds = PreMosaicDataset(mscoco_ds=train_ds)
    batch_size = num_device * batch_size_per_gpu
    pipeline = fe.Pipeline(
        train_data=train_ds,
        eval_data=val_ds,
        ops=[
            # Train mode reads 4 images per sample for the mosaic combine below.
            ReadImage(inputs=("image1", "image2", "image3", "image4"),
                      outputs=("image1", "image2", "image3", "image4"),
                      mode="train"),
            ReadImage(inputs="image", outputs="image", mode="eval"),
            LongestMaxSize(max_size=640, image_in="image1", bbox_in="bbox1",
                           bbox_params=BboxParams("coco", min_area=1.0), mode="train"),
            LongestMaxSize(max_size=640, image_in="image2", bbox_in="bbox2",
                           bbox_params=BboxParams("coco", min_area=1.0), mode="train"),
            LongestMaxSize(max_size=640, image_in="image3", bbox_in="bbox3",
                           bbox_params=BboxParams("coco", min_area=1.0), mode="train"),
            LongestMaxSize(max_size=640, image_in="image4", bbox_in="bbox4",
                           bbox_params=BboxParams("coco", min_area=1.0), mode="train"),
            LongestMaxSize(max_size=640, image_in="image", bbox_in="bbox",
                           bbox_params=BboxParams("coco", min_area=1.0), mode="eval"),
            PadIfNeeded(min_height=640, min_width=640, image_in="image", bbox_in="bbox",
                        bbox_params=BboxParams("coco", min_area=1.0), mode="eval",
                        border_mode=cv2.BORDER_CONSTANT, value=(114, 114, 114)),
            CombineMosaic(inputs=("image1", "image2", "image3", "image4",
                                  "bbox1", "bbox2", "bbox3", "bbox4"),
                          outputs=("image", "bbox"),
                          mode="train"),
            CenterCrop(height=640, width=640, image_in="image", bbox_in="bbox",
                       bbox_params=BboxParams("coco", min_area=1.0), mode="train"),
            Sometimes(
                HorizontalFlip(image_in="image", bbox_in="bbox",
                               bbox_params=BboxParams("coco", min_area=1.0), mode="train")),
            HSVAugment(inputs="image", outputs="image", mode="train"),
            ToArray(inputs="bbox", outputs="bbox", dtype="float32"),
            CategoryID2ClassID(inputs="bbox", outputs="bbox"),
            GTBox(inputs="bbox", outputs=("gt_sbbox", "gt_mbbox", "gt_lbbox"), image_size=640),
            # Drop the per-tile intermediates once the mosaic + GT boxes exist.
            Delete(keys=("image1", "image2", "image3", "image4",
                         "bbox1", "bbox2", "bbox3", "bbox4", "bbox"),
                   mode="train"),
            Delete(keys="image_id", mode="eval"),
            Batch(batch_size=batch_size, pad_value=0)
        ])
    # Linear scaling: base LR 1e-2 corresponds to a total batch size of 64.
    init_lr = 1e-2 / 64 * batch_size
    model = fe.build(
        lambda: YoloV5(w=640, h=640, c=3),
        optimizer_fn=lambda x: torch.optim.SGD(
            x, lr=init_lr, momentum=0.937, weight_decay=0.0005, nesterov=True),
        mixed_precision=True)
    network = fe.Network(ops=[
        RescaleTranspose(inputs="image", outputs="image"),
        ModelOp(model=model, inputs="image", outputs=("pred_s", "pred_m", "pred_l")),
        DecodePred(inputs=("pred_s", "pred_m", "pred_l"), outputs=("pred_s", "pred_m", "pred_l")),
        # One loss per feature scale (small / medium / large anchors).
        ComputeLoss(inputs=("pred_s", "gt_sbbox"), outputs=("sbbox_loss", "sconf_loss", "scls_loss")),
        ComputeLoss(inputs=("pred_m", "gt_mbbox"), outputs=("mbbox_loss", "mconf_loss", "mcls_loss")),
        ComputeLoss(inputs=("pred_l", "gt_lbbox"), outputs=("lbbox_loss", "lconf_loss", "lcls_loss")),
        Average(inputs=("sbbox_loss", "mbbox_loss", "lbbox_loss"), outputs="bbox_loss"),
        Average(inputs=("sconf_loss", "mconf_loss", "lconf_loss"), outputs="conf_loss"),
        Average(inputs=("scls_loss", "mcls_loss", "lcls_loss"), outputs="cls_loss"),
        Average(inputs=("bbox_loss", "conf_loss", "cls_loss"), outputs="total_loss"),
        PredictBox(width=640, height=640, inputs=("pred_s", "pred_m", "pred_l"),
                   outputs="box_pred", mode="eval"),
        UpdateOp(model=model, loss_name="total_loss")
    ])
    traces = [
        MeanAveragePrecision(num_classes=80, true_key='bbox', pred_key='box_pred', mode="eval"),
        BestModelSaver(model=model, save_dir=model_dir, metric='mAP', save_best_mode="max")
    ]
    # Warm up for the first 3 epochs, then cosine-decay from epoch 4 onward.
    lr_schedule = {
        1: LRScheduler(model=model,
                       lr_fn=lambda step: lr_schedule_warmup(
                           step,
                           train_steps_epoch=np.ceil(len(train_ds) / batch_size),
                           init_lr=init_lr)),
        4: LRScheduler(model=model,
                       lr_fn=lambda epoch: cosine_decay(epoch,
                                                        cycle_length=epochs - 3,
                                                        init_lr=init_lr,
                                                        min_lr=init_lr / 100,
                                                        start=4))
    }
    traces.append(EpochScheduler(lr_schedule))
    estimator = fe.Estimator(
        pipeline=pipeline,
        network=network,
        epochs=epochs,
        traces=traces,
        monitor_names=["bbox_loss", "conf_loss", "cls_loss"],
        train_steps_per_epoch=train_steps_per_epoch,
        eval_steps_per_epoch=eval_steps_per_epoch)
    return estimator
def get_estimator(batch_size=8,
                  epochs=50,
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None,
                  save_dir=None,
                  data_dir=None):
    """Build a CUB200 multi-task estimator (classification + segmentation).

    A ResUnet50 produces both a label prediction and a mask prediction; the two
    unreduced losses are combined by an UncertaintyLossNet, and both models are
    updated from the combined "total_loss".

    Args:
        batch_size: Batch size used for all modes.
        epochs: Number of epochs to train for.
        train_steps_per_epoch: Optional cap on training steps per epoch.
        eval_steps_per_epoch: Optional cap on eval steps per epoch.
        save_dir: Directory where BestModelSaver stores the best model.
            Defaults to a fresh temporary directory per call.
        data_dir: Root directory of the CUB200 dataset (None lets the loader decide).

    Returns:
        A configured fe.Estimator.
    """
    # Fix: tempfile.mkdtemp() must not be a default argument — it would be
    # evaluated once at import time and shared by every call.
    if save_dir is None:
        save_dir = tempfile.mkdtemp()
    # load CUB200 dataset: 70/15/15 train/eval/test split
    train_data = cub200.load_data(root_dir=data_dir)
    eval_data = train_data.split(0.3)
    test_data = eval_data.split(0.5)
    # step 1, pipeline
    pipeline = fe.Pipeline(batch_size=batch_size,
                           train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           ops=[
                               ReadImage(inputs="image", outputs="image",
                                         parent_path=train_data.parent_path),
                               Normalize(inputs="image", outputs="image",
                                         mean=1.0, std=1.0, max_pixel_value=127.5),
                               # Segmentation masks ship as .mat annotations.
                               ReadMat(file='annotation', keys="seg",
                                       parent_path=train_data.parent_path),
                               LongestMaxSize(max_size=512,
                                              image_in="image", image_out="image",
                                              mask_in="seg", mask_out="seg"),
                               PadIfNeeded(min_height=512, min_width=512,
                                           image_in="image", image_out="image",
                                           mask_in="seg", mask_out="seg",
                                           border_mode=cv2.BORDER_CONSTANT,
                                           value=0, mask_value=0),
                               ShiftScaleRotate(image_in="image", mask_in="seg",
                                                image_out="image", mask_out="seg",
                                                mode="train",
                                                shift_limit=0.2,
                                                rotate_limit=15.0,
                                                scale_limit=0.2,
                                                border_mode=cv2.BORDER_CONSTANT,
                                                value=0, mask_value=0),
                               Sometimes(
                                   HorizontalFlip(image_in="image", mask_in="seg",
                                                  image_out="image", mask_out="seg",
                                                  mode="train")),
                               Reshape(shape=(512, 512, 1), inputs="seg", outputs="seg")
                           ])
    # step 2, network: two models — the backbone and the loss-weighting net
    resunet50 = fe.build(model_fn=ResUnet50,
                         model_name="resunet50",
                         optimizer_fn=lambda: tf.optimizers.Adam(1e-4))
    uncertainty = fe.build(model_fn=UncertaintyLossNet,
                           model_name="uncertainty",
                           optimizer_fn=lambda: tf.optimizers.Adam(2e-5))
    network = fe.Network(ops=[
        ModelOp(inputs='image', model=resunet50, outputs=["label_pred", "mask_pred"]),
        # average_loss=False keeps per-sample losses so UncertaintyLossNet can weight them.
        CrossEntropy(inputs=["label_pred", "label"], outputs="cls_loss",
                     form="sparse", average_loss=False),
        CrossEntropy(inputs=["mask_pred", "seg"], outputs="seg_loss",
                     form="binary", average_loss=False),
        ModelOp(inputs=["cls_loss", "seg_loss"], model=uncertainty, outputs="total_loss"),
        ReduceLoss(inputs="total_loss", outputs="total_loss"),
        UpdateOp(model=resunet50, loss_name="total_loss"),
        UpdateOp(model=uncertainty, loss_name="total_loss")
    ])
    # step 3, estimator
    traces = [
        Accuracy(true_key="label", pred_key="label_pred"),
        Dice(true_key="seg", pred_key='mask_pred'),
        BestModelSaver(model=resunet50, save_dir=save_dir,
                       metric="total_loss", save_best_mode="min"),
        LRScheduler(model=resunet50,
                    lr_fn=lambda step: cosine_decay(step, cycle_length=26400, init_lr=1e-4))
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             traces=traces,
                             epochs=epochs,
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch,
                             log_steps=500)
    return estimator
def get_estimator(data_dir=None,
                  epochs=12,
                  batch_size_per_gpu=4,
                  im_size=1344,
                  model_dir=None,
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None):
    """Build a SoloV2 instance-segmentation estimator on MSCOCO.

    Args:
        data_dir: Root directory of the MSCOCO dataset (None lets the loader decide).
        epochs: Number of epochs to train for.
        batch_size_per_gpu: Per-device batch size; total batch is this times device count.
        im_size: Square input size; must be a multiple of 32.
        model_dir: Directory where BestModelSaver stores the best model.
            Defaults to a fresh temporary directory per call.
        train_steps_per_epoch: Optional cap on training steps per epoch.
        eval_steps_per_epoch: Optional cap on eval steps per epoch.

    Returns:
        A configured fe.Estimator.

    Raises:
        ValueError: If `im_size` is not a multiple of 32.
    """
    # Fix: validate with an explicit exception rather than `assert`, which is
    # stripped when Python runs with -O.
    if im_size % 32 != 0:
        raise ValueError("im_size must be a multiple of 32")
    # Fix: tempfile.mkdtemp() must not be a default argument — it would be
    # evaluated once at import time and shared by every call.
    if model_dir is None:
        model_dir = tempfile.mkdtemp()
    num_device = get_num_devices()
    train_ds, val_ds = mscoco.load_data(root_dir=data_dir, load_masks=True)
    batch_size = num_device * batch_size_per_gpu
    pipeline = fe.Pipeline(
        train_data=train_ds,
        eval_data=val_ds,
        test_data=val_ds,
        ops=[
            ReadImage(inputs="image", outputs="image"),
            MergeMask(inputs="mask", outputs="mask"),
            GetImageSize(inputs="image", outputs="imsize", mode="test"),
            LongestMaxSize(max_size=im_size, image_in="image", mask_in="mask",
                           bbox_in="bbox", bbox_params="coco"),
            # Drop samples whose boxes were all eliminated by the resize.
            RemoveIf(fn=lambda x: len(x) == 0, inputs="bbox"),
            PadIfNeeded(min_height=im_size, min_width=im_size,
                        image_in="image", mask_in="mask",
                        bbox_in="bbox", bbox_params="coco",
                        border_mode=cv2.BORDER_CONSTANT, value=0),
            Sometimes(
                HorizontalFlip(image_in="image", mask_in="mask",
                               bbox_in="bbox", bbox_params="coco", mode="train")),
            Resize(height=im_size // 4, width=im_size // 4, image_in='mask'),  # downscale mask for memory efficiency
            Gt2Target(inputs=("mask", "bbox"), outputs=("gt_match", "mask", "classes")),
            Delete(keys="bbox"),
            Delete(keys="image_id", mode="!test"),
            Batch(batch_size=batch_size, pad_value=0)
        ],
        num_process=8 * num_device)
    # Linear scaling: base LR 1e-2 corresponds to a total batch size of 16.
    init_lr = 1e-2 / 16 * batch_size
    model = fe.build(model_fn=SoloV2,
                     optimizer_fn=lambda x: torch.optim.SGD(x, lr=init_lr, momentum=0.9))
    network = fe.Network(ops=[
        Normalize(inputs="image", outputs="image",
                  mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        Permute(inputs="image", outputs='image'),
        ModelOp(model=model, inputs="image",
                outputs=("feat_seg", "feat_cls_list", "feat_kernel_list")),
        # Identity LambdaOps unpack the 5-element feature lists into named keys.
        LambdaOp(fn=lambda x: x, inputs="feat_cls_list",
                 outputs=("cls1", "cls2", "cls3", "cls4", "cls5")),
        LambdaOp(fn=lambda x: x, inputs="feat_kernel_list",
                 outputs=("k1", "k2", "k3", "k4", "k5")),
        Solov2Loss(0, 40, inputs=("mask", "classes", "gt_match", "feat_seg", "cls1", "k1"),
                   outputs=("l_c1", "l_s1")),
        Solov2Loss(1, 36, inputs=("mask", "classes", "gt_match", "feat_seg", "cls2", "k2"),
                   outputs=("l_c2", "l_s2")),
        Solov2Loss(2, 24, inputs=("mask", "classes", "gt_match", "feat_seg", "cls3", "k3"),
                   outputs=("l_c3", "l_s3")),
        Solov2Loss(3, 16, inputs=("mask", "classes", "gt_match", "feat_seg", "cls4", "k4"),
                   outputs=("l_c4", "l_s4")),
        Solov2Loss(4, 12, inputs=("mask", "classes", "gt_match", "feat_seg", "cls5", "k5"),
                   outputs=("l_c5", "l_s5")),
        CombineLoss(inputs=("l_c1", "l_s1", "l_c2", "l_s2", "l_c3", "l_s3",
                            "l_c4", "l_s4", "l_c5", "l_s5"),
                    outputs=("total_loss", "cls_loss", "seg_loss")),
        L2Regularizaton(inputs="total_loss", outputs="total_loss_l2",
                        model=model, beta=1e-5, mode="train"),
        UpdateOp(model=model, loss_name="total_loss_l2"),
        PointsNMS(inputs="feat_cls_list", outputs="feat_cls_list", mode="test"),
        Predict(inputs=("feat_seg", "feat_cls_list", "feat_kernel_list"),
                outputs=("seg_preds", "cate_scores", "cate_labels"),
                mode="test")
    ])
    train_steps_epoch = int(np.ceil(len(train_ds) / batch_size))
    # Warm up during epoch 1, then cosine-decay for the remaining epochs.
    lr_schedule = {
        1: LRScheduler(model=model,
                       lr_fn=lambda step: lr_schedule_warmup(step, init_lr=init_lr)),
        2: LRScheduler(model=model,
                       lr_fn=lambda step: cosine_decay(step,
                                                       cycle_length=train_steps_epoch * (epochs - 1),
                                                       init_lr=init_lr,
                                                       min_lr=init_lr / 100,
                                                       start=train_steps_epoch))
    }
    traces = [
        EpochScheduler(lr_schedule),
        COCOMaskmAP(data_dir=val_ds.root_dir,
                    inputs=("seg_preds", "cate_scores", "cate_labels", "image_id", "imsize"),
                    mode="test"),
        # NOTE(review): save_best_mode is left at its default while tracking a
        # loss metric — confirm the default is "min", otherwise pass it explicitly.
        BestModelSaver(model=model, save_dir=model_dir, metric="total_loss")
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             monitor_names=("cls_loss", "seg_loss"),
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch)
    return estimator