def test_multi_input(self):
    data = [self.img1_path, self.img2_path]
    image = ReadImage(inputs='x', outputs='x')
    output = image.forward(data=data, state={})
    with self.subTest('Check first image in data'):
        self.assertTrue(is_equal(output[0], self.expected_image_output))
    with self.subTest('Check second image in data'):
        self.assertTrue(is_equal(output[1], self.expected_second_image_output))
def get_estimator(batch_size=4,
                  epochs=2,
                  max_train_steps_per_epoch=None,
                  log_steps=100,
                  style_weight=5.0,
                  content_weight=1.0,
                  tv_weight=1e-4,
                  save_dir=tempfile.mkdtemp(),
                  style_img_path='Vassily_Kandinsky,_1913_-_Composition_7.jpg',
                  data_dir=None):
    train_data, _ = mscoco.load_data(root_dir=data_dir, load_bboxes=False, load_masks=False, load_captions=False)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    style_img = cv2.imread(style_img_path)
    assert style_img is not None, "cannot load the style image, please check style_img_path"
    style_img = cv2.resize(style_img, (256, 256))
    style_img = (style_img.astype(np.float32) - 127.5) / 127.5
    # step 1: pipeline
    pipeline = fe.Pipeline(
        train_data=train_data,
        batch_size=batch_size,
        ops=[
            ReadImage(inputs="image", outputs="image"),
            Normalize(inputs="image", outputs="image", mean=1.0, std=1.0, max_pixel_value=127.5),
            Resize(height=256, width=256, image_in="image", image_out="image"),
            LambdaOp(fn=lambda: style_img, outputs="style_image"),
            ChannelTranspose(inputs=["image", "style_image"], outputs=["image", "style_image"])
        ])
    # step 2: network
    model = fe.build(model_fn=StyleTransferNet,
                     model_name="style_transfer_net",
                     optimizer_fn=lambda x: torch.optim.Adam(x, lr=1e-3))
    network = fe.Network(ops=[
        ModelOp(inputs="image", model=model, outputs="image_out"),
        ExtractVGGFeatures(inputs="style_image", outputs="y_style", device=device),
        ExtractVGGFeatures(inputs="image", outputs="y_content", device=device),
        ExtractVGGFeatures(inputs="image_out", outputs="y_pred", device=device),
        StyleContentLoss(style_weight=style_weight,
                         content_weight=content_weight,
                         tv_weight=tv_weight,
                         inputs=('y_pred', 'y_style', 'y_content', 'image_out'),
                         outputs='loss'),
        UpdateOp(model=model, loss_name="loss")
    ])
    # step 3: estimator
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             traces=ModelSaver(model=model, save_dir=save_dir, frequency=1),
                             epochs=epochs,
                             max_train_steps_per_epoch=max_train_steps_per_epoch,
                             log_steps=log_steps)
    return estimator
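# Usage sketch (added illustration, not part of the original example): each get_estimator()
# in this collection returns a FastEstimator Estimator, which is normally driven from a
# script entry point via Estimator.fit(). Argument values here are placeholders.
if __name__ == "__main__":
    est = get_estimator(epochs=2, batch_size=4)
    est.fit()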
def get_estimator(epochs=20,
                  batch_size=4,
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp(),
                  log_steps=20,
                  data_dir=None):
    # step 1
    csv = montgomery.load_data(root_dir=data_dir)
    pipeline = fe.Pipeline(
        train_data=csv,
        eval_data=csv.split(0.2),
        batch_size=batch_size,
        ops=[
            ReadImage(inputs="image", parent_path=csv.parent_path, outputs="image", color_flag='gray'),
            ReadImage(inputs="mask_left",
                      parent_path=csv.parent_path,
                      outputs="mask_left",
                      color_flag='gray',
                      mode='!infer'),
            ReadImage(inputs="mask_right",
                      parent_path=csv.parent_path,
                      outputs="mask_right",
                      color_flag='gray',
                      mode='!infer'),
            CombineLeftRightMask(inputs=("mask_left", "mask_right"), outputs="mask", mode='!infer'),
            Resize(image_in="image", width=512, height=512),
            Resize(image_in="mask", width=512, height=512, mode='!infer'),
            Sometimes(numpy_op=HorizontalFlip(image_in="image", mask_in="mask", mode='train')),
            Sometimes(numpy_op=Rotate(image_in="image",
                                      mask_in="mask",
                                      limit=(-10, 10),
                                      border_mode=cv2.BORDER_CONSTANT,
                                      mode='train')),
            Minmax(inputs="image", outputs="image"),
            Minmax(inputs="mask", outputs="mask", mode='!infer')
        ])
    # step 2
    model = fe.build(model_fn=lambda: UNet(input_size=(512, 512, 1)),
                     optimizer_fn=lambda: tf.keras.optimizers.Adam(learning_rate=0.0001),
                     model_name="lung_segmentation")
    network = fe.Network(ops=[
        ModelOp(inputs="image", model=model, outputs="pred_segment"),
        CrossEntropy(inputs=("pred_segment", "mask"), outputs="loss", form="binary"),
        UpdateOp(model=model, loss_name="loss")
    ])
    # step 3
    traces = [
        Dice(true_key="mask", pred_key="pred_segment"),
        BestModelSaver(model=model, save_dir=save_dir, metric='Dice', save_best_mode='max')
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             log_steps=log_steps,
                             traces=traces,
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch)
    return estimator
def get_estimator(data_dir=None,
                  model_dir=tempfile.mkdtemp(),
                  epochs=200,
                  batch_size_per_gpu=32,
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None):
    num_device = get_num_devices()
    train_ds, val_ds = mscoco.load_data(root_dir=data_dir)
    train_ds = PreMosaicDataset(mscoco_ds=train_ds)
    batch_size = num_device * batch_size_per_gpu
    pipeline = fe.Pipeline(
        train_data=train_ds,
        eval_data=val_ds,
        ops=[
            ReadImage(inputs=("image1", "image2", "image3", "image4"),
                      outputs=("image1", "image2", "image3", "image4"),
                      mode="train"),
            ReadImage(inputs="image", outputs="image", mode="eval"),
            LongestMaxSize(max_size=640,
                           image_in="image1",
                           bbox_in="bbox1",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="train"),
            LongestMaxSize(max_size=640,
                           image_in="image2",
                           bbox_in="bbox2",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="train"),
            LongestMaxSize(max_size=640,
                           image_in="image3",
                           bbox_in="bbox3",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="train"),
            LongestMaxSize(max_size=640,
                           image_in="image4",
                           bbox_in="bbox4",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="train"),
            LongestMaxSize(max_size=640,
                           image_in="image",
                           bbox_in="bbox",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="eval"),
            PadIfNeeded(min_height=640,
                        min_width=640,
                        image_in="image",
                        bbox_in="bbox",
                        bbox_params=BboxParams("coco", min_area=1.0),
                        mode="eval",
                        border_mode=cv2.BORDER_CONSTANT,
                        value=(114, 114, 114)),
            CombineMosaic(inputs=("image1", "image2", "image3", "image4", "bbox1", "bbox2", "bbox3", "bbox4"),
                          outputs=("image", "bbox"),
                          mode="train"),
            CenterCrop(height=640,
                       width=640,
                       image_in="image",
                       bbox_in="bbox",
                       bbox_params=BboxParams("coco", min_area=1.0),
                       mode="train"),
            Sometimes(
                HorizontalFlip(image_in="image",
                               bbox_in="bbox",
                               bbox_params=BboxParams("coco", min_area=1.0),
                               mode="train")),
            HSVAugment(inputs="image", outputs="image", mode="train"),
            ToArray(inputs="bbox", outputs="bbox", dtype="float32"),
            CategoryID2ClassID(inputs="bbox", outputs="bbox"),
            GTBox(inputs="bbox", outputs=("gt_sbbox", "gt_mbbox", "gt_lbbox"), image_size=640),
            Delete(keys=("image1", "image2", "image3", "image4", "bbox1", "bbox2", "bbox3", "bbox4", "bbox"),
                   mode="train"),
            Delete(keys="image_id", mode="eval"),
            Batch(batch_size=batch_size, pad_value=0)
        ])
    init_lr = 1e-2 / 64 * batch_size
    model = fe.build(
        lambda: YoloV5(w=640, h=640, c=3),
        optimizer_fn=lambda x: torch.optim.SGD(x, lr=init_lr, momentum=0.937, weight_decay=0.0005, nesterov=True),
        mixed_precision=True)
    network = fe.Network(ops=[
        RescaleTranspose(inputs="image", outputs="image"),
        ModelOp(model=model, inputs="image", outputs=("pred_s", "pred_m", "pred_l")),
        DecodePred(inputs=("pred_s", "pred_m", "pred_l"), outputs=("pred_s", "pred_m", "pred_l")),
        ComputeLoss(inputs=("pred_s", "gt_sbbox"), outputs=("sbbox_loss", "sconf_loss", "scls_loss")),
        ComputeLoss(inputs=("pred_m", "gt_mbbox"), outputs=("mbbox_loss", "mconf_loss", "mcls_loss")),
        ComputeLoss(inputs=("pred_l", "gt_lbbox"), outputs=("lbbox_loss", "lconf_loss", "lcls_loss")),
        Average(inputs=("sbbox_loss", "mbbox_loss", "lbbox_loss"), outputs="bbox_loss"),
        Average(inputs=("sconf_loss", "mconf_loss", "lconf_loss"), outputs="conf_loss"),
        Average(inputs=("scls_loss", "mcls_loss", "lcls_loss"), outputs="cls_loss"),
        Average(inputs=("bbox_loss", "conf_loss", "cls_loss"), outputs="total_loss"),
        PredictBox(width=640, height=640, inputs=("pred_s", "pred_m", "pred_l"), outputs="box_pred", mode="eval"),
        UpdateOp(model=model, loss_name="total_loss")
    ])
    traces = [
        MeanAveragePrecision(num_classes=80, true_key='bbox', pred_key='box_pred', mode="eval"),
        BestModelSaver(model=model, save_dir=model_dir, metric='mAP', save_best_mode="max")
    ]
    lr_schedule = {
        1:
        LRScheduler(model=model,
                    lr_fn=lambda step: lr_schedule_warmup(
                        step, train_steps_epoch=np.ceil(len(train_ds) / batch_size), init_lr=init_lr)),
        4:
        LRScheduler(model=model,
                    lr_fn=lambda epoch: cosine_decay(
                        epoch, cycle_length=epochs - 3, init_lr=init_lr, min_lr=init_lr / 100, start=4))
    }
    traces.append(EpochScheduler(lr_schedule))
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             monitor_names=["bbox_loss", "conf_loss", "cls_loss"],
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch)
    return estimator
def get_estimator(batch_size=8,
                  epochs=50,
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp(),
                  data_dir=None):
    # load CUB200 dataset.
    train_data = cub200.load_data(root_dir=data_dir)
    eval_data = train_data.split(0.3)
    test_data = eval_data.split(0.5)

    # step 1, pipeline
    pipeline = fe.Pipeline(
        batch_size=batch_size,
        train_data=train_data,
        eval_data=eval_data,
        test_data=test_data,
        ops=[
            ReadImage(inputs="image", outputs="image", parent_path=train_data.parent_path),
            Normalize(inputs="image", outputs="image", mean=1.0, std=1.0, max_pixel_value=127.5),
            ReadMat(file='annotation', keys="seg", parent_path=train_data.parent_path),
            LongestMaxSize(max_size=512, image_in="image", image_out="image", mask_in="seg", mask_out="seg"),
            PadIfNeeded(min_height=512,
                        min_width=512,
                        image_in="image",
                        image_out="image",
                        mask_in="seg",
                        mask_out="seg",
                        border_mode=cv2.BORDER_CONSTANT,
                        value=0,
                        mask_value=0),
            ShiftScaleRotate(image_in="image",
                             mask_in="seg",
                             image_out="image",
                             mask_out="seg",
                             mode="train",
                             shift_limit=0.2,
                             rotate_limit=15.0,
                             scale_limit=0.2,
                             border_mode=cv2.BORDER_CONSTANT,
                             value=0,
                             mask_value=0),
            Sometimes(HorizontalFlip(image_in="image", mask_in="seg", image_out="image", mask_out="seg", mode="train")),
            Reshape(shape=(512, 512, 1), inputs="seg", outputs="seg")
        ])

    # step 2, network
    resunet50 = fe.build(model_fn=ResUnet50, model_name="resunet50", optimizer_fn=lambda: tf.optimizers.Adam(1e-4))
    uncertainty = fe.build(model_fn=UncertaintyLossNet,
                           model_name="uncertainty",
                           optimizer_fn=lambda: tf.optimizers.Adam(2e-5))
    network = fe.Network(ops=[
        ModelOp(inputs='image', model=resunet50, outputs=["label_pred", "mask_pred"]),
        CrossEntropy(inputs=["label_pred", "label"], outputs="cls_loss", form="sparse", average_loss=False),
        CrossEntropy(inputs=["mask_pred", "seg"], outputs="seg_loss", form="binary", average_loss=False),
        ModelOp(inputs=["cls_loss", "seg_loss"], model=uncertainty, outputs="total_loss"),
        ReduceLoss(inputs="total_loss", outputs="total_loss"),
        UpdateOp(model=resunet50, loss_name="total_loss"),
        UpdateOp(model=uncertainty, loss_name="total_loss")
    ])

    # step 3, estimator
    traces = [
        Accuracy(true_key="label", pred_key="label_pred"),
        Dice(true_key="seg", pred_key='mask_pred'),
        BestModelSaver(model=resunet50, save_dir=save_dir, metric="total_loss", save_best_mode="min"),
        LRScheduler(model=resunet50, lr_fn=lambda step: cosine_decay(step, cycle_length=26400, init_lr=1e-4))
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             traces=traces,
                             epochs=epochs,
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch,
                             log_steps=500)
    return estimator
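# Usage sketch (assumed, not from the original file): because this pipeline also declares
# test_data, the trained estimator can additionally be scored on the held-out split with
# Estimator.test() after fitting.
if __name__ == "__main__":
    est = get_estimator()
    est.fit()
    est.test()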
def get_estimator(target_size=128,
                  epochs=55,
                  save_dir=tempfile.mkdtemp(),
                  max_train_steps_per_epoch=None,
                  data_dir=None):
    # assert growth parameters
    num_grow = np.log2(target_size) - 2
    assert num_grow >= 1 and num_grow % 1 == 0, "target_size must be a power of 2 and at least 8"
    num_phases = int(2 * num_grow + 1)
    assert epochs % num_phases == 0, "epochs must be a multiple of {} for target_size {}".format(
        num_phases, target_size)
    num_grow, phase_length = int(num_grow), int(epochs / num_phases)
    event_epoch = [1, 1 + phase_length] + [phase_length * (2 * i + 1) + 1 for i in range(1, num_grow)]
    event_size = [4] + [2**(i + 3) for i in range(num_grow)]
    # set up data schedules
    dataset = nih_chestxray.load_data(root_dir=data_dir)
    resize_map = {
        epoch: Resize(image_in="x", image_out="x", height=size, width=size)
        for (epoch, size) in zip(event_epoch, event_size)
    }
    resize_low_res_map1 = {
        epoch: Resize(image_in="x", image_out="x_low_res", height=size // 2, width=size // 2)
        for (epoch, size) in zip(event_epoch, event_size)
    }
    resize_low_res_map2 = {
        epoch: Resize(image_in="x_low_res", image_out="x_low_res", height=size, width=size)
        for (epoch, size) in zip(event_epoch, event_size)
    }
    batch_size_map = {
        epoch: 512 // size * get_num_devices() if size <= 128 else 4 * get_num_devices()
        for (epoch, size) in zip(event_epoch, event_size)
    }
    batch_scheduler = EpochScheduler(epoch_dict=batch_size_map)
    pipeline = fe.Pipeline(
        batch_size=batch_scheduler,
        train_data=dataset,
        drop_last=True,
        ops=[
            ReadImage(inputs="x", outputs="x", color_flag='gray'),
            EpochScheduler(epoch_dict=resize_map),
            EpochScheduler(epoch_dict=resize_low_res_map1),
            EpochScheduler(epoch_dict=resize_low_res_map2),
            Normalize(inputs=["x", "x_low_res"], outputs=["x", "x_low_res"], mean=1.0, std=1.0, max_pixel_value=127.5),
            LambdaOp(fn=lambda: np.random.normal(size=[512]).astype('float32'), outputs="z")
        ])
    # now model schedule
    fade_in_alpha = tf.Variable(initial_value=1.0, dtype='float32', trainable=False)
    d_models = fe.build(
        model_fn=lambda: build_D(fade_in_alpha, target_resolution=int(np.log2(target_size)), num_channels=1),
        optimizer_fn=[lambda: Adam(0.001, beta_1=0.0, beta_2=0.99, epsilon=1e-8)] * len(event_size),
        model_name=["d_{}".format(size) for size in event_size])
    g_models = fe.build(
        model_fn=lambda: build_G(fade_in_alpha, target_resolution=int(np.log2(target_size)), num_channels=1),
        optimizer_fn=[lambda: Adam(0.001, beta_1=0.0, beta_2=0.99, epsilon=1e-8)] * len(event_size) + [None],
        model_name=["g_{}".format(size) for size in event_size] + ["G"])
    fake_img_map = {
        epoch: ModelOp(inputs="z", outputs="x_fake", model=model)
        for (epoch, model) in zip(event_epoch, g_models[:-1])
    }
    fake_score_map = {
        epoch: ModelOp(inputs="x_fake", outputs="fake_score", model=model)
        for (epoch, model) in zip(event_epoch, d_models)
    }
    real_score_map = {
        epoch: ModelOp(inputs="x_blend", outputs="real_score", model=model)
        for (epoch, model) in zip(event_epoch, d_models)
    }
    interp_score_map = {
        epoch: ModelOp(inputs="x_interp", outputs="interp_score", model=model)
        for (epoch, model) in zip(event_epoch, d_models)
    }
    g_update_map = {
        epoch: UpdateOp(loss_name="gloss", model=model)
        for (epoch, model) in zip(event_epoch, g_models[:-1])
    }
    d_update_map = {
        epoch: UpdateOp(loss_name="dloss", model=model)
        for (epoch, model) in zip(event_epoch, d_models)
    }
    network = fe.Network(ops=[
        EpochScheduler(fake_img_map),
        EpochScheduler(fake_score_map),
        ImageBlender(alpha=fade_in_alpha, inputs=("x", "x_low_res"), outputs="x_blend"),
        EpochScheduler(real_score_map),
        Interpolate(inputs=("x_fake", "x"), outputs="x_interp"),
        EpochScheduler(interp_score_map),
        GradientPenalty(inputs=("x_interp", "interp_score"), outputs="gp"),
        GLoss(inputs="fake_score", outputs="gloss"),
        DLoss(inputs=("real_score", "fake_score", "gp"), outputs="dloss"),
        EpochScheduler(g_update_map),
        EpochScheduler(d_update_map)
    ])
    traces = [
        AlphaController(alpha=fade_in_alpha,
                        fade_start_epochs=event_epoch[1:],
                        duration=phase_length,
                        batch_scheduler=batch_scheduler,
                        num_examples=len(dataset)),
        ModelSaver(model=g_models[-1], save_dir=save_dir, frequency=phase_length),
        ImageSaving(epoch_model_map={
            epoch - 1: model
            for (epoch, model) in zip(event_epoch[1:] + [epochs + 1], g_models[:-1])
        },
                    save_dir=save_dir)
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             max_train_steps_per_epoch=max_train_steps_per_epoch)
    return estimator
def get_estimator(data_dir=None,
                  model_dir=tempfile.mkdtemp(),
                  batch_size=16,
                  epochs=13,
                  max_train_steps_per_epoch=None,
                  max_eval_steps_per_epoch=None,
                  image_size=512,
                  num_classes=90):
    # pipeline
    train_ds, eval_ds = mscoco.load_data(root_dir=data_dir)
    pipeline = fe.Pipeline(
        train_data=train_ds,
        eval_data=eval_ds,
        batch_size=batch_size,
        ops=[
            ReadImage(inputs="image", outputs="image"),
            LongestMaxSize(image_size,
                           image_in="image",
                           image_out="image",
                           bbox_in="bbox",
                           bbox_out="bbox",
                           bbox_params=BboxParams("coco", min_area=1.0)),
            PadIfNeeded(
                image_size,
                image_size,
                border_mode=cv2.BORDER_CONSTANT,
                image_in="image",
                image_out="image",
                bbox_in="bbox",
                bbox_out="bbox",
                bbox_params=BboxParams("coco", min_area=1.0),
            ),
            Sometimes(
                HorizontalFlip(mode="train",
                               image_in="image",
                               image_out="image",
                               bbox_in="bbox",
                               bbox_out="bbox",
                               bbox_params='coco')),
            # normalize from uint8 to [-1, 1]
            Normalize(inputs="image", outputs="image", mean=1.0, std=1.0, max_pixel_value=127.5),
            ShiftLabel(inputs="bbox", outputs="bbox"),
            AnchorBox(inputs="bbox", outputs="anchorbox", width=image_size, height=image_size),
            ChannelTranspose(inputs="image", outputs="image")
        ],
        pad_value=0)
    # network
    model = fe.build(model_fn=lambda: RetinaNet(num_classes=num_classes),
                     optimizer_fn=lambda x: torch.optim.SGD(x, lr=2e-4, momentum=0.9, weight_decay=0.0001))
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="image", outputs=["cls_pred", "loc_pred"]),
        RetinaLoss(inputs=["anchorbox", "cls_pred", "loc_pred"], outputs=["total_loss", "focal_loss", "l1_loss"]),
        UpdateOp(model=model, loss_name="total_loss"),
        PredictBox(input_shape=(image_size, image_size, 3),
                   inputs=["cls_pred", "loc_pred"],
                   outputs="pred",
                   mode="eval")
    ])
    # estimator
    traces = [
        LRScheduler(model=model, lr_fn=lr_fn),
        BestModelSaver(model=model, save_dir=model_dir, metric='mAP', save_best_mode="max"),
        MeanAveragePrecision(num_classes=num_classes, true_key='bbox', pred_key='pred', mode="eval")
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             max_train_steps_per_epoch=max_train_steps_per_epoch,
                             max_eval_steps_per_epoch=max_eval_steps_per_epoch,
                             monitor_names=["l1_loss", "focal_loss"])
    return estimator
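# The LRScheduler above references an lr_fn defined elsewhere in the original script and
# not shown here. A hypothetical stand-in (illustrative values only, not the original
# schedule): linear warmup over the first 1000 steps, then a constant learning rate.
def lr_fn(step):
    init_lr, warmup_steps = 2e-4, 1000
    return init_lr * step / warmup_steps if step < warmup_steps else init_lr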
def get_estimator(weight=10.0,
                  epochs=200,
                  batch_size=1,
                  max_train_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp(),
                  data_dir=None):
    train_data, _ = load_data(batch_size=batch_size, root_dir=data_dir)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    pipeline = fe.Pipeline(
        train_data=train_data,
        ops=[
            ReadImage(inputs=["A", "B"], outputs=["A", "B"]),
            Normalize(inputs=["A", "B"], outputs=["real_A", "real_B"], mean=1.0, std=1.0, max_pixel_value=127.5),
            Resize(height=286, width=286, image_in="real_A", image_out="real_A", mode="train"),
            RandomCrop(height=256, width=256, image_in="real_A", image_out="real_A", mode="train"),
            Resize(height=286, width=286, image_in="real_B", image_out="real_B", mode="train"),
            RandomCrop(height=256, width=256, image_in="real_B", image_out="real_B", mode="train"),
            Sometimes(HorizontalFlip(image_in="real_A", image_out="real_A", mode="train")),
            Sometimes(HorizontalFlip(image_in="real_B", image_out="real_B", mode="train")),
            ChannelTranspose(inputs=["real_A", "real_B"], outputs=["real_A", "real_B"])
        ])
    g_AtoB = fe.build(model_fn=Generator,
                      model_name="g_AtoB",
                      optimizer_fn=lambda x: torch.optim.Adam(x, lr=2e-4, betas=(0.5, 0.999)))
    g_BtoA = fe.build(model_fn=Generator,
                      model_name="g_BtoA",
                      optimizer_fn=lambda x: torch.optim.Adam(x, lr=2e-4, betas=(0.5, 0.999)))
    d_A = fe.build(model_fn=Discriminator,
                   model_name="d_A",
                   optimizer_fn=lambda x: torch.optim.Adam(x, lr=2e-4, betas=(0.5, 0.999)))
    d_B = fe.build(model_fn=Discriminator,
                   model_name="d_B",
                   optimizer_fn=lambda x: torch.optim.Adam(x, lr=2e-4, betas=(0.5, 0.999)))
    network = fe.Network(ops=[
        ModelOp(inputs="real_A", model=g_AtoB, outputs="fake_B"),
        ModelOp(inputs="real_B", model=g_BtoA, outputs="fake_A"),
        Buffer(image_in="fake_A", image_out="buffer_fake_A"),
        Buffer(image_in="fake_B", image_out="buffer_fake_B"),
        ModelOp(inputs="real_A", model=d_A, outputs="d_real_A"),
        ModelOp(inputs="fake_A", model=d_A, outputs="d_fake_A"),
        ModelOp(inputs="buffer_fake_A", model=d_A, outputs="buffer_d_fake_A"),
        ModelOp(inputs="real_B", model=d_B, outputs="d_real_B"),
        ModelOp(inputs="fake_B", model=d_B, outputs="d_fake_B"),
        ModelOp(inputs="buffer_fake_B", model=d_B, outputs="buffer_d_fake_B"),
        ModelOp(inputs="real_A", model=g_BtoA, outputs="same_A"),
        ModelOp(inputs="fake_B", model=g_BtoA, outputs="cycled_A"),
        ModelOp(inputs="real_B", model=g_AtoB, outputs="same_B"),
        ModelOp(inputs="fake_A", model=g_AtoB, outputs="cycled_B"),
        GLoss(inputs=("real_A", "d_fake_B", "cycled_A", "same_A"), weight=weight, device=device, outputs="g_AtoB_loss"),
        GLoss(inputs=("real_B", "d_fake_A", "cycled_B", "same_B"), weight=weight, device=device, outputs="g_BtoA_loss"),
        DLoss(inputs=("d_real_A", "buffer_d_fake_A"), outputs="d_A_loss", device=device),
        DLoss(inputs=("d_real_B", "buffer_d_fake_B"), outputs="d_B_loss", device=device),
        UpdateOp(model=g_AtoB, loss_name="g_AtoB_loss"),
        UpdateOp(model=g_BtoA, loss_name="g_BtoA_loss"),
        UpdateOp(model=d_A, loss_name="d_A_loss"),
        UpdateOp(model=d_B, loss_name="d_B_loss")
    ])
    traces = [
        ModelSaver(model=g_AtoB, save_dir=save_dir, frequency=10),
        ModelSaver(model=g_BtoA, save_dir=save_dir, frequency=10),
        LRScheduler(model=g_AtoB, lr_fn=lr_schedule),
        LRScheduler(model=g_BtoA, lr_fn=lr_schedule),
        LRScheduler(model=d_A, lr_fn=lr_schedule),
        LRScheduler(model=d_B, lr_fn=lr_schedule)
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             traces=traces,
                             max_train_steps_per_epoch=max_train_steps_per_epoch)
    return estimator
def get_estimator(weight=10.0,
                  epochs=200,
                  batch_size=1,
                  train_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp(),
                  data_dir=None):
    train_data, _ = load_data(batch_size=batch_size, root_dir=data_dir)
    pipeline = fe.Pipeline(
        train_data=train_data,
        ops=[
            ReadImage(inputs=["A", "B"], outputs=["A", "B"]),
            Normalize(inputs=["A", "B"], outputs=["real_A", "real_B"], mean=1.0, std=1.0, max_pixel_value=127.5),
            Resize(height=286, width=286, image_in="real_A", image_out="real_A", mode="train"),
            RandomCrop(height=256, width=256, image_in="real_A", image_out="real_A", mode="train"),
            Resize(height=286, width=286, image_in="real_B", image_out="real_B", mode="train"),
            RandomCrop(height=256, width=256, image_in="real_B", image_out="real_B", mode="train"),
            Sometimes(HorizontalFlip(image_in="real_A", image_out="real_A", mode="train")),
            Sometimes(HorizontalFlip(image_in="real_B", image_out="real_B", mode="train")),
            PlaceholderOp(outputs=("index_A", "buffer_A")),
            PlaceholderOp(outputs=("index_B", "buffer_B"))
        ])
    g_AtoB = fe.build(model_fn=build_generator, model_name="g_AtoB", optimizer_fn=lambda: tf.optimizers.Adam(2e-4, 0.5))
    g_BtoA = fe.build(model_fn=build_generator, model_name="g_BtoA", optimizer_fn=lambda: tf.optimizers.Adam(2e-4, 0.5))
    d_A = fe.build(model_fn=build_discriminator, model_name="d_A", optimizer_fn=lambda: tf.optimizers.Adam(2e-4, 0.5))
    d_B = fe.build(model_fn=build_discriminator, model_name="d_B", optimizer_fn=lambda: tf.optimizers.Adam(2e-4, 0.5))
    network = fe.Network(ops=[
        ModelOp(inputs="real_A", model=g_AtoB, outputs="fake_B"),
        ModelOp(inputs="real_B", model=g_BtoA, outputs="fake_A"),
        Buffer(image_in="fake_A", buffer_in="buffer_A", index_in="index_A", image_out="buffer_fake_A"),
        Buffer(image_in="fake_B", buffer_in="buffer_B", index_in="index_B", image_out="buffer_fake_B"),
        ModelOp(inputs="real_A", model=d_A, outputs="d_real_A"),
        ModelOp(inputs="fake_A", model=d_A, outputs="d_fake_A"),
        ModelOp(inputs="buffer_fake_A", model=d_A, outputs="buffer_d_fake_A"),
        ModelOp(inputs="real_B", model=d_B, outputs="d_real_B"),
        ModelOp(inputs="fake_B", model=d_B, outputs="d_fake_B"),
        ModelOp(inputs="buffer_fake_B", model=d_B, outputs="buffer_d_fake_B"),
        ModelOp(inputs="real_A", model=g_BtoA, outputs="same_A"),
        ModelOp(inputs="fake_B", model=g_BtoA, outputs="cycled_A"),
        ModelOp(inputs="real_B", model=g_AtoB, outputs="same_B"),
        ModelOp(inputs="fake_A", model=g_AtoB, outputs="cycled_B"),
        GLoss(inputs=("real_A", "d_fake_B", "cycled_A", "same_A"), weight=weight, outputs="g_AtoB_loss"),
        GLoss(inputs=("real_B", "d_fake_A", "cycled_B", "same_B"), weight=weight, outputs="g_BtoA_loss"),
        DLoss(inputs=("d_real_A", "buffer_d_fake_A"), outputs="d_A_loss"),
        DLoss(inputs=("d_real_B", "buffer_d_fake_B"), outputs="d_B_loss"),
        UpdateOp(model=g_AtoB, loss_name="g_AtoB_loss"),
        UpdateOp(model=g_BtoA, loss_name="g_BtoA_loss"),
        UpdateOp(model=d_A, loss_name="d_A_loss"),
        UpdateOp(model=d_B, loss_name="d_B_loss")
    ])
    traces = [
        BufferUpdate(input_name="fake_A",
                     buffer_size=50,
                     batch_size=batch_size,
                     mode="train",
                     output_name=["buffer_A", "index_A"]),
        BufferUpdate(input_name="fake_B",
                     buffer_size=50,
                     batch_size=batch_size,
                     mode="train",
                     output_name=["buffer_B", "index_B"]),
        ModelSaver(model=g_AtoB, save_dir=save_dir, frequency=5),
        ModelSaver(model=g_BtoA, save_dir=save_dir, frequency=5),
        LRScheduler(model=g_AtoB, lr_fn=lr_schedule),
        LRScheduler(model=g_BtoA, lr_fn=lr_schedule),
        LRScheduler(model=d_A, lr_fn=lr_schedule),
        LRScheduler(model=d_B, lr_fn=lr_schedule)
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             traces=traces,
                             train_steps_per_epoch=train_steps_per_epoch)
    return estimator
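# Both CycleGAN variants above pass an lr_schedule helper that is defined elsewhere in
# their scripts. A hedged sketch following the conventional CycleGAN policy (assumed, not
# copied from the original): hold the initial rate for the first 100 epochs, then decay
# linearly toward zero over the remaining 100.
def lr_schedule(epoch):
    init_lr, total_epochs = 2e-4, 200
    if epoch <= total_epochs // 2:
        return init_lr
    return init_lr * (total_epochs - epoch) / (total_epochs - total_epochs // 2)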
def get_estimator(data_dir=None,
                  epochs=12,
                  batch_size_per_gpu=4,
                  im_size=1344,
                  model_dir=tempfile.mkdtemp(),
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None):
    assert im_size % 32 == 0, "im_size must be a multiple of 32"
    num_device = get_num_devices()
    train_ds, val_ds = mscoco.load_data(root_dir=data_dir, load_masks=True)
    batch_size = num_device * batch_size_per_gpu
    pipeline = fe.Pipeline(
        train_data=train_ds,
        eval_data=val_ds,
        test_data=val_ds,
        ops=[
            ReadImage(inputs="image", outputs="image"),
            MergeMask(inputs="mask", outputs="mask"),
            GetImageSize(inputs="image", outputs="imsize", mode="test"),
            LongestMaxSize(max_size=im_size, image_in="image", mask_in="mask", bbox_in="bbox", bbox_params="coco"),
            RemoveIf(fn=lambda x: len(x) == 0, inputs="bbox"),
            PadIfNeeded(min_height=im_size,
                        min_width=im_size,
                        image_in="image",
                        mask_in="mask",
                        bbox_in="bbox",
                        bbox_params="coco",
                        border_mode=cv2.BORDER_CONSTANT,
                        value=0),
            Sometimes(HorizontalFlip(image_in="image", mask_in="mask", bbox_in="bbox", bbox_params="coco", mode="train")),
            Resize(height=im_size // 4, width=im_size // 4, image_in='mask'),  # downscale mask for memory efficiency
            Gt2Target(inputs=("mask", "bbox"), outputs=("gt_match", "mask", "classes")),
            Delete(keys="bbox"),
            Delete(keys="image_id", mode="!test"),
            Batch(batch_size=batch_size, pad_value=0)
        ],
        num_process=8 * num_device)
    init_lr = 1e-2 / 16 * batch_size
    model = fe.build(model_fn=SoloV2, optimizer_fn=lambda x: torch.optim.SGD(x, lr=init_lr, momentum=0.9))
    network = fe.Network(ops=[
        Normalize(inputs="image", outputs="image", mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        Permute(inputs="image", outputs='image'),
        ModelOp(model=model, inputs="image", outputs=("feat_seg", "feat_cls_list", "feat_kernel_list")),
        LambdaOp(fn=lambda x: x, inputs="feat_cls_list", outputs=("cls1", "cls2", "cls3", "cls4", "cls5")),
        LambdaOp(fn=lambda x: x, inputs="feat_kernel_list", outputs=("k1", "k2", "k3", "k4", "k5")),
        Solov2Loss(0, 40, inputs=("mask", "classes", "gt_match", "feat_seg", "cls1", "k1"), outputs=("l_c1", "l_s1")),
        Solov2Loss(1, 36, inputs=("mask", "classes", "gt_match", "feat_seg", "cls2", "k2"), outputs=("l_c2", "l_s2")),
        Solov2Loss(2, 24, inputs=("mask", "classes", "gt_match", "feat_seg", "cls3", "k3"), outputs=("l_c3", "l_s3")),
        Solov2Loss(3, 16, inputs=("mask", "classes", "gt_match", "feat_seg", "cls4", "k4"), outputs=("l_c4", "l_s4")),
        Solov2Loss(4, 12, inputs=("mask", "classes", "gt_match", "feat_seg", "cls5", "k5"), outputs=("l_c5", "l_s5")),
        CombineLoss(inputs=("l_c1", "l_s1", "l_c2", "l_s2", "l_c3", "l_s3", "l_c4", "l_s4", "l_c5", "l_s5"),
                    outputs=("total_loss", "cls_loss", "seg_loss")),
        L2Regularizaton(inputs="total_loss", outputs="total_loss_l2", model=model, beta=1e-5, mode="train"),
        UpdateOp(model=model, loss_name="total_loss_l2"),
        PointsNMS(inputs="feat_cls_list", outputs="feat_cls_list", mode="test"),
        Predict(inputs=("feat_seg", "feat_cls_list", "feat_kernel_list"),
                outputs=("seg_preds", "cate_scores", "cate_labels"),
                mode="test")
    ])
    train_steps_epoch = int(np.ceil(len(train_ds) / batch_size))
    lr_schedule = {
        1:
        LRScheduler(model=model, lr_fn=lambda step: lr_schedule_warmup(step, init_lr=init_lr)),
        2:
        LRScheduler(model=model,
                    lr_fn=lambda step: cosine_decay(step,
                                                    cycle_length=train_steps_epoch * (epochs - 1),
                                                    init_lr=init_lr,
                                                    min_lr=init_lr / 100,
                                                    start=train_steps_epoch))
    }
    traces = [
        EpochScheduler(lr_schedule),
        COCOMaskmAP(data_dir=val_ds.root_dir,
                    inputs=("seg_preds", "cate_scores", "cate_labels", "image_id", "imsize"),
                    mode="test"),
        BestModelSaver(model=model, save_dir=model_dir, metric="total_loss")
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             monitor_names=("cls_loss", "seg_loss"),
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch)
    return estimator
def get_estimator(epochs=200,
                  batch_size=128,
                  max_train_steps_per_epoch=None,
                  max_eval_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp(),
                  data_dir=None):
    # step 1. prepare pipeline
    train_data, eval_data = omniglot.load_data(root_dir=data_dir)
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(
        train_data=train_data,
        eval_data=eval_data,
        test_data=test_data,
        batch_size=batch_size,
        ops=[
            ReadImage(inputs="x_a", outputs="x_a", color_flag='gray'),
            ReadImage(inputs="x_b", outputs="x_b", color_flag='gray'),
            Sometimes(ShiftScaleRotate(image_in="x_a",
                                       image_out="x_a",
                                       shift_limit=0.05,
                                       scale_limit=0.2,
                                       rotate_limit=10,
                                       mode="train"),
                      prob=0.89),
            Sometimes(ShiftScaleRotate(image_in="x_b",
                                       image_out="x_b",
                                       shift_limit=0.05,
                                       scale_limit=0.2,
                                       rotate_limit=10,
                                       mode="train"),
                      prob=0.89),
            Minmax(inputs="x_a", outputs="x_a"),
            Minmax(inputs="x_b", outputs="x_b")
        ])

    # step 2. prepare model
    model = fe.build(model_fn=siamese_network, model_name="siamese_net", optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(inputs=["x_a", "x_b"], model=model, outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="loss", form="binary"),
        UpdateOp(model=model, loss_name="loss")
    ])

    # step 3. prepare estimator
    traces = [
        LRScheduler(model=model, lr_fn=lr_schedule),
        Accuracy(true_key="y", pred_key="y_pred"),
        OneShotAccuracy(dataset=eval_data, model=model, output_name='one_shot_accuracy'),
        BestModelSaver(model=model, save_dir=save_dir, metric="one_shot_accuracy", save_best_mode="max"),
        EarlyStopping(monitor="one_shot_accuracy", patience=20, compare='max', mode="eval")
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             traces=traces,
                             max_train_steps_per_epoch=max_train_steps_per_epoch,
                             max_eval_steps_per_epoch=max_eval_steps_per_epoch)
    return estimator
def test_single_input(self):
    data = [self.img1_path]
    image = ReadImage(inputs='x', outputs='x')
    output = image.forward(data=data, state={})
    self.assertTrue(is_equal(output[0], self.expected_image_output))