def test_input(self):
    pad = PadIfNeeded(image_in='x')
    output = pad.forward(data=self.single_input, state={})
    with self.subTest('Check output type'):
        self.assertEqual(type(output), list)
    with self.subTest('Check output image shape'):
        self.assertEqual(output[0].shape, self.single_output_shape)

def test_input_image_and_mask(self):
    pad = PadIfNeeded(image_in='x', mask_in='x_mask')
    output = pad.forward(data=self.input_image_and_mask, state={})
    with self.subTest('Check output type'):
        self.assertEqual(type(output), list)
    with self.subTest('Check output image shape'):
        self.assertEqual(output[0].shape, self.image_and_mask_output_shape)
    with self.subTest('Check output mask shape'):
        self.assertEqual(output[1].shape, self.image_and_mask_output_shape)
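These tests reference fixtures defined elsewhere in the test class. Below is a minimal sketch of a compatible setup; the 28x28 input shape is an assumption, and the expected output shape assumes PadIfNeeded keeps albumentations' default of min_height=min_width=1024.

import unittest

import numpy as np

class TestPadIfNeeded(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # single image input: a list containing one HWC array (shape assumed for illustration)
        cls.single_input = [np.random.rand(28, 28, 3).astype(np.float32)]
        cls.single_output_shape = (1024, 1024, 3)  # padded up to the assumed 1024x1024 default
        # image + mask input: two arrays of the same shape
        cls.input_image_and_mask = [np.random.rand(28, 28, 3).astype(np.float32),
                                    np.random.rand(28, 28, 3).astype(np.float32)]
        cls.image_and_mask_output_shape = (1024, 1024, 3)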
def pretrain_model(epochs, batch_size, max_train_steps_per_epoch, save_dir):
    # step 1: prepare dataset
    train_data, test_data = load_data()
    pipeline = fe.Pipeline(
        train_data=train_data,
        batch_size=batch_size,
        ops=[
            PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x"),
            # augmentation 1
            RandomCrop(32, 32, image_in="x", image_out="x_aug"),
            Sometimes(HorizontalFlip(image_in="x_aug", image_out="x_aug"), prob=0.5),
            Sometimes(
                ColorJitter(inputs="x_aug", outputs="x_aug", brightness=0.8, contrast=0.8, saturation=0.8, hue=0.2),
                prob=0.8),
            Sometimes(ToGray(inputs="x_aug", outputs="x_aug"), prob=0.2),
            Sometimes(GaussianBlur(inputs="x_aug", outputs="x_aug", blur_limit=(3, 3), sigma_limit=(0.1, 2.0)),
                      prob=0.5),
            ToFloat(inputs="x_aug", outputs="x_aug"),
            # augmentation 2
            RandomCrop(32, 32, image_in="x", image_out="x_aug2"),
            Sometimes(HorizontalFlip(image_in="x_aug2", image_out="x_aug2"), prob=0.5),
            Sometimes(
                ColorJitter(inputs="x_aug2", outputs="x_aug2", brightness=0.8, contrast=0.8, saturation=0.8, hue=0.2),
                prob=0.8),
            Sometimes(ToGray(inputs="x_aug2", outputs="x_aug2"), prob=0.2),
            Sometimes(GaussianBlur(inputs="x_aug2", outputs="x_aug2", blur_limit=(3, 3), sigma_limit=(0.1, 2.0)),
                      prob=0.5),
            ToFloat(inputs="x_aug2", outputs="x_aug2")
        ])
    # step 2: prepare network
    model_con, model_finetune = fe.build(model_fn=ResNet9, optimizer_fn=["adam", "adam"])
    network = fe.Network(ops=[
        LambdaOp(lambda x, y: tf.concat([x, y], axis=0), inputs=["x_aug", "x_aug2"], outputs="x_com"),
        ModelOp(model=model_con, inputs="x_com", outputs="y_com"),
        LambdaOp(lambda x: tf.split(x, 2, axis=0), inputs="y_com", outputs=["y_pred", "y_pred2"]),
        NTXentOp(arg1="y_pred", arg2="y_pred2", outputs=["NTXent", "logit", "label"]),
        UpdateOp(model=model_con, loss_name="NTXent")
    ])
    # step 3: prepare estimator
    traces = [
        Accuracy(true_key="label", pred_key="logit", mode="train", output_name="contrastive_accuracy"),
        ModelSaver(model=model_con, save_dir=save_dir),
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             max_train_steps_per_epoch=max_train_steps_per_epoch,
                             monitor_names="contrastive_accuracy")
    estimator.fit()
    return model_con, model_finetune
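A hedged invocation sketch; the hyperparameter values below are illustrative assumptions, not settings from the source:

if __name__ == "__main__":
    # illustrative values only
    model_con, model_finetune = pretrain_model(epochs=50,
                                               batch_size=512,
                                               max_train_steps_per_epoch=None,
                                               save_dir=tempfile.mkdtemp())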
def get_estimator(epochs=50,
                  batch_size=128,
                  max_train_steps_per_epoch=None,
                  max_eval_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp()):
    # step 1
    train_data, eval_data = cifair100.load_data()
    # Add label noise to simulate real-world labeling problems
    corrupt_dataset(train_data)
    test_data = eval_data.split(range(len(eval_data) // 2))
    pipeline = fe.Pipeline(
        train_data=train_data,
        eval_data=eval_data,
        test_data=test_data,
        batch_size=batch_size,
        ops=[
            Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),
            PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x", mode="train"),
            RandomCrop(32, 32, image_in="x", image_out="x", mode="train"),
            Sometimes(HorizontalFlip(image_in="x", image_out="x", mode="train")),
            CoarseDropout(inputs="x", outputs="x", mode="train", max_holes=1),
            ChannelTranspose(inputs="x", outputs="x")
        ])
    # step 2
    model = fe.build(model_fn=big_lenet, optimizer_fn='adam')
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        SuperLoss(CrossEntropy(inputs=("y_pred", "y"), outputs="ce"), output_confidence="confidence"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3
    traces = [
        MCC(true_key="y", pred_key="y_pred"),
        BestModelSaver(model=model, save_dir=save_dir, metric="mcc", save_best_mode="max", load_best_final=True),
        LabelTracker(metric="confidence",
                     label="data_labels",
                     label_mapping={"Normal": 0, "Corrupted": 1},
                     mode="train",
                     outputs="label_confidence"),
        ImageSaver(inputs="label_confidence", save_dir=save_dir, mode="train"),
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             max_train_steps_per_epoch=max_train_steps_per_epoch,
                             max_eval_steps_per_epoch=max_eval_steps_per_epoch)
    return estimator
def get_estimator(epochs=24,
                  batch_size=128,
                  lr_epochs=100,
                  max_train_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp()):
    # step 1: prepare dataset
    train_data, test_data = load_data()
    pipeline = fe.Pipeline(
        train_data=train_data,
        eval_data=test_data,
        batch_size=batch_size,
        ops=[
            Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),
            PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x", mode="train"),
            RandomCrop(32, 32, image_in="x", image_out="x", mode="train"),
            Sometimes(HorizontalFlip(image_in="x", image_out="x", mode="train")),
            CoarseDropout(inputs="x", outputs="x", mode="train", max_holes=1),
            ChannelTranspose(inputs="x", outputs="x"),
            Onehot(inputs="y", outputs="y", mode="train", num_classes=10, label_smoothing=0.2)
        ])
    # step 2: prepare network
    model = fe.build(model_fn=ResNet9, optimizer_fn="sgd")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # get the max learning rate
    lr_max = search_max_lr(pipeline=pipeline, model=model, network=network, epochs=lr_epochs)
    lr_min = lr_max / 40
    print(f"The maximum LR: {lr_max}, and minimum LR: {lr_min}")
    mid_step = int(epochs * 0.45 * len(train_data) / batch_size)
    end_step = int(epochs * len(train_data) / batch_size)
    # reinitialize the model
    model = fe.build(model_fn=ResNet9, optimizer_fn="sgd")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3: prepare estimator
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        BestModelSaver(model=model, save_dir=save_dir, metric="accuracy", save_best_mode="max"),
        LRScheduler(model=model, lr_fn=lambda step: super_schedule(step, lr_max, lr_min, mid_step, end_step))
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             max_train_steps_per_epoch=max_train_steps_per_epoch)
    return estimator
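`search_max_lr` and `super_schedule` are defined elsewhere in the source. Below is one plausible `super_schedule` consistent with how it is called above: a one-cycle policy that ramps linearly to lr_max by mid_step, then decays linearly to lr_min by end_step. This is an assumption, not the original implementation.

def super_schedule(step, lr_max, lr_min, mid_step, end_step):
    # assumed one-cycle policy: linear warmup, then linear decay
    if step < mid_step:
        return lr_min + (lr_max - lr_min) * step / mid_step
    return max(lr_min, lr_max - (lr_max - lr_min) * (step - mid_step) / (end_step - mid_step))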
def finetune(weights_path,
             batch_size,
             epochs,
             model_dir=tempfile.mkdtemp(),
             train_steps_per_epoch=None,
             eval_steps_per_epoch=None):
    train_data, eval_data = cifair10.load_data()
    pipeline = fe.Pipeline(
        train_data=train_data,
        eval_data=eval_data,
        batch_size=batch_size,
        ops=[
            Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),
            PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x", mode="train"),
            RandomCrop(32, 32, image_in="x", image_out="x", mode="train"),
            Sometimes(HorizontalFlip(image_in="x", image_out="x", mode="train")),
            CoarseDropout(inputs="x", outputs="x", mode="train", max_holes=1)
        ])
    _, model = fe.build(
        model_fn=lambda: vision_transformer(num_class=10,
                                            weights_path=weights_path,
                                            image_size=(32, 32, 3),
                                            patch_size=4,
                                            num_layers=6,
                                            em_dim=256,
                                            num_heads=8,
                                            dff=512),
        optimizer_fn=[None, lambda: tf.optimizers.SGD(0.01, momentum=0.9)])
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce", from_logits=True),
        UpdateOp(model=model, loss_name="ce")
    ])
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        BestModelSaver(model=model, save_dir=model_dir, metric="accuracy", save_best_mode="max")
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch)
    estimator.fit(warmup=False)
def get_estimator(epochs=24, batch_size=512, max_train_steps_per_epoch=None, save_dir=tempfile.mkdtemp()):
    # step 1: prepare dataset
    train_data, test_data = load_data()
    pipeline = fe.Pipeline(
        train_data=train_data,
        test_data=test_data,
        batch_size=batch_size,
        ops=[
            Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),
            PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x", mode="train"),
            RandomCrop(32, 32, image_in="x", image_out="x", mode="train"),
            Sometimes(HorizontalFlip(image_in="x", image_out="x", mode="train")),
            CoarseDropout(inputs="x", outputs="x", mode="train", max_holes=1),
            ChannelTranspose(inputs="x", outputs="x"),
            Onehot(inputs="y", outputs="y", mode="train", num_classes=10, label_smoothing=0.2)
        ])
    # step 2: prepare network
    model = fe.build(model_fn=FastCifar, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3: prepare estimator
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        BestModelSaver(model=model, save_dir=save_dir, metric="accuracy", save_best_mode="max"),
        LRScheduler(model=model, lr_fn=lr_schedule)
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             max_train_steps_per_epoch=max_train_steps_per_epoch)
    return estimator
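The `lr_schedule` handed to LRScheduler above is defined elsewhere in the source; the sketch below is a representative stand-in (linear warmup followed by exponential decay) with assumed constants, not the original definition.

def lr_schedule(step, warmup_steps=490, peak_lr=1e-3):
    # assumed shape: ramp up linearly, then decay exponentially
    if step <= warmup_steps:
        return peak_lr * step / warmup_steps
    return peak_lr * 0.995 ** (step - warmup_steps)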
def get_estimator(data_dir=None,
                  model_dir=tempfile.mkdtemp(),
                  epochs=200,
                  batch_size_per_gpu=32,
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None):
    num_device = get_num_devices()
    train_ds, val_ds = mscoco.load_data(root_dir=data_dir)
    train_ds = PreMosaicDataset(mscoco_ds=train_ds)
    batch_size = num_device * batch_size_per_gpu
    pipeline = fe.Pipeline(
        train_data=train_ds,
        eval_data=val_ds,
        ops=[
            ReadImage(inputs=("image1", "image2", "image3", "image4"),
                      outputs=("image1", "image2", "image3", "image4"),
                      mode="train"),
            ReadImage(inputs="image", outputs="image", mode="eval"),
            LongestMaxSize(max_size=640,
                           image_in="image1",
                           bbox_in="bbox1",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="train"),
            LongestMaxSize(max_size=640,
                           image_in="image2",
                           bbox_in="bbox2",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="train"),
            LongestMaxSize(max_size=640,
                           image_in="image3",
                           bbox_in="bbox3",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="train"),
            LongestMaxSize(max_size=640,
                           image_in="image4",
                           bbox_in="bbox4",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="train"),
            LongestMaxSize(max_size=640,
                           image_in="image",
                           bbox_in="bbox",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="eval"),
            PadIfNeeded(min_height=640,
                        min_width=640,
                        image_in="image",
                        bbox_in="bbox",
                        bbox_params=BboxParams("coco", min_area=1.0),
                        mode="eval",
                        border_mode=cv2.BORDER_CONSTANT,
                        value=(114, 114, 114)),
            CombineMosaic(inputs=("image1", "image2", "image3", "image4", "bbox1", "bbox2", "bbox3", "bbox4"),
                          outputs=("image", "bbox"),
                          mode="train"),
            CenterCrop(height=640,
                       width=640,
                       image_in="image",
                       bbox_in="bbox",
                       bbox_params=BboxParams("coco", min_area=1.0),
                       mode="train"),
            Sometimes(
                HorizontalFlip(image_in="image",
                               bbox_in="bbox",
                               bbox_params=BboxParams("coco", min_area=1.0),
                               mode="train")),
            HSVAugment(inputs="image", outputs="image", mode="train"),
            ToArray(inputs="bbox", outputs="bbox", dtype="float32"),
            CategoryID2ClassID(inputs="bbox", outputs="bbox"),
            GTBox(inputs="bbox", outputs=("gt_sbbox", "gt_mbbox", "gt_lbbox"), image_size=640),
            Delete(keys=("image1", "image2", "image3", "image4", "bbox1", "bbox2", "bbox3", "bbox4", "bbox"),
                   mode="train"),
            Delete(keys="image_id", mode="eval"),
            Batch(batch_size=batch_size, pad_value=0)
        ])
    init_lr = 1e-2 / 64 * batch_size
    model = fe.build(
        lambda: YoloV5(w=640, h=640, c=3),
        optimizer_fn=lambda x: torch.optim.SGD(x, lr=init_lr, momentum=0.937, weight_decay=0.0005, nesterov=True),
        mixed_precision=True)
    network = fe.Network(ops=[
        RescaleTranspose(inputs="image", outputs="image"),
        ModelOp(model=model, inputs="image", outputs=("pred_s", "pred_m", "pred_l")),
        DecodePred(inputs=("pred_s", "pred_m", "pred_l"), outputs=("pred_s", "pred_m", "pred_l")),
        ComputeLoss(inputs=("pred_s", "gt_sbbox"), outputs=("sbbox_loss", "sconf_loss", "scls_loss")),
        ComputeLoss(inputs=("pred_m", "gt_mbbox"), outputs=("mbbox_loss", "mconf_loss", "mcls_loss")),
        ComputeLoss(inputs=("pred_l", "gt_lbbox"), outputs=("lbbox_loss", "lconf_loss", "lcls_loss")),
        Average(inputs=("sbbox_loss", "mbbox_loss", "lbbox_loss"), outputs="bbox_loss"),
        Average(inputs=("sconf_loss", "mconf_loss", "lconf_loss"), outputs="conf_loss"),
        Average(inputs=("scls_loss", "mcls_loss", "lcls_loss"), outputs="cls_loss"),
        Average(inputs=("bbox_loss", "conf_loss", "cls_loss"), outputs="total_loss"),
        PredictBox(width=640, height=640, inputs=("pred_s", "pred_m", "pred_l"), outputs="box_pred", mode="eval"),
        UpdateOp(model=model, loss_name="total_loss")
    ])
    traces = [
        MeanAveragePrecision(num_classes=80, true_key='bbox', pred_key='box_pred', mode="eval"),
        BestModelSaver(model=model, save_dir=model_dir, metric='mAP', save_best_mode="max")
    ]
    lr_schedule = {
        1:
        LRScheduler(model=model,
                    lr_fn=lambda step: lr_schedule_warmup(
                        step, train_steps_epoch=np.ceil(len(train_ds) / batch_size), init_lr=init_lr)),
        4:
        LRScheduler(model=model,
                    lr_fn=lambda epoch: cosine_decay(
                        epoch, cycle_length=epochs - 3, init_lr=init_lr, min_lr=init_lr / 100, start=4))
    }
    traces.append(EpochScheduler(lr_schedule))
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             monitor_names=["bbox_loss", "conf_loss", "cls_loss"],
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch)
    return estimator
def get_estimator(batch_size=8,
                  epochs=50,
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp(),
                  data_dir=None):
    # load CUB200 dataset
    train_data = cub200.load_data(root_dir=data_dir)
    eval_data = train_data.split(0.3)
    test_data = eval_data.split(0.5)
    # step 1: pipeline
    pipeline = fe.Pipeline(
        batch_size=batch_size,
        train_data=train_data,
        eval_data=eval_data,
        test_data=test_data,
        ops=[
            ReadImage(inputs="image", outputs="image", parent_path=train_data.parent_path),
            Normalize(inputs="image", outputs="image", mean=1.0, std=1.0, max_pixel_value=127.5),
            ReadMat(file='annotation', keys="seg", parent_path=train_data.parent_path),
            LongestMaxSize(max_size=512, image_in="image", image_out="image", mask_in="seg", mask_out="seg"),
            PadIfNeeded(min_height=512,
                        min_width=512,
                        image_in="image",
                        image_out="image",
                        mask_in="seg",
                        mask_out="seg",
                        border_mode=cv2.BORDER_CONSTANT,
                        value=0,
                        mask_value=0),
            ShiftScaleRotate(image_in="image",
                             mask_in="seg",
                             image_out="image",
                             mask_out="seg",
                             mode="train",
                             shift_limit=0.2,
                             rotate_limit=15.0,
                             scale_limit=0.2,
                             border_mode=cv2.BORDER_CONSTANT,
                             value=0,
                             mask_value=0),
            Sometimes(HorizontalFlip(image_in="image", mask_in="seg", image_out="image", mask_out="seg",
                                     mode="train")),
            Reshape(shape=(512, 512, 1), inputs="seg", outputs="seg")
        ])
    # step 2: network
    resunet50 = fe.build(model_fn=ResUnet50, model_name="resunet50", optimizer_fn=lambda: tf.optimizers.Adam(1e-4))
    uncertainty = fe.build(model_fn=UncertaintyLossNet,
                           model_name="uncertainty",
                           optimizer_fn=lambda: tf.optimizers.Adam(2e-5))
    network = fe.Network(ops=[
        ModelOp(inputs='image', model=resunet50, outputs=["label_pred", "mask_pred"]),
        CrossEntropy(inputs=["label_pred", "label"], outputs="cls_loss", form="sparse", average_loss=False),
        CrossEntropy(inputs=["mask_pred", "seg"], outputs="seg_loss", form="binary", average_loss=False),
        ModelOp(inputs=["cls_loss", "seg_loss"], model=uncertainty, outputs="total_loss"),
        ReduceLoss(inputs="total_loss", outputs="total_loss"),
        UpdateOp(model=resunet50, loss_name="total_loss"),
        UpdateOp(model=uncertainty, loss_name="total_loss")
    ])
    # step 3: estimator
    traces = [
        Accuracy(true_key="label", pred_key="label_pred"),
        Dice(true_key="seg", pred_key='mask_pred'),
        BestModelSaver(model=resunet50, save_dir=save_dir, metric="total_loss", save_best_mode="min"),
        LRScheduler(model=resunet50, lr_fn=lambda step: cosine_decay(step, cycle_length=26400, init_lr=1e-4))
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             traces=traces,
                             epochs=epochs,
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch,
                             log_steps=500)
    return estimator
def get_estimator(epochs=150,
                  batch_size=32,
                  save_dir=tempfile.mkdtemp(),
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None):
    # step 1: prepare dataset
    train_data, eval_data = load_data()
    pipeline = fe.Pipeline(
        train_data=train_data,
        eval_data=eval_data,
        batch_size=batch_size * get_num_devices(),
        ops=[
            Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),
            PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x", mode="train"),
            RandomCrop(32, 32, image_in="x", image_out="x", mode="train"),
            Sometimes(HorizontalFlip(image_in="x", image_out="x", mode="train")),
            CoarseDropout(inputs="x", outputs="x", mode="train", max_holes=1)
        ])
    # step 2: prepare network
    model = fe.build(
        model_fn=lambda: pyramidnet_cifar(inputs_shape=(32, 32, 3), depth=272, alpha=200, num_classes=10,
                                          bottleneck=True),
        optimizer_fn=lambda: tfa.optimizers.SGDW(weight_decay=0.0001, lr=0.1, momentum=0.9))
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce", from_logits=True),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3: prepare estimator
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        LRScheduler(model=model, lr_fn=lr_schedule),
        BestModelSaver(model=model, save_dir=save_dir, metric="accuracy", save_best_mode="max")
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch)
    return estimator
def get_estimator(data_dir=None,
                  model_dir=tempfile.mkdtemp(),
                  batch_size=16,
                  epochs=13,
                  max_train_steps_per_epoch=None,
                  max_eval_steps_per_epoch=None,
                  image_size=512,
                  num_classes=90):
    # pipeline
    train_ds, eval_ds = mscoco.load_data(root_dir=data_dir)
    pipeline = fe.Pipeline(
        train_data=train_ds,
        eval_data=eval_ds,
        batch_size=batch_size,
        ops=[
            ReadImage(inputs="image", outputs="image"),
            LongestMaxSize(image_size,
                           image_in="image",
                           image_out="image",
                           bbox_in="bbox",
                           bbox_out="bbox",
                           bbox_params=BboxParams("coco", min_area=1.0)),
            PadIfNeeded(image_size,
                        image_size,
                        border_mode=cv2.BORDER_CONSTANT,
                        image_in="image",
                        image_out="image",
                        bbox_in="bbox",
                        bbox_out="bbox",
                        bbox_params=BboxParams("coco", min_area=1.0)),
            Sometimes(
                HorizontalFlip(mode="train",
                               image_in="image",
                               image_out="image",
                               bbox_in="bbox",
                               bbox_out="bbox",
                               bbox_params='coco')),
            # normalize from uint8 to [-1, 1]
            Normalize(inputs="image", outputs="image", mean=1.0, std=1.0, max_pixel_value=127.5),
            ShiftLabel(inputs="bbox", outputs="bbox"),
            AnchorBox(inputs="bbox", outputs="anchorbox", width=image_size, height=image_size),
            ChannelTranspose(inputs="image", outputs="image")
        ],
        pad_value=0)
    # network
    model = fe.build(model_fn=lambda: RetinaNet(num_classes=num_classes),
                     optimizer_fn=lambda x: torch.optim.SGD(x, lr=2e-4, momentum=0.9, weight_decay=0.0001))
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="image", outputs=["cls_pred", "loc_pred"]),
        RetinaLoss(inputs=["anchorbox", "cls_pred", "loc_pred"], outputs=["total_loss", "focal_loss", "l1_loss"]),
        UpdateOp(model=model, loss_name="total_loss"),
        PredictBox(input_shape=(image_size, image_size, 3),
                   inputs=["cls_pred", "loc_pred"],
                   outputs="pred",
                   mode="eval")
    ])
    # estimator
    traces = [
        LRScheduler(model=model, lr_fn=lr_fn),
        BestModelSaver(model=model, save_dir=model_dir, metric='mAP', save_best_mode="max"),
        MeanAveragePrecision(num_classes=num_classes, true_key='bbox', pred_key='pred', mode="eval")
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             max_train_steps_per_epoch=max_train_steps_per_epoch,
                             max_eval_steps_per_epoch=max_eval_steps_per_epoch,
                             monitor_names=["l1_loss", "focal_loss"])
    return estimator
def finetune(pretrained_model,
             batch_size,
             epochs,
             model_dir=tempfile.mkdtemp(),
             train_steps_per_epoch=None,
             eval_steps_per_epoch=None):
    train_data, eval_data = cifair10.load_data()
    pipeline = fe.Pipeline(
        train_data=train_data,
        eval_data=eval_data,
        batch_size=batch_size,
        ops=[
            Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),
            PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x", mode="train"),
            RandomCrop(32, 32, image_in="x", image_out="x", mode="train"),
            Sometimes(HorizontalFlip(image_in="x", image_out="x", mode="train")),
            CoarseDropout(inputs="x", outputs="x", mode="train", max_holes=1),
            ChannelTranspose(inputs="x", outputs="x")
        ])
    # classification head sized for the 10 ciFAIR-10 classes
    model = fe.build(model_fn=lambda: ViTModel(num_classes=10,
                                               image_size=32,
                                               patch_size=4,
                                               num_layers=6,
                                               num_channels=3,
                                               em_dim=256,
                                               num_heads=8,
                                               ff_dim=512),
                     optimizer_fn=lambda x: torch.optim.SGD(x, lr=0.01, momentum=0.9, weight_decay=1e-4))
    # load the encoder's weights from the pretrained model
    if hasattr(model, "module"):
        model.module.vit_encoder.load_state_dict(pretrained_model.module.vit_encoder.state_dict())
    else:
        model.vit_encoder.load_state_dict(pretrained_model.vit_encoder.state_dict())
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce", from_logits=True),
        UpdateOp(model=model, loss_name="ce")
    ])
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        BestModelSaver(model=model, save_dir=model_dir, metric="accuracy", save_best_mode="max")
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch)
    estimator.fit(warmup=False)
def get_estimator(data_dir=None,
                  epochs=12,
                  batch_size_per_gpu=4,
                  im_size=1344,
                  model_dir=tempfile.mkdtemp(),
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None):
    assert im_size % 32 == 0, "im_size must be a multiple of 32"
    num_device = get_num_devices()
    train_ds, val_ds = mscoco.load_data(root_dir=data_dir, load_masks=True)
    batch_size = num_device * batch_size_per_gpu
    pipeline = fe.Pipeline(
        train_data=train_ds,
        eval_data=val_ds,
        test_data=val_ds,
        ops=[
            ReadImage(inputs="image", outputs="image"),
            MergeMask(inputs="mask", outputs="mask"),
            GetImageSize(inputs="image", outputs="imsize", mode="test"),
            LongestMaxSize(max_size=im_size, image_in="image", mask_in="mask", bbox_in="bbox", bbox_params="coco"),
            RemoveIf(fn=lambda x: len(x) == 0, inputs="bbox"),
            PadIfNeeded(min_height=im_size,
                        min_width=im_size,
                        image_in="image",
                        mask_in="mask",
                        bbox_in="bbox",
                        bbox_params="coco",
                        border_mode=cv2.BORDER_CONSTANT,
                        value=0),
            Sometimes(HorizontalFlip(image_in="image", mask_in="mask", bbox_in="bbox", bbox_params="coco",
                                     mode="train")),
            Resize(height=im_size // 4, width=im_size // 4, image_in='mask'),  # downscale mask for memory efficiency
            Gt2Target(inputs=("mask", "bbox"), outputs=("gt_match", "mask", "classes")),
            Delete(keys="bbox"),
            Delete(keys="image_id", mode="!test"),
            Batch(batch_size=batch_size, pad_value=0)
        ],
        num_process=8 * num_device)
    init_lr = 1e-2 / 16 * batch_size
    model = fe.build(model_fn=SoloV2, optimizer_fn=lambda x: torch.optim.SGD(x, lr=init_lr, momentum=0.9))
    network = fe.Network(ops=[
        Normalize(inputs="image", outputs="image", mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        Permute(inputs="image", outputs='image'),
        ModelOp(model=model, inputs="image", outputs=("feat_seg", "feat_cls_list", "feat_kernel_list")),
        LambdaOp(fn=lambda x: x, inputs="feat_cls_list", outputs=("cls1", "cls2", "cls3", "cls4", "cls5")),
        LambdaOp(fn=lambda x: x, inputs="feat_kernel_list", outputs=("k1", "k2", "k3", "k4", "k5")),
        Solov2Loss(0, 40, inputs=("mask", "classes", "gt_match", "feat_seg", "cls1", "k1"), outputs=("l_c1", "l_s1")),
        Solov2Loss(1, 36, inputs=("mask", "classes", "gt_match", "feat_seg", "cls2", "k2"), outputs=("l_c2", "l_s2")),
        Solov2Loss(2, 24, inputs=("mask", "classes", "gt_match", "feat_seg", "cls3", "k3"), outputs=("l_c3", "l_s3")),
        Solov2Loss(3, 16, inputs=("mask", "classes", "gt_match", "feat_seg", "cls4", "k4"), outputs=("l_c4", "l_s4")),
        Solov2Loss(4, 12, inputs=("mask", "classes", "gt_match", "feat_seg", "cls5", "k5"), outputs=("l_c5", "l_s5")),
        CombineLoss(inputs=("l_c1", "l_s1", "l_c2", "l_s2", "l_c3", "l_s3", "l_c4", "l_s4", "l_c5", "l_s5"),
                    outputs=("total_loss", "cls_loss", "seg_loss")),
        L2Regularizaton(inputs="total_loss", outputs="total_loss_l2", model=model, beta=1e-5, mode="train"),
        UpdateOp(model=model, loss_name="total_loss_l2"),
        PointsNMS(inputs="feat_cls_list", outputs="feat_cls_list", mode="test"),
        Predict(inputs=("feat_seg", "feat_cls_list", "feat_kernel_list"),
                outputs=("seg_preds", "cate_scores", "cate_labels"),
                mode="test")
    ])
    train_steps_epoch = int(np.ceil(len(train_ds) / batch_size))
    lr_schedule = {
        1:
        LRScheduler(model=model, lr_fn=lambda step: lr_schedule_warmup(step, init_lr=init_lr)),
        2:
        LRScheduler(model=model,
                    lr_fn=lambda step: cosine_decay(step,
                                                    cycle_length=train_steps_epoch * (epochs - 1),
                                                    init_lr=init_lr,
                                                    min_lr=init_lr / 100,
                                                    start=train_steps_epoch))
    }
    traces = [
        EpochScheduler(lr_schedule),
        COCOMaskmAP(data_dir=val_ds.root_dir,
                    inputs=("seg_preds", "cate_scores", "cate_labels", "image_id", "imsize"),
                    mode="test"),
        BestModelSaver(model=model, save_dir=model_dir, metric="total_loss")
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             monitor_names=("cls_loss", "seg_loss"),
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch)
    return estimator
train_data, test_data = load_data()
pipeline = fe.Pipeline(
    train_data=train_data,
    test_data=test_data,
    batch_size={"train": 128, "test": 32},
    ops=[
        Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),
        PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x", mode="train"),
        RandomCrop(32, 32, image_in="x", image_out="x", mode="train"),
        Sometimes(HorizontalFlip(image_in="x", image_out="x", mode="train")),
        CoarseDropout(inputs="x", outputs="x", mode="train", max_holes=1)
    ])
def pretrain_model(epochs, batch_size, train_steps_per_epoch, save_dir):
    train_data, test_data = load_data()
    pipeline = fe.Pipeline(
        train_data=train_data,
        batch_size=batch_size,
        ops=[
            PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x", mode="train"),
            # augmentation 1
            RandomCrop(32, 32, image_in="x", image_out="x_aug"),
            Sometimes(HorizontalFlip(image_in="x_aug", image_out="x_aug"), prob=0.5),
            Sometimes(ColorJitter(inputs="x_aug", outputs="x_aug", brightness=0.8, contrast=0.8, saturation=0.8,
                                  hue=0.2),
                      prob=0.8),
            Sometimes(ToGray(inputs="x_aug", outputs="x_aug"), prob=0.2),
            Sometimes(GaussianBlur(inputs="x_aug", outputs="x_aug", blur_limit=(3, 3), sigma_limit=(0.1, 2.0)),
                      prob=0.5),
            ChannelTranspose(inputs="x_aug", outputs="x_aug"),
            ToFloat(inputs="x_aug", outputs="x_aug"),
            # augmentation 2
            RandomCrop(32, 32, image_in="x", image_out="x_aug2"),
            Sometimes(HorizontalFlip(image_in="x_aug2", image_out="x_aug2"), prob=0.5),
            Sometimes(ColorJitter(inputs="x_aug2", outputs="x_aug2", brightness=0.8, contrast=0.8, saturation=0.8,
                                  hue=0.2),
                      prob=0.8),
            Sometimes(ToGray(inputs="x_aug2", outputs="x_aug2"), prob=0.2),
            Sometimes(GaussianBlur(inputs="x_aug2", outputs="x_aug2", blur_limit=(3, 3), sigma_limit=(0.1, 2.0)),
                      prob=0.5),
            ChannelTranspose(inputs="x_aug2", outputs="x_aug2"),
            ToFloat(inputs="x_aug2", outputs="x_aug2")
        ])
    model_con = fe.build(model_fn=lambda: ResNet9OneLayerHead(length=128), optimizer_fn="adam")
    network = fe.Network(ops=[
        LambdaOp(lambda x, y: torch.cat([x, y], dim=0), inputs=["x_aug", "x_aug2"], outputs="x_com"),
        ModelOp(model=model_con, inputs="x_com", outputs="y_com"),
        LambdaOp(lambda x: torch.chunk(x, 2, dim=0), inputs="y_com", outputs=["y_pred", "y_pred2"], mode="train"),
        NTXentOp(arg1="y_pred", arg2="y_pred2", outputs=["NTXent", "logit", "label"], mode="train"),
        UpdateOp(model=model_con, loss_name="NTXent")
    ])
    traces = [
        Accuracy(true_key="label", pred_key="logit", mode="train", output_name="contrastive_accuracy"),
        ModelSaver(model=model_con, save_dir=save_dir)
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             train_steps_per_epoch=train_steps_per_epoch)
    estimator.fit()
    return model_con
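An illustrative driver for the PyTorch pretraining above; the values are assumptions, not the authors' settings:

if __name__ == "__main__":
    model_con = pretrain_model(epochs=50,
                               batch_size=512,
                               train_steps_per_epoch=None,
                               save_dir=tempfile.mkdtemp())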
def get_estimator(epochs=12, batch_size=512, save_dir=tempfile.mkdtemp()):
    # epochs 1-10: train on ciFAIR-100; epoch 11 onward: train on ciFAIR-10
    cifair10_train, cifair10_test = cifair10.load_data()
    cifair100_train, _ = cifair100.load_data()
    train_ds = EpochScheduler({1: cifair100_train, 11: cifair10_train})
    pipeline = fe.Pipeline(
        train_data=train_ds,
        test_data=cifair10_test,
        batch_size=batch_size,
        ops=[
            Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),
            PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x", mode="train"),
            RandomCrop(32, 32, image_in="x", image_out="x", mode="train"),
            Sometimes(HorizontalFlip(image_in="x", image_out="x", mode="train")),
            CoarseDropout(inputs="x", outputs="x", mode="train", max_holes=1),
            ChannelTranspose(inputs="x", outputs="x")
        ])
    # step 2: prepare network
    backbone = fe.build(model_fn=lambda: Backbone(input_size=(3, 32, 32)), optimizer_fn="adam")
    cls_head_cifar100 = fe.build(model_fn=lambda: Classifier(classes=100), optimizer_fn="adam")
    cls_head_cifar10 = fe.build(model_fn=lambda: Classifier(classes=10), optimizer_fn="adam")
    # to save the final cifar10 model, build a wrapper model and hand it to a ModelSaver trace:
    # final_model_cifar10 = fe.build(model_fn=lambda: MyModel(backbone, cls_head_cifar10), optimizer_fn=None)
    # epochs 1-10: train backbone and cls_head_cifar100; epoch 11 onward: train cls_head_cifar10 only
    ModelOp_cls_head = EpochScheduler({
        1: ModelOp(model=cls_head_cifar100, inputs="feature", outputs="y_pred"),
        11: ModelOp(model=cls_head_cifar10, inputs="feature", outputs="y_pred"),
    })
    UpdateOp_backbone = EpochScheduler({1: UpdateOp(model=backbone, loss_name="ce"), 11: None})
    UpdateOp_cls_head = EpochScheduler({
        1: UpdateOp(model=cls_head_cifar100, loss_name="ce"),
        11: UpdateOp(model=cls_head_cifar10, loss_name="ce")
    })
    network = fe.Network(ops=[
        ModelOp(model=backbone, inputs="x", outputs="feature"),
        ModelOp_cls_head,
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce", from_logits=True),
        UpdateOp_backbone,
        UpdateOp_cls_head
    ])
    traces = [Accuracy(true_key="y", pred_key="y_pred")]
    estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=epochs, traces=traces)
    return estimator
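FastEstimator example scripts typically expose such a function behind a main guard; a minimal sketch (assumed here, not part of the source):

if __name__ == "__main__":
    est = get_estimator()
    est.fit()
    est.test()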