Example #1
 def test_torch_model_on_batch_begin(self):
     lr_scheduler = LRScheduler(model=self.torch_model,
                                lr_fn=lambda step: fe.schedule.cosine_decay(
                                    step, cycle_length=3750, init_lr=1e-3))
     lr_scheduler.system = sample_system_object()
     lr_scheduler.system.global_step = 3
     lr_scheduler.on_batch_begin(data=self.data)
     new_lr = list(self.torch_model.optimizer.param_groups)[0]['lr']
     self.assertTrue(math.isclose(new_lr, 0.0009999993, rel_tol=1e-5))
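The asserted value is consistent with a half-cosine schedule that starts at step 1 and bottoms out at a small floor. Assuming fe.schedule.cosine_decay takes the common form sketched below (the min_lr=1e-6 floor and start=1 offset are assumptions, not shown in the snippet), the expected 0.0009999993 can be reproduced directly:

import math

def cosine_decay_sketch(step, cycle_length, init_lr, min_lr=1e-6, start=1):
    # Half-cosine from init_lr down to min_lr over one cycle (assumed form).
    t = (step - start) % cycle_length
    return (init_lr - min_lr) / 2 * math.cos(t / cycle_length * math.pi) + (init_lr + min_lr) / 2

print(cosine_decay_sketch(3, cycle_length=3750, init_lr=1e-3))  # ~0.0009999993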
Example #2
 def test_tf_model_on_batch_begin(self):
     lr_scheduler = LRScheduler(model=self.tf_model,
                                lr_fn=lambda step: fe.schedule.cosine_decay(
                                    step, cycle_length=3750, init_lr=1e-3))
     lr_scheduler.system = sample_system_object()
     lr_scheduler.system.global_step = 3
     lr_scheduler.on_batch_begin(data=self.data)
     self.assertTrue(
         math.isclose(self.tf_model.optimizer.lr.numpy(),
                      0.0009999973,
                      rel_tol=1e-5))
Example #3
 def test_torch_model_on_batch_end(self):
     model_name = self.torch_model.model_name + '_lr'
     lr_scheduler = LRScheduler(model=self.torch_model,
                                lr_fn=lambda step: fe.schedule.cosine_decay(
                                    step, cycle_length=3750, init_lr=1e-3))
     lr_scheduler.system = sample_system_object()
     lr_scheduler.system.global_step = 3
     lr_scheduler.system.log_steps = 1
     lr_scheduler.on_batch_end(data=self.data)
     self.assertTrue(
         math.isclose(self.data[model_name], 0.001, rel_tol=1e-3))
Example #4
def _build_estimator(model: Union[tf.keras.Model, torch.nn.Module], trace: Traceability, axis: int = -1):
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    batch_size = 32
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[ExpandDims(inputs="x", outputs="x", axis=axis), Minmax(inputs="x", outputs="x")])
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        LRScheduler(model=model, lr_fn=lambda step: cosine_decay(step, cycle_length=3750, init_lr=1e-3)),
        trace
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=1,
                             traces=traces,
                             max_train_steps_per_epoch=1,
                             max_eval_steps_per_epoch=None)
    fake_data = tf.ones(shape=(batch_size, 28, 28, 1)) if axis == -1 else torch.ones(size=(batch_size, 1, 28, 28))
    model.fe_input_spec = FeInputSpec(fake_data, model)
    return estimator
Example #5
def get_estimator(epochs=2,
                  batch_size=32,
                  save_dir=tempfile.mkdtemp()):
    # step 1
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[ExpandDims(inputs="x", outputs="x"), Minmax(inputs="x", outputs="x")])

    # step 2
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        BestModelSaver(model=model, save_dir=save_dir, metric="accuracy", save_best_mode="max"),
        LRScheduler(model=model, lr_fn=lambda step: cosine_decay(step, cycle_length=3750, init_lr=1e-3))
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces)
    return estimator
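For reference, a sketch of how an estimator returned by this function is normally driven (standard FastEstimator usage):

est = get_estimator(epochs=2)
est.fit()   # train/eval loop; LRScheduler updates the lr every batch
est.test()  # evaluate on the held-out test split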
Example #6
def get_estimator(epochs=2,
                  batch_size=32,
                  max_train_steps_per_epoch=None,
                  max_eval_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp()):
    # step 1
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[
                               ExpandDims(inputs="x", outputs="x"),
                               Minmax(inputs="x", outputs="x")
                           ],
                           num_process=0)

    # step 2
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    print([f"{idx}: {x.name}" for idx, x in enumerate(model.submodules)])
    network = fe.Network(ops=[
        Watch(inputs="x"),
        ModelOp(model=model,
                inputs="x",
                outputs=["y_pred", "embedding"],
                intermediate_layers='dense'),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        GradientOp(finals="embedding", inputs="x", outputs="grads"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        Inspector(),
        BestModelSaver(model=model,
                       save_dir=save_dir,
                       metric="accuracy",
                       save_best_mode="max"),
        LRScheduler(model=model,
                    lr_fn=lambda step: cosine_decay(
                        step, cycle_length=3750, init_lr=1e-3)),
        TensorBoard(log_dir="tf_logs",
                    write_embeddings="embedding",
                    embedding_labels="y")
    ]
    estimator = fe.Estimator(
        pipeline=pipeline,
        network=network,
        epochs=epochs,
        traces=traces,
        max_train_steps_per_epoch=max_train_steps_per_epoch,
        max_eval_steps_per_epoch=max_eval_steps_per_epoch)
    return estimator
Example #7
def get_estimator(epochs=24, batch_size=128, lr_epochs=100, max_train_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp()):
    # step 1: prepare dataset
    train_data, test_data = load_data()
    pipeline = fe.Pipeline(
        train_data=train_data,
        eval_data=test_data,
        batch_size=batch_size,
        ops=[
            Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),
            PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x", mode="train"),
            RandomCrop(32, 32, image_in="x", image_out="x", mode="train"),
            Sometimes(HorizontalFlip(image_in="x", image_out="x", mode="train")),
            CoarseDropout(inputs="x", outputs="x", mode="train", max_holes=1),
            ChannelTranspose(inputs="x", outputs="x"),
            Onehot(inputs="y", outputs="y", mode="train", num_classes=10, label_smoothing=0.2)
        ])

    # step 2: prepare network
    model = fe.build(model_fn=ResNet9, optimizer_fn="sgd")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])

    # get the max learning rate
    lr_max = search_max_lr(pipeline=pipeline, model=model, network=network, epochs=lr_epochs)
    lr_min = lr_max / 40
    print(f"The maximum LR: {lr_max}, and minimun LR: {lr_min}")
    mid_step = int(epochs * 0.45 * len(train_data) / batch_size)
    end_step = int(epochs * len(train_data) / batch_size)

    # reinitialize the model
    model = fe.build(model_fn=ResNet9, optimizer_fn="sgd")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])

    # step 3: prepare estimator
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        BestModelSaver(model=model, save_dir=save_dir, metric="accuracy", save_best_mode="max"),
        LRScheduler(model=model, lr_fn=lambda step: super_schedule(step, lr_max, lr_min, mid_step, end_step))
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             max_train_steps_per_epoch=max_train_steps_per_epoch)
    return estimator
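super_schedule is defined outside this snippet. Given how lr_max, lr_min, mid_step, and end_step are computed above, it is presumably a one-cycle policy that ramps up to lr_max by mid_step and back down afterwards; a minimal sketch under that assumption (the exact shape in the original source may differ):

def super_schedule(step, lr_max, lr_min, mid_step, end_step):
    # One-cycle policy (illustrative sketch, not the original definition)
    if step < mid_step:
        # linear ramp from lr_min up to lr_max
        return lr_min + (lr_max - lr_min) * step / mid_step
    # linear decay from lr_max back toward lr_min by end_step
    return max(lr_min, lr_max - (lr_max - lr_min) * (step - mid_step) / (end_step - mid_step))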
Example #8
def get_estimator(data_dir=None,
                  model_dir=tempfile.mkdtemp(),
                  epochs=20,
                  em_dim=128,
                  batch_size=32,
                  max_train_steps_per_epoch=None,
                  max_eval_steps_per_epoch=None):
    train_ds, eval_ds, test_ds = tednmt.load_data(data_dir, translate_option="pt_to_en")
    pt_tokenizer = BertTokenizer.from_pretrained("neuralmind/bert-base-portuguese-cased")
    en_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    pipeline = fe.Pipeline(
        train_data=train_ds,
        eval_data=eval_ds,
        test_data=test_ds,
        batch_size=batch_size,
        ops=[
            Encode(inputs="source", outputs="source", tokenizer=pt_tokenizer),
            Encode(inputs="target", outputs="target", tokenizer=en_tokenizer)
        ],
        pad_value=0)
    model = fe.build(
        model_fn=lambda: Transformer(num_layers=4,
                                     em_dim=em_dim,
                                     num_heads=8,
                                     ff_dim=512,
                                     input_vocab=pt_tokenizer.vocab_size,
                                     target_vocab=en_tokenizer.vocab_size,
                                     max_pos_enc=1000,
                                     max_pos_dec=1000),
        optimizer_fn="adam")
    network = fe.Network(ops=[
        ShiftData(inputs="target", outputs=("target_inp", "target_real")),
        CreateMasks(inputs=("source", "target_inp"),
                    outputs=("encode_pad_mask", "decode_pad_mask", "dec_look_ahead_mask")),
        ModelOp(model=model,
                inputs=("source", "target_inp", "encode_pad_mask", "decode_pad_mask", "dec_look_ahead_mask"),
                outputs="pred"),
        MaskedCrossEntropy(inputs=("pred", "target_real"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    traces = [
        MaskedAccuracy(inputs=("pred", "target_real"), outputs="masked_acc", mode="!train"),
        BestModelSaver(model=model, save_dir=model_dir, metric="masked_acc", save_best_mode="max"),
        LRScheduler(model=model, lr_fn=lambda step: lr_fn(step, em_dim))
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             traces=traces,
                             epochs=epochs,
                             max_train_steps_per_epoch=max_train_steps_per_epoch,
                             max_eval_steps_per_epoch=max_eval_steps_per_epoch)
    return estimator
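lr_fn(step, em_dim) is not shown here. Since it receives the embedding dimension, it is presumably the warmup-then-inverse-sqrt schedule from the original Transformer paper; a sketch under that assumption (the 4000-step warmup is the paper's default, not confirmed by this snippet):

def lr_fn(step, em_dim, warmup=4000):
    # Noam schedule: linear warmup, then decay proportional to 1/sqrt(step)
    return em_dim ** -0.5 * min(step ** -0.5, step * warmup ** -1.5)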
Example #9
def get_estimator(epochs=30,
                  batch_size=128,
                  seq_length=20,
                  vocab_size=10000,
                  data_dir=None,
                  train_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp()):
    train_data, eval_data, _, _ = load_data(root_dir=data_dir,
                                            seq_length=seq_length + 1)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           ops=[
                               CreateInputAndTarget(inputs="x",
                                                    outputs=("x", "y")),
                               Batch(batch_size=batch_size, drop_last=True)
                           ])
    # step 2
    model = fe.build(
        model_fn=lambda: BuildModel(
            vocab_size, embedding_dim=300, rnn_units=600),
        optimizer_fn=lambda x: torch.optim.SGD(x, lr=1.0, momentum=0.9))
    network = fe.Network(ops=[
        DimesionAdjust(inputs=("x", "y"), outputs=("x", "y")),
        ModelOp(model=model, inputs="x", outputs="y_pred", mode=None),
        CrossEntropy(inputs=("y_pred", "y"),
                     outputs="ce",
                     form="sparse",
                     from_logits=True),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3
    traces = [
        Perplexity(inputs="ce", outputs="perplexity", mode="eval"),
        LRScheduler(model=model,
                    lr_fn=lambda step: lr_schedule(step, init_lr=1.0)),
        BestModelSaver(model=model,
                       save_dir=save_dir,
                       metric='perplexity',
                       save_best_mode='min',
                       load_best_final=True),
        EarlyStopping(monitor="perplexity", patience=5)
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             train_steps_per_epoch=train_steps_per_epoch)
    return estimator
Example #10
def search_max_lr(pipeline, model, network, epochs):
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"), LRScheduler(model=model, lr_fn=lambda step: linear_increase(step))
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             max_train_steps_per_epoch=10,
                             log_steps=10)
    print("Running LR range test for super convergence. It will take a while...")
    with Suppressor():
        summary = estimator.fit("LR_range_test")

    best_step = max(summary.history["eval"]["accuracy"].items(), key=lambda k: k[1])[0]
    max_lr = summary.history["train"]["model_lr"][best_step]
    return max_lr
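linear_increase is the undefined piece of this LR range test; any monotone ramp will do, since the goal is only to sweep the learning rate upward while accuracy is logged every 10 steps. A minimal sketch (the slope is an illustrative assumption):

def linear_increase(step, gain=1e-5):
    # Sweep the learning rate linearly upward for the range test
    return step * gain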
Example #11
def get_estimator():
    # step 1
    train_data, eval_data = mnist.load_data()
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           batch_size=32,
                           ops=[ExpandDims(inputs="x", outputs="x"), Minmax(inputs="x", outputs="x")])
    # step 2
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             traces=LRScheduler(model=model, lr_fn="arc"),
                             epochs=30,
                             max_train_steps_per_epoch=500)
    return estimator
Example #12
 def create_estimator_for_arc(self, model, use_eval, axis):
     train_data, eval_data = mnist.load_data()
     pipeline = fe.Pipeline(train_data=train_data,
                            eval_data=eval_data if use_eval else None,
                            batch_size=8,
                            ops=[
                                ExpandDims(inputs="x",
                                           outputs="x",
                                           axis=axis),
                                Minmax(inputs="x", outputs="x")
                            ])
     network = fe.Network(ops=[
         ModelOp(model=model, inputs="x", outputs="y_pred"),
         CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
         UpdateOp(model=model, loss_name="ce")
     ])
     estimator = fe.Estimator(pipeline=pipeline,
                              network=network,
                              epochs=2,
                              traces=LRScheduler(model=model, lr_fn=ARC(1)),
                              max_train_steps_per_epoch=10)
     return estimator
Example #13
def get_estimator(epochs=24, batch_size=512, max_train_steps_per_epoch=None, save_dir=tempfile.mkdtemp()):
    # step 1: prepare dataset
    train_data, test_data = load_data()
    pipeline = fe.Pipeline(
        train_data=train_data,
        test_data=test_data,
        batch_size=batch_size,
        ops=[
            Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),
            PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x", mode="train"),
            RandomCrop(32, 32, image_in="x", image_out="x", mode="train"),
            Sometimes(HorizontalFlip(image_in="x", image_out="x", mode="train")),
            CoarseDropout(inputs="x", outputs="x", mode="train", max_holes=1),
            ChannelTranspose(inputs="x", outputs="x"),
            Onehot(inputs="y", outputs="y", mode="train", num_classes=10, label_smoothing=0.2)
        ])

    # step 2: prepare network
    model = fe.build(model_fn=FastCifar, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])

    # step 3: prepare estimator
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        BestModelSaver(model=model, save_dir=save_dir, metric="accuracy", save_best_mode="max"),
        LRScheduler(model=model, lr_fn=lr_schedule)
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             max_train_steps_per_epoch=max_train_steps_per_epoch)
    return estimator
Example #14
def get_estimator(weight=10.0,
                  epochs=200,
                  batch_size=1,
                  max_train_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp(),
                  data_dir=None):
    train_data, _ = load_data(batch_size=batch_size, root_dir=data_dir)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    pipeline = fe.Pipeline(train_data=train_data,
                           ops=[
                               ReadImage(inputs=["A", "B"], outputs=["A",
                                                                     "B"]),
                               Normalize(inputs=["A", "B"],
                                         outputs=["real_A", "real_B"],
                                         mean=1.0,
                                         std=1.0,
                                         max_pixel_value=127.5),
                               Resize(height=286,
                                      width=286,
                                      image_in="real_A",
                                      image_out="real_A",
                                      mode="train"),
                               RandomCrop(height=256,
                                          width=256,
                                          image_in="real_A",
                                          image_out="real_A",
                                          mode="train"),
                               Resize(height=286,
                                      width=286,
                                      image_in="real_B",
                                      image_out="real_B",
                                      mode="train"),
                               RandomCrop(height=256,
                                          width=256,
                                          image_in="real_B",
                                          image_out="real_B",
                                          mode="train"),
                               Sometimes(
                                   HorizontalFlip(image_in="real_A",
                                                  image_out="real_A",
                                                  mode="train")),
                               Sometimes(
                                   HorizontalFlip(image_in="real_B",
                                                  image_out="real_B",
                                                  mode="train")),
                               ChannelTranspose(inputs=["real_A", "real_B"],
                                                outputs=["real_A", "real_B"])
                           ])

    g_AtoB = fe.build(model_fn=Generator,
                      model_name="g_AtoB",
                      optimizer_fn=lambda x: torch.optim.Adam(
                          x, lr=2e-4, betas=(0.5, 0.999)))
    g_BtoA = fe.build(model_fn=Generator,
                      model_name="g_BtoA",
                      optimizer_fn=lambda x: torch.optim.Adam(
                          x, lr=2e-4, betas=(0.5, 0.999)))
    d_A = fe.build(model_fn=Discriminator,
                   model_name="d_A",
                   optimizer_fn=lambda x: torch.optim.Adam(
                       x, lr=2e-4, betas=(0.5, 0.999)))
    d_B = fe.build(model_fn=Discriminator,
                   model_name="d_B",
                   optimizer_fn=lambda x: torch.optim.Adam(
                       x, lr=2e-4, betas=(0.5, 0.999)))

    network = fe.Network(ops=[
        ModelOp(inputs="real_A", model=g_AtoB, outputs="fake_B"),
        ModelOp(inputs="real_B", model=g_BtoA, outputs="fake_A"),
        Buffer(image_in="fake_A", image_out="buffer_fake_A"),
        Buffer(image_in="fake_B", image_out="buffer_fake_B"),
        ModelOp(inputs="real_A", model=d_A, outputs="d_real_A"),
        ModelOp(inputs="fake_A", model=d_A, outputs="d_fake_A"),
        ModelOp(inputs="buffer_fake_A", model=d_A, outputs="buffer_d_fake_A"),
        ModelOp(inputs="real_B", model=d_B, outputs="d_real_B"),
        ModelOp(inputs="fake_B", model=d_B, outputs="d_fake_B"),
        ModelOp(inputs="buffer_fake_B", model=d_B, outputs="buffer_d_fake_B"),
        ModelOp(inputs="real_A", model=g_BtoA, outputs="same_A"),
        ModelOp(inputs="fake_B", model=g_BtoA, outputs="cycled_A"),
        ModelOp(inputs="real_B", model=g_AtoB, outputs="same_B"),
        ModelOp(inputs="fake_A", model=g_AtoB, outputs="cycled_B"),
        GLoss(inputs=("real_A", "d_fake_B", "cycled_A", "same_A"),
              weight=weight,
              device=device,
              outputs="g_AtoB_loss"),
        GLoss(inputs=("real_B", "d_fake_A", "cycled_B", "same_B"),
              weight=weight,
              device=device,
              outputs="g_BtoA_loss"),
        DLoss(inputs=("d_real_A", "buffer_d_fake_A"),
              outputs="d_A_loss",
              device=device),
        DLoss(inputs=("d_real_B", "buffer_d_fake_B"),
              outputs="d_B_loss",
              device=device),
        UpdateOp(model=g_AtoB, loss_name="g_AtoB_loss"),
        UpdateOp(model=g_BtoA, loss_name="g_BtoA_loss"),
        UpdateOp(model=d_A, loss_name="d_A_loss"),
        UpdateOp(model=d_B, loss_name="d_B_loss")
    ])

    traces = [
        ModelSaver(model=g_AtoB, save_dir=save_dir, frequency=10),
        ModelSaver(model=g_BtoA, save_dir=save_dir, frequency=10),
        LRScheduler(model=g_AtoB, lr_fn=lr_schedule),
        LRScheduler(model=g_BtoA, lr_fn=lr_schedule),
        LRScheduler(model=d_A, lr_fn=lr_schedule),
        LRScheduler(model=d_B, lr_fn=lr_schedule)
    ]

    estimator = fe.Estimator(
        network=network,
        pipeline=pipeline,
        epochs=epochs,
        traces=traces,
        max_train_steps_per_epoch=max_train_steps_per_epoch)

    return estimator
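The lr_schedule shared by all four models is not included in the snippet. CycleGAN conventionally holds the learning rate constant for the first half of training and decays it linearly to zero over the second half, and FastEstimator's LRScheduler calls the function once per epoch when its argument is named "epoch". A sketch of that policy for epochs=200 (assumed from the paper, not taken from the missing source):

def lr_schedule(epoch, init_lr=2e-4, total_epochs=200, decay_start=100):
    # Hold init_lr for the first decay_start epochs, then decay linearly to 0
    if epoch <= decay_start:
        return init_lr
    return init_lr * (total_epochs - epoch) / (total_epochs - decay_start)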
Example #15
def get_estimator(epochs=200,
                  batch_size=128,
                  max_train_steps_per_epoch=None,
                  max_eval_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp(),
                  data_dir=None):
    # step 1. prepare pipeline
    train_data, eval_data = omniglot.load_data(root_dir=data_dir)
    test_data = eval_data.split(0.5)

    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[
                               ReadImage(inputs="x_a",
                                         outputs="x_a",
                                         color_flag='gray'),
                               ReadImage(inputs="x_b",
                                         outputs="x_b",
                                         color_flag='gray'),
                               Sometimes(ShiftScaleRotate(image_in="x_a",
                                                          image_out="x_a",
                                                          shift_limit=0.05,
                                                          scale_limit=0.2,
                                                          rotate_limit=10,
                                                          mode="train"),
                                         prob=0.89),
                               Sometimes(ShiftScaleRotate(image_in="x_b",
                                                          image_out="x_b",
                                                          shift_limit=0.05,
                                                          scale_limit=0.2,
                                                          rotate_limit=10,
                                                          mode="train"),
                                         prob=0.89),
                               Minmax(inputs="x_a", outputs="x_a"),
                               Minmax(inputs="x_b", outputs="x_b")
                           ])

    # step 2. prepare model
    model = fe.build(model_fn=siamese_network,
                     model_name="siamese_net",
                     optimizer_fn="adam")

    network = fe.Network(ops=[
        ModelOp(inputs=["x_a", "x_b"], model=model, outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="loss", form="binary"),
        UpdateOp(model=model, loss_name="loss")
    ])

    # step 3: prepare estimator
    traces = [
        LRScheduler(model=model, lr_fn=lr_schedule),
        Accuracy(true_key="y", pred_key="y_pred"),
        OneShotAccuracy(dataset=eval_data,
                        model=model,
                        output_name='one_shot_accuracy'),
        BestModelSaver(model=model,
                       save_dir=save_dir,
                       metric="one_shot_accuracy",
                       save_best_mode="max"),
        EarlyStopping(monitor="one_shot_accuracy",
                      patience=20,
                      compare='max',
                      mode="eval")
    ]

    estimator = fe.Estimator(
        network=network,
        pipeline=pipeline,
        epochs=epochs,
        traces=traces,
        max_train_steps_per_epoch=max_train_steps_per_epoch,
        max_eval_steps_per_epoch=max_eval_steps_per_epoch)
    return estimator
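lr_schedule is again external to the snippet. The one-shot siamese paper decays the learning rate by one percent per epoch, so a plausible stand-in is the sketch below (the initial value is an assumption):

def lr_schedule(epoch, init_lr=1e-4):
    # Decay the learning rate by 1% per epoch (illustrative)
    return init_lr * 0.99 ** (epoch - 1)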
Example #16
def get_estimator(data_dir=None,
                  epochs=12,
                  batch_size_per_gpu=4,
                  im_size=1344,
                  model_dir=tempfile.mkdtemp(),
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None):
    assert im_size % 32 == 0, "im_size must be a multiple of 32"
    num_device = get_num_devices()
    train_ds, val_ds = mscoco.load_data(root_dir=data_dir, load_masks=True)
    batch_size = num_device * batch_size_per_gpu
    pipeline = fe.Pipeline(
        train_data=train_ds,
        eval_data=val_ds,
        test_data=val_ds,
        ops=[
            ReadImage(inputs="image", outputs="image"),
            MergeMask(inputs="mask", outputs="mask"),
            GetImageSize(inputs="image", outputs="imsize", mode="test"),
            LongestMaxSize(max_size=im_size,
                           image_in="image",
                           mask_in="mask",
                           bbox_in="bbox",
                           bbox_params="coco"),
            RemoveIf(fn=lambda x: len(x) == 0, inputs="bbox"),
            PadIfNeeded(min_height=im_size,
                        min_width=im_size,
                        image_in="image",
                        mask_in="mask",
                        bbox_in="bbox",
                        bbox_params="coco",
                        border_mode=cv2.BORDER_CONSTANT,
                        value=0),
            Sometimes(
                HorizontalFlip(image_in="image",
                               mask_in="mask",
                               bbox_in="bbox",
                               bbox_params="coco",
                               mode="train")),
            Resize(height=im_size // 4, width=im_size // 4,
                   image_in='mask'),  # downscale mask for memory efficiency
            Gt2Target(inputs=("mask", "bbox"),
                      outputs=("gt_match", "mask", "classes")),
            Delete(keys="bbox"),
            Delete(keys="image_id", mode="!test"),
            Batch(batch_size=batch_size, pad_value=0)
        ],
        num_process=8 * num_device)
    init_lr = 1e-2 / 16 * batch_size
    model = fe.build(
        model_fn=SoloV2,
        optimizer_fn=lambda x: torch.optim.SGD(x, lr=init_lr, momentum=0.9))
    network = fe.Network(ops=[
        Normalize(inputs="image",
                  outputs="image",
                  mean=(0.485, 0.456, 0.406),
                  std=(0.229, 0.224, 0.225)),
        Permute(inputs="image", outputs='image'),
        ModelOp(model=model,
                inputs="image",
                outputs=("feat_seg", "feat_cls_list", "feat_kernel_list")),
        LambdaOp(fn=lambda x: x,
                 inputs="feat_cls_list",
                 outputs=("cls1", "cls2", "cls3", "cls4", "cls5")),
        LambdaOp(fn=lambda x: x,
                 inputs="feat_kernel_list",
                 outputs=("k1", "k2", "k3", "k4", "k5")),
        Solov2Loss(0,
                   40,
                   inputs=("mask", "classes", "gt_match", "feat_seg", "cls1",
                           "k1"),
                   outputs=("l_c1", "l_s1")),
        Solov2Loss(1,
                   36,
                   inputs=("mask", "classes", "gt_match", "feat_seg", "cls2",
                           "k2"),
                   outputs=("l_c2", "l_s2")),
        Solov2Loss(2,
                   24,
                   inputs=("mask", "classes", "gt_match", "feat_seg", "cls3",
                           "k3"),
                   outputs=("l_c3", "l_s3")),
        Solov2Loss(3,
                   16,
                   inputs=("mask", "classes", "gt_match", "feat_seg", "cls4",
                           "k4"),
                   outputs=("l_c4", "l_s4")),
        Solov2Loss(4,
                   12,
                   inputs=("mask", "classes", "gt_match", "feat_seg", "cls5",
                           "k5"),
                   outputs=("l_c5", "l_s5")),
        CombineLoss(inputs=("l_c1", "l_s1", "l_c2", "l_s2", "l_c3", "l_s3",
                            "l_c4", "l_s4", "l_c5", "l_s5"),
                    outputs=("total_loss", "cls_loss", "seg_loss")),
        L2Regularizaton(inputs="total_loss",
                        outputs="total_loss_l2",
                        model=model,
                        beta=1e-5,
                        mode="train"),
        UpdateOp(model=model, loss_name="total_loss_l2"),
        PointsNMS(inputs="feat_cls_list", outputs="feat_cls_list",
                  mode="test"),
        Predict(inputs=("feat_seg", "feat_cls_list", "feat_kernel_list"),
                outputs=("seg_preds", "cate_scores", "cate_labels"),
                mode="test")
    ])
    train_steps_epoch = int(np.ceil(len(train_ds) / batch_size))
    lr_schedule = {
        1:
        LRScheduler(
            model=model,
            lr_fn=lambda step: lr_schedule_warmup(step, init_lr=init_lr)),
        2:
        LRScheduler(
            model=model,
            lr_fn=lambda step: cosine_decay(step,
                                            cycle_length=train_steps_epoch *
                                            (epochs - 1),
                                            init_lr=init_lr,
                                            min_lr=init_lr / 100,
                                            start=train_steps_epoch))
    }
    traces = [
        EpochScheduler(lr_schedule),
        COCOMaskmAP(data_dir=val_ds.root_dir,
                    inputs=("seg_preds", "cate_scores", "cate_labels",
                            "image_id", "imsize"),
                    mode="test"),
        BestModelSaver(model=model, save_dir=model_dir, metric="total_loss")
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             monitor_names=("cls_loss", "seg_loss"),
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch)
    return estimator
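lr_schedule_warmup, which the EpochScheduler applies during epoch 1 before switching to cosine decay, is not defined here. Warmup for detection models typically ramps linearly from a fraction of init_lr to the full value; a sketch under that assumption (the warmup length and starting fraction are guesses):

def lr_schedule_warmup(step, init_lr, warmup_steps=1000, start_factor=0.1):
    # Linear warmup from start_factor * init_lr up to init_lr (illustrative)
    if step < warmup_steps:
        return init_lr * (start_factor + (1 - start_factor) * step / warmup_steps)
    return init_lr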
Example #17
def get_estimator(weight=10.0,
                  epochs=200,
                  batch_size=1,
                  train_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp(),
                  data_dir=None):
    train_data, _ = load_data(batch_size=batch_size, root_dir=data_dir)

    pipeline = fe.Pipeline(
        train_data=train_data,
        ops=[
            ReadImage(inputs=["A", "B"], outputs=["A", "B"]),
            Normalize(inputs=["A", "B"], outputs=["real_A", "real_B"], mean=1.0, std=1.0, max_pixel_value=127.5),
            Resize(height=286, width=286, image_in="real_A", image_out="real_A", mode="train"),
            RandomCrop(height=256, width=256, image_in="real_A", image_out="real_A", mode="train"),
            Resize(height=286, width=286, image_in="real_B", image_out="real_B", mode="train"),
            RandomCrop(height=256, width=256, image_in="real_B", image_out="real_B", mode="train"),
            Sometimes(HorizontalFlip(image_in="real_A", image_out="real_A", mode="train")),
            Sometimes(HorizontalFlip(image_in="real_B", image_out="real_B", mode="train")),
            PlaceholderOp(outputs=("index_A", "buffer_A")),
            PlaceholderOp(outputs=("index_B", "buffer_B"))
        ])

    g_AtoB = fe.build(model_fn=build_generator, model_name="g_AtoB", optimizer_fn=lambda: tf.optimizers.Adam(2e-4, 0.5))
    g_BtoA = fe.build(model_fn=build_generator, model_name="g_BtoA", optimizer_fn=lambda: tf.optimizers.Adam(2e-4, 0.5))
    d_A = fe.build(model_fn=build_discriminator, model_name="d_A", optimizer_fn=lambda: tf.optimizers.Adam(2e-4, 0.5))
    d_B = fe.build(model_fn=build_discriminator, model_name="d_B", optimizer_fn=lambda: tf.optimizers.Adam(2e-4, 0.5))

    network = fe.Network(ops=[
        ModelOp(inputs="real_A", model=g_AtoB, outputs="fake_B"),
        ModelOp(inputs="real_B", model=g_BtoA, outputs="fake_A"),
        Buffer(image_in="fake_A", buffer_in="buffer_A", index_in="index_A", image_out="buffer_fake_A"),
        Buffer(image_in="fake_B", buffer_in="buffer_B", index_in="index_B", image_out="buffer_fake_B"),
        ModelOp(inputs="real_A", model=d_A, outputs="d_real_A"),
        ModelOp(inputs="fake_A", model=d_A, outputs="d_fake_A"),
        ModelOp(inputs="buffer_fake_A", model=d_A, outputs="buffer_d_fake_A"),
        ModelOp(inputs="real_B", model=d_B, outputs="d_real_B"),
        ModelOp(inputs="fake_B", model=d_B, outputs="d_fake_B"),
        ModelOp(inputs="buffer_fake_B", model=d_B, outputs="buffer_d_fake_B"),
        ModelOp(inputs="real_A", model=g_BtoA, outputs="same_A"),
        ModelOp(inputs="fake_B", model=g_BtoA, outputs="cycled_A"),
        ModelOp(inputs="real_B", model=g_AtoB, outputs="same_B"),
        ModelOp(inputs="fake_A", model=g_AtoB, outputs="cycled_B"),
        GLoss(inputs=("real_A", "d_fake_B", "cycled_A", "same_A"), weight=weight, outputs="g_AtoB_loss"),
        GLoss(inputs=("real_B", "d_fake_A", "cycled_B", "same_B"), weight=weight, outputs="g_BtoA_loss"),
        DLoss(inputs=("d_real_A", "buffer_d_fake_A"), outputs="d_A_loss"),
        DLoss(inputs=("d_real_B", "buffer_d_fake_B"), outputs="d_B_loss"),
        UpdateOp(model=g_AtoB, loss_name="g_AtoB_loss"),
        UpdateOp(model=g_BtoA, loss_name="g_BtoA_loss"),
        UpdateOp(model=d_A, loss_name="d_A_loss"),
        UpdateOp(model=d_B, loss_name="d_B_loss")
    ])

    traces = [
        BufferUpdate(input_name="fake_A",
                     buffer_size=50,
                     batch_size=batch_size,
                     mode="train",
                     output_name=["buffer_A", "index_A"]),
        BufferUpdate(input_name="fake_B",
                     buffer_size=50,
                     batch_size=batch_size,
                     mode="train",
                     output_name=["buffer_B", "index_B"]),
        ModelSaver(model=g_AtoB, save_dir=save_dir, frequency=5),
        ModelSaver(model=g_BtoA, save_dir=save_dir, frequency=5),
        LRScheduler(model=g_AtoB, lr_fn=lr_schedule),
        LRScheduler(model=g_BtoA, lr_fn=lr_schedule),
        LRScheduler(model=d_A, lr_fn=lr_schedule),
        LRScheduler(model=d_B, lr_fn=lr_schedule)
    ]

    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             traces=traces,
                             train_steps_per_epoch=train_steps_per_epoch)

    return estimator
Example #18
def get_estimator(batch_size=8,
                  epochs=50,
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp(),
                  data_dir=None):
    # load CUB200 dataset.
    train_data = cub200.load_data(root_dir=data_dir)
    eval_data = train_data.split(0.3)
    test_data = eval_data.split(0.5)

    # step 1, pipeline
    pipeline = fe.Pipeline(batch_size=batch_size,
                           train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           ops=[
                               ReadImage(inputs="image",
                                         outputs="image",
                                         parent_path=train_data.parent_path),
                               Normalize(inputs="image",
                                         outputs="image",
                                         mean=1.0,
                                         std=1.0,
                                         max_pixel_value=127.5),
                               ReadMat(file='annotation',
                                       keys="seg",
                                       parent_path=train_data.parent_path),
                               LongestMaxSize(max_size=512,
                                              image_in="image",
                                              image_out="image",
                                              mask_in="seg",
                                              mask_out="seg"),
                               PadIfNeeded(min_height=512,
                                           min_width=512,
                                           image_in="image",
                                           image_out="image",
                                           mask_in="seg",
                                           mask_out="seg",
                                           border_mode=cv2.BORDER_CONSTANT,
                                           value=0,
                                           mask_value=0),
                               ShiftScaleRotate(
                                   image_in="image",
                                   mask_in="seg",
                                   image_out="image",
                                   mask_out="seg",
                                   mode="train",
                                   shift_limit=0.2,
                                   rotate_limit=15.0,
                                   scale_limit=0.2,
                                   border_mode=cv2.BORDER_CONSTANT,
                                   value=0,
                                   mask_value=0),
                               Sometimes(
                                   HorizontalFlip(image_in="image",
                                                  mask_in="seg",
                                                  image_out="image",
                                                  mask_out="seg",
                                                  mode="train")),
                               Reshape(shape=(512, 512, 1),
                                       inputs="seg",
                                       outputs="seg")
                           ])

    # step 2, network
    resunet50 = fe.build(model_fn=ResUnet50,
                         model_name="resunet50",
                         optimizer_fn=lambda: tf.optimizers.Adam(1e-4))
    uncertainty = fe.build(model_fn=UncertaintyLossNet,
                           model_name="uncertainty",
                           optimizer_fn=lambda: tf.optimizers.Adam(2e-5))

    network = fe.Network(ops=[
        ModelOp(inputs='image',
                model=resunet50,
                outputs=["label_pred", "mask_pred"]),
        CrossEntropy(inputs=["label_pred", "label"],
                     outputs="cls_loss",
                     form="sparse",
                     average_loss=False),
        CrossEntropy(inputs=["mask_pred", "seg"],
                     outputs="seg_loss",
                     form="binary",
                     average_loss=False),
        ModelOp(inputs=["cls_loss", "seg_loss"],
                model=uncertainty,
                outputs="total_loss"),
        ReduceLoss(inputs="total_loss", outputs="total_loss"),
        UpdateOp(model=resunet50, loss_name="total_loss"),
        UpdateOp(model=uncertainty, loss_name="total_loss")
    ])

    # step 3, estimator
    traces = [
        Accuracy(true_key="label", pred_key="label_pred"),
        Dice(true_key="seg", pred_key='mask_pred'),
        BestModelSaver(model=resunet50,
                       save_dir=save_dir,
                       metric="total_loss",
                       save_best_mode="min"),
        LRScheduler(model=resunet50,
                    lr_fn=lambda step: cosine_decay(
                        step, cycle_length=26400, init_lr=1e-4))
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             traces=traces,
                             epochs=epochs,
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch,
                             log_steps=500)

    return estimator
Example #19
def get_estimator(epochs=150,
                  batch_size=32,
                  save_dir=tempfile.mkdtemp(),
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None):
    # step 1: prepare dataset
    train_data, eval_data = load_data()
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           batch_size=batch_size * get_num_devices(),
                           ops=[
                               Normalize(inputs="x",
                                         outputs="x",
                                         mean=(0.4914, 0.4822, 0.4465),
                                         std=(0.2471, 0.2435, 0.2616)),
                               PadIfNeeded(min_height=40,
                                           min_width=40,
                                           image_in="x",
                                           image_out="x",
                                           mode="train"),
                               RandomCrop(32,
                                          32,
                                          image_in="x",
                                          image_out="x",
                                          mode="train"),
                               Sometimes(
                                   HorizontalFlip(image_in="x",
                                                  image_out="x",
                                                  mode="train")),
                               CoarseDropout(inputs="x",
                                             outputs="x",
                                             mode="train",
                                             max_holes=1)
                           ])

    # step 2: prepare network
    model = fe.build(
        model_fn=lambda: pyramidnet_cifar(inputs_shape=(32, 32, 3),
                                          depth=272,
                                          alpha=200,
                                          num_classes=10,
                                          bottleneck=True),
        optimizer_fn=lambda: tfa.optimizers.SGDW(
            weight_decay=0.0001, lr=0.1, momentum=0.9))

    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce", from_logits=True),
        UpdateOp(model=model, loss_name="ce")
    ])

    # step 3: prepare estimator
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        LRScheduler(model=model, lr_fn=lr_schedule),
        BestModelSaver(model=model,
                       save_dir=save_dir,
                       metric="accuracy",
                       save_best_mode="max")
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch)
    return estimator
Example #20
def get_estimator(data_dir=None,
                  model_dir=tempfile.mkdtemp(),
                  epochs=200,
                  batch_size_per_gpu=32,
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None):
    num_device = get_num_devices()
    train_ds, val_ds = mscoco.load_data(root_dir=data_dir)
    train_ds = PreMosaicDataset(mscoco_ds=train_ds)
    batch_size = num_device * batch_size_per_gpu
    pipeline = fe.Pipeline(
        train_data=train_ds,
        eval_data=val_ds,
        ops=[
            ReadImage(inputs=("image1", "image2", "image3", "image4"),
                      outputs=("image1", "image2", "image3", "image4"),
                      mode="train"),
            ReadImage(inputs="image", outputs="image", mode="eval"),
            LongestMaxSize(max_size=640,
                           image_in="image1",
                           bbox_in="bbox1",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="train"),
            LongestMaxSize(max_size=640,
                           image_in="image2",
                           bbox_in="bbox2",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="train"),
            LongestMaxSize(max_size=640,
                           image_in="image3",
                           bbox_in="bbox3",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="train"),
            LongestMaxSize(max_size=640,
                           image_in="image4",
                           bbox_in="bbox4",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="train"),
            LongestMaxSize(max_size=640,
                           image_in="image",
                           bbox_in="bbox",
                           bbox_params=BboxParams("coco", min_area=1.0),
                           mode="eval"),
            PadIfNeeded(min_height=640,
                        min_width=640,
                        image_in="image",
                        bbox_in="bbox",
                        bbox_params=BboxParams("coco", min_area=1.0),
                        mode="eval",
                        border_mode=cv2.BORDER_CONSTANT,
                        value=(114, 114, 114)),
            CombineMosaic(inputs=("image1", "image2", "image3", "image4",
                                  "bbox1", "bbox2", "bbox3", "bbox4"),
                          outputs=("image", "bbox"),
                          mode="train"),
            CenterCrop(height=640,
                       width=640,
                       image_in="image",
                       bbox_in="bbox",
                       bbox_params=BboxParams("coco", min_area=1.0),
                       mode="train"),
            Sometimes(
                HorizontalFlip(image_in="image",
                               bbox_in="bbox",
                               bbox_params=BboxParams("coco", min_area=1.0),
                               mode="train")),
            HSVAugment(inputs="image", outputs="image", mode="train"),
            ToArray(inputs="bbox", outputs="bbox", dtype="float32"),
            CategoryID2ClassID(inputs="bbox", outputs="bbox"),
            GTBox(inputs="bbox",
                  outputs=("gt_sbbox", "gt_mbbox", "gt_lbbox"),
                  image_size=640),
            Delete(keys=("image1", "image2", "image3", "image4", "bbox1",
                         "bbox2", "bbox3", "bbox4", "bbox"),
                   mode="train"),
            Delete(keys="image_id", mode="eval"),
            Batch(batch_size=batch_size, pad_value=0)
        ])
    init_lr = 1e-2 / 64 * batch_size
    model = fe.build(
        lambda: YoloV5(w=640, h=640, c=3),
        optimizer_fn=lambda x: torch.optim.SGD(
            x, lr=init_lr, momentum=0.937, weight_decay=0.0005, nesterov=True),
        mixed_precision=True)
    network = fe.Network(ops=[
        RescaleTranspose(inputs="image", outputs="image"),
        ModelOp(model=model,
                inputs="image",
                outputs=("pred_s", "pred_m", "pred_l")),
        DecodePred(inputs=("pred_s", "pred_m", "pred_l"),
                   outputs=("pred_s", "pred_m", "pred_l")),
        ComputeLoss(inputs=("pred_s", "gt_sbbox"),
                    outputs=("sbbox_loss", "sconf_loss", "scls_loss")),
        ComputeLoss(inputs=("pred_m", "gt_mbbox"),
                    outputs=("mbbox_loss", "mconf_loss", "mcls_loss")),
        ComputeLoss(inputs=("pred_l", "gt_lbbox"),
                    outputs=("lbbox_loss", "lconf_loss", "lcls_loss")),
        Average(inputs=("sbbox_loss", "mbbox_loss", "lbbox_loss"),
                outputs="bbox_loss"),
        Average(inputs=("sconf_loss", "mconf_loss", "lconf_loss"),
                outputs="conf_loss"),
        Average(inputs=("scls_loss", "mcls_loss", "lcls_loss"),
                outputs="cls_loss"),
        Average(inputs=("bbox_loss", "conf_loss", "cls_loss"),
                outputs="total_loss"),
        PredictBox(width=640,
                   height=640,
                   inputs=("pred_s", "pred_m", "pred_l"),
                   outputs="box_pred",
                   mode="eval"),
        UpdateOp(model=model, loss_name="total_loss")
    ])
    traces = [
        MeanAveragePrecision(num_classes=80,
                             true_key='bbox',
                             pred_key='box_pred',
                             mode="eval"),
        BestModelSaver(model=model,
                       save_dir=model_dir,
                       metric='mAP',
                       save_best_mode="max")
    ]
    lr_schedule = {
        1:
        LRScheduler(model=model,
                    lr_fn=lambda step: lr_schedule_warmup(
                        step,
                        train_steps_epoch=np.ceil(len(train_ds) / batch_size),
                        init_lr=init_lr)),
        4:
        LRScheduler(model=model,
                    lr_fn=lambda epoch: cosine_decay(epoch,
                                                     cycle_length=epochs - 3,
                                                     init_lr=init_lr,
                                                     min_lr=init_lr / 100,
                                                     start=4))
    }
    traces.append(EpochScheduler(lr_schedule))
    estimator = fe.Estimator(
        pipeline=pipeline,
        network=network,
        epochs=epochs,
        traces=traces,
        monitor_names=["bbox_loss", "conf_loss", "cls_loss"],
        train_steps_per_epoch=train_steps_per_epoch,
        eval_steps_per_epoch=eval_steps_per_epoch)
    return estimator
Example #21
def get_estimator(data_dir=None,
                  model_dir=tempfile.mkdtemp(),
                  batch_size=16,
                  epochs=13,
                  max_train_steps_per_epoch=None,
                  max_eval_steps_per_epoch=None,
                  image_size=512,
                  num_classes=90):
    # pipeline
    train_ds, eval_ds = mscoco.load_data(root_dir=data_dir)
    pipeline = fe.Pipeline(
        train_data=train_ds,
        eval_data=eval_ds,
        batch_size=batch_size,
        ops=[
            ReadImage(inputs="image", outputs="image"),
            LongestMaxSize(image_size,
                           image_in="image",
                           image_out="image",
                           bbox_in="bbox",
                           bbox_out="bbox",
                           bbox_params=BboxParams("coco", min_area=1.0)),
            PadIfNeeded(
                image_size,
                image_size,
                border_mode=cv2.BORDER_CONSTANT,
                image_in="image",
                image_out="image",
                bbox_in="bbox",
                bbox_out="bbox",
                bbox_params=BboxParams("coco", min_area=1.0),
            ),
            Sometimes(
                HorizontalFlip(mode="train",
                               image_in="image",
                               image_out="image",
                               bbox_in="bbox",
                               bbox_out="bbox",
                               bbox_params='coco')),
            # normalize from uint8 to [-1, 1]
            Normalize(inputs="image",
                      outputs="image",
                      mean=1.0,
                      std=1.0,
                      max_pixel_value=127.5),
            ShiftLabel(inputs="bbox", outputs="bbox"),
            AnchorBox(inputs="bbox",
                      outputs="anchorbox",
                      width=image_size,
                      height=image_size),
            ChannelTranspose(inputs="image", outputs="image")
        ],
        pad_value=0)
    # network
    model = fe.build(model_fn=lambda: RetinaNet(num_classes=num_classes),
                     optimizer_fn=lambda x: torch.optim.SGD(
                         x, lr=2e-4, momentum=0.9, weight_decay=0.0001))
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="image", outputs=["cls_pred", "loc_pred"]),
        RetinaLoss(inputs=["anchorbox", "cls_pred", "loc_pred"],
                   outputs=["total_loss", "focal_loss", "l1_loss"]),
        UpdateOp(model=model, loss_name="total_loss"),
        PredictBox(input_shape=(image_size, image_size, 3),
                   inputs=["cls_pred", "loc_pred"],
                   outputs="pred",
                   mode="eval")
    ])
    # estimator
    traces = [
        LRScheduler(model=model, lr_fn=lr_fn),
        BestModelSaver(model=model,
                       save_dir=model_dir,
                       metric='mAP',
                       save_best_mode="max"),
        MeanAveragePrecision(num_classes=num_classes,
                             true_key='bbox',
                             pred_key='pred',
                             mode="eval")
    ]
    estimator = fe.Estimator(
        pipeline=pipeline,
        network=network,
        epochs=epochs,
        traces=traces,
        max_train_steps_per_epoch=max_train_steps_per_epoch,
        max_eval_steps_per_epoch=max_eval_steps_per_epoch,
        monitor_names=["l1_loss", "focal_loss"])
    return estimator
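As in several examples above, lr_fn lives outside the snippet. RetinaNet-style training commonly pairs a short linear warmup with stepwise 10x drops late in training; a sketch in that spirit (every breakpoint below is an assumption):

def lr_fn(step, init_lr=2e-4, warmup_steps=2000):
    # Linear warmup, then 10x drops at two fixed milestones (illustrative)
    if step < warmup_steps:
        return init_lr * step / warmup_steps
    if step < 120000:
        return init_lr
    if step < 160000:
        return init_lr / 10
    return init_lr / 100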