Example #1
 def test_single_input(self):
     minmax = Minmax(inputs='x', outputs='y', mode='test')
     minmax2 = Minmax(inputs=["y", "z"], outputs="w", mode='test')
     fuse = Fuse([minmax, minmax2])
     with self.subTest('Check op inputs'):
         self.assertListEqual(fuse.inputs, ['x', 'z'])
     with self.subTest('Check op outputs'):
         self.assertListEqual(fuse.outputs, ['y', 'w'])
     with self.subTest('Check op mode'):
         self.assertSetEqual(fuse.mode, {'test'})
     output = fuse.forward(data=self.multi_input, state={"mode": "test"})
     with self.subTest('Check output type'):
         self.assertEqual(type(output), list)
     with self.subTest('Check output image shape'):
         self.assertEqual(output[0].shape, self.output_shape)
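The assertions pin down how Fuse resolves keys: "y" is produced by the first op before the second consumes it, so it becomes internal and drops out of the fused inputs, leaving ['x', 'z'], while every op's outputs are kept. A minimal sketch of that bookkeeping (an illustration of the behavior the test expects, not FastEstimator's actual implementation; it assumes each op exposes its inputs and outputs as lists):

def fused_keys(ops):
    # Keys produced by an earlier op are internal and never surface as inputs.
    produced, inputs, outputs = set(), [], []
    for op in ops:
        inputs += [k for k in op.inputs if k not in produced and k not in inputs]
        outputs += [k for k in op.outputs if k not in outputs]
        produced.update(op.outputs)
    return inputs, outputs  # for the ops above: (['x', 'z'], ['y', 'w'])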
Example #2
def create_pipeline():
    train_data, eval_data = mnist.load_data()
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           batch_size=32,
                           ops=[ExpandDims(inputs="x", outputs="x1"), Minmax(inputs="x1", outputs="x")])
    return pipeline
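Nothing exotic here, but note how ops chain purely by key names: ExpandDims writes "x1", and Minmax reads "x1" and writes back to "x", so downstream consumers of "x" see the expanded, [0, 1]-scaled image.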
Example #3
        def run_test(mixed_precision, merge_grad, gradient):
            lr = 0.1
            lr2 = 0.01
            pipeline = fe.Pipeline(train_data=self.train_data,
                                   batch_size=4,
                                   ops=[ExpandDims(inputs="x", outputs="x", axis=0), Minmax(inputs="x", outputs="x")])

            optimizer_fn = RepeatScheduler(
                [lambda x: torch.optim.SGD(params=x, lr=lr), lambda x: torch.optim.SGD(params=x, lr=lr2)])

            model = fe.build(model_fn=LeNet_torch, optimizer_fn=optimizer_fn, mixed_precision=mixed_precision)
            network = fe.Network(ops=[
                ModelOp(model=model, inputs="x", outputs="y_pred"),
                CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
                GradientOp(model=model, finals="ce", outputs="grad"),
                UpdateOp(model=model, loss_name="ce", gradients=gradient, merge_grad=merge_grad),
            ])

            traces = [
                CheckNetworkWeight(model=model,
                                   grad_key="grad",
                                   merge_grad=merge_grad,
                                   test_self=self,
                                   framework="torch",
                                   lrs=[lr, lr2, lr, lr2],
                                   work_intervals=[[1, 2], [2, 3], [3, 4], [4, 5]])
            ]
            estimator = fe.Estimator(pipeline=pipeline,
                                     network=network,
                                     epochs=4,
                                     traces=traces,
                                     train_steps_per_epoch=2)
            estimator.fit(warmup=False)
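RepeatScheduler cycles through its optimizer functions one epoch at a time, so this run alternates between SGD at lr=0.1 and lr=0.01 across the four epochs. That is why CheckNetworkWeight receives lrs=[lr, lr2, lr, lr2] and one work_interval per epoch: the trace verifies, interval by interval, that each applied update used the learning rate in effect, with and without merge_grad accumulation and mixed precision.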
Example #4
        def run_test(mixed_precision, merge_grad, gradient):
            lr = 0.1
            pipeline = fe.Pipeline(train_data=self.train_data,
                                   batch_size=4,
                                   ops=[ExpandDims(inputs="x", outputs="x"), Minmax(inputs="x", outputs="x")])

            model = fe.build(model_fn=LeNet_tf,
                             optimizer_fn=lambda: tf.optimizers.SGD(lr),
                             mixed_precision=mixed_precision)
            network = fe.Network(ops=[
                ModelOp(model=model, inputs="x", outputs="y_pred"),
                CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
                GradientOp(model=model, finals="ce", outputs="grad"),
                UpdateOp(model=model, loss_name="ce", gradients=gradient, merge_grad=merge_grad),
            ])

            traces = [
                CheckNetworkWeight(model=model,
                                   grad_key="grad",
                                   merge_grad=merge_grad,
                                   test_self=self,
                                   lrs=lr,
                                   framework="tf")
            ]
            estimator = fe.Estimator(pipeline=pipeline,
                                     network=network,
                                     epochs=2,
                                     traces=traces,
                                     train_steps_per_epoch=2)
            estimator.fit(warmup=False)
Example #5
def get_estimator(epochs=2, batch_size=32):
    # step 1
    train_data, eval_data = mnist.load_data()
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           batch_size=batch_size,
                           ops=[
                               ExpandDims(inputs="x", outputs="x"),
                               Minmax(inputs="x", outputs="x")
                           ])
    # step 2
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model,
                inputs="x",
                outputs=["y_pred", "feature_vector"],
                intermediate_layers='dense'),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        CustomLoss(inputs=("feature_vector", "feature_selected"),
                   outputs="feature_loss"),
        LambdaOp(fn=lambda x, y: x + y,
                 inputs=("ce", "feature_loss"),
                 outputs="total_loss"),
        UpdateOp(model=model, loss_name="total_loss")
    ])
    # step 3
    traces = [
        MemoryBank(inputs=("feature_vector", "y"), outputs="feature_selected")
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces)
    return estimator
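Two less common hooks are exercised here. intermediate_layers='dense' makes ModelOp emit that layer's activations as a second output ("feature_vector"), and the MemoryBank trace (user-defined in this example, like CustomLoss) produces "feature_selected", which flows back into the network as a CustomLoss input; traces can supply keys to network ops, not only read them.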
Example #6
def _build_estimator(model: Union[tf.keras.Model, torch.nn.Module], trace: Traceability, axis: int = -1):
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    batch_size = 32
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[ExpandDims(inputs="x", outputs="x", axis=axis), Minmax(inputs="x", outputs="x")])
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        LRScheduler(model=model, lr_fn=lambda step: cosine_decay(step, cycle_length=3750, init_lr=1e-3)),
        trace
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=1,
                             traces=traces,
                             max_train_steps_per_epoch=1,
                             max_eval_steps_per_epoch=None)
    fake_data = tf.ones(shape=(batch_size, 28, 28, 1)) if axis == -1 else torch.ones(size=(batch_size, 1, 28, 28))
    model.fe_input_spec = FeInputSpec(fake_data, model)
    return estimator
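The fake batch matches the layout implied by axis: channels-last (32, 28, 28, 1) for TensorFlow when axis == -1, channels-first (32, 1, 28, 28) for PyTorch otherwise. Attaching it via FeInputSpec presumably lets the Traceability trace reconstruct a representative input when it summarizes the model, without needing a live pipeline batch.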
Example #7
def get_estimator(epochs=2,
                  batch_size=32,
                  save_dir=tempfile.mkdtemp()):
    # step 1
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[ExpandDims(inputs="x", outputs="x"), Minmax(inputs="x", outputs="x")])

    # step 2
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        BestModelSaver(model=model, save_dir=save_dir, metric="accuracy", save_best_mode="max"),
        LRScheduler(model=model, lr_fn=lambda step: cosine_decay(step, cycle_length=3750, init_lr=1e-3))
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces)
    return estimator
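A detail worth noticing: cycle_length=3750 spans the entire run, since 60,000 MNIST training images at batch_size=32 give 1,875 steps per epoch, or 3,750 steps over the default two epochs, so the cosine schedule decays once from init_lr=1e-3. BestModelSaver keys off the "accuracy" value published by the Accuracy trace, with save_best_mode="max".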
Example #8
 def test_mode_ds_id_interaction(self):
     train_data, eval_data = mnist.load_data()
     test_data = eval_data.split(0.5)
     pipeline = fe.Pipeline(train_data=train_data,
                            eval_data=eval_data,
                            test_data=test_data,
                            batch_size=32,
                            ops=[
                                ExpandDims(inputs="x", outputs="x"),
                                Minmax(inputs="x", outputs="x")
                            ])
     model = fe.build(model_fn=LeNet, optimizer_fn="adam")
     network = fe.Network(ops=[
         ModelOp(model=model, inputs="x", outputs="y_pred"),
         CrossEntropy(inputs=("y_pred", "y"), outputs="ce", ds_id="ds_1")
     ])
     pipeline_data = pipeline.transform(data=train_data[0], mode="train")
     data1 = network.transform(data=pipeline_data,
                               mode="infer",
                               ds_id="ds_1")
     assert "ce" not in data1
     data2 = network.transform(data=pipeline_data,
                               mode="infer",
                               ds_id="ds_2")
     assert "ce" not in data2
Example #9
 def test_single_input(self):
     minmax = Minmax(inputs='x', outputs='x')
     sometimes = Sometimes(minmax, prob=0.75)
     output = sometimes.forward(data=self.single_input, state={})
     with self.subTest('Check output type'):
         self.assertEqual(type(output), list)
     with self.subTest('Check output image shape'):
         self.assertEqual(output[0].shape, self.output_shape)
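Sometimes wraps a single op and fires it randomly. A sketch of the assumed forward logic (workable here because inputs and outputs are both 'x', so skipping the op can simply pass the data through; FastEstimator's real op also has to remap keys when they differ):

import random

def sometimes_forward(op, data, state, prob=0.75):
    # Apply the wrapped op with probability `prob`, else return data untouched.
    return op.forward(data, state) if random.random() < prob else data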
Example #10
 def test_single_input(self):
     minmax = Minmax(inputs='x', outputs='x')
     binarize = Binarize(inputs='x', outputs='x', threshold=1)
     oneof = OneOf(minmax, binarize)
     output = oneof.forward(data=self.single_input, state={})
     with self.subTest('Check output type'):
         self.assertEqual(type(output), list)
     with self.subTest('Check output image shape'):
         self.assertEqual(output[0].shape, self.output_shape)
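OneOf applies exactly one of its candidate ops per call, so each image comes out either min-max scaled or binarized at threshold=1; in both cases the shape is preserved, which is all the test asserts. A sketch of the assumed selection logic:

import random

def oneof_forward(ops, data, state):
    # Choose one candidate uniformly at random and apply only that op.
    return random.choice(ops).forward(data, state)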
Example #11
 def test_multi_input(self):
     minmax = Minmax(inputs='x', outputs='x')
     sometimes = Sometimes(minmax)
     output = sometimes.forward(data=self.multi_input, state={})
     with self.subTest('Check output type'):
         self.assertEqual(type(output), list)
     with self.subTest('Check output list length'):
         self.assertEqual(len(output), 2)
     for img_output in output:
         with self.subTest('Check output image shape'):
             self.assertEqual(img_output.shape, self.output_shape)
Example #12
def get_estimator(epochs=2,
                  batch_size=32,
                  max_train_steps_per_epoch=None,
                  max_eval_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp()):
    # step 1
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[
                               ExpandDims(inputs="x", outputs="x"),
                               Minmax(inputs="x", outputs="x")
                           ],
                           num_process=0)

    # step 2
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    print([f"{idx}: {x.name}" for idx, x in enumerate(model.submodules)])
    network = fe.Network(ops=[
        Watch(inputs="x"),
        ModelOp(model=model,
                inputs="x",
                outputs=["y_pred", "embedding"],
                intermediate_layers='dense'),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        GradientOp(finals="embedding", inputs="x", outputs="grads"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        Inspector(),
        BestModelSaver(model=model,
                       save_dir=save_dir,
                       metric="accuracy",
                       save_best_mode="max"),
        LRScheduler(model=model,
                    lr_fn=lambda step: cosine_decay(
                        step, cycle_length=3750, init_lr=1e-3)),
        TensorBoard(log_dir="tf_logs",
                    write_embeddings="embedding",
                    embedding_labels="y")
    ]
    estimator = fe.Estimator(
        pipeline=pipeline,
        network=network,
        epochs=epochs,
        traces=traces,
        max_train_steps_per_epoch=max_train_steps_per_epoch,
        max_eval_steps_per_epoch=max_eval_steps_per_epoch)
    return estimator
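Watch(inputs="x") marks the input batch for gradient recording (it is not a trainable variable, so the framework's tape would otherwise ignore it), which is what allows GradientOp(finals="embedding", inputs="x") to emit d(embedding)/d(x) as "grads", a saliency-style signal, while UpdateOp still trains on "ce". TensorBoard then logs the "embedding" tensors with "y" as projector labels; Inspector is user-defined here.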
Example #13
    def test_pytorch_weight_decay_vs_l2(self):
        # Get Data
        train_data, _ = mnist.load_data()
        t_d = train_data.split(128)
        # Initializing models
        pytorch_wd = fe.build(model_fn=MyNet_torch,
                              optimizer_fn=lambda x: torch.optim.SGD(params=x, lr=0.01, weight_decay=self.beta))

        pytorch_l2 = fe.build(model_fn=MyNet_torch, optimizer_fn=lambda x: torch.optim.SGD(params=x, lr=0.01))
        # Initialize pipeline
        pipeline = fe.Pipeline(train_data=t_d,
                               batch_size=128,
                               ops=[ExpandDims(inputs="x", outputs="x", axis=0), Minmax(inputs="x", outputs="x")])
        # Define the two pytorch networks
        network_weight_decay = fe.Network(ops=[
            ModelOp(model=pytorch_wd, inputs="x", outputs="y_pred"),
            CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
            UpdateOp(model=pytorch_wd, loss_name="ce")
        ])

        network_l2 = fe.Network(ops=[
            ModelOp(model=pytorch_l2, inputs="x", outputs="y_pred"),
            CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
            L2Regularizaton(inputs="ce", outputs="l2", model=pytorch_l2, beta=self.beta),
            UpdateOp(model=pytorch_l2, loss_name="l2")
        ])

        # defining traces
        traces = [Accuracy(true_key="y", pred_key="y_pred")]

        # Setting up estimators
        estimator_wd = fe.Estimator(pipeline=pipeline,
                                    network=network_weight_decay,
                                    epochs=1,
                                    traces=traces,
                                    train_steps_per_epoch=1)

        estimator_l2 = fe.Estimator(pipeline=pipeline,
                                    network=network_l2,
                                    epochs=1,
                                    traces=traces,
                                    train_steps_per_epoch=1)
        # Training
        print('********************************Pytorch weight decay training************************************')
        estimator_wd.fit()
        print()
        print('********************************Pytorch L2 Regularization training************************************')
        estimator_l2.fit()
        # testing weights
        count = 0
        for wt, l2 in zip(pytorch_wd.parameters(), pytorch_l2.parameters()):
            if (wt - l2).abs().sum() < 1e-6:
                count += 1
        self.assertTrue(count == 6)
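The identity this test leans on: for vanilla SGD, weight_decay=beta contributes beta * w to each parameter's gradient, which is exactly the gradient of an extra loss term (beta / 2) * ||w||^2. Assuming the custom L2Regularizaton op (spelling as defined elsewhere in this test module) uses that convention, and both models start from the same weights, the single training step should leave the two networks numerically identical, which the element-wise check over all six parameter tensors confirms.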
Example #14
 def test_multi_input(self):
     minmax = Minmax(inputs='x', outputs='x')
     normalize = Normalize(inputs='x', outputs='x')
     binarize = Binarize(inputs='x', outputs='x', threshold=1)
     oneof = OneOf(minmax, normalize, binarize)
     output = oneof.forward(data=self.multi_input, state={})
     with self.subTest('Check output type'):
         self.assertEqual(type(output), list)
     with self.subTest('Check output list length'):
         self.assertEqual(len(output), 2)
     for img_output in output:
         with self.subTest('Check output image shape'):
             self.assertEqual(img_output.shape, self.output_shape)
Example #15
    def test_tf_model_end_to_end_gradient(self):
        train_data, _ = mnist.load_data()
        pipeline = fe.Pipeline(train_data=train_data,
                               batch_size=4,
                               ops=[ExpandDims(inputs="x", outputs="x"), Minmax(inputs="x", outputs="x")])

        model = fe.build(model_fn=LeNet_tf, optimizer_fn="adam")
        network = fe.Network(ops=[
            ModelOp(model=model, inputs="x", outputs="y_pred"),
            CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
            GradientOp(model=model, finals="ce", outputs="gradients"),
            UpdateOp(model=model, gradients="gradients", loss_name="ce")
        ])
        estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=2, max_train_steps_per_epoch=10)
        estimator.fit()
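This is functionally the same update as letting UpdateOp differentiate the loss itself; splitting it into GradientOp(finals="ce", outputs="gradients") plus UpdateOp(gradients="gradients") publishes the gradients under a key, where other ops or traces could inspect or modify them before they are applied.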
Example #16
def get_estimator():
    pos_real, _ = mnist.load_data()
    neg_real, _ = mnist.load_data()
    neg_sim, _ = mnist.load_data()
    neg_sim = NegativeImageSimulatedTube(neg_sim)
    batch_ds = BatchDataset(datasets=(pos_real, neg_real, neg_sim), num_samples=(2, 2, 1))
    pipeline = fe.Pipeline(train_data=batch_ds,
                           ops=[ExpandDims(inputs="x", outputs="x"), Minmax(inputs="x", outputs="x")])
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=2)
    return estimator
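BatchDataset with num_samples=(2, 2, 1) assembles each batch from two positive-real, two negative-real, and one simulated-negative sample, a fixed five-element batch per step; accordingly the Pipeline is given no batch_size of its own. NegativeImageSimulatedTube is a user-defined dataset wrapper in this example.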
Example #17
def get_estimator():
    ds, _ = mnist.load_data()
    ds = NegativeImageSimulatedTube(ds)
    pipeline = fe.Pipeline(train_data=ds,
                           ops=[
                               ExpandDims(inputs="x", outputs="x"),
                               Minmax(inputs="x", outputs="x")
                           ])
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=2)
    return estimator
Example #18
def get_estimator(batch_size=100,
                  epochs=20,
                  max_train_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp()):
    train_data, _ = load_data()
    pipeline = fe.Pipeline(train_data=train_data,
                           batch_size=batch_size,
                           ops=[
                               ExpandDims(inputs="x", outputs="x", axis=0),
                               Minmax(inputs="x", outputs="x"),
                               Binarize(inputs="x", outputs="x",
                                        threshold=0.5),
                           ])

    encode_model = fe.build(model_fn=EncoderNet,
                            optimizer_fn="adam",
                            model_name="encoder")
    decode_model = fe.build(model_fn=DecoderNet,
                            optimizer_fn="adam",
                            model_name="decoder")

    network = fe.Network(ops=[
        ModelOp(model=encode_model, inputs="x", outputs="meanlogvar"),
        SplitOp(inputs="meanlogvar", outputs=("mean", "logvar")),
        ReparameterizepOp(inputs=("mean", "logvar"), outputs="z"),
        ModelOp(model=decode_model, inputs="z", outputs="x_logit"),
        CrossEntropy(inputs=("x_logit", "x"), outputs="cross_entropy"),
        CVAELoss(inputs=("cross_entropy", "mean", "logvar", "z"),
                 outputs="loss"),
        UpdateOp(model=encode_model, loss_name="loss"),
        UpdateOp(model=decode_model, loss_name="loss"),
    ])

    traces = [
        BestModelSaver(model=encode_model, save_dir=save_dir),
        BestModelSaver(model=decode_model, save_dir=save_dir)
    ]

    estimator = fe.Estimator(
        pipeline=pipeline,
        network=network,
        epochs=epochs,
        traces=traces,
        max_train_steps_per_epoch=max_train_steps_per_epoch)

    return estimator
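This is the standard convolutional VAE wiring: SplitOp separates the encoder output into mean and log-variance, ReparameterizepOp presumably draws z = mean + exp(logvar / 2) * eps with eps ~ N(0, 1) so that sampling stays differentiable, and CVAELoss combines the reconstruction cross-entropy with the KL term. One UpdateOp per model lets the single "loss" key train encoder and decoder together.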
Example #19
def get_estimator(epochs=2, batch_size=32):
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[ExpandDims(inputs="x", outputs="x"), Minmax(inputs="x", outputs="x")])
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        DebugOp(inputs="ce", outputs="ce", mode="train"),
        UpdateOp(model=model, loss_name="ce")
    ])
    estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=epochs)
    return estimator
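DebugOp (user-defined here) is scoped with mode="train", so it only sees "ce" during training steps; evaluation and test runs skip it entirely while the rest of the graph is unchanged.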
Example #20
def get_estimator():
    # step 1
    train_data, eval_data = mnist.load_data()
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           batch_size=32,
                           ops=[
                               ExpandDims(inputs="x", outputs="x"),
                               Minmax(inputs="x", outputs="x")
                           ])
    # step 2
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce", merge_grad=4)
    ])
    # step 3
    estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=2)
    return estimator
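merge_grad=4 presumably makes UpdateOp accumulate gradients over four consecutive steps and apply them as one optimizer update, approximating a batch of 128 without the memory footprint of actually loading one.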
Example #21
def get_estimator():
    ds1 = create_dataset1()
    ds2 = create_dataset2()
    batch_ds = BatchDataset(datasets=(ds1, ds2), num_samples=(1, 1))
    pipeline = fe.Pipeline(train_data=batch_ds,
                           ops=[
                               ExpandDims(inputs="x", outputs="x"),
                               Minmax(inputs="x", outputs="x")
                           ])
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=4,
                             traces=DebugTrace(inputs="y"))
    return estimator
Example #22
 def create_estimator_for_arc(self, model, use_eval, axis):
     train_data, eval_data = mnist.load_data()
     pipeline = fe.Pipeline(train_data=train_data,
                            eval_data=eval_data if use_eval else None,
                            batch_size=8,
                            ops=[
                                ExpandDims(inputs="x",
                                           outputs="x",
                                           axis=axis),
                                Minmax(inputs="x", outputs="x")
                            ])
     network = fe.Network(ops=[
         ModelOp(model=model, inputs="x", outputs="y_pred"),
         CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
         UpdateOp(model=model, loss_name="ce")
     ])
     estimator = fe.Estimator(pipeline=pipeline,
                              network=network,
                              epochs=2,
                              traces=LRScheduler(model=model, lr_fn=ARC(1)),
                              max_train_steps_per_epoch=10)
     return estimator
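ARC(1) swaps the usual closed-form schedule for FastEstimator's adaptive learning-rate controller: LRScheduler consults it as the lr_fn, and ARC adjusts the rate from observed training signals rather than from the step count alone (the constructor argument is an ARC tuning knob whose exact meaning is defined by that class).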
Example #23
def get_estimator(epochs=20,
                  batch_size=4,
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp(),
                  log_steps=20,
                  data_dir=None):
    # step 1
    csv = montgomery.load_data(root_dir=data_dir)
    pipeline = fe.Pipeline(
        train_data=csv,
        eval_data=csv.split(0.2),
        batch_size=batch_size,
        ops=[
            ReadImage(inputs="image",
                      parent_path=csv.parent_path,
                      outputs="image",
                      color_flag='gray'),
            ReadImage(inputs="mask_left",
                      parent_path=csv.parent_path,
                      outputs="mask_left",
                      color_flag='gray',
                      mode='!infer'),
            ReadImage(inputs="mask_right",
                      parent_path=csv.parent_path,
                      outputs="mask_right",
                      color_flag='gray',
                      mode='!infer'),
            CombineLeftRightMask(inputs=("mask_left", "mask_right"),
                                 outputs="mask",
                                 mode='!infer'),
            Resize(image_in="image", width=512, height=512),
            Resize(image_in="mask", width=512, height=512, mode='!infer'),
            Sometimes(numpy_op=HorizontalFlip(
                image_in="image", mask_in="mask", mode='train')),
            Sometimes(numpy_op=Rotate(image_in="image",
                                      mask_in="mask",
                                      limit=(-10, 10),
                                      border_mode=cv2.BORDER_CONSTANT,
                                      mode='train')),
            Minmax(inputs="image", outputs="image"),
            Minmax(inputs="mask", outputs="mask", mode='!infer')
        ])

    # step 2
    model = fe.build(
        model_fn=lambda: UNet(input_size=(512, 512, 1)),
        optimizer_fn=lambda: tf.keras.optimizers.Adam(learning_rate=0.0001),
        model_name="lung_segmentation")
    network = fe.Network(ops=[
        ModelOp(inputs="image", model=model, outputs="pred_segment"),
        CrossEntropy(
            inputs=("pred_segment", "mask"), outputs="loss", form="binary"),
        UpdateOp(model=model, loss_name="loss")
    ])

    # step 3
    traces = [
        Dice(true_key="mask", pred_key="pred_segment"),
        BestModelSaver(model=model,
                       save_dir=save_dir,
                       metric='Dice',
                       save_best_mode='max')
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             log_steps=log_steps,
                             traces=traces,
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch)

    return estimator
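mode='!infer' means "every mode except infer": masks only exist for labeled data, so every mask-touching op (reading, combining, resizing, normalizing) is skipped at inference while the image path runs everywhere. The Sometimes-wrapped HorizontalFlip and Rotate take image_in and mask_in together so one random draw transforms the image and its mask identically, and mode='train' keeps augmentation out of evaluation.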
Example #24
 def test_multi_input(self):
     op = Minmax(inputs='x', outputs='x')
     data = op.forward(data=self.multi_input, state={})
     self.assertTrue(is_equal(data, self.multi_output))
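Minmax is the op nearly every example above reuses: it rescales an array to the [0, 1] range. A NumPy sketch of the per-array computation (an illustration; a production version should also guard the constant-input case where max equals min):

import numpy as np

def minmax(x: np.ndarray) -> np.ndarray:
    # Shift to a zero minimum, then divide by the value range.
    x = x.astype(np.float32)
    return (x - x.min()) / (x.max() - x.min())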
Example #25
    def test_pytorch_l2_vs_tensorflow_l2(self):
        # Get Data
        train_data, eval_data = mnist.load_data()
        t_d = train_data.split(128)
        # Initializing Pytorch model
        pytorch_l2 = fe.build(model_fn=MyNet_torch, optimizer_fn=lambda x: torch.optim.SGD(params=x, lr=0.01))
        # Initialize Pytorch pipeline
        pipeline = fe.Pipeline(train_data=t_d,
                               eval_data=eval_data,
                               batch_size=128,
                               ops=[ExpandDims(inputs="x", outputs="x", axis=0), Minmax(inputs="x", outputs="x")])
        # Initialize Pytorch Network
        network_l2 = fe.Network(ops=[
            ModelOp(model=pytorch_l2, inputs="x", outputs="y_pred"),
            CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
            L2Regularizaton(inputs="ce",outputs="l2",model=pytorch_l2,beta = self.beta),
            UpdateOp(model=pytorch_l2, loss_name="l2")
        ])
        # step 3
        traces = [
            Accuracy(true_key="y", pred_key="y_pred")
        ]
        # Initialize Pytorch estimator
        estimator_l2 = fe.Estimator(pipeline=pipeline,
                                    network=network_l2,
                                    epochs=1,
                                    traces=traces,
                                    train_steps_per_epoch=1,
                                    monitor_names=["ce", "l2"])
        print('********************************Pytorch L2 Regularization training************************************')
        estimator_l2.fit()

        # Converting Pytorch weights to numpy
        torch_wt = []
        for _, param in pytorch_l2.named_parameters():
            if param.requires_grad:
                torch_wt.append(param.detach().numpy())

        # step 1
        pipeline = fe.Pipeline(train_data=t_d,
                               eval_data=eval_data,
                               batch_size=128,
                               ops=[ExpandDims(inputs="x", outputs="x"), Minmax(inputs="x", outputs="x")])
        # step 2
        model_tf = fe.build(model_fn=MyNet_tf, optimizer_fn=lambda: tf.optimizers.SGD(learning_rate=0.01))
        network = fe.Network(ops=[
            ModelOp(model=model_tf, inputs="x", outputs="y_pred"),
            CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
            L2Regularizaton(inputs="ce",outputs="l2",model=model_tf,beta = self.beta),
            UpdateOp(model=model_tf, loss_name="l2")
        ])
        # step 3
        traces = [
            Accuracy(true_key="y", pred_key="y_pred")
        ]
        estimator = fe.Estimator(pipeline=pipeline,
                                 network=network,
                                 epochs=1,
                                 traces=traces,
                                 train_steps_per_epoch=1,
                                 monitor_names=["ce", "l2"])
        print('*******************************Tensorflow L2 Regularization training***********************************')
        estimator.fit()


        # Converting TF weights to numpy
        tf_wt = []
        for layer in model_tf.layers:
            for w in layer.trainable_variables:
                tf_wt.append(w.numpy())

        # testing weights
        count = 0
        for tf_t, tr in zip(tf_wt, torch_wt):
            if np.sum(np.abs(tf_t - np.transpose(tr))) < 10**-5:
                count += 1
        self.assertTrue(count == 6)
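The np.transpose in the final loop is doing real work: PyTorch Linear layers store weights as (out_features, in_features) while Keras Dense kernels are (input_dim, units), so each torch tensor must be transposed before the element-wise comparison against its TensorFlow counterpart (MyNet is presumably built from fully connected layers, where a plain transpose suffices).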
Example #26
def get_estimator(epochs=200,
                  batch_size=128,
                  max_train_steps_per_epoch=None,
                  max_eval_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp(),
                  data_dir=None):
    # step 1. prepare pipeline
    train_data, eval_data = omniglot.load_data(root_dir=data_dir)
    test_data = eval_data.split(0.5)

    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[
                               ReadImage(inputs="x_a",
                                         outputs="x_a",
                                         color_flag='gray'),
                               ReadImage(inputs="x_b",
                                         outputs="x_b",
                                         color_flag='gray'),
                               Sometimes(ShiftScaleRotate(image_in="x_a",
                                                          image_out="x_a",
                                                          shift_limit=0.05,
                                                          scale_limit=0.2,
                                                          rotate_limit=10,
                                                          mode="train"),
                                         prob=0.89),
                               Sometimes(ShiftScaleRotate(image_in="x_b",
                                                          image_out="x_b",
                                                          shift_limit=0.05,
                                                          scale_limit=0.2,
                                                          rotate_limit=10,
                                                          mode="train"),
                                         prob=0.89),
                               Minmax(inputs="x_a", outputs="x_a"),
                               Minmax(inputs="x_b", outputs="x_b")
                           ])

    # step 2. prepare model
    model = fe.build(model_fn=siamese_network,
                     model_name="siamese_net",
                     optimizer_fn="adam")

    network = fe.Network(ops=[
        ModelOp(inputs=["x_a", "x_b"], model=model, outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="loss", form="binary"),
        UpdateOp(model=model, loss_name="loss")
    ])

    # step 3.prepare estimator
    traces = [
        LRScheduler(model=model, lr_fn=lr_schedule),
        Accuracy(true_key="y", pred_key="y_pred"),
        OneShotAccuracy(dataset=eval_data,
                        model=model,
                        output_name='one_shot_accuracy'),
        BestModelSaver(model=model,
                       save_dir=save_dir,
                       metric="one_shot_accuracy",
                       save_best_mode="max"),
        EarlyStopping(monitor="one_shot_accuracy",
                      patience=20,
                      compare='max',
                      mode="eval")
    ]

    estimator = fe.Estimator(
        network=network,
        pipeline=pipeline,
        epochs=epochs,
        traces=traces,
        max_train_steps_per_epoch=max_train_steps_per_epoch,
        max_eval_steps_per_epoch=max_eval_steps_per_epoch)
    return estimator
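ModelOp(inputs=["x_a", "x_b"], ...) feeds both images of a pair to the Siamese network, which scores their similarity as "y_pred" against the binary label "y". The OneShotAccuracy trace re-evaluates the model on one-shot episodes built from eval_data, and both BestModelSaver and EarlyStopping track that metric (with patience=20 epochs) rather than plain pairwise accuracy.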