Example #1
    def build_network(self):
        """
        Define the FastEstimator network flow.

        Args:
            None

        Returns:
            network: fe.Network object
        """
        epsilon = 0.04

        network = fe.Network(ops=[
            Watch(inputs="x"),
            ModelOp(model=self.model, inputs="x", outputs="y_pred"),
            CrossEntropy(inputs=("y_pred", "y"), outputs="base_ce"),
            FGSM(
                data="x", loss="base_ce", outputs="x_adverse",
                epsilon=epsilon),
            ModelOp(model=self.model, inputs="x_adverse",
                    outputs="y_pred_adv"),
            CrossEntropy(inputs=("y_pred_adv", "y"), outputs="adv_ce"),
            Average(inputs=("base_ce", "adv_ce"), outputs="avg_ce"),
            UpdateOp(model=self.model, loss_name="avg_ce")
        ])

        return network
def get_estimator(epochs=10, batch_size=50, epsilon=0.04, save_dir=tempfile.mkdtemp()):
    train_data, eval_data = cifar10.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(
        train_data=train_data,
        eval_data=eval_data,
        test_data=test_data,
        batch_size=batch_size,
        ops=[Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616))])
    model = fe.build(model_fn=lambda: LeNet(input_shape=(32, 32, 3)), optimizer_fn="adam", model_name="adv_model")
    network = fe.Network(ops=[
        Watch(inputs="x"),
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="base_ce"),
        FGSM(data="x", loss="base_ce", outputs="x_adverse", epsilon=epsilon),
        ModelOp(model=model, inputs="x_adverse", outputs="y_pred_adv"),
        CrossEntropy(inputs=("y_pred_adv", "y"), outputs="adv_ce"),
        Average(inputs=("base_ce", "adv_ce"), outputs="avg_ce"),
        UpdateOp(model=model, loss_name="avg_ce")
    ])
    traces = [
        Accuracy(true_key="y", pred_key="y_pred", output_name="clean_accuracy"),
        Accuracy(true_key="y", pred_key="y_pred_adv", output_name="adversarial_accuracy"),
        BestModelSaver(model=model, save_dir=save_dir, metric="base_ce", save_best_mode="min"),
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             monitor_names=["base_ce", "adv_ce"],
                             log_steps=1000)
    return estimator
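A minimal usage sketch for the FGSM adversarial-training estimator defined above (assuming the snippet's imports are in place; fit() and test() are standard fe.Estimator methods):

est = get_estimator(epochs=10, batch_size=50, epsilon=0.04)
est.fit()   # adversarial training on CIFAR-10; base_ce and adv_ce are logged every 1000 steps
est.test()  # reports clean_accuracy and adversarial_accuracy on the held-out test split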
 def test_categorical_crossentropy_weights(self):
     ce = CrossEntropy(inputs='x',
                       outputs='x',
                       class_weights=self.tf_cat_weights)
     ce.build('tf')
     output = ce.forward(data=[self.tf_pred_cat, self.tf_true_cat],
                         state={})
     self.assertTrue(np.allclose(output.numpy(), 0.54055756))
 def test_sparse_categorical_crossentropy_weights(self):
     ce = CrossEntropy(inputs='x',
                       outputs='x',
                       class_weights=self.tf_sparse_weights)
     ce.build('tf')
     output = ce.forward(data=[self.tf_pred_sparse, self.tf_true_sparse],
                         state={})
     self.assertTrue(np.allclose(output.numpy(), 3.532212))
 def test_torch_input_weights(self):
     ce = CrossEntropy(inputs='x',
                       outputs='x',
                       class_weights=self.torch_binary_weights)
     ce.build('torch')
     output = ce.forward(
         data=[self.torch_pred_binary, self.torch_true_binary], state={})
     self.assertTrue(np.allclose(output.detach().numpy(), 0.2797609))
 def test_binary_crossentropy_weights(self):
     ce = CrossEntropy(inputs='x',
                       outputs='x',
                       class_weights=self.tf_binary_weights)
     ce.build('tf')
     output = ce.forward(data=[self.tf_pred_binary, self.tf_true_binary],
                         state={})
     self.assertTrue(np.allclose(output.numpy(), -56.221874))
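The fixture tensors and weights referenced above (self.tf_pred_cat, self.tf_binary_weights, and so on) are defined in the test's setUp and are not part of this excerpt. A self-contained sketch of the same class_weights API, with illustrative values that are assumptions rather than the fixture data:

import tensorflow as tf
from fastestimator.op.tensorop.loss import CrossEntropy

# Weighted sparse-categorical cross entropy; the prediction/label values are illustrative only
y_pred = tf.constant([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]])  # softmax outputs for 3 classes
y_true = tf.constant([[0], [1]])                           # sparse integer labels
ce = CrossEntropy(inputs=("y_pred", "y"), outputs="ce", class_weights={1: 4.0})  # up-weight class 1
ce.build('tf')
loss = ce.forward(data=[y_pred, y_true], state={})
print(loss.numpy())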
Example #7
    def test_pytorch_weight_decay_vs_l2(self):
        # Get Data
        train_data, _ = mnist.load_data()
        t_d = train_data.split(128)
        # Initializing models
        pytorch_wd = fe.build(model_fn=MyNet_torch,
                              optimizer_fn=lambda x: torch.optim.SGD(params=x, lr=0.01, weight_decay=self.beta))

        pytorch_l2 = fe.build(model_fn=MyNet_torch, optimizer_fn=lambda x: torch.optim.SGD(params=x, lr=0.01))
        # Initialize pipeline
        pipeline = fe.Pipeline(train_data=t_d,
                               batch_size=128,
                               ops=[ExpandDims(inputs="x", outputs="x", axis=0), Minmax(inputs="x", outputs="x")])
        # Define the two pytorch networks
        network_weight_decay = fe.Network(ops=[
            ModelOp(model=pytorch_wd, inputs="x", outputs="y_pred"),
            CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
            UpdateOp(model=pytorch_wd, loss_name="ce")
        ])

        network_l2 = fe.Network(ops=[
            ModelOp(model=pytorch_l2, inputs="x", outputs="y_pred"),
            CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
            L2Regularizaton(inputs="ce", outputs="l2", model=pytorch_l2, beta=self.beta),
            UpdateOp(model=pytorch_l2, loss_name="l2")
        ])

        # defining traces
        traces = [Accuracy(true_key="y", pred_key="y_pred")]

        # Setting up estimators
        estimator_wd = fe.Estimator(pipeline=pipeline,
                                    network=network_weight_decay,
                                    epochs=1,
                                    traces=traces,
                                    train_steps_per_epoch=1)

        estimator_l2 = fe.Estimator(pipeline=pipeline,
                                    network=network_l2,
                                    epochs=1,
                                    traces=traces,
                                    train_steps_per_epoch=1)
        # Training
        print('********************************Pytorch weight decay training************************************')
        estimator_wd.fit()
        print()
        print('********************************Pytorch L2 Regularization training************************************')
        estimator_l2.fit()
        # testing weights
        count = 0
        for wt, l2 in zip(pytorch_wd.parameters(), pytorch_l2.parameters()):
            if ((wt - l2).abs()).sum() < torch.tensor(10**-6):
                count += 1
        self.assertTrue(count == 6)
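The equivalence exercised by this test follows from the SGD update rule: weight_decay=beta adds beta * w to each gradient, which is exactly the gradient contributed by an explicit (beta / 2) * ||w||^2 penalty (assuming L2Regularizaton follows that convention). A framework-free sketch of the identity in plain PyTorch:

import torch

# One SGD step with built-in weight decay...
beta, lr = 1e-4, 0.01
w_wd = torch.nn.Parameter(torch.ones(3))
opt_wd = torch.optim.SGD([w_wd], lr=lr, weight_decay=beta)
(w_wd ** 2).sum().backward()   # stand-in loss
opt_wd.step()                  # w <- w - lr * (grad + beta * w)

# ...matches one SGD step with an explicit L2 penalty added to the loss
w_l2 = torch.nn.Parameter(torch.ones(3))
opt_l2 = torch.optim.SGD([w_l2], lr=lr)
((w_l2 ** 2).sum() + (beta / 2) * (w_l2 ** 2).sum()).backward()
opt_l2.step()

assert torch.allclose(w_wd, w_l2)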
def get_estimator(epochs=24, batch_size=128, lr_epochs=100, max_train_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp()):
    # step 1: prepare dataset
    train_data, test_data = load_data()
    pipeline = fe.Pipeline(
        train_data=train_data,
        eval_data=test_data,
        batch_size=batch_size,
        ops=[
            Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),
            PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x", mode="train"),
            RandomCrop(32, 32, image_in="x", image_out="x", mode="train"),
            Sometimes(HorizontalFlip(image_in="x", image_out="x", mode="train")),
            CoarseDropout(inputs="x", outputs="x", mode="train", max_holes=1),
            ChannelTranspose(inputs="x", outputs="x"),
            Onehot(inputs="y", outputs="y", mode="train", num_classes=10, label_smoothing=0.2)
        ])

    # step 2: prepare network
    model = fe.build(model_fn=ResNet9, optimizer_fn="sgd")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])

    # get the max learning rate
    lr_max = search_max_lr(pipeline=pipeline, model=model, network=network, epochs=lr_epochs)
    lr_min = lr_max / 40
    print(f"The maximum LR: {lr_max}, and minimun LR: {lr_min}")
    mid_step = int(epochs * 0.45 * len(train_data) / batch_size)
    end_step = int(epochs * len(train_data) / batch_size)

    # reinitialize the model
    model = fe.build(model_fn=ResNet9, optimizer_fn="sgd")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])

    # step 3: prepare estimator
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        BestModelSaver(model=model, save_dir=save_dir, metric="accuracy", save_best_mode="max"),
        LRScheduler(model=model, lr_fn=lambda step: super_schedule(step, lr_max, lr_min, mid_step, end_step))
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             max_train_steps_per_epoch=max_train_steps_per_epoch)
    return estimator
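Neither search_max_lr nor super_schedule is included in this excerpt. As a rough, hypothetical sketch of a one-cycle-style schedule with the same signature (the linear ramps are an assumption, not the original implementation):

def super_schedule_sketch(step, lr_max, lr_min, mid_step, end_step):
    # Linear warm-up from lr_min to lr_max over the first mid_step steps
    if step <= mid_step:
        return lr_min + (lr_max - lr_min) * step / mid_step
    # Linear decay back toward lr_min, clamped after end_step
    return max(lr_min, lr_max - (lr_max - lr_min) * (step - mid_step) / (end_step - mid_step))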
Example #9
def finetune_model(model, epochs, batch_size, max_train_steps_per_epoch, save_dir):
    train_data, test_data = load_data()
    train_data = train_data.split(0.1)  # keep only a 10% split of the training data for fine-tuning
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=test_data,
                           batch_size=batch_size,
                           ops=[
                               ToFloat(inputs="x", outputs="x"),
                           ])

    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=["y_pred", "y"], outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])

    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             max_train_steps_per_epoch=max_train_steps_per_epoch)
    estimator.fit()
Example #10
def get_estimator(epochs=2, batch_size=32):
    # step 1
    train_data, eval_data = mnist.load_data()
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           batch_size=batch_size,
                           ops=[
                               ExpandDims(inputs="x", outputs="x"),
                               Minmax(inputs="x", outputs="x")
                           ])
    # step 2
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model,
                inputs="x",
                outputs=["y_pred", "feature_vector"],
                intermediate_layers='dense'),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        CustomLoss(inputs=("feature_vector", "feature_selected"),
                   outputs="feature_loss"),
        LambdaOp(fn=lambda x, y: x + y,
                 inputs=("ce", "feature_loss"),
                 outputs="total_loss"),
        UpdateOp(model=model, loss_name="total_loss")
    ])
    # step 3
    traces = [
        MemoryBank(inputs=("feature_vector", "y"), outputs="feature_selected")
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces)
    return estimator
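CustomLoss and MemoryBank are user-defined and not shown in this excerpt. A hypothetical TensorOp-style stand-in for CustomLoss that penalizes the distance between the current feature vector and the feature selected by the MemoryBank trace (the class name, the MSE choice, and the TF-only implementation are assumptions):

import tensorflow as tf
from fastestimator.op.tensorop import TensorOp

class CustomLossSketch(TensorOp):
    def forward(self, data, state):
        feature_vector, feature_selected = data
        # mean squared distance between current and memory-bank features
        return tf.reduce_mean(tf.square(feature_vector - feature_selected))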
Example #11
def get_estimator(epochs=10, batch_size=32, train_steps_per_epoch=None):
    train_data, eval_data = cifair10.load_data()

    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           batch_size=batch_size,
                           ops=[Normalize(inputs="x", outputs="x")])

    # step 2
    model = fe.build(model_fn=lambda: LeNet(input_shape=(32, 32, 3)),
                     optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             train_steps_per_epoch=train_steps_per_epoch)
    return estimator
Example #12
    def test_network_transform_lenet_torch(self):
        model = fe.build(
            model_fn=LeNetTorch,
            optimizer_fn=lambda x: torch.optim.Adam(params=x, lr=1.0))
        weight = get_torch_lenet_model_weight(model)
        network = fe.Network(ops=[
            ModelOp(model=model, inputs="x", outputs="y_pred"),
            CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
            UpdateOp(model=model, loss_name="ce")
        ])
        batch = {
            "x": np.ones((1, 1, 28, 28), dtype=np.float32),
            "y": np.array([[1]], dtype=np.float32)
        }
        batch = network.transform(data=batch, mode="train")

        with self.subTest("output y_pred check"):
            self.assertTrue("y_pred" in batch.keys())
            self.assertIsNotNone(batch["y_pred"])

        with self.subTest("output ce check"):
            self.assertTrue("ce" in batch.keys())
            self.assertIsNotNone(batch["ce"])

        with self.subTest("check whether model weight changed"):
            weight2 = get_torch_lenet_model_weight(model)
            self.assertFalse(is_equal(weight, weight2))
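get_torch_lenet_model_weight is a test helper that is not shown. A plausible stand-in (an assumption, not the original helper) that snapshots the parameter tensors so they can be compared after the update step:

def get_model_weight_sketch(model):
    # detach and copy each parameter so later training does not mutate the snapshot
    return [p.detach().cpu().clone() for p in model.parameters()]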
Example #13
        def run_test(mixed_precision, merge_grad, gradient):
            lr = 0.1
            pipeline = fe.Pipeline(train_data=self.train_data,
                                   batch_size=4,
                                   ops=[ExpandDims(inputs="x", outputs="x"), Minmax(inputs="x", outputs="x")])

            model = fe.build(model_fn=LeNet_tf,
                             optimizer_fn=lambda: tf.optimizers.SGD(lr),
                             mixed_precision=mixed_precision)
            network = fe.Network(ops=[
                ModelOp(model=model, inputs="x", outputs="y_pred"),
                CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
                GradientOp(model=model, finals="ce", outputs="grad"),
                UpdateOp(model=model, loss_name="ce", gradients=gradient, merge_grad=merge_grad),
            ])

            traces = [
                CheckNetworkWeight(model=model,
                                   grad_key="grad",
                                   merge_grad=merge_grad,
                                   test_self=self,
                                   lrs=lr,
                                   framework="tf")
            ]
            estimator = fe.Estimator(pipeline=pipeline,
                                     network=network,
                                     epochs=2,
                                     traces=traces,
                                     train_steps_per_epoch=2)
            estimator.fit(warmup=False)
Example #14
def get_estimator(max_words=10000,
                  max_len=500,
                  epochs=10,
                  batch_size=64,
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp()):
    # step 1. prepare data
    train_data, eval_data = imdb_review.load_data(max_len, max_words)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           batch_size=batch_size,
                           ops=Reshape(1, inputs="y", outputs="y"))
    # step 2. prepare model
    model = fe.build(model_fn=lambda: ReviewSentiment(max_words=max_words), optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="loss"),
        UpdateOp(model=model, loss_name="loss")
    ])
    # step 3. prepare estimator
    traces = [Accuracy(true_key="y", pred_key="y_pred"), BestModelSaver(model=model, save_dir=save_dir)]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             traces=traces,
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch)

    return estimator
Example #15
def get_estimator(epochs=10, batch_size=32, extend_ds=False):
    train_data, eval_data = cifair10.load_data()

    if extend_ds:
        train_data = ExtendDataset(dataset=train_data,
                                   spoof_length=len(train_data) * 2)
        epochs //= 2  # each spoofed epoch covers twice the data, so half as many epochs are needed

    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           batch_size=batch_size,
                           ops=[Normalize(inputs="x", outputs="x")])

    # step 2
    model = fe.build(model_fn=lambda: LeNet(input_shape=(32, 32, 3)),
                     optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces)
    return estimator
Example #16
def _build_estimator(model: Union[tf.keras.Model, torch.nn.Module], trace: Traceability, axis: int = -1):
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    batch_size = 32
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[ExpandDims(inputs="x", outputs="x", axis=axis), Minmax(inputs="x", outputs="x")])
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        LRScheduler(model=model, lr_fn=lambda step: cosine_decay(step, cycle_length=3750, init_lr=1e-3)),
        trace
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=1,
                             traces=traces,
                             max_train_steps_per_epoch=1,
                             max_eval_steps_per_epoch=None)
    fake_data = tf.ones(shape=(batch_size, 28, 28, 1)) if axis == -1 else torch.ones(size=(batch_size, 1, 28, 28))
    model.fe_input_spec = FeInputSpec(fake_data, model)
    return estimator
Example #17
def get_estimator(epochs=2,
                  batch_size=32,
                  save_dir=tempfile.mkdtemp()):
    # step 1
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[ExpandDims(inputs="x", outputs="x"), Minmax(inputs="x", outputs="x")])

    # step 2
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        BestModelSaver(model=model, save_dir=save_dir, metric="accuracy", save_best_mode="max"),
        LRScheduler(model=model, lr_fn=lambda step: cosine_decay(step, cycle_length=3750, init_lr=1e-3))
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces)
    return estimator
Example #18
 def test_mode_ds_id_interaction(self):
     train_data, eval_data = mnist.load_data()
     test_data = eval_data.split(0.5)
     pipeline = fe.Pipeline(train_data=train_data,
                            eval_data=eval_data,
                            test_data=test_data,
                            batch_size=32,
                            ops=[
                                ExpandDims(inputs="x", outputs="x"),
                                Minmax(inputs="x", outputs="x")
                            ])
     model = fe.build(model_fn=LeNet, optimizer_fn="adam")
     network = fe.Network(ops=[
         ModelOp(model=model, inputs="x", outputs="y_pred"),
         CrossEntropy(inputs=("y_pred", "y"), outputs="ce", ds_id="ds_1")
     ])
     pipeline_data = pipeline.transform(data=train_data[0], mode="train")
     data1 = network.transform(data=pipeline_data,
                               mode="infer",
                               ds_id="ds_1")
     assert "ce" not in data1
     data2 = network.transform(data=pipeline_data,
                               mode="infer",
                               ds_id="ds_2")
     assert "ce" not in data2
Example #19
        def run_test(mixed_precision, merge_grad, gradient):
            lr = 0.1
            lr2 = 0.01
            pipeline = fe.Pipeline(train_data=self.train_data,
                                   batch_size=4,
                                   ops=[ExpandDims(inputs="x", outputs="x", axis=0), Minmax(inputs="x", outputs="x")])

            optimizer_fn = RepeatScheduler(
                [lambda x: torch.optim.SGD(params=x, lr=lr), lambda x: torch.optim.SGD(params=x, lr=lr2)])

            model = fe.build(model_fn=LeNet_torch, optimizer_fn=optimizer_fn, mixed_precision=mixed_precision)
            network = fe.Network(ops=[
                ModelOp(model=model, inputs="x", outputs="y_pred"),
                CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
                GradientOp(model=model, finals="ce", outputs="grad"),
                UpdateOp(model=model, loss_name="ce", gradients=gradient, merge_grad=merge_grad),
            ])

            traces = [
                CheckNetworkWeight(model=model,
                                   grad_key="grad",
                                   merge_grad=merge_grad,
                                   test_self=self,
                                   framework="torch",
                                   lrs=[lr, lr2, lr, lr2],
                                   work_intervals=[[1, 2], [2, 3], [3, 4], [4, 5]])
            ]
            estimator = fe.Estimator(pipeline=pipeline,
                                     network=network,
                                     epochs=4,
                                     traces=traces,
                                     train_steps_per_epoch=2)
            estimator.fit(warmup=False)
Example #20
    def test_estimator_configure_loader_torch_data_loader_tf_model(self):
        loader = get_sample_torch_dataloader()
        pipeline = fe.Pipeline(train_data=loader)
        model = fe.build(model_fn=LeNetTf, optimizer_fn="adam")

        network = fe.Network(ops=[
            ModelOp(model=model, inputs="x_out", outputs="y_pred"),
            CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
            UpdateOp(model=model, loss_name="ce")
        ])

        est = fe.Estimator(pipeline=pipeline,
                           network=network,
                           max_train_steps_per_epoch=3,
                           epochs=1)

        est.system.mode = "train"
        new_loader = est._configure_loader(loader)

        with self.subTest("check loader type"):
            strategy = tf.distribute.get_strategy()
            if isinstance(strategy, tf.distribute.MirroredStrategy):
                self.assertIsInstance(new_loader,
                                      tf.distribute.DistributedDataset)
            else:
                self.assertIsInstance(new_loader, tf.data.Dataset)

        with self.subTest("max_train_steps_per_epoch=3"):
            iterator = iter(new_loader)
            for i in range(3):
                batch = next(iterator)

            with self.assertRaises(StopIteration):
                batch = next(iterator)
Example #21
 def test_tf_static_superloss_categorical_ce(self):
     sl = SuperLoss(CrossEntropy(inputs=['y_pred', 'y'], outputs='ce'))
     sl.build(framework="tf", device=None)
     output = self.do_forward(sl,
                              data=[self.tf_pred_cat, self.tf_true_cat],
                              state=self.state)
     self.assertTrue(np.allclose(output.numpy(), -0.0026386082))
Example #22
 def instantiate_system():
     system = sample_system_object()
     model = fe.build(model_fn=fe.architecture.pytorch.LeNet, optimizer_fn='adam', model_name='tf')
     system.network = fe.Network(ops=[
         ModelOp(model=model, inputs="x_out", outputs="y_pred"),
         SuperLoss(CrossEntropy(inputs=['y_pred', 'y'], outputs='ce'))
     ])
     return system
Example #23
 def test_torch_superloss_binary_ce(self):
     sl = SuperLoss(CrossEntropy(inputs=['y_pred', 'y'], outputs='ce'))
     sl.build(framework="torch",
              device="cuda:0" if torch.cuda.is_available() else "cpu")
     output = sl.forward(
         data=[self.torch_pred_binary, self.torch_true_binary],
         state=self.state)
     self.assertTrue(
         np.allclose(output.detach().to("cpu").numpy(), -0.0026238672))
Example #24
def get_estimator(epsilon=0.04,
                  epochs=20,
                  batch_size=32,
                  code_length=16,
                  train_steps_per_epoch=None,
                  eval_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp()):
    # step 1
    train_data, eval_data = cifair10.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(
        train_data=train_data,
        eval_data=eval_data,
        test_data=test_data,
        batch_size=batch_size,
        ops=[Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616))])

    # step 2
    model = fe.build(model_fn=lambda: ecc_lenet(code_length=code_length), optimizer_fn="adam")

    network = fe.Network(ops=[
        Watch(inputs="x", mode=('eval', 'test')),
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="base_ce"),
        UpdateOp(model=model, loss_name="base_ce"),
        FGSM(data="x", loss="base_ce", outputs="x_adverse", epsilon=epsilon, mode=('eval', 'test')),
        ModelOp(model=model, inputs="x_adverse", outputs="y_pred_adv", mode=('eval', 'test')),
        CrossEntropy(inputs=("y_pred_adv", "y"), outputs="adv_ce", mode=('eval', 'test')),
        Average(inputs=("base_ce", "adv_ce"), outputs="avg_ce", mode='eval')
    ])
    # step 3
    traces = [
        Accuracy(true_key="y", pred_key="y_pred", output_name="base_accuracy"),
        Accuracy(true_key="y", pred_key="y_pred_adv", output_name="adversarial_accuracy"),
        BestModelSaver(model=model, save_dir=save_dir, metric="avg_ce", save_best_mode="min", load_best_final=True)
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch,
                             monitor_names=["adv_ce", "avg_ce"])
    return estimator
Example #25
def get_estimator(epochs=2,
                  batch_size=32,
                  max_train_steps_per_epoch=None,
                  max_eval_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp()):
    # step 1
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[
                               ExpandDims(inputs="x", outputs="x"),
                               Minmax(inputs="x", outputs="x")
                           ],
                           num_process=0)

    # step 2
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    print([f"{idx}: {x.name}" for idx, x in enumerate(model.submodules)])
    network = fe.Network(ops=[
        Watch(inputs="x"),
        ModelOp(model=model,
                inputs="x",
                outputs=["y_pred", "embedding"],
                intermediate_layers='dense'),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        GradientOp(finals="embedding", inputs="x", outputs="grads"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        Inspector(),
        BestModelSaver(model=model,
                       save_dir=save_dir,
                       metric="accuracy",
                       save_best_mode="max"),
        LRScheduler(model=model,
                    lr_fn=lambda step: cosine_decay(
                        step, cycle_length=3750, init_lr=1e-3)),
        TensorBoard(log_dir="tf_logs",
                    write_embeddings="embedding",
                    embedding_labels="y")
    ]
    estimator = fe.Estimator(
        pipeline=pipeline,
        network=network,
        epochs=epochs,
        traces=traces,
        max_train_steps_per_epoch=max_train_steps_per_epoch,
        max_eval_steps_per_epoch=max_eval_steps_per_epoch)
    return estimator
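Inspector is a custom trace that is not part of this excerpt. A hypothetical minimal version (the name, the train-only mode, and the print-only behavior are all assumptions) that peeks at the saliency gradients produced by GradientOp:

from fastestimator.trace import Trace

class InspectorSketch(Trace):
    def __init__(self):
        # declare "grads" as an input so the network retains it for this trace
        super().__init__(inputs="grads", mode="train")

    def on_batch_end(self, data):
        print("saliency gradient shape:", data["grads"].shape)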
Example #26
def get_estimator(epochs=50,
                  batch_size=128,
                  max_train_steps_per_epoch=None,
                  max_eval_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp()):
    # step 1
    train_data, eval_data = cifair100.load_data()

    # Add label noise to simulate real-world labeling problems
    corrupt_dataset(train_data)

    test_data = eval_data.split(range(len(eval_data) // 2))
    pipeline = fe.Pipeline(
        train_data=train_data,
        eval_data=eval_data,
        test_data=test_data,
        batch_size=batch_size,
        ops=[
            Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),
            PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x", mode="train"),
            RandomCrop(32, 32, image_in="x", image_out="x", mode="train"),
            Sometimes(HorizontalFlip(image_in="x", image_out="x", mode="train")),
            CoarseDropout(inputs="x", outputs="x", mode="train", max_holes=1),
            ChannelTranspose(inputs="x", outputs="x")
        ])

    # step 2
    model = fe.build(model_fn=big_lenet, optimizer_fn='adam')
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        SuperLoss(CrossEntropy(inputs=("y_pred", "y"), outputs="ce"), output_confidence="confidence"),
        UpdateOp(model=model, loss_name="ce")
    ])

    # step 3
    traces = [
        MCC(true_key="y", pred_key="y_pred"),
        BestModelSaver(model=model, save_dir=save_dir, metric="mcc", save_best_mode="max", load_best_final=True),
        LabelTracker(metric="confidence",
                     label="data_labels",
                     label_mapping={
                         "Normal": 0, "Corrupted": 1
                     },
                     mode="train",
                     outputs="label_confidence"),
        ImageSaver(inputs="label_confidence", save_dir=save_dir, mode="train"),
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             max_train_steps_per_epoch=max_train_steps_per_epoch,
                             max_eval_steps_per_epoch=max_eval_steps_per_epoch)
    return estimator
Example #27
    def setUpClass(cls):
        train_data, eval_data = mnist.load_data()
        cls.pipeline = fe.Pipeline(train_data=train_data)

        model = fe.build(model_fn=LeNetTf, optimizer_fn="adam")

        cls.network = fe.Network(ops=[
            ModelOp(model=model, inputs="x_out", outputs="y_pred"),
            CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
            UpdateOp(model=model, loss_name="ce")
        ])
Example #28
def get_estimator(epochs=2,
                  batch_size=32,
                  max_train_steps_per_epoch=None,
                  max_eval_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp()):
    # step 1. prepare data
    train_data, eval_data = breast_cancer.load_data()

    # Apply some global pre-processing to the data
    scaler = StandardScaler()
    train_data["x"] = scaler.fit_transform(train_data["x"])
    eval_data["x"] = scaler.transform(eval_data["x"])

    pipeline = fe.Pipeline(train_data=train_data, eval_data=eval_data, batch_size=batch_size)

    # step 2. prepare model
    model = fe.build(model_fn=DNN, optimizer_fn="adam")

    class_weights = {0: 2.0, 1: 2.0}

    network = fe.Network(ops=[
        ModelOp(inputs="x", model=model, outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce_weighted", class_weights=class_weights),
        UpdateOp(model=model, loss_name="ce")
    ])

    # step 3. prepare estimator
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        BestModelSaver(model=model, save_dir=save_dir, metric="accuracy", save_best_mode="max")
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             log_steps=10,
                             traces=traces,
                             monitor_names="ce_weighted",
                             max_train_steps_per_epoch=max_train_steps_per_epoch,
                             max_eval_steps_per_epoch=max_eval_steps_per_epoch)
    return estimator
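DNN is not defined in this excerpt. A hypothetical model_fn (the layer sizes are assumptions) for the 30-feature breast-cancer inputs with a single sigmoid output, matching the binary cross entropy used above:

import tensorflow as tf

def dnn_sketch(input_size=30):
    return tf.keras.Sequential([
        tf.keras.layers.Dense(32, activation='relu', input_shape=(input_size, )),
        tf.keras.layers.Dense(16, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid')
    ])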
Example #29
def get_estimator():
    # step 1
    pipeline = create_pipeline()
    # step 2
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3
    estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=2)
    return estimator
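create_pipeline is defined elsewhere. Given the LeNet/MNIST pattern used throughout these examples, a plausible stand-in (an assumption, not the original function) would be:

import fastestimator as fe
from fastestimator.dataset.data import mnist
from fastestimator.op.numpyop.univariate import ExpandDims, Minmax

def create_pipeline_sketch(batch_size=32):
    train_data, eval_data = mnist.load_data()
    return fe.Pipeline(train_data=train_data,
                       eval_data=eval_data,
                       batch_size=batch_size,
                       ops=[ExpandDims(inputs="x", outputs="x"), Minmax(inputs="x", outputs="x")])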
Example #30
def get_estimator(max_len=20,
                  epochs=10,
                  batch_size=64,
                  max_train_steps_per_epoch=None,
                  max_eval_steps_per_epoch=None,
                  pretrained_model='bert-base-uncased',
                  save_dir=tempfile.mkdtemp(),
                  data_dir=None):
    # step 1 prepare data
    train_data, eval_data, data_vocab, label_vocab = german_ner.load_data(root_dir=data_dir)
    tokenizer = BertTokenizer.from_pretrained(pretrained_model, do_lower_case=True)
    tag2idx = char2idx(label_vocab)
    pipeline = fe.Pipeline(
        train_data=train_data,
        eval_data=eval_data,
        batch_size=batch_size,
        ops=[
            Tokenize(inputs="x", outputs="x", tokenize_fn=tokenizer.tokenize),
            WordtoId(inputs="x", outputs="x", mapping=tokenizer.convert_tokens_to_ids),
            WordtoId(inputs="y", outputs="y", mapping=tag2idx),
            PadSequence(max_len=max_len, inputs="x", outputs="x"),
            PadSequence(max_len=max_len, value=len(tag2idx), inputs="y", outputs="y"),
            AttentionMask(inputs="x", outputs="x_masks")
        ])

    # step 2. prepare model
    bert_config = BertConfig.from_pretrained(pretrained_model)
    num_hidden_layers = bert_config.to_dict()['num_hidden_layers']
    head_masks = [None] * num_hidden_layers
    model = fe.build(model_fn=lambda: NERModel(head_masks=head_masks, pretrained_model=pretrained_model),
                     optimizer_fn=lambda x: torch.optim.Adam(x, lr=1e-5))
    network = fe.Network(ops=[
        ModelOp(model=model, inputs=["x", "x_masks"], outputs="y_pred"),
        Reshape(inputs="y", outputs="y", shape=(-1, )),
        Reshape(inputs="y_pred", outputs="y_pred", shape=(-1, 24)),
        CrossEntropy(inputs=("y_pred", "y"), outputs="loss"),
        UpdateOp(model=model, loss_name="loss")
    ])

    traces = [Accuracy(true_key="y", pred_key="y_pred"), BestModelSaver(model=model, save_dir=save_dir)]

    # step 3 prepare estimator
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             traces=traces,
                             max_train_steps_per_epoch=max_train_steps_per_epoch,
                             max_eval_steps_per_epoch=max_eval_steps_per_epoch)

    return estimator
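char2idx is not shown here. A plausible stand-in (an assumption) that builds the tag-to-index mapping consumed by WordtoId and PadSequence:

def char2idx_sketch(label_vocab):
    # deterministic tag -> index mapping over the label vocabulary
    return {tag: idx for idx, tag in enumerate(sorted(label_vocab))}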