        def run_test(mixed_precision, merge_grad, gradient):
            lr = 0.1
            pipeline = fe.Pipeline(train_data=self.train_data,
                                   batch_size=4,
                                   ops=[ExpandDims(inputs="x", outputs="x"), Minmax(inputs="x", outputs="x")])

            model = fe.build(model_fn=LeNet_tf,
                             optimizer_fn=lambda: tf.optimizers.SGD(lr),
                             mixed_precision=mixed_precision)
            network = fe.Network(ops=[
                ModelOp(model=model, inputs="x", outputs="y_pred"),
                CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
                GradientOp(model=model, finals="ce", outputs="grad"),
                UpdateOp(model=model, loss_name="ce", gradients=gradient, merge_grad=merge_grad),
            ])

            traces = [
                CheckNetworkWeight(model=model,
                                   grad_key="grad",
                                   merge_grad=merge_grad,
                                   test_self=self,
                                   lrs=lr,
                                   framework="tf")
            ]
            estimator = fe.Estimator(pipeline=pipeline,
                                     network=network,
                                     epochs=2,
                                     traces=traces,
                                     train_steps_per_epoch=2)
            estimator.fit(warmup=False)
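            # Hedged sketch of how the closure above might be driven; the specific
            # flag combinations are assumptions, not taken from the source.
            import itertools
            for mp, mg in itertools.product([False, True], [1, 2]):
                run_test(mixed_precision=mp, merge_grad=mg, gradient="grad")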
Example #2
def _build_estimator(model: Union[tf.keras.Model, torch.nn.Module], trace: Traceability, axis: int = -1):
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    batch_size = 32
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[ExpandDims(inputs="x", outputs="x", axis=axis), Minmax(inputs="x", outputs="x")])
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        LRScheduler(model=model, lr_fn=lambda step: cosine_decay(step, cycle_length=3750, init_lr=1e-3)),
        trace
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=1,
                             traces=traces,
                             max_train_steps_per_epoch=1,
                             max_eval_steps_per_epoch=None)
    fake_data = tf.ones(shape=(batch_size, 28, 28, 1)) if axis == -1 else torch.ones(size=(batch_size, 1, 28, 28))
    model.fe_input_spec = FeInputSpec(fake_data, model)
    return estimator
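# Hedged usage sketch; the model choice and report name are assumptions, and
# the Traceability trace generally expects an experiment name passed to fit().
est = _build_estimator(model=fe.build(model_fn=LeNet_tf, optimizer_fn="adam"),
                       trace=Traceability(save_path="report"),
                       axis=-1)
est.fit("traceability_demo")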
        def run_test(mixed_precision, merge_grad, gradient):
            lr = 0.1
            lr2 = 0.01
            pipeline = fe.Pipeline(train_data=self.train_data,
                                   batch_size=4,
                                   ops=[ExpandDims(inputs="x", outputs="x", axis=0), Minmax(inputs="x", outputs="x")])

            # RepeatScheduler cycles through these optimizer builders epoch-by-epoch,
            # alternating between lr and lr2
            optimizer_fn = RepeatScheduler(
                [lambda x: torch.optim.SGD(params=x, lr=lr), lambda x: torch.optim.SGD(params=x, lr=lr2)])

            model = fe.build(model_fn=LeNet_torch, optimizer_fn=optimizer_fn, mixed_precision=mixed_precision)
            network = fe.Network(ops=[
                ModelOp(model=model, inputs="x", outputs="y_pred"),
                CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
                GradientOp(model=model, finals="ce", outputs="grad"),
                UpdateOp(model=model, loss_name="ce", gradients=gradient, merge_grad=merge_grad),
            ])

            traces = [
                CheckNetworkWeight(model=model,
                                   grad_key="grad",
                                   merge_grad=merge_grad,
                                   test_self=self,
                                   framework="torch",
                                   lrs=[lr, lr2, lr, lr2],
                                   work_intervals=[[1, 2], [2, 3], [3, 4], [4, 5]])
            ]
            estimator = fe.Estimator(pipeline=pipeline,
                                     network=network,
                                     epochs=4,
                                     traces=traces,
                                     train_steps_per_epoch=2)
            estimator.fit(warmup=False)
Example #4
def get_estimator(epochs=2,
                  batch_size=32,
                  save_dir=tempfile.mkdtemp()):
    # step 1
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[ExpandDims(inputs="x", outputs="x"), Minmax(inputs="x", outputs="x")])

    # step 2
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        BestModelSaver(model=model, save_dir=save_dir, metric="accuracy", save_best_mode="max"),
        LRScheduler(model=model, lr_fn=lambda step: cosine_decay(step, cycle_length=3750, init_lr=1e-3))
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces)
    return estimator
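# Standard FastEstimator driver pattern for a get_estimator() script:
if __name__ == "__main__":
    est = get_estimator()
    est.fit()
    est.test()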
Example #5
def create_pipeline():
    train_data, eval_data = mnist.load_data()
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           batch_size=32,
                           ops=[ExpandDims(inputs="x", outputs="x1"), Minmax(inputs="x1", outputs="x")])
    return pipeline
Example #6
def get_estimator(epochs=50, batch_size=256, max_train_steps_per_epoch=None, save_dir=tempfile.mkdtemp()):
    train_data, _ = mnist.load_data()
    pipeline = fe.Pipeline(
        train_data=train_data,
        batch_size=batch_size,
        ops=[
            ExpandDims(inputs="x", outputs="x"),
            Normalize(inputs="x", outputs="x", mean=1.0, std=1.0, max_pixel_value=127.5),  # scale pixels to [-1, 1]
            LambdaOp(fn=lambda: np.random.normal(size=[100]).astype('float32'), outputs="z")
        ])
    gen_model = fe.build(model_fn=generator, optimizer_fn=lambda: tf.optimizers.Adam(1e-4))
    disc_model = fe.build(model_fn=discriminator, optimizer_fn=lambda: tf.optimizers.Adam(1e-4))
    network = fe.Network(ops=[
        # generator branch: map noise z to fake images and score them
        ModelOp(model=gen_model, inputs="z", outputs="x_fake"),
        ModelOp(model=disc_model, inputs="x_fake", outputs="fake_score"),
        GLoss(inputs="fake_score", outputs="gloss"),
        UpdateOp(model=gen_model, loss_name="gloss"),
        # discriminator branch: score real images and update on both scores
        ModelOp(model=disc_model, inputs="x", outputs="true_score"),
        DLoss(inputs=("true_score", "fake_score"), outputs="dloss"),
        UpdateOp(model=disc_model, loss_name="dloss")
    ])
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=ModelSaver(model=gen_model, save_dir=save_dir, frequency=5),
                             max_train_steps_per_epoch=max_train_steps_per_epoch)
    return estimator
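# Hedged post-training sketch (assumes gen_model is in scope, e.g. reloaded from
# the ModelSaver output): sample the generator with 100-dim noise as above.
z = np.random.normal(size=(16, 100)).astype('float32')
fake_images = gen_model(z)  # roughly [-1, 1] if the generator ends in tanh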
Example #7
def get_estimator(epochs=2, batch_size=32):
    # step 1
    train_data, eval_data = mnist.load_data()
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           batch_size=batch_size,
                           ops=[
                               ExpandDims(inputs="x", outputs="x"),
                               Minmax(inputs="x", outputs="x")
                           ])
    # step 2
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model,
                inputs="x",
                outputs=["y_pred", "feature_vector"],
                intermediate_layers='dense'),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        CustomLoss(inputs=("feature_vector", "feature_selected"),
                   outputs="feature_loss"),
        LambdaOp(fn=lambda x, y: x + y,
                 inputs=("ce", "feature_loss"),
                 outputs="total_loss"),
        UpdateOp(model=model, loss_name="total_loss")
    ])
    # step 3
    traces = [
        MemoryBank(inputs=("feature_vector", "y"), outputs="feature_selected")
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces)
    return estimator
    def test_mode_ds_id_interaction(self):
        train_data, eval_data = mnist.load_data()
        test_data = eval_data.split(0.5)
        pipeline = fe.Pipeline(train_data=train_data,
                               eval_data=eval_data,
                               test_data=test_data,
                               batch_size=32,
                               ops=[
                                   ExpandDims(inputs="x", outputs="x"),
                                   Minmax(inputs="x", outputs="x")
                               ])
        model = fe.build(model_fn=LeNet, optimizer_fn="adam")
        network = fe.Network(ops=[
            ModelOp(model=model, inputs="x", outputs="y_pred"),
            CrossEntropy(inputs=("y_pred", "y"), outputs="ce", ds_id="ds_1")
        ])
        pipeline_data = pipeline.transform(data=train_data[0], mode="train")
        data1 = network.transform(data=pipeline_data,
                                  mode="infer",
                                  ds_id="ds_1")
        assert "ce" not in data1
        data2 = network.transform(data=pipeline_data,
                                  mode="infer",
                                  ds_id="ds_2")
        assert "ce" not in data2
Example #9
    def test_pytorch_weight_decay_vs_l2(self):
        # Get Data
        train_data, _ = mnist.load_data()
        t_d = train_data.split(128)
        # Initializing models
        pytorch_wd = fe.build(model_fn=MyNet_torch,
                              optimizer_fn=lambda x: torch.optim.SGD(params=x, lr=0.01, weight_decay=self.beta))

        pytorch_l2 = fe.build(model_fn=MyNet_torch, optimizer_fn=lambda x: torch.optim.SGD(params=x, lr=0.01))
        # Initialize pipeline
        pipeline = fe.Pipeline(train_data=t_d,
                               batch_size=128,
                               ops=[ExpandDims(inputs="x", outputs="x", axis=0), Minmax(inputs="x", outputs="x")])
        # Define the two pytorch networks
        network_weight_decay = fe.Network(ops=[
            ModelOp(model=pytorch_wd, inputs="x", outputs="y_pred"),
            CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
            UpdateOp(model=pytorch_wd, loss_name="ce")
        ])

        network_l2 = fe.Network(ops=[
            ModelOp(model=pytorch_l2, inputs="x", outputs="y_pred"),
            CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
            L2Regularizaton(inputs="ce", outputs="l2", model=pytorch_l2, beta=self.beta),
            UpdateOp(model=pytorch_l2, loss_name="l2")
        ])

        # defining traces
        traces = [Accuracy(true_key="y", pred_key="y_pred")]

        # Setting up estimators
        estimator_wd = fe.Estimator(pipeline=pipeline,
                                    network=network_weight_decay,
                                    epochs=1,
                                    traces=traces,
                                    train_steps_per_epoch=1)

        estimator_l2 = fe.Estimator(pipeline=pipeline,
                                    network=network_l2,
                                    epochs=1,
                                    traces=traces,
                                    train_steps_per_epoch=1)
        # Training
        print('********************************Pytorch weight decay training************************************')
        estimator_wd.fit()
        print()
        print('********************************Pytorch L2 Regularization training************************************')
        estimator_l2.fit()
        # testing weights: SGD weight decay should match explicit L2 regularization
        # here, so all six parameter tensors are expected to (nearly) coincide
        count = 0
        for wt, l2 in zip(pytorch_wd.parameters(), pytorch_l2.parameters()):
            if ((wt - l2).abs()).sum() < torch.tensor(10**-6):
                count += 1
        self.assertTrue(count == 6)
def get_estimator(epochs=2,
                  batch_size=32,
                  max_train_steps_per_epoch=None,
                  max_eval_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp()):
    # step 1
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[
                               ExpandDims(inputs="x", outputs="x"),
                               Minmax(inputs="x", outputs="x")
                           ],
                           num_process=0)

    # step 2
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    print([f"{idx}: {x.name}" for idx, x in enumerate(model.submodules)])
    network = fe.Network(ops=[
        Watch(inputs="x"),
        ModelOp(model=model,
                inputs="x",
                outputs=["y_pred", "embedding"],
                intermediate_layers='dense'),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        GradientOp(finals="embedding", inputs="x", outputs="grads"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        Inspector(),
        BestModelSaver(model=model,
                       save_dir=save_dir,
                       metric="accuracy",
                       save_best_mode="max"),
        LRScheduler(model=model,
                    lr_fn=lambda step: cosine_decay(
                        step, cycle_length=3750, init_lr=1e-3)),
        TensorBoard(log_dir="tf_logs",
                    write_embeddings="embedding",
                    embedding_labels="y")
    ]
    estimator = fe.Estimator(
        pipeline=pipeline,
        network=network,
        epochs=epochs,
        traces=traces,
        max_train_steps_per_epoch=max_train_steps_per_epoch,
        max_eval_steps_per_epoch=max_eval_steps_per_epoch)
    return estimator
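# Usage note: the TensorBoard trace above writes to "tf_logs"; the logged
# embeddings can then be browsed with the standard CLI:
#   tensorboard --logdir tf_logs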
Example #11
    def test_tf_model_end_to_end_gradient(self):
        train_data, _ = mnist.load_data()
        pipeline = fe.Pipeline(train_data=train_data,
                               batch_size=4,
                               ops=[ExpandDims(inputs="x", outputs="x"), Minmax(inputs="x", outputs="x")])

        model = fe.build(model_fn=LeNet_tf, optimizer_fn="adam")
        network = fe.Network(ops=[
            ModelOp(model=model, inputs="x", outputs="y_pred"),
            CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
            GradientOp(model=model, finals="ce", outputs="gradients"),
            UpdateOp(model=model, gradients="gradients", loss_name="ce")
        ])
        estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=2, max_train_steps_per_epoch=10)
        estimator.fit()
Example #12
def get_estimator():
    ds, _ = mnist.load_data()
    ds = NegativeImageSimulatedTube(ds)
    pipeline = fe.Pipeline(train_data=ds,
                           ops=[
                               ExpandDims(inputs="x", outputs="x"),
                               Minmax(inputs="x", outputs="x")
                           ])
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=2)
    return estimator
Example #13
def get_estimator():
    pos_real, _ = mnist.load_data()
    neg_real, _ = mnist.load_data()
    neg_sim, _ = mnist.load_data()
    neg_sim = NegativeImageSimulatedTube(neg_sim)
    batch_ds = BatchDataset(datasets=(pos_real, neg_real, neg_sim), num_samples=(2, 2, 1))
    pipeline = fe.Pipeline(train_data=batch_ds,
                           ops=[ExpandDims(inputs="x", outputs="x"), Minmax(inputs="x", outputs="x")])
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=2)
    return estimator
Example #14
def get_estimator(epochs=2, batch_size=32):
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[ExpandDims(inputs="x", outputs="x"), Minmax(inputs="x", outputs="x")])
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        DebugOp(inputs="ce", outputs="ce", mode="train"),
        UpdateOp(model=model, loss_name="ce")
    ])
    estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=epochs)
    return estimator
Example #15
def get_estimator(batch_size=100,
                  epochs=20,
                  max_train_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp()):
    train_data, _ = load_data()
    pipeline = fe.Pipeline(train_data=train_data,
                           batch_size=batch_size,
                           ops=[
                               ExpandDims(inputs="x", outputs="x", axis=0),
                               Minmax(inputs="x", outputs="x"),
                               Binarize(inputs="x", outputs="x",
                                        threshold=0.5),
                           ])

    encode_model = fe.build(model_fn=EncoderNet,
                            optimizer_fn="adam",
                            model_name="encoder")
    decode_model = fe.build(model_fn=DecoderNet,
                            optimizer_fn="adam",
                            model_name="decoder")

    network = fe.Network(ops=[
        ModelOp(model=encode_model, inputs="x", outputs="meanlogvar"),
        SplitOp(inputs="meanlogvar", outputs=("mean", "logvar")),
        ReparameterizepOp(inputs=("mean", "logvar"), outputs="z"),
        ModelOp(model=decode_model, inputs="z", outputs="x_logit"),
        CrossEntropy(inputs=("x_logit", "x"), outputs="cross_entropy"),
        CVAELoss(inputs=("cross_entropy", "mean", "logvar", "z"),
                 outputs="loss"),
        UpdateOp(model=encode_model, loss_name="loss"),
        UpdateOp(model=decode_model, loss_name="loss"),
    ])

    traces = [
        BestModelSaver(model=encode_model, save_dir=save_dir),
        BestModelSaver(model=decode_model, save_dir=save_dir)
    ]

    estimator = fe.Estimator(
        pipeline=pipeline,
        network=network,
        epochs=epochs,
        traces=traces,
        max_train_steps_per_epoch=max_train_steps_per_epoch)

    return estimator
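# Hedged sampling sketch: decode random latents after training. LATENT_DIM is
# hypothetical and must match the z produced by ReparameterizepOp.
LATENT_DIM = 2
z = np.random.normal(size=(16, LATENT_DIM)).astype("float32")
images = tf.sigmoid(decode_model(z))  # if x_logit is pre-sigmoid, as the name suggests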
def get_estimator():
    ds1 = create_dataset1()
    ds2 = create_dataset2()
    batch_ds = BatchDataset(datasets=(ds1, ds2), num_samples=(1, 1))
    pipeline = fe.Pipeline(train_data=batch_ds,
                           ops=[
                               ExpandDims(inputs="x", outputs="x"),
                               Minmax(inputs="x", outputs="x")
                           ])
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=4,
                             traces=DebugTrace(inputs="y"))
    return estimator
Example #17
def get_estimator():
    # step 1
    train_data, eval_data = mnist.load_data()
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           batch_size=32,
                           ops=[
                               ExpandDims(inputs="x", outputs="x"),
                               Minmax(inputs="x", outputs="x")
                           ])
    # step 2
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        # merge_grad=4: accumulate gradients across 4 steps before applying an update
        UpdateOp(model=model, loss_name="ce", merge_grad=4)
    ])
    # step 3
    estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=2)
    return estimator
Example #18
    def create_estimator_for_arc(self, model, use_eval, axis):
        train_data, eval_data = mnist.load_data()
        pipeline = fe.Pipeline(train_data=train_data,
                               eval_data=eval_data if use_eval else None,
                               batch_size=8,
                               ops=[
                                   ExpandDims(inputs="x", outputs="x", axis=axis),
                                   Minmax(inputs="x", outputs="x")
                               ])
        network = fe.Network(ops=[
            ModelOp(model=model, inputs="x", outputs="y_pred"),
            CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
            UpdateOp(model=model, loss_name="ce")
        ])
        estimator = fe.Estimator(pipeline=pipeline,
                                 network=network,
                                 epochs=2,
                                 traces=LRScheduler(model=model, lr_fn=ARC(1)),
                                 max_train_steps_per_epoch=10)
        return estimator
Example #19
    def test_pytorch_l2_vs_tensorflow_l2(self):
        # Get Data
        train_data, eval_data = mnist.load_data()
        t_d = train_data.split(128)
        # Initializing Pytorch model
        pytorch_l2 = fe.build(model_fn=MyNet_torch, optimizer_fn=lambda x: torch.optim.SGD(params=x, lr=0.01))
        # Initialize Pytorch pipeline
        pipeline = fe.Pipeline(train_data=t_d,
                               eval_data=eval_data,
                               batch_size=128,
                               ops=[ExpandDims(inputs="x", outputs="x", axis=0),
                                    Minmax(inputs="x", outputs="x")])
        # Initialize Pytorch Network
        network_l2 = fe.Network(ops=[
            ModelOp(model=pytorch_l2, inputs="x", outputs="y_pred"),
            CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
            L2Regularizaton(inputs="ce", outputs="l2", model=pytorch_l2, beta=self.beta),
            UpdateOp(model=pytorch_l2, loss_name="l2")
        ])
        # step 3
        traces = [
            Accuracy(true_key="y", pred_key="y_pred")
        ]
        # Initialize Pytorch estimator
        estimator_l2 = fe.Estimator(pipeline=pipeline,
                                    network=network_l2,
                                    epochs=1,
                                    traces=traces,
                                    train_steps_per_epoch=1,
                                    monitor_names=["ce", "l2"])
        print('********************************Pytorch L2 Regularization training************************************')
        estimator_l2.fit()

        # Converting Pytorch weights to numpy
        torch_wt = []
        for _, param in pytorch_l2.named_parameters():
            if param.requires_grad:
                torch_wt.append(param.detach().numpy())

        # step 1
        pipeline = fe.Pipeline(train_data=t_d,
                               eval_data=eval_data,
                               batch_size=128,
                               ops=[ExpandDims(inputs="x", outputs="x"), Minmax(inputs="x", outputs="x")])
        # step 2
        model_tf = fe.build(model_fn=MyNet_tf, optimizer_fn=lambda: tf.optimizers.SGD(learning_rate=0.01))
        network = fe.Network(ops=[
            ModelOp(model=model_tf, inputs="x", outputs="y_pred"),
            CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
            L2Regularizaton(inputs="ce", outputs="l2", model=model_tf, beta=self.beta),
            UpdateOp(model=model_tf, loss_name="l2")
        ])
        # step 3
        traces = [
            Accuracy(true_key="y", pred_key="y_pred")
        ]
        estimator = fe.Estimator(pipeline=pipeline,
                                 network=network,
                                 epochs=1,
                                 traces=traces,
                                 train_steps_per_epoch=1,
                                 monitor_names=["ce", "l2"])
        print('*******************************Tensorflow L2 Regularization training***********************************')
        estimator.fit()

        # Converting TF weights to numpy
        tf_wt = []
        for layer in model_tf.layers:
            for w in layer.trainable_variables:
                tf_wt.append(w.numpy())

        # testing weights: np.transpose aligns the torch layout with the TF
        # layout; all six tensors should match across frameworks
        count = 0
        for tf_t, tr in zip(tf_wt, torch_wt):
            if np.sum(np.abs(tf_t - np.transpose(tr))) < (10**-5):
                count += 1
        self.assertTrue(count == 6)
Example #20
def get_estimator(epochs=20, batch_size=128, max_train_steps_per_epoch=None, save_dir=tempfile.mkdtemp()):
    # Dataset Creation
    (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.mnist.load_data()
    x_eval0, y_eval0 = x_eval[np.where((y_eval == 1))], np.ones(y_eval[np.where((y_eval == 1))].shape)
    x_eval1, y_eval1 = x_eval[np.where((y_eval != 1))], y_eval[np.where((y_eval != 1))]

    # Ensuring outliers comprise 50% of the dataset
    index = np.random.choice(x_eval1.shape[0], int(x_eval0.shape[0]), replace=False)
    x_eval1, y_eval1 = x_eval1[index], np.zeros(y_eval1[index].shape)

    x_train, y_train = x_train[np.where((y_train == 1))], np.zeros(y_train[np.where((y_train == 1))].shape)
    train_data = fe.dataset.NumpyDataset({"x": x_train, "y": y_train})

    x_eval, y_eval = np.concatenate([x_eval0, x_eval1]), np.concatenate([y_eval0, y_eval1])
    eval_data = fe.dataset.NumpyDataset({"x": x_eval, "y": y_eval})

    pipeline = fe.Pipeline(
        train_data=train_data,
        eval_data=eval_data,
        batch_size=batch_size,
        ops=[
            ExpandDims(inputs="x", outputs="x"),
            Normalize(inputs="x", outputs="x", mean=1.0, std=1.0, max_pixel_value=127.5),  # scale pixels to [-1, 1]
            LambdaOp(fn=lambda x: x + np.random.normal(loc=0.0, scale=0.155, size=(28, 28, 1)),
                     inputs="x",
                     outputs="x_w_noise",
                     mode="train")
        ])

    recon_model = fe.build(model_fn=reconstructor,
                           optimizer_fn=lambda: tf.optimizers.RMSprop(2e-4),
                           model_name="reconstructor")
    disc_model = fe.build(model_fn=discriminator,
                          optimizer_fn=lambda: tf.optimizers.RMSprop(1e-4),
                          model_name="discriminator")

    network = fe.Network(ops=[
        ModelOp(model=recon_model, inputs="x_w_noise", outputs="x_fake", mode="train"),
        ModelOp(model=recon_model, inputs="x", outputs="x_fake", mode="eval"),
        ModelOp(model=disc_model, inputs="x_fake", outputs="fake_score"),
        ModelOp(model=disc_model, inputs="x", outputs="true_score"),
        RLoss(inputs=("fake_score", "x_fake", "x"), outputs="rloss"),
        UpdateOp(model=recon_model, loss_name="rloss"),
        DLoss(inputs=("true_score", "fake_score"), outputs="dloss"),
        UpdateOp(model=disc_model, loss_name="dloss"),
    ])

    traces = [
        F1AUCScores(true_key="y", pred_key="fake_score", mode="eval", output_name=["auc_score", "f1_score"]),
        BestModelSaver(model=recon_model, save_dir=save_dir, metric='f1_score', save_best_mode='max'),
        BestModelSaver(model=disc_model, save_dir=save_dir, metric='f1_score', save_best_mode='max'),
    ]

    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             max_train_steps_per_epoch=max_train_steps_per_epoch,
                             log_steps=50)

    return estimator
    def test_multi_input(self):
        op = ExpandDims(axis=0, inputs='x', outputs='x')
        data = op.forward(data=self.multi_input, state={})
        self.assertTrue(is_equal(data, self.multi_output))

    def test_single_input(self):
        op = ExpandDims(axis=-1, inputs='x', outputs='x')
        data = op.forward(data=[np.array([1, 2, 3, 4])], state={})
        self.assertTrue(is_equal(data, self.single_output))