Example #1
 def test_on_epoch_end_monitor_op(self):
     early_stopping = EarlyStopping(baseline=5.0)
     early_stopping.system = sample_system_object()
     early_stopping.min_delta = 1
     early_stopping.monitor_op = np.greater
     early_stopping.best = 7
     early_stopping.on_epoch_end(data=self.data)
     with self.subTest('Check value of wait'):
         self.assertEqual(early_stopping.wait, 0)
     with self.subTest('Check value of best'):
         self.assertEqual(early_stopping.best, 10)
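This example and the other test-method snippets below are taken out of a unit-test class and assume a surrounding harness. A minimal sketch of that harness follows; the import paths, the location of the sample_system_object() helper, and the contents of self.data / self.expected_msg are assumptions for illustration, not part of the original snippets.

import unittest
from io import StringIO
from unittest.mock import patch

import numpy as np

from fastestimator.trace.adapt import EarlyStopping                 # assumed module path
from fastestimator.test.unittest_util import sample_system_object   # assumed helper location
from fastestimator.util.data import Data                            # assumed module path


class TestEarlyStopping(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Illustrative fixtures only: the real test defines the monitored metric
        # value and the exact log line that Example #2 compares against.
        cls.data = Data({'loss': 10})
        cls.expected_msg = 'FastEstimator-EarlyStopping: ...'  # placeholder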
Example #2
 def test_on_epoch_end_early_stopping_msg(self):
     with patch('sys.stdout', new=StringIO()) as fake_stdout:
         early_stopping = EarlyStopping(baseline=5.0)
         early_stopping.system = sample_system_object()
         early_stopping.system.epoch_idx = 3
         early_stopping.best = 2
         early_stopping.on_epoch_end(data=self.data)
         log = fake_stdout.getvalue().strip()
         self.assertEqual(log, self.expected_msg)
Example #3
def get_estimator(epochs=30,
                  batch_size=128,
                  seq_length=20,
                  vocab_size=10000,
                  data_dir=None,
                  train_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp()):
    train_data, eval_data, _, _ = load_data(root_dir=data_dir,
                                            seq_length=seq_length + 1)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           ops=[
                               CreateInputAndTarget(inputs="x",
                                                    outputs=("x", "y")),
                               Batch(batch_size=batch_size, drop_last=True)
                           ])
    # step 2: prepare network
    model = fe.build(
        model_fn=lambda: BuildModel(
            vocab_size, embedding_dim=300, rnn_units=600),
        optimizer_fn=lambda x: torch.optim.SGD(x, lr=1.0, momentum=0.9))
    network = fe.Network(ops=[
        DimesionAdjust(inputs=("x", "y"), outputs=("x", "y")),
        ModelOp(model=model, inputs="x", outputs="y_pred", mode=None),
        CrossEntropy(inputs=("y_pred", "y"),
                     outputs="ce",
                     form="sparse",
                     from_logits=True),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3: prepare estimator
    traces = [
        Perplexity(inputs="ce", outputs="perplexity", mode="eval"),
        LRScheduler(model=model,
                    lr_fn=lambda step: lr_schedule(step, init_lr=1.0)),
        BestModelSaver(model=model,
                       save_dir=save_dir,
                       metric='perplexity',
                       save_best_mode='min',
                       load_best_final=True),
        EarlyStopping(monitor="perplexity", patience=5)
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             train_steps_per_epoch=train_steps_per_epoch)
    return estimator
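The get_estimator() above comes from a FastEstimator language-modeling script; CreateInputAndTarget, BuildModel, DimesionAdjust, lr_schedule, and Perplexity are defined elsewhere in that script and are not repeated here. A sketch of driving it is shown below; the argument values are illustrative, and fit() is the standard fe.Estimator entry point.

if __name__ == "__main__":
    # Short smoke-run; drop the overrides to train with the defaults above.
    est = get_estimator(epochs=2, train_steps_per_epoch=10)
    est.fit()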
Example #4
 def test_on_begin_baseline_arbitrary_value(self):
     early_stopping = EarlyStopping(baseline=5.0)
     early_stopping.system = sample_system_object()
     early_stopping.on_begin(data=self.data)
     self.assertEqual(early_stopping.best, 5.0)
Example #5
 def test_on_begin_compare_max(self):
     early_stopping = EarlyStopping(compare='max')
     early_stopping.system = sample_system_object()
     early_stopping.on_begin(data=self.data)
     self.assertEqual(early_stopping.best, -np.Inf)
Example #6
def get_estimator(epochs=200,
                  batch_size=128,
                  max_train_steps_per_epoch=None,
                  max_eval_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp(),
                  data_dir=None):
    # step 1. prepare pipeline
    train_data, eval_data = omniglot.load_data(root_dir=data_dir)
    test_data = eval_data.split(0.5)

    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[
                               ReadImage(inputs="x_a",
                                         outputs="x_a",
                                         color_flag='gray'),
                               ReadImage(inputs="x_b",
                                         outputs="x_b",
                                         color_flag='gray'),
                               Sometimes(ShiftScaleRotate(image_in="x_a",
                                                          image_out="x_a",
                                                          shift_limit=0.05,
                                                          scale_limit=0.2,
                                                          rotate_limit=10,
                                                          mode="train"),
                                         prob=0.89),
                               Sometimes(ShiftScaleRotate(image_in="x_b",
                                                          image_out="x_b",
                                                          shift_limit=0.05,
                                                          scale_limit=0.2,
                                                          rotate_limit=10,
                                                          mode="train"),
                                         prob=0.89),
                               Minmax(inputs="x_a", outputs="x_a"),
                               Minmax(inputs="x_b", outputs="x_b")
                           ])

    # step 2. prepare model
    model = fe.build(model_fn=siamese_network,
                     model_name="siamese_net",
                     optimizer_fn="adam")

    network = fe.Network(ops=[
        ModelOp(inputs=["x_a", "x_b"], model=model, outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="loss", form="binary"),
        UpdateOp(model=model, loss_name="loss")
    ])

    # step 3: prepare estimator
    traces = [
        LRScheduler(model=model, lr_fn=lr_schedule),
        Accuracy(true_key="y", pred_key="y_pred"),
        OneShotAccuracy(dataset=eval_data,
                        model=model,
                        output_name='one_shot_accuracy'),
        BestModelSaver(model=model,
                       save_dir=save_dir,
                       metric="one_shot_accuracy",
                       save_best_mode="max"),
        EarlyStopping(monitor="one_shot_accuracy",
                      patience=20,
                      compare='max',
                      mode="eval")
    ]

    estimator = fe.Estimator(
        network=network,
        pipeline=pipeline,
        epochs=epochs,
        traces=traces,
        max_train_steps_per_epoch=max_train_steps_per_epoch,
        max_eval_steps_per_epoch=max_eval_steps_per_epoch)
    return estimator
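This Omniglot one-shot example appears to target an older FastEstimator release: it passes batch_size to the Pipeline and uses max_train_steps_per_epoch / max_eval_steps_per_epoch, which later releases renamed to train_steps_per_epoch / eval_steps_per_epoch (compare Example #3). A sketch of running it, assuming siamese_network, lr_schedule, and OneShotAccuracy are defined in the same script:

if __name__ == "__main__":
    # Short smoke-run; values are illustrative only.
    est = get_estimator(epochs=2, max_train_steps_per_epoch=10)
    est.fit()
    est.test()  # evaluates on the 50% split carved out of eval_data above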