Example No. 1
def get_estimator(epochs=50, batch_size=256, max_train_steps_per_epoch=None, save_dir=tempfile.mkdtemp()):
    train_data, _ = mnist.load_data()
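    # Normalize maps pixels from [0, 255] to [-1, 1]; LambdaOp draws a fresh 100-dim N(0, 1) noise vector per sample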
    pipeline = fe.Pipeline(
        train_data=train_data,
        batch_size=batch_size,
        ops=[
            ExpandDims(inputs="x", outputs="x"),
            Normalize(inputs="x", outputs="x", mean=1.0, std=1.0, max_pixel_value=127.5),
            LambdaOp(fn=lambda: np.random.normal(size=[100]).astype('float32'), outputs="z")
        ])
    gen_model = fe.build(model_fn=generator, optimizer_fn=lambda: tf.optimizers.Adam(1e-4))
    disc_model = fe.build(model_fn=discriminator, optimizer_fn=lambda: tf.optimizers.Adam(1e-4))
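    # GAN step: score generated images and update the generator, then score real images and update the discriminator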
    network = fe.Network(ops=[
        ModelOp(model=gen_model, inputs="z", outputs="x_fake"),
        ModelOp(model=disc_model, inputs="x_fake", outputs="fake_score"),
        GLoss(inputs="fake_score", outputs="gloss"),
        UpdateOp(model=gen_model, loss_name="gloss"),
        ModelOp(inputs="x", model=disc_model, outputs="true_score"),
        DLoss(inputs=("true_score", "fake_score"), outputs="dloss"),
        UpdateOp(model=disc_model, loss_name="dloss")
    ])
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=ModelSaver(model=gen_model, save_dir=save_dir, frequency=5),
                             max_train_steps_per_epoch=max_train_steps_per_epoch)
    return estimator
Example No. 2
 def test_repeat_fn_exterior_value(self):
     add_op = LambdaOp(inputs='x', outputs=('x', 'y'), fn=lambda x: (x + 1, x * x), mode='eval')
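     # "z" appears only in the repeat condition, so Repeat adds it to the op's inputs alongside add_op's "x"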
     repeat_op = Repeat(add_op, repeat=lambda y, z: y + z < 25)
     with self.subTest('Check op inputs'):
         self.assertListEqual(repeat_op.inputs, ['x', 'z'])
     with self.subTest('Check op outputs'):
         self.assertListEqual(repeat_op.outputs, ['x', 'y'])
     with self.subTest('Check op mode'):
         self.assertSetEqual(repeat_op.mode, {'eval'})
     output = repeat_op.forward(data=[np.ones([1]), 10 + np.ones([1])], state={"mode": "eval"})
     with self.subTest('Check output type'):
         self.assertEqual(type(output), list)
     with self.subTest('Check output value (x)'):
         self.assertEqual(5, output[0])
     with self.subTest('Check output value (y)'):
         self.assertEqual(16, output[1])
Example No. 3
def get_estimator(target_size=128,
                  epochs=55,
                  save_dir=tempfile.mkdtemp(),
                  max_train_steps_per_epoch=None,
                  data_dir=None):
    # assert growth parameters
    num_grow = np.log2(target_size) - 2
    assert num_grow >= 1 and num_grow % 1 == 0, "target_size must be a power of 2 and at least 8"
    num_phases = int(2 * num_grow + 1)
    assert epochs % num_phases == 0, "epochs must be a multiple of {} for target size {}".format(num_phases, target_size)
    num_grow, phase_length = int(num_grow), int(epochs / num_phases)
    event_epoch = [1, 1 + phase_length] + [phase_length * (2 * i + 1) + 1 for i in range(1, num_grow)]
    event_size = [4] + [2**(i + 3) for i in range(num_grow)]
    # set up data schedules
    dataset = nih_chestxray.load_data(root_dir=data_dir)
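    # each growth event doubles the working resolution, so resizes and batch sizes are re-scheduled per phase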
    resize_map = {
        epoch: Resize(image_in="x", image_out="x", height=size, width=size)
        for (epoch, size) in zip(event_epoch, event_size)
    }
    resize_low_res_map1 = {
        epoch: Resize(image_in="x", image_out="x_low_res", height=size // 2, width=size // 2)
        for (epoch, size) in zip(event_epoch, event_size)
    }
    resize_low_res_map2 = {
        epoch: Resize(image_in="x_low_res", image_out="x_low_res", height=size, width=size)
        for (epoch, size) in zip(event_epoch, event_size)
    }
    batch_size_map = {
        epoch: max(512 // size, 4) * get_num_devices() if size <= 512 else 2 * get_num_devices()
        for (epoch, size) in zip(event_epoch, event_size)
    }
    batch_scheduler = EpochScheduler(epoch_dict=batch_size_map)
    pipeline = fe.Pipeline(
        batch_size=batch_scheduler,
        train_data=dataset,
        drop_last=True,
        ops=[
            ReadImage(inputs="x", outputs="x", color_flag='gray'),
            EpochScheduler(epoch_dict=resize_map),
            EpochScheduler(epoch_dict=resize_low_res_map1),
            EpochScheduler(epoch_dict=resize_low_res_map2),
            Normalize(inputs=["x", "x_low_res"], outputs=["x", "x_low_res"], mean=1.0, std=1.0, max_pixel_value=127.5),
            ChannelTranspose(inputs=["x", "x_low_res"], outputs=["x", "x_low_res"]),
            LambdaOp(fn=lambda: np.random.normal(size=[512]).astype('float32'), outputs="z")
        ])
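    # fade_in_alpha is shared by the models, ImageBlender, and AlphaController to blend in each new resolution block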
    fade_in_alpha = torch.tensor(1.0)
    d_models = fe.build(
        model_fn=lambda: build_D(fade_in_alpha, target_resolution=int(np.log2(target_size)), num_channels=1),
        optimizer_fn=[lambda x: Adam(x, lr=0.001, betas=(0.0, 0.99), eps=1e-8)] * len(event_size),
        model_name=["d_{}".format(size) for size in event_size])

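    # one generator per resolution for training, plus a final full generator "G" (no optimizer) that ModelSaver checkpoints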
    g_models = fe.build(
        model_fn=lambda: build_G(fade_in_alpha, target_resolution=int(np.log2(target_size)), num_channels=1),
        optimizer_fn=[lambda x: Adam(x, lr=0.001, betas=(0.0, 0.99), eps=1e-8)] * len(event_size) + [None],
        model_name=["g_{}".format(size) for size in event_size] + ["G"])
    fake_img_map = {
        epoch: ModelOp(inputs="z", outputs="x_fake", model=model)
        for (epoch, model) in zip(event_epoch, g_models[:-1])
    }
    fake_score_map = {
        epoch: ModelOp(inputs="x_fake", outputs="fake_score", model=model)
        for (epoch, model) in zip(event_epoch, d_models)
    }
    real_score_map = {
        epoch: ModelOp(inputs="x_blend", outputs="real_score", model=model)
        for (epoch, model) in zip(event_epoch, d_models)
    }
    interp_score_map = {
        epoch: ModelOp(inputs="x_interp", outputs="interp_score", model=model)
        for (epoch, model) in zip(event_epoch, d_models)
    }
    g_update_map = {
        epoch: UpdateOp(loss_name="gloss", model=model)
        for (epoch, model) in zip(event_epoch, g_models[:-1])
    }
    d_update_map = {epoch: UpdateOp(loss_name="dloss", model=model) for (epoch, model) in zip(event_epoch, d_models)}
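    # EpochSchedulers swap in the ops matching the current resolution phase as training progresses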
    network = fe.Network(ops=[
        EpochScheduler(fake_img_map),
        EpochScheduler(fake_score_map),
        ImageBlender(alpha=fade_in_alpha, inputs=("x", "x_low_res"), outputs="x_blend"),
        EpochScheduler(real_score_map),
        Interpolate(inputs=("x_fake", "x"), outputs="x_interp"),
        EpochScheduler(interp_score_map),
        GradientPenalty(inputs=("x_interp", "interp_score"), outputs="gp"),
        GLoss(inputs="fake_score", outputs="gloss"),
        DLoss(inputs=("real_score", "fake_score", "gp"), outputs="dloss"),
        EpochScheduler(g_update_map),
        EpochScheduler(d_update_map)
    ])
    traces = [
        AlphaController(alpha=fade_in_alpha,
                        fade_start_epochs=event_epoch[1:],
                        duration=phase_length,
                        batch_scheduler=batch_scheduler,
                        num_examples=len(dataset)),
        ModelSaver(model=g_models[-1], save_dir=save_dir, frequency=phase_length),
        ImageSaving(
            epoch_model_map={epoch - 1: model
                             for (epoch, model) in zip(event_epoch[1:] + [epochs + 1], g_models[:-1])},
            save_dir=save_dir)
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             max_train_steps_per_epoch=max_train_steps_per_epoch)
    return estimator
Example No. 4
def get_estimator(epochs=20, batch_size=128, max_train_steps_per_epoch=None, save_dir=tempfile.mkdtemp()):
    # Dataset Creation
    (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.mnist.load_data()
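    # evaluation set: digit 1 is the normal class (label 1) and every other digit is an outlier (label 0)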
    x_eval0, y_eval0 = x_eval[np.where((y_eval == 1))], np.ones(y_eval[np.where((y_eval == 1))].shape)
    x_eval1, y_eval1 = x_eval[np.where((y_eval != 1))], y_eval[np.where((y_eval != 1))]

    # Ensuring outliers comprise 50% of the dataset
    index = np.random.choice(x_eval1.shape[0], int(x_eval0.shape[0]), replace=False)
    x_eval1, y_eval1 = x_eval1[index], np.zeros(y_eval1[index].shape)

    x_train, y_train = x_train[np.where((y_train == 1))], np.zeros(y_train[np.where((y_train == 1))].shape)
    train_data = fe.dataset.NumpyDataset({"x": x_train, "y": y_train})

    x_eval, y_eval = np.concatenate([x_eval0, x_eval1]), np.concatenate([y_eval0, y_eval1])
    eval_data = fe.dataset.NumpyDataset({"x": x_eval, "y": y_eval})

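    # during training, a LambdaOp corrupts the inputs with Gaussian noise so the reconstructor learns to denoise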
    pipeline = fe.Pipeline(
        train_data=train_data,
        eval_data=eval_data,
        batch_size=batch_size,
        ops=[
            ExpandDims(inputs="x", outputs="x"),
            Normalize(inputs="x", outputs="x", mean=1.0, std=1.0, max_pixel_value=127.5),
            LambdaOp(fn=lambda x: x + np.random.normal(loc=0.0, scale=0.155, size=(28, 28, 1)),
                     inputs="x",
                     outputs="x_w_noise",
                     mode="train")
        ])

    recon_model = fe.build(model_fn=reconstructor,
                           optimizer_fn=lambda: tf.optimizers.RMSprop(2e-4),
                           model_name="reconstructor")
    disc_model = fe.build(model_fn=discriminator,
                          optimizer_fn=lambda: tf.optimizers.RMSprop(1e-4),
                          model_name="discriminator")

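    # the reconstructor sees noisy inputs in training but clean ones in eval; the discriminator scores fakes and reals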
    network = fe.Network(ops=[
        ModelOp(model=recon_model, inputs="x_w_noise", outputs="x_fake", mode="train"),
        ModelOp(model=recon_model, inputs="x", outputs="x_fake", mode="eval"),
        ModelOp(model=disc_model, inputs="x_fake", outputs="fake_score"),
        ModelOp(model=disc_model, inputs="x", outputs="true_score"),
        RLoss(inputs=("fake_score", "x_fake", "x"), outputs="rloss"),
        UpdateOp(model=recon_model, loss_name="rloss"),
        DLoss(inputs=("true_score", "fake_score"), outputs="dloss"),
        UpdateOp(model=disc_model, loss_name="dloss"),
    ])

    traces = [
        F1AUCScores(true_key="y", pred_key="fake_score", mode="eval", output_name=["auc_score", "f1_score"]),
        BestModelSaver(model=recon_model, save_dir=save_dir, metric='f1_score', save_best_mode='max'),
        BestModelSaver(model=disc_model, save_dir=save_dir, metric='f1_score', save_best_mode='max'),
    ]

    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             max_train_steps_per_epoch=max_train_steps_per_epoch,
                             log_steps=50)

    return estimator
Example No. 5
 def test_batch_forward(self):
     op = LambdaOp(fn=np.sum)
     data = tf.convert_to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
     result = op.forward_batch(data=[data], state={})
     ans = np.array([6, 15, 24], dtype=np.float32)
     self.assertTrue(np.array_equal(result, ans))
Example No. 6
 def test_multi_input(self):
     op = LambdaOp(fn=np.reshape)
     data = op.forward(data=[np.array([1, 2, 3, 4]), (2, 2)], state={})
     self.assertTrue(is_equal(data, np.array([[1, 2], [3, 4]])))
Example No. 7
 def test_single_input(self):
     op = LambdaOp(fn=np.sum)
     data = op.forward(data=[[1, 2, 3]], state={})
     self.assertEqual(data, 6)
Example No. 8
def get_estimator(batch_size=4,
                  epochs=2,
                  max_train_steps_per_epoch=None,
                  log_steps=100,
                  style_weight=5.0,
                  content_weight=1.0,
                  tv_weight=1e-4,
                  save_dir=tempfile.mkdtemp(),
                  style_img_path='Vassily_Kandinsky,_1913_-_Composition_7.jpg',
                  data_dir=None):
    train_data, _ = mscoco.load_data(root_dir=data_dir,
                                     load_bboxes=False,
                                     load_masks=False,
                                     load_captions=False)

    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    style_img = cv2.imread(style_img_path)
    assert style_img is not None, "cannot load the style image; check that style_img_path points to a valid image file"
    style_img = cv2.resize(style_img, (256, 256))
    style_img = (style_img.astype(np.float32) - 127.5) / 127.5

    pipeline = fe.Pipeline(
        train_data=train_data,
        batch_size=batch_size,
        ops=[
            ReadImage(inputs="image", outputs="image"),
            Normalize(inputs="image", outputs="image", mean=1.0, std=1.0, max_pixel_value=127.5),
            Resize(height=256, width=256, image_in="image", image_out="image"),
            LambdaOp(fn=lambda: style_img, outputs="style_image"),
            ChannelTranspose(inputs=["image", "style_image"], outputs=["image", "style_image"])
        ])

    model = fe.build(model_fn=StyleTransferNet,
                     model_name="style_transfer_net",
                     optimizer_fn=lambda x: torch.optim.Adam(x, lr=1e-3))

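    # perceptual losses: VGG features of the stylized output are compared against the style and content targets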
    network = fe.Network(ops=[
        ModelOp(inputs="image", model=model, outputs="image_out"),
        ExtractVGGFeatures(inputs="style_image", outputs="y_style", device=device),
        ExtractVGGFeatures(inputs="image", outputs="y_content", device=device),
        ExtractVGGFeatures(inputs="image_out", outputs="y_pred", device=device),
        StyleContentLoss(style_weight=style_weight,
                         content_weight=content_weight,
                         tv_weight=tv_weight,
                         inputs=('y_pred', 'y_style', 'y_content', 'image_out'),
                         outputs='loss'),
        UpdateOp(model=model, loss_name="loss")
    ])

    estimator = fe.Estimator(
        network=network,
        pipeline=pipeline,
        traces=ModelSaver(model=model, save_dir=save_dir, frequency=1),
        epochs=epochs,
        max_train_steps_per_epoch=max_train_steps_per_epoch,
        log_steps=log_steps)

    return estimator
Example No. 9
 def test_batch_forward(self):
     op = LambdaOp(fn=np.sum)
     data = tf.convert_to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
     result = op.forward_batch(data=[data], state={})
     self.assertEqual(result, 45)