Example #1
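These snippets assume the usual imports for this FastEstimator generation; a minimal prelude (treat the op locations as an assumption, since import paths moved between FastEstimator releases):

import os
import tempfile

import cv2
import numpy as np
import tensorflow as tf

import fastestimator as fe
# RecordWriter plus the op and trace classes used below (ImageReader, Resize,
# Rescale, ModelOp, ModelSaver, ...) live in fastestimator submodules whose
# exact paths depend on the release these examples were written against.
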
def get_estimator(style_img_path=None,
                  data_path=None,
                  style_weight=5.0,
                  content_weight=1.0,
                  tv_weight=1e-4,
                  steps_per_epoch=None,
                  validation_steps=None,
                  model_dir=tempfile.mkdtemp()):
    train_csv, _, path = load_data(data_path, load_object=False)
    if style_img_path is None:
        style_img_path = tf.keras.utils.get_file(
            'kandinsky.jpg',
            'https://storage.googleapis.com/download.tensorflow.org/example_images/Vassily_Kandinsky%2C_1913_'
            '-_Composition_7.jpg')
    style_img = cv2.imread(style_img_path)
    assert (style_img is not None), "Invalid style reference image"
    tfr_save_dir = os.path.join(path, 'tfrecords')
    style_img = (style_img.astype(np.float32) - 127.5) / 127.5  # scale pixels to [-1, 1]
    style_img_t = tf.convert_to_tensor(np.expand_dims(style_img, axis=0))
    writer = RecordWriter(train_data=train_csv,
                          save_dir=tfr_save_dir,
                          ops=[
                              ImageReader(inputs="image",
                                          parent_path=path,
                                          outputs="image"),
                              Resize(inputs="image",
                                     target_size=(256, 256),
                                     outputs="image")
                          ])

    pipeline = fe.Pipeline(batch_size=4,
                           data=writer,
                           ops=[Rescale(inputs="image", outputs="image")])

    model = fe.build(model_def=styleTransferNet,
                     model_name="style_transfer_net",
                     loss_name="loss",
                     optimizer=tf.keras.optimizers.Adam(1e-3))

    network = fe.Network(ops=[
        ModelOp(inputs="image", model=model, outputs="image_out"),
        ExtractVGGFeatures(inputs=lambda: style_img_t, outputs="y_style"),
        ExtractVGGFeatures(inputs="image", outputs="y_content"),
        ExtractVGGFeatures(inputs="image_out", outputs="y_pred"),
        StyleContentLoss(style_weight=style_weight,
                         content_weight=content_weight,
                         tv_weight=tv_weight,
                         inputs=('y_pred', 'y_style', 'y_content',
                                 'image_out'),
                         outputs='loss')
    ])

    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=2,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps,
                             traces=ModelSaver(model_name="style_transfer_net",
                                               save_dir=model_dir))
    return estimator
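Assuming the helpers referenced above (load_data, styleTransferNet, ExtractVGGFeatures, StyleContentLoss) are defined alongside this function, training follows the standard FastEstimator pattern; a minimal driver sketch:

if __name__ == "__main__":
    # Build and fit the estimator; the ModelSaver trace writes
    # style_transfer_net into model_dir once training finishes.
    est = get_estimator()
    est.fit()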
Example #2
def get_estimator(batch_size=8, epochs=25, steps_per_epoch=None, validation_steps=None, model_dir=tempfile.mkdtemp()):
    # load CUB200 dataset.
    csv_path, path = cub200.load_data()
    writer = RecordWriter(
        save_dir=os.path.join(path, "tfrecords"),
        train_data=csv_path,
        validation_data=0.2,
        ops=[
            ImageReader(inputs='image', parent_path=path),
            Resize(target_size=(512, 512), keep_ratio=True, outputs='image'),
            MatReader(inputs='annotation', parent_path=path),
            SelectDictKey(),
            Resize((512, 512), keep_ratio=True),
            Reshape(shape=(512, 512, 1), outputs="annotation")
        ])
    # step 1: pipeline
    pipeline = fe.Pipeline(
        batch_size=batch_size,
        data=writer,
        ops=[
            Augmentation2D(inputs=("image", "annotation"),
                           outputs=("image", "annotation"),
                           mode="train",
                           rotation_range=15.0,
                           zoom_range=[0.8, 1.2],
                           flip_left_right=True),
            Rescale(inputs='image', outputs='image')
        ])
    # step 2: network
    opt = tf.optimizers.Adam(learning_rate=0.0001)
    resunet50 = fe.build(model_def=ResUnet50, model_name="resunet50", optimizer=opt, loss_name="total_loss")
    uncertainty = fe.build(model_def=UncertaintyLoss, model_name="uncertainty", optimizer=opt, loss_name="total_loss")
    network = fe.Network(ops=[
        ModelOp(inputs='image', model=resunet50, outputs=["label_pred", "mask_pred"]),
        SparseCategoricalCrossentropy(inputs=["label", "label_pred"], outputs="cls_loss"),
        BinaryCrossentropy(inputs=["annotation", "mask_pred"], outputs="seg_loss"),
        ModelOp(inputs=("cls_loss", "seg_loss"), model=uncertainty, outputs="total_loss"),
        Loss(inputs="total_loss", outputs="total_loss")
    ])
    # step 3: estimator
    traces = [
        Dice(true_key="annotation", pred_key='mask_pred'),
        Accuracy(true_key="label", pred_key="label_pred"),
        ModelSaver(model_name="resunet50", save_dir=model_dir, save_best=True),
        LRController(model_name="resunet50", lr_schedule=CyclicLRSchedule())
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             traces=traces,
                             epochs=epochs,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps)
    return estimator
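UncertaintyLoss itself is defined elsewhere in this example; conceptually it learns how to weight the classification and segmentation losses instead of fixing the weights by hand. A minimal sketch of such a module, following the multi-task uncertainty weighting of Kendall et al. (2018) (the class name and details here are hypothetical):

import tensorflow as tf

class UncertaintyWeighting(tf.keras.Model):
    # Learns one log-variance per task and combines the two scalar losses as
    # exp(-s1)*L1 + s1 + exp(-s2)*L2 + s2, so neither task needs a hand-tuned
    # weight; it could be passed to fe.build(model_def=UncertaintyWeighting, ...).
    def __init__(self):
        super().__init__()
        self.log_var_cls = tf.Variable(0.0, trainable=True)
        self.log_var_seg = tf.Variable(0.0, trainable=True)

    def call(self, inputs):
        cls_loss, seg_loss = inputs
        return (tf.exp(-self.log_var_cls) * cls_loss + self.log_var_cls +
                tf.exp(-self.log_var_seg) * seg_loss + self.log_var_seg)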
Example #3
def get_estimator(data_dir=None, save_dir=None):
    train_csv, data_path = load_data(data_dir)

    imreader = ImageReader(inputs="x", parent_path=data_path, grey_scale=True)
    writer_128 = RecordWriter(
        save_dir=os.path.join(data_path, "tfrecord_128"),
        train_data=train_csv,
        ops=[imreader,
             ResizeRecord(target_size=(128, 128), outputs="x")])
    writer_1024 = RecordWriter(
        save_dir=os.path.join(data_path, "tfrecord_1024"),
        train_data=train_csv,
        ops=[imreader,
             ResizeRecord(target_size=(1024, 1024), outputs="x")])
    # Schedule batch_size by epoch: each key is the epoch at which the batch size switches to the corresponding value.
    batchsize_scheduler_128 = Scheduler({
        0: 128,
        5: 64,
        15: 32,
        25: 16,
        35: 8,
        45: 4
    })
    batchsize_scheduler_1024 = Scheduler({55: 4, 65: 2, 75: 1})
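    # (A note on the assumed Scheduler semantics: the value attached to the most
    # recent epoch key at or below the current epoch is used, so
    # batchsize_scheduler_128 yields 128 for epochs 0-4, 64 for epochs 5-14,
    # and so on, down to 4 from epoch 45 onward.)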
    # pipeline ops
    resize_scheduler_128 = Scheduler({
        0: Resize(inputs="x", size=(4, 4), outputs="x"),
        5: Resize(inputs="x", size=(8, 8), outputs="x"),
        15: Resize(inputs="x", size=(16, 16), outputs="x"),
        25: Resize(inputs="x", size=(32, 32), outputs="x"),
        35: Resize(inputs="x", size=(64, 64), outputs="x"),
        45: None
    })
    resize_scheduler_1024 = Scheduler({
        55: Resize(inputs="x", size=(256, 256), outputs="x"),
        65: Resize(inputs="x", size=(512, 512), outputs="x"),
        75: None
    })
    lowres_op = CreateLowRes(inputs="x", outputs="x_lowres")
    rescale_x = Rescale(inputs="x", outputs="x")
    rescale_lowres = Rescale(inputs="x_lowres", outputs="x_lowres")
    pipeline_128 = fe.Pipeline(
        batch_size=batchsize_scheduler_128,
        data=writer_128,
        ops=[resize_scheduler_128, lowres_op, rescale_x, rescale_lowres])
    pipeline_1024 = fe.Pipeline(
        batch_size=batchsize_scheduler_1024,
        data=writer_1024,
        ops=[resize_scheduler_1024, lowres_op, rescale_x, rescale_lowres])

    pipeline_scheduler = Scheduler({0: pipeline_128, 55: pipeline_1024})

    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001,
                                         beta_1=0.0,
                                         beta_2=0.99,
                                         epsilon=1e-8)

    fade_in_alpha = tf.Variable(initial_value=1.0,
                                dtype='float32',
                                trainable=False)

    d2, d3, d4, d5, d6, d7, d8, d9, d10 = fe.build(
        model_def=lambda: build_D(
            fade_in_alpha=fade_in_alpha, target_resolution=10, num_channels=1),
        model_name=["d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10"],
        optimizer=[optimizer] * 9,
        loss_name=["dloss"] * 9)

    g2, g3, g4, g5, g6, g7, g8, g9, g10, G = fe.build(
        model_def=lambda: build_G(
            fade_in_alpha=fade_in_alpha, target_resolution=10, num_channels=1),
        model_name=[
            "g2", "g3", "g4", "g5", "g6", "g7", "g8", "g9", "g10", "G"
        ],
        optimizer=[optimizer] * 10,
        loss_name=["gloss"] * 10)

    g_scheduler = Scheduler({
        0: ModelOp(model=g2, outputs="x_fake"),
        5: ModelOp(model=g3, outputs="x_fake"),
        15: ModelOp(model=g4, outputs="x_fake"),
        25: ModelOp(model=g5, outputs="x_fake"),
        35: ModelOp(model=g6, outputs="x_fake"),
        45: ModelOp(model=g7, outputs="x_fake"),
        55: ModelOp(model=g8, outputs="x_fake"),
        65: ModelOp(model=g9, outputs="x_fake"),
        75: ModelOp(model=g10, outputs="x_fake")
    })

    fake_score_scheduler = Scheduler({
        0: ModelOp(inputs="x_fake", model=d2, outputs="fake_score"),
        5: ModelOp(inputs="x_fake", model=d3, outputs="fake_score"),
        15: ModelOp(inputs="x_fake", model=d4, outputs="fake_score"),
        25: ModelOp(inputs="x_fake", model=d5, outputs="fake_score"),
        35: ModelOp(inputs="x_fake", model=d6, outputs="fake_score"),
        45: ModelOp(inputs="x_fake", model=d7, outputs="fake_score"),
        55: ModelOp(inputs="x_fake", model=d8, outputs="fake_score"),
        65: ModelOp(inputs="x_fake", model=d9, outputs="fake_score"),
        75: ModelOp(inputs="x_fake", model=d10, outputs="fake_score")
    })

    real_score_scheduler = Scheduler({
        0: ModelOp(model=d2, outputs="real_score"),
        5: ModelOp(model=d3, outputs="real_score"),
        15: ModelOp(model=d4, outputs="real_score"),
        25: ModelOp(model=d5, outputs="real_score"),
        35: ModelOp(model=d6, outputs="real_score"),
        45: ModelOp(model=d7, outputs="real_score"),
        55: ModelOp(model=d8, outputs="real_score"),
        65: ModelOp(model=d9, outputs="real_score"),
        75: ModelOp(model=d10, outputs="real_score")
    })

    interp_score_scheduler = Scheduler({
        0: ModelOp(inputs="x_interp", model=d2, outputs="interp_score", track_input=True),
        5: ModelOp(inputs="x_interp", model=d3, outputs="interp_score", track_input=True),
        15: ModelOp(inputs="x_interp", model=d4, outputs="interp_score", track_input=True),
        25: ModelOp(inputs="x_interp", model=d5, outputs="interp_score", track_input=True),
        35: ModelOp(inputs="x_interp", model=d6, outputs="interp_score", track_input=True),
        45: ModelOp(inputs="x_interp", model=d7, outputs="interp_score", track_input=True),
        55: ModelOp(inputs="x_interp", model=d8, outputs="interp_score", track_input=True),
        65: ModelOp(inputs="x_interp", model=d9, outputs="interp_score", track_input=True),
        75: ModelOp(inputs="x_interp", model=d10, outputs="interp_score", track_input=True)
    })

    network = fe.Network(ops=[
        RandomInput(inputs=lambda: 512),
        g_scheduler,
        fake_score_scheduler,
        ImageBlender(inputs=("x", "x_lowres"), alpha=fade_in_alpha),
        real_score_scheduler,
        Interpolate(inputs=("x_fake", "x"), outputs="x_interp"),
        interp_score_scheduler,
        GradientPenalty(inputs=("x_interp", "interp_score"), outputs="gp"),
        GLoss(inputs="fake_score", outputs="gloss"),
        DLoss(inputs=("real_score", "fake_score", "gp"), outputs="dloss")
    ])

    if save_dir is None:
        save_dir = os.path.join(str(Path.home()), 'fastestimator_results',
                                'NIH_CXR_PGGAN')
        os.makedirs(save_dir, exist_ok=True)

    estimator = fe.Estimator(
        network=network,
        pipeline=pipeline_scheduler,
        epochs=85,
        traces=[
            AlphaController(alpha=fade_in_alpha,
                            fade_start=[5, 15, 25, 35, 45, 55, 65, 75, 85],
                            duration=[5, 5, 5, 5, 5, 5, 5, 5, 5]),
            ResetOptimizer(reset_epochs=[5, 15, 25, 35, 45, 55, 65, 75],
                           optimizer=optimizer),
            ImageSaving(epoch_model={
                4: g2,
                14: g3,
                24: g4,
                34: g5,
                44: g6,
                54: g7,
                64: g8,
                74: g9,
                84: G
            },
                        save_dir=save_dir,
                        num_channels=1),
            ModelSaving(epoch_model={84: G}, save_dir=save_dir)
        ])
    return estimator
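AlphaController and fade_in_alpha implement the fade-in phase of progressive growing: each time the resolution doubles, the newly added layers are blended in gradually rather than switched on at once. A sketch of the ramp such a trace plausibly applies (the real AlphaController is defined elsewhere in this example and may update per step rather than per epoch):

def fade_alpha(epoch, fade_start=(5, 15, 25, 35, 45, 55, 65, 75, 85), duration=5):
    # Blend factor for the newest layers: 0.0 right after a resolution jump,
    # ramping linearly to 1.0 once the fade window has elapsed.
    for start in sorted(fade_start, reverse=True):
        if epoch >= start:
            return min(1.0, (epoch - start) / duration)
    return 1.0  # before the first fade, the initial resolution is fully active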
Example #4
def get_estimator(batch_size=4,
                  epochs=25,
                  steps_per_epoch=None,
                  validation_steps=None,
                  model_dir=tempfile.mkdtemp()):
    csv_path, path = montgomery.load_data()
    writer = RecordWriter(save_dir=os.path.join(path, "tfrecords"),
                          train_data=csv_path,
                          validation_data=0.2,
                          ops=[
                              ImageReader(inputs="image",
                                          parent_path=path,
                                          outputs="image",
                                          grey_scale=True),
                              ImageReader(inputs="mask_left",
                                          parent_path=path,
                                          outputs="mask_left",
                                          grey_scale=True),
                              ImageReader(inputs="mask_right",
                                          parent_path=path,
                                          outputs="mask_right",
                                          grey_scale=True),
                              CombineLeftRightMask(inputs=("mask_left",
                                                           "mask_right")),
                              Resize(target_size=(512, 512)),
                              Reshape(shape=(512, 512, 1), outputs="mask"),
                              Resize(inputs="image", target_size=(512, 512)),
                              Reshape(shape=(512, 512, 1), outputs="image"),
                          ],
                          write_feature=["image", "mask"])

    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=writer,
                           ops=[
                               Augmentation2D(inputs=["image", "mask"],
                                              outputs=["image", "mask"],
                                              mode="train",
                                              rotation_range=10,
                                              flip_left_right=True),
                               Minmax(inputs="image", outputs="image"),
                               Minmax(inputs="mask", outputs="mask")
                           ])

    model = fe.build(model_def=lambda: UNet(input_size=(512, 512, 1)),
                     model_name="lungsegmentation",
                     optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
                     loss_name="loss")

    network = fe.Network(ops=[
        ModelOp(inputs="image", model=model, outputs="pred_segment"),
        BinaryCrossentropy(
            y_true="mask", y_pred="pred_segment", outputs="loss")
    ])

    traces = [
        Dice(true_key="mask", pred_key="pred_segment"),
        ModelSaver(model_name="lungsegmentation",
                   save_dir=model_dir,
                   save_best=True)
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             log_steps=20,
                             traces=traces,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps)
    return estimator
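The Dice trace reports the overlap between "mask" and "pred_segment" during training. For reference, the underlying coefficient can be computed as in this plain-NumPy sketch (the trace itself handles batching, thresholding, and logging):

import numpy as np

def dice_coefficient(y_true, y_pred, threshold=0.5, eps=1e-6):
    # Dice = 2 * |A ∩ B| / (|A| + |B|), computed on binarized masks.
    y_pred = (y_pred >= threshold).astype(np.float32)
    intersection = np.sum(y_true * y_pred)
    return (2.0 * intersection + eps) / (np.sum(y_true) + np.sum(y_pred) + eps)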
Example #5
def get_estimator(weight=10.0,
                  epochs=200,
                  steps_per_epoch=None,
                  validation_steps=None,
                  model_dir=tempfile.mkdtemp()):
    trainA_csv, trainB_csv, _, _, parent_path = load_data()
    tfr_save_dir = os.path.join(parent_path, 'tfrecords')
    # Step 1: Define Pipeline
    writer = RecordWriter(train_data=(trainA_csv, trainB_csv),
                          save_dir=tfr_save_dir,
                          ops=([
                              ImageReader(inputs="imgA",
                                          outputs="imgA",
                                          parent_path=parent_path)
                          ], [
                              ImageReader(inputs="imgB",
                                          outputs="imgB",
                                          parent_path=parent_path)
                          ]))

    pipeline = fe.Pipeline(data=writer,
                           batch_size=1,
                           ops=[
                               Myrescale(inputs="imgA", outputs="imgA"),
                               RandomJitter(inputs="imgA", outputs="real_A"),
                               Myrescale(inputs="imgB", outputs="imgB"),
                               RandomJitter(inputs="imgB", outputs="real_B")
                           ])
    # Step 2: Define Network
    g_AtoB = fe.build(model_def=build_generator,
                      model_name="g_AtoB",
                      loss_name="g_AtoB_loss",
                      optimizer=tf.keras.optimizers.Adam(2e-4, 0.5))

    g_BtoA = fe.build(model_def=build_generator,
                      model_name="g_BtoA",
                      loss_name="g_BtoA_loss",
                      optimizer=tf.keras.optimizers.Adam(2e-4, 0.5))

    d_A = fe.build(model_def=build_discriminator,
                   model_name="d_A",
                   loss_name="d_A_loss",
                   optimizer=tf.keras.optimizers.Adam(2e-4, 0.5))

    d_B = fe.build(model_def=build_discriminator,
                   model_name="d_B",
                   loss_name="d_B_loss",
                   optimizer=tf.keras.optimizers.Adam(2e-4, 0.5))

    network = fe.Network(ops=[
        ModelOp(inputs="real_A", model=g_AtoB, outputs="fake_B"),
        ModelOp(inputs="real_B", model=g_BtoA, outputs="fake_A"),
        ModelOp(inputs="real_A", model=d_A, outputs="d_real_A"),
        ModelOp(inputs="fake_A", model=d_A, outputs="d_fake_A"),
        ModelOp(inputs="real_B", model=d_B, outputs="d_real_B"),
        ModelOp(inputs="fake_B", model=d_B, outputs="d_fake_B"),
        ModelOp(inputs="real_A", model=g_BtoA, outputs="same_A"),
        ModelOp(inputs="fake_B", model=g_BtoA, outputs="cycled_A"),
        ModelOp(inputs="real_B", model=g_AtoB, outputs="same_B"),
        ModelOp(inputs="fake_A", model=g_AtoB, outputs="cycled_B"),
        GLoss(inputs=("real_A", "d_fake_B", "cycled_A", "same_A"),
              weight=weight,
              outputs="g_AtoB_loss"),
        GLoss(inputs=("real_B", "d_fake_A", "cycled_B", "same_B"),
              weight=weight,
              outputs="g_BtoA_loss"),
        DLoss(inputs=("d_real_A", "d_fake_A"), outputs="d_A_loss"),
        DLoss(inputs=("d_real_B", "d_fake_B"), outputs="d_B_loss")
    ])
    # Step 3: Define Estimator
    traces = [
        ModelSaver(model_name="g_AtoB", save_dir=model_dir, save_freq=10),
        ModelSaver(model_name="g_BtoA", save_dir=model_dir, save_freq=10)
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             traces=traces,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps)
    return estimator
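GLoss and DLoss are defined elsewhere in this example; given the keys wired above, a standard CycleGAN generator objective (least-squares adversarial term plus L1 cycle-consistency and identity terms) would look roughly like this sketch, with the identity term conventionally at half the cycle weight:

import tensorflow as tf

def cyclegan_g_loss_sketch(real, d_fake, cycled, same, weight=10.0):
    # Hypothetical stand-in for GLoss: fool the discriminator on translated
    # images while keeping translations cycle-consistent and color-preserving.
    adv = tf.reduce_mean(tf.square(d_fake - 1.0))
    cycle = tf.reduce_mean(tf.abs(real - cycled))
    identity = tf.reduce_mean(tf.abs(real - same))
    return adv + weight * cycle + 0.5 * weight * identity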