Example #1
def get_estimator(batch_size=256, epochs=50, model_dir=tempfile.mkdtemp()):
    # prepare data
    (x_train, _), (_, _) = tf.keras.datasets.mnist.load_data()
    data = {"train": {"x": np.expand_dims(x_train, -1)}}
    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=data,
                           ops=Myrescale(inputs="x", outputs="x"))
    # prepare model
    g_femodel = FEModel(model_def=make_generator_model,
                        model_name="gen",
                        loss_name="gloss",
                        optimizer=tf.optimizers.Adam(1e-4))
    d_femodel = FEModel(model_def=make_discriminator_model,
                        model_name="disc",
                        loss_name="dloss",
                        optimizer=tf.optimizers.Adam(1e-4))
    network = fe.Network(ops=[
        ModelOp(inputs=lambda: tf.random.normal([batch_size, 100]),
                model=g_femodel),
        ModelOp(model=d_femodel, outputs="pred_fake"),
        ModelOp(inputs="x", model=d_femodel, outputs="pred_true"),
        GLoss(inputs="pred_fake", outputs="gloss"),
        DLoss(inputs=("pred_true", "pred_fake"), outputs="dloss")
    ])
    # prepare estimator
    traces = [ModelSaver(model_name='gen', save_dir=model_dir, save_freq=5)]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             traces=traces)
    return estimator
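
Myrescale, make_generator_model, and make_discriminator_model are defined earlier in the original script. A minimal sketch of the preprocessing op, assuming it maps MNIST pixels from [0, 255] to [-1, 1] to match a tanh generator output (the TensorOp import path is an assumption about the FastEstimator version in use):

import tensorflow as tf
from fastestimator.op import TensorOp

class Myrescale(TensorOp):
    # Hypothetical sketch: normalize uint8 images to [-1, 1].
    def forward(self, data, state):
        data = tf.cast(data, tf.float32)
        return (data - 127.5) / 127.5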
Example #2
def get_estimator(batch_size=8, epochs=25, steps_per_epoch=None, validation_steps=None, model_dir=tempfile.mkdtemp()):
    # load CUB200 dataset.
    csv_path, path = cub200.load_data()
    writer = RecordWriter(
        save_dir=os.path.join(path, "tfrecords"),
        train_data=csv_path,
        validation_data=0.2,
        ops=[
            ImageReader(inputs='image', parent_path=path),
            Resize(target_size=(512, 512), keep_ratio=True, outputs='image'),
            MatReader(inputs='annotation', parent_path=path),
            SelectDictKey(),
            Resize((512, 512), keep_ratio=True),
            Reshape(shape=(512, 512, 1), outputs="annotation")
        ])
    # step 1: pipeline
    pipeline = fe.Pipeline(
        batch_size=batch_size,
        data=writer,
        ops=[
            Augmentation2D(inputs=("image", "annotation"),
                           outputs=("image", "annotation"),
                           mode="train",
                           rotation_range=15.0,
                           zoom_range=[0.8, 1.2],
                           flip_left_right=True),
            Rescale(inputs='image', outputs='image')
        ])
    # step 2: network
    opt = tf.optimizers.Adam(learning_rate=0.0001)
    resunet50 = fe.build(model_def=ResUnet50, model_name="resunet50", optimizer=opt, loss_name="total_loss")
    uncertainty = fe.build(model_def=UncertaintyLoss, model_name="uncertainty", optimizer=opt, loss_name="total_loss")
    network = fe.Network(ops=[
        ModelOp(inputs='image', model=resunet50, outputs=["label_pred", "mask_pred"]),
        SparseCategoricalCrossentropy(inputs=["label", "label_pred"], outputs="cls_loss"),
        BinaryCrossentropy(inputs=["annotation", "mask_pred"], outputs="seg_loss"),
        ModelOp(inputs=("cls_loss", "seg_loss"), model=uncertainty, outputs="total_loss"),
        Loss(inputs="total_loss", outputs="total_loss")
    ])
    # step 3: estimator
    traces = [
        Dice(true_key="annotation", pred_key='mask_pred'),
        Accuracy(true_key="label", pred_key="label_pred"),
        ModelSaver(model_name="resunet50", save_dir=model_dir, save_best=True),
        LRController(model_name="resunet50", lr_schedule=CyclicLRSchedule())
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             traces=traces,
                             epochs=epochs,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps)
    return estimator
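
Here UncertaintyLoss is built as a second trainable "model" whose only weights are per-task uncertainty parameters, in the spirit of Kendall et al. (2018). A minimal sketch of the weighting it learns, assuming two log-variance scalars s1 and s2 for the classification and segmentation losses (the class name and layout are illustrative, not the original definition):

import tensorflow as tf

class UncertaintyWeighting(tf.keras.layers.Layer):
    # Sketch: total = exp(-s1) * cls_loss + s1 + exp(-s2) * seg_loss + s2,
    # so the optimizer balances the two tasks instead of a hand-tuned weight.
    def __init__(self):
        super().__init__()
        self.s1 = self.add_weight(name="s1", shape=(), initializer="zeros")
        self.s2 = self.add_weight(name="s2", shape=(), initializer="zeros")

    def call(self, losses):
        cls_loss, seg_loss = losses
        return (tf.exp(-self.s1) * cls_loss + self.s1 +
                tf.exp(-self.s2) * seg_loss + self.s2)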
Example #3
def get_estimator(batch_size=32, epochs=25, model_dir=tempfile.mkdtemp()):
    # load CUB200 dataset.
    csv_path, path = cub200.load_data()
    writer = RecordWriter(
        save_dir=os.path.join(path, "FEdata"),
        train_data=csv_path,
        validation_data=0.2,
        ops=[
            ImageReader(inputs='image', parent_path=path),
            Resize(target_size=(128, 128), keep_ratio=True, outputs='image'),
            MatReader(inputs='annotation', parent_path=path),
            SelectDictKey(),
            Resize((128, 128), keep_ratio=True),
            Reshape(shape=(128, 128, 1), outputs="annotation")
        ])
    # data pipeline
    pipeline = fe.Pipeline(batch_size=batch_size, data=writer, ops=Minmax(inputs='image', outputs='image'))

    # Network
    model = FEModel(model_def=UNet, model_name="unet_cub", optimizer=tf.optimizers.Adam())
    network = fe.Network(ops=[
        ModelOp(inputs='image', model=model, outputs='mask_pred'),
        BinaryCrossentropy(y_true='annotation', y_pred='mask_pred')
    ])

    # estimator
    traces = [
        Dice(true_key="annotation", pred_key='mask_pred'),
        ModelSaver(model_name="unet_cub", save_dir=model_dir, save_best=True)
    ]
    estimator = fe.Estimator(network=network, pipeline=pipeline, traces=traces, epochs=epochs, log_steps=50)
    return estimator
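
The Dice trace reports the overlap between predicted and ground-truth masks each epoch. A minimal NumPy sketch of the metric it is built around, assuming binarization at 0.5:

import numpy as np

def dice_coefficient(mask_true, mask_pred, threshold=0.5, eps=1e-6):
    # Dice = 2 * |A ∩ B| / (|A| + |B|)
    pred = (mask_pred > threshold).astype(np.float32)
    true = mask_true.astype(np.float32)
    intersection = np.sum(pred * true)
    return (2.0 * intersection + eps) / (np.sum(pred) + np.sum(true) + eps)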
Example #4
def get_estimator(style_img_path=None,
                  data_path=None,
                  style_weight=5.0,
                  content_weight=1.0,
                  tv_weight=1e-4,
                  steps_per_epoch=None,
                  validation_steps=None,
                  model_dir=tempfile.mkdtemp()):
    train_csv, _, path = load_data(data_path, load_object=False)
    if style_img_path is None:
        style_img_path = tf.keras.utils.get_file(
            'kandinsky.jpg',
            'https://storage.googleapis.com/download.tensorflow.org/example_images/Vassily_Kandinsky%2C_1913_'
            '-_Composition_7.jpg')
    style_img = cv2.imread(style_img_path)
    assert (style_img is not None), "Invalid style reference image"
    tfr_save_dir = os.path.join(path, 'tfrecords')
    style_img = (style_img.astype(np.float32) - 127.5) / 127.5  # normalize to [-1, 1]
    style_img_t = tf.convert_to_tensor(np.expand_dims(style_img, axis=0))
    writer = RecordWriter(train_data=train_csv,
                          save_dir=tfr_save_dir,
                          ops=[
                              ImageReader(inputs="image",
                                          parent_path=path,
                                          outputs="image"),
                              Resize(inputs="image",
                                     target_size=(256, 256),
                                     outputs="image")
                          ])

    pipeline = fe.Pipeline(batch_size=4,
                           data=writer,
                           ops=[Rescale(inputs="image", outputs="image")])

    model = fe.build(model_def=styleTransferNet,
                     model_name="style_transfer_net",
                     loss_name="loss",
                     optimizer=tf.keras.optimizers.Adam(1e-3))

    network = fe.Network(ops=[
        ModelOp(inputs="image", model=model, outputs="image_out"),
        ExtractVGGFeatures(inputs=lambda: style_img_t, outputs="y_style"),
        ExtractVGGFeatures(inputs="image", outputs="y_content"),
        ExtractVGGFeatures(inputs="image_out", outputs="y_pred"),
        StyleContentLoss(style_weight=style_weight,
                         content_weight=content_weight,
                         tv_weight=tv_weight,
                         inputs=('y_pred', 'y_style', 'y_content',
                                 'image_out'),
                         outputs='loss')
    ])

    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=2,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps,
                             traces=ModelSaver(model_name="style_transfer_net",
                                               save_dir=model_dir))
    return estimator
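
StyleContentLoss compares VGG feature statistics rather than raw pixels; the style term is conventionally built on Gram matrices of the extracted feature maps. A minimal sketch of that building block (gram_matrix is an illustrative helper, not part of the snippet above):

import tensorflow as tf

def gram_matrix(features):
    # Correlate feature channels over all spatial positions: result shape (batch, c, c).
    shape = tf.shape(features)
    h, w, c = shape[1], shape[2], shape[3]
    f = tf.reshape(features, (shape[0], h * w, c))
    return tf.matmul(f, f, transpose_a=True) / tf.cast(h * w, tf.float32)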
Example #5
def get_estimator(epochs=2, batch_size=32, model_dir=tempfile.mkdtemp()):
    # step 1. prepare data
    (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.mnist.load_data()
    train_data = {"x": np.expand_dims(x_train, -1), "y": y_train}
    eval_data = {"x": np.expand_dims(x_eval, -1), "y": y_eval}
    data = {"train": train_data, "eval": eval_data}
    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=data,
                           ops=Minmax(inputs="x", outputs="x"))

    # step 2. prepare model
    model = fe.FEModel(model_def=LeNet, model_name="lenet", optimizer="adam")
    network = fe.Network(ops=[
        ModelOp(inputs="x", model=model, outputs="y_pred"),
        SparseCategoricalCrossentropy(inputs=("y", "y_pred"))
    ])

    # step 3. prepare estimator
    traces = [
        Accuracy(true_key="y", pred_key="y_pred", output_name='acc'),
        ModelSaver(model_name="lenet", save_dir=model_dir, save_best=True)
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             traces=traces)
    return estimator
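
Every snippet on this page returns an unfitted estimator; training starts when fit() is called. A typical entry point for this MNIST example:

if __name__ == "__main__":
    est = get_estimator(epochs=2, batch_size=32)
    est.fit()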
Example #6
def get_estimator(epochs=10, batch_size=32, alpha=1.0, warmup=0, model_dir=tempfile.mkdtemp()):
    (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.cifar10.load_data()
    data = {"train": {"x": x_train, "y": y_train}, "eval": {"x": x_eval, "y": y_eval}}
    num_classes = 10
    pipeline = fe.Pipeline(batch_size=batch_size, data=data, ops=Minmax(inputs="x", outputs="x"))

    model = FEModel(model_def=lambda: LeNet(input_shape=x_train.shape[1:], classes=num_classes),
                    model_name="LeNet",
                    optimizer="adam")

    mixup_map = {warmup: MixUpBatch(inputs="x", outputs=["x", "lambda"], alpha=alpha, mode="train")}
    mixup_loss = {
        0: SparseCategoricalCrossentropy(y_true="y", y_pred="y_pred", mode="train"),
        warmup: MixUpLoss(KerasCrossentropy(), lam="lambda", y_true="y", y_pred="y_pred", mode="train")
    }
    network = fe.Network(ops=[
        Scheduler(mixup_map),
        ModelOp(inputs="x", model=model, outputs="y_pred"),
        Scheduler(mixup_loss),
        SparseCategoricalCrossentropy(y_true="y", y_pred="y_pred", mode="eval")
    ])

    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        ConfusionMatrix(true_key="y", pred_key="y_pred", num_classes=num_classes),
        ModelSaver(model_name="LeNet", save_dir=model_dir, save_best=True)
    ]

    estimator = fe.Estimator(network=network, pipeline=pipeline, epochs=epochs, traces=traces)
    return estimator
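
MixUpBatch blends each training image with a shuffled partner and emits the mixing coefficient, which MixUpLoss then uses to blend the cross-entropy against the two label sets. A minimal NumPy sketch of the batch operation, assuming lambda is drawn from Beta(alpha, alpha):

import numpy as np

def mixup_batch(x, alpha=1.0):
    # Sketch: mixed = lam * x + (1 - lam) * x[perm]; the same lam later weights
    # the losses computed against the original and permuted labels.
    lam = np.random.beta(alpha, alpha)
    perm = np.random.permutation(len(x))
    return lam * x + (1 - lam) * x[perm], lam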
Example #7
def get_estimator(batch_size=4, epochs=25, model_dir=tempfile.mkdtemp()):
    csv_path, path = montgomery.load_data()
    writer = RecordWriter(save_dir=os.path.join(path, "FEdata"),
                          train_data=csv_path,
                          validation_data=0.2,
                          ops=[
                              ImageReader(grey_scale=True,
                                          inputs="image",
                                          parent_path=path,
                                          outputs="image"),
                              ImageReader(grey_scale=True,
                                          inputs="mask_left",
                                          parent_path=path,
                                          outputs="mask_left"),
                              ImageReader(grey_scale=True,
                                          inputs="mask_right",
                                          parent_path=path,
                                          outputs="mask_right"),
                              CombineLeftRightMask(inputs=("mask_left", "mask_right")),
                              Resize(target_size=(512, 512)),
                              Reshape(shape=(512, 512, 1), outputs="mask"),
                              Resize(inputs="image", target_size=(512, 512)),
                              Reshape(shape=(512, 512, 1), outputs="image"),
                          ],
                          write_feature=["image", "mask"])

    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=writer,
                           ops=[
                               Augmentation2D(inputs=["image", "mask"],
                                              outputs=["image", "mask"],
                                              mode="train",
                                              rotation_range=10,
                                              flip_left_right=True),
                               Minmax(inputs="image", outputs="image"),
                               Minmax(inputs="mask", outputs="mask")
                           ])

    model = FEModel(model_def=lambda: UNet(input_size=(512, 512, 1)),
                    model_name="lungsegmentation",
                    optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001))

    network = fe.Network(ops=[
        ModelOp(inputs="image", model=model, outputs="pred_segment"),
        BinaryCrossentropy(y_true="mask", y_pred="pred_segment")
    ])

    traces = [
        Dice(true_key="mask", pred_key="pred_segment"),
        ModelSaver(model_name="lungsegmentation",
                   save_dir=model_dir,
                   save_best=True)
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             log_steps=20,
                             traces=traces)
    return estimator
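
CombineLeftRightMask merges the two per-lung annotations into the single mask the network is trained on. A minimal sketch, assuming both masks are binary images of the same shape (the real op may differ in detail):

import numpy as np

def combine_left_right_mask(mask_left, mask_right):
    # Pixel-wise union of the two lung fields.
    return np.maximum(mask_left, mask_right)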
Example #8
def get_estimator(batch_size=100,
                  epochs=100,
                  steps_per_epoch=None,
                  validation_steps=None,
                  model_dir=tempfile.mkdtemp()):
    # prepare data
    (x_train, _), (x_eval, _) = tf.keras.datasets.mnist.load_data()
    x_train = x_train.reshape(x_train.shape[0], 28, 28, 1).astype('float32')
    x_eval = x_eval.reshape(x_eval.shape[0], 28, 28, 1).astype('float32')
    data = {"train": {"x": x_train}, "eval": {"x": x_eval}}
    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=data,
                           ops=[
                               Myrescale(inputs="x", outputs="x"),
                               Mybinarize(inputs="x", outputs="x")
                           ])
    # prepare model
    infer_model = fe.build(model_def=inference_net,
                           model_name="encoder",
                           loss_name="loss",
                           optimizer=tf.optimizers.Adam(1e-4))
    gen_model = fe.build(model_def=generative_net,
                         model_name="decoder",
                         loss_name="loss",
                         optimizer=tf.optimizers.Adam(1e-4))

    network = fe.Network(ops=[
        ModelOp(inputs="x", model=infer_model, outputs="meanlogvar",
                mode=None),
        SplitOp(inputs="meanlogvar", outputs=("mean", "logvar"), mode=None),
        ReparameterizepOp(inputs=("mean", "logvar"), outputs="z", mode=None),
        ModelOp(inputs="z", model=gen_model, outputs="x_logit"),
        CVAELoss(inputs=("x", "mean", "logvar", "z", "x_logit"),
                 mode=None,
                 outputs="loss")
    ])
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps,
                             traces=ModelSaver(model_name="decoder",
                                               save_dir=model_dir,
                                               save_best=True))
    return estimator
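
ReparameterizepOp implements the reparameterization trick, sampling the latent code as a differentiable function of the encoder outputs. A minimal sketch of that step:

import tensorflow as tf

def reparameterize(mean, logvar):
    # z ~ N(mean, exp(logvar)), written so gradients flow through mean and logvar.
    eps = tf.random.normal(shape=tf.shape(mean))
    return mean + eps * tf.exp(0.5 * logvar)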
Example #9
def get_estimator(epochs=10,
                  batch_size=64,
                  max_len=500,
                  steps_per_epoch=None,
                  validation_steps=None,
                  model_dir=tempfile.mkdtemp()):
    # step 1. prepare data
    (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.imdb.load_data(
        maxlen=max_len, num_words=MAX_WORDS)
    data = {
        "train": {"x": np.array([pad(x, max_len, 0) for x in x_train]), "y": y_train},
        "eval": {"x": np.array([pad(x, max_len, 0) for x in x_eval]), "y": y_eval}
    }

    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=data,
                           ops=Reshape([1], inputs="y", outputs="y"))

    # step 2. prepare model
    model = fe.build(model_def=lambda: create_lstm(max_len),
                     model_name="lstm_imdb",
                     optimizer="adam",
                     loss_name="loss")
    network = fe.Network(ops=[
        ModelOp(inputs="x", model=model, outputs="y_pred"),
        BinaryCrossentropy(y_true="y", y_pred="y_pred", outputs="loss")
    ])

    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        ModelSaver(model_name="lstm_imdb", save_dir=model_dir, save_best=True)
    ]
    # step 3. prepare estimator
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             traces=traces,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps)

    return estimator
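
pad is a helper from the original script that gives every review a fixed length. A hypothetical stand-in, assuming right-padding with a fill value and truncation to max_len:

def pad(sequence, max_len, value):
    # Right-pad (or truncate) so every input has exactly max_len tokens.
    return (list(sequence) + [value] * max_len)[:max_len]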
Example #10
def get_estimator(epochs=50,
                  batch_size=64,
                  steps_per_epoch=None,
                  validation_steps=None,
                  model_dir=tempfile.mkdtemp()):
    # step 1. prepare data
    (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.cifar10.load_data()
    data = {"train": {"x": x_train, "y": y_train}, "eval": {"x": x_eval, "y": y_eval}}
    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=data,
                           ops=Minmax(inputs="x", outputs="x"))
    # step 2. prepare model
    model = fe.build(model_def=DenseNet121_cifar10,
                     model_name="densenet121",
                     optimizer="adam",
                     loss_name="loss")

    network = fe.Network(ops=[
        ModelOp(inputs="x", model=model, outputs="y_pred"),
        SparseCategoricalCrossentropy(
            y_true="y", y_pred="y_pred", outputs="loss")
    ])
    # step 3. prepare estimator
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps,
                             traces=[
                                 Accuracy(true_key="y", pred_key="y_pred"),
                                 ModelSaver(model_name="densenet121",
                                            save_dir=model_dir,
                                            save_best=True),
                                 LRController(model_name="densenet121",
                                              reduce_on_eval=True)
                             ])
    return estimator
Example #11
def get_estimator(batch_size=4, epochs=1000, steps_per_epoch=1000, validation_steps=None, model_dir=tempfile.mkdtemp(), imagenet_path=None):
    """Args:
        imagenet_path: folder path of ImageNet dataset, containing train and val subdirs .
    """

    assert imagenet_path is not None, 'Pass valid folder path of Imagenet dataset'
    # Ensure ImageNet dataset is downloaded. Pass the folder contianing train and val subdirectories.
    # currently the script doesn't download the ImageNet data.
    train_csv, val_csv, path = srgan.load_data(path_imgnet= imagenet_path)

    writer = fe.RecordWriter(
        save_dir=os.path.join(path, "sr_tfrecords"),
        train_data=train_csv,
        validation_data=val_csv,
        ops=[ImageReader(inputs="lowres", outputs="lowres"), ImageReader(inputs="highres", outputs="highres")],
        compression="GZIP",
        write_feature=['lowres', 'highres'])

    pipeline = fe.Pipeline(
        max_shuffle_buffer_mb=3000,
        batch_size=batch_size,
        data=writer,
        ops=[
            LowresRescale(inputs='lowres', outputs='lowres'),
            Rescale(inputs='highres', outputs='highres'), ])

    # prepare model
    model = fe.build(model_def=lambda: get_generator(input_shape=(24, 24, 3)),
                     model_name="srresnet_gen",
                     optimizer=tf.optimizers.Adam(learning_rate=0.0001),
                     loss_name="mse_loss")
    network = fe.Network(ops=[
        ModelOp(inputs='lowres', model=model, outputs='superres'),
        PixelMeanSquaredError(inputs=('superres', 'highres'), outputs="mse_loss")
    ])

    model_dir = path  # overrides the model_dir argument; checkpoints are saved next to the data
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             steps_per_epoch=steps_per_epoch,
                             epochs=epochs,
                             traces=[
                                 ModelSaver(model_name="srresnet_gen", save_dir=model_dir, save_best=True), ])
    return estimator
Example #12
def get_estimator(batch_size=128, epochs=15, model_dir=tempfile.mkdtemp()):
    # prepare data in disk
    train_csv, val_csv, path = svhn.load_data()
    writer = RecordWriter(
        save_dir=os.path.join(path, "FEdata"),
        train_data=train_csv,
        validation_data=val_csv,
        ops=[
            ImageReader(inputs="image", parent_path=path, outputs="image"),
            String2List(inputs=["label", "x1", "y1", "x2", "y2"],
                        outputs=["label", "x1", "y1", "x2", "y2"]),
            RelativeCoordinate(inputs=("image", "x1", "y1", "x2", "y2"),
                               outputs=("x1", "y1", "x2", "y2")),
            Resize(inputs="image", target_size=(64, 128), outputs="image"),
            GenerateTarget(inputs=("label", "x1", "y1", "x2", "y2"),
                           outputs=("target_cls", "target_loc"))
        ])
    # prepare pipeline
    pipeline = Pipeline(batch_size=batch_size,
                        data=writer,
                        ops=Minmax(inputs="image", outputs="image"),
                        read_feature=["image", "target_cls", "target_loc"])
    # prepare model
    model = FEModel(
        model_def=lambda: RetinaNet(input_shape=(64, 128, 3), num_classes=10),
        model_name="retinanet",
        optimizer=tf.optimizers.Adam(learning_rate=0.0001))
    network = Network(ops=[
        ModelOp(inputs="image", model=model, outputs=["pred_cls", "pred_loc"]),
        PredictBox(outputs=("cls_selected", "loc_selected", "valid_outputs"),
                   mode="eval"),
        RetinaLoss(inputs=("target_cls", "target_loc", "pred_cls", "pred_loc"),
                   outputs="loss"),
    ])
    # prepare estimator
    estimator = Estimator(network=network,
                          pipeline=pipeline,
                          epochs=epochs,
                          log_steps=20,
                          traces=ModelSaver(model_name="retinanet",
                                            save_dir=model_dir,
                                            save_best=True))
    return estimator
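
RetinaLoss combines a localization term with the focal classification loss that makes one-stage detectors trainable on heavily imbalanced anchors. A minimal sketch of the focal term, assuming y_pred holds post-sigmoid probabilities (illustrative only, not the op's actual code):

import tensorflow as tf

def focal_loss(y_true, y_pred, alpha=0.25, gamma=2.0):
    # FL(p_t) = -alpha_t * (1 - p_t)^gamma * log(p_t): easy anchors are down-weighted.
    p_t = tf.where(tf.equal(y_true, 1.0), y_pred, 1.0 - y_pred)
    alpha_t = tf.where(tf.equal(y_true, 1.0), alpha, 1.0 - alpha)
    return -alpha_t * tf.pow(1.0 - p_t, gamma) * tf.math.log(p_t + 1e-7)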
Example #13
def get_estimator(epochs=50, batch_size=32, model_dir=tempfile.mkdtemp()):
    (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.boston_housing.load_data()

    # step 1. prepare data
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_eval = scaler.transform(x_eval)

    data = {"train": {"x": x_train, "y": y_train}, "eval": {"x": x_eval, "y": y_eval}}
    pipeline = fe.Pipeline(batch_size=batch_size, data=data)

    # step 2. prepare model
    model = FEModel(model_def=create_dnn, model_name="dnn", optimizer="adam")
    network = fe.Network(
        ops=[ModelOp(inputs="x", model=model, outputs="y_pred"), MeanSquaredError(y_true="y", y_pred="y_pred")])

    # step 3.prepare estimator
    traces = [ModelSaver(model_name="dnn", save_dir=model_dir, save_best=True)]
    estimator = fe.Estimator(network=network, pipeline=pipeline, epochs=epochs, log_steps=10, traces=traces)
    return estimator
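
create_dnn is defined elsewhere in the script. A hypothetical stand-in, assuming a small fully connected regressor over the 13 standardized Boston-housing features:

import tensorflow as tf

def create_dnn():
    # Sketch only: the original architecture may differ.
    return tf.keras.Sequential([
        tf.keras.layers.Dense(64, activation="relu", input_shape=(13,)),
        tf.keras.layers.Dense(64, activation="relu"),
        tf.keras.layers.Dense(1)
    ])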
Example #14
def get_estimator(epochs=50,
                  batch_size=32,
                  steps_per_epoch=None,
                  validation_steps=None,
                  model_dir=tempfile.mkdtemp()):
    (X, y) = load_breast_cancer(return_X_y=True)
    x_train, x_eval, y_train, y_eval = train_test_split(X, y, test_size=0.2)

    # step 1. prepare data
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_eval = scaler.transform(x_eval)
    train_data = {"x": x_train, "y": np.expand_dims(y_train, -1)}
    eval_data = {"x": x_eval, "y": np.expand_dims(y_eval, -1)}
    data = {"train": train_data, "eval": eval_data}
    pipeline = fe.Pipeline(batch_size=batch_size, data=data)

    # step 2. prepare model
    model = fe.build(model_def=create_dnn,
                     model_name="dnn",
                     optimizer="adam",
                     loss_name="loss")
    network = fe.Network(ops=[
        ModelOp(inputs="x", model=model, outputs="y_pred"),
        MeanSquaredError(inputs=("y", "y_pred"), outputs="loss")
    ])

    # step 3.prepare estimator
    traces = [ModelSaver(model_name="dnn", save_dir=model_dir, save_best=True)]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps,
                             log_steps=10,
                             traces=traces)
    return estimator
Example #15
def get_estimator(batch_size=4,
                  epochs=200,
                  steps_per_epoch=1000,
                  validation_steps=None,
                  model_dir=tempfile.mkdtemp(),
                  imagenet_path=None,
                  srresnet_model_path=None):
    """Args:
        imagenet_path: folder path of ImageNet dataset, containing train and val subdirs .
        srresnet_model_path: srresnet model weights, srgan generator gets initialized with the weights.
    """

    assert imagenet_path is not None, 'Pass valid folder path of Imagenet dataset'
    assert srresnet_model_path is not None, 'srresnet model is needed to initialize srgan generator model'
    # Ensure ImageNet dataset is downloaded. Pass the folder contianing train and val subdirectories.
    # currently the script doesn't download the ImageNet data.
    train_csv, val_csv, path = srgan.load_data(path_imgnet=imagenet_path)

    writer = fe.RecordWriter(save_dir=os.path.join(path, "sr_tfrecords"),
                             train_data=train_csv,
                             validation_data=val_csv,
                             ops=[
                                 ImageReader(inputs="lowres",
                                             outputs="lowres"),
                                 ImageReader(inputs="highres",
                                             outputs="highres")
                             ],
                             compression="GZIP",
                             write_feature=['lowres', 'highres'])

    pipeline = fe.Pipeline(max_shuffle_buffer_mb=3000,
                           batch_size=batch_size,
                           data=writer,
                           ops=[
                               LowresRescale(inputs='lowres',
                                             outputs='lowres'),
                               Rescale(inputs='highres', outputs='highres'),
                           ])

    # prepare model
    model_gen = fe.build(model_def=srresnet_model_path,
                         model_name="srgan_gen",
                         optimizer=tf.optimizers.Adam(learning_rate=0.0001),
                         loss_name="mse_adv_loss",
                         custom_objects={'SubPixelConv2D': SubPixelConv2D})
    model_desc = fe.build(
        model_def=lambda: get_discriminator(input_shape=(96, 96, 3)),
        model_name="srgan_desc",
        optimizer=tf.optimizers.Adam(learning_rate=0.0001),
        loss_name="desc_loss")

    network = fe.Network(ops=[
        ModelOp(inputs='lowres', model=model_gen, outputs='superres'),
        ModelOp(inputs='superres', model=model_desc, outputs='pred_fake'),
        ModelOp(inputs='highres', model=model_desc, outputs='pred_true'),
        DLoss(inputs=("pred_true", "pred_fake"),
              outputs=("desc_loss", "real_loss", "fake_loss")),
        GLoss(inputs=('superres', 'highres', 'pred_fake'),
              outputs=("mse_adv_loss", "mse_loss", "adv_loss"),
              vgg_content=True)
    ])

    model_dir = path  # overrides the model_dir argument; checkpoints are saved next to the data
    estimator = fe.Estimator(
        network=network,
        pipeline=pipeline,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        traces=[
            ModelSaver(model_name="srgan_gen",
                       save_dir=model_dir,
                       save_best=True),
            ModelSaver(model_name="srgan_desc",
                       save_dir=model_dir,
                       save_best=True),
            LRController(model_name="srgan_gen",
                         lr_schedule=MyLRSchedule(schedule_mode='step')),
            LRController(model_name="srgan_desc",
                         lr_schedule=MyLRSchedule(schedule_mode='step'))
        ])

    return estimator
Example #16
def get_estimator(batch_size=1,
                  epochs=500,
                  steps_per_epoch=128,
                  model_dir=tempfile.mkdtemp(),
                  path_brats=os.path.join(os.getenv('HOME'),
                                          'fastestimator_data', 'BraTs')):
    """Args:
        path_brats: folder path of BraTs 2018 dataset, containing data subdir inturn having LGG and HGG data.
        Expected folder structure path_brats
        path_brats/
        |----------data/
                   |----LGG/
                   |----HGG/
    """
    assert path_brats is not None, 'Pass valid folder path of BraTs 2018 dataset'
    # Ensure Brats 2018 dataset is downloaded. Pass the folder contianing train and val subdirectories.
    # currently the script doesn't download the BraTs data.
    train_csv, val_csv, path_brats = brats.load_data(path_brats=path_brats,
                                                     resized_img_shape=(128, 128, 128),
                                                     bias_correction=False)
    writer = fe.RecordWriter(save_dir=os.path.join(path_brats, "tfrecords"),
                             train_data=train_csv,
                             validation_data=val_csv,
                             ops=[
                                 NIBImageReader(inputs="mod_img",
                                                outputs="mod_img"),
                                 NIBImageReader(inputs="seg_mask",
                                                outputs="seg_mask")
                             ],
                             compression="GZIP",
                             write_feature=['mod_img', 'seg_mask'])

    pipeline = fe.Pipeline(data=writer,
                           batch_size=batch_size,
                           ops=[
                               SplitMaskLabelwise(inputs="seg_mask",
                                                  outputs="seg_mask"),
                               Augmentation3D(inputs=("mod_img", "seg_mask"),
                                              outputs=("mod_img", "seg_mask"),
                                              mode='train')
                           ])

    model_unet3d_is = fe.build(
        model_def=lambda: UNet3D_Isensee(input_shape=(4, 64, 64, 64)),
        model_name="brats_unet3d_is",
        optimizer=tf.optimizers.Adam(learning_rate=5e-4),
        loss_name="wdice_loss")

    network = fe.Network(ops=[
        ModelOp(inputs="mod_img", model=model_unet3d_is, outputs="pred_seg_mask"),
        WeightedDiceLoss(inputs=("seg_mask", "pred_seg_mask"), outputs="wdice_loss")
    ])
    model_dir = path_brats
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             steps_per_epoch=steps_per_epoch,
                             epochs=epochs,
                             traces=[
                                 ModelSaver(model_name="brats_unet3d_is",
                                            save_dir=model_dir,
                                            save_best=True),
                                 LRController(model_name="brats_unet3d_is",
                                              reduce_patience=10,
                                              reduce_factor=0.5,
                                              reduce_on_eval=True,
                                              min_lr=1e-07)
                             ],
                             log_steps=steps_per_epoch)

    return estimator
Example #17
def get_estimator(data_dir=None, save_dir=None):
    train_csv, data_path = load_data(data_dir)

    imreader = ImageReader(inputs="x", parent_path=data_path, grey_scale=True)
    writer_128 = RecordWriter(
        save_dir=os.path.join(data_path, "tfrecord_128"),
        train_data=train_csv,
        ops=[imreader,
             ResizeRecord(target_size=(128, 128), outputs="x")])
    writer_1024 = RecordWriter(
        save_dir=os.path.join(data_path, "tfrecord_1024"),
        train_data=train_csv,
        ops=[imreader,
             ResizeRecord(target_size=(1024, 1024), outputs="x")])
    # Schedule the batch size: each key is the epoch at which the new value takes effect.
    batchsize_scheduler_128 = Scheduler({
        0: 128,
        5: 64,
        15: 32,
        25: 16,
        35: 8,
        45: 4
    })
    batchsize_scheduler_1024 = Scheduler({55: 4, 65: 2, 75: 1})
    # pipeline ops
    resize_scheduler_128 = Scheduler({
        0: Resize(inputs="x", size=(4, 4), outputs="x"),
        5: Resize(inputs="x", size=(8, 8), outputs="x"),
        15: Resize(inputs="x", size=(16, 16), outputs="x"),
        25: Resize(inputs="x", size=(32, 32), outputs="x"),
        35: Resize(inputs="x", size=(64, 64), outputs="x"),
        45: None
    })
    resize_scheduler_1024 = Scheduler({
        55: Resize(inputs="x", size=(256, 256), outputs="x"),
        65: Resize(inputs="x", size=(512, 512), outputs="x"),
        75: None
    })
    lowres_op = CreateLowRes(inputs="x", outputs="x_lowres")
    rescale_x = Rescale(inputs="x", outputs="x")
    rescale_lowres = Rescale(inputs="x_lowres", outputs="x_lowres")
    pipeline_128 = fe.Pipeline(
        batch_size=batchsize_scheduler_128,
        data=writer_128,
        ops=[resize_scheduler_128, lowres_op, rescale_x, rescale_lowres])
    pipeline_1024 = fe.Pipeline(
        batch_size=batchsize_scheduler_1024,
        data=writer_1024,
        ops=[resize_scheduler_1024, lowres_op, rescale_x, rescale_lowres])

    pipeline_scheduler = Scheduler({0: pipeline_128, 55: pipeline_1024})

    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001,
                                         beta_1=0.0,
                                         beta_2=0.99,
                                         epsilon=1e-8)

    fade_in_alpha = tf.Variable(initial_value=1.0,
                                dtype='float32',
                                trainable=False)

    d2, d3, d4, d5, d6, d7, d8, d9, d10 = fe.build(
        model_def=lambda: build_D(
            fade_in_alpha=fade_in_alpha, target_resolution=10, num_channels=1),
        model_name=["d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10"],
        optimizer=[optimizer] * 9,
        loss_name=["dloss"] * 9)

    g2, g3, g4, g5, g6, g7, g8, g9, g10, G = fe.build(
        model_def=lambda: build_G(
            fade_in_alpha=fade_in_alpha, target_resolution=10, num_channels=1),
        model_name=[
            "g2", "g3", "g4", "g5", "g6", "g7", "g8", "g9", "g10", "G"
        ],
        optimizer=[optimizer] * 10,
        loss_name=["gloss"] * 10)

    g_scheduler = Scheduler({
        0: ModelOp(model=g2, outputs="x_fake"),
        5: ModelOp(model=g3, outputs="x_fake"),
        15: ModelOp(model=g4, outputs="x_fake"),
        25: ModelOp(model=g5, outputs="x_fake"),
        35: ModelOp(model=g6, outputs="x_fake"),
        45: ModelOp(model=g7, outputs="x_fake"),
        55: ModelOp(model=g8, outputs="x_fake"),
        65: ModelOp(model=g9, outputs="x_fake"),
        75: ModelOp(model=g10, outputs="x_fake")
    })

    fake_score_scheduler = Scheduler({
        0: ModelOp(inputs="x_fake", model=d2, outputs="fake_score"),
        5: ModelOp(inputs="x_fake", model=d3, outputs="fake_score"),
        15: ModelOp(inputs="x_fake", model=d4, outputs="fake_score"),
        25: ModelOp(inputs="x_fake", model=d5, outputs="fake_score"),
        35: ModelOp(inputs="x_fake", model=d6, outputs="fake_score"),
        45: ModelOp(inputs="x_fake", model=d7, outputs="fake_score"),
        55: ModelOp(inputs="x_fake", model=d8, outputs="fake_score"),
        65: ModelOp(inputs="x_fake", model=d9, outputs="fake_score"),
        75: ModelOp(inputs="x_fake", model=d10, outputs="fake_score")
    })

    real_score_scheduler = Scheduler({
        0: ModelOp(model=d2, outputs="real_score"),
        5: ModelOp(model=d3, outputs="real_score"),
        15: ModelOp(model=d4, outputs="real_score"),
        25: ModelOp(model=d5, outputs="real_score"),
        35: ModelOp(model=d6, outputs="real_score"),
        45: ModelOp(model=d7, outputs="real_score"),
        55: ModelOp(model=d8, outputs="real_score"),
        65: ModelOp(model=d9, outputs="real_score"),
        75: ModelOp(model=d10, outputs="real_score")
    })

    interp_score_scheduler = Scheduler({
        0: ModelOp(inputs="x_interp", model=d2, outputs="interp_score", track_input=True),
        5: ModelOp(inputs="x_interp", model=d3, outputs="interp_score", track_input=True),
        15: ModelOp(inputs="x_interp", model=d4, outputs="interp_score", track_input=True),
        25: ModelOp(inputs="x_interp", model=d5, outputs="interp_score", track_input=True),
        35: ModelOp(inputs="x_interp", model=d6, outputs="interp_score", track_input=True),
        45: ModelOp(inputs="x_interp", model=d7, outputs="interp_score", track_input=True),
        55: ModelOp(inputs="x_interp", model=d8, outputs="interp_score", track_input=True),
        65: ModelOp(inputs="x_interp", model=d9, outputs="interp_score", track_input=True),
        75: ModelOp(inputs="x_interp", model=d10, outputs="interp_score", track_input=True)
    })

    network = fe.Network(ops=[
        RandomInput(inputs=lambda: 512),
        g_scheduler,
        fake_score_scheduler,
        ImageBlender(inputs=("x", "x_lowres"), alpha=fade_in_alpha),
        real_score_scheduler,
        Interpolate(inputs=("x_fake", "x"), outputs="x_interp"),
        interp_score_scheduler,
        GradientPenalty(inputs=("x_interp", "interp_score"), outputs="gp"),
        GLoss(inputs="fake_score", outputs="gloss"),
        DLoss(inputs=("real_score", "fake_score", "gp"), outputs="dloss")
    ])

    if save_dir is None:
        save_dir = os.path.join(str(Path.home()), 'fastestimator_results',
                                'NIH_CXR_PGGAN')
        os.makedirs(save_dir, exist_ok=True)

    estimator = fe.Estimator(
        network=network,
        pipeline=pipeline_scheduler,
        epochs=85,
        traces=[
            AlphaController(alpha=fade_in_alpha,
                            fade_start=[5, 15, 25, 35, 45, 55, 65, 75, 85],
                            duration=[5, 5, 5, 5, 5, 5, 5, 5, 5]),
            ResetOptimizer(reset_epochs=[5, 15, 25, 35, 45, 55, 65, 75],
                           optimizer=optimizer),
            ImageSaving(epoch_model={
                4: g2,
                14: g3,
                24: g4,
                34: g5,
                44: g6,
                54: g7,
                64: g8,
                74: g9,
                84: G
            },
                        save_dir=save_dir,
                        num_channels=1),
            ModelSaving(epoch_model={84: G}, save_dir=save_dir)
        ])
    return estimator
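
Every Scheduler in this example maps an epoch number to the value (batch size, pipeline op, or ModelOp) that takes effect from that epoch onward. A minimal sketch of that lookup rule (current_value is an illustrative helper, not the library's API):

def current_value(schedule, epoch):
    # e.g. {0: 128, 5: 64, 15: 32}: epoch 7 -> 64, epoch 20 -> 32.
    keys = [k for k in sorted(schedule) if k <= epoch]
    return schedule[keys[-1]] if keys else None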
Example #18
def get_estimator():
    train_csv, data_path = load_data()
    writer = RecordWriter(save_dir=os.path.join(data_path, "tfrecord"),
                          train_data=train_csv,
                          ops=[
                              ImageReader(inputs="x", parent_path=data_path),
                              ResizeRecord(target_size=(128, 128), outputs="x")
                          ])

    # Schedule the batch size: each key is the epoch at which the new value takes effect.
    batchsize_scheduler = Scheduler({0: 64, 5: 32, 15: 16, 25: 8, 35: 4})
    # batchsize_scheduler = Scheduler({0: 64, 5: 64, 15: 64, 25: 64, 35: 32})

    # We create a scheduler for the Resize ops.
    resize_scheduler = Scheduler({
        0:
        Resize(inputs="x", size=(4, 4), outputs="x"),
        5:
        Resize(inputs="x", size=(8, 8), outputs="x"),
        15:
        Resize(inputs="x", size=(16, 16), outputs="x"),
        25:
        Resize(inputs="x", size=(32, 32), outputs="x"),
        35:
        Resize(inputs="x", size=(64, 64), outputs="x"),
        45:
        None
    })

    # In Pipeline, we use the schedulers for batch_size and ops.
    pipeline = fe.Pipeline(batch_size=batchsize_scheduler,
                           data=writer,
                           ops=[
                               resize_scheduler,
                               CreateLowRes(inputs="x", outputs="x_lowres"),
                               Rescale(inputs="x", outputs="x"),
                               Rescale(inputs="x_lowres", outputs="x_lowres")
                           ])

    opt2, opt3, opt4, opt5, opt6, opt7 = [
        tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.0, beta_2=0.99, epsilon=1e-8)
        for _ in range(6)
    ]
    fade_in_alpha = tf.Variable(initial_value=1.0,
                                dtype='float32',
                                trainable=False)
    d2, d3, d4, d5, d6, d7 = fe.build(
        model_def=lambda: build_D(fade_in_alpha=fade_in_alpha, target_resolution=7),
        model_name=["d2", "d3", "d4", "d5", "d6", "d7"],
        optimizer=[opt2, opt3, opt4, opt5, opt6, opt7],
        loss_name=["dloss"] * 6)

    g2, g3, g4, g5, g6, g7, G = fe.build(
        model_def=lambda: build_G(fade_in_alpha=fade_in_alpha, target_resolution=7),
        model_name=["g2", "g3", "g4", "g5", "g6", "g7", "G"],
        optimizer=[opt2, opt3, opt4, opt5, opt6, opt7, opt7],
        loss_name=["gloss"] * 7)

    g_scheduler = Scheduler({
        0: ModelOp(model=g2, outputs="x_fake"),
        5: ModelOp(model=g3, outputs="x_fake"),
        15: ModelOp(model=g4, outputs="x_fake"),
        25: ModelOp(model=g5, outputs="x_fake"),
        35: ModelOp(model=g6, outputs="x_fake"),
        45: ModelOp(model=g7, outputs="x_fake"),
    })

    fake_score_scheduler = Scheduler({
        0: ModelOp(inputs="x_fake", model=d2, outputs="fake_score"),
        5: ModelOp(inputs="x_fake", model=d3, outputs="fake_score"),
        15: ModelOp(inputs="x_fake", model=d4, outputs="fake_score"),
        25: ModelOp(inputs="x_fake", model=d5, outputs="fake_score"),
        35: ModelOp(inputs="x_fake", model=d6, outputs="fake_score"),
        45: ModelOp(inputs="x_fake", model=d7, outputs="fake_score")
    })

    real_score_scheduler = Scheduler({
        0: ModelOp(model=d2, outputs="real_score"),
        5: ModelOp(model=d3, outputs="real_score"),
        15: ModelOp(model=d4, outputs="real_score"),
        25: ModelOp(model=d5, outputs="real_score"),
        35: ModelOp(model=d6, outputs="real_score"),
        45: ModelOp(model=d7, outputs="real_score")
    })

    interp_score_scheduler = Scheduler({
        0: ModelOp(inputs="x_interp", model=d2, outputs="interp_score", track_input=True),
        5: ModelOp(inputs="x_interp", model=d3, outputs="interp_score", track_input=True),
        15: ModelOp(inputs="x_interp", model=d4, outputs="interp_score", track_input=True),
        25: ModelOp(inputs="x_interp", model=d5, outputs="interp_score", track_input=True),
        35: ModelOp(inputs="x_interp", model=d6, outputs="interp_score", track_input=True),
        45: ModelOp(inputs="x_interp", model=d7, outputs="interp_score", track_input=True)
    })

    network = fe.Network(ops=[
        RandomInput(inputs=lambda: 512),
        g_scheduler,
        fake_score_scheduler,
        ImageBlender(inputs=("x", "x_lowres"), alpha=fade_in_alpha),
        real_score_scheduler,
        Interpolate(inputs=("x_fake", "x"), outputs="x_interp"),
        interp_score_scheduler,
        GradientPenalty(inputs=("x_interp", "interp_score"), outputs="gp"),
        GLoss(inputs="fake_score", outputs="gloss"),
        DLoss(inputs=("real_score", "fake_score", "gp"), outputs="dloss")
    ])

    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=55,
                             traces=[
                                 AlphaController(
                                     alpha=fade_in_alpha,
                                     fade_start=[5, 15, 25, 35, 45, 55],
                                     duration=[5, 5, 5, 5, 5, 5]),
                                 ImageSaving(epoch_model={
                                     4: "g2",
                                     14: "g3",
                                     24: "g4",
                                     34: "g5",
                                     44: "g6",
                                     54: "g7"
                                 },
                                             save_dir="/data/Xiaomeng/images")
                             ])
    return estimator
Example #19
def get_estimator(data_path=None, model_dir=tempfile.mkdtemp(), batch_size=2):
    # prepare dataset
    train_csv, val_csv, path = load_data(path=data_path)
    writer = fe.RecordWriter(
        save_dir=os.path.join(path, "retinanet_coco_1024"),
        train_data=train_csv,
        validation_data=val_csv,
        ops=[
            ImageReader(inputs="image", parent_path=path, outputs="image"),
            String2List(inputs=["x1", "y1", "width", "height", "obj_label"],
                        outputs=["x1", "y1", "width", "height", "obj_label"]),
            ResizeImageAndBbox(
                target_size=(1024, 1024),
                keep_ratio=True,
                inputs=["image", "x1", "y1", "width", "height"],
                outputs=["image", "x1", "y1", "width", "height"]),
            FlipImageAndBbox(inputs=[
                "image", "x1", "y1", "width", "height", "obj_label", "id"
            ],
                             outputs=[
                                 "image", "x1", "y1", "width", "height",
                                 "obj_label", "id"
                             ]),
            GenerateTarget(inputs=("obj_label", "x1", "y1", "width", "height"),
                           outputs=("cls_gt", "x1_gt", "y1_gt", "w_gt",
                                    "h_gt"))
        ],
        expand_dims=True,
        compression="GZIP",
        write_feature=[
            "image", "id", "cls_gt", "x1_gt", "y1_gt", "w_gt", "h_gt",
            "obj_label", "x1", "y1", "width", "height"
        ])
    # prepare pipeline
    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=writer,
                           ops=[
                               Rescale(inputs="image", outputs="image"),
                               Pad(padded_shape=[2051],
                                   inputs=[
                                       "x1_gt", "y1_gt", "w_gt", "h_gt",
                                       "obj_label", "x1", "y1", "width",
                                       "height"
                                   ],
                                   outputs=[
                                       "x1_gt", "y1_gt", "w_gt", "h_gt",
                                       "obj_label", "x1", "y1", "width",
                                       "height"
                                   ])
                           ])
    # prepare network
    model = fe.build(model_def=lambda: RetinaNet(input_shape=(1024, 1024, 3),
                                                 num_classes=90),
                     model_name="retinanet",
                     optimizer=tf.optimizers.SGD(momentum=0.9),
                     loss_name="total_loss")
    network = fe.Network(ops=[
        ModelOp(inputs="image", model=model, outputs=["cls_pred", "loc_pred"]),
        RetinaLoss(inputs=("cls_gt", "x1_gt", "y1_gt", "w_gt", "h_gt",
                           "cls_pred", "loc_pred"),
                   outputs=("total_loss", "focal_loss", "l1_loss")),
        PredictBox(inputs=[
            "cls_pred", "loc_pred", "obj_label", "x1", "y1", "width", "height"
        ],
                   outputs=("pred", "gt"),
                   mode="eval",
                   input_shape=(1024, 1024, 3))
    ])
    # prepare estimator
    estimator = fe.Estimator(
        network=network,
        pipeline=pipeline,
        epochs=7,
        traces=[
            MeanAvgPrecision(90, (1024, 1024, 3),
                             'pred',
                             'gt',
                             output_name=("mAP", "AP50", "AP75")),
            ModelSaver(model_name="retinanet",
                       save_dir=model_dir,
                       save_best='mAP',
                       save_best_mode='max'),
            LRController(model_name="retinanet",
                         lr_schedule=MyLRSchedule(schedule_mode="step"))
        ])
    return estimator
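
MeanAvgPrecision scores detections by their overlap with ground truth at several IoU thresholds (mAP, AP50, AP75). A minimal sketch of the IoU computation for the [x1, y1, width, height] box format used here:

def iou(box_a, box_b):
    # Intersection-over-union of two axis-aligned boxes.
    ax1, ay1, aw, ah = box_a
    bx1, by1, bw, bh = box_b
    ix1, iy1 = max(ax1, bx1), max(ay1, by1)
    ix2, iy2 = min(ax1 + aw, bx1 + bw), min(ay1 + ah, by1 + bh)
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    return inter / (aw * ah + bw * bh - inter)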
Example #20
def get_estimator(weight=10.0, epochs=200, model_dir=tempfile.mkdtemp()):
    trainA_csv, trainB_csv, _, _, parent_path = load_data()
    tfr_save_dir = os.path.join(parent_path, 'FEdata')
    # Step 1: Define Pipeline
    writer = RecordWriter(train_data=(trainA_csv, trainB_csv),
                          save_dir=tfr_save_dir,
                          ops=([
                              ImageReader(inputs="imgA",
                                          outputs="imgA",
                                          parent_path=parent_path)
                          ], [
                              ImageReader(inputs="imgB",
                                          outputs="imgB",
                                          parent_path=parent_path)
                          ]))

    pipeline = fe.Pipeline(data=writer,
                           batch_size=1,
                           ops=[
                               Myrescale(inputs="imgA", outputs="imgA"),
                               RandomJitter(inputs="imgA", outputs="real_A"),
                               Myrescale(inputs="imgB", outputs="imgB"),
                               RandomJitter(inputs="imgB", outputs="real_B")
                           ])
    # Step2: Define Network
    g_AtoB = FEModel(model_def=build_generator,
                     model_name="g_AtoB",
                     loss_name="g_AtoB_loss",
                     optimizer=tf.keras.optimizers.Adam(2e-4, 0.5))

    g_BtoA = FEModel(model_def=build_generator,
                     model_name="g_BtoA",
                     loss_name="g_BtoA_loss",
                     optimizer=tf.keras.optimizers.Adam(2e-4, 0.5))

    d_A = FEModel(model_def=build_discriminator,
                  model_name="d_A",
                  loss_name="d_A_loss",
                  optimizer=tf.keras.optimizers.Adam(2e-4, 0.5))

    d_B = FEModel(model_def=build_discriminator,
                  model_name="d_B",
                  loss_name="d_B_loss",
                  optimizer=tf.keras.optimizers.Adam(2e-4, 0.5))

    network = fe.Network(ops=[
        ModelOp(inputs="real_A", model=g_AtoB, outputs="fake_B"),
        ModelOp(inputs="real_B", model=g_BtoA, outputs="fake_A"),
        ModelOp(inputs="real_A", model=d_A, outputs="d_real_A"),
        ModelOp(inputs="fake_A", model=d_A, outputs="d_fake_A"),
        ModelOp(inputs="real_B", model=d_B, outputs="d_real_B"),
        ModelOp(inputs="fake_B", model=d_B, outputs="d_fake_B"),
        ModelOp(inputs="real_A", model=g_BtoA, outputs="same_A"),
        ModelOp(inputs="fake_B", model=g_BtoA, outputs="cycled_A"),
        ModelOp(inputs="real_B", model=g_AtoB, outputs="same_B"),
        ModelOp(inputs="fake_A", model=g_AtoB, outputs="cycled_B"),
        GLoss(inputs=("real_A", "d_fake_B", "cycled_A", "same_A"),
              weight=weight,
              outputs="g_AtoB_loss"),
        GLoss(inputs=("real_B", "d_fake_A", "cycled_B", "same_B"),
              weight=weight,
              outputs="g_BtoA_loss"),
        DLoss(inputs=("d_real_A", "d_fake_A"), outputs="d_A_loss"),
        DLoss(inputs=("d_real_B", "d_fake_B"), outputs="d_B_loss")
    ])
    # Step3: Define Estimator
    traces = [
        ModelSaver(model_name="g_AtoB", save_dir=model_dir, save_freq=10),
        ModelSaver(model_name="g_BtoA", save_dir=model_dir, save_freq=10)
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             traces=traces)
    return estimator
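
The GLoss and DLoss ops above are custom CycleGAN losses defined elsewhere in this example. As a reference, here is a minimal TensorFlow sketch of the standard objectives they presumably implement (a least-squares adversarial term plus L1 cycle-consistency and identity terms, with weight scaling the consistency part); the example's actual ops may differ in detail:

import tensorflow as tf

mse = tf.keras.losses.MeanSquaredError()

def g_loss(real, d_fake, cycled, same, weight=10.0):
    # Adversarial term: the generator wants the discriminator to score
    # its fakes as real (label 1).
    adversarial = mse(tf.ones_like(d_fake), d_fake)
    # Cycle-consistency: translating A -> B -> A should recover the input.
    cycle = tf.reduce_mean(tf.abs(real - cycled))
    # Identity term: a generator fed an image from its own target domain
    # should leave it roughly unchanged.
    identity = tf.reduce_mean(tf.abs(real - same))
    return adversarial + weight * cycle + 0.5 * weight * identity

def d_loss(d_real, d_fake):
    # The discriminator scores real images as 1 and generated images as 0.
    return 0.5 * (mse(tf.ones_like(d_real), d_real) +
                  mse(tf.zeros_like(d_fake), d_fake))
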
Example No. 21
def get_estimator(epochs=10,
                  batch_size=32,
                  epsilon=0.01,
                  warmup=0,
                  model_dir=tempfile.mkdtemp()):
    (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.cifar10.load_data()
    data = {
        "train": {
            "x": x_train,
            "y": y_train
        },
        "eval": {
            "x": x_eval,
            "y": y_eval
        }
    }
    num_classes = 10

    pipeline = Pipeline(batch_size=batch_size,
                        data=data,
                        ops=Minmax(inputs="x", outputs="x"))

    model = FEModel(model_def=lambda: LeNet(input_shape=x_train.shape[1:],
                                            classes=num_classes),
                    model_name="LeNet",
                    optimizer="adam")

    adv_img = {
        warmup:
        AdversarialSample(inputs=("loss", "x"),
                          outputs="x_adverse",
                          epsilon=epsilon,
                          mode="train")
    }
    adv_eval = {
        warmup:
        ModelOp(inputs="x_adverse",
                model=model,
                outputs="y_pred_adverse",
                mode="train")
    }
    adv_loss = {
        warmup:
        SparseCategoricalCrossentropy(y_true="y",
                                      y_pred="y_pred_adverse",
                                      outputs="adverse_loss",
                                      mode="train")
    }
    adv_avg = {
        warmup:
        Average(inputs=("loss", "adverse_loss"), outputs="loss", mode="train")
    }

    network = Network(ops=[
        ModelOp(inputs="x", model=model, outputs="y_pred", track_input=True),
        SparseCategoricalCrossentropy(
            y_true="y", y_pred="y_pred", outputs="loss"),
        Scheduler(adv_img),
        Scheduler(adv_eval),
        Scheduler(adv_loss),
        Scheduler(adv_avg)
    ])

    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        ConfusionMatrix(true_key="y",
                        pred_key="y_pred",
                        num_classes=num_classes),
        ModelSaver(model_name="LeNet", save_dir=model_dir, save_freq=2)
    ]

    estimator = Estimator(network=network,
                          pipeline=pipeline,
                          epochs=epochs,
                          traces=traces)

    return estimator
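
The Scheduler dicts keyed by warmup switch the adversarial branch on from that epoch onward, so the extra loss only applies once the model has trained normally for warmup epochs. AdversarialSample applies the Fast Gradient Sign Method; below is a standalone TensorFlow sketch of the same attack, assuming a model that outputs class probabilities:

import tensorflow as tf

def fgsm(model, x, y, epsilon=0.01):
    # Fast Gradient Sign Method: nudge every input pixel by +/- epsilon
    # in the direction that increases the classification loss.
    x = tf.convert_to_tensor(x, dtype=tf.float32)
    with tf.GradientTape() as tape:
        tape.watch(x)
        y_pred = model(x, training=False)
        loss = tf.keras.losses.sparse_categorical_crossentropy(y, y_pred)
    grad = tape.gradient(loss, x)
    x_adverse = x + epsilon * tf.sign(grad)
    # Keep the adversarial image inside the Minmax-normalized [0, 1] range.
    return tf.clip_by_value(x_adverse, 0.0, 1.0)
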
Example No. 22
def get_estimator(epochs=200,
                  batch_size=128,
                  steps_per_epoch=500,
                  validation_steps=100,
                  model_dir=tempfile.mkdtemp()):
    # step 1. prepare pipeline
    train_path, eval_path = load_data()
    data = {
        "train": lambda: get_batch(train_path, batch_size=batch_size),
        "eval": lambda: get_batch(eval_path, batch_size=batch_size, is_train=False)
    }
    pipeline = fe.Pipeline(
        data=data,
        batch_size=batch_size,
        ops=[
            Probability(
                tensor_op=Augmentation2D(inputs="x_a",
                                         outputs="x_a",
                                         mode="train",
                                         rotation_range=10.0,
                                         shear_range=-0.3 * 180 / np.pi,
                                         zoom_range=[0.8, 1.2],
                                         width_shift_range=0.05,
                                         height_shift_range=0.05),
                prob=0.89),
            Probability(
                tensor_op=Augmentation2D(inputs="x_b",
                                         outputs="x_b",
                                         mode="train",
                                         rotation_range=10.0,
                                         shear_range=-0.3 * 180 / np.pi,
                                         zoom_range=[0.8, 1.2],
                                         width_shift_range=0.05,
                                         height_shift_range=0.05),
                prob=0.89),
            Minmax(inputs="x_a", outputs="x_a"),
            Minmax(inputs="x_b", outputs="x_b")
        ])

    # step 2. prepare model
    model = fe.build(model_def=siamese_network,
                     model_name="siamese_net",
                     optimizer=Adam(learning_rate=1e-4),
                     loss_name="loss")

    network = fe.Network(ops=[
        ModelOp(inputs=["x_a", "x_b"], model=model, outputs="y_pred"),
        BinaryCrossentropy(inputs=("y", "y_pred"), outputs="loss")
    ])

    # Defining learning rate schedule
    lr_scheduler = LRDecaySchedule(decay_rate=0.99)

    # Loading images for validation one-shot accuracy
    val_img_list = load_eval_data(path=eval_path, is_test=False)

    # step 3. prepare estimator
    traces = [
        LRController(model_name="siamese_net", lr_schedule=lr_scheduler),
        OneShotAccuracy(model=model, img_list=val_img_list, output_name='one_shot_accuracy'),
        ModelSaver(model_name="siamese_net", save_dir=model_dir, save_best='one_shot_accuracy', save_best_mode='max'),
        EarlyStopping(monitor="one_shot_accuracy", patience=20, compare='max', mode="eval")
    ]

    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             traces=traces,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps)
    return estimator
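
OneShotAccuracy scores the siamese network on the one-shot classification task using the held-out image list. A hedged NumPy sketch of the underlying prediction rule, assuming the network emits one similarity score per image pair:

import numpy as np

def one_shot_predict(model, query_img, support_imgs):
    # Pair the single query with every image in the support set; the
    # class of the highest-scoring support image is the prediction.
    queries = np.repeat(query_img[np.newaxis, ...], len(support_imgs), axis=0)
    scores = model.predict([queries, np.stack(support_imgs)])
    return int(np.argmax(scores))
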
Example No. 23
def get_estimator(pretrained_fe_path, classifier_path, data_path=None):

    assert os.path.exists(pretrained_fe_path), "Pretrained feature extractor is missing"
    assert os.path.exists(classifier_path), "Pretrained classifier is missing"
    usps_train_csv, usps_eval_csv, usps_parent_dir = usps.load_data(data_path)
    mnist_train_csv, mnist_eval_csv, mnist_parent_dir = mnist.load_data(data_path)

    tfr_path = os.path.join(os.path.dirname(usps_parent_dir), 'ADDA-tfrecords')
    os.makedirs(tfr_path, exist_ok=True)

    df = pd.read_csv(usps_train_csv)
    df.columns = ['target_img', 'target_label']
    df.to_csv(usps_train_csv, index=False)

    df = pd.read_csv(usps_eval_csv)
    df.columns = ['target_img', 'target_label']
    df.to_csv(usps_eval_csv, index=False)

    df = pd.read_csv(mnist_train_csv)
    df.columns = ['source_img', 'source_label']
    df.to_csv(mnist_train_csv, index=False)

    df = pd.read_csv(mnist_eval_csv)
    df.columns = ['source_img', 'source_label']
    df.to_csv(mnist_eval_csv, index=False)

    BATCH_SIZE = 128

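    # Step 1: Define Pipeline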
    writer = RecordWriter(save_dir=tfr_path,
                          train_data=(usps_train_csv, mnist_train_csv),
                          ops=(
                              [ImageReader(inputs="target_img", outputs="target_img", parent_path=usps_parent_dir, grey_scale=True)], # first tuple element
                              [ImageReader(inputs="source_img", outputs="source_img", parent_path=mnist_parent_dir, grey_scale=True)])) # second tuple element

    pipeline = fe.Pipeline(
        batch_size=BATCH_SIZE,
        data=writer,
        ops=[
            Resize(inputs="target_img", outputs="target_img", size=(32, 32)),
            Resize(inputs="source_img", outputs="source_img", size=(32, 32)),
            Minmax(inputs="target_img", outputs="target_img"),
            Minmax(inputs="source_img", outputs="source_img")
        ])

    # Step 2: Define Network
    feature_extractor = fe.build(model_def=build_feature_extractor,
                                 model_name="fe",
                                 loss_name="fe_loss",
                                 optimizer=tf.keras.optimizers.Adam(1e-4, beta_1=0.5, beta_2=0.9))

    discriminator = fe.build(model_def=build_discriminator,
                             model_name="disc",
                             loss_name="d_loss",
                             optimizer=tf.keras.optimizers.Adam(1e-4, beta_1=0.5, beta_2=0.9))

    network = fe.Network(ops=[
        ModelOp(inputs="target_img", outputs="target_feature", model=feature_extractor),
        ModelOp(inputs="target_feature", outputs="target_score", model=discriminator),
        ExtractSourceFeature(model_path=pretrained_fe_path, inputs="source_img", outputs="source_feature"),
        ModelOp(inputs="source_feature", outputs="source_score", model=discriminator),
        DLoss(inputs=("source_score", "target_score"), outputs="d_loss"),
        FELoss(inputs="target_score", outputs="fe_loss")
    ])

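    # Step 3: Define Estimator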
    traces = [
        LoadPretrainedFE(model_name="fe", model_path=pretrained_fe_path),
        EvaluateTargetClassifier(model_name="fe", model_path=classifier_path)
    ]

    estimator = fe.Estimator(pipeline=pipeline, network=network, traces=traces, epochs=100)

    return estimator
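
DLoss and FELoss above are the two sides of the ADDA adversarial game. A hedged TensorFlow sketch of the standard formulation they presumably follow, assuming the discriminator outputs raw logits; the example's own ops may differ:

import tensorflow as tf

bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def d_loss(source_score, target_score):
    # Discriminator: label pretrained source features as 1 and features
    # from the adapting target extractor as 0.
    return (bce(tf.ones_like(source_score), source_score) +
            bce(tf.zeros_like(target_score), target_score))

def fe_loss(target_score):
    # Target feature extractor: fool the discriminator into labeling
    # target features as source (1).
    return bce(tf.ones_like(target_score), target_score)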