def get_estimator(epochs=10, batch_size=32, alpha=1.0, warmup=0, model_dir=tempfile.mkdtemp()):
    (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.cifar10.load_data()
    data = {"train": {"x": x_train, "y": y_train}, "eval": {"x": x_eval, "y": y_eval}}
    num_classes = 10
    pipeline = fe.Pipeline(batch_size=batch_size, data=data, ops=Minmax(inputs="x", outputs="x"))

    model = FEModel(model_def=lambda: LeNet(input_shape=x_train.shape[1:], classes=num_classes),
                    model_name="LeNet",
                    optimizer="adam")

    mixup_map = {warmup: MixUpBatch(inputs="x", outputs=["x", "lambda"], alpha=alpha, mode="train")}
    mixup_loss = {
        0: SparseCategoricalCrossentropy(y_true="y", y_pred="y_pred", mode="train"),
        warmup: MixUpLoss(KerasCrossentropy(), lam="lambda", y_true="y", y_pred="y_pred", mode="train")
    }
    network = fe.Network(ops=[
        Scheduler(mixup_map),
        ModelOp(inputs="x", model=model, outputs="y_pred"),
        Scheduler(mixup_loss),
        SparseCategoricalCrossentropy(y_true="y", y_pred="y_pred", mode="eval")
    ])

    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        ConfusionMatrix(true_key="y", pred_key="y_pred", num_classes=num_classes),
        ModelSaver(model_name="LeNet", save_dir=model_dir, save_best=True)
    ]

    estimator = fe.Estimator(network=network, pipeline=pipeline, epochs=epochs, traces=traces)
    return estimator
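
MixUpBatch and MixUpLoss above are project-specific ops defined elsewhere. For reference, a minimal NumPy sketch of the underlying mixup computation (Zhang et al., 2017); the helper name and return convention here are assumptions, not the ops' actual code:

import numpy as np

def mixup_batch(x, y, alpha=1.0):
    """Blend each sample with a shuffled partner using lam ~ Beta(alpha, alpha)."""
    lam = np.random.beta(alpha, alpha)
    idx = np.random.permutation(len(x))
    x_mixed = lam * x + (1.0 - lam) * x[idx]
    # The paired loss is then lam * CE(y, y_pred) + (1 - lam) * CE(y[idx], y_pred).
    return x_mixed, y, y[idx], lam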
Example #2
def get_estimator(style_img_path=None,
                  data_path=None,
                  style_weight=5.0,
                  content_weight=1.0,
                  tv_weight=1e-4,
                  steps_per_epoch=None,
                  validation_steps=None,
                  model_dir=tempfile.mkdtemp()):
    train_csv, _, path = load_data(data_path, load_object=False)
    if style_img_path is None:
        style_img_path = tf.keras.utils.get_file(
            'kandinsky.jpg',
            'https://storage.googleapis.com/download.tensorflow.org/example_images/Vassily_Kandinsky%2C_1913_'
            '-_Composition_7.jpg')
    style_img = cv2.imread(style_img_path)
    assert (style_img is not None), "Invalid style reference image"
    tfr_save_dir = os.path.join(path, 'tfrecords')
    style_img = (style_img.astype(np.float32) - 127.5) / 127.5  # rescale to [-1, 1]
    style_img_t = tf.convert_to_tensor(np.expand_dims(style_img, axis=0))
    writer = RecordWriter(train_data=train_csv,
                          save_dir=tfr_save_dir,
                          ops=[
                              ImageReader(inputs="image",
                                          parent_path=path,
                                          outputs="image"),
                              Resize(inputs="image",
                                     target_size=(256, 256),
                                     outputs="image")
                          ])

    pipeline = fe.Pipeline(batch_size=4,
                           data=writer,
                           ops=[Rescale(inputs="image", outputs="image")])

    model = fe.build(model_def=styleTransferNet,
                     model_name="style_transfer_net",
                     loss_name="loss",
                     optimizer=tf.keras.optimizers.Adam(1e-3))

    network = fe.Network(ops=[
        ModelOp(inputs="image", model=model, outputs="image_out"),
        ExtractVGGFeatures(inputs=lambda: style_img_t, outputs="y_style"),
        ExtractVGGFeatures(inputs="image", outputs="y_content"),
        ExtractVGGFeatures(inputs="image_out", outputs="y_pred"),
        StyleContentLoss(style_weight=style_weight,
                         content_weight=content_weight,
                         tv_weight=tv_weight,
                         inputs=('y_pred', 'y_style', 'y_content',
                                 'image_out'),
                         outputs='loss')
    ])

    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=2,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps,
                             traces=ModelSaver(model_name="style_transfer_net",
                                               save_dir=model_dir))
    return estimator
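
StyleContentLoss and ExtractVGGFeatures are defined elsewhere in this example. The style term of such losses conventionally compares Gram matrices of VGG feature maps; a minimal TensorFlow sketch of that building block (standard perceptual-loss formulation, not necessarily the exact op used here):

import tensorflow as tf

def gram_matrix(features):
    """Channel-by-channel feature correlations, normalized by spatial size."""
    shape = tf.shape(features)
    flat = tf.reshape(features, (shape[0], shape[1] * shape[2], shape[3]))
    num_positions = tf.cast(shape[1] * shape[2], tf.float32)
    return tf.matmul(flat, flat, transpose_a=True) / num_positions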
Example #3
def get_estimator(epochs=2, batch_size=32, model_dir=tempfile.mkdtemp()):
    # step 1. prepare data
    (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.mnist.load_data()
    train_data = {"x": np.expand_dims(x_train, -1), "y": y_train}
    eval_data = {"x": np.expand_dims(x_eval, -1), "y": y_eval}
    data = {"train": train_data, "eval": eval_data}
    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=data,
                           ops=Minmax(inputs="x", outputs="x"))

    # step 2. prepare model
    model = fe.FEModel(model_def=LeNet, model_name="lenet", optimizer="adam")
    network = fe.Network(ops=[
        ModelOp(inputs="x", model=model, outputs="y_pred"),
        SparseCategoricalCrossentropy(inputs=("y", "y_pred"))
    ])

    # step 3. prepare estimator
    traces = [
        Accuracy(true_key="y", pred_key="y_pred", output_name='acc'),
        ModelSaver(model_name="lenet", save_dir=model_dir, save_best=True)
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             traces=traces)
    return estimator
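
LeNet here is a project helper; one plausible tf.keras definition (the exact layer widths are assumptions):

from tensorflow.keras import Sequential, layers

def LeNet(input_shape=(28, 28, 1), classes=10):
    return Sequential([
        layers.Conv2D(32, 3, activation='relu', input_shape=input_shape),
        layers.MaxPooling2D(),
        layers.Conv2D(64, 3, activation='relu'),
        layers.MaxPooling2D(),
        layers.Flatten(),
        layers.Dense(64, activation='relu'),
        layers.Dense(classes, activation='softmax')
    ])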
Example #4
def get_estimator(batch_size=256, epochs=50, model_dir=tempfile.mkdtemp()):
    # prepare data
    (x_train, _), (_, _) = tf.keras.datasets.mnist.load_data()
    data = {"train": {"x": np.expand_dims(x_train, -1)}}
    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=data,
                           ops=Myrescale(inputs="x", outputs="x"))
    # prepare model
    g_femodel = FEModel(model_def=make_generator_model,
                        model_name="gen",
                        loss_name="gloss",
                        optimizer=tf.optimizers.Adam(1e-4))
    d_femodel = FEModel(model_def=make_discriminator_model,
                        model_name="disc",
                        loss_name="dloss",
                        optimizer=tf.optimizers.Adam(1e-4))
    network = fe.Network(ops=[
        ModelOp(inputs=lambda: tf.random.normal([batch_size, 100]),
                model=g_femodel),
        ModelOp(model=d_femodel, outputs="pred_fake"),
        ModelOp(inputs="x", model=d_femodel, outputs="pred_true"),
        GLoss(inputs=("pred_fake"), outputs="gloss"),
        DLoss(inputs=("pred_true", "pred_fake"), outputs="dloss")
    ])
    # prepare estimator
    traces = [ModelSaver(model_name='gen', save_dir=model_dir, save_freq=5)]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             traces=traces)
    return estimator
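
GLoss and DLoss are defined elsewhere; assuming the standard DCGAN objectives on raw logits, they compute something like this standalone sketch:

import tensorflow as tf

bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def g_loss(pred_fake):
    # The generator is rewarded when the discriminator scores fakes as real.
    return bce(tf.ones_like(pred_fake), pred_fake)

def d_loss(pred_true, pred_fake):
    # The discriminator labels real samples 1 and generated samples 0.
    return (bce(tf.ones_like(pred_true), pred_true) +
            bce(tf.zeros_like(pred_fake), pred_fake))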
Example #5
def get_estimator(batch_size=32, epochs=25, model_dir=tempfile.mkdtemp()):
    # load CUB200 dataset.
    csv_path, path = cub200.load_data()
    writer = RecordWriter(
        save_dir=os.path.join(path, "FEdata"),
        train_data=csv_path,
        validation_data=0.2,
        ops=[
            ImageReader(inputs='image', parent_path=path),
            Resize(target_size=(128, 128), keep_ratio=True, outputs='image'),
            MatReader(inputs='annotation', parent_path=path),
            SelectDictKey(),
            Resize((128, 128), keep_ratio=True),
            Reshape(shape=(128, 128, 1), outputs="annotation")
        ])
    # data pipeline
    pipeline = fe.Pipeline(batch_size=batch_size, data=writer, ops=Minmax(inputs='image', outputs='image'))

    # Network
    model = FEModel(model_def=UNet, model_name="unet_cub", optimizer=tf.optimizers.Adam())
    network = fe.Network(ops=[
        ModelOp(inputs='image', model=model, outputs='mask_pred'),
        BinaryCrossentropy(y_true='annotation', y_pred='mask_pred')
    ])

    # estimator
    traces = [
        Dice(true_key="annotation", pred_key='mask_pred'),
        ModelSaver(model_name="unet_cub", save_dir=model_dir, save_best=True)
    ]
    estimator = fe.Estimator(network=network, pipeline=pipeline, traces=traces, epochs=epochs, log_steps=50)
    return estimator
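
The Dice trace reports mask overlap between prediction and annotation; a standalone sketch of the usual metric (the 0.5 binarization threshold is an assumption):

import numpy as np

def dice_coefficient(mask_true, mask_pred, threshold=0.5, eps=1e-7):
    """Dice = 2 * |A & B| / (|A| + |B|) over binarized masks."""
    a = (np.asarray(mask_pred) > threshold).astype(np.float32)
    b = (np.asarray(mask_true) > threshold).astype(np.float32)
    return (2.0 * np.sum(a * b) + eps) / (np.sum(a) + np.sum(b) + eps)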
Example #6
def get_estimator(batch_size=4, epochs=25, model_dir=tempfile.mkdtemp()):
    csv_path, path = montgomery.load_data()
    writer = RecordWriter(save_dir=os.path.join(path, "FEdata"),
                          train_data=csv_path,
                          validation_data=0.2,
                          ops=[
                              ImageReader(grey_scale=True,
                                          inputs="image",
                                          parent_path=path,
                                          outputs="image"),
                              ImageReader(grey_scale=True,
                                          inputs="mask_left",
                                          parent_path=path,
                                          outputs="mask_left"),
                              ImageReader(grey_scale=True,
                                          inputs="mask_right",
                                          parent_path=path,
                                          outputs="mask_right"),
                              CombineLeftRightMask(inputs=("mask_left",
                                                           "mask_right")),
                              Resize(target_size=(512, 512)),
                              Reshape(shape=(512, 512, 1), outputs="mask"),
                              Resize(inputs="image", target_size=(512, 512)),
                              Reshape(shape=(512, 512, 1), outputs="image"),
                          ],
                          write_feature=["image", "mask"])

    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=writer,
                           ops=[
                               Augmentation2D(inputs=["image", "mask"],
                                              outputs=["image", "mask"],
                                              mode="train",
                                              rotation_range=10,
                                              flip_left_right=True),
                               Minmax(inputs="image", outputs="image"),
                               Minmax(inputs="mask", outputs="mask")
                           ])

    model = FEModel(model_def=lambda: UNet(input_size=(512, 512, 1)),
                    model_name="lungsegmentation",
                    optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001))

    network = fe.Network(ops=[
        ModelOp(inputs="image", model=model, outputs="pred_segment"),
        BinaryCrossentropy(y_true="mask", y_pred="pred_segment")
    ])

    traces = [
        Dice(true_key="mask", pred_key="pred_segment"),
        ModelSaver(model_name="lungsegmentation",
                   save_dir=model_dir,
                   save_best=True)
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             log_steps=20,
                             traces=traces)
    return estimator
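
CombineLeftRightMask is a project op; it presumably unions the two lung-field masks into the single "mask" feature, roughly:

import numpy as np

def combine_left_right_mask(mask_left, mask_right):
    # Elementwise max merges the two binary lung masks into one.
    return np.maximum(mask_left, mask_right)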
Example #7
def get_estimator(batch_size=8, epochs=25, steps_per_epoch=None, validation_steps=None, model_dir=tempfile.mkdtemp()):
    # load CUB200 dataset.
    csv_path, path = cub200.load_data()
    writer = RecordWriter(
        save_dir=os.path.join(path, "tfrecords"),
        train_data=csv_path,
        validation_data=0.2,
        ops=[
            ImageReader(inputs='image', parent_path=path),
            Resize(target_size=(512, 512), keep_ratio=True, outputs='image'),
            MatReader(inputs='annotation', parent_path=path),
            SelectDictKey(),
            Resize((512, 512), keep_ratio=True),
            Reshape(shape=(512, 512, 1), outputs="annotation")
        ])
    # step 1, pipeline
    pipeline = fe.Pipeline(
        batch_size=batch_size,
        data=writer,
        ops=[
            Augmentation2D(inputs=("image", "annotation"),
                           outputs=("image", "annotation"),
                           mode="train",
                           rotation_range=15.0,
                           zoom_range=[0.8, 1.2],
                           flip_left_right=True),
            Rescale(inputs='image', outputs='image')
        ])
    # step 2, network
    opt = tf.optimizers.Adam(learning_rate=0.0001)
    resunet50 = fe.build(model_def=ResUnet50, model_name="resunet50", optimizer=opt, loss_name="total_loss")
    uncertainty = fe.build(model_def=UncertaintyLoss, model_name="uncertainty", optimizer=opt, loss_name="total_loss")
    network = fe.Network(ops=[
        ModelOp(inputs='image', model=resunet50, outputs=["label_pred", "mask_pred"]),
        SparseCategoricalCrossentropy(inputs=["label", "label_pred"], outputs="cls_loss"),
        BinaryCrossentropy(inputs=["annotation", "mask_pred"], outputs="seg_loss"),
        ModelOp(inputs=("cls_loss", "seg_loss"), model=uncertainty, outputs="total_loss"),
        Loss(inputs="total_loss", outputs="total_loss")
    ])
    # step 3, estimator
    traces = [
        Dice(true_key="annotation", pred_key='mask_pred'),
        Accuracy(true_key="label", pred_key="label_pred"),
        ModelSaver(model_name="resunet50", save_dir=model_dir, save_best=True),
        LRController(model_name="resunet50", lr_schedule=CyclicLRSchedule())
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             traces=traces,
                             epochs=epochs,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps)
    return estimator
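
UncertaintyLoss above is built as a trainable model whose only parameters weight the two task losses. A sketch in the spirit of Kendall et al. (2018) homoscedastic-uncertainty weighting; the class and variable names are mine, and the repo's implementation may differ:

import tensorflow as tf

class UncertaintyWeightedLoss(tf.keras.Model):
    """total = exp(-s_cls) * cls_loss + s_cls + exp(-s_seg) * seg_loss + s_seg,
    where each s is a learned log-variance."""
    def __init__(self):
        super().__init__()
        self.s_cls = tf.Variable(0.0, trainable=True)
        self.s_seg = tf.Variable(0.0, trainable=True)

    def call(self, inputs):
        cls_loss, seg_loss = inputs
        return (tf.exp(-self.s_cls) * cls_loss + self.s_cls +
                tf.exp(-self.s_seg) * seg_loss + self.s_seg)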
Example #8
def get_estimator(epochs=10,
                  batch_size=64,
                  max_len=500,
                  steps_per_epoch=None,
                  validation_steps=None,
                  model_dir=tempfile.mkdtemp()):
    # step 1. prepare data
    (x_train,
     y_train), (x_eval,
                y_eval) = tf.keras.datasets.imdb.load_data(maxlen=max_len,
                                                           num_words=MAX_WORDS)
    data = {
        "train": {
            "x": np.array([pad(x, max_len, 0) for x in x_train]),
            "y": y_train
        },
        "eval": {
            "x": np.array([pad(x, max_len, 0) for x in x_eval]),
            "y": y_eval
        }
    }

    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=data,
                           ops=Reshape([1], inputs="y", outputs="y"))

    # step 2. prepare model
    model = fe.build(model_def=lambda: create_lstm(max_len),
                     model_name="lstm_imdb",
                     optimizer="adam",
                     loss_name="loss")
    network = fe.Network(ops=[
        ModelOp(inputs="x", model=model, outputs="y_pred"),
        BinaryCrossentropy(y_true="y", y_pred="y_pred", outputs="loss")
    ])

    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        ModelSaver(model_name="lstm_imdb", save_dir=model_dir, save_best=True)
    ]
    # step 3. prepare estimator
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             traces=traces,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps)

    return estimator
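
pad and create_lstm are helpers defined elsewhere; a plausible sketch (the MAX_WORDS value, embedding width, and LSTM size are assumptions):

import numpy as np
from tensorflow.keras import Sequential, layers

MAX_WORDS = 10000  # hypothetical vocabulary cap matching the load_data call above

def pad(x, max_len, value=0):
    # Truncate to max_len, then right-pad with `value` up to max_len.
    x = list(x)[:max_len]
    return np.array(x + [value] * (max_len - len(x)))

def create_lstm(max_len):
    return Sequential([
        layers.Embedding(MAX_WORDS, 64, input_length=max_len),
        layers.LSTM(64),
        layers.Dense(1, activation='sigmoid')
    ])

Example #9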
def get_estimator(epochs=50,
                  batch_size=64,
                  steps_per_epoch=None,
                  validation_steps=None,
                  model_dir=tempfile.mkdtemp()):
    # step 1. prepare data
    (x_train, y_train), (x_eval,
                         y_eval) = tf.keras.datasets.cifar10.load_data()
    data = {
        "train": {
            "x": x_train,
            "y": y_train
        },
        "eval": {
            "x": x_eval,
            "y": y_eval
        }
    }
    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=data,
                           ops=Minmax(inputs="x", outputs="x"))
    # step 2. prepare model
    model = fe.build(model_def=DenseNet121_cifar10,
                     model_name="densenet121",
                     optimizer="adam",
                     loss_name="loss")

    network = fe.Network(ops=[
        ModelOp(inputs="x", model=model, outputs="y_pred"),
        SparseCategoricalCrossentropy(
            y_true="y", y_pred="y_pred", outputs="loss")
    ])
    # step 3. prepare estimator
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps,
                             traces=[
                                 Accuracy(true_key="y", pred_key="y_pred"),
                                 ModelSaver(model_name="densenet121",
                                            save_dir=model_dir,
                                            save_best=True),
                                 LRController(model_name="densenet121",
                                              reduce_on_eval=True)
                             ])
    return estimator
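
DenseNet121_cifar10 is defined elsewhere; a plausible wrapper around Keras' stock DenseNet121, re-headed for 10 classes on 32x32 inputs (the pooling choice is an assumption):

import tensorflow as tf

def DenseNet121_cifar10():
    inputs = tf.keras.Input(shape=(32, 32, 3))
    backbone = tf.keras.applications.DenseNet121(weights=None,
                                                 include_top=False,
                                                 input_tensor=inputs,
                                                 pooling='avg')
    outputs = tf.keras.layers.Dense(10, activation='softmax')(backbone.output)
    return tf.keras.Model(inputs=inputs, outputs=outputs)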
Example #10
def get_estimator(batch_size=100,
                  epochs=100,
                  steps_per_epoch=None,
                  validation_steps=None,
                  model_dir=tempfile.mkdtemp()):
    # prepare data
    (x_train, _), (x_eval, _) = tf.keras.datasets.mnist.load_data()
    x_train = x_train.reshape(x_train.shape[0], 28, 28, 1).astype('float32')
    x_eval = x_eval.reshape(x_eval.shape[0], 28, 28, 1).astype('float32')
    data = {"train": {"x": x_train}, "eval": {"x": x_eval}}
    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=data,
                           ops=[
                               Myrescale(inputs="x", outputs="x"),
                               Mybinarize(inputs="x", outputs="x")
                           ])
    # prepare model
    infer_model = fe.build(model_def=inference_net,
                           model_name="encoder",
                           loss_name="loss",
                           optimizer=tf.optimizers.Adam(1e-4))
    gen_model = fe.build(model_def=generative_net,
                         model_name="decoder",
                         loss_name="loss",
                         optimizer=tf.optimizers.Adam(1e-4))

    network = fe.Network(ops=[
        ModelOp(inputs="x", model=infer_model, outputs="meanlogvar",
                mode=None),
        SplitOp(inputs="meanlogvar", outputs=("mean", "logvar"), mode=None),
        ReparameterizepOp(inputs=("mean", "logvar"), outputs="z", mode=None),
        ModelOp(inputs="z", model=gen_model, outputs="x_logit"),
        CVAELoss(inputs=("x", "mean", "logvar", "z", "x_logit"),
                 mode=None,
                 outputs="loss")
    ])
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps,
                             traces=ModelSaver(model_name="decoder",
                                               save_dir=model_dir,
                                               save_best=True))
    return estimator
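
SplitOp, ReparameterizepOp, and CVAELoss are project ops. The reparameterization step is the standard VAE trick; a standalone sketch:

import tensorflow as tf

def reparameterize(mean, logvar):
    # z = mean + sigma * eps keeps the sampling step differentiable.
    eps = tf.random.normal(shape=tf.shape(mean))
    return mean + tf.exp(0.5 * logvar) * eps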
Example #11
def get_estimator(batch_size=4, epochs=1000, steps_per_epoch=1000, validation_steps=None, model_dir=tempfile.mkdtemp(), imagenet_path=None):
    """Args:
        imagenet_path: folder path of ImageNet dataset, containing train and val subdirs .
    """

    assert imagenet_path is not None, 'Pass valid folder path of Imagenet dataset'
    # Ensure ImageNet dataset is downloaded. Pass the folder contianing train and val subdirectories.
    # currently the script doesn't download the ImageNet data.
    train_csv, val_csv, path = srgan.load_data(path_imgnet= imagenet_path)

    writer = fe.RecordWriter(
        save_dir=os.path.join(path, "sr_tfrecords"),
        train_data=train_csv,
        validation_data=val_csv,
        ops=[ImageReader(inputs="lowres", outputs="lowres"), ImageReader(inputs="highres", outputs="highres")],
        compression="GZIP",
        write_feature=['lowres', 'highres'])

    pipeline = fe.Pipeline(
        max_shuffle_buffer_mb=3000,
        batch_size=batch_size,
        data=writer,
        ops=[
            LowresRescale(inputs='lowres', outputs='lowres'),
            Rescale(inputs='highres', outputs='highres'), ])

    # prepare model
    model = fe.build(model_def=lambda: get_generator(input_shape=(24, 24, 3)),
                     model_name="srresnet_gen",
                     optimizer=tf.optimizers.Adam(learning_rate=0.0001),
                     loss_name="mse_loss")
    network = fe.Network(ops=[
        ModelOp(inputs='lowres', model=model, outputs='superres'),
        PixelMeanSquaredError(inputs=('superres', 'highres'), outputs="mse_loss")
    ])

    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             steps_per_epoch=steps_per_epoch,
                             epochs=epochs,
                             traces=[
                                 ModelSaver(model_name="srresnet_gen", save_dir=model_dir, save_best=True), ])
    return estimator
Example #12
def get_estimator(batch_size=128, epochs=15, model_dir=tempfile.mkdtemp()):
    # prepare data on disk
    train_csv, val_csv, path = svhn.load_data()
    writer = RecordWriter(
        save_dir=os.path.join(path, "FEdata"),
        train_data=train_csv,
        validation_data=val_csv,
        ops=[
            ImageReader(inputs="image", parent_path=path, outputs="image"),
            String2List(inputs=["label", "x1", "y1", "x2", "y2"],
                        outputs=["label", "x1", "y1", "x2", "y2"]),
            RelativeCoordinate(inputs=("image", "x1", "y1", "x2", "y2"),
                               outputs=("x1", "y1", "x2", "y2")),
            Resize(inputs="image", target_size=(64, 128), outputs="image"),
            GenerateTarget(inputs=("label", "x1", "y1", "x2", "y2"),
                           outputs=("target_cls", "target_loc"))
        ])
    # prepare pipeline
    pipeline = Pipeline(batch_size=batch_size,
                        data=writer,
                        ops=Minmax(inputs="image", outputs="image"),
                        read_feature=["image", "target_cls", "target_loc"])
    # prepare model
    model = FEModel(
        model_def=lambda: RetinaNet(input_shape=(64, 128, 3), num_classes=10),
        model_name="retinanet",
        optimizer=tf.optimizers.Adam(learning_rate=0.0001))
    network = Network(ops=[
        ModelOp(inputs="image", model=model, outputs=["pred_cls", "pred_loc"]),
        PredictBox(outputs=("cls_selected", "loc_selected", "valid_outputs"),
                   mode="eval"),
        RetinaLoss(inputs=("target_cls", "target_loc", "pred_cls", "pred_loc"),
                   outputs="loss"),
    ])
    # prepare estimator
    estimator = Estimator(network=network,
                          pipeline=pipeline,
                          epochs=epochs,
                          log_steps=20,
                          traces=ModelSaver(model_name="retinanet",
                                            save_dir=model_dir,
                                            save_best=True))
    return estimator
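
RetinaLoss combines a classification term and a box-regression term; the classification side of RetinaNet is the focal loss (Lin et al., 2017). A standalone sketch on per-anchor probabilities, using the paper's default alpha and gamma:

import tensorflow as tf

def focal_loss(y_true, y_pred, alpha=0.25, gamma=2.0, eps=1e-7):
    # Down-weights easy, well-classified anchors so hard ones dominate the loss.
    y_pred = tf.clip_by_value(y_pred, eps, 1.0 - eps)
    p_t = y_true * y_pred + (1.0 - y_true) * (1.0 - y_pred)
    alpha_t = y_true * alpha + (1.0 - y_true) * (1.0 - alpha)
    return tf.reduce_mean(-alpha_t * tf.pow(1.0 - p_t, gamma) * tf.math.log(p_t))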
Example #13
def get_estimator(epochs=50, batch_size=32, model_dir=tempfile.mkdtemp()):
    (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.boston_housing.load_data()

    # step 1. prepare data
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_eval = scaler.transform(x_eval)

    data = {"train": {"x": x_train, "y": y_train}, "eval": {"x": x_eval, "y": y_eval}}
    pipeline = fe.Pipeline(batch_size=batch_size, data=data)

    # step 2. prepare model
    model = FEModel(model_def=create_dnn, model_name="dnn", optimizer="adam")
    network = fe.Network(
        ops=[ModelOp(inputs="x", model=model, outputs="y_pred"), MeanSquaredError(y_true="y", y_pred="y_pred")])

    # step 3. prepare estimator
    traces = [ModelSaver(model_name="dnn", save_dir=model_dir, save_best=True)]
    estimator = fe.Estimator(network=network, pipeline=pipeline, epochs=epochs, log_steps=10, traces=traces)
    return estimator
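
create_dnn is shared by this and the next example; a plausible definition (the layer widths are assumptions, and leaving the input shape implicit lets the same helper serve both the 13-feature and 30-feature datasets):

from tensorflow.keras import Sequential, layers

def create_dnn():
    return Sequential([
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(1)
    ])

Example #14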
def get_estimator(epochs=50,
                  batch_size=32,
                  steps_per_epoch=None,
                  validation_steps=None,
                  model_dir=tempfile.mkdtemp()):
    X, y = load_breast_cancer(return_X_y=True)
    x_train, x_eval, y_train, y_eval = train_test_split(X, y, test_size=0.2)

    # step 1. prepare data
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_eval = scaler.transform(x_eval)
    train_data = {"x": x_train, "y": np.expand_dims(y_train, -1)}
    eval_data = {"x": x_eval, "y": np.expand_dims(y_eval, -1)}
    data = {"train": train_data, "eval": eval_data}
    pipeline = fe.Pipeline(batch_size=batch_size, data=data)

    # step 2. prepare model
    model = fe.build(model_def=create_dnn,
                     model_name="dnn",
                     optimizer="adam",
                     loss_name="loss")
    network = fe.Network(ops=[
        ModelOp(inputs="x", model=model, outputs="y_pred"),
        MeanSquaredError(inputs=("y", "y_pred"), outputs="loss")
    ])

    # step 3. prepare estimator
    traces = [ModelSaver(model_name="dnn", save_dir=model_dir, save_best=True)]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps,
                             log_steps=10,
                             traces=traces)
    return estimator
Example #15
def get_estimator(batch_size=4,
                  epochs=200,
                  steps_per_epoch=1000,
                  validation_steps=None,
                  model_dir=tempfile.mkdtemp(),
                  imagenet_path=None,
                  srresnet_model_path=None):
    """Args:
        imagenet_path: folder path of ImageNet dataset, containing train and val subdirs .
        srresnet_model_path: srresnet model weights, srgan generator gets initialized with the weights.
    """

    assert imagenet_path is not None, 'Pass valid folder path of Imagenet dataset'
    assert srresnet_model_path is not None, 'srresnet model is needed to initialize srgan generator model'
    # Ensure ImageNet dataset is downloaded. Pass the folder contianing train and val subdirectories.
    # currently the script doesn't download the ImageNet data.
    train_csv, val_csv, path = srgan.load_data(path_imgnet=imagenet_path)

    writer = fe.RecordWriter(save_dir=os.path.join(path, "sr_tfrecords"),
                             train_data=train_csv,
                             validation_data=val_csv,
                             ops=[
                                 ImageReader(inputs="lowres",
                                             outputs="lowres"),
                                 ImageReader(inputs="highres",
                                             outputs="highres")
                             ],
                             compression="GZIP",
                             write_feature=['lowres', 'highres'])

    pipeline = fe.Pipeline(max_shuffle_buffer_mb=3000,
                           batch_size=batch_size,
                           data=writer,
                           ops=[
                               LowresRescale(inputs='lowres',
                                             outputs='lowres'),
                               Rescale(inputs='highres', outputs='highres'),
                           ])

    # prepare model
    model_gen = fe.build(model_def=srresnet_model_path,
                         model_name="srgan_gen",
                         optimizer=tf.optimizers.Adam(learning_rate=0.0001),
                         loss_name="mse_adv_loss",
                         custom_objects={'SubPixelConv2D': SubPixelConv2D})
    model_desc = fe.build(
        model_def=lambda: get_discriminator(input_shape=(96, 96, 3)),
        model_name="srgan_desc",
        optimizer=tf.optimizers.Adam(learning_rate=0.0001),
        loss_name="desc_loss")

    network = fe.Network(ops=[
        ModelOp(inputs='lowres', model=model_gen, outputs='superres'),
        ModelOp(inputs='superres', model=model_desc, outputs='pred_fake'),
        ModelOp(inputs='highres', model=model_desc, outputs='pred_true'),
        DLoss(inputs=("pred_true", "pred_fake"),
              outputs=("desc_loss", "real_loss", "fake_loss")),
        GLoss(inputs=('superres', 'highres', 'pred_fake'),
              outputs=("mse_adv_loss", "mse_loss", "adv_loss"),
              vgg_content=True)
    ])

    estimator = fe.Estimator(
        network=network,
        pipeline=pipeline,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        traces=[
            ModelSaver(model_name="srgan_gen",
                       save_dir=model_dir,
                       save_best=True),
            ModelSaver(model_name="srgan_desc",
                       save_dir=model_dir,
                       save_best=True),
            LRController(model_name="srgan_gen",
                         lr_schedule=MyLRSchedule(schedule_mode='step')),
            LRController(model_name="srgan_desc",
                         lr_schedule=MyLRSchedule(schedule_mode='step'))
        ])

    return estimator
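
With vgg_content=True, GLoss above presumably adds an SRGAN-style perceptual term; a sketch of that VGG content loss (the layer choice and the [-1, 1] input range are assumptions):

import tensorflow as tf

_vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')
_features = tf.keras.Model(_vgg.input, _vgg.get_layer('block5_conv4').output)

def vgg_content_loss(superres, highres):
    # Map [-1, 1] images back to [0, 255], apply VGG preprocessing,
    # and compare deep feature maps instead of raw pixels.
    sr = tf.keras.applications.vgg19.preprocess_input((superres + 1.0) * 127.5)
    hr = tf.keras.applications.vgg19.preprocess_input((highres + 1.0) * 127.5)
    return tf.reduce_mean(tf.square(_features(sr) - _features(hr)))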
Example #16
def get_estimator(batch_size=1,
                  epochs=500,
                  steps_per_epoch=128,
                  model_dir=tempfile.mkdtemp(),
                  path_brats=os.path.join(os.getenv('HOME'),
                                          'fastestimator_data', 'BraTs')):
    """Args:
        path_brats: folder path of BraTs 2018 dataset, containing data subdir inturn having LGG and HGG data.
        Expected folder structure path_brats
        path_brats/
        |----------data/
                   |----LGG/
                   |----HGG/
    """
    assert path_brats is not None, 'Pass valid folder path of BraTs 2018 dataset'
    # Ensure Brats 2018 dataset is downloaded. Pass the folder contianing train and val subdirectories.
    # currently the script doesn't download the BraTs data.
    train_csv, val_csv, path_brats = brats.load_data(path_brats=path_brats,
                                                     resized_img_shape=(128,
                                                                        128,
                                                                        128),
                                                     bias_correction=False)
    writer = fe.RecordWriter(save_dir=os.path.join(path_brats, "tfrecords"),
                             train_data=train_csv,
                             validation_data=val_csv,
                             ops=[
                                 NIBImageReader(inputs="mod_img",
                                                outputs="mod_img"),
                                 NIBImageReader(inputs="seg_mask",
                                                outputs="seg_mask")
                             ],
                             compression="GZIP",
                             write_feature=['mod_img', 'seg_mask'])

    pipeline = fe.Pipeline(data=writer,
                           batch_size=batch_size,
                           ops=[
                               SplitMaskLabelwise(inputs="seg_mask",
                                                  outputs="seg_mask"),
                               Augmentation3D(inputs=("mod_img", "seg_mask"),
                                              outputs=("mod_img", "seg_mask"),
                                              mode='train')
                           ])

    model_unet3d_is = fe.build(
        model_def=lambda: UNet3D_Isensee(input_shape=(4, 64, 64, 64)),
        model_name="brats_unet3d_is",
        optimizer=tf.optimizers.Adam(learning_rate=5e-4),
        loss_name="wdice_loss")

    network = fe.Network(ops=[
        ModelOp(
            inputs="mod_img", model=model_unet3d_is, outputs="pred_seg_mask"),
        WeightedDiceLoss(inputs=("seg_mask", "pred_seg_mask"),
                         outputs=("wdice_loss"))
    ])
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             steps_per_epoch=steps_per_epoch,
                             epochs=epochs,
                             traces=[
                                 ModelSaver(model_name="brats_unet3d_is",
                                            save_dir=model_dir,
                                            save_best=True),
                                 LRController(model_name="brats_unet3d_is",
                                              reduce_patience=10,
                                              reduce_factor=0.5,
                                              reduce_on_eval=True,
                                              min_lr=1e-07)
                             ],
                             log_steps=steps_per_epoch)

    return estimator
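
WeightedDiceLoss is a project op; a sketch of a soft multi-class Dice loss over channels-first volumes shaped (N, C, D, H, W), matching the (4, 64, 64, 64) input above (the exact class weighting is an assumption):

import tensorflow as tf

def weighted_dice_loss(y_true, y_pred, eps=1e-5):
    spatial_axes = (2, 3, 4)
    intersection = tf.reduce_sum(y_true * y_pred, axis=spatial_axes)
    denom = tf.reduce_sum(y_true, axis=spatial_axes) + tf.reduce_sum(y_pred, axis=spatial_axes)
    dice_per_class = (2.0 * intersection + eps) / (denom + eps)
    return -tf.reduce_mean(dice_per_class)  # negated so that lower is better

Example #17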
def get_estimator(data_path=None, model_dir=tempfile.mkdtemp(), batch_size=2):
    # prepare dataset
    train_csv, val_csv, path = load_data(path=data_path)
    writer = fe.RecordWriter(
        save_dir=os.path.join(path, "retinanet_coco_1024"),
        train_data=train_csv,
        validation_data=val_csv,
        ops=[
            ImageReader(inputs="image", parent_path=path, outputs="image"),
            String2List(inputs=["x1", "y1", "width", "height", "obj_label"],
                        outputs=["x1", "y1", "width", "height", "obj_label"]),
            ResizeImageAndBbox(
                target_size=(1024, 1024),
                keep_ratio=True,
                inputs=["image", "x1", "y1", "width", "height"],
                outputs=["image", "x1", "y1", "width", "height"]),
            FlipImageAndBbox(inputs=[
                "image", "x1", "y1", "width", "height", "obj_label", "id"
            ],
                             outputs=[
                                 "image", "x1", "y1", "width", "height",
                                 "obj_label", "id"
                             ]),
            GenerateTarget(inputs=("obj_label", "x1", "y1", "width", "height"),
                           outputs=("cls_gt", "x1_gt", "y1_gt", "w_gt",
                                    "h_gt"))
        ],
        expand_dims=True,
        compression="GZIP",
        write_feature=[
            "image", "id", "cls_gt", "x1_gt", "y1_gt", "w_gt", "h_gt",
            "obj_label", "x1", "y1", "width", "height"
        ])
    # prepare pipeline
    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=writer,
                           ops=[
                               Rescale(inputs="image", outputs="image"),
                               Pad(padded_shape=[2051],
                                   inputs=[
                                       "x1_gt", "y1_gt", "w_gt", "h_gt",
                                       "obj_label", "x1", "y1", "width",
                                       "height"
                                   ],
                                   outputs=[
                                       "x1_gt", "y1_gt", "w_gt", "h_gt",
                                       "obj_label", "x1", "y1", "width",
                                       "height"
                                   ])
                           ])
    # prepare network
    model = fe.build(model_def=lambda: RetinaNet(input_shape=(1024, 1024, 3),
                                                 num_classes=90),
                     model_name="retinanet",
                     optimizer=tf.optimizers.SGD(momentum=0.9),
                     loss_name="total_loss")
    network = fe.Network(ops=[
        ModelOp(inputs="image", model=model, outputs=["cls_pred", "loc_pred"]),
        RetinaLoss(inputs=("cls_gt", "x1_gt", "y1_gt", "w_gt", "h_gt",
                           "cls_pred", "loc_pred"),
                   outputs=("total_loss", "focal_loss", "l1_loss")),
        PredictBox(inputs=[
            "cls_pred", "loc_pred", "obj_label", "x1", "y1", "width", "height"
        ],
                   outputs=("pred", "gt"),
                   mode="eval",
                   input_shape=(1024, 1024, 3))
    ])
    # prepare estimator
    estimator = fe.Estimator(
        network=network,
        pipeline=pipeline,
        epochs=7,
        traces=[
            MeanAvgPrecision(90, (1024, 1024, 3),
                             'pred',
                             'gt',
                             output_name=("mAP", "AP50", "AP75")),
            ModelSaver(model_name="retinanet",
                       save_dir=model_dir,
                       save_best='mAP',
                       save_best_mode='max'),
            LRController(model_name="retinanet",
                         lr_schedule=MyLRSchedule(schedule_mode="step"))
        ])
    return estimator
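
MyLRSchedule(schedule_mode="step") is defined elsewhere; a generic sketch of a step-decay schedule (the boundaries and decay factor here are hypothetical):

def step_lr(step, base_lr=0.01, boundaries=(60000, 80000), factor=0.1):
    # Piecewise-constant decay: scale the rate down at each boundary.
    lr = base_lr
    for boundary in boundaries:
        if step >= boundary:
            lr *= factor
    return lr

Example #18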
def get_estimator(weight=10.0, epochs=200, model_dir=tempfile.mkdtemp()):
    trainA_csv, trainB_csv, _, _, parent_path = load_data()
    tfr_save_dir = os.path.join(parent_path, 'FEdata')
    # Step 1: Define Pipeline
    writer = RecordWriter(train_data=(trainA_csv, trainB_csv),
                          save_dir=tfr_save_dir,
                          ops=([
                              ImageReader(inputs="imgA",
                                          outputs="imgA",
                                          parent_path=parent_path)
                          ], [
                              ImageReader(inputs="imgB",
                                          outputs="imgB",
                                          parent_path=parent_path)
                          ]))

    pipeline = fe.Pipeline(data=writer,
                           batch_size=1,
                           ops=[
                               Myrescale(inputs="imgA", outputs="imgA"),
                               RandomJitter(inputs="imgA", outputs="real_A"),
                               Myrescale(inputs="imgB", outputs="imgB"),
                               RandomJitter(inputs="imgB", outputs="real_B")
                           ])
    # Step 2: Define Network
    g_AtoB = FEModel(model_def=build_generator,
                     model_name="g_AtoB",
                     loss_name="g_AtoB_loss",
                     optimizer=tf.keras.optimizers.Adam(2e-4, 0.5))

    g_BtoA = FEModel(model_def=build_generator,
                     model_name="g_BtoA",
                     loss_name="g_BtoA_loss",
                     optimizer=tf.keras.optimizers.Adam(2e-4, 0.5))

    d_A = FEModel(model_def=build_discriminator,
                  model_name="d_A",
                  loss_name="d_A_loss",
                  optimizer=tf.keras.optimizers.Adam(2e-4, 0.5))

    d_B = FEModel(model_def=build_discriminator,
                  model_name="d_B",
                  loss_name="d_B_loss",
                  optimizer=tf.keras.optimizers.Adam(2e-4, 0.5))

    network = fe.Network(ops=[
        ModelOp(inputs="real_A", model=g_AtoB, outputs="fake_B"),
        ModelOp(inputs="real_B", model=g_BtoA, outputs="fake_A"),
        ModelOp(inputs="real_A", model=d_A, outputs="d_real_A"),
        ModelOp(inputs="fake_A", model=d_A, outputs="d_fake_A"),
        ModelOp(inputs="real_B", model=d_B, outputs="d_real_B"),
        ModelOp(inputs="fake_B", model=d_B, outputs="d_fake_B"),
        ModelOp(inputs="real_A", model=g_BtoA, outputs="same_A"),
        ModelOp(inputs="fake_B", model=g_BtoA, outputs="cycled_A"),
        ModelOp(inputs="real_B", model=g_AtoB, outputs="same_B"),
        ModelOp(inputs="fake_A", model=g_AtoB, outputs="cycled_B"),
        GLoss(inputs=("real_A", "d_fake_B", "cycled_A", "same_A"),
              weight=weight,
              outputs="g_AtoB_loss"),
        GLoss(inputs=("real_B", "d_fake_A", "cycled_B", "same_B"),
              weight=weight,
              outputs="g_BtoA_loss"),
        DLoss(inputs=("d_real_A", "d_fake_A"), outputs="d_A_loss"),
        DLoss(inputs=("d_real_B", "d_fake_B"), outputs="d_B_loss")
    ])
    # Step 3: Define Estimator
    traces = [
        ModelSaver(model_name="g_AtoB", save_dir=model_dir, save_freq=10),
        ModelSaver(model_name="g_BtoA", save_dir=model_dir, save_freq=10)
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             traces=traces)
    return estimator
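
GLoss here bundles the CycleGAN generator objective. Assuming the standard formulation, with `weight` as the cycle-consistency coefficient and the conventional half-weight identity term:

import tensorflow as tf

def cycle_generator_loss(real, d_fake, cycled, same, weight=10.0):
    bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    adv = bce(tf.ones_like(d_fake), d_fake)         # fool the opposite discriminator
    cycle = tf.reduce_mean(tf.abs(real - cycled))   # A -> B -> A should reconstruct A
    identity = tf.reduce_mean(tf.abs(real - same))  # feeding G its own target domain should be a no-op
    return adv + weight * cycle + 0.5 * weight * identity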
Example #19
def get_estimator(epochs=200, batch_size=128, steps_per_epoch=500, validation_steps=100, model_dir=tempfile.mkdtemp()):
    # step 1. prepare pipeline
    train_path, eval_path = load_data()
    data = {
        "train": lambda: get_batch(train_path, batch_size=batch_size),
        "eval": lambda: get_batch(eval_path, batch_size=batch_size, is_train=False)
    }
    pipeline = fe.Pipeline(
        data=data,
        batch_size=batch_size,
        ops=[
            Probability(
                tensor_op=Augmentation2D(inputs="x_a",
                                         outputs="x_a",
                                         mode="train",
                                         rotation_range=10.0,
                                         shear_range=-0.3 * 180 / np.pi,
                                         zoom_range=[0.8, 1.2],
                                         width_shift_range=0.05,
                                         height_shift_range=0.05),
                prob=0.89),
            Probability(
                tensor_op=Augmentation2D(inputs="x_b",
                                         outputs="x_b",
                                         mode="train",
                                         rotation_range=10.0,
                                         shear_range=-0.3 * 180 / np.pi,
                                         zoom_range=[0.8, 1.2],
                                         width_shift_range=0.05,
                                         height_shift_range=0.05),
                prob=0.89),
            Minmax(inputs="x_a", outputs="x_a"),
            Minmax(inputs="x_b", outputs="x_b")
        ])

    # step 2. prepare model
    model = fe.build(model_def=siamese_network,
                     model_name="siamese_net",
                     optimizer=Adam(learning_rate=1e-4),
                     loss_name="loss")

    network = fe.Network(ops=[
        ModelOp(inputs=["x_a", "x_b"], model=model, outputs="y_pred"),
        BinaryCrossentropy(inputs=("y", "y_pred"), outputs="loss")
    ])

    # Defining learning rate schedule
    lr_scheduler = LRDecaySchedule(decay_rate=0.99)

    # Loading images for validation one-shot accuracy
    val_img_list = load_eval_data(path=eval_path, is_test=False)

    # step 3. prepare estimator
    traces = [
        LRController(model_name="siamese_net", lr_schedule=lr_scheduler),
        OneShotAccuracy(model=model, img_list=val_img_list, output_name='one_shot_accuracy'),
        ModelSaver(model_name="siamese_net", save_dir=model_dir, save_best='one_shot_accuracy', save_best_mode='max'),
        EarlyStopping(monitor="one_shot_accuracy", patience=20, compare='max', mode="eval")
    ]

    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             traces=traces,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps)
    return estimator
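
siamese_network is a project helper; a plausible tf.keras sketch in the style of Koch et al.'s one-shot siamese net (the 105x105 Omniglot input size and the layer widths are assumptions):

import tensorflow as tf
from tensorflow.keras import Input, Model, Sequential, layers

def siamese_network(input_shape=(105, 105, 1)):
    encoder = Sequential([
        layers.Conv2D(64, 10, activation='relu', input_shape=input_shape),
        layers.MaxPooling2D(),
        layers.Conv2D(128, 7, activation='relu'),
        layers.MaxPooling2D(),
        layers.Flatten(),
        layers.Dense(2048, activation='sigmoid')
    ])
    x_a, x_b = Input(input_shape), Input(input_shape)
    # L1 distance between the twin embeddings feeds a same/different head.
    dist = layers.Lambda(lambda t: tf.abs(t[0] - t[1]))([encoder(x_a), encoder(x_b)])
    out = layers.Dense(1, activation='sigmoid')(dist)
    return Model(inputs=[x_a, x_b], outputs=out)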
Example #20
def get_estimator(epochs=10,
                  batch_size=32,
                  epsilon=0.01,
                  warmup=0,
                  model_dir=tempfile.mkdtemp()):
    (x_train, y_train), (x_eval,
                         y_eval) = tf.keras.datasets.cifar10.load_data()
    data = {
        "train": {
            "x": x_train,
            "y": y_train
        },
        "eval": {
            "x": x_eval,
            "y": y_eval
        }
    }
    num_classes = 10

    pipeline = Pipeline(batch_size=batch_size,
                        data=data,
                        ops=Minmax(inputs="x", outputs="x"))

    model = FEModel(model_def=lambda: LeNet(input_shape=x_train.shape[1:],
                                            classes=num_classes),
                    model_name="LeNet",
                    optimizer="adam")

    adv_img = {
        warmup:
        AdversarialSample(inputs=("loss", "x"),
                          outputs="x_adverse",
                          epsilon=epsilon,
                          mode="train")
    }
    adv_eval = {
        warmup:
        ModelOp(inputs="x_adverse",
                model=model,
                outputs="y_pred_adverse",
                mode="train")
    }
    adv_loss = {
        warmup:
        SparseCategoricalCrossentropy(y_true="y",
                                      y_pred="y_pred_adverse",
                                      outputs="adverse_loss",
                                      mode="train")
    }
    adv_avg = {
        warmup:
        Average(inputs=("loss", "adverse_loss"), outputs="loss", mode="train")
    }

    network = Network(ops=[
        ModelOp(inputs="x", model=model, outputs="y_pred", track_input=True),
        SparseCategoricalCrossentropy(
            y_true="y", y_pred="y_pred", outputs="loss"),
        Scheduler(adv_img),
        Scheduler(adv_eval),
        Scheduler(adv_loss),
        Scheduler(adv_avg)
    ])

    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        ConfusionMatrix(true_key="y",
                        pred_key="y_pred",
                        num_classes=num_classes),
        ModelSaver(model_name="LeNet", save_dir=model_dir, save_freq=2)
    ]

    estimator = Estimator(network=network,
                          pipeline=pipeline,
                          epochs=epochs,
                          traces=traces)

    return estimator
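
AdversarialSample, applied with track_input=True on the ModelOp, implements FGSM-style adversarial training. A standalone TensorFlow sketch of the attack itself (the [0, 1] clipping range is an assumption matching the Minmax pipeline op):

import tensorflow as tf

def fgsm(model, x, y, epsilon=0.01):
    x = tf.convert_to_tensor(x)
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
    with tf.GradientTape() as tape:
        tape.watch(x)
        loss = loss_fn(y, model(x))
    grad = tape.gradient(loss, x)
    # Step along the sign of the input gradient, then stay in the valid pixel range.
    return tf.clip_by_value(x + epsilon * tf.sign(grad), 0.0, 1.0)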