Example #1
def get_estimator(batch_size=4, epochs=25, model_dir=tempfile.mkdtemp()):
    # load the Montgomery chest X-ray dataset (images plus left/right lung masks)
    csv_path, path = montgomery.load_data()
    # preprocess once and write the images and combined masks to TFRecords
    writer = RecordWriter(save_dir=os.path.join(path, "FEdata"),
                          train_data=csv_path,
                          validation_data=0.2,
                          ops=[
                              ImageReader(grey_scale=True,
                                          inputs="image",
                                          parent_path=path,
                                          outputs="image"),
                              ImageReader(grey_scale=True,
                                          inputs="mask_left",
                                          parent_path=path,
                                          outputs="mask_left"),
                              ImageReader(grey_scale=True,
                                          inputs="mask_right",
                                          parent_path=path,
                                          outputs="mask_right"),
                              CombineLeftRightMask(inputs=("mask_left",
                                                           "mask_right")),
                              Resize(target_size=(512, 512)),
                              Reshape(shape=(512, 512, 1), outputs="mask"),
                              Resize(inputs="image", target_size=(512, 512)),
                              Reshape(shape=(512, 512, 1), outputs="image"),
                          ],
                          write_feature=["image", "mask"])

    # in-memory pipeline: train-time augmentation plus per-feature normalization
    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=writer,
                           ops=[
                               Augmentation2D(inputs=["image", "mask"],
                                              outputs=["image", "mask"],
                                              mode="train",
                                              rotation_range=10,
                                              flip_left_right=True),
                               Minmax(inputs="image", outputs="image"),
                               Minmax(inputs="mask", outputs="mask")
                           ])

    # network: a single U-Net trained with pixel-wise binary cross-entropy
    model = FEModel(model_def=lambda: UNet(input_size=(512, 512, 1)),
                    model_name="lungsegmentation",
                    optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001))

    network = fe.Network(ops=[
        ModelOp(inputs="image", model=model, outputs="pred_segment"),
        BinaryCrossentropy(y_true="mask", y_pred="pred_segment")
    ])

    # estimator: track Dice during evaluation and keep the best checkpoint
    traces = [
        Dice(true_key="mask", pred_key="pred_segment"),
        ModelSaver(model_name="lungsegmentation",
                   save_dir=model_dir,
                   save_best=True)
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             log_steps=20,
                             traces=traces)
    return estimator
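
The returned Estimator drives the whole run. A minimal launcher in the style of the FastEstimator example scripts (assuming the function lives in a module with the necessary imports at the top):

if __name__ == "__main__":
    est = get_estimator(batch_size=4, epochs=25)
    est.fit()  # TFRecord writing happens on first use, then training with per-epoch evaluation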
Example #2
def get_estimator(batch_size=32, epochs=25, model_dir=tempfile.mkdtemp()):
    # load CUB200 dataset.
    csv_path, path = cub200.load_data()
    writer = RecordWriter(
        save_dir=os.path.join(path, "FEdata"),
        train_data=csv_path,
        validation_data=0.2,
        ops=[
            ImageReader(inputs='image', parent_path=path),
            Resize(target_size=(128, 128), keep_ratio=True, outputs='image'),
            MatReader(inputs='annotation', parent_path=path),
            SelectDictKey(),
            Resize((128, 128), keep_ratio=True),
            Reshape(shape=(128, 128, 1), outputs="annotation")
        ])
    # data pipeline
    pipeline = fe.Pipeline(batch_size=batch_size, data=writer, ops=Minmax(inputs='image', outputs='image'))

    # Network
    model = FEModel(model_def=UNet, model_name="unet_cub", optimizer=tf.optimizers.Adam())
    network = fe.Network(ops=[
        ModelOp(inputs='image', model=model, outputs='mask_pred'),
        BinaryCrossentropy(y_true='annotation', y_pred='mask_pred')
    ])

    # estimator
    traces = [
        Dice(true_key="annotation", pred_key='mask_pred'),
        ModelSaver(model_name="unet_cub", save_dir=model_dir, save_best=True)
    ]
    estimator = fe.Estimator(network=network, pipeline=pipeline, traces=traces, epochs=epochs, log_steps=50)
    return estimator
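
The Dice trace scores the overlap between the predicted and reference masks. For reference, a minimal NumPy sketch of the Dice coefficient it reports (the 0.5 binarization threshold and the smoothing epsilon are illustrative assumptions, not values read from the trace):

import numpy as np

def dice_coefficient(mask_true, mask_pred, threshold=0.5, eps=1e-7):
    # Dice = 2*|A ∩ B| / (|A| + |B|), computed on binarized masks
    a = (np.asarray(mask_true) > threshold).astype(np.float32)
    b = (np.asarray(mask_pred) > threshold).astype(np.float32)
    intersection = np.sum(a * b)
    return (2.0 * intersection + eps) / (np.sum(a) + np.sum(b) + eps)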
Example #3
def get_estimator(batch_size=8, epochs=25, steps_per_epoch=None, validation_steps=None, model_dir=tempfile.mkdtemp()):
    # load CUB200 dataset.
    csv_path, path = cub200.load_data()
    writer = RecordWriter(
        save_dir=os.path.join(path, "tfrecords"),
        train_data=csv_path,
        validation_data=0.2,
        ops=[
            ImageReader(inputs='image', parent_path=path),
            Resize(target_size=(512, 512), keep_ratio=True, outputs='image'),
            MatReader(inputs='annotation', parent_path=path),
            SelectDictKey(),
            Resize((512, 512), keep_ratio=True),
            Reshape(shape=(512, 512, 1), outputs="annotation")
        ])
    # step 1: pipeline
    pipeline = fe.Pipeline(
        batch_size=batch_size,
        data=writer,
        ops=[
            Augmentation2D(inputs=("image", "annotation"),
                           outputs=("image", "annotation"),
                           mode="train",
                           rotation_range=15.0,
                           zoom_range=[0.8, 1.2],
                           flip_left_right=True),
            Rescale(inputs='image', outputs='image')
        ])
    # step 2: network
    opt = tf.optimizers.Adam(learning_rate=0.0001)
    resunet50 = fe.build(model_def=ResUnet50, model_name="resunet50", optimizer=opt, loss_name="total_loss")
    uncertainty = fe.build(model_def=UncertaintyLoss, model_name="uncertainty", optimizer=opt, loss_name="total_loss")
    network = fe.Network(ops=[
        ModelOp(inputs='image', model=resunet50, outputs=["label_pred", "mask_pred"]),
        SparseCategoricalCrossentropy(inputs=["label", "label_pred"], outputs="cls_loss"),
        BinaryCrossentropy(inputs=["annotation", "mask_pred"], outputs="seg_loss"),
        ModelOp(inputs=("cls_loss", "seg_loss"), model=uncertainty, outputs="total_loss"),
        Loss(inputs="total_loss", outputs="total_loss")
    ])
    # step 3: estimator
    traces = [
        Dice(true_key="annotation", pred_key='mask_pred'),
        Accuracy(true_key="label", pred_key="label_pred"),
        ModelSaver(model_name="resunet50", save_dir=model_dir, save_best=True),
        LRController(model_name="resunet50", lr_schedule=CyclicLRSchedule())
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             traces=traces,
                             epochs=epochs,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps)
    return estimator
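
Here the two task losses are merged into total_loss by a trainable model rather than by fixed weights. A plausible reading, sketched below purely as an assumption (UncertaintyLoss itself is not shown on this page), is the homoscedastic-uncertainty weighting of Kendall et al. (2018), where each task loss is scaled by a learned log-variance:

import tensorflow as tf

class UncertaintyWeightedLoss(tf.keras.Model):
    # total = exp(-s1)*cls_loss + s1 + exp(-s2)*seg_loss + s2,
    # with s1, s2 learned log-variances, one per task
    def __init__(self):
        super().__init__()
        self.s1 = tf.Variable(0.0, trainable=True)
        self.s2 = tf.Variable(0.0, trainable=True)

    def call(self, losses):
        cls_loss, seg_loss = losses
        return (tf.exp(-self.s1) * cls_loss + self.s1 +
                tf.exp(-self.s2) * seg_loss + self.s2)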
Example #4
def get_estimator(style_img_path=None,
                  data_path=None,
                  style_weight=5.0,
                  content_weight=1.0,
                  tv_weight=1e-4,
                  steps_per_epoch=None,
                  validation_steps=None,
                  model_dir=tempfile.mkdtemp()):
    # load the content-image dataset: a CSV of image paths plus their root directory
    train_csv, _, path = load_data(data_path, load_object=False)
    if style_img_path is None:
        style_img_path = tf.keras.utils.get_file(
            'kandinsky.jpg',
            'https://storage.googleapis.com/download.tensorflow.org/example_images/Vassily_Kandinsky%2C_1913_'
            '-_Composition_7.jpg')
    style_img = cv2.imread(style_img_path)
    assert (style_img is not None), "Invalid style reference image"
    tfr_save_dir = os.path.join(path, 'tfrecords')
    style_img = (style_img.astype(np.float32) - 127.5) / 127.5  # scale pixels to [-1, 1]
    style_img_t = tf.convert_to_tensor(np.expand_dims(style_img, axis=0))
    writer = RecordWriter(train_data=train_csv,
                          save_dir=tfr_save_dir,
                          ops=[
                              ImageReader(inputs="image",
                                          parent_path=path,
                                          outputs="image"),
                              Resize(inputs="image",
                                     target_size=(256, 256),
                                     outputs="image")
                          ])

    pipeline = fe.Pipeline(batch_size=4,
                           data=writer,
                           ops=[Rescale(inputs="image", outputs="image")])

    model = fe.build(model_def=styleTransferNet,
                     model_name="style_transfer_net",
                     loss_name="loss",
                     optimizer=tf.keras.optimizers.Adam(1e-3))

    network = fe.Network(ops=[
        ModelOp(inputs="image", model=model, outputs="image_out"),
        ExtractVGGFeatures(inputs=lambda: style_img_t, outputs="y_style"),
        ExtractVGGFeatures(inputs="image", outputs="y_content"),
        ExtractVGGFeatures(inputs="image_out", outputs="y_pred"),
        StyleContentLoss(style_weight=style_weight,
                         content_weight=content_weight,
                         tv_weight=tv_weight,
                         inputs=('y_pred', 'y_style', 'y_content',
                                 'image_out'),
                         outputs='loss')
    ])

    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=2,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps,
                             traces=ModelSaver(model_name="style_transfer_net",
                                               save_dir=model_dir))
    return estimator
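
StyleContentLoss compares VGG activations of the stylized output against the style reference and the content image. The style term in this family of losses is conventionally built from Gram matrices of the feature maps; a sketch of that building block (the exact weighting inside StyleContentLoss is not visible here):

import tensorflow as tf

def gram_matrix(features):
    # features: (batch, height, width, channels) activation map
    shape = tf.shape(features)
    b, h, w, c = shape[0], shape[1], shape[2], shape[3]
    flat = tf.reshape(features, (b, h * w, c))
    # (batch, c, c) channel correlations, normalized by spatial size
    return tf.matmul(flat, flat, transpose_a=True) / tf.cast(h * w, features.dtype)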
Example #5
def get_estimator(batch_size=128, epochs=15, model_dir=tempfile.mkdtemp()):
    # prepare data on disk
    train_csv, val_csv, path = svhn.load_data()
    writer = RecordWriter(
        save_dir=os.path.join(path, "FEdata"),
        train_data=train_csv,
        validation_data=val_csv,
        ops=[
            ImageReader(inputs="image", parent_path=path, outputs="image"),
            String2List(inputs=["label", "x1", "y1", "x2", "y2"],
                        outputs=["label", "x1", "y1", "x2", "y2"]),
            RelativeCoordinate(inputs=("image", "x1", "y1", "x2", "y2"),
                               outputs=("x1", "y1", "x2", "y2")),
            Resize(inputs="image", target_size=(64, 128), outputs="image"),
            GenerateTarget(inputs=("label", "x1", "y1", "x2", "y2"),
                           outputs=("target_cls", "target_loc"))
        ])
    # prepare pipeline
    pipeline = Pipeline(batch_size=batch_size,
                        data=writer,
                        ops=Minmax(inputs="image", outputs="image"),
                        read_feature=["image", "target_cls", "target_loc"])
    # prepare model
    model = FEModel(
        model_def=lambda: RetinaNet(input_shape=(64, 128, 3), num_classes=10),
        model_name="retinanet",
        optimizer=tf.optimizers.Adam(learning_rate=0.0001))
    network = Network(ops=[
        ModelOp(inputs="image", model=model, outputs=["pred_cls", "pred_loc"]),
        PredictBox(outputs=("cls_selected", "loc_selected", "valid_outputs"),
                   mode="eval"),
        RetinaLoss(inputs=("target_cls", "target_loc", "pred_cls", "pred_loc"),
                   outputs="loss"),
    ])
    # prepare estimator
    estimator = Estimator(network=network,
                          pipeline=pipeline,
                          epochs=epochs,
                          log_steps=20,
                          traces=ModelSaver(model_name="retinanet",
                                            save_dir=model_dir,
                                            save_best=True))
    return estimator
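
RetinaLoss combines a classification term with a box-regression term; in RetinaNet the classification term is the focal loss of Lin et al. A minimal sketch of that term under the paper's defaults (alpha=0.25, gamma=2.0; this op's actual settings are not shown here):

import tensorflow as tf

def focal_loss(y_true, y_pred, alpha=0.25, gamma=2.0):
    # -alpha_t * (1 - p_t)^gamma * log(p_t), where p_t is the probability
    # assigned to the true class; the (1 - p_t)^gamma factor down-weights easy examples
    p_t = tf.where(tf.equal(y_true, 1.0), y_pred, 1.0 - y_pred)
    alpha_t = tf.where(tf.equal(y_true, 1.0), alpha, 1.0 - alpha)
    p_t = tf.clip_by_value(p_t, 1e-7, 1.0)
    return -alpha_t * tf.pow(1.0 - p_t, gamma) * tf.math.log(p_t)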