Example #1: lung segmentation on the Montgomery chest X-ray dataset with a U-Net
def get_estimator(batch_size=4, epochs=25, model_dir=tempfile.mkdtemp()):
    csv_path, path = montgomery.load_data()
    # write preprocessed images and combined lung masks to TFRecords on disk
    writer = RecordWriter(save_dir=os.path.join(path, "FEdata"),
                          train_data=csv_path,
                          validation_data=0.2,
                          ops=[
                              ImageReader(grey_scale=True,
                                          inputs="image",
                                          parent_path=path,
                                          outputs="image"),
                              ImageReader(grey_scale=True,
                                          inputs="mask_left",
                                          parent_path=path,
                                          outputs="mask_left"),
                              ImageReader(grey_scale=True,
                                          inputs="mask_right",
                                          parent_path=path,
                                          outputs="mask_right"),
                              CombineLeftRightMask(inputs=("mask_left",
                                                           "mask_right")),
                              Resize(target_size=(512, 512)),
                              Reshape(shape=(512, 512, 1), outputs="mask"),
                              Resize(inputs="image", target_size=(512, 512)),
                              Reshape(shape=(512, 512, 1), outputs="image"),
                          ],
                          write_feature=["image", "mask"])

    # batch-level preprocessing with train-time augmentation
    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=writer,
                           ops=[
                               Augmentation2D(inputs=["image", "mask"],
                                              outputs=["image", "mask"],
                                              mode="train",
                                              rotation_range=10,
                                              flip_left_right=True),
                               Minmax(inputs="image", outputs="image"),
                               Minmax(inputs="mask", outputs="mask")
                           ])

    model = FEModel(model_def=lambda: UNet(input_size=(512, 512, 1)),
                    model_name="lungsegmentation",
                    optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001))

    network = fe.Network(ops=[
        ModelOp(inputs="image", model=model, outputs="pred_segment"),
        BinaryCrossentropy(y_true="mask", y_pred="pred_segment")
    ])

    traces = [
        Dice(true_key="mask", pred_key="pred_segment"),
        ModelSaver(model_name="lungsegmentation",
                   save_dir=model_dir,
                   save_best=True)
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             log_steps=20,
                             traces=traces)
    return estimator
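
All of these snippets assume the FastEstimator 0.x imports they reference (fastestimator as fe, the pipeline/network ops, the traces, and the dataset modules). Under that assumption, a minimal driver would be:

import tempfile

estimator = get_estimator(batch_size=4, epochs=25, model_dir=tempfile.mkdtemp())
estimator.fit()  # Estimator.fit() runs the train/eval loop and fires the traces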
Example #2: CIFAR-10 classification with LeNet, scheduling MixUp after a warmup period
def get_estimator(epochs=10, batch_size=32, alpha=1.0, warmup=0, model_dir=tempfile.mkdtemp()):
    (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.cifar10.load_data()
    data = {"train": {"x": x_train, "y": y_train}, "eval": {"x": x_eval, "y": y_eval}}
    num_classes = 10
    pipeline = fe.Pipeline(batch_size=batch_size, data=data, ops=Minmax(inputs="x", outputs="x"))

    model = FEModel(model_def=lambda: LeNet(input_shape=x_train.shape[1:], classes=num_classes),
                    model_name="LeNet",
                    optimizer="adam")

    # Scheduler keys are epoch numbers: MixUp turns on at epoch `warmup`
    mixup_map = {warmup: MixUpBatch(inputs="x", outputs=["x", "lambda"], alpha=alpha, mode="train")}
    mixup_loss = {
        0: SparseCategoricalCrossentropy(y_true="y", y_pred="y_pred", mode="train"),
        warmup: MixUpLoss(KerasCrossentropy(), lam="lambda", y_true="y", y_pred="y_pred", mode="train")
    }
    network = fe.Network(ops=[
        Scheduler(mixup_map),
        ModelOp(inputs="x", model=model, outputs="y_pred"),
        Scheduler(mixup_loss),
        SparseCategoricalCrossentropy(y_true="y", y_pred="y_pred", mode="eval")
    ])

    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        ConfusionMatrix(true_key="y", pred_key="y_pred", num_classes=num_classes),
        ModelSaver(model_name="LeNet", save_dir=model_dir, save_best=True)
    ]

    estimator = fe.Estimator(network=network, pipeline=pipeline, epochs=epochs, traces=traces)
    return estimator
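
For reference, the MixUp idea itself is small; an illustrative NumPy version (not the MixUpBatch op) mixes each image with a random partner using a Beta(alpha, alpha) coefficient, which the loss then reuses as "lambda":

import numpy as np

def mixup_batch(x, alpha=1.0, rng=None):
    if rng is None:
        rng = np.random.default_rng()
    lam = rng.beta(alpha, alpha)      # mixing coefficient
    perm = rng.permutation(len(x))    # random partner for each image
    return lam * x + (1 - lam) * x[perm], lam

x = np.random.rand(8, 32, 32, 3).astype("float32")
x_mixed, lam = mixup_batch(x)         # lam is what MixUpLoss consumes as "lambda"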
Example #3: MNIST classification with LeNet
def get_estimator(epochs=2, batch_size=32, model_dir=tempfile.mkdtemp()):
    # step 1. prepare data
    (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.mnist.load_data()
    train_data = {"x": np.expand_dims(x_train, -1), "y": y_train}
    eval_data = {"x": np.expand_dims(x_eval, -1), "y": y_eval}
    data = {"train": train_data, "eval": eval_data}
    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=data,
                           ops=Minmax(inputs="x", outputs="x"))

    # step 2. prepare model
    model = fe.FEModel(model_def=LeNet, model_name="lenet", optimizer="adam")
    network = fe.Network(ops=[
        ModelOp(inputs="x", model=model, outputs="y_pred"),
        SparseCategoricalCrossentropy(inputs=("y", "y_pred"))
    ])

    # step 3. prepare estimator
    traces = [
        Accuracy(true_key="y", pred_key="y_pred", output_name='acc'),
        ModelSaver(model_name="lenet", save_dir=model_dir, save_best=True)
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             traces=traces)
    return estimator
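
The LeNet used above is imported by the examples rather than defined here; one plausible (hypothetical) Keras definition matching the call sites:

from tensorflow.keras import Sequential, layers

def LeNet(input_shape=(28, 28, 1), classes=10):
    # classic conv-pool stack ending in a softmax over `classes`
    return Sequential([
        layers.Conv2D(32, 3, activation="relu", input_shape=input_shape),
        layers.MaxPooling2D(),
        layers.Conv2D(64, 3, activation="relu"),
        layers.MaxPooling2D(),
        layers.Flatten(),
        layers.Dense(64, activation="relu"),
        layers.Dense(classes, activation="softmax"),
    ])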
Example #4: bird segmentation on the CUB200 dataset with a U-Net
def get_estimator(batch_size=32, epochs=25, model_dir=tempfile.mkdtemp()):
    # load CUB200 dataset.
    csv_path, path = cub200.load_data()
    writer = RecordWriter(
        save_dir=os.path.join(path, "FEdata"),
        train_data=csv_path,
        validation_data=0.2,
        ops=[
            ImageReader(inputs='image', parent_path=path),
            Resize(target_size=(128, 128), keep_ratio=True, outputs='image'),
            MatReader(inputs='annotation', parent_path=path),
            SelectDictKey(),
            Resize((128, 128), keep_ratio=True),
            Reshape(shape=(128, 128, 1), outputs="annotation")
        ])
    # data pipeline
    pipeline = fe.Pipeline(batch_size=batch_size, data=writer, ops=Minmax(inputs='image', outputs='image'))

    # Network
    model = FEModel(model_def=UNet, model_name="unet_cub", optimizer=tf.optimizers.Adam())
    network = fe.Network(ops=[
        ModelOp(inputs='image', model=model, outputs='mask_pred'),
        BinaryCrossentropy(y_true='annotation', y_pred='mask_pred')
    ])

    # estimator
    traces = [
        Dice(true_key="annotation", pred_key='mask_pred'),
        ModelSaver(model_name="unet_cub", save_dir=model_dir, save_best=True)
    ]
    estimator = fe.Estimator(network=network, pipeline=pipeline, traces=traces, epochs=epochs, log_steps=50)
    return estimator
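
The Dice trace reports mask overlap between ground truth and prediction; conceptually (an illustrative NumPy version, not FastEstimator's implementation):

import numpy as np

def dice(mask_true, mask_pred, threshold=0.5, eps=1e-7):
    pred = (mask_pred > threshold).astype("float32")
    intersection = np.sum(mask_true * pred)
    return (2.0 * intersection + eps) / (np.sum(mask_true) + np.sum(pred) + eps)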
Example #5: CIFAR-10 classification with DenseNet121 and learning-rate control
def get_estimator(epochs=50,
                  batch_size=64,
                  steps_per_epoch=None,
                  validation_steps=None,
                  model_dir=tempfile.mkdtemp()):
    # step 1. prepare data
    (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.cifar10.load_data()
    data = {
        "train": {
            "x": x_train,
            "y": y_train
        },
        "eval": {
            "x": x_eval,
            "y": y_eval
        }
    }
    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=data,
                           ops=Minmax(inputs="x", outputs="x"))
    # step 2. prepare model
    model = fe.build(model_def=DenseNet121_cifar10,
                     model_name="densenet121",
                     optimizer="adam",
                     loss_name="loss")

    network = fe.Network(ops=[
        ModelOp(inputs="x", model=model, outputs="y_pred"),
        SparseCategoricalCrossentropy(
            y_true="y", y_pred="y_pred", outputs="loss")
    ])
    # step 3. prepare estimator
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps,
                             traces=[
                                 Accuracy(true_key="y", pred_key="y_pred"),
                                 ModelSaver(model_name="densenet121",
                                            save_dir=model_dir,
                                            save_best=True),
                                 LRController(model_name="densenet121",
                                              reduce_on_eval=True)
                             ])
    return estimator
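
LRController(reduce_on_eval=True) lowers the learning rate when evaluation stops improving; a sketch of that behaviour (an assumption about the trace, not its actual code):

class ReduceOnEval:
    def __init__(self, initial_lr=1e-3, factor=0.5):
        self.lr, self.factor = initial_lr, factor
        self.best = float("inf")

    def on_eval_end(self, eval_loss):
        if eval_loss < self.best:
            self.best = eval_loss    # still improving: keep the current LR
        else:
            self.lr *= self.factor   # plateaued: decay the LR
        return self.lr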
Example #6: digit detection on SVHN with RetinaNet
def get_estimator(batch_size=128, epochs=15, model_dir=tempfile.mkdtemp()):
    # prepare data in disk
    train_csv, val_csv, path = svhn.load_data()
    writer = RecordWriter(
        save_dir=os.path.join(path, "FEdata"),
        train_data=train_csv,
        validation_data=val_csv,
        ops=[
            ImageReader(inputs="image", parent_path=path, outputs="image"),
            String2List(inputs=["label", "x1", "y1", "x2", "y2"],
                        outputs=["label", "x1", "y1", "x2", "y2"]),
            RelativeCoordinate(inputs=("image", "x1", "y1", "x2", "y2"),
                               outputs=("x1", "y1", "x2", "y2")),
            Resize(inputs="image", target_size=(64, 128), outputs="image"),
            GenerateTarget(inputs=("label", "x1", "y1", "x2", "y2"),
                           outputs=("target_cls", "target_loc"))
        ])
    # prepare pipeline
    pipeline = Pipeline(batch_size=batch_size,
                        data=writer,
                        ops=Minmax(inputs="image", outputs="image"),
                        read_feature=["image", "target_cls", "target_loc"])
    # prepare model
    model = FEModel(
        model_def=lambda: RetinaNet(input_shape=(64, 128, 3), num_classes=10),
        model_name="retinanet",
        optimizer=tf.optimizers.Adam(learning_rate=0.0001))
    network = Network(ops=[
        ModelOp(inputs="image", model=model, outputs=["pred_cls", "pred_loc"]),
        PredictBox(outputs=("cls_selected", "loc_selected", "valid_outputs"),
                   mode="eval"),
        RetinaLoss(inputs=("target_cls", "target_loc", "pred_cls", "pred_loc"),
                   outputs="loss"),
    ])
    # prepare estimator
    estimator = Estimator(network=network,
                          pipeline=pipeline,
                          epochs=epochs,
                          log_steps=20,
                          traces=ModelSaver(model_name="retinanet",
                                            save_dir=model_dir,
                                            save_best=True))
    return estimator
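
RelativeCoordinate normalizes the pixel box corners by the image size so the targets stay valid through the later Resize; a sketch of the idea (not the op's source):

def to_relative(image, x1, y1, x2, y2):
    h, w = image.shape[0], image.shape[1]    # pixel dimensions
    return x1 / w, y1 / h, x2 / w, y2 / h    # corners rescaled to [0, 1]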
Example #7: adversarial training of LeNet on CIFAR-10
def get_estimator(epochs=10,
                  batch_size=32,
                  epsilon=0.01,
                  warmup=0,
                  model_dir=tempfile.mkdtemp()):
    (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.cifar10.load_data()
    data = {
        "train": {
            "x": x_train,
            "y": y_train
        },
        "eval": {
            "x": x_eval,
            "y": y_eval
        }
    }
    num_classes = 10

    pipeline = Pipeline(batch_size=batch_size,
                        data=data,
                        ops=Minmax(inputs="x", outputs="x"))

    model = FEModel(model_def=lambda: LeNet(input_shape=x_train.shape[1:],
                                            classes=num_classes),
                    model_name="LeNet",
                    optimizer="adam")

    # Scheduler keys are epoch numbers: the adversarial ops activate at epoch `warmup`
    adv_img = {
        warmup:
        AdversarialSample(inputs=("loss", "x"),
                          outputs="x_adverse",
                          epsilon=epsilon,
                          mode="train")
    }
    adv_eval = {
        warmup:
        ModelOp(inputs="x_adverse",
                model=model,
                outputs="y_pred_adverse",
                mode="train")
    }
    adv_loss = {
        warmup:
        SparseCategoricalCrossentropy(y_true="y",
                                      y_pred="y_pred_adverse",
                                      outputs="adverse_loss",
                                      mode="train")
    }
    adv_avg = {
        warmup:
        Average(inputs=("loss", "adverse_loss"), outputs="loss", mode="train")
    }

    network = Network(ops=[
        ModelOp(inputs="x", model=model, outputs="y_pred", track_input=True),
        SparseCategoricalCrossentropy(
            y_true="y", y_pred="y_pred", outputs="loss"),
        Scheduler(adv_img),
        Scheduler(adv_eval),
        Scheduler(adv_loss),
        Scheduler(adv_avg)
    ])

    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        ConfusionMatrix(true_key="y",
                        pred_key="y_pred",
                        num_classes=num_classes),
        ModelSaver(model_name="LeNet", save_dir=model_dir, save_freq=2)
    ]

    estimator = Estimator(network=network,
                          pipeline=pipeline,
                          epochs=epochs,
                          traces=traces)

    return estimator
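
AdversarialSample takes the loss and the tracked input, which suggests an FGSM-style perturbation (an assumption); a self-contained sketch of that idea:

import tensorflow as tf

def fgsm(model, x, y, epsilon=0.01):
    with tf.GradientTape() as tape:
        tape.watch(x)
        loss = tf.keras.losses.sparse_categorical_crossentropy(y, model(x))
    grad = tape.gradient(loss, x)    # d(loss)/d(input)
    # step in the direction that increases the loss, then keep pixels in range
    return tf.clip_by_value(x + epsilon * tf.sign(grad), 0.0, 1.0)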
Example #8: one-shot learning with a Siamese network
def get_estimator(epochs=200, batch_size=128, steps_per_epoch=500, validation_steps=100, model_dir=tempfile.mkdtemp()):
    # step 1. prepare pipeline
    train_path, eval_path = load_data()
    data = {
        "train": lambda: get_batch(train_path, batch_size=batch_size),
        "eval": lambda: get_batch(eval_path, batch_size=batch_size, is_train=False)
    }
    pipeline = fe.Pipeline(
        data=data,
        batch_size=batch_size,
        ops=[
            Probability(
                tensor_op=Augmentation2D(inputs="x_a",
                                         outputs="x_a",
                                         mode="train",
                                         rotation_range=10.0,
                                         shear_range=-0.3 * 180 / np.pi,
                                         zoom_range=[0.8, 1.2],
                                         width_shift_range=0.05,
                                         height_shift_range=0.05),
                prob=0.89),
            Probability(
                tensor_op=Augmentation2D(inputs="x_b",
                                         outputs="x_b",
                                         mode="train",
                                         rotation_range=10.0,
                                         shear_range=-0.3 * 180 / np.pi,
                                         zoom_range=[0.8, 1.2],
                                         width_shift_range=0.05,
                                         height_shift_range=0.05),
                prob=0.89),
            Minmax(inputs="x_a", outputs="x_a"),
            Minmax(inputs="x_b", outputs="x_b")
        ])

    # step 2. prepare model
    model = fe.build(model_def=siamese_network,
                     model_name="siamese_net",
                     optimizer=Adam(learning_rate=1e-4),
                     loss_name="loss")

    network = fe.Network(ops=[
        ModelOp(inputs=["x_a", "x_b"], model=model, outputs="y_pred"),
        BinaryCrossentropy(inputs=("y", "y_pred"), outputs="loss")
    ])

    # Defining learning rate schedule
    lr_scheduler = LRDecaySchedule(decay_rate=0.99)

    # Loading images for validation one-shot accuracy
    val_img_list = load_eval_data(path=eval_path, is_test=False)

    # step 3. prepare estimator
    traces = [
        LRController(model_name="siamese_net", lr_schedule=lr_scheduler),
        OneShotAccuracy(model=model, img_list=val_img_list, output_name='one_shot_accuracy'),
        ModelSaver(model_name="siamese_net", save_dir=model_dir, save_best='one_shot_accuracy', save_best_mode='max'),
        EarlyStopping(monitor="one_shot_accuracy", patience=20, compare='max', mode="eval")
    ]

    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             traces=traces,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps)
    return estimator
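
One-shot evaluation, conceptually: the siamese network scores the query against every image in the support set, and the best match is the prediction (a sketch; OneShotAccuracy's exact protocol may differ):

import numpy as np

def one_shot_trial(score_fn, query, support_set):
    scores = [score_fn(query, s) for s in support_set]
    return int(np.argmax(scores))    # index of the predicted matching class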
Example #9: MNIST-to-USPS domain-adversarial training (DANN)
def get_estimator(batch_size=128, epochs=100):

    usps_train_csv, _, usps_parent_dir = usps.load_data()
    mnist_train_csv, _, mnist_parent_dir = mnist.load_data()

    df = pd.read_csv(mnist_train_csv)
    df.columns = ['source_img', 'source_label']
    df.to_csv(mnist_train_csv, index=False)

    df = pd.read_csv(usps_train_csv)
    df.columns = ['target_img', 'target_label']
    df.to_csv(usps_train_csv, index=False)

    writer = fe.RecordWriter(
        save_dir=os.path.join(os.path.dirname(mnist_parent_dir), 'dann',
                              'tfr'),
        train_data=(usps_train_csv, mnist_train_csv),
        ops=(
            [
                ImageReader(inputs="target_img",
                            outputs="target_img",
                            parent_path=usps_parent_dir,
                            grey_scale=True)
            ],  # first tuple element
            [
                ImageReader(inputs="source_img",
                            outputs="source_img",
                            parent_path=mnist_parent_dir,
                            grey_scale=True)
            ]))  # second tuple element

    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=writer,
                           ops=[
                               Resize(inputs="target_img",
                                      outputs="target_img",
                                      size=(28, 28)),
                               Resize(inputs="source_img",
                                      outputs="source_img",
                                      size=(28, 28)),
                               Minmax(inputs="target_img",
                                      outputs="target_img"),
                               Minmax(inputs="source_img",
                                      outputs="source_img")
                           ])

    # gradient reversal coefficient, ramped up during training by GRLWeightController
    alpha = tf.Variable(0.0, dtype=tf.float32, trainable=False)
    img_shape = (28, 28, 1)
    feat_dim = 7 * 7 * 48

    feature_extractor = fe.build(
        model_def=lambda: build_feature_extractor(img_shape),
        model_name="feature_extractor",
        loss_name="fe_loss",
        optimizer=tf.keras.optimizers.Adam(1e-4))

    label_predictor = fe.build(
        model_def=lambda: build_label_predictor(feat_dim),
        model_name="label_predictor",
        loss_name="fe_loss",
        optimizer=tf.keras.optimizers.Adam(1e-4))

    domain_predictor = fe.build(
        model_def=lambda: build_domain_predictor(feat_dim, alpha),
        model_name="domain_predictor",
        loss_name="fe_loss",
        optimizer=tf.keras.optimizers.Adam(1e-4))

    network = fe.Network(ops=[
        ModelOp(
            inputs="source_img", outputs="src_feat", model=feature_extractor),
        ModelOp(
            inputs="target_img", outputs="tgt_feat", model=feature_extractor),
        ModelOp(
            inputs="src_feat", outputs="src_c_logit", model=label_predictor),
        ModelOp(
            inputs="src_feat", outputs="src_d_logit", model=domain_predictor),
        ModelOp(
            inputs="tgt_feat", outputs="tgt_d_logit", model=domain_predictor),
        FELoss(inputs=("src_c_logit", "source_label", "src_d_logit",
                       "tgt_d_logit"),
               outputs="fe_loss")
    ])

    traces = [GRLWeightController(alpha=alpha)]

    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             traces=traces,
                             epochs=epochs)

    return estimator
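
The alpha variable is the gradient reversal coefficient that GRLWeightController ramps up during training; a sketch of the gradient reversal layer the domain predictor presumably contains (build_domain_predictor is not shown):

import tensorflow as tf

def gradient_reversal(alpha):
    @tf.custom_gradient
    def _reverse(x):
        def grad(dy):
            return -alpha * dy       # flip (and scale) gradients on the way back
        return tf.identity(x), grad  # forward pass is the identity
    return _reverse

x = tf.Variable([[1.0, 2.0]])
with tf.GradientTape() as tape:
    y = tf.reduce_sum(gradient_reversal(0.5)(x))
print(tape.gradient(y, x))           # [[-0.5, -0.5]]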
Example #10: MNIST-to-USPS adversarial discriminative domain adaptation (ADDA)
def get_estimator(pretrained_fe_path, classifier_path, data_path=None):

    assert os.path.exists(pretrained_fe_path), "Pretrained feature extractor is missing"
    assert os.path.exists(classifier_path), "Pretrained classifier is missing"
    usps_train_csv, usps_eval_csv, usps_parent_dir = usps.load_data(data_path)
    mnist_train_csv, mnist_eval_csv, mnist_parent_dir = mnist.load_data(data_path)

    tfr_path = os.path.join(os.path.dirname(usps_parent_dir), 'ADDA-tfrecords')
    os.makedirs(tfr_path, exist_ok=True)

    df = pd.read_csv(usps_train_csv)
    df.columns = ['target_img', 'target_label']
    df.to_csv(usps_train_csv, index=False)

    df = pd.read_csv(usps_eval_csv)
    df.columns = ['target_img', 'target_label']
    df.to_csv(usps_eval_csv, index=False)

    df = pd.read_csv(mnist_train_csv)
    df.columns = ['source_img', 'source_label']
    df.to_csv(mnist_train_csv, index=False)

    df = pd.read_csv(mnist_eval_csv)
    df.columns = ['source_img', 'source_label']
    df.to_csv(mnist_eval_csv, index=False)

    BATCH_SIZE = 128

    # Step 1: Define Pipeline
    writer = RecordWriter(save_dir=tfr_path,
                          train_data=(usps_train_csv, mnist_train_csv),
                          ops=(
                              [ImageReader(inputs="target_img", outputs="target_img", parent_path=usps_parent_dir, grey_scale=True)], # first tuple element
                              [ImageReader(inputs="source_img", outputs="source_img", parent_path=mnist_parent_dir, grey_scale=True)])) # second tuple element

    pipeline = fe.Pipeline(
        batch_size=BATCH_SIZE,
        data=writer,
        ops=[
            Resize(inputs="target_img", outputs="target_img", size=(32, 32)),
            Resize(inputs="source_img", outputs="source_img", size=(32, 32)),
            Minmax(inputs="target_img", outputs="target_img"),
            Minmax(inputs="source_img", outputs="source_img")
        ])

    # Step 2: Define Network
    feature_extractor = fe.build(model_def=build_feature_extractor,
                                 model_name="fe",
                                 loss_name="fe_loss",
                                 optimizer=tf.keras.optimizers.Adam(1e-4, beta_1=0.5, beta_2=0.9))

    discriminator = fe.build(model_def=build_discriminator,
                             model_name="disc",
                             loss_name="d_loss",
                             optimizer=tf.keras.optimizers.Adam(1e-4, beta_1=0.5, beta_2=0.9))

    network = fe.Network(ops=[
        ModelOp(inputs="target_img", outputs="target_feature", model=feature_extractor),
        ModelOp(inputs="target_feature", outputs="target_score", model=discriminator),
        ExtractSourceFeature(model_path=pretrained_fe_path, inputs="source_img", outputs="source_feature"),
        ModelOp(inputs="source_feature", outputs="source_score", model=discriminator),
        DLoss(inputs=("source_score", "target_score"), outputs="d_loss"),
        FELoss(inputs="target_score", outputs="fe_loss")
    ])

    # Step 3: Define Traces
    traces = [
        LoadPretrainedFE(model_name="fe", model_path=pretrained_fe_path),
        EvaluateTargetClassifier(model_name="fe", model_path=classifier_path)
    ]

    estimator = fe.Estimator(pipeline=pipeline, network=network, traces=traces, epochs=100)

    return estimator
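
DLoss and FELoss are defined elsewhere in this example; in ADDA they are the usual adversarial pair, roughly as follows (a sketch assuming the scores are logits):

import tensorflow as tf

bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def d_loss(source_score, target_score):
    # discriminator: source features are "real" (1), target features "fake" (0)
    return (bce(tf.ones_like(source_score), source_score) +
            bce(tf.zeros_like(target_score), target_score))

def fe_loss(target_score):
    # target feature extractor tries to make its features look like source
    return bce(tf.ones_like(target_score), target_score)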