Example #1
0
def test_save_model(model_name: str, fake_model: Model):
    """Round-trip check: a saved model reloads with identical layers."""
    # Pre-tag the fixture so save_model sees an existing name to replace.
    fake_model.__asf_model_name = f"{model_name}:some_old_tag"
    save_model(fake_model, "test_save_model")

    reloaded = load_model(f"{model_name}:test_save_model")
    verify_model_layer_equality(reloaded, fake_model)
Example #2
0
def test_save_model_no_dir(model_name: str, tmpdir: py.path.local,
                           fake_model: Model):
    """save_model should recreate the models directory when it is missing."""
    models_dir = tmpdir.join("models")
    shutil.rmtree(models_dir)

    # Pre-tag the fixture so save_model sees an existing name to replace.
    fake_model.__asf_model_name = f"{model_name}:some_old_tag"
    save_model(fake_model, "test_save_model")

    assert models_dir.check(dir=1)
Example #3
0
def test_save_model_with_history(model_name: str, fake_model: Model,
                                 new_history: History):
    """Saving with a history object should persist it alongside the model."""
    # Pre-tag the fixture so save_model sees an existing name to replace.
    fake_model.__asf_model_name = f"{model_name}:some_old_tag"
    save_model(fake_model, "test_save_model", history=new_history)

    reloaded = load_model(f"{model_name}:test_save_model")
    verify_model_layer_equality(reloaded, fake_model)
    assert reloaded.__asf_model_history == new_history
Example #4
0
def create_model_masked(model_name: str,
                        num_filters: int = 16,
                        dropout: float = 0.1,
                        batchnorm: bool = True) -> Model:
    """Build and compile a UNET-style encoder/decoder model.

    Args:
        model_name: Stored on the returned model as ``__asf_model_name``.
        num_filters: Base convolution filter count; each stage uses a
            multiple of this value.
        dropout: Dropout rate applied after every pooling / upsampling
            stage.
        batchnorm: Forwarded to ``conv2d_block`` to toggle batch
            normalization.

    Returns:
        A compiled Keras ``Model`` (mean-squared-error loss, Adam
        optimizer, accuracy metric).
    """
    # `dems` is a module-level constant (tile height/width) defined outside
    # this chunk; the two channels are presumably stacked VV/VH SAR bands —
    # TODO confirm against the module globals.
    inputs = Input(shape=(dems, dems, 2))

    # --- Encoder: six conv blocks, each followed by 2x2 max-pooling and
    # dropout. NOTE(review): filter multipliers plateau at 8x
    # (1, 2, 4, 8, 8, 8) before the 16x bottleneck — verify intended.
    c1 = conv2d_block(inputs,
                      num_filters * 1,
                      kernel_size=3,
                      batchnorm=batchnorm)
    p1 = MaxPooling2D((2, 2))(c1)
    p1 = Dropout(dropout)(p1)

    c2 = conv2d_block(p1, num_filters * 2, kernel_size=3, batchnorm=batchnorm)
    p2 = MaxPooling2D((2, 2))(c2)
    p2 = Dropout(dropout)(p2)

    c3 = conv2d_block(p2, num_filters * 4, kernel_size=3, batchnorm=batchnorm)
    p3 = MaxPooling2D((2, 2))(c3)
    p3 = Dropout(dropout)(p3)

    c4 = conv2d_block(p3, num_filters * 8, kernel_size=3, batchnorm=batchnorm)
    p4 = MaxPooling2D((2, 2))(c4)
    p4 = Dropout(dropout)(p4)

    c5 = conv2d_block(p4, num_filters * 8, kernel_size=3, batchnorm=batchnorm)
    p5 = MaxPooling2D((2, 2))(c5)
    p5 = Dropout(dropout)(p5)

    c6 = conv2d_block(p5, num_filters * 8, kernel_size=3, batchnorm=batchnorm)
    p6 = MaxPooling2D((2, 2))(c6)
    p6 = Dropout(dropout)(p6)

    # Bottleneck block at the deepest resolution.
    c7 = conv2d_block(p6,
                      num_filters=num_filters * 16,
                      kernel_size=3,
                      batchnorm=batchnorm)

    # --- Decoder: each stage upsamples 2x with a transpose convolution,
    # concatenates the matching encoder output (skip connection), applies
    # dropout, then a conv block.
    # Expanding to 64 x 64 x 1
    u8 = Conv2DTranspose(num_filters * 4, (3, 3),
                         strides=(2, 2),
                         padding='same')(c7)
    u8 = concatenate([u8, c6])
    u8 = Dropout(dropout)(u8)
    c8 = conv2d_block(u8, num_filters * 4, kernel_size=3, batchnorm=batchnorm)

    u9 = Conv2DTranspose(num_filters * 2, (3, 3),
                         strides=(2, 2),
                         padding='same')(c8)
    u9 = concatenate([u9, c5])
    u9 = Dropout(dropout)(u9)
    c9 = conv2d_block(u9, num_filters * 2, kernel_size=3, batchnorm=batchnorm)

    u10 = Conv2DTranspose(num_filters * 1, (3, 3),
                          strides=(2, 2),
                          padding='same')(c9)

    u10 = concatenate([u10, c4])
    u10 = Dropout(dropout)(u10)
    c10 = conv2d_block(u10,
                       num_filters * 1,
                       kernel_size=3,
                       batchnorm=batchnorm)

    u11 = Conv2DTranspose(num_filters * 1, (3, 3),
                          strides=(2, 2),
                          padding='same')(c10)

    u11 = concatenate([u11, c3])
    u11 = Dropout(dropout)(u11)
    c11 = conv2d_block(u11,
                       num_filters * 1,
                       kernel_size=3,
                       batchnorm=batchnorm)

    u12 = Conv2DTranspose(num_filters * 1, (3, 3),
                          strides=(2, 2),
                          padding='same')(c11)
    u12 = concatenate([u12, c2])
    u12 = Dropout(dropout)(u12)
    c12 = conv2d_block(u12,
                       num_filters * 1,
                       kernel_size=3,
                       batchnorm=batchnorm)

    u13 = Conv2DTranspose(num_filters * 1, (3, 3),
                          strides=(2, 2),
                          padding='same')(c12)
    u13 = concatenate([u13, c1])
    u13 = Dropout(dropout)(u13)
    c13 = conv2d_block(u13,
                       num_filters * 1,
                       kernel_size=3,
                       batchnorm=batchnorm)

    # Single-channel sigmoid head: per-pixel probability map.
    outputs = Conv2D(1, (1, 1), activation='sigmoid', name='last_layer')(c13)
    model = Model(inputs=inputs, outputs=[outputs])

    # Project convention: tag the model with its name for save/load helpers.
    model.__asf_model_name = model_name

    model.compile(loss='mean_squared_error',
                  optimizer=Adam(),
                  metrics=['accuracy'])

    return model
def create_cdl_model_masked(model_name: str,
                            num_filters: int = NUM_FILTERS,
                            time_steps: int = TIME_STEPS,
                            dropout: float = 0.5,
                            batchnorm: bool = True) -> Model:
    """Build and compile a UNET model over channel-stacked SAR time steps.

    Requires a stack of sequential SAR data (with vh vv channels stacked),
    where each image is a different time step, concatenated along the
    channel axis. Spatial dimensions are dynamic (``None``).

    Args:
        model_name: Recorded on the returned model as ``__asf_model_name``.
        num_filters: Base filter count; encoder stages scale it up to 32x.
        time_steps: Number of SAR acquisitions stacked into the input's
            channel dimension.
        dropout: Dropout rate applied after every pooling stage.
        batchnorm: Forwarded to the conv blocks to toggle batch norm.

    Returns:
        A compiled Keras ``Model`` (Jaccard-distance loss, Adam with an
        exponentially decaying learning rate, accuracy and MeanIoU
        metrics).
    """
    # Fix: honor the `time_steps` parameter — previously the module
    # constant TIME_STEPS was hard-coded here, silently ignoring the
    # argument (identical behavior at the default value).
    inputs = Input(shape=(None, None, time_steps * N_CHANNELS),
                   batch_size=None)

    # --- Encoder: conv block + 2x2 max-pool + dropout per stage, filter
    # counts doubling from 1x to 32x the base.
    c1 = conv2d_block(inputs,
                      num_filters * 1,
                      kernel_size=3,
                      batchnorm=batchnorm)

    p1 = MaxPooling2D((2, 2))(c1)
    p1 = Dropout(dropout)(p1)

    c2 = conv2d_block(p1, num_filters * 2, kernel_size=3, batchnorm=batchnorm)
    p2 = MaxPooling2D((2, 2))(c2)
    p2 = Dropout(dropout)(p2)

    c3 = conv2d_block(p2, num_filters * 4, kernel_size=3, batchnorm=batchnorm)
    p3 = MaxPooling2D((2, 2))(c3)
    p3 = Dropout(dropout)(p3)

    c4 = conv2d_block(p3, num_filters * 8, kernel_size=3, batchnorm=batchnorm)
    p4 = MaxPooling2D((2, 2))(c4)
    p4 = Dropout(dropout)(p4)

    c5 = conv2d_block(p4, num_filters * 16, kernel_size=3, batchnorm=batchnorm)
    p5 = MaxPooling2D((2, 2))(c5)
    p5 = Dropout(dropout)(p5)

    c6 = conv2d_block(p5, num_filters * 32, kernel_size=3, batchnorm=batchnorm)
    p6 = MaxPooling2D((2, 2))(c6)
    p6 = Dropout(dropout)(p6)

    # Bottleneck. Fix: forward `batchnorm` like every sibling call — it was
    # previously omitted, so the caller's setting was ignored here
    # (assuming conv2d_block defaults to batchnorm=True, default behavior
    # is unchanged — TODO confirm the helper's default).
    # NOTE(review): dead commented-out experiments (deeper 64x/128x encoder
    # stages and ConvLSTM/Bidirectional variants) were removed from here.
    middle = conv2d_block(p6, num_filters * 32, kernel_size=3,
                          batchnorm=batchnorm)

    # --- Decoder: each deconv block upsamples 2x and concatenates the
    # matching encoder skip connection, mirroring the encoder's filters.
    uy = deconv2d_block_time_dist(middle,
                                  num_filters=num_filters * 32,
                                  dropout=dropout,
                                  kernel_size=3,
                                  batchnorm=batchnorm,
                                  concat_layer=c6,
                                  activation=True)
    uz = deconv2d_block_time_dist(uy,
                                  num_filters=num_filters * 16,
                                  dropout=dropout,
                                  kernel_size=3,
                                  batchnorm=batchnorm,
                                  concat_layer=c5,
                                  activation=True)
    u = deconv2d_block_time_dist(uz,
                                 num_filters=num_filters * 8,
                                 dropout=dropout,
                                 kernel_size=3,
                                 batchnorm=batchnorm,
                                 concat_layer=c4,
                                 activation=True)
    u1 = deconv2d_block_time_dist(u,
                                  num_filters=num_filters * 4,
                                  dropout=dropout,
                                  kernel_size=3,
                                  batchnorm=batchnorm,
                                  concat_layer=c3,
                                  activation=True)
    u2 = deconv2d_block_time_dist(u1,
                                  num_filters=num_filters * 2,
                                  dropout=dropout,
                                  kernel_size=3,
                                  batchnorm=batchnorm,
                                  concat_layer=c2,
                                  activation=True)
    u3 = deconv2d_block_time_dist(u2,
                                  num_filters=num_filters,
                                  dropout=dropout,
                                  kernel_size=3,
                                  batchnorm=batchnorm,
                                  concat_layer=c1,
                                  activation=True)

    # Single-channel sigmoid head: per-pixel probability map.
    final = Conv2D(filters=1,
                   kernel_size=1,
                   activation="sigmoid",
                   padding='same')(u3)
    model = Model(inputs=inputs, outputs=[final])

    # Project convention: tag the model with its name for save/load helpers.
    model.__asf_model_name = model_name

    # Exponentially decaying learning rate (x0.96 every 2000 steps).
    lr_schedule = ExponentialDecay(1e-3,
                                   decay_steps=2000,
                                   decay_rate=0.96,
                                   staircase=True)
    model.compile(loss=jaccard_distance_loss,
                  optimizer=Adam(learning_rate=lr_schedule),
                  metrics=['accuracy', MeanIoU(num_classes=2)])

    return model
Example #6
0
def create_cdl_model_masked(model_name: str,
                            num_filters: int = 16,
                            time_steps: int = 5,
                            dropout: float = 0.1,
                            batchnorm: bool = True) -> Model:
    """Build and compile a Time Distributed UNET model.

    Requires a stack of sequential SAR data (with vh vv channels stacked),
    where each image is a different time step. Every 2D layer is wrapped in
    ``TimeDistributed`` so it is applied independently per time step.

    Args:
        model_name: Recorded on the returned model as ``__asf_model_name``.
        num_filters: Base filter count; stages use multiples of it.
        time_steps: Number of time steps in the input sequence.
        dropout: Dropout rate applied after pooling / upsampling stages.
        batchnorm: Forwarded to ``conv2d_block_time_dist``.

    Returns:
        A compiled Keras ``Model`` (mean-squared-error loss, Adam
        optimizer, accuracy metric).
    """
    # `dems` is a module-level constant (tile height/width) defined outside
    # this chunk; the two channels are presumably VV/VH — TODO confirm.
    inputs = Input(shape=(time_steps, dems, dems, 2))

    # --- Encoder: six time-distributed conv/pool/dropout stages.
    c1 = conv2d_block_time_dist(inputs,
                                num_filters * 1,
                                kernel_size=3,
                                batchnorm=batchnorm)
    p1 = TimeDistributed(MaxPooling2D((2, 2)))(c1)
    p1 = TimeDistributed(Dropout(dropout))(p1)

    c2 = conv2d_block_time_dist(p1,
                                num_filters * 2,
                                kernel_size=3,
                                batchnorm=batchnorm)
    p2 = TimeDistributed(MaxPooling2D((2, 2)))(c2)
    p2 = TimeDistributed(Dropout(dropout))(p2)

    c3 = conv2d_block_time_dist(p2,
                                num_filters * 4,
                                kernel_size=3,
                                batchnorm=batchnorm)
    p3 = TimeDistributed(MaxPooling2D((2, 2)))(c3)
    p3 = TimeDistributed(Dropout(dropout))(p3)

    c4 = conv2d_block_time_dist(p3,
                                num_filters * 8,
                                kernel_size=3,
                                batchnorm=batchnorm)
    p4 = TimeDistributed(MaxPooling2D((2, 2)))(c4)
    # Fix: wrap Dropout in TimeDistributed like every sibling stage (it was
    # a bare Dropout here — element-wise result is equivalent, but the
    # model graph is now consistent).
    p4 = TimeDistributed(Dropout(dropout))(p4)

    c5 = conv2d_block_time_dist(p4,
                                num_filters * 8,
                                kernel_size=3,
                                batchnorm=batchnorm)
    p5 = TimeDistributed(MaxPooling2D((2, 2)))(c5)
    p5 = TimeDistributed(Dropout(dropout))(p5)

    c6 = conv2d_block_time_dist(p5,
                                num_filters * 8,
                                kernel_size=3,
                                batchnorm=batchnorm)
    p6 = TimeDistributed(MaxPooling2D((2, 2)))(c6)
    p6 = TimeDistributed(Dropout(dropout))(p6)

    # Bottleneck block at the deepest resolution.
    c7 = conv2d_block_time_dist(p6,
                                num_filters=num_filters * 16,
                                kernel_size=3,
                                batchnorm=batchnorm)

    # --- Decoder: time-distributed transpose convolutions, each
    # concatenated with the matching encoder skip connection.
    # Expanding to 64 x 64 x 1
    u8 = TimeDistributed(
        Conv2DTranspose(num_filters * 4, (3, 3),
                        strides=(2, 2),
                        padding='same'))(c7)
    u8 = concatenate([u8, c6])
    u8 = TimeDistributed(Dropout(dropout))(u8)
    c8 = conv2d_block_time_dist(u8,
                                num_filters * 4,
                                kernel_size=3,
                                batchnorm=batchnorm)

    u9 = TimeDistributed(
        Conv2DTranspose(num_filters * 2, (3, 3),
                        strides=(2, 2),
                        padding='same'))(c8)
    u9 = concatenate([u9, c5])
    u9 = TimeDistributed(Dropout(dropout))(u9)
    c9 = conv2d_block_time_dist(u9,
                                num_filters * 2,
                                kernel_size=3,
                                batchnorm=batchnorm)

    u10 = TimeDistributed(
        Conv2DTranspose(num_filters * 1, (3, 3),
                        strides=(2, 2),
                        padding='same'))(c9)

    u10 = concatenate([u10, c4])
    u10 = TimeDistributed(Dropout(dropout))(u10)
    c10 = conv2d_block_time_dist(u10,
                                 num_filters * 1,
                                 kernel_size=3,
                                 batchnorm=batchnorm)

    u11 = TimeDistributed(
        Conv2DTranspose(num_filters * 1, (3, 3),
                        strides=(2, 2),
                        padding='same'))(c10)

    u11 = concatenate([u11, c3])
    u11 = TimeDistributed(Dropout(dropout))(u11)
    c11 = conv2d_block_time_dist(u11,
                                 num_filters * 1,
                                 kernel_size=3,
                                 batchnorm=batchnorm)

    u12 = TimeDistributed(
        Conv2DTranspose(num_filters * 1, (3, 3),
                        strides=(2, 2),
                        padding='same'))(c11)
    u12 = concatenate([u12, c2])
    u12 = TimeDistributed(Dropout(dropout))(u12)
    c12 = conv2d_block_time_dist(u12,
                                 num_filters * 1,
                                 kernel_size=3,
                                 batchnorm=batchnorm)

    u13 = TimeDistributed(
        Conv2DTranspose(num_filters * 1, (3, 3),
                        strides=(2, 2),
                        padding='same'))(c12)
    u13 = concatenate([u13, c1])
    u13 = TimeDistributed(Dropout(dropout))(u13)
    c13 = conv2d_block_time_dist(u13,
                                 num_filters * 1,
                                 kernel_size=3,
                                 batchnorm=batchnorm)

    # Single-channel sigmoid head applied per time step.
    outputs = TimeDistributed(
        Conv2D(1, (1, 1), activation='sigmoid', name='last_layer'))(c13)
    model = Model(inputs=inputs, outputs=[outputs])

    # Project convention: tag the model with its name for save/load helpers.
    model.__asf_model_name = model_name

    model.compile(loss='mean_squared_error',
                  optimizer=Adam(),
                  metrics=['accuracy'])

    return model