def SDN_nopooling(inputs):
    """Spatial-deformer network with a single pooling stage.

    Builds a small conv encoder/decoder that predicts a 2-channel
    displacement field, wraps it as the localization network, and uses a
    SpatialDeformer driven by it to warp ``inputs``.

    Returns:
        (warped, flow): the deformed input tensor and the predicted
        displacement field (``locnet`` applied to ``inputs``).
    """
    # Encoder: two 3x3 convs (stride 1 — a larger stride could replace
    # the pooling step), then one downsampling stage.
    enc = Conv2D(64, (3, 3), padding='same')(inputs)
    feat = Conv2D(64, (3, 3), padding='same')(enc)
    feat = MaxPooling2D((2, 2))(feat)
    feat = Conv2D(128, (3, 3), padding='same')(feat)

    # Decoder: learned upsampling via a strided transposed convolution
    # (used here instead of UpSampling2D + Conv2D).
    feat = Conv2DTranspose(64, (3, 3), strides=(2, 2),
                           padding='same')(feat)

    # 2-channel displacement field, tanh-bounded to [-1, 1].
    flow = Conv2D(2, (3, 3), padding='same', activation='tanh')(feat)

    locnet = Model(inputs, flow)

    warped = SpatialDeformer(localization_net=locnet,
                             output_size=(input_shape[0], input_shape[1]),
                             input_shape=input_shape)(inputs)

    return warped, locnet(inputs)
def SDN_deeper(inputs):
    """Deeper spatial-deformer network with multiplicative skip connections.

    Two-level conv encoder, a 128-channel bottleneck, and a decoder that
    gates upsampled features by element-wise multiplication with the
    matching encoder features before predicting a 2-channel, tanh-bounded
    displacement field.

    Returns:
        (warped, flow): the SpatialDeformer-warped input and the predicted
        displacement field (``locnet`` applied to ``inputs``).
    """
    # Encoder level 1 (full resolution, 32 channels).
    z1_1 = Conv2D(32, (2, 2), padding='same')(inputs)
    z1_2 = Conv2D(32, (2, 2), padding='same')(z1_1)

    # Encoder level 2 (half resolution, 64 channels).
    z2 = MaxPooling2D((2, 2))(z1_2)
    z2_1 = Conv2D(64, (2, 2), padding='same')(z2)
    z2_2 = Conv2D(64, (2, 2), padding='same')(z2_1)

    # Bottleneck (quarter resolution, 128 channels).
    z3 = MaxPooling2D((2, 2))(z2_2)
    z3 = Conv2D(128, (2, 2), padding='same')(z3)

    # Decoder back to half resolution; gate with encoder level-2 features.
    z3 = UpSampling2D((2, 2))(z3)
    z3 = Conv2D(64, (2, 2), padding='same')(z3)
    z4 = multiply([z2_1, z3])

    # BUG FIX: original code upsampled `z3` here, which silently discarded
    # the skip product just computed into `z4` (dead store). Upsample `z4`
    # so the multiplicative skip connection actually feeds the decoder;
    # z3 and z4 have identical shapes at this point, so downstream shapes
    # are unchanged.
    z4 = UpSampling2D((2, 2))(z4)
    z4 = Conv2D(32, (2, 2), padding='same')(z4)
    z5 = multiply([z1_1, z4])

    # 2-channel displacement field, tanh-bounded to [-1, 1].
    zzzz = Conv2D(2, (2, 2), padding='same', activation='tanh')(z5)

    locnet = Model(inputs, zzzz)

    x1 = SpatialDeformer(localization_net=locnet,
                         output_size=(input_shape[0], input_shape[1]),
                         input_shape=input_shape)(inputs)

    return x1, locnet(inputs)
# ----- Example #3 (示例#3) -----
def SDN(inputs):
    """Spatial-deformer network: encoder/decoder with a gated skip.

    Predicts a 2-channel displacement field (zero-initialized weights, so
    the initial deformation is all-zero; l2-regularized, tanh-bounded) and
    warps ``inputs`` with a SpatialDeformer driven by it.

    Returns the warped input tensor.
    """
    # Encoder: keep the first conv output for the skip connection below.
    skip = Conv2D(64, (3, 3), padding='same')(inputs)
    enc = Conv2D(64, (3, 3), padding='same')(skip)

    # Bottleneck at half resolution.
    enc = MaxPooling2D((2, 2))(enc)
    enc = Conv2D(128, (3, 3), padding='same')(enc)

    # Decoder back to full resolution.
    dec = UpSampling2D((2, 2))(enc)
    dec = Conv2D(64, (3, 3), padding='same')(dec)

    # Multiplicative skip connection, then the displacement-field head.
    gated = multiply([skip, dec])
    flow = Conv2D(2, (3, 3),
                  padding='same',
                  kernel_initializer='zeros',
                  bias_initializer='zeros',
                  activity_regularizer=l2(0.1),
                  activation='tanh')(gated)

    locnet = Model(inputs, flow)

    warped = SpatialDeformer(localization_net=locnet,
                             output_size=(input_shape[0], input_shape[1]),
                             input_shape=input_shape)(inputs)

    return warped
# ----- Example #4 (示例#4) -----
def SDN(inputs):
    """Spatial-deformer network (single-conv encoder, linear flow head).

    Predicts a 2-channel displacement field with zero-initialized weights
    (so the initial deformation is all-zero) and an unbounded linear
    activation, then warps ``inputs`` with a SpatialDeformer sized by the
    module-level ``input_shape_G``.

    Returns the warped input tensor.
    """
    # Single encoder conv; its output doubles as the skip connection.
    skip = Conv2D(64, (3, 3), padding='same')(inputs)

    # Bottleneck at half resolution.
    enc = MaxPooling2D((2, 2))(skip)
    enc = Conv2D(128, (3, 3), padding='same')(enc)

    # Decoder back to full resolution.
    dec = UpSampling2D((2, 2))(enc)
    dec = Conv2D(64, (3, 3), padding='same')(dec)

    # Multiplicative skip connection, then the (linear) flow head.
    gated = multiply([skip, dec])
    flow = Conv2D(2, (3, 3),
                  padding='same',
                  kernel_initializer='zeros',
                  bias_initializer='zeros',
                  activation='linear')(gated)

    locnet = Model(inputs, flow)

    warped = SpatialDeformer(localization_net=locnet,
                             output_size=(input_shape_G[0], input_shape_G[1]),
                             input_shape=input_shape_G)(inputs)

    return warped
# ----- Example #5 (示例#5) -----
def SDN_ver1(inputs):
    """Spatial-deformer network using strided convs instead of pooling.

    Downsamples twice with stride-2 valid convolutions (+ channel-shared
    PReLU), upsamples twice with stride-2 transposed convolutions, and
    predicts a 2-channel, tanh-bounded displacement field. Sizes must be
    controlled carefully: valid stride-2 stages require the spatial dims
    to divide evenly, otherwise output sizes will not match the input.

    Returns:
        (warped, flow): the SpatialDeformer-warped input and the predicted
        displacement field (``locnet`` applied to ``inputs``).
    """
    # Downsampling path: stride-2 valid convs halve the spatial dims.
    down1 = Conv2D(32, (2, 2), strides=2, padding='valid')(inputs)
    down1 = PReLU(shared_axes=[3])(down1)  # one PReLU slope shared across channels axis

    down2 = Conv2D(64, (2, 2), strides=2, padding='valid')(down1)
    down2 = PReLU(shared_axes=[3])(down2)

    # Upsampling path: stride-2 transposed convs restore the spatial dims.
    up1 = Conv2DTranspose(64, (2, 2), strides=2, padding='valid')(down2)
    up1 = Conv2D(64, (2, 2), padding='same')(up1)

    up2 = Conv2DTranspose(32, (2, 2), strides=2, padding='valid')(up1)
    up2 = Conv2D(32, (2, 2), padding='same', activation='linear')(up2)

    # 2-channel displacement field, tanh-bounded to [-1, 1].
    flow = Conv2D(2, (2, 2), padding='same', activation='tanh')(up2)

    locnet = Model(inputs, flow)

    # NOTE(review): output_size is 3-D here (input_shape[0..2]), unlike the
    # 2-D variants elsewhere in this file — presumably a volumetric config.
    warped = SpatialDeformer(localization_net=locnet,
                             output_size=(input_shape[0], input_shape[1], input_shape[2]),
                             input_shape=input_shape)(inputs)

    return warped, locnet(inputs)
# ----- Example #6 (示例#6) — NOTE: the following snippet is truncated; its enclosing def is missing -----
     masked_loss = K.mean((K.exp(K.sum(mask, axis = 3))*K.square(yTrue-yPred)))  #[16,62,62,2] vs. [16,64,64,1]

     reg_loss = sobelNorm(model.layers[1].locnet.output) # why does this term gives zeros? Do not use it alone...
     
     return img_loss + sobel_loss + 0.3*BCE

#------------------------------------------------------------------------------
# Training with SDN
#------------------------------------------------------------------------------

#x = SpatialTransformer(localization_net=affine,
#                             output_size=(120,120), 
#                             input_shape=input_shape)(inputs)
     
# Warp `inputs` with the displacement field predicted by `locnet`
# (defined earlier, outside this snippet); `res` and `input_shape` are
# module-level config — presumably res == input_shape[0] == input_shape[1].
x1 = SpatialDeformer(localization_net=locnet,
                             output_size=(res,res), 
                             input_shape=input_shape)(inputs)

#x2 = SpatialDeformer(localization_net=locnet,
#                             output_size=(res,res), 
#                             input_shape=input_shape)(inputs) # using a second sdn for average
#x = Average()([x1, x2])

# Registration model: maps the input image to its warped version; trained
# with the custom loss defined above and Adam with learning-rate decay.
model = Model(inputs, x1)
model.compile(loss = customLoss, 
              optimizer = Adam(decay=1e-5),
              )

history = model.fit(x_train, y_train, 
                    epochs=epochs, batch_size=batch_size,
                    verbose = 0,
# ----- Example #7 (示例#7) — NOTE: the preceding fit(...) call and the start of this snippet are truncated -----
# Continuation of a localization network whose earlier layers are outside
# this snippet (`zz` and `inputs` are defined above the visible region).
zz = MaxPooling2D((2, 2))(zz)
zz = Conv2D(20, (5, 5), padding='same')(zz)
#zz = BatchNormalization()(zz)   # causing errors when compiling, need to set scope?
# 2-channel displacement field; weights/biases start at zero, presumably so
# the initial deformation is identity-like — TODO confirm. tanh bounds the
# displacements to [-1, 1].
zz = Conv2D(2, (5, 5),
            padding='same',
            kernel_initializer='zeros',
            bias_initializer='zeros',
            activation='tanh')(zz)  #careful about the activation
locnet = Model(inputs, zz)

#x = SpatialTransformer(localization_net=locnet_a,
#                             output_size=(60,60),
#                             input_shape=input_shape)(inputs)

# Warp the input with the learned deformation (fixed 30x30 output), then
# classify the warped image with a small CNN head.
x = SpatialDeformer(localization_net=locnet,
                    output_size=(30, 30),
                    input_shape=input_shape)(inputs)

x = Conv2D(32, (3, 3), padding='same', activation='relu')(x)
x = MaxPooling2D((2, 2))(x)
x = Conv2D(32, (3, 3), padding='same', activation='relu')(x)
x = MaxPooling2D((2, 2))(x)
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
# `nb_classes` is module-level config defined outside this snippet.
x = Dense(nb_classes, activation='softmax')(x)
model = Model(inputs, x)

# define custom loss-----------------------------------------------------------
#------------------------------------------------------------------------------
from keras.losses import categorical_crossentropy