def build_model():
    with tf.device('/device:GPU:2'):
        base_model = applications.VGG19(weights='imagenet',
                                        include_top=False,
                                        input_shape=train_tensors.shape[1:])
        add_model = Sequential()
        add_model.add(Flatten(input_shape=base_model.output_shape[1:]))
        added0_model = Model(inputs=base_model.input,
                             outputs=add_model(base_model.output))
        stn_model = Sequential()
        stn_model.add(
            Lambda(lambda x: 2 * x - 1.,
                   input_shape=train_tensors.shape[1:],
                   output_shape=train_tensors.shape[1:]))
        stn_model.add(BatchNormalization())
        stn_model.add(
            SpatialTransformer(localization_net=locnet(),
                               output_size=train_tensors.shape[1:3]))
        added_model = Model(inputs=stn_model.input,
                            outputs=added0_model(stn_model.output))

        inp = Input(batch_shape=(None, train_data.shape[1]))
        extra_model = Model(inputs=inp, outputs=inp)

        x = concatenate([added_model.output, extra_model.output])
        x = Dropout(0.5)(x)
        x = Dense(256, activation='relu')(x)
        x = Dropout(0.5)(x)
        x = Dense(1, activation='sigmoid')(x)

        model = Model(inputs=[added_model.input, extra_model.input], outputs=x)

        model.summary()
        return model
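
Several examples on this page call a module-level locnet() factory that is not shown. A minimal sketch of such a factory, assuming a 64x64x3 input and following the identity-initialized localization-network pattern used in the later examples:

import numpy as np
from keras.models import Sequential
from keras.layers import Activation, Conv2D, Dense, Flatten, MaxPooling2D

def locnet(input_shape=(64, 64, 3)):
    # zero kernel + identity-affine bias: the STN starts out as a no-op
    b = np.zeros((2, 3), dtype='float32')
    b[0, 0] = 1
    b[1, 1] = 1
    W = np.zeros((50, 6), dtype='float32')

    net = Sequential()
    net.add(MaxPooling2D(pool_size=(2, 2), input_shape=input_shape))
    net.add(Conv2D(20, (5, 5)))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Conv2D(20, (5, 5)))
    net.add(Flatten())
    net.add(Dense(50))
    net.add(Activation('relu'))
    net.add(Dense(6, weights=[W, b.flatten()]))
    return net
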
Example #2
def create_model():
    
    weights = get_init_weight()
    img_input = Input(shape=( None, None, 3))
    input_shape = (None, None, 3)
    
    conv1 = Conv2D(32, (3, 3))(img_input)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(32, (3, 3))(pool1)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    # GlobalAveragePooling2D is a layer: instantiate it, then apply it
    out1 = GlobalAveragePooling2D()(pool2)

    out1 = Dense(50, activation='relu')(out1)
    out = Dense(6, weights=weights)(out1)
    locnet = Model(inputs=img_input, outputs=out)
    
    in1 = SpatialTransformer(localization_net=locnet,
                             output_size=(32, 32),
                             input_shape=input_shape)(img_input)

    conv1 = make_conv_block(32, in1, 1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
#    drop1 = Dropout(0.5)(pool1)
    
    conv2 = make_conv_block(64, pool1, 2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    
    flat1 = Flatten()(pool2)
    # hidden_size and num_class are expected to come from the enclosing scope
    dense1 = Dense(hidden_size, activation='relu')(flat1)
    out = Dense(num_class, activation='softmax')(dense1)

    model = Model(inputs=img_input, outputs=out)
    return model
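
create_model relies on a get_init_weight() helper that is not shown. Judging from the identity-affine initialization used in the later examples, a plausible sketch (the 50-unit width matches the Dense(50) preceding the Dense(6) above):

import numpy as np

def get_init_weight():
    # zero kernel + identity-affine bias for the final Dense(6)
    b = np.zeros((2, 3), dtype='float32')
    b[0, 0] = 1
    b[1, 1] = 1
    W = np.zeros((50, 6), dtype='float32')
    return [W, b.flatten()]
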
def conv_model(input_shape=(32, 32, 3)):
    model = Sequential()
    model.add(
        Lambda(lambda x: x / 127.5 - 1.,
               input_shape=input_shape,
               output_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Conv2D(10, (1, 1), padding='same', kernel_regularizer=l2(0.05)))
    model.add(LeakyReLU(alpha=0.5))
    model.add(BatchNormalization())
    model.add(Conv2D(3, (1, 1), padding='same', kernel_regularizer=l2(0.05)))
    model.add(LeakyReLU(alpha=0.5))
    model.add(BatchNormalization())
    model.add(
        SpatialTransformer(localization_net=locnet(), output_size=(32, 32)))
    # model.add(Conv2D(64,( 3, 3), strides=1, padding='same', data_format='channels_first',input_shape=(3, 32, 32)))
    # model.add(BatchNormalization())
    # model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3), kernel_regularizer=l2(0.05)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, (3, 3), padding='valid', kernel_regularizer=l2(0.05)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3), kernel_regularizer=l2(0.05)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(5, activation="softmax"))
    model.summary()
    return model
Example #4
def conv_model(input_shape=(32, 32, 3)):
    model = Sequential()
    model.add(InputLayer(input_shape=input_shape))
    model.add(
        SpatialTransformer(localization_net=locnet(), output_size=(32, 32)))
    model.add(
        Conv2D(16, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(0.05)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(
        Conv2D(32, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(0.05)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(
        Conv2D(64, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(0.05)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dropout(0.6))
    model.add(Dense(43, activation="softmax"))
    return model
Example #5
def simpConvNNSTN(input_shape=(64, 64, 3)):
    """
	Build the classifier simpConvNNSTN
	"""
    model = Sequential()
    model.add(
        SpatialTransformer(localization_net=locnet,
                           downsample_factor=3,
                           input_shape=input_shape))
    model.add(
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                     input_shape=input_shape))
    #model went from 64x64x3 to 32x32x3
    model.add(Conv2D(64, (3, 3), strides=(2, 2), activation='softplus'))
    #model is now 16x16x64
    model.add(Conv2D(32, (3, 3), activation='softplus'))
    #model is now 16x16x32
    model.add(Conv2D(16, (3, 3), activation='softplus'))
    #model is now 16x16x16
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    #model is now  8x8x16
    model.add(Flatten())
    #model is now 1024 (flattened from 8x8x16)
    model.add(Dense(128, activation='softplus'))
    model.add(Dense(32, activation='softplus'))
    model.add(Dense(1, activation='sigmoid'))
    return model
Example #6
def create_stn_model(input_shape=(40, 40, 1), output_dim=10):
    b = np.zeros((2, 3), dtype='float32')
    b[0, 0] = 1
    b[1, 1] = 1
    W = np.zeros((50, 6), dtype='float32')
    weights = [W, b.flatten()]

    locnet = Sequential()
    locnet.add(MaxPooling2D(pool_size=(2,2), input_shape=input_shape))
    locnet.add(Conv2D(20, (5, 5)))
    locnet.add(MaxPooling2D(pool_size=(2,2)))
    locnet.add(Conv2D(20, (5, 5)))
    locnet.add(Flatten())
    locnet.add(Dense(50))
    locnet.add(Activation('relu'))
    locnet.add(Dense(6, weights=weights))

    model = Sequential()
    model.add(SpatialTransformer(localization_net=locnet,
                                 output_size=(40, 40),
                                 input_shape=input_shape))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dense(output_dim, activation='softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    return model
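
The zeroed kernel plus identity bias above means the localization net's Dense(6) initially outputs the identity affine transform no matter what features it sees, so training starts from the untransformed image. A quick numpy check of that initial forward pass:

import numpy as np

W = np.zeros((50, 6), dtype='float32')             # zero kernel
b = np.array([1, 0, 0, 0, 1, 0], dtype='float32')  # identity-affine bias
h = np.random.rand(50).astype('float32')           # arbitrary localization features
theta = h @ W + b                                  # Dense(6) forward pass
print(theta.reshape(2, 3))                         # [[1. 0. 0.], [0. 1. 0.]]
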
def conv_model(input_shape=(32, 32, 3)):

    l2_reg = 0.05

    model = Sequential()
    model.add(Lambda(
        lambda x: x * 2 - 1.,
        input_shape=input_shape,
        output_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Conv2D(10, (1, 1), padding='same',
                     kernel_regularizer=l2(l2_reg)))
    model.add(LeakyReLU(alpha=0.5))
    model.add(BatchNormalization())
    model.add(Conv2D(3, (1, 1), padding='same', kernel_regularizer=l2(l2_reg)))
    model.add(LeakyReLU(alpha=0.5))
    model.add(BatchNormalization())
    model.add(SpatialTransformer(localization_net=locnet(),
                                 output_size=(32, 32)))
    model.add(Conv2D(16, (5, 5), padding='same',
                     activation='relu', kernel_regularizer=l2(l2_reg)))
    model.add(BatchNormalization())
    model.add(Conv2D(32, (5, 5), padding='same',
                     activation='relu', kernel_regularizer=l2(l2_reg)))
    model.add(BatchNormalization())
    model.add(Conv2D(64, (5, 5), padding='same',
                     activation='relu', kernel_regularizer=l2(l2_reg)))
    model.add(BatchNormalization())
    model.add(Conv2D(96, (5, 5), padding='same',
                     activation='relu', kernel_regularizer=l2(l2_reg)))
    model.add(BatchNormalization())
    model.add(Conv2D(128, (5, 5), padding='same',
                     activation='relu', kernel_regularizer=l2(l2_reg)))
    model.add(BatchNormalization())
    model.add(Conv2D(192, (5, 5), padding='same',
                     activation='relu', kernel_regularizer=l2(l2_reg)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(256, (5, 5), padding='same',
                     activation='relu', kernel_regularizer=l2(l2_reg)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (5, 5), padding='same',
                     activation='relu', kernel_regularizer=l2(l2_reg)))
    model.add(BatchNormalization())
    model.add(Conv2D(64, (5, 5), padding='same',
                     activation='relu', kernel_regularizer=l2(l2_reg)))
    model.add(MaxPooling2D(pool_size=(8, 8)))
    model.add(Flatten())
    model.add(Dropout(0.6))
    model.add(Dense(43, activation='softmax'))

    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.01)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=adam, metrics=['accuracy'])

    return model
def template_match_nn():

    l2_reg = 0.01

    model = Sequential()
    model.add(Lambda(
        lambda x: x * 2 - 1.,
        input_shape=(32, 32, 3),
        output_shape=(32, 32, 3)))
    model.add(SpatialTransformer(localization_net=locnet(),
                                 output_size=(32, 32)))
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.01)
    model.compile(loss='mean_absolute_error', optimizer=adam)

    return model
Example #9
    def stn(self):

        # initial weights
        b = np.zeros((2, 3), dtype='float32')
        b[0, 0] = 1
        b[1, 1] = 1
        W = np.zeros((50, 6), dtype='float32')
        weights = [W, b.flatten()]

        locnet = Sequential()
        locnet.add(MaxPooling2D(pool_size=(2, 2), input_shape=self.input_shape))
        locnet.add(Conv2D(20, (5, 5)))
        locnet.add(MaxPooling2D(pool_size=(2, 2)))
        locnet.add(Conv2D(20, (5, 5)))

        locnet.add(Flatten())
        locnet.add(Dense(50))
        locnet.add(Activation('relu'))
        locnet.add(Dense(6, weights=weights))

        # stn
        model = Sequential()
        # conv1
        model.add(SpatialTransformer(localization_net=locnet,
            output_size=(32, 32), input_shape=self.input_shape))
        # model.add(SpatialTransformer(localization_net=locnet,
        #     output_size=self.input_shape, input_shape=self.input_shape))
        model.add(Conv2D(20, kernel_size=(5, 5),
                         padding="same",
                         activation='relu'))
        # pool1
        # model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # conv2
        model.add(Conv2D(50, kernel_size=(5, 5),
                         padding="same",
                         activation='relu'))
        # pool2
        # model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # model.add(Dropout(0.25))
        model.add(Flatten())
        # ip1
        model.add(Dense(500, activation='relu', name='fc1'))
        # model.add(Dropout(0.5))
        # ip2
        model.add(Dense(self.num_classes, activation='softmax', name='predictions'))

        return model
    def crnn_stn(self):

        from spatial_transformer import SpatialTransformer
        import numpy as np

        # initial weights
        b = np.zeros((2, 3), dtype='float32')
        b[0, 0] = 1
        b[1, 1] = 1
        W = np.zeros((20, 6), dtype='float32')
        weights = [W, b.flatten()]

        locnet = Sequential()
        locnet.add(MaxPooling2D(pool_size=(2, 2),
                                input_shape=self.image_shape))
        locnet.add(Conv2D(20, (5, 5)))
        locnet.add(MaxPooling2D(pool_size=(2, 2)))
        locnet.add(Conv2D(20, (5, 5)))
        # pool to a (batch, 20) vector so Dense(6) yields the six affine
        # parameters the SpatialTransformer expects
        locnet.add(GlobalAveragePooling2D())
        locnet.add(Dense(6, weights=weights))

        # conv feature extractor + LSTM classifier
        model = Sequential()
        model.add(
            TimeDistributed(SpatialTransformer(localization_net=locnet,
                                               output_size=(30, 30)),
                            input_shape=self.input_shape))
        model.add(
            TimeDistributed(Conv2D(8, (3, 3),
                                   activation='relu',
                                   padding='same')))
        model.add(
            TimeDistributed(
                Conv2D(32, (3, 3), activation='relu', padding='same')))

        model.add(TimeDistributed(Flatten()))
        model.add(LSTM(16, return_sequences=True))
        model.add(Flatten())
        model.add(Dense(self.nb_classes, activation='softmax'))

        return model
def conv_model(input_shape=(32, 32, 3)):
	model = Sequential()
	model.add(Lambda(
		lambda x: (x - 128) / 128,
		input_shape=(32, 32, 3),
		output_shape=(32, 32, 3)))
	model.add(SpatialTransformer(localization_net=locnet(),
								 output_size=(32, 32)))
	# model.add(BatchNormalization())
	# model.add(Conv2D(10, (1, 1), padding='same', kernel_regularizer=l2(0.05)))
	# model.add(LeakyReLU(alpha=0.5))
	# model.add(BatchNormalization())
	# model.add(Conv2D(3, (1, 1), padding='same', kernel_regularizer=l2(0.05)))
	# model.add(LeakyReLU(alpha=0.5))
	# model.add(BatchNormalization())
	# model.add(SpatialTransformer(localization_net=locnet(),
	# 							 output_size=(32, 32)))
	model.add(Conv2D(16, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(0.05)))
	model.add(BatchNormalization())
	model.add(Conv2D(32, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(0.05)))
	model.add(BatchNormalization())
	model.add(Conv2D(64, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(0.05)))
	model.add(BatchNormalization())
	# model.add(Conv2D(96, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(0.05)))
	# model.add(BatchNormalization())
	# model.add(Conv2D(128, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(0.05)))
	# model.add(BatchNormalization())
	# model.add(Conv2D(192, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(0.05)))
	# model.add(BatchNormalization())
	# model.add(MaxPooling2D(pool_size=(2, 2)))
	# model.add(Conv2D(256, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(0.05)))
	# model.add(BatchNormalization())
	# model.add(MaxPooling2D(pool_size=(2, 2)))
	# model.add(Conv2D(128, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(0.05)))
	# model.add(BatchNormalization())
	model.add(Conv2D(64, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(0.05)))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Flatten())
	model.add(Dropout(0.6))
	model.add(Dense(43, activation="softmax"))
	return model
locnet = Sequential()
locnet.add(MaxPooling2D(pool_size=(2,2), input_shape=input_shape))
locnet.add(Conv2D(20, (5, 5)))
locnet.add(MaxPooling2D(pool_size=(2,2)))
locnet.add(Conv2D(20, (5, 5)))

locnet.add(Flatten())
locnet.add(Dense(50))
locnet.add(Activation('relu'))
locnet.add(Dense(6, weights=weights))
#locnet.add(Activation('sigmoid'))


model = Sequential()

model.add(SpatialTransformer(localization_net=locnet,
                             output_size=(30,30), input_shape=input_shape))

model.add(Conv2D(32, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))

model.add(Dense(nb_classes))
model.add(Activation('softmax'))
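
The script above builds the classifier but stops before compiling it. Assuming one-hot labels X_train / Y_train (names not in the original), a typical continuation would be:

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=128, epochs=10, validation_split=0.1)
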
Example #13
base_model = applications.VGG19(weights='imagenet',
                                include_top=False,
                                input_shape=(64, 64, 3))

add_model = Sequential()
add_model.add(Flatten(input_shape=base_model.output_shape[1:]))

added0_model = Model(inputs=base_model.input, outputs=add_model(base_model.output))

stn_model = Sequential()
stn_model.add(Lambda(
    lambda x: 2*x - 1.,
    input_shape=(64, 64, 3),
    output_shape=(64, 64, 3)))
stn_model.add(BatchNormalization())
stn_model.add(SpatialTransformer(localization_net=locnet(),
                                 output_size=(64, 64)))

added_model = Model(inputs=stn_model.input, outputs=added0_model(stn_model.output))

inp = Input(batch_shape=(None, 5))
# out = Dense(8)(inp)
extra_model = Model(inputs=inp, outputs=inp)

x = concatenate([added_model.output,
                 extra_model.output])

# x = Dropout(0.5)(x)
# x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.5)(x)
Example #14
    def __init__(self):
        super().__init__()
        self.transform = SpatialTransformer()
        self.classify = ClassNet()
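
The constructor above only wires the two sub-modules together. Assuming this is a PyTorch nn.Module, the matching forward pass (not part of the original snippet) would presumably chain them:

    def forward(self, x):
        x = self.transform(x)    # spatially normalize the input
        return self.classify(x)  # classify the warped image
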
Example #15
def build_model_FCN_model_api(batch_size,
                              optimizer,
                              patch_size=(128, 128),
                              base_weight_decay=0.0005,
                              output_ROI_mask=True):
    print('Using build_model_FCN_model_api')

    # define the shared model:
    net_name = 'Multi-view_FCN'

    scale_number = 3
    ##################### input  ###############################################
    input_shape0 = (batch_size, patch_size[0], patch_size[1], 1)
    input_shape1 = (batch_size, patch_size[0] // 2, patch_size[1] // 2, 1)
    input_shape2 = (batch_size, patch_size[0] // 4, patch_size[1] // 4, 1)

    input_shape3 = (1, patch_size[0] // 4, patch_size[1] // 4, 1)

    input_patches1_s0 = Input(batch_shape=input_shape0, name='patches1_s0')
    input_patches1_s1 = Input(batch_shape=input_shape1, name='patches1_s1')
    input_patches1_s2 = Input(batch_shape=input_shape2, name='patches1_s2')

    input_patches2_s0 = Input(batch_shape=input_shape0, name='patches2_s0')
    input_patches2_s1 = Input(batch_shape=input_shape1, name='patches2_s1')
    input_patches2_s2 = Input(batch_shape=input_shape2, name='patches2_s2')

    input_patches3_s0 = Input(batch_shape=input_shape0, name='patches3_s0')
    input_patches3_s1 = Input(batch_shape=input_shape1, name='patches3_s1')
    input_patches3_s2 = Input(batch_shape=input_shape2, name='patches3_s2')

    input_patches4_s0 = Input(batch_shape=input_shape0, name='patches4_s0')
    input_patches4_s1 = Input(batch_shape=input_shape1, name='patches4_s1')
    input_patches4_s2 = Input(batch_shape=input_shape2, name='patches4_s2')

    input_depth_maps_v1 = Input(batch_shape=input_shape3,
                                name='depth_ratio_v1')
    input_depth_maps_v2 = Input(batch_shape=input_shape3,
                                name='depth_ratio_v2')
    input_depth_maps_v3 = Input(batch_shape=input_shape3,
                                name='depth_ratio_v3')
    input_depth_maps_v4 = Input(batch_shape=input_shape3,
                                name='depth_ratio_v4')

    if output_ROI_mask:
        # the output density patch/map is down-sampled by a factor of 4
        output_masks = Input(batch_shape=(batch_size, patch_size[0],
                                          patch_size[1], 1),
                             name='output_masks')

    train_flag = False
    ####################### view 1 #############################################
    # image pyramids:
    x1_s0_output = feature_extraction_view1(base_weight_decay,
                                            input_patches1_s0, train_flag)
    x1_s1_output = feature_extraction_view1(base_weight_decay,
                                            input_patches1_s1, train_flag)
    x1_s2_output = feature_extraction_view1(base_weight_decay,
                                            input_patches1_s2, train_flag)

    # view 1 decoder
    # x1_7 = view1_decoder(base_weight_decay, x1_s0_output)

    # conv block 5
    x1_5 = Conv2D(data_format='channels_last',
                  trainable=True,
                  filters=64,
                  kernel_size=(5, 5),
                  strides=(1, 1),
                  kernel_initializer='he_normal',
                  padding='same',
                  kernel_regularizer=l2(base_weight_decay),
                  use_bias=True,
                  activation=None,
                  name='conv_block_5')(x1_s0_output)
    x1_5 = Activation('relu', name='conv_block_5_act')(x1_5)

    # conv block 6
    x1_6 = Conv2D(data_format='channels_last',
                  trainable=True,
                  filters=32,
                  kernel_size=(5, 5),
                  strides=(1, 1),
                  kernel_initializer='he_normal',
                  padding='same',
                  kernel_regularizer=l2(base_weight_decay),
                  use_bias=True,
                  activation=None,
                  name='conv_block_6')(x1_5)
    x1_6 = Activation('relu', name='conv_block_6_act')(x1_6)

    # conv block 7
    x1_7 = Conv2D(data_format='channels_last',
                  trainable=True,
                  filters=1,
                  kernel_size=(5, 5),
                  strides=(1, 1),
                  kernel_initializer='he_normal',
                  padding='same',
                  kernel_regularizer=l2(base_weight_decay),
                  use_bias=True,
                  activation=None,
                  name='conv_block_7')(x1_6)
    x1_7 = Activation('relu', name='conv_block_7_act')(x1_7)

    ####################### view 2 #############################################
    # image pyramids:
    x2_s0_output = feature_extraction_view2(base_weight_decay,
                                            input_patches2_s0, train_flag)
    x2_s1_output = feature_extraction_view2(base_weight_decay,
                                            input_patches2_s1, train_flag)
    x2_s2_output = feature_extraction_view2(base_weight_decay,
                                            input_patches2_s2, train_flag)
    # view 2 decoder
    # x2_7 = view2_decoder(base_weight_decay, x2_s0_output)

    # dmap
    # conv block 5
    x2_5 = Conv2D(data_format='channels_last',
                  trainable=True,
                  filters=64,
                  kernel_size=(5, 5),
                  strides=(1, 1),
                  kernel_initializer='he_normal',
                  padding='same',
                  kernel_regularizer=l2(base_weight_decay),
                  use_bias=True,
                  activation=None,
                  name='conv_block_5_2')(x2_s0_output)
    x2_5 = Activation('relu', name='conv_block_5_2_act')(x2_5)

    # conv block 6
    x2_6 = Conv2D(data_format='channels_last',
                  trainable=True,
                  filters=32,
                  kernel_size=(5, 5),
                  strides=(1, 1),
                  kernel_initializer='he_normal',
                  padding='same',
                  kernel_regularizer=l2(base_weight_decay),
                  use_bias=True,
                  activation=None,
                  name='conv_block_6_2')(x2_5)
    x2_6 = Activation('relu', name='conv_block_6_2_act')(x2_6)

    # conv block 7
    x2_7 = Conv2D(data_format='channels_last',
                  trainable=True,
                  filters=1,
                  kernel_size=(5, 5),
                  strides=(1, 1),
                  kernel_initializer='he_normal',
                  padding='same',
                  kernel_regularizer=l2(base_weight_decay),
                  use_bias=True,
                  activation=None,
                  name='conv_block_7_2')(x2_6)
    x2_7 = Activation('relu', name='conv_block_7_2_act')(x2_7)

    ####################### view 3 #############################################
    # image pyramids:
    x3_s0_output = feature_extraction_view3(base_weight_decay,
                                            input_patches3_s0, train_flag)
    x3_s1_output = feature_extraction_view3(base_weight_decay,
                                            input_patches3_s1, train_flag)
    x3_s2_output = feature_extraction_view3(base_weight_decay,
                                            input_patches3_s2, train_flag)

    # view 3 decoder
    # x3_7 = view3_decoder(base_weight_decay, x3_s0_output)

    # conv block 5
    x3_5 = Conv2D(data_format='channels_last',
                  trainable=True,
                  filters=64,
                  kernel_size=(5, 5),
                  strides=(1, 1),
                  kernel_initializer='he_normal',
                  padding='same',
                  kernel_regularizer=l2(base_weight_decay),
                  use_bias=True,
                  activation=None,
                  name='conv_block_5_3')(x3_s0_output)
    x3_5 = Activation('relu', name='conv_block_5_3_act')(x3_5)

    # conv block 6
    x3_6 = Conv2D(data_format='channels_last',
                  trainable=True,
                  filters=32,
                  kernel_size=(5, 5),
                  strides=(1, 1),
                  kernel_initializer='he_normal',
                  padding='same',
                  kernel_regularizer=l2(base_weight_decay),
                  use_bias=True,
                  activation=None,
                  name='conv_block_6_3')(x3_5)
    x3_6 = Activation('relu', name='conv_block_6_3_act')(x3_6)

    # conv block 7
    x3_7 = Conv2D(data_format='channels_last',
                  trainable=True,
                  filters=1,
                  kernel_size=(5, 5),
                  strides=(1, 1),
                  kernel_initializer='he_normal',
                  padding='same',
                  kernel_regularizer=l2(base_weight_decay),
                  use_bias=True,
                  activation=None,
                  name='conv_block_7_3')(x3_6)
    x3_7 = Activation('relu', name='conv_block_7_3_act')(x3_7)

    ####################### view 4 #############################################
    # image pyramids:
    x4_s0_output = feature_extraction_view4(base_weight_decay,
                                            input_patches4_s0, train_flag)
    x4_s1_output = feature_extraction_view4(base_weight_decay,
                                            input_patches4_s1, train_flag)
    x4_s2_output = feature_extraction_view4(base_weight_decay,
                                            input_patches4_s2, train_flag)

    # view 4 decoder
    # x4_7 = view4_decoder(base_weight_decay, x4_s0_output)

    # conv block 5
    x4_5 = Conv2D(data_format='channels_last',
                  trainable=True,
                  filters=64,
                  kernel_size=(5, 5),
                  strides=(1, 1),
                  kernel_initializer='he_normal',
                  padding='same',
                  kernel_regularizer=l2(base_weight_decay),
                  use_bias=True,
                  activation=None,
                  name='conv_block_5_4')(x4_s0_output)
    x4_5 = Activation('relu', name='conv_block_5_4_act')(x4_5)

    # conv block 6
    x4_6 = Conv2D(data_format='channels_last',
                  trainable=True,
                  filters=32,
                  kernel_size=(5, 5),
                  strides=(1, 1),
                  kernel_initializer='he_normal',
                  padding='same',
                  kernel_regularizer=l2(base_weight_decay),
                  use_bias=True,
                  activation=None,
                  name='conv_block_6_4')(x4_5)
    x4_6 = Activation('relu', name='conv_block_6_4_act')(x4_6)

    # conv block 7
    x4_7 = Conv2D(data_format='channels_last',
                  trainable=True,
                  filters=1,
                  kernel_size=(5, 5),
                  strides=(1, 1),
                  kernel_initializer='he_normal',
                  padding='same',
                  kernel_regularizer=l2(base_weight_decay),
                  use_bias=True,
                  activation=None,
                  name='conv_block_7_4')(x4_6)
    x4_7 = Activation('relu', name='conv_block_7_4_act')(x4_7)

    #################################### fusion #############################################
    ################# get the scale-selection mask #####################
    # view depth of image
    batch_size = x1_s0_output.shape[0].value
    height = x1_s0_output.shape[1].value
    width = x1_s0_output.shape[2].value
    num_channels = x1_s0_output.shape[3].value
    output_shape = [1, height, width, 1]

    # view1_depth = feature_scale_fusion_layer_mask(scale_number=scale_number,
    #                                               view = 1, output_shape=output_shape)
    # view2_depth = feature_scale_fusion_layer_mask(scale_number=scale_number,
    #                                               view = 2, output_shape=output_shape)
    # view3_depth = feature_scale_fusion_layer_mask(scale_number=scale_number,
    #                                               view = 3, output_shape=output_shape)

    # view1_scale = scale_selection_mask(base_weight_decay, input_depth_maps_v1)
    # view2_scale = scale_selection_mask(base_weight_decay, input_depth_maps_v2)
    # view3_scale = scale_selection_mask(base_weight_decay, input_depth_maps_v3)
    view1_scale = Conv2D(data_format='channels_last',
                         trainable=True,
                         filters=1,
                         kernel_size=(1, 1),
                         strides=(1, 1),
                         kernel_initializer=Constant(value=-1),
                         padding='same',
                         kernel_regularizer=l2(base_weight_decay),
                         use_bias=True,
                         bias_initializer='ones',
                         activation=None,
                         name='scale_fusion1')(input_depth_maps_v1)

    view2_scale = Conv2D(data_format='channels_last',
                         trainable=True,
                         filters=1,
                         kernel_size=(1, 1),
                         strides=(1, 1),
                         kernel_initializer=Constant(value=-1),
                         padding='same',
                         kernel_regularizer=l2(base_weight_decay),
                         use_bias=True,
                         bias_initializer='ones',
                         activation=None,
                         name='scale_fusion2')(input_depth_maps_v2)

    view3_scale = Conv2D(data_format='channels_last',
                         trainable=True,
                         filters=1,
                         kernel_size=(1, 1),
                         strides=(1, 1),
                         kernel_initializer=Constant(value=-1),
                         padding='same',
                         kernel_regularizer=l2(base_weight_decay),
                         use_bias=True,
                         bias_initializer='ones',
                         activation=None,
                         name='scale_fusion3')(input_depth_maps_v3)

    view4_scale = Conv2D(data_format='channels_last',
                         trainable=True,
                         filters=1,
                         kernel_size=(1, 1),
                         strides=(1, 1),
                         kernel_initializer=Constant(value=-1),
                         padding='same',
                         kernel_regularizer=l2(base_weight_decay),
                         use_bias=True,
                         bias_initializer='ones',
                         activation=None,
                         name='scale_fusion4')(input_depth_maps_v4)

    view1_scale_mask = feature_scale_fusion_layer_rbm(
        scale_number=scale_number)(view1_scale)
    view2_scale_mask = feature_scale_fusion_layer_rbm(
        scale_number=scale_number)(view2_scale)
    view3_scale_mask = feature_scale_fusion_layer_rbm(
        scale_number=scale_number)(view3_scale)
    view4_scale_mask = feature_scale_fusion_layer_rbm(
        scale_number=scale_number)(view4_scale)

    #################### fusion with mask ##################
    # view 1
    ## conv
    x1_s0_output_fusion = fusion_conv_v1(base_weight_decay, x1_s0_output)
    x1_s1_output_fusion = fusion_conv_v1(base_weight_decay, x1_s1_output)
    x1_s2_output_fusion = fusion_conv_v1(base_weight_decay, x1_s2_output)

    ## up sampling
    x1_s1_output_fusion = UpSampling_layer(size=[height, width])(
        [x1_s1_output_fusion])
    x1_s2_output_fusion = UpSampling_layer(size=[height, width])(
        [x1_s2_output_fusion])

    concatenated_map_v1 = Concatenate(name='cat_map_v1')(
        [x1_s0_output_fusion, x1_s1_output_fusion, x1_s2_output_fusion])
    fusion_v1 = feature_scale_fusion_layer(scale_number=scale_number)(
        [concatenated_map_v1, view1_scale_mask])

    ## proj
    fusion_v1_proj = SpatialTransformer(
        1, [int(480 / 4), int(640 / 4)])(fusion_v1)

    # view 2
    ## conv
    x2_s0_output_fusion = fusion_conv_v2(base_weight_decay, x2_s0_output)
    x2_s1_output_fusion = fusion_conv_v2(base_weight_decay, x2_s1_output)
    x2_s2_output_fusion = fusion_conv_v2(base_weight_decay, x2_s2_output)

    ## up sampling
    x2_s1_output_fusion = UpSampling_layer(size=[height, width])(
        [x2_s1_output_fusion])
    x2_s2_output_fusion = UpSampling_layer(size=[height, width])(
        [x2_s2_output_fusion])

    concatenated_map_v2 = Concatenate(name='cat_map_v2')(
        [x2_s0_output_fusion, x2_s1_output_fusion, x2_s2_output_fusion])
    fusion_v2 = feature_scale_fusion_layer(scale_number=scale_number)(
        [concatenated_map_v2, view2_scale_mask])

    ## proj
    fusion_v2_proj = SpatialTransformer(
        2, [int(480 / 4), int(640 / 4)])(fusion_v2)

    # view 3
    ## conv
    x3_s0_output_fusion = fusion_conv_v3(base_weight_decay, x3_s0_output)
    x3_s1_output_fusion = fusion_conv_v3(base_weight_decay, x3_s1_output)
    x3_s2_output_fusion = fusion_conv_v3(base_weight_decay, x3_s2_output)

    ## up sampling
    x3_s1_output_fusion = UpSampling_layer(size=[height, width])(
        [x3_s1_output_fusion])
    x3_s2_output_fusion = UpSampling_layer(size=[height, width])(
        [x3_s2_output_fusion])

    concatenated_map_v3 = Concatenate(name='cat_map_v3')(
        [x3_s0_output_fusion, x3_s1_output_fusion, x3_s2_output_fusion])

    fusion_v3 = feature_scale_fusion_layer(scale_number=scale_number)(
        [concatenated_map_v3, view3_scale_mask])

    ## proj
    fusion_v3_proj = SpatialTransformer(
        3, [int(480 / 4), int(640 / 4)])(fusion_v3)

    # view 4
    ## conv
    x4_s0_output_fusion = fusion_conv_v4(base_weight_decay, x4_s0_output)
    x4_s1_output_fusion = fusion_conv_v4(base_weight_decay, x4_s1_output)
    x4_s2_output_fusion = fusion_conv_v4(base_weight_decay, x4_s2_output)

    ## up sampling
    x4_s1_output_fusion = UpSampling_layer(size=[height, width])(
        [x4_s1_output_fusion])
    x4_s2_output_fusion = UpSampling_layer(size=[height, width])(
        [x4_s2_output_fusion])

    concatenated_map_v4 = Concatenate(name='cat_map_v4')(
        [x4_s0_output_fusion, x4_s1_output_fusion, x4_s2_output_fusion])

    fusion_v4 = feature_scale_fusion_layer(scale_number=scale_number)(
        [concatenated_map_v4, view4_scale_mask])

    ## proj
    fusion_v4_proj = SpatialTransformer(
        4, [int(480 / 4), int(640 / 4)])(fusion_v4)

    ################# concatenate ################
    concatenated_map = Concatenate(name='cat_map_fusion')(
        [fusion_v1_proj, fusion_v2_proj, fusion_v3_proj, fusion_v4_proj])
    fusion_v123 = Conv2D(data_format='channels_last',
                         trainable=True,
                         filters=96,
                         kernel_size=(1, 1),
                         strides=(1, 1),
                         kernel_initializer='he_normal',
                         padding='same',
                         kernel_regularizer=l2(base_weight_decay),
                         use_bias=True,
                         activation=None,
                         name='scale_fusion')(concatenated_map)
    fusion_v123 = Activation('relu', name='scale_fusion_act')(fusion_v123)

    #################### fusion and decode #####################################
    # conv block 9
    x = Conv2D(data_format='channels_last',
               trainable=True,
               filters=64,
               kernel_size=(5, 5),
               strides=(1, 1),
               kernel_initializer='he_normal',
               padding='same',
               kernel_regularizer=l2(base_weight_decay),
               use_bias=True,
               activation=None,
               name='conv_block_fusion1')(fusion_v123)
    x = Activation('relu', name='conv_block_fusion1_act')(x)

    x = Conv2D(data_format='channels_last',
               trainable=True,
               filters=32,
               kernel_size=(5, 5),
               strides=(1, 1),
               kernel_initializer='he_normal',
               padding='same',
               kernel_regularizer=l2(base_weight_decay),
               use_bias=True,
               activation=None,
               name='conv_block_fusion2')(x)
    x = Activation('relu', name='conv_block_fusion2_act')(x)

    x = Conv2D(data_format='channels_last',
               trainable=True,
               filters=1,
               kernel_size=(5, 5),
               strides=(1, 1),
               kernel_initializer='he_normal',
               padding='same',
               kernel_regularizer=l2(base_weight_decay),
               use_bias=True,
               activation=None,
               name='conv_block_fusion3')(x)
    x_output = Activation('relu', name='conv_block_fusion3_act')(x)

    if output_ROI_mask:
        rgr_output = 'den_map_roi'
        output = Multiply(name=rgr_output)([x_output, output_masks])
        print('Layer name of regression output: {}'.format(rgr_output))
        model = Model(inputs=[
            input_patches1_s0, input_patches1_s1, input_patches1_s2,
            input_patches2_s0, input_patches2_s1, input_patches2_s2,
            input_patches3_s0, input_patches3_s1, input_patches3_s2,
            input_patches4_s0, input_patches4_s1, input_patches4_s2,
            input_depth_maps_v1, input_depth_maps_v2, input_depth_maps_v3,
            input_depth_maps_v4, output_masks
        ],
                      outputs=[output],
                      name=net_name)
    else:
        model = Model(
            inputs=[
                input_patches1_s0,
                input_patches1_s1,
                input_patches1_s2,
                input_patches2_s0,
                input_patches2_s1,
                input_patches2_s2,
                input_patches3_s0,
                input_patches3_s1,
                input_patches3_s2,
                input_patches4_s0,
                input_patches4_s1,
                input_patches4_s2,
                input_depth_maps_v1,
                input_depth_maps_v2,
                input_depth_maps_v3,
                input_depth_maps_v4,
            ],
            outputs=[x_output],  #x1_7, x2_7, x3_7, x4_7,
            name=net_name + 'overall')

    print('Compiling ...')
    model.compile(optimizer=optimizer, loss='mse')
    return model
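
A minimal sketch of how this builder might be invoked in its source repository (the batch size and Adam settings here are assumptions, not values from the original):

from keras.optimizers import Adam

model = build_model_FCN_model_api(batch_size=1,
                                  optimizer=Adam(lr=1e-4),
                                  patch_size=(128, 128),
                                  output_ROI_mask=True)
model.summary()
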
Example #16
locnet = Sequential()
locnet.add(MaxPooling2D(pool_size=(2, 2), input_shape=input_shape))
locnet.add(Convolution2D(20, (3, 3), padding='same'))
locnet.add(Activation('linear'))
locnet.add(MaxPooling2D(pool_size=(2, 2)))

locnet.add(Flatten())
#locnet.add(Dense(100))
locnet.add(Activation('linear'))
locnet.add(Dense(9, weights=weights))
locnet.add(Activation('linear'))

#-----Now we add a STN layer which will output the transformed image-----#
transf1 = Sequential()
transf1.add(
    SpatialTransformer(localization_net=locnet,
                       output_size=(photo_height, photo_width, 1),
                       input_shape=input_shape))
from keras import optimizers
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
transf1.compile(loss='mse', optimizer=sgd, metrics=['mse'])
locnet.summary()
transf1.summary()

#-----LOAD DATASET-----#
X, y, X_val, y_val = read_train_set("dataset/train_set")
print "Train tensor shape: ", X.shape
print "Validation tensor shape: ", X_val.shape

#-----FITTING-----#

#-----Fit on batch. This is useful for GPU training to prevent Out Of Memory (OOM) error-----#
def train_net(config_file):
    net = SpatialTransformer(config_file)

    X = T.tensor4(name='input_tensor')
    y = T.ivector(name='label_vector')
    X_shared = theano.shared(np.zeros(net.input_tensor_shape, 'float32'),
                             'input')
    y_shared = theano.shared(np.zeros(net.output_shape, 'int32'), 'output')
    lr = theano.shared(net.learning_rate, 'learning rate')

    net.set_training_status(True)
    pred = net(X)
    cost = net.build_cost(pred, y)
    updates = net.build_updates(cost, net.trainable, **{'learning_rate': lr})
    error_rate = metrics.mean_classification_error(pred, y) * 100.
    train = net.compile([], [cost, error_rate],
                        updates=updates,
                        givens={
                            X: X_shared,
                            y: y_shared
                        })

    net.set_training_status(False)
    pred = net(X)
    trans = net.inference_trans_net(X)
    error_rate = metrics.mean_classification_error(pred, y) * 100.
    evaluate = net.compile([], [error_rate, trans],
                           givens={
                               X: X_shared,
                               y: y_shared
                           })

    data = read_data.load_data(net.config['data']['path'])
    dm = utils.DataManager(config_file, (X_shared, y_shared))
    dm.training_set = (data['X_train'], data['y_train'])
    dm.num_train_data = data['num_examples_train']
    dm.testing_set = (data['X_valid'], data['y_valid'])
    dm.num_test_data = data['num_examples_valid']
    mon = monitor.Monitor(config_file)
    epoch = 0
    print('Start training...')
    while epoch < net.n_epochs:
        if (epoch + 1) % 20 == 0:
            new_lr = lr.get_value() * 0.7
            print('New LR: %f' % new_lr)
            lr.set_value(new_lr)

        batches = dm.get_batches(epoch, net.n_epochs, True, False)
        for X_, y_ in batches:
            dm.update_input((X_, y_))
            _cost, _error = train()
            mon.plot('training cost', _cost)
            mon.plot('training classification error', _error)
            mon.tick()

        if epoch % net.validation_frequency == 0:
            valid_batches = dm.get_batches(training=False)
            for X_, y_ in valid_batches:
                dm.update_input((X_, y_))
                _error, _trans = evaluate()
                mon.plot('validation classification error', _error)
            mon.save_image('transformed image', _trans[:10])
            mon.flush()
        epoch += 1
    print('Training ended.')
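
train_net is driven entirely by its configuration file; a typical entry point (the path is a hypothetical placeholder) would be:

if __name__ == '__main__':
    train_net('config.json')  # hypothetical config path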