Example #1
def makeConvLayers(inputLayer):
    # two conv-nets in parallel for feature learning:
    # one with fine resolution, the other with coarse resolution
    # (a usage sketch wrapping this into a classifier follows the function)
    # network to learn fine features
    convFine = Conv1D(filters=64, kernel_size=int(Fs/2), strides=int(Fs/16), padding='same', activation='relu', name='fConv1')(inputLayer)
    convFine = MaxPool1D(pool_size=8, strides=8, name='fMaxP1')(convFine)
    convFine = Dropout(rate=0.5, name='fDrop1')(convFine)
    convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv2')(convFine)
    convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv3')(convFine)
    convFine = Conv1D(filters=128, kernel_size=8, padding='same', activation='relu', name='fConv4')(convFine)
    convFine = MaxPool1D(pool_size=4, strides=4, name='fMaxP2')(convFine)
    fineShape = convFine.get_shape()
    convFine = Flatten(name='fFlat1')(convFine)
    
    # network to learn coarse features
    convCoarse = Conv1D(filters=32, kernel_size=Fs*4, strides=int(Fs/2), padding='same', activation='relu', name='cConv1')(inputLayer)
    convCoarse = MaxPool1D(pool_size=4, strides=4, name='cMaxP1')(convCoarse)
    convCoarse = Dropout(rate=0.5, name='cDrop1')(convCoarse)
    convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv2')(convCoarse)
    convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv3')(convCoarse)
    convCoarse = Conv1D(filters=128, kernel_size=6, padding='same', activation='relu', name='cConv4')(convCoarse)
    convCoarse = MaxPool1D(pool_size=2, strides=2, name='cMaxP2')(convCoarse)
    coarseShape = convCoarse.get_shape()
    convCoarse = Flatten(name='cFlat1')(convCoarse)
    
    # concatenate coarse and fine cnns
    mergeLayer = concatenate([convFine, convCoarse], name='merge')
    
    return mergeLayer, (coarseShape, fineShape)
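# Usage sketch (not from the original source): a plausible pre-training wrapper
# around makeConvLayers, consistent with the fineTuningNet function in Example #4
# below. Input, Dense, Dropout and Model are assumed to be imported from keras
# as in the surrounding snippets; n_feats and n_classes are placeholders.
def preTrainingNet(n_feats, n_classes):
    inLayer = Input(shape=(n_feats, 1), name='inLayer')
    mLayer, _ = makeConvLayers(inLayer)
    outLayer = Dropout(rate=0.5, name='mDrop1')(mLayer)
    outLayer = Dense(n_classes, activation='softmax', name='outLayer')(outLayer)
    network = Model(inLayer, outLayer)
    network.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return network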
def get_DenseNet_classifier():
    inputs = Input((CLASSIFY_INPUT_WIDTH, CLASSIFY_INPUT_HEIGHT, CLASSIFY_INPUT_DEPTH, CLASSIFY_INPUT_CHANNEL))
    x = Conv3D(DENSE_NET_INITIAL_CONV_DIM, (3, 3, 3), padding='same')(inputs)
    print('input')
    print(x.get_shape())

    for i in range(DENSE_NET_BLOCKS):
        x = dense_block(x)
        if i != DENSE_NET_BLOCKS - 1:
            x = transition_block(x)

    print('top')
    x = GlobalAveragePooling3D()(x)
    print(x.get_shape())

    if DENSE_NET_ENABLE_DROPOUT:
        x = Dropout(DENSE_NET_DROPOUT)(x)

    x = Dense(2, activation='softmax')(x)
    print(x.get_shape())

    model = Model(inputs=inputs, outputs=x)
    model.compile(optimizer=Adam(lr=TRAIN_CLASSIFY_LEARNING_RATE), loss='binary_crossentropy', metrics=['accuracy'])

    return model
def get_Inception_classifier():
    inputs = Input((CLASSIFY_INPUT_WIDTH, CLASSIFY_INPUT_HEIGHT, CLASSIFY_INPUT_DEPTH, CLASSIFY_INPUT_CHANNEL))
    print('inputs')
    print(inputs.get_shape())

    # Make inception base
    x = inception_base(inputs)

    for i in range(INCEPTION_BLOCKS):
        x = inception_block(x, filters=INCEPTION_KEEP_FILTERS)

        if (i + 1) % INCEPTION_REDUCTION_STEPS == 0 and i != INCEPTION_BLOCKS - 1:
            x = reduction_block(x, filters=INCEPTION_KEEP_FILTERS // 2)

    print('top')
    x = GlobalMaxPooling3D()(x)
    print(x.get_shape())
    x = Dropout(INCEPTION_DROPOUT)(x)
    x = Dense(2, activation='softmax')(x)
    print(x.get_shape())

    model = Model(inputs=inputs, outputs=x)
    model.compile(optimizer=Adam(lr=TRAIN_CLASSIFY_LEARNING_RATE), loss='binary_crossentropy', metrics=['accuracy'])

    return model
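# The module-level constants referenced by the two classifiers above (and by the
# multi-GPU and duplicated variants later in this listing) are not part of the
# snippets. Illustrative placeholder values, assumed here only so the code can be
# read in isolation (the real values come from the project's config):
CLASSIFY_INPUT_WIDTH = CLASSIFY_INPUT_HEIGHT = CLASSIFY_INPUT_DEPTH = 32
CLASSIFY_INPUT_CHANNEL = 1
DENSE_NET_INITIAL_CONV_DIM = 16
DENSE_NET_BLOCKS = 3
DENSE_NET_ENABLE_DROPOUT = True
DENSE_NET_DROPOUT = 0.5
INCEPTION_BLOCKS = 4
INCEPTION_REDUCTION_STEPS = 2
INCEPTION_KEEP_FILTERS = 64
INCEPTION_DROPOUT = 0.5
TRAIN_CLASSIFY_LEARNING_RATE = 1e-3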
Example #4
def fineTuningNet(n_feats, n_classes, preTrainedNet):
    inLayer = Input(shape=(n_feats, 1), name='inLayer')
    mLayer, (cShape, fShape) = makeConvLayers(inLayer)
    outLayer = Dropout(rate=0.5, name='mDrop1')(mLayer)
    
    # this is the network that learns temporal dependencies using LSTM
    # merge the outputs of last layers
    # reshape because LSTM layer needs 3 dims (None, 1, n_feats)
    outLayer = Reshape((1, int(fShape[1]*fShape[2] + cShape[1]*cShape[2])))(outLayer)
    outLayer = Bidirectional(LSTM(512, activation='relu', dropout=0.5, name='bLstm1'))(outLayer)
    outLayer = Reshape((1, int(outLayer.get_shape()[1])))(outLayer)
    outLayer = Bidirectional(LSTM(512, activation='relu', dropout=0.5, name='bLstm2'))(outLayer)
    outLayer = Dense(n_classes, activation='softmax', name='outLayer')(outLayer)
    
    network = Model(inLayer, outLayer)
    
    # now that we have the network, we will copy the weights from the pretrained network into this network
    allPreTrainLayers = dict([(layer.name, layer) for layer in preTrainedNet.layers])
    allFineTuneLayers = dict([(layer.name, layer) for layer in network.layers])
    
    allPreTrainLayerNames = [layer.name for layer in preTrainedNet.layers]
    # we don't need the input and output layers from the pretrained net, so discard them
    allPreTrainLayerNames = [l for l in allPreTrainLayerNames if l not in ['inLayer', 'outLayer']]
    
    # now set weights of fine tune network based on pre train network
    for l in allPreTrainLayerNames:
        allFineTuneLayers[l].set_weights(allPreTrainLayers[l].get_weights())
    
    network.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    
    return network
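# The weight transfer above depends only on matching layer names between the
# pre-trained and fine-tuning models. A minimal, self-contained sketch of the
# same by-name pattern using toy models (not the actual networks):
from keras.layers import Input, Dense
from keras.models import Model

srcIn = Input(shape=(10,), name='inLayer')
source = Model(srcIn, Dense(2, activation='softmax', name='outLayer')(Dense(4, name='sharedDense')(srcIn)))

tgtIn = Input(shape=(10,), name='inLayer')
target = Model(tgtIn, Dense(3, activation='softmax', name='outLayer')(Dense(4, name='sharedDense')(tgtIn)))

srcLayers = dict([(layer.name, layer) for layer in source.layers])
for layer in target.layers:
    # skip the input and the task-specific output layer, as fineTuningNet does
    if layer.name in srcLayers and layer.name not in ['inLayer', 'outLayer']:
        layer.set_weights(srcLayers[layer.name].get_weights())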
Example #5
def model_raw_sound(x_train, num_labels):
    model_input = x = Input(shape=x_train[0].shape)
    x = Conv1D(filters=16, kernel_size=filter_size)(x)
    x = activation()(x)
    x = MaxPooling1D(pool_size=4)(x)
    x = Dropout(0.2)(x)

    x = Conv1D(filters=32, kernel_size=filter_size)(x)
    x = activation()(x)
    x = MaxPooling1D(pool_size=4)(x)
    x = Dropout(0.2)(x)

    x = Conv1D(filters=64, kernel_size=filter_size)(x)
    x = activation()(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Dropout(0.2)(x)

    x = Conv1D(filters=128, kernel_size=filter_size)(x)
    x = activation()(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Dropout(0.2)(x)

    x = AveragePooling1D(pool_size=(int(x.get_shape()[1]), ))(x)

    x = Conv1D(filters=num_labels,
               kernel_size=1,
               padding='valid',
               activation='softmax' if num_labels > 1 else 'sigmoid')(x)

    model = Model(inputs=[model_input], outputs=[x])

    model.summary()
    return model
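# Assumed definitions for the globals used by model_raw_sound above; these are
# illustrative guesses, not the original script's values:
from keras.layers import ReLU
filter_size = 3
def activation():
    # the snippet calls activation() as a layer factory
    return ReLU()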
def get_Inception_classifier():
    inputs = Input((CLASSIFY_INPUT_WIDTH, CLASSIFY_INPUT_HEIGHT, CLASSIFY_INPUT_DEPTH, CLASSIFY_INPUT_CHANNEL))
    print('inputs')
    print(inputs.get_shape())

    # Make inception base
    x = inception_base(inputs)

    for i in range(INCEPTION_BLOCKS):
        x = inception_block(x, filters=INCEPTION_KEEP_FILTERS)

        if (i + 1) % INCEPTION_REDUCTION_STEPS == 0 and i != INCEPTION_BLOCKS - 1:
            x = reduction_block(x, filters=INCEPTION_KEEP_FILTERS // 2)

    print('top')
    x = GlobalMaxPooling3D()(x)
    print(x.get_shape())
    x = Dropout(INCEPTION_DROPOUT)(x)
    x = Dense(2, activation='softmax')(x)
    print(x.get_shape())

    model_s = Model(inputs=inputs, outputs=x)
    model = multi_gpu_model(model_s, gpus=4)
    model.compile(optimizer=Adam(lr=TRAIN_CLASSIFY_LEARNING_RATE), loss='binary_crossentropy', metrics=['accuracy'])

    return model,model_s
def get_DenseNet_classifier():
    inputs = Input((CLASSIFY_INPUT_WIDTH, CLASSIFY_INPUT_HEIGHT,
                    CLASSIFY_INPUT_DEPTH, CLASSIFY_INPUT_CHANNEL))
    x = Conv3D(DENSE_NET_INITIAL_CONV_DIM, (3, 3, 3), padding='same')(inputs)
    print('input')
    print(x.get_shape())

    for i in range(DENSE_NET_BLOCKS):
        x = dense_block(x)
        if i != DENSE_NET_BLOCKS - 1:
            x = transition_block(x)

    print('top')
    x = GlobalAveragePooling3D()(x)
    print(x.get_shape())

    if DENSE_NET_ENABLE_DROPOUT:
        x = Dropout(DENSE_NET_DROPOUT)(x)

    x = Dense(2, activation='softmax')(x)
    print(x.get_shape())

    model = Model(inputs=inputs, outputs=x)
    model.compile(optimizer=Adam(lr=TRAIN_CLASSIFY_LEARNING_RATE),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    return model
Example #8
def residual_projectionNet(depth=3,
                           nb_filters=512,
                           input_shape=(64, 64, 1),
                           dropout=0):

    # Let's make a really deep CNN
    input_img = Input(shape=input_shape)

    model = Convolution2D(nb_filters,
                          3,
                          3,
                          activation='relu',
                          border_mode='valid')(input_img)
    if dropout > 0: model = Dropout(dropout)(model)

    for i in range(depth - 2):
        model = Convolution2D(nb_filters,
                              3,
                              3,
                              activation='relu',
                              border_mode='valid')(model)
        if dropout > 0: model = Dropout(dropout)(model)

    model = Convolution2D(1, 3, 3, border_mode='valid')(model)

    crop_amount = int(int(input_img.get_shape()[1] - model.get_shape()[1]) / 2)

    crop = Cropping2D(cropping=((crop_amount, crop_amount),
                                (crop_amount, crop_amount)))(input_img)

    merge1 = merge([crop, model], mode='sum')

    final_model = Model(input=[input_img], output=[merge1])
    return final_model
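# residual_projectionNet above uses the Keras 1 API (positional kernel sizes,
# border_mode, merge(mode='sum'), Model(input=..., output=...)). A sketch of the
# same architecture written against the Keras 2 API, assumed equivalent and not
# taken from the original code:
from keras.layers import Input, Conv2D, Dropout, Cropping2D, Add
from keras.models import Model

def residual_projectionNet_k2(depth=3, nb_filters=512, input_shape=(64, 64, 1), dropout=0):
    input_img = Input(shape=input_shape)
    x = Conv2D(nb_filters, (3, 3), activation='relu', padding='valid')(input_img)
    if dropout > 0:
        x = Dropout(dropout)(x)
    for i in range(depth - 2):
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='valid')(x)
        if dropout > 0:
            x = Dropout(dropout)(x)
    x = Conv2D(1, (3, 3), padding='valid')(x)
    # crop the input so its spatial size matches the convolved output, then add
    crop_amount = (int(input_img.shape[1]) - int(x.shape[1])) // 2
    crop = Cropping2D(cropping=((crop_amount, crop_amount),
                                (crop_amount, crop_amount)))(input_img)
    return Model(inputs=input_img, outputs=Add()([crop, x]))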
    def build(self):
        data = Input(batch_shape=self.data_shape)

        net = self._stem(data)

        print('_stem  ', net.get_shape().as_list())
        net = self.build_inception_resnet_a(net=net, n_itsrs=5)
        print('build_inception_resnet_a ', net.get_shape().as_list())
        net = self._reductionA(net=net, out_lst=[384, 256, 384, 256])
        print('_reductionA  ', net.get_shape().as_list())

        net = self.build_inception_resnet_b(net, 10)
        print('build_inception_resnet_b  ', net.get_shape().as_list())
        net = self._reductionB(net)
        print('_reductionB  ', net.get_shape().as_list())

        net = self.build_inception_resnet_c(net, 5)
        print('_reductionC  ', net.get_shape().as_list())

        net = AveragePooling2D([8, 8], strides=[1, 1], padding='valid')(net)
        print('AveragePooling2D  ', net.get_shape().as_list())
        net = Dropout(0.8)(net)
        net = Flatten()(net)
        net = Dense(self.n_classes, activation='softmax')(net)
        print('Dense  ', net.get_shape().as_list())
        model = Model(inputs=data, outputs=net, name=self.model)
        model.summary()
        return model
Example #10
 def deconve(layer_input, skip_input, filters, dropout_rate):
     f_size = 4
     u = UpSampling2D(size=2)(layer_input)
     u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
     u = Dropout(dropout_rate)(u)
     u = BatchNormalization(momentum=0.8)(u)
     # spatial size difference between the upsampled tensor and the skip connection
     cw = (u.get_shape()[2] - skip_input.get_shape()[2]).value
     ch = (u.get_shape()[1] - skip_input.get_shape()[1]).value
     # crop the upsampled tensor to the skip connection's size, putting the extra
     # row/column (if the difference is odd) on the bottom/right side
     crop_u = Cropping2D(cropping=((int(ch/2), int(ch/2) + (ch % 2)), (int(cw/2), int(cw/2) + (cw % 2))))(u)
     u = Concatenate()([crop_u, skip_input])
     return u
Example #11
 def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0,strides=1):
     """Layers used during upsampling"""
     u = UpSampling2D(size=2)(layer_input)
     u = Conv2D(filters, kernel_size=f_size, strides=strides, padding='same', activation='relu')(u)
     if dropout_rate:
         u = Dropout(dropout_rate)(u)
     u = InstanceNormalization()(u)
     print("u",u.get_shape().as_list())
     print("skip_input",skip_input.get_shape().as_list())
     u = Concatenate()([u, skip_input])
     return u
def get_generator(im_shape, nchannel=(1, ), num_class=2, weight_decay=0.0005):
    """
    ## prepare inputs
    """
    # im shape
    if global_channel_axis == 1:
        im_shape = list(nchannel) + list(im_shape)
    else:
        im_shape = list(im_shape) + list(nchannel)
    # l2 regularizer
    kernel_regularizer = regularizers.l2(weight_decay)
    # get input
    inputs = Input(shape=im_shape, name='main_input')
    """
    ## before downsampling
    """
    conv_0_before_dsampling = Conv3D(
        4, (3, 3, 3),
        activation=None,
        padding='same',
        use_bias=False,
        kernel_initializer='he_normal',
        kernel_regularizer=kernel_regularizer)(inputs)
    conv_0_before_dsampling_b = BatchNormalization(
        axis=global_channel_axis, momentum=0.95)(conv_0_before_dsampling)
    conv_0_before_dsampling_b_relu = PReLU(
        shared_axes=global_pReLU_shared_axes)(conv_0_before_dsampling_b)
    """
    ## downsampling (shared branch)
    """
    ## 0 level
    # 2-conv resNet
    d0b = get_2conv_block_resNet(conv_0_before_dsampling_b_relu, 4, (3, 3, 3),
                                 kernel_regularizer)
    print("d0b: ", d0b.get_shape())
    ## 1 level
    # conv2-pooling
    d1a_t = Conv3D(8, (2, 2, 2),
                   strides=(2, 2, 2),
                   use_bias=False,
                   kernel_regularizer=kernel_regularizer)(d0b)
    d1a = PReLU(shared_axes=global_pReLU_shared_axes)(d1a_t)
    print("d1a: ", d1a.get_shape())
    # 2-conv resNet
    d1b = get_2conv_block_resNet(d1a, 8, (3, 3, 3), kernel_regularizer)
    print("d1b: ", d1b.get_shape())
    ## 2 level
    # conv2-pooling
    d2a_t = Conv3D(16, (2, 2, 2),
                   strides=(2, 2, 2),
                   use_bias=False,
                   kernel_regularizer=kernel_regularizer)(d1b)
    d2a = PReLU(shared_axes=global_pReLU_shared_axes)(d2a_t)
    print("d2a: ", d2a.get_shape())
    # 3-conv resNet
    d2b = get_3conv_block_resNet(d2a, 16, (3, 3, 3), kernel_regularizer)
    print("d2b: ", d2b.get_shape())
    """
    ## upsampling (classification branch)
    """
    ## up c: 1 level
    # upooling
    c1a_t = Deconv3D(8, (2, 2, 2),
                     strides=(2, 2, 2),
                     activation=None,
                     padding='valid',
                     kernel_initializer='he_normal',
                     kernel_regularizer=kernel_regularizer)(d2b)
    c1a = PReLU(shared_axes=global_pReLU_shared_axes)(c1a_t)
    print("c1a: ", c1a.get_shape())
    # concat: d1b + c1a
    concat_d1b_c1a_t = concatenate([d1b, c1a], axis=global_channel_axis)
    concat_d1b_c1a = change_num_featmap_1x1x1conv(concat_d1b_c1a_t, 8,
                                                  kernel_regularizer)
    c1b = get_2conv_block_resNet(concat_d1b_c1a, 8, (3, 3, 3),
                                 kernel_regularizer)
    print("c1b: ", c1b.get_shape())
    ## up c: 0 level
    # upooling
    c0a_t = Deconv3D(4, (2, 2, 2),
                     strides=(2, 2, 2),
                     activation=None,
                     padding='valid',
                     kernel_initializer='he_normal',
                     kernel_regularizer=kernel_regularizer)(c1b)
    c0a = PReLU(shared_axes=global_pReLU_shared_axes)(c0a_t)
    print("c0a: ", c0a.get_shape())
    # concat: d0b + c0a
    concat_d0b_c0a_t = concatenate([d0b, c0a], axis=global_channel_axis)
    concat_d0b_c0a = change_num_featmap_1x1x1conv(concat_d0b_c0a_t, 4,
                                                  kernel_regularizer)
    c0b = get_2conv_block_resNet(concat_d0b_c0a, 4, (3, 3, 3),
                                 kernel_regularizer)
    print("c0b: ", c0b.get_shape())
    ## fully connected: classification
    c0c_t = Conv3D(4, (3, 3, 3),
                   activation=None,
                   padding='same',
                   use_bias=False,
                   kernel_initializer='he_normal',
                   kernel_regularizer=kernel_regularizer)(c0b)
    c0c_t_b = BatchNormalization(axis=global_channel_axis,
                                 momentum=0.95)(c0c_t)
    c0c_t_b_rl = PReLU(shared_axes=global_pReLU_shared_axes)(c0c_t_b)
    c0c = Dropout(0.2)(c0c_t_b_rl)
    print("c0c: ", c0c.get_shape())
    c0d = Conv3D(1, (1, 1, 1),
                 activation='sigmoid',
                 padding='same',
                 kernel_initializer='he_normal',
                 kernel_regularizer=kernel_regularizer,
                 name='classification_output')(c0c)
    #c0d = Conv3D(num_class, (1, 1, 1), activation='softmax', padding='same', kernel_initializer='he_normal',
    #             kernel_regularizer=kernel_regularizer, name='classification_output')(c0c)
    print("c0d: ", c0d.get_shape())
    """
    ## finish model
    """
    model = Model(inputs=inputs, outputs=c0d)
    return model
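# The helper blocks called in get_generator (get_2conv_block_resNet,
# get_3conv_block_resNet, change_num_featmap_1x1x1conv) and the globals
# global_channel_axis / global_pReLU_shared_axes are not included in the snippet.
# A minimal sketch of what the 2-conv residual block might look like under the
# same conventions (Conv3D + BN + PReLU with an identity skip); this is an
# assumption, not the original implementation:
from keras.layers import Conv3D, BatchNormalization, PReLU, add

global_channel_axis = -1              # assumed channels-last
global_pReLU_shared_axes = [1, 2, 3]  # share PReLU slopes over the spatial axes

def get_2conv_block_resNet(x, n_filters, kernel_size, kernel_regularizer):
    y = x
    for _ in range(2):
        y = Conv3D(n_filters, kernel_size, padding='same', use_bias=False,
                   kernel_initializer='he_normal',
                   kernel_regularizer=kernel_regularizer)(y)
        y = BatchNormalization(axis=global_channel_axis, momentum=0.95)(y)
        y = PReLU(shared_axes=global_pReLU_shared_axes)(y)
    # identity skip connection around the two conv/BN/PReLU stages
    return add([x, y])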
Example #13
def residual_projectionNet2(depth=3,
                            nb_filters=512,
                            k1=16,
                            k2=1,
                            k3=8,
                            input_shape=(64, 64, 1),
                            dropout=0):

    # Let's make a really deep CNN
    input_img = Input(shape=input_shape)

    model = Convolution2D(nb_filters,
                          3,
                          3,
                          activation='relu',
                          border_mode='same')(input_img)
    if dropout > 0: model = Dropout(dropout)(model)

    for i in range(depth - 2):
        model = Convolution2D(nb_filters,
                              3,
                              3,
                              activation='relu',
                              border_mode='same')(model)
        if dropout > 0: model = Dropout(dropout)(model)

    model = Convolution2D(1, 3, 3, border_mode='same')(model)

    crop_amount = int(int(input_img.get_shape()[1] - model.get_shape()[1]) / 2)

    crop = Cropping2D(cropping=((crop_amount, crop_amount),
                                (crop_amount, crop_amount)))(input_img)

    merge1 = merge([crop, model], mode='sum')

    final_model = Model(input=[input_img], output=[merge1])
    return final_model


#
# def VNet(depth=3,nb_filters=32,input_shape=(256,256,1),dropout=0,sz_filters=3,theta=80,nrays=256):
#
# 	input_img = Input(shape=input_shape)
# 	input_img_y = Input(shape=input_shape)
# 	H_shape = (256,256)
# 	H = Input(shape=H_shape)
#
# 	G = K.tf.matmul(H,K.tf.reshape(input_img_y,(1,256*256)))
#
#
# 	def add_deep_stuff(x):
# 		x = Convolution2D(nb_filters,sz_filters,sz_filters, border_mode='same')(x)
# 		x = Convolution2D(nb_filters,sz_filters,sz_filters, border_mode='same')(x)
# 		x = Convolution2D(nb_filters,sz_filters,sz_filters, border_mode='same')(x)
# 		x = Convolution2D(input_shape(2),1,1, border_mode='same')(x)
# 		return x
#
# 	def add_scale_layer(x):
# 		# Getting a tensor with a single 1:
# 		single_one = GlobalMaxPooling2D()(x)
# 		single_one = Lambda(lambda x: x**0)(single_one)
# 		# Scaling parameter!
# 		trainable_scaling_parameter = Dense(1)(single_one)
# 		# Repmat this to the size of conv1
# 		size_conv1_output = x.get_shape()[1]*x.get_shape()[2]*x.get_shape()[3]
# 		repeated_scale = RepeatVector(int(size_conv1_output))(trainable_scaling_parameter)
# 		target_shape = (int(x.get_shape()[1]),int(x.get_shape()[2]),int(x.get_shape()[3]))
# 		reshaped_to_conv1_shape = Reshape(target_shape)(repeated_scale)
#
# 		# Finally multiply the scaling matrix with our convolution output:
# 		scaled_conv1_output = merge([x,reshaped_to_conv1_shape],mode='mul')
# 		return scaled_conv1_output
#
# 	def add_gradient_layer(x):
# 		inner = K.tf.matmul(H,K.tf.reshape(x,(1,256*256)))
# 		dif = K.tf.subtract(inner,G)
# 		gradient = K.tf.matmul(H,dif,adjoint_a=True)
# 		gradient = add_scale_layer(gradient)
# 		return gradient
#
#
#
# 	model = input_img
# 	# Do a couple blocks
# 	for i in range(depth):
# 		model = add_deep_stuff(model)
# 		gradient = add_gradient_layer(model)
# 		model = add_scale_layer(model)
# 		model = merge([model, gradient],mode='sum')
#
# 	final_model = Model(input=[input_img,input_img,y,H])
# 	return final_mode

# Hacky stuff to implement scaling layer
# input_shape=(64,64,1)
# input_img = Input(shape=input_shape)
# conv1_output = Convolution2D(64,3,3,activation='relu', border_mode='valid')(input_img)
#
# # Getting a tensor with a single 1:
# single_one = GlobalMaxPooling2D()(input_img)
# single_one = Lambda(lambda x: x**0)(single_one)
# # Scaling parameter!
# trainable_scaling_parameter = Dense(1)(single_one)
# # Repmat this to the size of conv1
# size_conv1_output = conv1_output.get_shape()[1]*conv1_output.get_shape()[2]*conv1_output.get_shape()[3]
# repeated_scale = RepeatVector(int(size_conv1_output))(trainable_scaling_parameter)
# target_shape = (int(conv1_output.get_shape()[1]),int(conv1_output.get_shape()[2]),int(conv1_output.get_shape()[3]))
# reshaped_to_conv1_shape = Reshape(target_shape)(repeated_scale)
#
# # Finally multiply the scaling matrix with our convolution output:
# scaled_conv1_output = merge([conv1_output,reshaped_to_conv1_shape],mode='mul')

# EoF #
def sampling(args):
    """Reparameterization trick: z = z_mu + sigma * eps, with eps ~ N(0, 1) and sigma = exp(z_log_var / 2)."""
    z_mu, z_log_var = args
    eps = tf.random_normal(K.shape(z_log_var), dtype=np.float32, mean=0., stddev=1.0, name='epsilon')
    z = z_mu + K.exp(z_log_var / 2) * eps
    return z



# Encoder network
inputs = Input(shape=input_shape, name='input')
x = Conv2D(filters, (3, 3), activation='relu', strides=2, padding='same')(inputs)
x = Conv2D(filters*2, (3, 3), activation='relu', strides=2, padding='same')(x)
x = Dropout(0.25)(x)

# Save the shape information for later decoder network
shape = x.get_shape().as_list()

x = Flatten()(x)
x = Dense(32, activation='relu')(x)
z_mu = Dense(z_dim, name='z_mu')(x)
z_log_var = Dense(z_dim, name='z_log_var')(x)
z = Lambda(sampling, name='z')([z_mu, z_log_var])
# Instantiate encoder model
encoder = Model(inputs, [z_mu, z_log_var, z], name='vae_encoder')
encoder.summary()



# Decoder network
z_inputs = Input(shape=(z_dim,), name='z_sampling')
x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(z_inputs)
Example #15
def unet_arch_2c(h, w):
  print("Model of size: %d %d" % (h, w))
  ch = 1 # 1
  inputs = Input(shape=(ch, h , w)) # 160 x 160
  ordering = 'th'  # 'th': (ch, h, w),  'tf': (h, w, ch)

  conv1 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(inputs)
  conv1 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv1)
  pool1 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv1)
  pool1 = Dropout(0.15)(pool1)
  print 'pool1', pool1.get_shape()

  conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(pool1)
  conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv2)
  pool2 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv2)
  pool2 = Dropout(0.25)(pool2)
  print 'pool2', pool2.get_shape()

  conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(pool2)
  conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv3)
  pool3 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv3)
  pool3 = Dropout(0.4)(pool3)
  print 'pool3', pool3.get_shape()

  conv4 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(pool3)
  conv4 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv4)
  print 'conv4', conv4.get_shape()
  pool4 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv4)
  pool4 = Dropout(0.5)(pool4)
  print 'pool4', pool4.get_shape()

  conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(pool4)
  conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv5)
  # pool5 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv5) # 5x5
  # pool5 = Dropout(0.5)(pool5)
  print 'conv5', conv5.get_shape()


  up1 = UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv5)
  #print 'up1', up1.get_shape()
  up1 = merge([up1, conv4], mode='concat', concat_axis=1)
  #up1 = merge([(UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv5)), pool4], mode='concat', concat_axis=1)
  up1 = Dropout(0.4)(up1)
  print 'up1', up1.get_shape()
  conv8 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(up1)
  conv8 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv8)
  print 'conv8', conv8.get_shape()

  up2 = UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv8)
  up2 = merge([up2, conv3], mode='concat', concat_axis=1)
  #up2 = merge([UpSampling2D(size=(2, 2))(conv8), conv3], mode='concat', concat_axis=1)
  up2 = Dropout(0.25)(up2)
  print 'up2',up2.get_shape()
  conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(up2)
  conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv9)
  print 'conv9',conv9.get_shape()  # 7,80,32
  print 'conv2',conv2.get_shape()  # 1,160,16

  up3 = UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv9)   # 14, 160, 32
  up3 = merge([up3, conv2], mode='concat', concat_axis=1)
  #up3 = merge([UpSampling2D(size=(2, 2))(conv9), conv2], mode='concat', concat_axis=1)
  up3 = Dropout(0.15)(up3)
  print 'up3',up3.get_shape()
  conv10 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(up3)
  conv10 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv10)
  print 'conv10',conv10.get_shape()

  up4 = UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv10)
  up4 = merge([up4, conv1], mode='concat', concat_axis=1)
  #up4 = merge([UpSampling2D(size=(2, 2))(conv10), conv1], mode='concat', concat_axis=1)
  up4 = Dropout(0.15)(up4)
  conv11 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(up4)
  conv11 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv11)

  predictions = Convolution2D(ch, 1, 1, activation='sigmoid', init='he_normal',dim_ordering=ordering)(conv11)

  model = Model(input=inputs, output=predictions)
  model.summary()
  #plot(model, "model.png")
  return model
Example #16
filt_frac_img = [0.25,0.25]
no_of_filters = [50, 50, 20] 
dropout = 0.1
one_d = 200
#reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=factor,
#                              patience=patience, min_lr=min_lr, verbose = 1)

#model = Sequential()
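# Assumed placeholder values for the globals this script relies on but does not
# define in the snippet (illustrative only, not the original experiment settings):
D, WL = 8, 256               # number of electrodes x window length
no_of_outputs = 4            # number of output classes
lr = 1e-3                    # learning rate for Adam
col_list = list(range(D))    # electrode/column selection used in the filename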
x =  Input(shape = (D,WL,1))
y = Conv2D(no_of_filters[0], (1,int(WL * filt_frac_img[0])))(x)
y = MaxPooling2D((1,2))(y)
y = Dropout(rate = dropout, seed = 1)(y)
y = Conv2D(no_of_filters[1],(1,int(WL * filt_frac_img[1])))(y)
y = MaxPooling2D((1,2))(y)
y = Dropout(rate = dropout, seed = 1)(y)
temp = Conv2D(no_of_filters[2],(1,y.get_shape().as_list()[2]))(y)
temp = Conv2D(one_d, (D,1))(temp)
temp = Reshape((-1,))(temp)
pred_out = Dense(no_of_outputs, activation = 'softmax')(temp)
model = Model(x, pred_out)
model.compile(loss = 'categorical_crossentropy', optimizer = Adam(lr = lr))
print(model.summary())
epochs = 200
batch_size = 10
training = 1
filename = 'WL=' + str(WL) + '_labels_' + str(no_of_outputs) + '_electrodes_' + str(col_list) + '_lr_' + str(lr) + '_filter_no._' + str(no_of_filters) +  '_filt_frac_img_'  + str(filt_frac_img) + '_drop_' + str(dropout) + '_one_d_' + str(one_d) + '_epochs_' + str(epochs)
with open('./arch_reports/' + filename + '_report.txt','w') as fh:
# Pass the file handle in as a lambda function to make it callable
    model.summary(print_fn=lambda x: fh.write(x + '\n'))

Example #17
def draw_capsnet_model(hyper_param, embedding_matrix=None, verbose=True):
    """
    Input: hyperparameter dictionary
    
    Construct:
        input layers : x, x_pos (optional), x_capital (optional)
        embedding matrix : GloVe weights if use_glove, else random initialization
        conv1 : first convolution layer
        primarycaps : conv2 with the squash function applied
        ner_caps : 8 NER capsules of the specified dimension
        out_pred : lengths of the 8 NER capsules as probability predictions over the 8 NER classes
    
    Returns: 
        if decoding/reconstruction disabled --> a single keras.models.Model object
        if decoding/reconstruction enabled --> three keras.models.Model objects
    """

    # input layer(s)
    x = Input(shape=(hyper_param['maxlen'], ), name='x')
    if hyper_param['use_pos_tags']:
        x_pos = Input(shape=(hyper_param['maxlen'], hyper_param['poslen']),
                      name='x_pos')
    if hyper_param['use_capitalization_info']:
        x_capital = Input(shape=(hyper_param['maxlen'],
                                 hyper_param['capitallen']),
                          name='x_capital')

    # embedding matrix
    if hyper_param['use_glove']:
        embed = Embedding(hyper_param['max_features'], hyper_param['embed_dim'], weights=[embedding_matrix],\
                          input_length=hyper_param['maxlen'], trainable=hyper_param['allow_glove_retrain'])(x)
    else:
        embed = Embedding(hyper_param['max_features'], hyper_param['embed_dim'], input_length=hyper_param['maxlen'],\
                          embeddings_initializer="random_uniform" )(x)

    # concat embeddings with additional features
    if hyper_param['use_pos_tags'] and hyper_param['use_capitalization_info']:
        embed = Concatenate(axis=-1)([embed, x_pos, x_capital])
    elif hyper_param['use_pos_tags'] and (
            not hyper_param['use_capitalization_info']):
        embed = Concatenate(axis=-1)([embed, x_pos])
    elif (not hyper_param['use_pos_tags']
          ) and hyper_param['use_capitalization_info']:
        embed = Concatenate(axis=-1)([embed, x_capital])
    else:
        embed = embed

    # add dropout here
    if hyper_param['embed_dropout'] > 0.0:
        embed = SpatialDropout1D(hyper_param['embed_dropout'])(embed)

    # feed embeddings into conv1
    conv1 = Conv1D( filters=hyper_param['conv1_filters'], \
                   kernel_size=hyper_param['conv1_kernel_size'],\
                   strides=hyper_param['conv1_strides'], \
                   padding=hyper_param['conv1_padding'],\
                   activation='relu', name='conv1')(embed)

    # make primary capsules
    if hyper_param['use_2D_primarycaps']:
        convShape = conv1.get_shape().as_list()
        conv1 = Reshape((convShape[1], convShape[2], 1))(conv1)
        primaryCapLayer = PrimaryCap
    else:
        primaryCapLayer = PrimaryCap1D

    # make primary capsules
    primarycaps = primaryCapLayer(conv1, \
                             dim_capsule=hyper_param['primarycaps_dim_capsule'],\
                             n_channels=hyper_param['primarycaps_n_channels'],\
                             kernel_size=hyper_param['primarycaps_kernel_size'], \
                             strides=hyper_param['primarycaps_strides'], \
                             padding=hyper_param['primarycaps_padding'])

    # make ner capsules
    ner_caps = CapsuleLayer(num_capsule=hyper_param['ner_classes'], \
                            dim_capsule=hyper_param['ner_capsule_dim'], \
                            routings=hyper_param['num_dynamic_routing_passes'], \
                            name='nercaps')(primarycaps)

    # replace each ner capsule with its length
    out_pred = Length(name='out_pred')(ner_caps)

    if verbose:
        print("x", x.get_shape())
        if hyper_param['use_pos_tags']: print("x_pos", x_pos.get_shape())
        if hyper_param['use_capitalization_info']:
            print("x_capital", x_capital.get_shape())
        print("embed", embed.get_shape())
        print("conv1", conv1.get_shape())
        print("primarycaps", primarycaps.get_shape())
        print("ner_caps", ner_caps.get_shape())
        print("out_pred", out_pred.get_shape())

    if hyper_param['use_decoder']:
        decoder_y_cat = Input(shape=(hyper_param['ner_classes'], ),
                              name='decoder_y_cat')
        masked_by_y = Mask(name='masked_by_y')(
            [ner_caps,
             decoder_y_cat])  # true label is used to mask during training
        masked = Mask()(
            ner_caps)  # mask using capsule with maximal length for prediction

        # decoder for training
        train_decoder_dense1 = Dense(hyper_param['decoder_feed_forward_1'], activation='relu',\
                               input_dim=hyper_param['ner_capsule_dim']*hyper_param['ner_classes'],\
                               name='train_decoder_dense1')(masked_by_y)
        train_decoder_dense1_dropout = Dropout(
            hyper_param['decoder_dropout'])(train_decoder_dense1)
        train_decoder_dense2 = Dense(hyper_param['decoder_feed_forward_2'], activation='relu',\
                                     name='train_decoder_dense2')(train_decoder_dense1_dropout)
        train_decoder_dense2_dropout = Dropout(
            hyper_param['decoder_dropout'])(train_decoder_dense2)
        train_decoder_output = Dense(hyper_param['embed_dim'], activation=None,\
                                     name='train_decoder_output')(train_decoder_dense2_dropout)

        # decoder for evaluation (prediction)
        eval_decoder_dense1 = Dense(hyper_param['decoder_feed_forward_1'], activation='relu',\
                               input_dim=hyper_param['ner_capsule_dim']*hyper_param['ner_classes'],\
                               name='eval_decoder_dense1')(masked)
        eval_decoder_dense2 = Dense(hyper_param['decoder_feed_forward_2'], activation='relu',\
                                     name='eval_decoder_dense2')(eval_decoder_dense1)
        eval_decoder_output = Dense(hyper_param['embed_dim'], activation=None,\
                                     name='eval_decoder_output')(eval_decoder_dense2)

        if verbose:
            print("Decoder model enabled for GloVe vector deconstruction...")
            print("decoder_y_cat", decoder_y_cat.get_shape())
            print("masked_by_y", masked_by_y.get_shape())
            print("train_decoder_dense1", train_decoder_dense1.get_shape())
            print("train_decoder_dense1_dropout",
                  train_decoder_dense1_dropout.get_shape())
            print("train_decoder_dense2", train_decoder_dense2.get_shape())
            print("train_decoder_dense2_dropout",
                  train_decoder_dense2_dropout.get_shape())
            print("train_decoder_output", train_decoder_output.get_shape())
            print("masked", masked.get_shape())
            print("eval_decoder_dense1", eval_decoder_dense1.get_shape())
            print("eval_decoder_dense2", eval_decoder_dense2.get_shape())
            print("eval_decoder_output", eval_decoder_output.get_shape())

    # construct input list
    if hyper_param['use_pos_tags'] and hyper_param['use_capitalization_info']:
        input_list = [x, x_pos, x_capital]
    elif hyper_param['use_pos_tags'] and (
            not hyper_param['use_capitalization_info']):
        input_list = [x, x_pos]
    elif (not hyper_param['use_pos_tags']
          ) and hyper_param['use_capitalization_info']:
        input_list = [x, x_capital]
    else:
        input_list = [x]

    if hyper_param['use_decoder'] == False:
        print("decoder/reconstruction DISabled")
        print("returning 1 model")
        return Model(inputs=input_list, outputs=[out_pred])
    else:
        train_model = Model(inputs=input_list + [decoder_y_cat],
                            outputs=[out_pred, train_decoder_output])
        eval_model = Model(inputs=input_list,
                           outputs=[out_pred, eval_decoder_output])
        print("decoder/reconstruction enabled")
        print("returning a list of 2 models: train_model, eval_model")
        return train_model, eval_model
            print graph_in.get_shape()

            conv_11 = Convolution2D(nb_filter=feature_map, nb_row=filter_sizes[0], nb_col=col_size, border_mode='valid', activation='relu')(graph_in)
            conv_22 = Convolution2D(nb_filter=feature_map, nb_row=filter_sizes[1], nb_col=col_size, border_mode='valid', activation='relu')(graph_in)
            conv_33 = Convolution2D(nb_filter=feature_map, nb_row=filter_sizes[2], nb_col=col_size, border_mode='valid', activation='relu')(graph_in)

            conv_11 = MaxPooling2D(pool_size=(int(conv_11.get_shape()[1]),int(conv_11.get_shape()[2])))(conv_11)
            conv_22 = MaxPooling2D(pool_size=(int(conv_22.get_shape()[1]),int(conv_22.get_shape()[2])))(conv_22)
            conv_33 = MaxPooling2D(pool_size=(int(conv_33.get_shape()[1]),int(conv_33.get_shape()[2])))(conv_33)


            conva = merge([conv_11, conv_22, conv_33], mode='concat',concat_axis=-1)
            
            conva = Dropout(dropout_prob[1])(conva)

            print conva.get_shape()
            #model-2
            out = Reshape((3*128,))(conva)
            out = Dense(hidden_dims[0], activation='relu', W_regularizer=l2(0.02))(out)
            out = Dropout(dropout_prob[1])(out)
            out = Dense(hidden_dims[1], activation='relu', W_regularizer=l2(0.02))(out)
            out = Dropout(dropout_prob[1])(out)
            out = Dense(hidden_dims[2], activation='softmax')(out)

            total = Model(input=word_input, output=out)

            sgd1=keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06)

            total.compile(optimizer = sgd1, loss='categorical_crossentropy', metrics=["accuracy"])
            earlyStopping = EarlyStopping(monitor='val_loss', patience=8, verbose=1, mode='auto')
            load_file = "/home/wxr/hyx/trec_model/trec_cnn_lstm_model_weights_col_size_train_405_nothighway.h5"
Example #19
def segnet_arch_2c_rgb(h, w):
  print("Model of size: %d %d" % (h, w))
  ch = 3 # 1
  inputs = Input(shape=(ch, h , w))
  ordering = 'th'  # 'th': (ch, h, w),  'tf': (h, w, ch)
  #             0       1      2      3    4     5      6     7      8
  dropouts = [0.37,  0.51,   0.34,  0.48,  1,   0.48, 0.28, 0.78,  0.8]

  conv1 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(inputs)
  print 'conv1', conv1.get_shape()
  conv1 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv1)
  print 'conv1.', conv1.get_shape()
  pool1 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv1)
  pool1 = Dropout(dropouts[0])(pool1)
  print 'pool1', pool1.get_shape()

  conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(pool1)
  print 'conv2', conv2.get_shape()
  conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv2)
  print 'conv2.', conv2.get_shape()
  pool2 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv2)
  pool2 = Dropout(dropouts[1])(pool2)
  print 'pool2', pool2.get_shape()

  conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(pool2)
  print 'conv3', conv3.get_shape()
  conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv3)
  print 'conv3.', conv3.get_shape()
  pool3 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv3)
  pool3 = Dropout(dropouts[2])(pool3)  #changed from 0.4 to 0.25
  print 'pool3', pool3.get_shape()

  conv4 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(pool3)
  print 'conv4', conv4.get_shape()
  conv4 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv4)
  print 'conv4.', conv4.get_shape()
  pool4 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv4)
  pool4 = Dropout(dropouts[3])(pool4)  #changed from 0.5 to 0.25
  print 'pool4', pool4.get_shape()

  conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(pool4)
  print 'conv5', conv5.get_shape()
  conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv5)
  print 'conv5.', conv5.get_shape()


  up1 = UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv5)
  print 'up1 upsampling2D:', up1.get_shape()
  up1 = merge([up1, conv4], mode='concat', concat_axis=1)
  #up1 = merge([(UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv5)), pool4], mode='concat', concat_axis=1)
  up1 = Dropout(dropouts[4])(up1)
  print 'up1 merge conv4', up1.get_shape()
  conv8 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(up1)
  print 'conv8', conv8.get_shape()
  conv8 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv8)
  print 'conv8.', conv8.get_shape()

  up2 = UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv8)
  print 'up2 upsampling2D:', up2.get_shape()
  up2 = merge([up2, conv3], mode='concat', concat_axis=1)
  #up2 = merge([UpSampling2D(size=(2, 2))(conv8), conv3], mode='concat', concat_axis=1)
  up2 = Dropout(dropouts[5])(up2)
  print 'up2 merge conv3',up2.get_shape()
  conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(up2)
  print 'conv9',conv9.get_shape()  # 7,80,32
  conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv9)
  print 'conv9.',conv9.get_shape()  # 7,80,32

  up3 = UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv9)   # 14, 160, 32
  print 'up3 upsampling2D:', up3.get_shape()
  up3 = merge([up3, conv2], mode='concat', concat_axis=1)
  #up3 = merge([UpSampling2D(size=(2, 2))(conv9), conv2], mode='concat', concat_axis=1)
  up3 = Dropout(dropouts[6])(up3)
  print 'up3 merge conv2',up3.get_shape()
  conv10 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(up3)
  print 'conv10',conv10.get_shape()
  conv10 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv10)
  print 'conv10.',conv10.get_shape()

  up4 = UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv10)
  print 'up4 upsampling2D:', up4.get_shape()
  up4 = merge([up4, conv1], mode='concat', concat_axis=1)
  #up4 = merge([UpSampling2D(size=(2, 2))(conv10), conv1], mode='concat', concat_axis=1)
  up4 = Dropout(dropouts[7])(up4)
  print 'up4 merge conv1',up4.get_shape()
  conv11 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(up4)
  print 'conv11',conv11.get_shape()
  conv11 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal',dim_ordering=ordering)(conv11)
  print 'conv11.',conv11.get_shape()

  #conv12 = Convolution2D(ch, 1, 1, activation='sigmoid', init='he_normal',dim_ordering=ordering)(conv11)
  conv12 = Convolution2D(1, 1, 1, activation='sigmoid', init='he_normal',dim_ordering=ordering)(conv11)
  print 'out',conv12.get_shape()

  predictions = K.argmax(conv12, axis=1)
  model = Model(input=inputs, output=[conv12])
  
  model.summary()
  #return model
  return model, predictions
Example #20
def unet_archPaper(h, w):
  print("Model of size: %d %d" % (h, w))

  inputs = Input((1, h , w)) # 160 x 160
  ordering = 'th'  # 'th': (ch, h, w),  'tf': (h, w, ch)

  conv_1 = Convolution2D(64, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(inputs)
  conv_2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(conv_1)
  print 'view conv2', conv_2.get_shape()
  pool1 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv_2)
  pool1 = Dropout(0.15)(pool1)
  print 'view pool1', pool1.get_shape()

  conv_3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(pool1)
  conv_4 = Convolution2D(128, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(conv_3)
  print '\nview conv4', conv_4.get_shape(), '< up-3'
  pool2 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv_4)
  pool2 = Dropout(0.25)(pool2)
  print 'view pool2', pool2.get_shape()

  conv_5 = Convolution2D(256, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(pool2)
  conv_6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(conv_5)
  print '\nview conv6', conv_6.get_shape(), '< up-2'
  pool3 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv_6)
  pool3 = Dropout(0.4)(pool3)
  print 'view pool3', pool3.get_shape()

  conv_7 = Convolution2D(512, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(pool3)
  conv_8 = Convolution2D(512, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(conv_7)
  print '\nview conv8', conv_8.get_shape(), '< up-1'
  pool4 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv_8)
  pool4 = Dropout(0.5)(pool4)
  print 'view pool4', pool4.get_shape()

  conv_9 = Convolution2D(1024, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(pool4)
  print '\nview conv9', conv_9.get_shape()
  conv_10 = Convolution2D(1024, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(conv_9)
  print 'view conv10', conv_10.get_shape()
  pool5 = MaxPooling2D(pool_size=(2, 2),dim_ordering=ordering)(conv_10) # 5x5
  pool5 = Dropout(0.5)(pool5)
  print 'view pool5', pool5.get_shape()

  ####################################################################################################
  up_1 = merge([UpSampling2D(size=(2, 2))(conv_8), pool5], mode='concat', concat_axis=1)
  print '\nview up1', up_1.get_shape()
  conv_12 = Convolution2D(512, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(up_1)
  conv_13 = Convolution2D(512, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(conv_12)

  pool6 = MaxPooling2D(pool_size=(2, 2), dim_ordering=ordering)(conv_13)  # 5x5
  pool6 = Dropout(0.5)(pool6)
  print 'view pool6', pool6.get_shape()

  ##################
  up_2 = merge([UpSampling2D(size=(2, 1))(conv_6), pool6], mode='concat', concat_axis=1)
  print '\nview up2', up_2.get_shape()
  conv_15 = Convolution2D(256, 3, 3, activation='relu', border_mode='same', init='he_normal')(up_2)
  conv_16 = Convolution2D(256, 3, 3, activation='relu', border_mode='same', init='he_normal')(conv_15)
  print 'view conv16', conv_16.get_shape()
  pool7 = Dropout(0.15)(conv_16)
  print 'view pool7', pool7.get_shape()

  ##################
  up_3 = merge([UpSampling2D(size=(2, 1))(conv_4), pool7], mode='concat', concat_axis=1)
  print '\nview up3', up_3.get_shape()
  conv_18 = Convolution2D(128, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(up_3)
  conv_19 = Convolution2D(128, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(conv_18)
  print 'view conv18', conv_18.get_shape()
  pool8 = Dropout(0.4)(conv_19)
  print 'view pool8', pool8.get_shape()

  ##################
  up_4 = merge([UpSampling2D(size=(2, 1))(conv_2), pool8], mode='concat', concat_axis=1)
  print 'view up4', up_4.get_shape()
  conv_21 = Convolution2D(64, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(up_4)
  print 'view conv9-1', conv_21.get_shape()
  conv_22 = Convolution2D(64, 3, 3, activation='relu', border_mode='same', init = 'he_normal')(conv_21)
  print 'view conv9', conv_22.get_shape()
  pool9 = Dropout(0.25)(conv_22)
  ##################################################################


  conv_23 = Convolution2D(1, 1, 1, activation='sigmoid', init = 'he_normal')(pool9)
  conv_24 = Convolution2D(1, 1, 1, activation='sigmoid', init = 'he_normal')(conv_23)
  print 'view conv10', conv_24.get_shape()

  model = Model(input=inputs, output=conv_24)
  #model = Model(input=inputs, output=conv12)
  model.summary()
  #plot(model, "model.png")
  return model
Example #21
def segnet_arch_2c(h,w,dropouts):
  
  print("Model of size: %d %d" % (h, w))
  ch = 1
  ordering = 'th' # 'th': (ch, h, w),  'tf': (h, w, ch)
  inputs = Input(shape=(ch, h, w))
  concat_axis = 1
  
  #              0       1      2      3    4     5      6     7      8
  #dropouts =  [0.37,   0.51,  0.34,  0.48,  1,  0.48,  0.28,  0.78,  0.8]
  #dropouts =  [[0.15,0.25,0.4,0.5,1,0.4,0.25,0.15,0.15]]

  conv1 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(inputs)
  conv2 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(conv1)
  pool1 = MaxPooling2D(pool_size=(2, 2), dim_ordering=ordering)(conv2)
  pool1 = Dropout(dropouts[0])(pool1)
  print 'pool1', pool1.get_shape()

  conv3 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(pool1)
  conv4 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(conv3)
  pool2 = MaxPooling2D(pool_size=(2, 2), dim_ordering=ordering)(conv4)
  pool2 = Dropout(dropouts[1])(pool2)
  print 'pool2', pool2.get_shape()

  conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(pool2)
  conv6 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(conv5)
  pool3 = MaxPooling2D(pool_size=(2, 2), dim_ordering=ordering)(conv6)
  pool3 = Dropout(dropouts[2])(pool3)
  print 'pool3', pool3.get_shape()

  conv7 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(pool3)
  conv8 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(conv7)
  print 'conv8', conv8.get_shape()
  pool4 = MaxPooling2D(pool_size=(2, 2), dim_ordering=ordering)(conv8)
  pool4 = Dropout(dropouts[3])(pool4)
  print 'pool4', pool4.get_shape()

  conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(pool4)
  conv10 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(
    conv9)
  #pool5 = MaxPooling2D(pool_size=(2, 2), dim_ordering=ordering)(conv10)  # 5x5
  #pool5 = Dropout(dropouts[4])(pool5)
  print 'conv10', conv10.get_shape()

  up1 = UpSampling2D(size=(2, 2), dim_ordering=ordering)(conv10)
  print 'up1 upsampling2D:', up1.get_shape()
  up1 = merge([up1, conv8], mode='concat', concat_axis=concat_axis)
  # up1 = merge([(UpSampling2D(size=(2, 2),dim_ordering=ordering)(conv5)), pool4], mode='concat', concat_axis=1)
  up1 = Dropout(dropouts[5])(up1)
  print 'up1', up1.get_shape()
  conv11 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(up1)
  conv12 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(
    conv11)
  print 'conv12', conv12.get_shape()

  up2 = UpSampling2D(size=(2, 2), dim_ordering=ordering)(conv12)
  print 'up2 upsampling2D:', up2.get_shape()
  up2 = merge([up2, conv6], mode='concat', concat_axis=concat_axis)
  # up2 = merge([UpSampling2D(size=(2, 2))(conv8), conv3], mode='concat', concat_axis=1)
  up2 = Dropout(dropouts[6])(up2)
  print 'up2', up2.get_shape()
  conv13 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(up2)
  conv14 = Convolution2D(32, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(
    conv13)
  print 'conv13', conv13.get_shape()  # 7,80,32
  print 'conv4', conv4.get_shape()  # 1,160,16

  up3 = UpSampling2D(size=(2, 2), dim_ordering=ordering)(conv14)  # 14, 160, 32
  print 'up3 upsampling2D:', up3.get_shape()
  up3 = merge([up3, conv4], mode='concat', concat_axis=concat_axis)
  # up3 = merge([UpSampling2D(size=(2, 2))(conv9), conv2], mode='concat', concat_axis=1)
  up3 = Dropout(dropouts[7])(up3)
  print 'up3', up3.get_shape()
  conv15 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(up3)
  conv16 = Convolution2D(16, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(
    conv15)
  print 'conv16', conv16.get_shape()

  up4 = UpSampling2D(size=(2, 2), dim_ordering=ordering)(conv16)
  print 'up4 upsampling2D:', up4.get_shape()
  up4 = merge([up4, conv2], mode='concat', concat_axis=concat_axis)
  # up4 = merge([UpSampling2D(size=(2, 2))(conv10), conv1], mode='concat', concat_axis=1)
  up4 = Dropout(dropouts[8])(up4)
  conv17 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(up4)
  conv18 = Convolution2D(8, 3, 3, activation='relu', border_mode='same', init='he_normal', dim_ordering=ordering)(conv17)
  print 'conv18 shape:', conv18.get_shape()
  #predictions = Convolution2D(ch, 1, 1, activation='sigmoid', init='he_normal',dim_ordering=ordering)(conv18) #old
  predictions = Convolution2D(ch, 1, 1, activation='sigmoid', init='he_normal',dim_ordering=ordering)(conv18) #old

  '''
  dense1 = Flatten()(conv19)
  print 'dense1 shape',dense1.get_shape()
  dense1 = Dropout(1)(dense1)
  
  predictions = Dense(input_dim=ch*1*1,output_dim =h*w,init = 'he_normal',activation = 'softmax')(dense1)
  print 'precision get shape',predictions.get_shape()
  '''
  model = Model(input=inputs, output=predictions)
  model.summary()
  #plot(model, "model.png")
  return model,predictions