class cnn_features:
    """Small CNN classifier whose 128-unit 'features' layer can be read
    back after training as a feature extractor."""

    def __init__(self, batch_size=5, epoch=10, classes=10):
        # Hyper-parameters only; the model itself is built lazily in train().
        self.batch_size = batch_size
        self.epoch = epoch
        self.classes = classes

    def train(self, train, label):
        """Build, compile and fit the CNN on `train`/`label`."""
        # (rows, cols, channels) taken from the training tensor itself.
        shape = (train.shape[1], train.shape[2], train.shape[3])

        self.model = Sequential()
        self.model.add(Conv2D(32, (3, 3), activation="relu",
                              input_shape=shape, padding='SAME'))
        self.model.add(MaxPooling2D((2, 2)))
        self.model.add(Dropout(0.2))

        self.model.add(Conv2D(16, (3, 3), activation='relu', padding='SAME'))
        self.model.add(MaxPooling2D((2, 2)))
        self.model.add(Dropout(0.2))

        # Collapse the channel axis with the custom pooling layer: the
        # target shape is the current shape with the last axis set to 1.
        cur_shape = self.model.get_output_shape_at(0)
        pooled_shape = list(cur_shape)
        pooled_shape[-1] = 1
        print(pooled_shape, cur_shape)
        self.model.add(
            channel_average_pooling(pooled_shape, input_shape=cur_shape))
        print("#####", self.model.get_output_shape_at(0))

        self.model.add(Flatten())
        # Named layer so features() can locate it later.
        self.model.add(Dense(128, activation='relu', name='features'))
        self.model.add(Dropout(0.2))

        self.model.add(Dense(self.classes, activation='softmax'))
        self.model.compile(optimizer=keras.optimizers.SGD(lr=0.01),
                           loss=keras.losses.categorical_crossentropy,
                           metrics=['accuracy'])
        history = self.model.fit(train, label,
                                 batch_size=self.batch_size,
                                 epochs=self.epoch)

    def predict(self, test):
        """Return class-probability predictions for `test`."""
        return self.model.predict(test)

    def features(self, data):
        """Return activations of the 'features' layer for `data`."""
        extractor = Model(input=self.model.input,
                          output=self.model.get_layer('features').output)
        return extractor.predict(data)
# Esempio n. 2
    def creat_convolution_layer(
        self,
        input_shape=None,
        input=None,
        convolution_filter_type=None,
        k=None,
    ):
        '''
        Build a convolution layer (wrapping keras Convolution2D) that
        supports both single-size and multi-size kernels.

        :param input_shape: shape of the previous layer
        :param input: the previous layer (keras tensor)
        :param convolution_filter_type: kernel spec; each inner list is one
            kernel size, e.g.
            1. multi-size:
                conv_filter_type = [[100,2,1,'valid',(k,1)],
                                    [100,4,1,'valid'],
                                    [100,6,1,'valid'],
                                   ]
            2. single size: one inner list, e.g. [[100,2,1,'valid',(k,1)]]
        :param k: the k of k-max-pooling
        :return: keras TensorVariable, output, output_shape
        '''
        from keras.layers import Convolution2D, MaxPooling2D
        from keras.models import Sequential

        assert input_shape is not None, 'input shape 不能为空!'
        if len(convolution_filter_type) == 1:
            # NOTE: this unpack overwrites the `k` parameter with the pool
            # size taken from the filter spec.
            nb_filter, nb_row, nb_col, border_mode, k = convolution_filter_type[
                0]
            # Single-size convolution layer followed by max pooling.
            output_layer = Sequential()
            output_layer.add(
                Convolution2D(nb_filter,
                              nb_row,
                              nb_col,
                              border_mode=border_mode,
                              input_shape=input_shape))
            output_layer.add(MaxPooling2D(pool_size=k))
            output = output_layer(input)
            output_shape = output_layer.get_output_shape_at(-1)
            # output_layer.summary()
        else:
            # Multi-size convolution layer: parallel branches, concatenated.
            output_layer = self.create_multi_size_convolution_layer(
                input_shape=input_shape,
                convolution_filter_type=convolution_filter_type,
                k=k,
            )
            output_shape = output_layer.get_output_shape_at(-1)
            # The same input tensor is fed to every parallel branch.
            output = output_layer([input] * len(convolution_filter_type))

        # Strip the batch axis from the reported shape.
        return output, output_shape[1:]
# Esempio n. 3
def init_downscale_model(input_size, sc, get_output_shape=False):
    """Build a one-layer AveragePooling3D downscaling model.

    With get_output_shape=True, return only the resulting output shape
    (batch axis stripped) instead of the model itself.
    """
    downscaler = Sequential()
    downscaler.add(AveragePooling3D(pool_size=sc,
                                    border_mode='valid',
                                    input_shape=input_size))
    if get_output_shape:
        return downscaler.get_output_shape_at(0)[1:]
    return downscaler
# Esempio n. 4
    def create_full_connected_layer(
            self,
            input= None,
            input_shape =None,
            units = None,
    ):
        '''
        Build a stack of fully connected (Dense) layers.

        :param input_shape: size (int) of the previous layer's output
        :param input: the previous layer (keras tensor)
        :param units: unit count of each dense layer, e.g. [100,20]
        :type units: array-like
        :return: output, output_shape
        '''

        from keras.models import Sequential
        from keras.layers import Dense

        output_layer = Sequential()
        # The first layer carries the input shape; every layer uses
        # glorot-uniform init and relu.
        output_layer.add(Dense(
            output_dim=units[0],
            init="glorot_uniform",
            activation='relu',
            input_shape = (input_shape,)
        ))

        for unit in units[1:]:
            output_layer.add(Dense(output_dim=unit, init="glorot_uniform", activation='relu'))

        # Apply the stack to the incoming tensor; strip the batch axis
        # from the reported output shape.
        output = output_layer(input)
        output_shape = output_layer.get_output_shape_at(-1)

        return output, output_shape[1:]
# Esempio n. 5
    def creat_convolution_layer(
            self,
            input_shape = None,
            input=None,
            convolution_filter_type=None,
            k =None,
    ):
        '''
        Build a convolution layer (wrapping keras Convolution2D) that
        supports both single-size and multi-size kernels.

        :param input_shape: shape of the previous layer
        :param input: the previous layer (keras tensor)
        :param convolution_filter_type: kernel spec; each inner list is one
            kernel size, e.g.
            1. multi-size:
                conv_filter_type = [[100,2,1,'valid',(k,1)],
                                    [100,4,1,'valid'],
                                    [100,6,1,'valid'],
                                   ]
            2. single size: one inner list, e.g. [[100,2,1,'valid',(k,1)]]
        :param k: the k of k-max-pooling
        :return: keras TensorVariable, output, output_shape
        '''
        from keras.layers import Convolution2D,MaxPooling2D
        from keras.models import Sequential

        assert input_shape is not None,'input shape 不能为空!'
        if len(convolution_filter_type) == 1:
            # NOTE: this unpack overwrites the `k` parameter with the pool
            # size taken from the filter spec.
            nb_filter, nb_row, nb_col, border_mode,k = convolution_filter_type[0]
            # Single-size convolution layer followed by max pooling.
            output_layer = Sequential()
            output_layer.add(Convolution2D(nb_filter, nb_row, nb_col, border_mode=border_mode,input_shape= input_shape))
            output_layer.add(MaxPooling2D(pool_size=k))
            output = output_layer(input)
            output_shape = output_layer.get_output_shape_at(-1)
            # output_layer.summary()
        else:
            # Multi-size convolution layer: parallel branches, concatenated.
            output_layer = self.create_multi_size_convolution_layer(
                input_shape=input_shape,
                convolution_filter_type=convolution_filter_type,
                k=k,
            )
            output_shape = output_layer.get_output_shape_at(-1)
            output = output_layer([input] * len(convolution_filter_type))

        # Strip the batch axis from the reported shape.
        return output,output_shape[1:]
def complex_cnn(output_channels, img_width, img_height):
    """Build the 'Complex CNN' classifier.

    Returns [model, long display name, short display name].
    """
    model = Sequential()

    # Input layout depends on the backend's image data format.
    if K.image_data_format() == 'channels_last':
        in_shape = (img_width, img_height, 1)
    else:
        in_shape = (1, img_width, img_height)
    model.add(Conv2D(32, kernel_size=(5, 5), strides=(1, 1),
                     activation='relu', input_shape=in_shape))

    model.add(Conv2D(32, kernel_size=(3, 3), strides=(1, 1), activation='relu'))
    model.add(Conv2D(64, kernel_size=(3, 3), strides=(1, 1), activation=None))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Conv2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu'))
    model.add(Conv2D(128, kernel_size=(3, 3), strides=(1, 1), activation=None))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Run a symbolic input through once so the current output shape is known.
    model(Input((img_width, img_height, 1)))
    shape = model.get_output_shape_at(-1)

    # Final conv whose kernel spans the whole remaining spatial extent.
    if K.image_data_format() == 'channels_last':
        model.add(Conv2D(128, kernel_size=(shape[1], shape[2]),
                         activation=None))
    else:
        model.add(Conv2D(128, kernel_size=(shape[2], shape[3]),
                         activation=None))

    model.add(Flatten())
    model.add(Dense(output_channels, activation='softmax'))

    model_name = f"Complex CNN {img_width}x{img_height} {output_channels}class"
    model_name_short = f"Complex CNN {img_width}x{img_height}"

    return [model, model_name, model_name_short]
# Esempio n. 7
def cifar_cnn(output_channels, img_width, img_height):
    """Build the CIFAR-style CNN classifier.

    Returns [model, long display name, short display name].
    """
    model = Sequential()

    # Initial padding; input layout depends on backend dim ordering.
    if K.image_dim_ordering() == 'tf':
        in_shape = (img_width, img_height, 1)
    else:
        in_shape = (1, img_width, img_height)
    model.add(ZeroPadding2D(padding=((2, 2), (2, 2)), input_shape=in_shape))

    model.add(Conv2D(32, kernel_size=(5, 5), strides=(1, 1), activation=None))
    model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Activation("relu"))

    # Two identical pad/conv/pad/avg-pool stages with 32 then 64 filters.
    for filters in (32, 64):
        model.add(ZeroPadding2D(padding=((2, 2), (2, 2))))
        model.add(Conv2D(filters, kernel_size=(5, 5), strides=(1, 1),
                         activation='relu'))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))

    shape = model.get_output_shape_at(-1)

    # Final conv whose kernel spans the whole remaining spatial extent.
    if K.image_dim_ordering() == 'tf':
        model.add(Conv2D(64, kernel_size=(shape[1], shape[2]),
                         activation='relu'))
    else:
        model.add(Conv2D(64, kernel_size=(shape[2], shape[3]),
                         activation='relu'))

    model.add(Flatten())
    model.add(Dense(output_channels, activation='softmax'))

    model_name = f"Cifar CNN {img_width}x{img_height} {output_channels}class"
    model_name_short = f"Cifar CNN {img_width}x{img_height}"

    return [model, model_name, model_name_short]
# Esempio n. 8
def make_cnn2(x_shape, cls_num, trainable=True):
    """1-D conv front end over a raw waveform, reshaped and fed to a
    2-D CNN classifier. Returns the compiled model."""
    model = Sequential()

    # Strided 1-D convolutions act as a learned filterbank.
    model.add(Conv1D(filters=80, kernel_size=(48, ), strides=32,
                     activation='relu', input_shape=(x_shape[0], 1)))
    model.add(Conv1D(filters=60, kernel_size=(32, ), strides=16,
                     activation='relu'))
    model.add(BatchNormalization())

    # Append a singleton channel axis so 2-D convolutions apply.
    out_shape = model.get_output_shape_at(0)
    model.add(Reshape(out_shape[1:] + (1, )))

    # Three conv stages, each ended by 2x2 max pooling and dropout.
    for filter_counts, kernel in (((8, 8), (2, 2)),
                                  ((16, 16), (3, 3)),
                                  ((32, ), (3, 3))):
        for filters in filter_counts:
            model.add(Conv2D(filters=filters, kernel_size=kernel,
                             activation='relu'))
        model.add(MaxPool2D(strides=(2, 2)))
        model.add(Dropout(0.2))

    model.add(Flatten())

    # Two dense+BN blocks, then the softmax head.
    for _ in range(2):
        model.add(Dense(128, activation='relu'))
        model.add(BatchNormalization())
    model.add(Dense(cls_num, activation='softmax'))

    model.trainable = trainable
    # NOTE(review): binary_crossentropy with a softmax over cls_num classes
    # looks suspect for multi-class targets — confirm the intended loss.
    model.compile(loss="binary_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])
    return model
# Esempio n. 9
def create_model(num_classes, num_filters=[64, 128, 256, 512], top_k=3, learning_rate=0.01, input_dim=69):
    """Character-level CNN with stacked conv blocks and k-max pooling.

    :param num_classes: number of output classes.
    :param num_filters: filter count per conv block (one block per entry).
    :param top_k: k used by the k-max pooling over the temporal axis.
    :param learning_rate: SGD learning rate.
    :param input_dim: vocabulary size for the embedding layer.
    :return: compiled keras Sequential model.
    """
    model = Sequential()
    # Bug fix: input_dim was accepted but ignored (Embedding hard-coded 69);
    # the default keeps previous behavior.
    model.add(Embedding(input_dim=input_dim, output_dim=16, input_length=1014, name='input_embedding'))
    model.add(Conv1D(filters=64, kernel_size=3, strides=2, padding="same"))

    for conv_filter in num_filters:
        # Two conv+BN+ReLU layers per block, then strided max pooling.
        conv_block = Sequential()
        conv_block.add(Conv1D(filters=conv_filter,
                              kernel_size=3,
                              strides=1,
                              padding='same',
                              input_shape=list(model.get_output_shape_at(0))[1:]))
        conv_block.add(BatchNormalization())
        conv_block.add(Activation('relu'))
        conv_block.add(Conv1D(filters=conv_filter,
                              kernel_size=3,
                              strides=1,
                              padding='same'))
        conv_block.add(BatchNormalization())
        conv_block.add(Activation('relu'))

        model.add(conv_block)
        model.add(MaxPooling1D(pool_size=3, strides=2, padding="same"))

    def _top_k(x):
        # k-max pooling: keep the top_k largest activations per filter.
        x = tf.transpose(x, [0, 2, 1])
        k_max = tf.nn.top_k(x, k=top_k)
        return tf.reshape(k_max[0], (-1, num_filters[-1] * top_k))

    model.add(Lambda(_top_k, output_shape=(num_filters[-1] * top_k,)))
    model.add(Dense(2048, activation='relu', kernel_initializer='he_normal'))
    model.add(Dropout(0.2, seed=23))
    model.add(Dense(2048, activation='relu', kernel_initializer='he_normal'))
    model.add(Dropout(0.2, seed=23))
    model.add(Dense(num_classes, activation='softmax', name='output_layer'))
    sgd = SGD(lr=learning_rate, decay=1e-6, momentum=0.9, nesterov=False)
    model.summary()
    # NOTE(review): mean_squared_error over a softmax output is unusual for
    # classification — confirm categorical_crossentropy was not intended.
    model.compile(optimizer=sgd, loss='mean_squared_error', metrics=['accuracy'])
    return model
# Esempio n. 10
    def create_full_connected_layer(
        self,
        input=None,
        input_shape=None,
        units=None,
    ):
        '''
        Build a stack of fully connected (Dense) layers.

        :param input_shape: size (int) of the previous layer's output
        :param input: the previous layer (keras tensor)
        :param units: unit count of each dense layer, e.g. [100,20]
        :type units: array-like
        :return: output, output_shape
        '''

        from keras.models import Sequential
        from keras.layers import Dense

        output_layer = Sequential()
        # The first layer carries the input shape; every layer uses
        # glorot-uniform init and relu.
        output_layer.add(
            Dense(output_dim=units[0],
                  init="glorot_uniform",
                  activation='relu',
                  input_shape=(input_shape, )))

        for unit in units[1:]:
            output_layer.add(
                Dense(output_dim=unit,
                      init="glorot_uniform",
                      activation='relu'))

        # Apply the stack to the incoming tensor; strip the batch axis
        # from the reported output shape.
        output = output_layer(input)
        output_shape = output_layer.get_output_shape_at(-1)

        return output, output_shape[1:]
# Esempio n. 11
def build_model(image_shape=(28, 28), embedding_size=50):
    """Build the embedding ('feat'), distance ('emd'), decoder ('unfeat')
    and joint ('dwe') models, returned as a dict."""
    s = image_shape[-1]

    # Encoder: two convs, flatten, then dense layers down to the embedding.
    feat = Sequential()
    feat.add(Conv2D(20, (3, 3), activation='relu', padding='same',
                    input_shape=(1, s, s), data_format='channels_first'))
    feat.add(Conv2D(5, (5, 5), activation='relu',
                    data_format='channels_first', padding='same'))
    feat.add(Flatten())
    feat.add(Dense(100))
    feat.add(Dense(embedding_size))

    inp1, inp2 = Input(shape=(1, s, s)), Input(shape=(1, s, s))

    # The SAME encoder processes both inputs (shared weights).
    feat1, feat2 = feat(inp1), feat(inp2)

    # Euclidean distance between the two embeddings.
    distance = Lambda(euclidean_distance,
                      output_shape=eucl_dist_output_shape)([feat1, feat2])

    feat.compile('sgd', 'mse')
    model = Model([inp1, inp2], distance)
    model.compile('adam', 'mse')

    # Decoder: embedding back to a normalized (softmax) image.
    unfeat = Sequential()
    emb_dim = feat.get_output_shape_at(0)[-1]
    unfeat.add(Dense(100, input_shape=(emb_dim, ), activation='relu'))
    unfeat.add(Dense(5 * s * s, activation='relu'))
    unfeat.add(Reshape((5, s, s)))
    unfeat.add(Conv2D(10, (5, 5), activation='relu',
                      data_format='channels_first', padding='same'))
    unfeat.add(Conv2D(1, (3, 3), activation='linear',
                      data_format='channels_first', padding='same'))
    unfeat.add(Flatten())
    unfeat.add(Activation('softmax'))  # samples are probabilities
    unfeat.add(Reshape((1, s, s)))

    uf1, uf2 = unfeat(feat1), unfeat(feat2)

    unfeat.compile('adam', 'kullback_leibler_divergence')

    # Joint model: distance loss + reconstruction KL + sparsity penalties.
    model2 = Model([inp1, inp2], [distance, uf1, uf2, uf1, uf2])
    model2.compile('adam', [
        'mse', kullback_leibler_divergence_, kullback_leibler_divergence_,
        sparsity_constraint, sparsity_constraint
    ],
                   loss_weights=[1, 1e1, 1e1, 1e-3, 1e-3])

    return {'feat': feat, 'emd': model, 'unfeat': unfeat, 'dwe': model2}
# Esempio n. 12
# 7th layer: 128-filter 3x3x3 convolution (channels_first), he_normal init,
# L2-regularized with the externally defined `reg`.
model.add(
    Convolution3D(filters=128,
                  kernel_size=[3, 3, 3],
                  data_format='channels_first',
                  kernel_initializer="he_normal",
                  padding='same',
                  kernel_regularizer=l2(reg)))
model.add(BatchNormalization())
model.add(ELU(alpha=1.0))
model.add(MaxPooling3D(pool_size=(2, 2, 1), data_format='channels_first'))
model.add(Dropout(.25))

# Reshaping spatio-temporal features to feed into LSTM layer: the last
# axis becomes the timestep axis, the remaining three are flattened per
# step. Assumes shape is (batch, ch, d1, d2, time) — TODO confirm.
shape = model.get_output_shape_at(0)
model.add(Reshape((shape[-1], shape[1] * shape[2] * shape[3])))

# LSTM layer over the reshaped feature sequence (all timesteps kept).
model.add(LSTM(512, return_sequences=True, kernel_initializer="he_normal"))
model.add(BatchNormalization())
model.add(ELU(alpha=1.0))
model.add(Dropout(.25))

# Flattening the output before the dense head.
model.add(Flatten())

# 1st dense layer
model.add(Dense(2048, kernel_initializer="he_normal"))
model.add(BatchNormalization())
model.add(ELU(alpha=1.0))
# Esempio n. 13
    def create_multi_size_convolution_layer(self,
                                            input_shape=None,
                                            convolution_filter_type=None,
                                            k=1,
                                            ):
        """
        Build a multi-kernel-size convolution model that can be added
        directly to a keras model:
            1. one Sequential per kernel size, each holding a 2D conv layer
               and a k-max-pooling layer
            2. the branch outputs are concatenated along axis 2, giving a
               set of parallel kernels
            3. a 4D tensor is returned

        Input must be 4D: (n_batch, channel, row, col).

        :param convolution_filter_type: one list per kernel size, giving the
            conv spec and max-pooling size

            for example:
                conv_filter_type = [[100,2,word_embedding_dim,'valid', (1, 1)],
                                    [100,4,word_embedding_dim,'valid', (1, 1)],
                                    [100,6,word_embedding_dim,'valid', (1, 1)],
                                   ]
        :type convolution_filter_type: array-like
        :param input_shape: input shape, 3D like an image (channel,row,col);
            e.g. (1,5,5) is a single-channel 5x5 image
        :type input_shape: array-like
        :param k: the k of the k-max layer
        :type k: int
        :return: convolution model, 4D-array
        :rtype: Sequential
        """

        assert len(
            input_shape) == 3, 'warning: 因为必须是一个4D的输入,(n_batch,channel,row,col),所以input shape必须是一个3D-array,(channel,row,col)!'

        from keras.layers import Convolution2D, Activation, MaxPooling2D, Merge
        from keras.models import Sequential
        # Build the conv + pooling branch for each kernel size.
        conv_layers = []
        for items in convolution_filter_type:

            # NOTE: this unpack overwrites the `k` parameter with the pool
            # spec from the filter entry.
            nb_filter, nb_row, nb_col, border_mode,k = items

            m = Sequential()
            m.add(Convolution2D(nb_filter,
                                nb_row,
                                nb_col,
                                border_mode=border_mode,
                                input_shape=input_shape,
                                ))
            m.add(Activation('relu'))

            # 1-max pooling: pool over the full (post-conv) row extent.
            if k[0] == 1:
                if border_mode == 'valid':
                    pool_size = (input_shape[1] - nb_row + 1, k[1])
                elif border_mode == 'same':
                    pool_size = (input_shape[1], k[1])
                else:
                    pool_size = (input_shape[1] - nb_row + 1, k[1])
                m.add(MaxPooling2D(pool_size=pool_size, name='1-max'))
            elif k[0] == 0:
                m.add(MaxPooling2D(pool_size=(2,  k[1])))
            else:
                # k-max pooling
                # todo
                # kmax relies on Lambda, and pickle cannot dump function
                # objects, so a model using this layer cannot be saved yet.
                m.add(self.kmaxpooling(k=k[0]))
            # m.summary()
            conv_layers.append(m)

        # Concatenate the branch outputs.
        cnn_model = Sequential()
        cnn_model.add(Merge(conv_layers, mode='concat', concat_axis=2))
        # cnn_model.summary()
        print(cnn_model.get_output_shape_at(-1))
        return cnn_model
# Esempio n. 14
    def create_multi_size_convolution_layer(
        self,
        input_shape=None,
        convolution_filter_type=None,
        k=1,
    ):
        """
        Build a multi-kernel-size convolution model that can be added
        directly to a keras model:
            1. one Sequential per kernel size, each holding a 2D conv layer
               and a k-max-pooling layer
            2. the branch outputs are concatenated along axis 2, giving a
               set of parallel kernels
            3. a 4D tensor is returned

        Input must be 4D: (n_batch, channel, row, col).

        :param convolution_filter_type: one list per kernel size, giving the
            conv spec and max-pooling size

            for example:
                conv_filter_type = [[100,2,word_embedding_dim,'valid', (1, 1)],
                                    [100,4,word_embedding_dim,'valid', (1, 1)],
                                    [100,6,word_embedding_dim,'valid', (1, 1)],
                                   ]
        :type convolution_filter_type: array-like
        :param input_shape: input shape, 3D like an image (channel,row,col);
            e.g. (1,5,5) is a single-channel 5x5 image
        :type input_shape: array-like
        :param k: the k of the k-max layer
        :type k: int
        :return: convolution model, 4D-array
        :rtype: Sequential
        """

        assert len(
            input_shape
        ) == 3, 'warning: 因为必须是一个4D的输入,(n_batch,channel,row,col),所以input shape必须是一个3D-array,(channel,row,col)!'

        from keras.layers import Convolution2D, Activation, MaxPooling2D, Merge
        from keras.models import Sequential
        # Build the conv + pooling branch for each kernel size.
        conv_layers = []
        for items in convolution_filter_type:

            # NOTE: this unpack overwrites the `k` parameter with the pool
            # spec from the filter entry.
            nb_filter, nb_row, nb_col, border_mode, k = items

            m = Sequential()
            m.add(
                Convolution2D(
                    nb_filter,
                    nb_row,
                    nb_col,
                    border_mode=border_mode,
                    input_shape=input_shape,
                ))
            m.add(Activation('relu'))

            # 1-max pooling: pool over the full (post-conv) row extent.
            if k[0] == 1:
                if border_mode == 'valid':
                    pool_size = (input_shape[1] - nb_row + 1, k[1])
                elif border_mode == 'same':
                    pool_size = (input_shape[1], k[1])
                else:
                    pool_size = (input_shape[1] - nb_row + 1, k[1])
                m.add(MaxPooling2D(pool_size=pool_size, name='1-max'))
            elif k[0] == 0:
                m.add(MaxPooling2D(pool_size=(2, k[1])))
            else:
                # k-max pooling
                # todo
                # kmax relies on Lambda, and pickle cannot dump function
                # objects, so a model using this layer cannot be saved yet.
                m.add(self.kmaxpooling(k=k[0]))
            # m.summary()
            conv_layers.append(m)

        # Concatenate the branch outputs.
        cnn_model = Sequential()
        cnn_model.add(Merge(conv_layers, mode='concat', concat_axis=2))
        # cnn_model.summary()
        print(cnn_model.get_output_shape_at(-1))
        return cnn_model
def build_model(image_shape=(28,28), embedding_size=50):
    """Build the DWE models: encoder 'feat', distance model 'emd',
    decoder 'unfeat', and the joint training model 'dwe'.

    :param image_shape: input image shape; only the last dim (s) is used.
    :param embedding_size: dimension of the learned embedding.
    :return: dict with keys 'feat', 'emd', 'unfeat', 'dwe'.
    """

    s = image_shape[-1]

    # Sequential encoder model.
    feat=Sequential()
    # Add layers:
    # C20, k(3,3), channels_first
    feat.add(Conv2D(20,(3,3), activation='relu',padding='same', input_shape=(1, s, s), data_format='channels_first'))
    #C5
    feat.add(Conv2D(5,(5,5),activation='relu',data_format='channels_first', padding='same'))
    # Flatten the multi-dim conv output — the usual conv-to-dense bridge.
    feat.add(Flatten())
    #FC100
    feat.add(Dense(100))
    #FC50
    # Embed into an embedding_size-dimensional vector.
    feat.add(Dense(embedding_size))

    # Input layers; each corresponds to one image.
    inp1=Input(shape=(1,s,s))
    inp2=Input(shape=(1,s,s))

    # Note: the SAME feat model is applied to both inputs (shared weights).
    feat1=feat(inp1)
    feat2=feat(inp2)

    # Euclidean distance over the pair of embeddings; the Lambda layer
    # applies an arbitrary backend expression to the previous outputs.
    distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([feat1, feat2])

    # Optimizer and loss (left disabled here):
    #feat.compile('sgd','mse')

    # Distance model interface: pair of images in, scalar distance out.
    model=Model([inp1,inp2],distance)
    #model.compile('adam','mse')

    # Decoder: embedding back to a normalized (softmax) image.
    unfeat=Sequential()
    input_dim = feat.get_output_shape_at(0)[-1]
    unfeat.add(Dense(100, input_shape=(input_dim,), activation='relu'))
    unfeat.add(Dense(5*s*s, activation='relu'))
    unfeat.add(Reshape((5, s,s)))
    unfeat.add(Conv2D(10,(5,5),activation='relu',data_format='channels_first', padding='same'))
    unfeat.add(Conv2D(1,(3,3),activation='linear',data_format='channels_first', padding='same'))
    unfeat.add(Flatten())
    unfeat.add(Activation('softmax')) # samples are probabilities
    unfeat.add(Reshape((1,s,s)))


    uf1=unfeat(feat1)
    uf2=unfeat(feat2)

    #unfeat.compile('adam','kullback_leibler_divergence')


    # mse: matches the euclidean/W distance output.
    # KL: pairs (uf1, x1) and (uf2, x2); x1 is (1,28,28) normalized (x/sum)
    # and uf1 is a (1,28,28) softmax, so both can be read as distributions.
    # [x1,x2],[y, x1,x2, x1, x2]
    model2 = Model([inp1,inp2],[distance, uf1,uf2, uf1, uf2])
    model2.compile('adam',loss = ['mse', kullback_leibler_divergence_,kullback_leibler_divergence_,
                           sparsity_constraint, sparsity_constraint],
                           loss_weights=[1, 1e1,1e1, 1e-3, 1e-3])

    return {'feat':feat, 'emd':model,'unfeat':unfeat,'dwe':model2}