Example #1
    def create_full_connected_layer(
            self,
            input=None,
            input_shape=None,
            units=None,
    ):
        '''
            Create a stack of fully connected (Dense) layers.

        :param input_shape: shape of the previous layer's output
        :param input: the previous layer
        :param units: number of units in each fully connected layer, e.g. [100, 20]
        :type units: array-like
        :return: output, output_shape
        '''

        from keras.models import Sequential
        from keras.layers import Dense

        # Keras 1.x API: output_dim / init correspond to units / kernel_initializer in Keras 2
        output_layer = Sequential()
        # the first Dense layer needs the shape of the previous layer
        output_layer.add(Dense(
            output_dim=units[0],
            init="glorot_uniform",
            activation='relu',
            input_shape=(input_shape,)
        ))

        # remaining fully connected layers
        for unit in units[1:]:
            output_layer.add(Dense(output_dim=unit, init="glorot_uniform", activation='relu'))

        output = output_layer(input)
        output_shape = output_layer.get_output_shape_at(-1)

        return output, output_shape[1:]
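A minimal usage sketch for the method above, assuming Keras 1.x and a hypothetical instance `net` of the class that defines create_full_connected_layer; the 300-dimensional input and the [100, 20] unit list are illustrative only.

    from keras.layers import Input

    x = Input(shape=(300,))                  # previous layer: a 300-dimensional feature vector
    output, output_shape = net.create_full_connected_layer(   # `net`: hypothetical instance
        input=x,
        input_shape=300,
        units=[100, 20],                     # two Dense layers with 100 and 20 units
    )
    # output is a Keras tensor; output_shape is (20,)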
Example #2
    def creat_convolution_layer(
            self,
            input_shape=None,
            input=None,
            convolution_filter_type=None,
            k=None,
    ):
        '''
            Create a convolution layer model. This wraps keras's Convolution2D so that both
            multi-size and single-size convolution layers can be created.

        :param input_shape: shape of the previous layer's output
        :param input: the previous layer
        :param convolution_filter_type: convolution filter type, either multi-size or single-size, e.g.:
            1. multi-size: each inner list describes one filter type (size),
                conv_filter_type = [[100,2,1,'valid',(k,1)],
                                    [100,4,1,'valid'],
                                    [100,6,1,'valid'],
                                   ]
            2. single-size: a single inner list, e.g. [[100,2,1,'valid',(k,1)]]
        :param k: the k of k-max pooling
        :return: keras TensorVariable, output, output_shape
        '''
        from keras.layers import Convolution2D,MaxPooling2D
        from keras.models import Sequential

        assert input_shape is not None, 'input_shape must not be None!'
        if len(convolution_filter_type) == 1:
            # single-size convolution layer
            nb_filter, nb_row, nb_col, border_mode, k = convolution_filter_type[0]
            output_layer = Sequential()
            output_layer.add(Convolution2D(nb_filter, nb_row, nb_col, border_mode=border_mode, input_shape=input_shape))
            output_layer.add(MaxPooling2D(pool_size=k))
            output = output_layer(input)
            output_shape = output_layer.get_output_shape_at(-1)
            # output_layer.summary()
        else:
            # multi-size convolution layer
            output_layer = self.create_multi_size_convolution_layer(
                input_shape=input_shape,
                convolution_filter_type=convolution_filter_type,
                k=k,
            )
            output_shape = output_layer.get_output_shape_at(-1)
            output = output_layer([input] * len(convolution_filter_type))

        return output, output_shape[1:]
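A usage sketch for the single-size branch of the method above, again assuming Keras 1.x and a hypothetical instance `net`; the sentence shape (1 channel, 50 words, 300-dimensional embeddings) and the filter description are illustrative.

    from keras.layers import Input

    sentence = Input(shape=(1, 50, 300))     # (channel, row, col)
    output, output_shape = net.creat_convolution_layer(   # `net`: hypothetical instance
        input_shape=(1, 50, 300),
        input=sentence,
        # single size: 100 filters of height 2 over 300-dim embeddings,
        # followed by (49, 1) max pooling
        convolution_filter_type=[[100, 2, 300, 'valid', (49, 1)]],
    )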
Example #3
    def create_multi_size_convolution_layer(self,
                                            input_shape=None,
                                            convolution_filter_type=None,
                                            k=1,
                                            ):
        """
            创建一个多类型(size,大小)核卷积层模型,可以直接添加到 keras的模型中去。
                1. 为每种size的核分别创建 Sequential 模型,模型内 搭建一个 2D卷积层 和一个 k-max pooling层
                2. 将1步骤创建的卷积核的结果 进行 第1维的合并,变成并行的卷积核
                3. 返回一个 4D 的向量

        必须是一个4D的输入,(n_batch,channel,row,col)

        :param convolution_filter_type: 卷积层的类型.一种 size对应一个 list

            for example:每个列表代表一种类型(size)的卷积核,和 max pooling 的size
                conv_filter_type = [[100,2,word_embedding_dim,'valid', (1, 1)],
                                    [100,4,word_embedding_dim,'valid', (1, 1)],
                                    [100,6,word_embedding_dim,'valid', (1, 1)],
                                   ]
        :type convolution_filter_type: array-like
        :param input_shape: 输入的 shape,3D,类似一张图,(channel,row,col)比如 (1,5,5)表示单通道5*5的图片
        :type input_shape: array-like
        :param k: 设置 k-max 层 的 k
        :type k: int
        :return: convolution model,4D-array
        :rtype: Sequential
        """

        assert len(input_shape) == 3, \
            'warning: the input must be 4D (n_batch, channel, row, col), so input_shape must be a 3D array-like: (channel, row, col)!'

        from keras.layers import Convolution2D, Activation, MaxPooling2D, Merge
        from keras.models import Sequential
        # build a convolution layer and a 1-max / k-max pooling layer for each filter size
        conv_layers = []
        for items in convolution_filter_type:

            nb_filter, nb_row, nb_col, border_mode, k = items

            m = Sequential()
            m.add(Convolution2D(nb_filter,
                                nb_row,
                                nb_col,
                                border_mode=border_mode,
                                input_shape=input_shape,
                                ))
            m.add(Activation('relu'))

            # 1-max
            if k[0] == 1:
                if border_mode == 'valid':
                    pool_size = (input_shape[1] - nb_row + 1, k[1])
                elif border_mode == 'same':
                    pool_size = (input_shape[1], k[1])
                else:
                    pool_size = (input_shape[1] - nb_row + 1, k[1])
                m.add(MaxPooling2D(pool_size=pool_size, name='1-max'))
            elif k[0] == 0:
                m.add(MaxPooling2D(pool_size=(2,  k[1])))
            else:
                # k-max pooling
                # TODO: k-max pooling needs a Lambda layer, and pickle cannot dump function
                # objects, so a model that uses this layer cannot be saved. To be fixed.
                m.add(self.kmaxpooling(k=k[0]))
            # m.summary()
            conv_layers.append(m)

        # concatenate the outputs of the convolution branches
        cnn_model = Sequential()
        cnn_model.add(Merge(conv_layers, mode='concat', concat_axis=2))
        # cnn_model.summary()
        print(cnn_model.get_output_shape_at(-1))
        return cnn_model
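A sketch of calling the multi-size builder directly, under the same Keras 1.x assumption and with a hypothetical instance `net`; note that the Merge-based model expects one copy of the input per filter size.

    from keras.layers import Input

    word_embedding_dim = 300
    conv_filter_type = [[100, 2, word_embedding_dim, 'valid', (1, 1)],
                        [100, 4, word_embedding_dim, 'valid', (1, 1)],
                        [100, 6, word_embedding_dim, 'valid', (1, 1)]]

    cnn_model = net.create_multi_size_convolution_layer(   # `net`: hypothetical instance
        input_shape=(1, 50, word_embedding_dim),           # (channel, row, col)
        convolution_filter_type=conv_filter_type,
    )
    sentence = Input(shape=(1, 50, word_embedding_dim))
    output = cnn_model([sentence] * len(conv_filter_type))  # one input per filter size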
def build_model(image_shape=(28,28), embedding_size=50):
    # imports for this snippet; euclidean_distance, eucl_dist_output_shape,
    # kullback_leibler_divergence_ and sparsity_constraint are assumed to be defined elsewhere
    from keras.models import Sequential, Model
    from keras.layers import Conv2D, Dense, Flatten, Input, Lambda, Reshape, Activation

    s = image_shape[-1]

    # feature extractor, built as a Sequential model
    feat=Sequential()
    # add layers
    # C20, k(3,3), channels_first
    feat.add(Conv2D(20,(3,3), activation='relu', padding='same', input_shape=(1, s, s), data_format='channels_first'))
    # C5
    feat.add(Conv2D(5,(5,5), activation='relu', data_format='channels_first', padding='same'))
    # Flatten "flattens" the input, i.e. turns a multi-dimensional input into a 1D one; it is
    # commonly used in the transition from convolution layers to fully connected layers.
    feat.add(Flatten())
    # FC100
    feat.add(Dense(100))
    # FC50: embed into a 50-dimensional vector
    feat.add(Dense(embedding_size))

    # Input defines the input layers; each corresponds to one image
    inp1=Input(shape=(1,s,s))
    inp2=Input(shape=(1,s,s))

    # note: the same model (the same feat) is applied to both inputs, so the weights are shared
    feat1=feat(inp1)
    feat2=feat(inp2)

    # compute the Euclidean distance between the two embeddings; the input is a list of two tensors
    # the Lambda layer applies an arbitrary Theano/TensorFlow expression to the previous layer's output
    distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([feat1, feat2])

    # optimizer and loss
    #feat.compile('sgd','mse')

    # define the model interface (inputs, outputs)
    model=Model([inp1,inp2],distance)
    #model.compile('adam','mse')
    
    # decoder: map the embedding back to a (1, s, s) image whose pixels form a probability distribution
    unfeat=Sequential()
    input_dim = feat.get_output_shape_at(0)[-1]
    unfeat.add(Dense(100, input_shape=(input_dim,), activation='relu'))
    unfeat.add(Dense(5*s*s, activation='relu'))
    unfeat.add(Reshape((5, s,s)))
    unfeat.add(Conv2D(10,(5,5),activation='relu',data_format='channels_first', padding='same'))
    unfeat.add(Conv2D(1,(3,3),activation='linear',data_format='channels_first', padding='same'))
    unfeat.add(Flatten())
    unfeat.add(Activation('softmax')) # samples are probabilities
    unfeat.add(Reshape((1,s,s)))


    uf1=unfeat(feat1)
    uf2=unfeat(feat2)

    #unfeat.compile('adam','kullback_leibler_divergence')


    # mse: for the Euclidean distance vs. the W distance
    # KL: for (uf1, x1) and (uf2, x2); x1 is (1,28,28) normalized by x/sum, and uf1 is a
    # (1,28,28) softmax, so both can be seen as distributions?
    # inputs [x1,x2], targets [y, x1,x2, x1, x2]
    model2 = Model([inp1,inp2],[distance, uf1,uf2, uf1, uf2])
    model2.compile('adam',loss = ['mse', kullback_leibler_divergence_,kullback_leibler_divergence_,
                           sparsity_constraint, sparsity_constraint],
                           loss_weights=[1, 1e1,1e1, 1e-3, 1e-3])
    
    return {'feat':feat, 'emd':model,'unfeat':unfeat,'dwe':model2}
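A sketch of how the returned dictionary might be trained, assuming the helper losses referenced above are defined and MNIST-like 28x28 inputs; the random data, batch size, target shapes, and normalization are illustrative and follow the five-output loss list above (distance, two KL terms, two sparsity terms).

    import numpy as np

    models = build_model(image_shape=(28, 28), embedding_size=50)
    dwe = models['dwe']                           # the jointly trained five-output model

    # dummy pairs of single-channel 28x28 images
    x1 = np.random.rand(32, 1, 28, 28).astype('float32')
    x2 = np.random.rand(32, 1, 28, 28).astype('float32')
    # normalize each image (x / sum) so it can be compared with a softmax output
    x1 /= x1.sum(axis=(1, 2, 3), keepdims=True)
    x2 /= x2.sum(axis=(1, 2, 3), keepdims=True)
    y = np.random.rand(32, 1).astype('float32')   # target distances

    # targets follow [distance, uf1, uf2, uf1, uf2]
    dwe.fit([x1, x2], [y, x1, x2, x1, x2], batch_size=16, epochs=1)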