def _construct_siamese_architecture(self, learning_rate_multipliers,
                                         l2_regularization_penalization):
        """ Constructs the siamese architecture and stores it in the class

        Arguments:
            learning_rate_multipliers
            l2_regularization_penalization
        """

        # Let's define the cnn architecture
        convolutional_net = Sequential()

        convolutional_net.add(
            Dense(units=128, activation='sigmoid',
                  kernel_regularizer=l2(
                      l2_regularization_penalization['Dense1']),
                  name='Dense1'))

        # Now the pairs of images
        input_image_1 = Input(self.input_shape)
        input_image_2 = Input(self.input_shape)

        encoded_image_1 = convolutional_net(input_image_1)
        encoded_image_2 = convolutional_net(input_image_2)

        # L1 distance layer between the two encoded outputs
        # One could use Subtract from Keras, but we want the absolute value
        l1_distance_layer = Lambda(
            lambda tensors: K.abs(tensors[0] - tensors[1]))
        l1_distance = l1_distance_layer([encoded_image_1, encoded_image_2])
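        # As the comment above notes, Keras' Subtract layer could be used
        # instead; a minimal sketch of that alternative (not used here, and it
        # assumes Subtract is imported from keras.layers) would be:
        #     difference = Subtract()([encoded_image_1, encoded_image_2])
        #     l1_distance = Lambda(K.abs)(difference)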

        # Same class or not prediction
        prediction = Dense(units=1, activation='sigmoid')(l1_distance)
        self.model = Model(
            inputs=[input_image_1, input_image_2], outputs=prediction)

        # Define the optimizer and compile the model
        optimizer = Modified_SGD(
            lr=self.learning_rate,
            lr_multipliers=learning_rate_multipliers,
            momentum=0.5)

        self.model.compile(loss='binary_crossentropy', metrics=['binary_accuracy'],
                           optimizer=optimizer)
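
    # A possible way to call this method (the multiplier and penalty values
    # below are illustrative assumptions; the dict keys must match the layer
    # names defined above, e.g. 'Dense1'):
    #     learning_rate_multipliers = {'Dense1': 1.0}
    #     l2_regularization_penalization = {'Dense1': 1e-4}
    #     self._construct_siamese_architecture(learning_rate_multipliers,
    #                                          l2_regularization_penalization)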
    def _construct_siamese_architecture(self, learning_rate_multipliers,
                                        l2_regularization_penalization):
        """ Constructs the siamese architecture and stores it in the class

        Arguments:
            learning_rate_multipliers
            l2_regularization_penalization
        """

        # Let's define the cnn architecture
        convolutional_net = Sequential()
        '''
        # struct 1:
        # A simple variant where the siamese encoder is an MLP,
        # i.e. a plain fully connected network.
        convolutional_net.add(Dense(128, activation=tf.nn.relu, input_shape=self.input_shape))
        convolutional_net.add(Dense(64, activation=tf.nn.relu))
        '''
        # Use the TensorFlow ('channels_last') image dimension ordering
        K.set_image_dim_ordering('tf')
        # struct 2:
        # The siamese encoder is a CNN, i.e. a convolutional neural network.
        convolutional_net.add(
            Conv2D(filters=64,
                   kernel_size=(3, 3),
                   activation='relu',
                   # The encoder is fed the reshaped 32x24x1 images
                   # (see the Reshape step below), not the raw input vectors.
                   input_shape=(32, 24, 1),
                   kernel_regularizer=l2(
                       l2_regularization_penalization['Conv1']),
                   name='Conv1'))
        # pool_size=1 leaves the spatial dimensions unchanged
        convolutional_net.add(MaxPool2D(pool_size=1))

        # convolutional_net.add(Conv2D(filters=128, kernel_size=(7, 3),
        #                              activation='relu',
        #                              kernel_regularizer=l2(
        #                                  l2_regularization_penalization['Conv2']),
        #                              name='Conv2'))
        # convolutional_net.add(MaxPool2D())

        # convolutional_net.add(Conv2D(filters=128, kernel_size=(4, 4),
        #                              activation='relu',
        #                              kernel_regularizer=l2(
        #                                  l2_regularization_penalization['Conv3']),
        #                              name='Conv3'))
        # convolutional_net.add(MaxPool2D())
        #
        # convolutional_net.add(Conv2D(filters=256, kernel_size=(4, 4),
        #                              activation='relu',
        #                              kernel_regularizer=l2(
        #                                  l2_regularization_penalization['Conv4']),
        #                              name='Conv4'))

        convolutional_net.add(Flatten())
        convolutional_net.add(
            Dense(64,
                  activation='sigmoid',
                  kernel_regularizer=l2(
                      l2_regularization_penalization['Dense1']),
                  name='Dense1'))

        # Now the pairs of images
        input_image_1 = Input(self.input_shape)
        input_image_2 = Input(self.input_shape)

        use_CNN = True
        if use_CNN:
            # Reshape the flat input vectors into 32x24 single-channel images
            # for the convolutional encoder. A Reshape layer (from keras.layers)
            # keeps the batch dimension and leaves both Input tensors usable as
            # the model inputs below, unlike tf.reshape on the Input tensors.
            reshape_layer = Reshape((32, 24, 1))
            cnn_input_1 = reshape_layer(input_image_1)
            cnn_input_2 = reshape_layer(input_image_2)
        else:
            cnn_input_1 = input_image_1
            cnn_input_2 = input_image_2

        encoded_image_1 = convolutional_net(cnn_input_1)
        encoded_image_2 = convolutional_net(cnn_input_2)

        # L1 distance layer between the two encoded outputs
        # One could use Subtract from Keras, but we want the absolute value
        l1_distance_layer = Lambda(
            lambda tensors: K.abs(tensors[0] - tensors[1]))
        l1_distance = l1_distance_layer([encoded_image_1, encoded_image_2])

        # TODO: an extra Dense layer could be inserted here as an MLP hidden
        # layer before the final sigmoid classification layer.
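        # A sketch of that extension (not enabled; the hidden width of 32 and
        # the relu activation are assumptions):
        #     hidden = Dense(32, activation='relu', name='HiddenDense')(l1_distance)
        #     prediction = Dense(1, activation='sigmoid')(hidden)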

        # Same class or not prediction
        prediction = Dense(1, activation='sigmoid')(l1_distance)
        self.model = Model(inputs=[input_image_1, input_image_2],
                           outputs=prediction)
        self.model.summary()
        # Define the optimizer and compile the model
        optimizer = Modified_SGD(lr=self.learning_rate,
                                 lr_multipliers=learning_rate_multipliers,
                                 momentum=0.5)

        self.model.compile(loss='binary_crossentropy',
                           metrics=['binary_accuracy'],
                           optimizer=optimizer)
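
    # A minimal training sketch for the compiled model: it takes the two image
    # pairs as a list of inputs and binary same/different labels. The array
    # names and batch size below are assumptions, not part of this class:
    #     self.model.fit([pairs_a, pairs_b], labels, batch_size=32, epochs=10)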