예제 #1
0
    def onet(self, input_shape=None):
        """Build the MTCNN O-Net (output network).

        Args:
            input_shape: optional (H, W, C) tuple; defaults to (48, 48, 3).

        Returns:
            A Keras Model mapping the input image to
            [bounding-box regression (4), landmark regression (10),
             face classification softmax (2)].
        """
        shape = input_shape if input_shape is not None else (48, 48, 3)
        inputs = Input(shape)

        # Three conv + PReLU + pool stages; only pool size/padding vary.
        x = inputs
        for filters, kernel, pool_size, pool_pad in (
                (32, (3, 3), (3, 3), 'same'),
                (64, (3, 3), (3, 3), 'valid'),
                (64, (3, 3), (2, 2), 'same')):
            x = Conv2D(filters, kernel, strides=(1, 1), padding='valid')(x)
            x = PReLU(shared_axes=[1, 2])(x)
            x = MaxPooling2D(pool_size, strides=(2, 2), padding=pool_pad)(x)

        x = Conv2D(128, (2, 2), strides=(1, 1), padding='valid')(x)
        x = PReLU(shared_axes=[1, 2])(x)

        x = Flatten()(x)
        x = Dense(256)(x)
        x = PReLU()(x)

        # Face / non-face probability head.
        face_cls = Softmax(axis=1)(Dense(2)(x))
        # Bounding-box and facial-landmark regression heads.
        bbox_reg = Dense(4)(x)
        landmarks = Dense(10)(x)

        return Model(inputs, [bbox_reg, landmarks, face_cls])
예제 #2
0
def recall_net(input):
    """Face recall network: classification, box regression, 14-d landmarks.

    NOTE: the parameter name `input` shadows the builtin; kept unchanged
    because it is part of the public call signature.
    """
    x = Lambda(white_norm, name="white_norm")(input)

    x = Conv2D(28, (3, 3), padding="valid", strides=(1, 1), name="conv1")(x)
    x = PReLU(shared_axes=[1, 2])(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(x)

    x = Conv2D(48, (3, 3), padding="valid", strides=(1, 1), name="conv2")(x)
    x = PReLU(shared_axes=[1, 2])(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")(x)

    x = Conv2D(64, (2, 2), padding="valid", strides=(1, 1), name="conv3")(x)
    x = PReLU(shared_axes=[1, 2])(x)
    x = Flatten()(x)

    # Shared 128-d feature vector feeding all three heads.
    dense = PReLU()(Dense(128, name="r_feat")(x))

    cate = Dense(2, activation=softmax, name="class")(dense)
    boxes = Dense(4, activity_regularizer=regularizers.l2(0.0005),
                  name="box")(dense)
    landmark = Dense(14,
                     activity_regularizer=regularizers.l2(0.0005),
                     name="landmark")(dense)
    return Model(inputs=input, outputs=[cate, boxes, landmark])
예제 #3
0
    def pnet(self, input_shape=None):
        """Build the MTCNN P-Net (proposal network).

        Fully convolutional: the default input shape is (None, None, 3)
        and the outputs keep spatial dimensions.

        Returns:
            A Keras Model producing [bounding-box regression map (4),
            face classification softmax map (2)].
        """
        shape = input_shape if input_shape is not None else (None, None, 3)
        inputs = Input(shape)

        x = Conv2D(10, (3, 3), strides=(1, 1), padding='valid')(inputs)
        x = PReLU(shared_axes=[1, 2])(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), padding='same')(x)

        # Two more conv + PReLU stages (no pooling).
        for filters in (16, 32):
            x = Conv2D(filters, (3, 3), strides=(1, 1), padding='valid')(x)
            x = PReLU(shared_axes=[1, 2])(x)

        # Per-location face probability (softmax over the channel axis).
        face_cls = Softmax(axis=3)(Conv2D(2, (1, 1), strides=(1, 1))(x))
        # Per-location bounding-box regression.
        bbox_reg = Conv2D(4, (1, 1), strides=(1, 1))(x)

        return Model(inputs, [bbox_reg, face_cls])
예제 #4
0
def demo_create_encoder(latent_dim, cat_dim, window_size, input_dim):
    """Sequence autoencoder with a categorical head and a latent head.

    Args:
        latent_dim: size of the continuous latent representation.
        cat_dim: number of categories for the softmax head.
        window_size: sequence length of the input window.
        input_dim: feature dimension per timestep.

    Returns:
        Model mapping the input window to
        [reconstruction, latent_repr, category softmax, reconstruction error].
    """
    inputs = Input(shape=(window_size, input_dim))

    # Encoder: per-timestep projection, then two BiLSTM stages.
    h = TimeDistributed(Dense(64, activation='linear'))(inputs)
    h = Bidirectional(LSTM(128, return_sequences=True))(h)
    h = ELU()(BatchNormalization()(h))
    h = Bidirectional(LSTM(64))(h)
    h = ELU()(BatchNormalization()(h))

    def _head(units, activation):
        # Dense-BN-PReLU bottleneck shared by both output heads.
        y = Dense(64)(h)
        y = PReLU()(BatchNormalization()(y))
        return Dense(units, activation=activation)(y)

    cat = _head(cat_dim, 'softmax')
    latent_repr = _head(latent_dim, 'linear')

    # Decoder: repeat the concatenated code and mirror the encoder.
    z = RepeatVector(window_size)(Concatenate()([latent_repr, cat]))
    z = ELU()(Bidirectional(LSTM(64, return_sequences=True))(z))
    z = ELU()(Bidirectional(LSTM(128, return_sequences=True))(z))
    z = ELU()(TimeDistributed(Dense(64))(z))
    decode = TimeDistributed(Dense(input_dim, activation='linear'))(z)

    # Per-timestep reconstruction error, exposed as a model output.
    error = Subtract()([inputs, decode])

    return Model(inputs, [decode, latent_repr, cat, error])
예제 #5
0
    def rnet(self, input_shape=None):
        """Build the MTCNN R-Net (refinement network).

        Args:
            input_shape: optional (H, W, C) tuple; defaults to (24, 24, 3).

        Returns:
            A Keras Model producing [bounding-box regression (4),
            face classification softmax (2)].
        """
        shape = input_shape if input_shape is not None else (24, 24, 3)
        inputs = Input(shape)

        x = Conv2D(28, (3, 3), strides=(1, 1), padding='valid')(inputs)
        x = PReLU(shared_axes=[1, 2])(x)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

        x = Conv2D(48, (3, 3), strides=(1, 1), padding='valid')(x)
        x = PReLU(shared_axes=[1, 2])(x)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(x)

        x = Conv2D(64, (2, 2), strides=(1, 1), padding='valid')(x)
        x = PReLU(shared_axes=[1, 2])(x)
        x = Flatten()(x)

        x = Dense(128)(x)
        x = PReLU()(x)

        # Face / non-face probability head.
        face_cls = Softmax(axis=1)(Dense(2)(x))
        # Bounding-box regression head.
        bbox_reg = Dense(4)(x)

        return Model(inputs, [bbox_reg, face_cls])
예제 #6
0
def bottleneck_decoder(tensor,
                       nfilters,
                       upsampling=False,
                       normal=False,
                       name=''):
    """ENet-style decoder bottleneck: 1x1 reduce, 3x3 (de)conv, 1x1 expand,
    residual add, ReLU.

    Args:
        tensor: input feature map.
        nfilters: number of output filters (the internal branch uses
            nfilters // 4).
        upsampling: if True, the main branch uses a strided deconv and the
            skip branch is 1x1-projected then upsampled x2.
        normal: if True (and not upsampling), apply a plain 3x3 conv in the
            middle of the main branch.
        name: suffix appended to every layer name.

    Returns:
        Output tensor after the residual add and ReLU.
    """
    y = tensor
    skip = tensor
    if upsampling:
        # Project the skip path to nfilters and match the upsampled size.
        skip = Conv2D(filters=nfilters,
                      kernel_size=(1, 1),
                      kernel_initializer='he_normal',
                      strides=(1, 1),
                      padding='same',
                      use_bias=False,
                      name=f'1x1_conv_skip_{name}')(skip)
        skip = UpSampling2D(size=(2, 2), name=f'upsample_skip_{name}')(skip)

    # 1x1 channel reduction.
    y = Conv2D(filters=nfilters // 4,
               kernel_size=(1, 1),
               kernel_initializer='he_normal',
               strides=(1, 1),
               padding='same',
               use_bias=False,
               name=f'1x1_conv_{name}')(y)
    y = BatchNormalization(momentum=0.1, name=f'bn_1x1_{name}')(y)
    y = PReLU(shared_axes=[1, 2], name=f'prelu_1x1_{name}')(y)

    if upsampling:
        y = Conv2DTranspose(filters=nfilters // 4,
                            kernel_size=(3, 3),
                            kernel_initializer='he_normal',
                            strides=(2, 2),
                            padding='same',
                            name=f'3x3_deconv_{name}')(y)
    elif normal:
        # BUG FIX: previously the conv result was discarded (never assigned
        # back to y), making the 'normal' branch a silent no-op.
        y = Conv2D(filters=nfilters // 4,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   kernel_initializer='he_normal',
                   padding='same',
                   name=f'3x3_conv_{name}')(y)
    y = BatchNormalization(momentum=0.1, name=f'bn_main_{name}')(y)
    y = PReLU(shared_axes=[1, 2], name=f'prelu_{name}')(y)

    # 1x1 expansion back to nfilters.
    y = Conv2D(filters=nfilters,
               kernel_size=(1, 1),
               kernel_initializer='he_normal',
               use_bias=False,
               name=f'final_1x1_{name}')(y)
    y = BatchNormalization(momentum=0.1, name=f'bn_final_{name}')(y)

    # Residual merge with the skip branch.
    y = Add(name=f'add_{name}')([y, skip])
    y = ReLU(name=f'relu_out_{name}')(y)

    return y
예제 #7
0
def up_projection(lt_, nf, s, block):
    """DBPN-style up-projection unit.

    Upsamples, back-projects down, and upsamples the residual; the two
    high-resolution maps are summed.

    Args:
        lt_: low-resolution input tensor.
        nf: number of filters for every conv/deconv.
        s: upscale factor; one of 2, 4 or 8.
        block: index used only to build the tf name scope.

    Returns:
        High-resolution feature tensor (spatial size multiplied by s).

    Raises:
        ValueError: if `s` is not 2, 4 or 8 (previously this fell through
        to an unbound variable and raised NameError).
    """
    if s not in (2, 4, 8):
        raise ValueError(f'unsupported scale factor: {s!r} (expected 2, 4 or 8)')
    # The original three branches differed only in kernel sizes:
    # deconv kernel == s, downsampling conv kernel == s + 4 (6/8/12).
    with tf.name_scope('up_' + str(block)):
        ht = Conv2DTranspose(nf, s, strides=s)(lt_)   # H0: upsample input
        ht = PReLU()(ht)
        lt = ZeroPadding2D(2)(ht)
        lt = Conv2D(nf, s + 4, strides=s)(lt)         # L0: back-project down
        lt = PReLU()(lt)
        et = Subtract()([lt, lt_])                    # residual in LR space
        ht1 = Conv2DTranspose(nf, s, strides=s)(et)   # H1: upsample residual
        ht1 = PReLU()(ht1)
        return Add()([ht, ht1])
예제 #8
0
    def __init__(self, act='', lrelu_alpha=0.1, **kwargs):
        """Select the wrapped activation: PReLU when act == 'prelu',
        otherwise LeakyReLU with the given alpha."""
        super(_Act, self).__init__(**kwargs)
        self.func = PReLU() if act == 'prelu' else LeakyReLU(alpha=lrelu_alpha)
예제 #9
0
    def conv_batch_prelu(name,
                         tensor,
                         num_filters,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         padding="same"):
        """
        This function combines conv2d layer, batch normalization layer and prelu activation.

        Args:
            name (str): layer's name ('conv_', 'batchnorm' and 'prelu' are added to the name)
            tensor (tf.Tensor): the input tensor
            num_filters (int): number of filters used in the convolution layer
            kernel_size (tuple or list): size of each kernel in the convolution
            strides (tuple or list): strides used in the convolution
            padding (str): one of 'same' or 'valid'

        Return:
            tensor (tf.Tensor): the output tensor

        Note:
            `regularizers` (the L1/L2 coefficient pair) and `prefix` (the
            layer-name prefix) are free variables captured from the
            enclosing scope — presumably the surrounding model-builder;
            confirm against the caller.
        """
        # Conv with He-uniform init and L1L2 weight regularization.
        tensor = Conv2D(filters=num_filters,
                        kernel_size=kernel_size,
                        strides=strides,
                        kernel_initializer="he_uniform",
                        bias_initializer="zeros",
                        kernel_regularizer=L1L2(regularizers[0],
                                                regularizers[1]),
                        padding=padding,
                        name=f"{prefix}_conv_{name}")(tensor)
        tensor = BatchNormalization(momentum=0.1,
                                    name=f"{prefix}_batchnorm_{name}")(tensor)
        # PReLU slope shared across the two spatial axes.
        tensor = PReLU(shared_axes=[1, 2],
                       name=f"{prefix}_prelu_{name}")(tensor)
        return tensor
예제 #10
0
def generator(x_in,
              mode,
              weight_decay=2.5e-5,
              num_filters=64,
              num_res_blocks=16):
    """SRGAN-style generator body: head conv, residual trunk with long skip,
    two upsampling stages, tanh output conv.

    Note: `weight_decay` is accepted but unused here — kept for interface
    compatibility with callers.

    Returns:
        The output tensor (model construction is left to the caller).
    """
    is_training = mode == tf.estimator.ModeKeys.TRAIN
    # Serving passes a feature dict instead of a plain tensor.
    if isinstance(x_in, dict):
        x_in = x_in['feature']

    x = Conv2D(num_filters, kernel_size=9, padding='same')(x_in)
    x = PReLU(shared_axes=[1, 2])(x)
    skip = x

    for _ in range(num_res_blocks):
        x = res_block(x, num_filters, is_training=is_training)

    x = Conv2D(num_filters, kernel_size=3, padding='same')(x)
    x = BatchNormalization()(x)
    x = Add()([skip, x])   # long skip connection around the trunk

    # Two x2 subpixel-style upsampling stages.
    for _ in range(2):
        x = upsample(x, num_filters * 4)

    return Conv2D(3, kernel_size=9, padding='same', activation='tanh')(x)
예제 #11
0
    def build(self):
        """Assemble the SRResNet-style generator and return it as a Model."""
        # Fixed input shape when configured; otherwise fully convolutional,
        # with the channel axis placed according to data_format.
        if self.input_shape is not None:
            input_generator = Input(shape=self.input_shape)
        elif self.data_format == 'channels_last':
            input_generator = Input(shape=(None, None, 3))
        else:
            input_generator = Input(shape=(3, None, None))

        x = Conv2D(filters=64,
                   kernel_size=(9, 9),
                   strides=(1, 1),
                   padding='same',
                   activation=None)(input_generator)
        x_input_res_block = PReLU(alpha_initializer='zeros',
                                  alpha_regularizer=None,
                                  alpha_constraint=None,
                                  shared_axes=self.shared_axis)(x)

        # B residual blocks over the head features.
        x = x_input_res_block
        for _ in range(self.B):
            x = res_block(x, self.axis, self.shared_axis)

        x = Conv2D(64,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   padding='same',
                   activation=None,
                   use_bias=False)(x)
        x = BatchNormalization(axis=self.axis)(x)
        # Long skip connection from the head PReLU output.
        x = add([x, x_input_res_block])

        # Two x2 upsampling blocks (x4 overall).
        x = up_block(x, self.shared_axis)
        x = up_block(x, self.shared_axis)

        # tanh keeps image values in [-1, 1]; dtype forced to float32 so the
        # output stays full precision under mixed-precision policies.
        output_generator = Conv2D(3,
                                  kernel_size=(9, 9),
                                  strides=(1, 1),
                                  activation='tanh',
                                  use_bias=False,
                                  padding='same',
                                  dtype=tf.float32)(x)

        return Model(inputs=input_generator,
                     outputs=output_generator,
                     name="Generator")
예제 #12
0
def res_block(x_in, num_filters, momentum=0.8):
    """Residual block: conv-BN-PReLU-conv-BN plus identity skip."""
    h = Conv2D(num_filters, kernel_size=3, padding='same')(x_in)
    h = BatchNormalization(momentum=momentum)(h)
    h = PReLU(shared_axes=[1, 2])(h)
    h = Conv2D(num_filters, kernel_size=3, padding='same')(h)
    h = BatchNormalization(momentum=momentum)(h)
    return Add()([x_in, h])
예제 #13
0
File: srgan.py — Project: acocac/CNN-tests
 def residual(inputs, n_filters, momentum=0.8):
     """Residual block: conv-BN-PReLU-conv-BN plus identity skip."""
     h = Conv2D(n_filters, kernel_size=3, padding='same')(inputs)
     h = BatchNormalization(momentum=momentum)(h)
     h = PReLU(shared_axes=[1, 2])(h)
     h = Conv2D(n_filters, kernel_size=3, padding='same')(h)
     h = BatchNormalization(momentum=momentum)(h)
     return Add()([inputs, h])
예제 #14
0
    def __init__(self, att_hidden_units, activation='prelu'):
        """Build the attention MLP.

        :param att_hidden_units: iterable of hidden-layer sizes
        :param activation: 'prelu' selects PReLU; anything else selects Dice
        """
        super().__init__()
        # One Dense per hidden size; a fresh activation layer instance each.
        self.attention_dense = [
            Dense(units=n,
                  activation=PReLU() if activation == 'prelu' else Dice())
            for n in att_hidden_units
        ]
        self.output_layer = Dense(units=1)
예제 #15
0
 def conv_RRDB(self, x_in, out_channel, activate=True):
     """3x3 same-padding convolution used inside RRDB blocks, with an
     optional PReLU activation."""
     y = Conv2D(out_channel,
                kernel_size=3,
                strides=1,
                padding='same',
                kernel_initializer=self.init_kernel)(x_in)
     return PReLU(shared_axes=[1, 2])(y) if activate else y
예제 #16
0
    def upsample(self, x_in):
        """Upsampling stage: conv -> pixel shuffle (x2) -> PReLU."""
        y = Conv2D(self.n_filters,
                   kernel_size=3,
                   kernel_initializer=self.init_kernel,
                   padding='same')(x_in)
        # NOTE(review): self.pixel_shuffle(scale=2) presumably returns a
        # callable for Lambda to wrap — confirm against its definition.
        y = Lambda(self.pixel_shuffle(scale=2))(y)
        return PReLU(shared_axes=[1, 2])(y)
예제 #17
0
    def __init__(self,feature_columns, behavior_feature_list, attention_hidden_units=(80, 40),
                 ffn_hidden_units=(80, 40), attention_activation='prelu', ffn_activation='prelu',
                 max_len=40, dnn_dropout=0.,embed_reg=1e-4):
        '''Deep-Interest-Network-style model constructor.

        :param feature_columns: (dense_feature_columns, sparse_feature_columns) pair
        :param behavior_feature_list: names of sparse features forming the behavior sequence
        :param attention_hidden_units: hidden-layer sizes of the attention MLP
        :param attention_activation: 'prelu' selects PReLU, anything else Dice
        :param ffn_hidden_units: hidden-layer sizes of the final feed-forward net
        :param ffn_activation: 'prelu' selects PReLU, anything else Dice
        :param max_len: maximum behavior-sequence length
        :param dnn_dropout: dropout rate applied before the output layer
        :param embed_reg: L2 regularization factor for the embeddings
        '''
        super().__init__()
        self.max_len = max_len
        self.dense_feature_column, self.sparse_feature_column = feature_columns

        # Feature counts: non-behavior sparse, dense, and behavior features.
        self.other_sparse_len = len(self.sparse_feature_column) - len(behavior_feature_list)
        self.dense_len = len(self.dense_feature_column)
        self.behavior_num = len(behavior_feature_list)

        # Embeddings for sparse features that are NOT part of the behavior sequence.
        self.sparse_feature_embedding = [
            Embedding(
                input_dim=feat['feat_dim'],
                input_length=1,
                output_dim=feat['embed_dim'],
                embeddings_initializer='random_uniform',
                embeddings_regularizer=l2(embed_reg)
            )
            for index, feat in enumerate(self.sparse_feature_column)
            if feat['feat'] not in behavior_feature_list
        ]

        # Embeddings for the behavior-sequence features.
        self.behavior_seq_embedding = [
            Embedding(
                input_dim=feat['feat_dim'],
                input_length=1,
                output_dim=feat['embed_dim'],
                embeddings_initializer='random_uniform',
                embeddings_regularizer=l2(embed_reg)
            )
            for index, feat in enumerate(self.sparse_feature_column)
            if feat['feat'] in behavior_feature_list
        ]

        # Attention over the behavior sequence.
        self.attention_layer = AttentionLayer(activation=attention_activation, att_hidden_units=attention_hidden_units)

        self.bn = BatchNormalization(trainable=True)
        # Final feed-forward net; fresh activation layer per Dense.
        self.ffn = [Dense(unit, activation=PReLU() if ffn_activation == 'prelu' else Dice()) for unit in ffn_hidden_units]

        self.dropout = Dropout(dnn_dropout)
        self.output_layer = Dense(1)
def upsample(x, number):
    """x2 upsampling: 256-filter conv -> SubpixelConv2D -> PReLU.

    `number` is only used to build unique layer names.
    """
    suffix = str(number)
    x = Conv2D(256,
               kernel_size=3,
               strides=1,
               padding='same',
               name='upSampleConv2d_' + suffix)(x)
    x = SubpixelConv2D(name='upSampleSubPixel_' + suffix, scale=2)(x)
    return PReLU(shared_axes=[1, 2], name='upSamplePReLU_' + suffix)(x)
예제 #19
0
 def get_activation(self, x):
     """Apply the activation selected by self.activation_type to x.

     Supported types: 'elu', 'relu', 'prelu', 'leakyrelu'; anything else
     raises ValueError.
     """
     factories = {
         'elu': ELU,
         'relu': ReLU,
         'prelu': PReLU,
         'leakyrelu': LeakyReLU,
     }
     layer_cls = factories.get(self.activation_type)
     if layer_cls is None:
         raise ValueError('Undefined ACTIVATION_TYPE!')
     return layer_cls()(x)
예제 #20
0
def up_block(x, shared_axis):
    """x2 upsampling block: 256-filter 3x3 conv -> UpSampling2D -> PReLU."""
    h = Conv2D(256,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               activation=None,
               use_bias=False)(x)
    h = UpSampling2D(size=(2, 2))(h)
    return PReLU(alpha_initializer='zeros',
                 alpha_regularizer=None,
                 alpha_constraint=None,
                 shared_axes=shared_axis)(h)
예제 #21
0
	def get_activation(self):
		"""Return a fresh activation layer selected by self.activation_type.

		Codes: 1 -> LeakyReLU(0.1), 2 -> ReLU, 3 -> PReLU, 4 -> ELU(1.0);
		any other value raises Exception.
		"""
		builders = {
			1: lambda: LeakyReLU(alpha=0.1),
			2: lambda: ReLU(),
			3: lambda: PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=None),
			4: lambda: ELU(alpha=1.0),
		}
		make = builders.get(self.activation_type)
		if make is None:
			raise Exception('Not a valid activation type')
		return make()
예제 #22
0
 def KerasModelSmallNet50(self, imgInput):
     """
     Construct small net. The image size is 50*50, which is suitable for map.
     """
     feat = Conv2D(16, (3, 3), activation='tanh')(imgInput)
     feat = Conv2D(32, (3, 3), activation='relu')(feat)
     feat = MaxPooling2D(pool_size=(2, 2))(feat)
     # Two conv pairs, the first followed by pooling.
     for _ in range(2):
         feat = Conv2D(32, (3, 3), activation='relu')(feat)
     feat = MaxPooling2D(pool_size=(2, 2))(feat)
     for _ in range(2):
         feat = Conv2D(32, (3, 3), activation='relu')(feat)
     feat = Flatten()(feat)
     # Regularized feature embedding followed by PReLU.
     feat = Dense(self.featureDim, kernel_regularizer=regularizers.l2(0.0002),
                  activity_regularizer=regularizers.l1(0.0002), name='fc_feature')(feat)
     return PReLU()(feat)
예제 #23
0
def down_projection(ht_, nf, s, block, act='prelu'):
    """DBPN-style down-projection unit.

    Downsamples, back-projects up, and downsamples the residual; the two
    low-resolution maps are summed.

    Args:
        ht_: high-resolution input tensor.
        nf: number of filters for every conv/deconv.
        s: downscale factor; one of 2, 4 or 8.
        block: index used only to build the tf name scope.
        act: accepted but unused — kept for interface compatibility.

    Returns:
        Low-resolution feature tensor (spatial size divided by s).

    Raises:
        ValueError: if `s` is not 2, 4 or 8 (previously this fell through
        to an unbound variable and raised NameError).
    """
    if s not in (2, 4, 8):
        raise ValueError(f'unsupported scale factor: {s!r} (expected 2, 4 or 8)')
    # The original three branches differed only in kernel sizes:
    # downsampling conv kernel == s + 4 (6/8/12), deconv kernel == s.
    with tf.name_scope('down_' + str(block)):
        lt = ZeroPadding2D(2)(ht_)
        lt = Conv2D(nf, s + 4, strides=s)(lt)        # L0: downsample input
        lt = PReLU()(lt)
        ht = Conv2DTranspose(nf, s, strides=s)(lt)   # H0: back-project up
        ht = PReLU()(ht)
        et = Subtract()([ht, ht_])                   # residual in HR space
        lt1 = ZeroPadding2D(2)(et)
        lt1 = Conv2D(nf, s + 4, strides=s)(lt1)      # L1: downsample residual
        lt1 = PReLU()(lt1)
        return Add()([lt1, lt])
예제 #24
0
File: srgan.py — Project: acocac/CNN-tests
def create_keras_model(inputShape, nClasses, scale=2, n_filters=64, depth=16):
    """SRGAN-style super-resolution model.

    Args:
        inputShape: shape of the low-resolution input.
        nClasses: number of output channels (sigmoid activated).
        scale: upscale factor (2, 3, or 4; 4 is done as two x2 steps).
        n_filters: base number of conv filters.
        depth: number of residual blocks.

    Returns:
        A compiled-ready Keras Model named "srgan".
    """

    def residual(inputs, n_filters, momentum=0.8):
        # Conv-BN-PReLU-conv-BN with an identity skip.
        h = Conv2D(n_filters, kernel_size=3, padding='same')(inputs)
        h = BatchNormalization(momentum=momentum)(h)
        h = PReLU(shared_axes=[1, 2])(h)
        h = Conv2D(n_filters, kernel_size=3, padding='same')(h)
        h = BatchNormalization(momentum=momentum)(h)
        return Add()([inputs, h])

    def upsample(inputs, n_filters, scale):
        # Subpixel (depth_to_space) upsampling by `scale`.
        h = Conv2D(n_filters * (scale ** 2), kernel_size=3, padding='same')(inputs)
        h = Lambda(lambda t: tf.nn.depth_to_space(t, scale))(h)
        return PReLU(shared_axes=[1, 2])(h)

    inputs = Input(shape=inputShape)

    x = Conv2D(n_filters, kernel_size=9, padding='same')(inputs)
    x0 = PReLU(shared_axes=[1, 2])(x)

    # `depth` residual blocks with a long skip from x0.
    x = x0
    for _ in range(depth):
        x = residual(x, n_filters)

    x = Conv2D(n_filters, kernel_size=3, padding='same')(x)
    x = BatchNormalization()(x)
    x = Add()([x0, x])

    # Upsampling for super-resolution: one step for x2/x3, two x2 steps for x4.
    if scale in (2, 3):
        x = upsample(x, n_filters, scale)
    elif scale == 4:
        x = upsample(x, n_filters, 2)
        x = upsample(x, n_filters, 2)

    outputs = Conv2D(nClasses, kernel_size=9, padding='same', activation='sigmoid')(x)

    return Model(inputs, outputs, name="srgan")
예제 #25
0
    def KerasModelResNet(self, imgInput):
        """
        Construct resNet. The image size is 150*150, which is suitable for image.

        Args:
            imgInput: input image tensor.

        Returns:
            Feature tensor of size self.featureDim after a final PReLU.
        """
        # Channels-last batch-norm axis.
        bn_axis = 3

        # Stem: 7x7 stride-2 conv, BN, ReLU, 3x3 stride-2 max pool.
        x = ZeroPadding2D((3, 3))(imgInput)
        x = Convolution2D(8, 7, strides=(2, 2), name='conv1')(x)
        x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)

        # Stage 2: stride-1 conv block + 2 identity blocks.
        x = conv_block(x, 3, [8, 8, 16], stage=2, block='a', strides=(1, 1))
        x = identity_block(x, 3, [8, 8, 16], stage=2, block='b')
        x = identity_block(x, 3, [8, 8, 16], stage=2, block='c')

        # Stage 3: conv block (default stride 2) + 3 identity blocks.
        x = conv_block(x, 3, [16, 16, 32], stage=3, block='a')
        x = identity_block(x, 3, [16, 16, 32], stage=3, block='b')
        x = identity_block(x, 3, [16, 16, 32], stage=3, block='c')
        x = identity_block(x, 3, [16, 16, 32], stage=3, block='d')

        # Stage 4: conv block + 5 identity blocks.
        x = conv_block(x, 3, [32, 32, 64], stage=4, block='a')
        x = identity_block(x, 3, [32, 32, 64], stage=4, block='b')
        x = identity_block(x, 3, [32, 32, 64], stage=4, block='c')
        x = identity_block(x, 3, [32, 32, 64], stage=4, block='d')
        x = identity_block(x, 3, [32, 32, 64], stage=4, block='e')
        x = identity_block(x, 3, [32, 32, 64], stage=4, block='f')

        # Stage 5: conv block + 2 identity blocks.
        x = conv_block(x, 3, [64, 64, 128], stage=5, block='a')
        x = identity_block(x, 3, [64, 64, 128], stage=5, block='b')
        x = identity_block(x, 3, [64, 64, 128], stage=5, block='c')

        # Stage 6: conv block + 2 identity blocks.
        x = conv_block(x, 3, [64, 64, 256], stage=6, block='a')
        x = identity_block(x, 3, [64, 64, 256], stage=6, block='b')
        x = identity_block(x, 3, [64, 64, 256], stage=6, block='c')

        # Global pooling instead of flatten keeps the head small.
        x = GlobalAveragePooling2D()(x)
        # x = Flatten()(x)
        # Regularized feature embedding followed by PReLU.
        x = Dense(self.featureDim,
                  kernel_regularizer=regularizers.l2(0.0002),
                  activity_regularizer=regularizers.l1(0.0002),
                  name='fc_feature')(x)
        x = PReLU()(x)
        return x
예제 #26
0
    def feature_extracter_from_texts(self,mashup_api=None):
        """
        Text feature extraction pipeline (the right branch) for mashup and
        service descriptions. Since both share the same extraction process,
        it is wrapped in a Model so mashup and api inputs can reuse it.
        (Original author's note: the trailing MLP was slated for removal.)
        :param mashup_api: optional (mashup, api) pair used only in HDP mode
        :return: a reusable Keras Model shared by mashup and api inputs
        """
        if new_Para.param.text_extracter_mode=='HDP' and mashup_api is not None:
            return self.HDP_feature_extracter_from_texts(mashup_api)

        if self.text_feature_extracter is None: # built lazily on first call
            text_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
            text_embedding_layer = self.get_text_embedding_layer()  # TODO: embedding params should come from outside
            text_embedded_sequences = text_embedding_layer(text_input)  # token ids -> 2D embedded sequence

            if new_Para.param.text_extracter_mode in ('inception','textCNN'): # 2D -> 3D: append a channel axis
                # print(text_embedded_sequences.shape)
                text_embedded_sequences = Lambda(lambda x: tf.expand_dims(x, axis=3))(text_embedded_sequences)  # NOTE: tf tensors and keras tensors differ!
                print(text_embedded_sequences.shape)

            if new_Para.param.text_extracter_mode=='inception':
                x = inception_layer(text_embedded_sequences, self.word_embedding_dim, self.inception_channels, self.inception_pooling)  # inception processing
                print('built inception layer, done!')
            elif new_Para.param.text_extracter_mode=='textCNN':
                x = self.textCNN_feature_extracter_from_texts(text_embedded_sequences)
            elif new_Para.param.text_extracter_mode=='LSTM':
                x = self.LSTM_feature_extracter_from_texts(text_embedded_sequences)
            else:
                raise ValueError('wrong extracter!')
            print('text feature after inception/textCNN/LSTM model,',x) # inspect the module's features before the MLP transform

            # MLP head: Dense (+ optional BN) + PReLU (+ optional dropout) per layer.
            for FC_unit_num in self.inception_fc_unit_nums:
                x = Dense(FC_unit_num, kernel_regularizer=l2(new_Para.param.l2_reg))(x)  # , activation='relu'
                if new_Para.param.inception_MLP_BN:
                    x = BatchNormalization(scale=False)(x)
                x = PReLU()(x)
                if new_Para.param.inception_MLP_dropout:
                    x = tf.keras.layers.Dropout(0.5)(x)

            self.text_feature_extracter=Model(text_input, x,name='text_feature_extracter')
        return self.text_feature_extracter
예제 #27
0
    def dense_batch_prelu(name, tensor, n_units):
        """
        This function combines dense layer, batch normalization layer and prelu activation.

        Args:
            name (str): layer's name ('dense_', 'batchnorm' and 'prelu' are added to the name)
            tensor (tf.Tensor): the input tensor
            n_units (int): number of units in the dense layer

        Return:
            tensor (tf.Tensor): the output tensor

        Note:
            `prefix` (the layer-name prefix) is a free variable captured
            from the enclosing scope — confirm against the caller.
        """
        tensor = Dense(n_units,
                       name=f"{prefix}_dense_{name}",
                       kernel_initializer="he_uniform",
                       bias_initializer="zeros")(tensor)
        tensor = BatchNormalization(momentum=0.1,
                                    name=f"{prefix}_batchnorm_{name}")(tensor)
        tensor = PReLU(name=f"{prefix}_prelu_{name}")(tensor)
        return tensor
예제 #28
0
    def get_inception_MLP_layer(self, channel_num,name = ''):
        """
        The MLP applied after textCNN/inception; the final wrapped model is
        named 'text_feature_extracter' (+ optional suffix).
        :param channel_num: last dimension of the input shape
        :param name: suffix so distinct MLPs can transform mashup, api and
                     slt_apis separately — just pass a different name
        :return: the lazily built, cached Model (may stay None when the
                 current mode does not use an MLP)
        """

        if self.inception_MLP_layer is None:
            if new_Para.param.text_extracter_mode != 'LSTM' and new_Para.param.if_inception_MLP:
                input = Input(shape=(channel_num,), dtype='float32')
                x = input
                # Dense (+ optional BN) + PReLU (+ optional dropout) per layer.
                for FC_unit_num in self.inception_fc_unit_nums:
                    x = Dense(FC_unit_num, kernel_regularizer=l2(new_Para.param.l2_reg))(x)  # default activation=None (linear)
                    if new_Para.param.inception_MLP_BN:
                        x = BatchNormalization(scale=False)(x)
                    x = PReLU()(x)
                    if new_Para.param.inception_MLP_dropout:
                        x = tf.keras.layers.Dropout(0.5)(x)
                self.inception_MLP_layer = Model(input, x, name='text_feature_extracter'+name)
        return self.inception_MLP_layer
예제 #29
0
def sr_resnet(num_filters=64, num_res_blocks=16):
    """SRResNet generator: normalize to [0, 1], residual trunk with long
    skip, two upsampling stages, tanh output denormalized from [-1, 1]."""
    lr_in = Input(shape=(None, None, 3))
    x = Lambda(normalize_01)(lr_in)

    x = Conv2D(num_filters, kernel_size=9, padding='same')(x)
    x = PReLU(shared_axes=[1, 2])(x)
    skip = x

    for _ in range(num_res_blocks):
        x = res_block(x, num_filters)

    x = Conv2D(num_filters, kernel_size=3, padding='same')(x)
    x = BatchNormalization()(x)
    x = Add()([skip, x])   # long skip around the residual trunk

    for _ in range(2):
        x = upsample(x, num_filters * 4)

    x = Conv2D(3, kernel_size=9, padding='same', activation='tanh')(x)
    sr_out = Lambda(denormalize_m11)(x)

    return Model(lr_in, sr_out)
예제 #30
0
def res_block(inputs, axis, shared_axis):
    """Residual block: two 64-filter conv+BN with PReLU in between,
    merged with the identity input."""
    h = Conv2D(64,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               activation=None,
               use_bias=False)(inputs)
    h = BatchNormalization(axis=axis)(h)
    h = PReLU(alpha_initializer='zeros',
              alpha_regularizer=None,
              alpha_constraint=None,
              shared_axes=shared_axis)(h)
    h = Conv2D(64,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               activation=None,
               use_bias=False)(h)
    h = BatchNormalization(axis=axis)(h)

    return add([h, inputs])