Example 1
def oPNN(input_dims, embedding_dim, prod_dim, hidden_dims):
    n_cates = len(input_dims)
    x = [Input(shape=(input_dims[i], )) for i in range(n_cates)]
    embs = [Dense(embedding_dim)(x[i]) for i in range(n_cates)]
    lz = Concatenate(axis=-1)(embs)
    lz = Dense(prod_dim)(lz)

    lp = Add()(embs)  # f_sigma in the paper
    lp = outer_product(embedding_dim, prod_dim)(lp)

    l = Concatenate(axis=-1)([lp, lz])
    for i in range(len(hidden_dims)):
        l = Dense(hidden_dims[i])(l)
        l = PReLU()(l)
    ans = Dense(1, activation='sigmoid')(l)

    return Model(inputs=x, outputs=[ans])
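
The hidden stack above follows a common pattern: an un-activated Dense layer immediately followed by a PReLU layer. Below is a minimal, self-contained sketch of that pattern; the field sizes and hidden widths are illustrative, and the outer_product helper used in the example is assumed to be defined elsewhere, so it is not reproduced here.

# Minimal sketch (illustrative sizes): one-hot categorical inputs are embedded
# with Dense layers, concatenated, and fed through Dense + PReLU hidden layers.
from tensorflow.keras.layers import Input, Dense, Concatenate, PReLU
from tensorflow.keras.models import Model

input_dims = [10, 20, 30]            # hypothetical one-hot sizes per field
x = [Input(shape=(d,)) for d in input_dims]
embs = [Dense(8)(xi) for xi in x]    # field embeddings
l = Concatenate(axis=-1)(embs)
for units in [64, 32]:               # un-activated Dense followed by PReLU
    l = Dense(units)(l)
    l = PReLU()(l)
out = Dense(1, activation='sigmoid')(l)
model = Model(inputs=x, outputs=out)
model.compile(optimizer='adam', loss='binary_crossentropy')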
Example 2
    def get_activation(self):
        activation_type = self.activation_type
        if activation_type == 1:
            activation = LeakyReLU(alpha=0.1)
        elif activation_type == 2:
            activation = ReLU()
        elif activation_type == 3:
            activation = PReLU(alpha_initializer='zeros',
                               alpha_regularizer=None,
                               alpha_constraint=None,
                               shared_axes=None)
        elif activation_type == 4:
            activation = ELU(alpha=1.0)
        else:
            raise Exception('Not a valid activation type')

        return activation
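
A self-contained sketch of how such an activation factory might be wired into a model follows; the integer codes mirror the example above, while make_activation and the layer sizes are illustrative names rather than part of the original.

# Standalone sketch of the activation-factory pattern above: the factory returns
# a fresh advanced-activation layer instance, which is applied after an
# un-activated Dense layer.
from tensorflow.keras.layers import Input, Dense, LeakyReLU, ReLU, PReLU, ELU
from tensorflow.keras.models import Model

def make_activation(activation_type):
    if activation_type == 1:
        return LeakyReLU(alpha=0.1)
    if activation_type == 2:
        return ReLU()
    if activation_type == 3:
        return PReLU()
    if activation_type == 4:
        return ELU(alpha=1.0)
    raise ValueError('Not a valid activation type')

inp = Input(shape=(16,))
h = Dense(32)(inp)
h = make_activation(3)(h)   # PReLU
out = Dense(1, activation='sigmoid')(h)
model = Model(inp, out)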
Example 3
    def _back_net(inputs):
        def _residual_block(inputs):
            x = _empty_block(inputs)
            x = BatchNormalization()(x)
            x = PReLU(shared_axes=[1, 2])(x)
            x = _empty_block(x)
            x = BatchNormalization()(x)
            m = Add()([x, inputs])
            return m

        def _empty_block(inputs):
            x1 = Conv2D(feature_dim, (3, 3),
                        padding="same",
                        kernel_initializer="he_normal")(inputs)
            x2 = Conv2D(feature_dim, (3, 3),
                        dilation_rate=3,
                        padding="same",
                        kernel_initializer="he_normal")(inputs)
            x3 = Conv2D(feature_dim, (3, 3),
                        dilation_rate=5,
                        padding="same",
                        kernel_initializer="he_normal")(inputs)
            x = concatenate([x1, x2, x3], axis=-1)
            x_out = Conv2D(feature_dim, (1, 1),
                           padding="same",
                           kernel_initializer="he_normal")(x)
            return x_out

        x = Conv2D(feature_dim, (3, 3),
                   padding="same",
                   kernel_initializer="he_normal")(inputs)
        x = PReLU(shared_axes=[1, 2])(x)
        x0 = x

        for i in range(resunit_num):
            x = _residual_block(x)

        x = Conv2D(feature_dim, (3, 3),
                   padding="same",
                   kernel_initializer="he_normal")(x)
        x = BatchNormalization()(x)
        x = Add()([x, x0])
        x = Conv2D(input_channel_num, (3, 3),
                   padding="same",
                   kernel_initializer="he_normal")(x)
        return x
Example 4
def get_downconvolution_layer(ndims, layer_in, num_filters, dropout_rate=0.0):
    Conv = Conv2D if ndims == 2 else Conv3D
    MaxPooling = MaxPooling2D if ndims == 2 else MaxPooling3D
    kernel_size_a = (3, 3) if ndims == 2 else (3, 3, 3)

    # Low Res Branch
    a_halved = BatchNormalization(axis=1)(layer_in)
    a_halved = PReLU(shared_axes=(2, 3) if ndims == 2 else (2, 3, 4))(a_halved)
    a_halved = Conv(num_filters, kernel_size=kernel_size_a, padding='same', strides=2)(a_halved)
    a_halved = Dropout(dropout_rate)(a_halved)

    # MP branch
    b_mp = MaxPooling(pool_size=(2, 2) if ndims == 2 else (2, 2, 2))(layer_in)

    layer_out_halved = Concatenate(axis=1)([a_halved, b_mp])

    return layer_out_halved
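
The shared_axes argument controls which input axes share a single learnable slope. The example above is channels-first (BatchNormalization(axis=1), Concatenate(axis=1)), so it shares the spatial axes 2 and 3 (and 4 in 3-D). The small channels-last sketch below, with illustrative sizes, makes the effect visible by printing the alpha shapes.

# Sketch illustrating shared_axes: sharing the PReLU slope across the spatial
# axes leaves one learnable alpha per channel (channels-last here, so axes 1, 2).
from tensorflow.keras.layers import Input, Conv2D, PReLU
from tensorflow.keras.models import Model

inp = Input(shape=(32, 32, 3))
x = Conv2D(8, (3, 3), padding='same')(inp)
shared = PReLU(shared_axes=[1, 2])(x)   # alpha shape: (1, 1, 8)
full = PReLU()(x)                       # alpha shape: (32, 32, 8)
model = Model(inp, [shared, full])
for layer in model.layers:
    if isinstance(layer, PReLU):
        print(layer.name, layer.weights[0].shape)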
Example 5
    def convolutional_block(self, x, filters, kernel_size, stage, block, strides=(1, 1), activation=True, padding='valid'):
        """
        Convolutional Block consisting of BatchNormalization and PReLU as activation.

        PARAMETERS
        ----------

        x: input tensor
        filters: int,
                 Number of filters for the convolution.

        kernel_size: int or tuple of integers
                     Kernel size for the convolution

        stage: string,
               The stage in the architecture where the convolution is applied

        block: string,
               The block number within the stage specified above 

        strides: tuple, default=(1, 1)
                 The stride to be used for the convolution

        activation: bool, default=True
                    Whether or not to include the Activation Layer at the end.

        padding: {'same', 'valid'}, default='valid'
                 Same as that for Conv2D.

        RETURNS
        -------

        x: tensor,
           Output of the (Conv + Batch Norm) operation

        """

        x = Conv2D(filters, kernel_size, strides=strides, padding=padding,
                   kernel_initializer='he_normal',
                   name='conv' + stage + '_' + block)(x)
        x = BatchNormalization(name='batch_norm' + stage + '_' + block)(x)

        if activation:
            x = PReLU(name='act' + stage + '_' + block)(x)

        return x
Example 6
    def construct_encoding_trunk(self, inputs):
        x = inputs
        concat_layers = []
        for filters, num_layers in zip([64, 128, 256, 512, 512],
                                       [2, 2, 3, 3, 3]):
            concat_layers.append(x)
            for i in range(num_layers):
                x = Conv2D(filters=filters,
                           kernel_size=3,
                           activation=None,
                           padding='same')(x)
                x = PReLU(shared_axes=[1, 2])(x)
            x = Conv2D(filters=filters // 4,
                       kernel_size=3,
                       strides=2,
                       padding='same')(x)
        return x, concat_layers[::-1]
Example 7
def LeNet_plus_plus(x,
                    labels,
                    perform_L2_norm=False,
                    activation_type='softmax',
                    ring_approach=False,
                    background_class=False,
                    knownsMinimumMag=None):
    """
    Defines the network architecture for LeNet++.
    Use the options for different approaches:
    background_class: Classification with additional class for negative classes
    ring_approach: ObjectoSphere Loss applied if True
    knownsMinimumMag: Minimum Magnitude allowed for samples belonging to one of the Known Classes if ring_approach is True
    """
    mnist_image = x  #Input(shape=(28, 28, 1), dtype='float32', name='mnist_image')

    # 28 X 28 --> 14 X 14
    conv1_1 = Conv2D(32, (5, 5), strides=1, padding="same",
                     name='conv1_1')(mnist_image)
    conv1_2 = Conv2D(32, (5, 5), strides=1, padding="same",
                     name='conv1_2')(conv1_1)
    conv1_2 = BatchNormalization(name='BatchNormalization_1')(conv1_2)
    pool1 = MaxPooling2D(pool_size=(2, 2), strides=2, name='pool1')(conv1_2)
    # 14 X 14 --> 7 X 7
    conv2_1 = Conv2D(64, (5, 5), strides=1, padding="same",
                     name='conv2_1')(pool1)
    conv2_2 = Conv2D(64, (5, 5), strides=1, padding="same",
                     name='conv2_2')(conv2_1)
    conv2_2 = BatchNormalization(name='BatchNormalization_2')(conv2_2)
    pool2 = MaxPooling2D(pool_size=(2, 2), strides=2, name='pool2')(conv2_2)
    # 7 X 7 --> 3 X 3
    conv3_1 = Conv2D(128, (5, 5), strides=1, padding="same",
                     name='conv3_1')(pool2)
    conv3_2 = Conv2D(128, (5, 5), strides=1, padding="same",
                     name='conv3_2')(conv3_1)
    conv3_2 = BatchNormalization(name='BatchNormalization_3')(conv3_2)
    pool3 = MaxPooling2D(pool_size=(2, 2), strides=2, name='pool3')(conv3_2)
    flatten = Flatten(name='flatten')(pool3)
    fc = Dense(2, name='fc', use_bias=True)(flatten)

    knownUnknownsFlag = Input((1, ), dtype='float32', name='knownUnknownsFlag')
    pred = Dense(10, name='pred', use_bias=False)(fc)
    softmax = Activation(activation_type, name='softmax')(pred)
    xi = PReLU(name='xi')(fc)
    side = CenterLossLayer(alpha=0.5, name='centerlosslayer')([xi, labels])
    return softmax, side, fc
Example 8
def simple_birnn(model_input,
                 pos_tag_embeddings=None,
                 units=50,
                 dropout=0.5,
                 embedding_dropout=0.1,
                 prelu=True,
                 rnn_cell_name='CLSTM'):
    """

    :param model_input: embedding tensor of the input sequence
    :param pos_tag_embeddings:
    :param units:
    :param dropout:
    :param embedding_dropout:
    :param prelu:
    :param rnn_cell_name:
    :return:
    """

    if pos_tag_embeddings is not None:
        model_input = Concatenate()([model_input, pos_tag_embeddings])

    if embedding_dropout:
        model_input = Dropout(embedding_dropout)(model_input)

    rnn_cell = get_rnn_name(rnn_cell_name)

    bi_rnn = Bidirectional(rnn_cell(units, return_sequences=True))(model_input)
    attention = Attention()(bi_rnn)

    max_pool = GlobalMaxPool1D()(bi_rnn)
    average_pool = GlobalAveragePooling1D()(bi_rnn)
    concat = Concatenate()([max_pool, average_pool, attention])
    dropout_one = Dropout(dropout)(concat)

    activation = None if prelu else 'relu'

    dense = Dense(units, activation=activation)(dropout_one)

    if prelu:
        dense = PReLU()(dense)

    dropout_dense = Dropout(dropout)(dense)

    return dropout_dense
Example 9
def res_block(inp: Union[Layer, tf.Tensor],
              filters: int,
              kernel_size: int = 3,
              strides: int = 2,
              batch_norm: Union[float, None] = 0.5,
              use_bias: bool = True,
              use_sn: bool = False,
              kernel_initializer: Initializer = RandomNormal(stddev=0.02)):
    assert filters > 0, "Invalid filter number"
    assert kernel_size > 0, "Invalid kernel size"
    assert strides > 0, "Invalid stride size"

    ConvLayer = ConvSN2D if use_sn else Conv2D

    gen = inp

    model = ConvLayer(filters,
                      kernel_size,
                      strides=strides,
                      padding="same",
                      kernel_initializer=kernel_initializer,
                      use_bias=use_bias,
                      activation=None)(inp)

    if batch_norm:
        model = BatchNormalization(momentum=batch_norm, axis=-1)(model)

    model = PReLU(alpha_initializer='zeros',
                  alpha_regularizer=None,
                  alpha_constraint=None,
                  shared_axes=[1, 2])(model)
    model = ConvLayer(filters,
                      kernel_size,
                      strides=strides,
                      padding="same",
                      kernel_initializer=kernel_initializer,
                      use_bias=use_bias,
                      activation=None)(model)

    if batch_norm:
        model = BatchNormalization(momentum=batch_norm, axis=-1)(model)

    model = Add()([gen, model])  # residual connection: branch shapes must match, so callers typically use strides=1

    return model
Example 10
def CNN_Model(data_id, seq_len, num_classes, num_features, embedding_matrix=None):

    in_text = Input(shape=(seq_len,))
    op_units, op_activation = _get_last_layer_units_and_activation(num_classes)

    trainable = True
    if embedding_matrix is None:
        emb_size = 64
        if data_id != 0:
            emb_size = 256
        print ('cnn emb_size:', emb_size)
        x = Embedding(num_features, emb_size, trainable=trainable)(in_text)
    else:
        print ('embedding_matrix:', embedding_matrix[:10])
        x = Embedding(num_features, 300, trainable=trainable, weights=[embedding_matrix])(in_text)

#     x = Conv1D(128, kernel_size=5, padding='valid', kernel_initializer='glorot_uniform')(x)
#     x = GlobalMaxPooling1D()(x)

    if data_id == 1:
        print ('kernel_size:', 3)
        x = Conv1D(128, kernel_size=3, padding='valid', kernel_initializer='glorot_uniform')(x)
        x = GlobalMaxPooling1D()(x)
    else:
        print ('kernel_size:', 2,3,4)
        xs = []
        for win in [2,3,4]:
            xt = Conv1D(128, kernel_size=win, padding='valid', kernel_initializer='glorot_uniform')(x)
            xt = GlobalMaxPooling1D()(xt)    
            xs.append(xt)
        x = Concatenate()(xs)

    x = Dense(128)(x)
    x = PReLU()(x)
    x = Dropout(0.35)(x)
    x = BatchNormalization()(x)

    if data_id == 4:
        op_activation = 'sigmoid'
        
    y = Dense(op_units, activation=op_activation)(x)

    md = keras.models.Model(inputs = [in_text], outputs=y)

    return md
Example 11
    def _rain_net(inputs):
        def _residual_block(inputs, number):
            x = Conv2D(feature_dim, (3, 3),
                       padding="same",
                       kernel_initializer="he_normal")(inputs)
            x = BatchNormalization()(x)
            x = PReLU(shared_axes=[1, 2])(x)
            x = Conv2D(feature_dim, (3, 3),
                       padding="same",
                       kernel_initializer="he_normal")(x)
            x = BatchNormalization()(x)
            filters = 64  # assumes feature_dim == 64 so the SE scaling matches the channel count
            se_shape = (1, 1, filters)
            se = GlobalAveragePooling2D()(x)
            se = Reshape(se_shape)(se)
            se = Dense(number,
                       activation="relu",
                       kernel_initializer="he_normal",
                       use_bias=False)(se)
            se = Dense(filters,
                       activation="hard_sigmoid",
                       kernel_initializer="he_normal",
                       use_bias=False)(se)
            x = multiply([x, se])
            m = Add()([x, inputs])
            return m

        x = Conv2D(feature_dim, (3, 3),
                   padding="same",
                   kernel_initializer="he_normal")(inputs)
        x = PReLU(shared_axes=[1, 2])(x)
        x0 = x

        for i in range(resunit_num):
            x = _residual_block(x, 4)

        x = Conv2D(feature_dim, (3, 3),
                   padding="same",
                   kernel_initializer="he_normal")(x)
        x = BatchNormalization()(x)
        x = Add()([x, x0])
        x = Conv2D(input_channel_num, (3, 3),
                   padding="same",
                   kernel_initializer="he_normal")(x)
        return x
Example 12
    def get_nn_layers(self, x_, num_classes):
        for i in range(self.n_nn):
            act = self.nn_layers[i].activation
            if act in ['relu', 'sigmoid', 'tanh', 'elu']:
                x_ = Dense(self.nn_layers[i].units, activation=act)(x_)
            elif act == 'prelu':
                x_ = Dense(self.nn_layers[i].units)(x_)
                x_ = PReLU()(x_)
            else:
                x_ = Dense(self.nn_layers[i].units)(x_)
                x_ = LeakyReLU()(x_)
            if self.fp == 16:
                x_ = BatchNormalizationF16()(x_)
            else:
                x_ = BatchNormalization()(x_)
            x_ = Dropout(self.nn_layers[i].dropout)(x_)
        x_ = Dense(num_classes, activation='softmax')(x_)
        return x_
Example 13
    def _build_dense_layer(self, layer_input, dropout):
        """

        :param layer_input:
        :param dropout:
        :return:
        """
        activation = None if self.prelu else 'relu'

        dense = Dense(self.final_units, activation=activation)(layer_input)

        if self.prelu:
            dense = PReLU()(dense)

        if dropout:
            dense = Dropout(dropout)(dense)

        return dense
Example 14
    def create_net(self, input_shape):
        net_input = Input(shape=input_shape)
        x = Conv2D(self.filters, (11, 11), padding='same')(net_input)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = MaxPooling2D((2, 2), padding='same')(x)

        x = Conv2D(self.filters, (7, 7), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = MaxPooling2D((2, 2), padding='same')(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = MaxPooling2D((2, 2), padding='same')(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        self.encoded = MaxPooling2D((2, 2), padding='same')(x)

        # Keep the encoder part
        self.encoder = Model(net_input, self.encoded)

        # And now the decoder part
        x = Conv2D(self.filters, (3, 3), padding='same')(self.encoded)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = UpSampling2D((2, 2))(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = UpSampling2D((2, 2))(x)

        x = Conv2D(self.filters, (7, 7), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = UpSampling2D((2, 2))(x)

        x = Conv2D(self.filters, (11, 11), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = UpSampling2D((2, 2))(x)
        self.decoded = Conv2D(1, (3, 3), activation='sigmoid',
                              padding='same')(x)

        self.model = Model(net_input, self.decoded)
        return
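
alpha_initializer=Constant(value=0.25) sets the starting slope that PReLU applies to negative inputs (0.25 is the initialization used in the original PReLU paper); the slopes remain trainable. A minimal sketch that builds a PReLU layer the same way and inspects it (the shapes and sample values below are illustrative):

# Sketch: alpha_initializer sets the starting negative-input slope of PReLU;
# the values are learnable and can be inspected after building the layer.
import numpy as np
from tensorflow.keras.initializers import Constant
from tensorflow.keras.layers import PReLU

layer = PReLU(alpha_initializer=Constant(value=0.25))
layer.build((None, 4))                         # alpha has shape (4,)
print(layer.get_weights()[0])                  # [0.25 0.25 0.25 0.25]
print(layer(np.array([[-2.0, -1.0, 0.0, 3.0]], dtype='float32')))
# negative inputs are scaled by alpha: [[-0.5 -0.25 0. 3.]]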
Example 15
def network2(feature_dimension):
    models = Sequential()
    models.add(
        Dense(64,
              input_dim=feature_dimension,
              kernel_initializer='uniform',
              kernel_regularizer=l2(0.00001)))
    models.add(PReLU())
    models.add(BatchNormalization())
    models.add(Dropout(0.7))
    models.add(Dense(3, kernel_initializer='uniform'))
    models.add(Activation('softmax'))
    opt = optimizers.Adagrad(learning_rate=0.01)
    models.compile(optimizer=opt,
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])

    return models
Example 16
def get_SM_model_2(input_shape,
                   embedding_layer,
                   units,
                   dtype='float32',
                   classes=6,
                   reg_par=0.0):
    sentence = Input(shape=input_shape, dtype=dtype)

    if embedding_layer is not None:
        x = embedding_layer(sentence)
    else:
        x = sentence

    x = Dense(units=units, activation=None)(x)
    x = PReLU()(x)
    x = Dense(units=classes, activation="softmax")(x)

    return Model(inputs=sentence, outputs=x)
Example 17
def build_VGG_Bnorm(input_data, block_channels=[16,32,64], block_layers=[2,2,2], fcChannels=[256,256], p_drop=0.4, classes=10):
    net = input_data
    for i, (cCount, lCount) in enumerate(zip(block_channels, block_layers)):
        net = build_VGG_Bnorm_block(net, cCount, lCount, 'conv{}'.format(i))
        net = Dropout(rate=0.25)(net)
        
    net = Flatten()(net)
    
    for i, cCount in enumerate(fcChannels):
        net = Dense(cCount, name='fc{}'.format(i))(net)
        net = BatchNormalization()(net)
        net = PReLU()(net)

        net = Dropout(rate=p_drop)(net)
    
    net = Dense(classes, name='out', activation='softmax')(net)

    return net
Example 18
    def __call__(self, inputs):
        """ Call the Faceswap Convolutional Layer.

        Parameters
        ----------
        inputs: Tensor
            The input to the layer

        Returns
        -------
        Tensor
            The output tensor from the Convolution 2D Layer
        """
        if self._use_reflect_padding:
            inputs = ReflectionPadding2D(stride=self._strides,
                                         kernel_size=self._args[-1],
                                         name="{}_reflectionpadding2d".format(
                                             self._name))(inputs)
        conv = DepthwiseConv2D if self._use_depthwise else Conv2D
        var_x = conv(*self._args,
                     strides=self._strides,
                     padding=self._padding,
                     name="{}_{}conv2d".format(
                         self._name, "dw" if self._use_depthwise else ""),
                     **self._kwargs)(inputs)
        # normalization
        if self._normalization == "instance":
            var_x = InstanceNormalization(
                name="{}_instancenorm".format(self._name))(var_x)
        if self._normalization == "batch":
            var_x = BatchNormalization(axis=3,
                                       name="{}_batchnorm".format(
                                           self._name))(var_x)

        # activation
        if self._activation == "leakyrelu":
            var_x = LeakyReLU(0.1,
                              name="{}_leakyrelu".format(self._name))(var_x)
        if self._activation == "swish":
            var_x = Swish(name="{}_swish".format(self._name))(var_x)
        if self._activation == "prelu":
            var_x = PReLU(name="{}_prelu".format(self._name))(var_x)

        return var_x
Example 19
def cnn_model():
    model = Sequential()
    model.add(Conv2D(100, (10, 3), input_shape=(128, 10, 1), padding='same'))
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2)))  # 64 * 5
    model.add(Dropout(0.35))

    model.add(Conv2D(150, (10, 3), padding='same'))
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2)))  # 32 * 2
    model.add(Dropout(0.35))

    model.add(Conv2D(200, (10, 3), padding='same'))
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2)))  # 32 * 2
    model.add(Dropout(0.35))

    model.add(Conv2D(300, (10, 3), padding='same'))
    model.add(PReLU())
    model.add(BatchNormalization(axis=-1))
    #model.add(MaxPooling2D((2,2))) # 16 * 1
    model.add(Dropout(0.35))

    model.add(Conv2D(400, (10, 3), padding='same'))
    model.add(PReLU())
    #model.add(MaxPooling2D((2,2)))
    model.add(Dropout(0.35))

    model.add(Flatten())

    model.add(Dense(units=200))  # no built-in activation; PReLU follows
    model.add(PReLU(alpha_initializer='zeros'))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))

    model.add(Dense(units=100))  # no built-in activation; PReLU follows
    model.add(PReLU(alpha_initializer='zeros'))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))

    model.add(Dense(units=41, activation='softmax'))
    model.summary()
    return model
Example 20
def build_cifar_resnet(class_num=10, input_shape=(32, 32, 3)):
    model_input = Input(shape=input_shape)

    X = Conv2D(64, kernel_size=(3, 3), padding='same')(model_input)
    X = BatchNormalization()(X)
    X = PReLU()(X)

    B2 = Conv2D(64, kernel_size=(3, 3), padding='same')(X)
    B2 = BatchNormalization()(B2)
    B2 = PReLU()(B2)
    B2 = Conv2D(64, kernel_size=(3, 3), padding='same')(B2)
    B2 = BatchNormalization()(B2)
    X = Add()([X, B2])
    X = PReLU()(X)

    X = ZeroPadding2D(padding=(1, 1))(X)
    B1 = Conv2D(128, kernel_size=(3, 3), strides=2)(X)
    B1 = BatchNormalization()(B1)
    B2 = Conv2D(128, kernel_size=(3, 3), strides=2)(X)
    B2 = BatchNormalization()(B2)
    B2 = PReLU()(B2)
    B2 = Conv2D(128, kernel_size=(3, 3), padding='same')(B2)
    B2 = BatchNormalization()(B2)
    X = Add()([B1, B2])
    X = PReLU()(X)

    X = ZeroPadding2D(padding=(1, 1))(X)
    B1 = Conv2D(256, kernel_size=(3, 3), strides=2)(X)
    B1 = BatchNormalization()(B1)
    B2 = Conv2D(256, kernel_size=(3, 3), strides=2)(X)
    B2 = BatchNormalization()(B2)
    B2 = PReLU()(B2)
    B2 = Conv2D(256, kernel_size=(3, 3), padding='same')(B2)
    B2 = BatchNormalization()(B2)
    X = Add()([B1, B2])
    X = PReLU()(X)

    X = GlobalAveragePooling2D()(X)
    X = Dense(class_num, activation='softmax')(X)

    model = Model(inputs=model_input, outputs=X)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Example 21
def initial_block_lstm(tensor, n_filters = 13):
    """

    :param tensor: 5 dim
    :param n_filters:
    :return:
    """
    conv = TimeDistributed(Conv2D(filters=n_filters,
                      kernel_size=(3, 3),
                      strides=(2, 2),
                      padding='same',
                      name='initial_block_conv',
                      kernel_initializer='he_normal'))(tensor)
    pool = TimeDistributed(MaxPooling2D(pool_size=(2, 2),
                                        name='initial_block_pool'))(tensor)
    concat = Concatenate()([conv, pool])
    # print(concat.shape)
    concat = PReLU(shared_axes=[1, 2, 3], name='prelu_initial_block')(concat)
    return concat
Example 22
def make_convnet_model(embedding_matrix=None, weights_path=None):
    model1 = Sequential()
    model1.add(Embedding(len(word_index) + 1,
                         EMBEDDING_SIZE,
                         weights=[embedding_matrix], trainable=False,
                         input_length=MAX_LEN))
    model1.add(Dropout(DROPOUT_PROB))
    model1.add(Conv1D(NUM_FILTERS,
                      KERNEL_SIZE,
                      padding='valid',
                      activation='relu',
                      strides=1))
    model1.add(GlobalMaxPooling1D())
    model1.summary()

    model2 = Sequential()
    model2.add(Embedding(len(word_index) + 1,
                         EMBEDDING_SIZE,
                         weights=[embedding_matrix], trainable=False,
                         input_length=MAX_LEN))
    model2.add(Dropout(DROPOUT_PROB))
    model2.add(Conv1D(NUM_FILTERS,
                      KERNEL_SIZE,
                      padding='valid',
                      activation='relu',
                      strides=1))
    model2.add(GlobalMaxPooling1D())
    model2.summary()
    # the Keras 1 Merge layer was removed; join the two branches with the functional API
    merged = Concatenate()([model1.output, model2.output])
    merged = BatchNormalization()(merged)
    merged = Dense(OUTPUT_SIZE)(merged)
    merged = PReLU()(merged)
    merged = Dropout(DROPOUT_PROB)(merged)
    merged = BatchNormalization()(merged)
    output = Dense(1, activation='sigmoid')(merged)
    merged_model = Model(inputs=[model1.input, model2.input], outputs=output)
    if weights_path is not None:
        print('Restoring weights ... ')
        merged_model.load_weights(weights_path)
    merged_model.compile(loss='binary_crossentropy',
                         optimizer='adam', metrics=['accuracy'])
    merged_model.summary()
    return merged_model
Example 23
def create_model_simplecnn(input_shape, num_classes):
    inp = Input(shape=input_shape)
    x = _conv_simple_block(inp, 64)
    x = _conv_simple_block(x, 128)
    x = _conv_simple_block(x, 256)
    x = _conv_simple_block(x, 512)

    x1 = keras.layers.GlobalAveragePooling2D()(x)
    x2 = keras.layers.GlobalMaxPooling2D()(x)
    x = keras.layers.Add()([x1, x2])

    x = Dropout(0.2)(x)
    x = Dense(128, activation='linear')(x)
    x = PReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)
    predictions = Dense(num_classes, activation='linear')(x)

    return keras.models.Model(inputs=inp, outputs=predictions)
Example 24
def _bottleneck(inputs, filters, kernel, t, s, r=False):
    """Bottleneck
    This function defines a basic bottleneck structure.
    # Arguments
        inputs: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernel: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        t: Integer, expansion factor.
            t is always applied to the input size.
        s: An integer or tuple/list of 2 integers, specifying the strides
            of the convolution along the width and height. Can be a single
            integer to specify the same value for all spatial dimensions.
        r: Boolean, whether to use the residual connection.
    # Returns
        Output tensor.
    """

    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    tchannel = K.int_shape(inputs)[channel_axis] * t

    x = _conv_block(inputs, tchannel, (1, 1), (1, 1))

    x = DepthwiseConv2D(kernel,
                        strides=(s, s),
                        depth_multiplier=1,
                        padding='same',
                        kernel_initializer='glorot_normal')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = PReLU(cval)(x)
    #     x = Activation(relu)(x)

    x = Conv2D(filters, (1, 1),
               strides=(1, 1),
               padding='same',
               kernel_initializer='glorot_normal',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(axis=channel_axis, **batch_norm_params)(x)

    if r:
        x = add([x, inputs])
    return x
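
The docstring above describes an expand (1x1) -> depthwise -> project (1x1) bottleneck, but the snippet depends on module-level helpers (_conv_block, cval, weight_decay, batch_norm_params) that are not shown. Below is a self-contained sketch of the same structure using only standard Keras layers, with PReLU after the first two stages and a linear projection; it assumes channels-last data, and the sizes and residual flag are illustrative.

from tensorflow.keras.layers import (Input, Conv2D, DepthwiseConv2D,
                                     BatchNormalization, PReLU, add)
from tensorflow.keras.models import Model

def bottleneck_sketch(inputs, filters, kernel=(3, 3), t=6, s=1, r=False):
    # expand: 1x1 conv widens the channels by the expansion factor t
    tchannel = int(inputs.shape[-1]) * t
    x = Conv2D(tchannel, (1, 1), padding='same')(inputs)
    x = BatchNormalization()(x)
    x = PReLU(shared_axes=[1, 2])(x)

    # depthwise conv at stride s
    x = DepthwiseConv2D(kernel, strides=(s, s), padding='same')(x)
    x = BatchNormalization()(x)
    x = PReLU(shared_axes=[1, 2])(x)

    # project: linear 1x1 conv back to the requested number of filters
    x = Conv2D(filters, (1, 1), padding='same')(x)
    x = BatchNormalization()(x)

    if r:  # residual only when input and output shapes match (s == 1, filters == input channels)
        x = add([x, inputs])
    return x

inp = Input(shape=(32, 32, 16))
out = bottleneck_sketch(inp, filters=16, s=1, r=True)
model = Model(inp, out)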
Example 25
def createTransitionLayer(x, compression_factor, num_input_filters,
                          growth_rate, kernel_size, activation_type,
                          dropout_rate):

    x = BatchNormalization()(x)
    if activation_type == 'LeakyReLU':
        x = LeakyReLU()(x)
    elif activation_type == 'PReLU':
        x = PReLU()(x)
    else:
        x = Activation(activation_type)(x)

    x = Conv2D(int(compression_factor * num_input_filters),
               kernel_size=1,
               use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return AveragePooling2D((2, 2), strides=(2, 2))(x)
Example 26
def raw_cls_model():

    inputs = Input((28, 28, 1))

    x = Conv2D(32, (3, 3))(inputs)
    x = BatchNormalization()(x)
    x = PReLU()(x)
    x = Conv2D(32, (3, 3))(x)
    x = BatchNormalization()(x)
    x = PReLU()(x)

    x = Conv2D(64, (5, 5))(x)
    x = BatchNormalization()(x)
    x = PReLU()(x)
    x = Conv2D(64, (5, 5))(x)
    x = BatchNormalization()(x)
    x = PReLU()(x)

    x = Conv2D(128, (7, 7))(x)
    x = BatchNormalization()(x)
    x = PReLU()(x)
    x = Conv2D(128, (7, 7))(x)
    x = BatchNormalization()(x)
    x = PReLU()(x)

    x = Flatten()(x)
    x = Dense(2)(x)
    out1 = PReLU(name="out1")(x)  # 2 dimension for coord represention
    out2 = Dense(3, activation="softmax")(
        out1
    )  # 10 dimension for classification   kernel_regularizer=l2(0.0005)

    model = Model(inputs, out2)

    # plot_model(model, to_file='images/raw_cls_model.png', show_shapes=True, show_layer_names=True)

    model.compile(optimizer=SGD(lr=3e-3,
                                momentum=0.9,
                                decay=0.01,
                                nesterov=True),
                  loss="categorical_crossentropy",
                  metrics=["acc"])

    return model
Example 27
def incept4(inputs, num_channel, activation='PReLU'):
    '''
    Google's Inception-like with dimension reduction
    '''
    z1 = Conv3D(num_channel, (1, 1, 1), padding='same')(inputs)
    z2 = Conv3D(num_channel, (3, 3, 3), padding='same')(z1)

    z3 = Conv3D(num_channel, (5, 5, 5), padding='same')(z1)

    z4 = AveragePooling3D((3, 3, 3), (1, 1, 1), padding='same')(inputs)
    z4 = Conv3D(num_channel, (1, 1, 1), padding='same')(z4)

    z = concatenate([z3, z2, z4, z1])

    if activation == 'PReLU':
        z = PReLU(shared_axes=[1, 2, 3])(z)
    elif activation == 'LeakyReLU':
        z = LeakyReLU(0.2)(z)

    return z
Example 28
def sr_resnet(num_filters=64, num_res_blocks=16):
    x_in = Input(shape=(None, None, 3))
    x = Normalization_01()(x_in)

    x = Conv2D(num_filters, kernel_size=9, padding='same')(x)
    x = x_1 = PReLU(shared_axes=[1, 2])(x)

    for _ in range(num_res_blocks):
        x = res_block(x, num_filters)
    x = Conv2D(num_filters, kernel_size=3, padding='same')(x)
    x = BatchNormalization()(x)
    x = Add()([x_1, x])

    x = upsample(x, num_filters)
    x = upsample(x, num_filters)

    x = Conv2D(3, kernel_size=9, padding='same', activation='tanh')(x)
    x = Denormalization_m11()(x)

    return Model(x_in, x)
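
The res_block, upsample, Normalization_01 and Denormalization_m11 helpers are not shown. In SRGAN-style generators the upsample step is commonly a Conv2D that quadruples the channels followed by a x2 pixel shuffle (depth_to_space) and PReLU; the sketch below is an assumed reconstruction of such a helper, not the author's code.

import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Lambda, PReLU

def upsample_sketch(x, num_filters):
    # Conv2D produces 4x the channels; the pixel shuffle trades them for 2x spatial resolution
    x = Conv2D(num_filters * 4, kernel_size=3, padding='same')(x)
    x = Lambda(lambda t: tf.nn.depth_to_space(t, 2))(x)
    return PReLU(shared_axes=[1, 2])(x)

inp = tf.keras.Input(shape=(24, 24, 64))
print(upsample_sketch(inp, 64).shape)   # (None, 48, 48, 64)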
Example 29
    def TvBlock(self, input_):
        '''
        two views
        '''
        con_1 = Conv1D(filters=self.conv_fiters,
                       kernel_size=self.conv_kernel_size,
                       strides=self.conv_stride,
                       padding='same',
                       activation=self.conv_activation)(input_)
        x = BatchNormalization()(con_1)

        con_2 = Conv1D(filters=self.conv_fiters,
                       kernel_size=self.conv_kernel_size,
                       strides=self.conv_stride,
                       padding='same',
                       activation=self.conv_activation)(x)
        x = BatchNormalization()(con_2)
        # activation after the block; the paper uses ReLU, PReLU is used here instead
        x = PReLU()(x)
        return x
Example 30
    def __prep_fc(self, x, inps):
        drop_rate, dense_dim, act_layer_type, batch_norm_flag = inps
        if batch_norm_flag:
            bnorm_layer = BatchNormalization()
        else:
            bnorm_layer = None

        if drop_rate:
            drop_layer = Dropout(drop_rate)
        else:
            drop_layer = Dropout(0.0)

        dense_layer = Dense(dense_dim)

        if act_layer_type == "leakyrelu":
            activation_layer = LeakyReLU()
        else:
            activation_layer = PReLU()
        x = self.__fc_block(x, drop_layer, dense_layer, activation_layer, bnorm_layer)
        return x