Example #1
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    # main branch
    internal = output // internal_scale  # integer division so Conv2D gets an int filter count
    encoder = inp

    # 1x1
    input_stride = 2 if downsample else 1  # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    encoder = Conv2D(internal, (input_stride, input_stride),
                            # padding='same',
                            strides=(input_stride, input_stride), use_bias=False)(encoder)
    # Batch normalization + PReLU
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # conv
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
    else:
        raise Exception("You shouldn't be here")

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    
    # 1x1
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = SpatialDropout2D(dropout_rate)(encoder)

    other = inp
    # other branch
    if downsample:
        other, indices = MaxPoolingWithArgmax2D()(other)

        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)

    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    if downsample:
        return encoder, indices
    else:
        return encoder
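A minimal usage sketch for the bottleneck block above (a sketch only: it assumes the standard Keras imports the snippet relies on, skips the downsampling path because MaxPoolingWithArgmax2D is a custom layer, and uses illustrative sizes):

from keras.layers import Input, Conv2D, BatchNormalization, PReLU, SpatialDropout2D, add
from keras.models import Model

inp = Input(shape=(64, 64, 16))
x = bottleneck(inp, 16, dilated=2)       # dilated 3x3 bottleneck, no downsampling
x = bottleneck(x, 16, asymmetric=5)      # asymmetric 5x1 / 1x5 bottleneck
model = Model(inp, x)
model.summary()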
Example #2
def attention_3d_block(inputs, TIME_STEPS):
    # inputs.shape = (batch_size, time_steps, input_dim)
    input_dim = int(inputs.shape[2])
    a = Permute((2, 1))(inputs)
    a = Reshape((input_dim, TIME_STEPS))(a)  # not strictly necessary; it just makes the dimension order explicit
    a = Dense(TIME_STEPS, activation='softmax')(a)
    if SINGLE_ATTENTION_VECTOR:
        a = Lambda(lambda x: K.mean(x, axis=1), name='dim_reduction')(a)
        a = RepeatVector(input_dim)(a)
    a_probs = Permute((2, 1), name='attention_vec')(a)
#    output_attention_mul = merge([inputs, a_probs], name='attention_mul', mode='mul')
    output_attention_mul = multiply([inputs, a_probs], name='attention_mul')  # element-wise, matching the legacy merge(mode='mul') above
    return output_attention_mul
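A minimal sketch of how a block like this is usually wired into a recurrent model (assumptions: Keras 2 functional-API imports, the module-level SINGLE_ATTENTION_VECTOR flag the snippet expects, and illustrative sizes):

from keras import backend as K
from keras.layers import Input, LSTM, Dense, Permute, Reshape, Lambda, RepeatVector, multiply
from keras.models import Model

SINGLE_ATTENTION_VECTOR = False
TIME_STEPS = 20

inputs = Input(shape=(TIME_STEPS, 32))
rnn_out = LSTM(32, return_sequences=True)(inputs)
att_out = attention_3d_block(rnn_out, TIME_STEPS)      # (None, TIME_STEPS, 32)
pooled = Lambda(lambda x: K.sum(x, axis=1))(att_out)   # (None, 32)
output = Dense(1, activation='sigmoid')(pooled)
model = Model(inputs, output)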
Example #3
def attention_3d_block(inputs, input_dim, is_single_attention_vector=False):
    # inputs.shape = (batch_size, time_steps, input_dim)
    feature_length = int(inputs.shape[2])
    a = Permute((2, 1))(inputs)
    #    a = Reshape((input_dim, time_steps))(a) # this line is not useful. It's just to know which dimension is what.
    a = Dense(input_dim, activation='softmax')(a)
    if is_single_attention_vector:
        a = Lambda(lambda x: K.mean(x, axis=1), name='dim_reduction')(a)
        a = RepeatVector(feature_length)(a)
    a_probs = Permute((2, 1), name='attention_vec')(a)
    output_attention_mul = merge([inputs, a_probs],
                                 name='attention_mul',
                                 mode='mul')
    return output_attention_mul
Example #4
def build_socnn(input_shape_sig=(128, 1), input_shape_off=(128, 1), dim=1):
    #significant_network
    Input_sig = Input(shape=input_shape_sig, dtype='float32', name='input_sig')
    name = "Significance_Conv_0"
    x = Conv1D(filters=8,kernel_size=ks, padding='same',
           activation='linear', name=name,
           kernel_constraint=maxnorm(norm))(Input_sig)

    for i in range(num_layer_sig-1):
        name = "Significance_Conv_" + str(i+1)
        if i == (num_layer_sig-2):
            fn = dim-1
        else:
            fn = 8
        x = Conv1D(filters=fn,
                kernel_size=ks, padding='same',
                activation='linear', name=name,
                kernel_constraint=maxnorm(norm))(x)

        x = BatchNormalization(name="Significance_BN"+str(i+1))(x)
    output_sig = x

    #offset_network
    Input_off = Input(shape=input_shape_off, dtype='float32', name='input_off')
    name = "Offset_Conv_0"
    y = Conv1D(filters=dim-1,
            kernel_size=ks, padding='same',
            activation='linear', name=name,
            kernel_constraint=maxnorm(norm))(Input_off)

    output_off = keras.layers.add([y, Input_off], name='output_off')
    value = Permute((2, 1))(output_off)

    output_sig = Permute((2, 1))(output_sig)
    output_sig = TimeDistributed(Activation('softmax'), name='softmax')(output_sig)

    # H_{n-1} = σ(S) ⊗ (off + xI)
    H1 = keras.layers.multiply(inputs=[output_sig, value], name='significancemerge')
    #Hn
    H2 = TimeDistributed(Dense(output_length, activation='linear', use_bias=False,
                                kernel_constraint=nonneg() if nonnegative else None),
                                name='out')(H1)
    main_output = Permute((2, 1), name='main_output')(H2)

    model = keras.models.Model(inputs=[Input_sig, Input_off], outputs=[main_output, output_off])
    model.compile(optimizer=keras.optimizers.Adam(lr=lr, clipnorm=clipnorm),
               loss={'main_output': 'mse', 'output_off': 'mse'},
               loss_weights={'main_output': 1., 'output_off': aux_weight})

    return model
Example #5
def create_model(embeddings, config=get_config(), sentence_length=100):

    config['sentence_length'] = sentence_length

    # sentence attention
    attention_input = Input(shape=(config['sentence_length'], 300,), dtype='float32')

    x = Permute((2, 1))(attention_input)
    x = Reshape((300, config['sentence_length']))(x)
    x = Dense(config['sentence_length'], activation='softmax', use_bias=True)(x)

    x = Lambda(lambda x: K.mean(x, axis=1), name='attention_vector_sentence')(x)
    x = RepeatVector(300)(x)
    # x = Lambda(lambda x: x, name='attention_vector_sentence')(x)

    attention_probabilities = Permute((2, 1))(x)

    x = merge.multiply([attention_input, attention_probabilities], name='attention_mul')
    x = Lambda(lambda x: K.sum(x, axis=1))(x)

    sentence_attention = Model(attention_input, x, name='sentence_attention')

    embedding_layer = Embedding(
            embeddings.shape[0],
            embeddings.shape[1],
            input_length=config['sentence_length'],
            trainable=False,
            weights=[embeddings],
        )

    input = Input(shape=(config['sentence_length'],), dtype='int32')
    x = embedding_layer(input)
    x = SpatialDropout1D(config['embedding_dropout'])(x)

    #x = Attention()(x)
    x1 = sentence_attention(x)
    #x2 = sentence_attention(x)
    x2 = GRU(config['lstm_layer_size'], return_sequences=False, recurrent_dropout=config['recurrent_dropout'], dropout=config['dropout_prob'])(x)

    x = add([x1, x2])

    if config['dense_layer']:
        x = Dense(config['dense_layer'], activation='relu')(x)
        x = Dropout(config['dropout_prob'])(x)

    output = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=input, outputs=output)

    return model, config
Example #6
def attention_3d_block(
    slt_api_num,
    feature_dim,
    name='',
):
    """
    :param query: (None,D)
    :param key: (None,slt_api_num,D)
    :param value: (None,slt_api_num,D) 一般等于key
    :return:
    """

    # slt_api_num = int(key.shape[1])
    # feature_dim = int(key.shape[2])

    query = Input(shape=(feature_dim, ), name=name + 'query_input')
    key = Input(shape=(
        slt_api_num,
        feature_dim,
    ), name=name + 'key_input')
    value = Input(shape=(
        slt_api_num,
        feature_dim,
    ),
                  name=name + 'value_input')

    Repeat_query = RepeatVector(slt_api_num)(query)  # (None,slt_api_num,D)
    outer_prod = Multiply()([Repeat_query, key])
    sub = Subtract()([Repeat_query, key])
    att_score = Concatenate(name=name + 'att_info_concate')(
        [Repeat_query, key, outer_prod, sub])  # (None,slt_api_num,4*D)

    a = Permute((2, 1))(att_score)  # shape=(?, 4*D, slt_api_num)
    a = Dense(slt_api_num, activation='softmax')(
        a)  # shape=(?, 4*D, slt_api_num)   # softmax applied along each feature dimension
    a = Lambda(lambda x: K.mean(x, axis=1), name=name + 'dim_reduction')(
        a)  # shape=(?, slt_api_num) # average over all features to get one weight per service
    a = RepeatVector(feature_dim)(a)  # shape=(?,D,slt_api_num)
    a_probs = Permute(
        (2, 1), name=name + 'attention_vec')(a)  # shape=(?,slt_api_num,D)
    output_attention_mul = Multiply(name=name + 'attention_mul')(
        [value, a_probs])  # shape=(?,slt_api_num, D)
    att_result = Lambda(lambda x: tf.reduce_sum(x, axis=1))(
        output_attention_mul)  # (None,D)

    model = Model(inputs=[query, key, value],
                  outputs=[att_result],
                  name=name + 'attBlock')
    return model
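A small usage sketch for the attention model above (the imports cover the layer names the snippet uses; the 'demo_' prefix and the random batch are illustrative only):

import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.layers import (Input, RepeatVector, Multiply, Subtract, Concatenate,
                          Permute, Dense, Lambda)
from keras.models import Model

att = attention_3d_block(slt_api_num=5, feature_dim=16, name='demo_')
q = np.random.random((2, 16))        # (batch, D)
k = np.random.random((2, 5, 16))     # (batch, slt_api_num, D)
v = np.random.random((2, 5, 16))     # value, same shape as key
print(att.predict([q, k, v]).shape)  # (2, 16)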
Example #7
def AttentionLayer(inputs, timesteps=400):
    assert len(
        inputs.shape
    ) == 3, 'Attention input should be of dim 3 but found {} dims'.format(
        len(inputs.shape))

    input_dim = inputs.shape[2]
    a = Permute((2, 1))(inputs)
    a = Reshape((int(input_dim), int(timesteps)))(a)
    a = Dense(timesteps, activation='softmax')(a)

    a_probs = Permute((2, 1), name='attention_vec')(a)
    output = merge([inputs, a_probs], name='attention_mul', mode='mul')

    return output
Example #8
def Deep_speaker_model(input_shape):
    def conv_and_res_block(x_in, filters):
        x = Conv2D(filters,
                   kernel_size=(5, 5),
                   strides=(2, 2),
                   padding='same',
                   kernel_regularizer=regularizers.l2(l=c.WEIGHT_DECAY),
                   name=f'conv_{filters}-s')(x_in)
        x = BatchNormalization(name=f'conv_{filters}-s_bn')(x)
        x = clipped_relu(x)
        for i in range(3):
            x = identity_block(x,
                               kernel_size=(3, 3),
                               filters=filters,
                               name=f'res{filters}_{i}')
        return x

    x_in = Input(input_shape, name='input')
    x = Permute((2, 1, 3), name='permute')(x_in)
    x = conv_and_res_block(x, 64)
    x = conv_and_res_block(x, 128)
    x = conv_and_res_block(x, 256)
    x = conv_and_res_block(x, 512)
    # average
    x = Lambda(lambda y: K.mean(y, axis=[1, 2]), name='avgpool')(x)
    # affine
    x = Dense(512, name='affine')(x)
    x = Lambda(lambda y: K.l2_normalize(y, axis=1), name='ln')(x)
    model = Model(inputs=[x_in], outputs=[x], name='deepspeaker')
    return model
Example #9
def dense_cnn(input, n_classes):
    _dropout_rate = 0.1
    _weight_decay = 1e-4
    _first_filters = 64
    # input 32 * W * 1
    x = Conv2D(_first_filters, (3, 3),
               strides=(1, 1),
               kernel_initializer='he_normal',
               padding='same',
               use_bias=False,
               kernel_regularizer=l2(_weight_decay))(input)  # 32 * W * 64
    x = MaxPooling2D((2, 2))(x)  # 16 * (W/2) * 64

    x, filters = dense_block(x, 8, _first_filters, 8,
                             _dropout_rate)  # 16 * (W/2) * 128
    x, filters = transition_block(x, filters, _dropout_rate, 3,
                                  _weight_decay)  # 8 * (W/2) * 128

    x, filters = dense_block(x, 8, filters, 8,
                             _dropout_rate)  # 8 * (W/2) * 196
    x, filters = transition_block(x, filters, _dropout_rate, 3,
                                  _weight_decay)  # 4 * (W/2) * 196

    x, filters = dense_block(x, 8, filters, 8,
                             _dropout_rate)  # 4 * (W/2) * 256
    x, filters = transition_block(x, filters, _dropout_rate, 2,
                                  _weight_decay)  # 2 * (W/4) * 256

    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)

    x = Permute((2, 1, 3), name='permute')(x)
    x = TimeDistributed(Flatten(), name='flatten')(x)
    y_pred = Dense(n_classes, name='out', activation='softmax')(x)
    return y_pred
Example #10
def design_network(model_name):
    input1 = Input(shape=(input_length, 768))
    if model_name == 'CNN':
        lcov1 = Conv1D(filters=units, kernel_size=1, activation=relu)(input1)
        out = MaxPooling1D(pool_size=1)(lcov1)
    if model_name == 'BiLSTM':
        out = Bidirectional(LSTM(units))(input1)
    if 'Bert' in model_name or 'Xlnet' in model_name:
        convs = []
        for fsz in kernel_size:
            l_conv = Conv1D(filters=units, kernel_size=fsz,
                            activation=relu)(input1)
            lpool = MaxPooling1D(input_length - fsz + 1)(l_conv)
            convs.append(lpool)
        merge = concatenate(convs, axis=1)

        #   reshape = Reshape((units,3))(merge)
        permute = Permute((2, 1))(merge)
        if 'Att' in model_name:
            out = Bidirectional(LSTM(units, return_sequences=True))(permute)
            out = AttentionLayer(step_dim=units)(out)
        else:
            out = Bidirectional(LSTM(units))(permute)
    out = Dropout(keep_prob)(out)
    output = Dense(class_nums, activation=softmax)(out)
    model = Model(input1, output)
    model.compile(loss=losses.categorical_crossentropy,
                  optimizer=optimizers.Adam(lr=learning_rate),
                  metrics=['accuracy'])
    model.summary()
    return model
Example #11
def SE_ResNet(input_shape):
    # first layer
    x_in = Input(input_shape, name='input')
    #  f,t,c
    x = Permute((2, 1, 3), name='permute')(x_in)
    x = Conv2D(64, (3, 3),
               strides=(2, 2),
               padding='same',
               name='conv1',
               kernel_regularizer=regularizers.l2(l=c.WEIGHT_DECAY))(x)
    x = BatchNormalization(name='bn1')(x)
    x = ELU(name=f'relu1')(x)
    x = MaxPool2D((2, 2), strides=(2, 2), padding='same', name='pool1')(x)

    x = residual_block(x, outdim=256, stride=(1, 1), name='block2')
    x = residual_block(x, outdim=256, stride=(2, 2), name='block3')
    x = residual_block(x, outdim=256, stride=(2, 2), name='block4')

    x = residual_block(x, outdim=512, stride=(2, 2), name='block5')
    # x = residual_block(x,outdim=512,stride=(2,2),name='block6')

    x = Conv2D(512, (x.shape[1].value, 1),
               strides=(1, 1),
               padding='valid',
               name='fc1')(x)
    x = BatchNormalization(name="bn_fc1")(x)
    x = ELU(name=f'relu_fc1')(x)

    x = Lambda(lambda y: K.mean(y, axis=[1, 2]), name='average')(x)

    x = Dense(512, name='fc2')(x)
    x = BatchNormalization(name='bn_fc2')(x)
    x = ELU(name=f'relu_fc2')(x)

    return Model(inputs=[x_in], outputs=[x], name='SEResNet')
Example #12
    def test_permute(self):
        """
        Test the conversion of a permute layer.
        """
        from keras.layers.core import Permute
        # Create a simple Keras model
        model = Sequential()
        model.add(Permute((3, 2, 1), input_shape=(10, 64, 3)))

        input_names = ['input']
        output_names = ['output']
        spec = keras.convert(model, input_names, output_names).get_spec()
        self.assertIsNotNone(spec)

        # Test the model class
        self.assertIsNotNone(spec.description)
        self.assertTrue(spec.HasField('neuralNetwork'))

        # Test the inputs and outputs
        self.assertEqual(len(spec.description.input), len(input_names))
        self.assertEqual(sorted(input_names),
                         sorted(map(lambda x: x.name, spec.description.input)))
        self.assertEqual(len(spec.description.output), len(output_names))
        self.assertEqual(
            sorted(output_names),
            sorted(map(lambda x: x.name, spec.description.output)))

        # Test the layer parameters.
        layers = spec.neuralNetwork.layers
        layer_0 = layers[0]
        self.assertIsNotNone(layer_0.permute)
Example #13
def dense_cnn(input, nclass):

    _dropout_rate = 0.2 
    _weight_decay = 1e-4

    _nb_filter = 64
    # conv 64 5*5 s=2
    x = Conv2D(_nb_filter, (5, 5), strides=(2, 2), kernel_initializer='he_normal', padding='same',
               use_bias=False, kernel_regularizer=l2(_weight_decay))(input)
   
    # 64 + 8 * 8 = 128
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    # 128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    # 128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    # 192 -> 128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    # 128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)

    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)

    x = Permute((2, 1, 3), name='permute')(x)
    x = TimeDistributed(Flatten(), name='flatten')(x)
    y_pred = Dense(nclass, name='out', activation='softmax')(x)

    # basemodel = Model(inputs=input, outputs=y_pred)
    # basemodel.summary()

    return y_pred
Example #14
def build_judge_model(size):
    orig_input = Input(shape=(size, size, 10))
    orig_encoding = Conv2D(
        256,
        kernel_size=5,
        strides=1,
    )(orig_input)
    orig_encoding = build_encoder_layers(orig_encoding, size)
    orig_latent = Activation(activation='sigmoid')(orig_encoding)

    generated_input = Input(shape=(size, size, 10))
    generated_encoding = Conv2D(256, kernel_size=5, strides=1)(generated_input)
    generated_encoding = build_encoder_layers(generated_encoding, size)
    generated_latent = Activation(activation='sigmoid')(generated_encoding)

    both = Lambda(lambda x: K.abs(x[0] - x[1]))(
        [orig_latent, generated_latent])
    prediction = Dense(size * size)(both)
    prediction = Reshape((1, size * size))(prediction)
    prediction = Permute((2, 1))(prediction)
    prediction = Activation('sigmoid')(prediction)

    judge_model = Model(name='judge',
                        inputs=[orig_input, generated_input],
                        outputs=[prediction])
    judge_model.summary()
    return judge_model
Example #15
def yoloP1P2P3(shape):
    model = Sequential()
    model.add(Convolution2D(16, 3, 3,input_shape=shape,border_mode='same',subsample=(1,1)))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(32,3,3 ,border_mode='same'))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2),border_mode='valid'))
    model.add(Convolution2D(64,3,3 ,border_mode='same'))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2),border_mode='valid'))
    model.add(Convolution2D(128,3,3 ,border_mode='same'))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2),border_mode='valid'))
    model.add(Convolution2D(256,3,3 ,border_mode='same'))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2),border_mode='valid'))
    model.add(Convolution2D(512,3,3 ,border_mode='same'))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2),border_mode='valid'))
    model.add(Convolution2D(1024,3,3 ,border_mode='same'))
    model.add(LeakyReLU(alpha=0.1))
    model.add(Convolution2D(1024,3,3 ,border_mode='same'))
    model.add(LeakyReLU(alpha=0.1))
    model.add(Convolution2D(1024,3,3 ,border_mode='same'))
    model.add(LeakyReLU(alpha=0.1))
    model.add(Permute((2,3,1)))
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Dense(4096))
    model.add(LeakyReLU(alpha=0.1))
    model.add(Dense(1470))
    return model
Example #16
 def setup(self):
     _weight_decay = 1e-4
     _nrof_fliters = 64
     _dropout_rate = 0.2
     # conv 64 5*5 s=2   --> (16, 140, 64)
     x = Conv2D(_nrof_fliters, (5, 5),
                strides=(2, 2),
                kernel_initializer='he_normal',
                padding='same',
                use_bias=False,
                kernel_regularizer=l2(_weight_decay))(self.input)
     #  64 + 8 * 8 = 128 --> (16, 140, 128)
     x, _nrof_fliters = self.dense_block(x, 8, _nrof_fliters, 8, None)
     # 128               --> ( 8, 70, 128)
     x, _nrof_fliters = self.transition_block(x, 128, _dropout_rate, 2,
                                              _weight_decay)
     # 128 + 8 * 8 = 192 --> ( 8, 70, 192)
     x, _nrof_fliters = self.dense_block(x, 8, _nrof_fliters, 8, None)
     # 192 -> 128        --> ( 4, 35, 128)
     x, _nrof_fliters = self.transition_block(x, 128, _dropout_rate, 2,
                                              _weight_decay)
     # 128 + 8 * 8 = 192 --> ( 4, 35, 192)
     x, _nrof_fliters = self.dense_block(x, 8, _nrof_fliters, 8, None)
     x = BatchNormalization(axis=-1, epsilon=1.1e-5)(x)
     x = Activation('relu')(x)
     #                   --> ( 35, 4, 192)
     x = Permute((2, 1, 3), name='permute')(x)
     #                   --> ( 35, 768); TimeDistributed applies the wrapped layer to every time step
     # applying Flatten at each time step flattens dims 2 and 3 of x, i.e. tf.reshape(x, [x_shape[0], x_shape[1], -1])
     x = TimeDistributed(Flatten(), name='flatten')(x)
     # 35 time steps, each with its own softmax classification, because one training image (one text line) contains multiple Chinese characters
     self.preds = Dense(self.nrof_classes, name='out',
                        activation='softmax')(x)
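A standalone sketch of the Permute + TimeDistributed(Flatten()) step the comments above describe: it turns an (H, W, C) feature map into a W-step sequence of H*C features (the (4, 35, 192) shape mirrors the comments and is illustrative only):

from keras.layers import Input, Permute, TimeDistributed, Flatten
from keras.models import Model

inp = Input(shape=(4, 35, 192))          # (H, W, C)
seq = Permute((2, 1, 3))(inp)            # (35, 4, 192)
seq = TimeDistributed(Flatten())(seq)    # (35, 768)
Model(inp, seq).summary()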
Example #17
def resnet_ldnn(input_dim):
    '''
    here we use ResNet50 to replace simple CNN block in CLDNN model
    '''
    model = Sequential()
    model.add(
        ResNet50(include_top=False,
                 input_shape=(*input_dim, 1),
                 weights=None,
                 classes=None,
                 pooling=None))  # keep the 4-D feature map so the Permute/TimeDistributed layers below work
    model.add(Permute((2, 1, 3)))
    model.add(TimeDistributed(Flatten()))
    model.add(LSTM(64, dropout=0.25, return_sequences=True))
    model.add(LSTM(64, dropout=0.25))
    model.add(Dense(64))
    model.add(LeakyReLU(alpha=0.01))
    model.add(Dropout(0.5))
    model.add(
        Dense(6,
              kernel_regularizer=regularizers.l2(0.01),
              activation='softmax'))
    model.summary()

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=adam(lr=1e-4),
                  metrics=['accuracy'])
    return model
Example #18
def test_siamese_two_layer_cnn():
    data_1 = np.random.random((100, 1, 20, 10))
    data_2 = np.random.random((100, 10, 20, 1))

    labels = np.random.randint(3, size=100)
    labels = to_categorical(labels, 3)

    #print(labels)
    pool_size_2 = 5
    input_shape_3 = 10
    model = Sequential()
    model.add(
        Convolution2D(7,
                      1,
                      3,
                      border_mode='same',
                      input_shape=(1, 20, 10),
                      dim_ordering='th'))
    num = math.floor(input_shape_3 / pool_size_2)
    model.add(MaxPooling2D(pool_size=(1, pool_size_2), dim_ordering='th'))
    model.add(Permute((2, 1, 3)))
    model.add(Reshape((20, 7 * num)))

    model.add(Convolution1D(5, 3, border_mode='same'))
    model.add(MaxPooling1D(3))

    model.add(Flatten())
    model.add(Dense(3, activation='softmax'))
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
    model.fit(data_1, labels, nb_epoch=10, batch_size=32)
    predictions = model.predict(data_1)
Example #19
def get_model(in_data, out_data):
    """
    Keras model definition
    
    :param in_data: input data to the network (training data)
    :param out_data: output data to the network (training labels)
    :return: _model: keras model configuration
    """
    mel_pool_size = [1]
    mel_nb_filt = 16
    dropout_rate = 0.1

    mel_start = Input(shape=(in_data.shape[-3], in_data.shape[-2],
                             in_data.shape[-1]))
    mel_x = mel_start
    for i, convCnt in enumerate(mel_pool_size):
        mel_x = Conv2D(filters=mel_nb_filt, kernel_size=(3, 3),
                       padding='same')(mel_x)
        mel_x = BatchNormalization(axis=1)(mel_x)
        mel_x = Activation('relu')(mel_x)
        mel_x = MaxPooling2D(pool_size=(1, mel_pool_size[i]))(mel_x)
        mel_x = Dropout(dropout_rate)(mel_x)
    mel_x = Permute((2, 1, 3))(mel_x)
    mel_x = Reshape(
        (in_data.shape[-2],
         (in_data.shape[-1] * mel_nb_filt) // np.prod(mel_pool_size)))(mel_x)

    mel_x = TimeDistributed(Dense(out_data.shape[-1]))(mel_x)
    out = Activation('sigmoid')(mel_x)

    _model = Model(inputs=mel_start, outputs=out)
    _model.compile(optimizer='Adam', loss='binary_crossentropy')
    _model.summary()
    return _model
Example #20
def crnn_model():
    input = Input(shape=(img_h,None,1),name='the_input')
    m = Conv2D(64,kernel_size=(3,3),activation='relu',padding='same',name='conv1')(input)
    m = MaxPooling2D(pool_size=(2,2),strides=(2,2),name='pool1')(m)
    m = Conv2D(128,kernel_size=(3,3),activation='relu',padding='same',name='conv2')(m)
    m = MaxPooling2D(pool_size=(2,2),strides=(2,2),name='pool2')(m)
    m = Conv2D(256,kernel_size=(3,3),activation='relu',padding='same',name='conv3')(m)
    m = BatchNormalization(axis=3)(m)
    m = Conv2D(256,kernel_size=(3,3),activation='relu',padding='same',name='conv4')(m)

    m = ZeroPadding2D(padding=(0,1))(m)
    m = MaxPooling2D(pool_size=(2,2),strides=(2,1),padding='valid',name='pool3')(m)

    m = Conv2D(512,kernel_size=(3,3),activation='relu',padding='same',name='conv5')(m)
    m = BatchNormalization(axis=3)(m)
    m = Conv2D(512,kernel_size=(3,3),activation='relu',padding='same',name='conv6')(m)

    m = ZeroPadding2D(padding=(0,1))(m)
    m = MaxPooling2D(pool_size=(2,2),strides=(2,1),padding='valid',name='pool4')(m)
    m = Conv2D(512,kernel_size=(2,2),activation='relu',padding='valid',name='conv7')(m)
    m = BatchNormalization(axis=3)(m)

    m = Permute((2,1,3),name='permute')(m)
    m = TimeDistributed(Flatten(),name='timedistrib')(m)

    m = Bidirectional(GRU(rnnunit,return_sequences=True,implementation=2),name='blstm1')(m)
    m = Dense(rnnunit,name='blstm1_out',activation='linear',)(m)
    m = Bidirectional(GRU(rnnunit,return_sequences=True,implementation=2),name='blstm2')(m)
    y_pred = Dense(nclass,name='blstm2_out',activation='softmax')(m)

    basemodel = Model(inputs=input,outputs=y_pred)
    return basemodel
Example #21
def rel_types_model(model,
                    ins,
                    max_len,
                    embedding_dim,
                    rel_types2id_size,
                    focus,
                    pre='rtypes'):
    """Discourse relation types model as Keras Graph."""

    # prepare focus dimensionality
    model.add_node(RepeatVector(rel_types2id_size),
                   name=pre + '_focus_rep',
                   input=focus)
    model.add_node(Permute((2, 1)),
                   name=pre + '_focus',
                   input=pre + '_focus_rep')

    # discourse relation types dense neural network (sample, time_pad, rel_types2id)
    model.add_node(TimeDistributedDense(rel_types2id_size, init='he_uniform'),
                   name=pre + '_dense',
                   input=ins[0])
    model.add_node(Activation('softmax'),
                   name=pre + '_softmax',
                   input=pre + '_dense')

    # multiplication to focus the activations (doc, time_pad, rel_types2id)
    model.add_node(Activation('linear'),
                   name=pre + '_out',
                   inputs=[pre + '_focus', pre + '_softmax'],
                   merge_mode='mul')
    return pre + '_out'
Example #22
def create_network(num_classes):
    """RNN Network for audio classification

    Args:
        num_classes: Number of classification classes.
    Returns:
        A keras model.
    """
    input_tensor = Input(shape=(None, None, DataConfig.kNumFeatures), name='input_node')

    features = Permute([1,3,2])(input_tensor)
    flat = Reshape((-1, DataConfig.kNumFeatures * DataConfig.kNumMels))(features)

    gru1 = CuDNNGRU(196, return_sequences=True)(flat)
    act1 = Activation('tanh')(gru1)
    drop1 = Dropout(args.dropout)(act1)

    gru2 = CuDNNGRU(128, return_sequences=True)(drop1)
    act2 = Activation('tanh')(gru2)
    drop2 = Dropout(args.dropout)(act2)

    gru3 = CuDNNGRU(64)(drop2)
    act3 = Activation('sigmoid')(gru3)

    output_tensor = Dense(num_classes, activation='sigmoid', name='output_layer')(act3)

    model = Model(input_tensor, output_tensor)
    return model
Example #23
def createInceptionSegNet(input_shape,
                          n_labels,
                          pool_size=(2, 2),
                          output_mode="sigmoid"):
    # encoder
    inputs = Input(shape=input_shape)

    conv_1 = inceptionModule(inputs)
    conv_2 = inceptionModule(conv_1)
    pool_1, mask_1 = MaxPoolingWithArgmax2D(pool_size)(conv_2)

    conv_3 = inceptionModule(pool_1)
    conv_4 = inceptionModule(conv_3)
    pool_2, mask_2 = MaxPoolingWithArgmax2D(pool_size)(conv_4)

    ## encoding done, decoding start

    unpool_1 = MaxUnpooling2D(pool_size)([pool_2, mask_2])
    conv_4 = inceptionModule(unpool_1)
    conv_5 = inceptionModule(conv_4)

    unpool_2 = MaxUnpooling2D(pool_size)([conv_5, mask_1])
    conv_5 = inceptionModule(unpool_2)
    conv_6 = inceptionModule(conv_5)

    conv_7 = Convolution2D(n_labels, (1, 1), padding='valid')(conv_6)

    reshape = Reshape((n_labels, input_shape[0] * input_shape[1]))(conv_7)
    permute = Permute((2, 1))(reshape)
    outputs = Activation(output_mode)(permute)

    segnet = Model(inputs=inputs, outputs=outputs)
    return segnet
Example #24
    def create_segnet(self):
        #Model creation
        print("------------CREATING NETWORK--------------")
        # Add a noise layer to get a denoising network. This helps avoid overfitting
        #network.add(Layer(input_shape=(3, 960, 720)))

        #network.add(GaussianNoise(stddev=0.3))
        self.network.encoding_layers = self.create_encoding_layers()
        self.network.decoding_layers = self.create_decoding_layers()
        for l in self.network.encoding_layers:
            self.network.add(l)
        for l in self.network.decoding_layers:
            self.network.add(l)

        self.network.add(Conv2D(
            self.nb_class,
            1,
            padding='valid',
        ))
        self.network.add(Reshape((self.nb_class, self.data_shape)))
        self.network.add(Permute((2, 1)))
        self.network.add(Activation('softmax'))
        self.network.summary()
        from keras.optimizers import SGD, Adam
        #optimizer = SGD(lr=0.01, momentum=0.8, decay=0.1, nesterov=False)
        #optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        optimizer = Adam()
        self.network.compile(loss="categorical_crossentropy",
                             optimizer=optimizer)
Example #25
def get_decoder():
    return [
        UpSampling2D((2, 2)),
        ZeroPadding2D(padding=(1, 1)),
        Conv2D(32, (3, 3), activation='relu', padding='valid'),
        BatchNormalization(),
        UpSampling2D((2, 2)),
        ZeroPadding2D(padding=(1, 1)),
        Conv2D(32, (3, 3), activation='relu', padding='valid'),
        BatchNormalization(),
        ZeroPadding2D(padding=(1, 1)),
        Conv2D(32, (3, 3), activation='relu', padding='valid'),
        BatchNormalization(),
        UpSampling2D((2, 2)),
        ZeroPadding2D(padding=(1, 1)),
        Conv2D(16, (3, 3), activation='relu', padding='valid'),
        BatchNormalization(),
        ZeroPadding2D(padding=(1, 1)),
        Conv2D(16, (3, 3), activation='relu', padding='valid'),
        BatchNormalization(),
        # connect to label
        Conv2D(n_labels, (1, 1), padding='valid'),
        # Reshape((n_labels, img_h*img_w), input_shape=(2,img_h,img_w)),
        Reshape((n_labels, img_h * img_w)),
        Permute((2, 1)),
        Activation('softmax')
    ]
Example #26
def crnn(nb_classes, input_shape=(32, 280, 3)):
    model_input = Input(shape=input_shape, name='the_input')

    m = Conv2D(32, kernel_size=(3, 3), activation='relu', padding='same')(model_input)
    m = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(m)
    # h/2, w/2

    m = Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same')(m)
    m = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(m)
    # h/4, w/4

    m = ZeroPadding2D(padding=(0, 1))(m)
    m = Conv2D(128, kernel_size=(3, 3), activation='relu')(m)
    # h/4 - 2, w/4

    m = ZeroPadding2D(padding=(0, 1))(m)
    m = Conv2D(256, kernel_size=(3, 3), padding='valid', activation='relu')(m)
    # h/4 - 4, w/4

    m = ZeroPadding2D(padding=(0, 1))(m)
    m = Conv2D(256, kernel_size=(3, 3), padding='valid', activation='relu')(m)
    # h/4 - 6, w/4

    m = Permute((2, 1, 3))(m)
    m = Reshape(target_shape=(-1, 1, (input_shape[0]//4 - 6)*256))(m)
    m = TimeDistributed(Flatten())(m)
    # m = Bidirectional(SimpleRNN(256, return_sequences=True, dropout=0.5), name='rnn1')(m)
    m = Dense(1024, activation='relu')(m)
    m = Dropout(0.5)(m)
    y_pred = Dense(nb_classes, activation='softmax')(m)

    basemodel = Model(inputs=model_input, outputs=y_pred)
    basemodel.summary()
    return basemodel, y_pred, model_input
Example #27
    def test_permute(self):
        """
        Test the conversion of a permute layer.
        """
        from keras.layers.core import Permute

        # Create a simple Keras model
        model = Sequential()
        model.add(Permute((3, 2, 1), input_shape=(10, 64, 3)))

        input_names = ["input"]
        output_names = ["output"]
        spec = keras.convert(model, input_names, output_names).get_spec()
        self.assertIsNotNone(spec)

        # Test the model class
        self.assertIsNotNone(spec.description)
        self.assertTrue(spec.HasField("neuralNetwork"))

        # Test the inputs and outputs
        self.assertEqual(len(spec.description.input), len(input_names))
        six.assertCountEqual(self, input_names,
                             [x.name for x in spec.description.input])
        self.assertEqual(len(spec.description.output), len(output_names))
        six.assertCountEqual(self, output_names,
                             [x.name for x in spec.description.output])

        # Test the layer parameters.
        layers = spec.neuralNetwork.layers
        layer_0 = layers[0]
        self.assertIsNotNone(layer_0.permute)
Example #28
def ResNet_50(input_shape):
    x_in = Input(input_shape, name='input')
    x = Permute((2, 1, 3), name='permute')(x_in)

    x = Conv2D(64, (7, 7),
               strides=(2, 2),
               padding='same',
               name='conv1',
               kernel_regularizer=regularizers.l2(l=c.WEIGHT_DECAY))(x)
    x = BatchNormalization(name="bn1")(x)
    x = Activation('relu')(x)
    x = MaxPool2D((3, 3), strides=(2, 2), padding='same', name='pool1')(x)

    x = res_conv_block(x, (64, 64, 256), (1, 1), name='block1')
    x = res_conv_block(x, (64, 64, 256), (1, 1), name='block2')
    x = res_conv_block(x, (64, 64, 256), (1, 1), name='block3')

    x = res_conv_block(x, (128, 128, 512), (1, 1), name='block4')
    x = res_conv_block(x, (128, 128, 512), (1, 1), name='block5')
    x = res_conv_block(x, (128, 128, 512), (1, 1), name='block6')
    x = res_conv_block(x, (128, 128, 512), (2, 2), name='block7')

    x = Conv2D(512, (x.shape[1].value, 1), name='fc6')(x)
    x = BatchNormalization(name="bn_fc6")(x)
    x = Activation('relu', name='relu_fc6')(x)
    # avgpool
    # x = GlobalAveragePooling2D(name='avgPool')(x)
    x = Lambda(lambda y: K.mean(y, axis=[1, 2]), name='avgpool')(x)

    model = Model(inputs=[x_in], outputs=[x], name='ResCNN')
    # model.summary()
    return model
Example #29
def model(input_shape):
    autoencoder = models.Sequential()
    # Add a noise layer to get a denoising autoencoder. This helps avoid overfitting
    autoencoder.add(Layer(input_shape=input_shape))

    #autoencoder.add(GaussianNoise(sigma=0.3))
    autoencoder.encoding_layers = create_encoding_layers()
    autoencoder.decoding_layers = create_decoding_layers()
    for i, l in enumerate(autoencoder.encoding_layers):
        autoencoder.add(l)
        print(i, l.input_shape, l.output_shape)
    for i, l in enumerate(autoencoder.decoding_layers):
        autoencoder.add(l)
        print(i, l.input_shape, l.output_shape)

    the_conv = (Convolution2D(
        num_classes,
        1,
        1,
        border_mode='valid',
    ))
    autoencoder.add(the_conv)
    print(the_conv.input_shape, the_conv.output_shape)
    autoencoder.add(Reshape(
        (num_classes, data_shape)))  #, input_shape=(num_classes,360,480)))
    autoencoder.add(Permute((2, 1)))
    autoencoder.add(Activation('softmax'))
    #from keras.optimizers import SGD
    #optimizer = SGD(lr=0.01, momentum=0.8, decay=0., nesterov=False)
    return autoencoder
Example #30
def segnet_small(input_shape=(3, 90, 120)):
    autoencoder = models.Sequential()
    # Add a noise layer to get a denoising autoencoder. This helps avoid overfitting

    #autoencoder.add(GaussianNoise(sigma=0.3))
    autoencoder.encoding_layers = create_encoding_layers_small(input_shape)
    autoencoder.decoding_layers = create_decoding_layers_small()
    for i, l in enumerate(autoencoder.encoding_layers):
        autoencoder.add(l)
        print(i, l.input_shape, l.output_shape)
    for i, l in enumerate(autoencoder.decoding_layers):
        autoencoder.add(l)
        print(i, l.input_shape, l.output_shape)

    the_conv = (Convolution2D(
        num_classes,
        1,
        1,
        border_mode='valid',
    ))
    autoencoder.add(the_conv)
    print(the_conv.input_shape, the_conv.output_shape)
    autoencoder.add(Reshape(
        (num_classes, input_shape[1] *
         input_shape[2])))  #, input_shape=(num_classes,360,480)))
    autoencoder.add(Permute((2, 1)))
    autoencoder.add(Activation('softmax'))
    return autoencoder