Example #1
def padding(x1, x2):
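    """Zero-pad the shorter of x1/x2 on the right along the time axis so both lengths match."""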
    if x1.shape[-2] > x2.shape[-2]:
        adds = x1.shape[-2] - x2.shape[-2]
        x2 = layers.ZeroPadding1D((0, adds))(x2)
    elif x2.shape[-2] > x1.shape[-2]:
        adds = x2.shape[-2] - x1.shape[-2]
        x1 = layers.ZeroPadding1D((0, adds))(x1)
    return x1, x2
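A minimal usage sketch for the helper above; the import and the 10-step/7-step shapes are illustrative assumptions, not part of the original:

from tensorflow.keras import Input, layers

a = Input((10, 16))   # 10 time steps
b = Input((7, 16))    # 7 time steps
a, b = padding(a, b)  # b is right-padded with zeros to 10 steps
merged = layers.add([a, b])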
Example #2
def block2(x,
           filters,
           kernel_size=3,
           stride=1,
           conv_shortcut=False,
           name=None):
    """A residual block.

  Arguments:
      x: input tensor.
      filters: integer, filters of the bottleneck layer.
      kernel_size: default 3, kernel size of the bottleneck layer.
      stride: default 1, stride of the first layer.
      conv_shortcut: default False, use convolution shortcut if True,
        otherwise identity shortcut.
      name: string, block label.

  Returns:
    Output tensor for the residual block.
  """
    bn_axis = 2 if backend.image_data_format() == 'channels_last' else 1

    preact = layers.BatchNormalization(axis=bn_axis,
                                       epsilon=1.001e-5,
                                       name=name + '_preact_bn')(x)
    preact = layers.Activation('relu', name=name + '_preact_relu')(preact)

    if conv_shortcut:
        shortcut = layers.Conv1D(4 * filters,
                                 1,
                                 strides=stride,
                                 name=name + '_0_conv')(preact)
    else:
        shortcut = layers.MaxPooling1D(1,
                                       strides=stride)(x) if stride > 1 else x

    x = layers.Conv1D(filters,
                      1,
                      strides=1,
                      use_bias=False,
                      name=name + '_1_conv')(preact)
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_1_bn')(x)
    x = layers.Activation('relu', name=name + '_1_relu')(x)

    x = layers.ZeroPadding1D(padding=(1, 1), name=name + '_2_pad')(x)
    x = layers.Conv1D(filters,
                      kernel_size,
                      strides=stride,
                      use_bias=False,
                      name=name + '_2_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_2_bn')(x)
    x = layers.Activation('relu', name=name + '_2_relu')(x)

    x = layers.Conv1D(4 * filters, 1, name=name + '_3_conv')(x)
    x = layers.Add(name=name + '_out')([shortcut, x])
    return x
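A hedged sketch of how such pre-activation blocks are typically chained into a stack (loosely mirroring Keras' ResNetV2 `stack2`); the shapes and block names here are assumptions, not from the original:

from tensorflow.keras import Input, Model

inputs = Input((128, 3))
x = block2(inputs, 32, conv_shortcut=True, name='s1_b1')  # projection shortcut first
x = block2(x, 32, name='s1_b2')                           # identity shortcuts after
x = block2(x, 32, stride=2, name='s1_b3')                 # downsample at stack end
model = Model(inputs, x)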
Example #3
def create_model(input_shape, params):
    batch_size = params['batch_size']
    lahead = params['lahead']
    #    input_shape = input_shape.shape[-2:]
    X_input = Input(input_shape)
    X = X_input
    X = layers.ZeroPadding1D(padding=2)(X)
    #    X = layers.TimeDistributed(Dense(input_shape[-1],
    #                               kernel_constraint=constraints.max_norm(1.0),
    #                               activation='tanh'))(X)
    X = Conv1D(filters=32,
               kernel_size=6,
               strides=1,
               name='conv1',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = layers.GaussianNoise(.005)(X)
    # X = layers.AveragePooling1D(2, strides=1)(X)
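    # conv_block / identity_block below are ResNet-style helpers assumed to be
    # defined elsewhere in this project; they are not part of this example.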
    X = conv_block(X, f=3, filters=[48, 48, 96], stage=2, block='a', strides=2)
    X = layers.GaussianNoise(.005)(X)
    X = identity_block(X,
                       f=3,
                       filters=[48, 48, 96],
                       stage=2,
                       block='b',
                       strides=1)
    X = layers.GaussianNoise(.005)(X)
    X = identity_block(X,
                       f=3,
                       filters=[48, 48, 96],
                       stage=2,
                       block='c',
                       strides=1)

    print(X.get_shape())
    print(X_input.get_shape())
    # X = layers.concatenate([X, X_input])

    # X = layers.AveragePooling1D(2, name="avg_pool")(X)

    #    rnn_cells = [tf.keras.layers.LSTMCell(128) for _ in range(2)]
    #    stacked_lstm = tf.keras.layers.StackedRNNCells(rnn_cells)
    #    lstm_layer = tf.keras.layers.RNN(stacked_lstm)
    #    X = lstm_layer(X)

    X = layers.Flatten()(X)
    X = Dense(256)(X)
    X = Dense(lahead, kernel_initializer=glorot_uniform(seed=0))(X)

    model = Model(inputs=X_input, outputs=X, name='model1')
    model.compile(loss='mse',
                  optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.0001))
    return model
Example #4
    def __call__(self, x):
        shortcut = x
        prefix = 'expanded_conv/'
        infilters = x.shape[-1]

        if self.block_id:
            # Expand
            prefix = 'expanded_conv_{}/'.format(self.block_id)
            x = layers.Conv1D(_depth(infilters * self.expansion),
                              kernel_size=1,
                              padding='same',
                              use_bias=False,
                              name=prefix + 'expand')(x)
            x = layers.BatchNormalization(epsilon=1e-3,
                                          momentum=0.999,
                                          name=prefix + 'expand/BatchNorm')(x)
            x = self.activation(x)

        if self.stride == 2:
            x = layers.ZeroPadding1D(padding=1,
                                     name=prefix + 'depthwise/pad')(x)

        x = layers.SeparableConv1D(
            int(x.shape[-1]),
            self.kernel_size,
            strides=self.stride,
            padding='same' if self.stride == 1 else 'valid',
            use_bias=False,
            name=prefix + 'depthwise')(x)
        x = layers.BatchNormalization(epsilon=1e-3,
                                      momentum=0.999,
                                      name=prefix + "depthwise/BatchNorm")(x)
        x = self.activation(x)

        if self.se_ratio:
            x = SEBlock(_depth(infilters * self.expansion), self.se_ratio,
                        prefix)(x)

        x = layers.Conv1D(self.filters,
                          kernel_size=1,
                          padding='same',
                          use_bias=False,
                          name=prefix + 'project')(x)
        x = layers.BatchNormalization(epsilon=1e-3,
                                      momentum=0.999,
                                      name=prefix + "project/BatchNorm")(x)

        if self.stride == 1 and infilters == self.filters:
            x = layers.Add(name=prefix + 'Add')([shortcut, x])

        return x
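This block leans on `_depth` and `SEBlock` helpers that are not shown. A sketch of `_depth` as it is conventionally defined in the Keras MobileNetV3 code, included here as an assumption about what this project uses:

def _depth(v, divisor=8, min_value=None):
    """Round a channel count to a multiple of `divisor`, never below 90% of v."""
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure rounding down does not drop below 90% of the original value.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v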
Example #5
def getSiameseModel():
    left_input = layers.Input((waveform_len, 2))
    right_input = layers.Input((waveform_len, 2))

    dummy_input = layers.Input((waveform_len, 2))
    l = layers.BatchNormalization()(dummy_input)
    l = layers.ZeroPadding1D(padding=2)(l)

    # General inception-style layers
    l = layers.Conv1D(64, 2, activation='relu')(l)
    l = layers.MaxPooling1D()(l)
    l = layers.Conv1D(64, 4, activation='relu')(l)
    l = layers.MaxPooling1D()(l)
    l = layers.Conv1D(32, 8, activation='relu')(l)
    l = layers.MaxPooling1D()(l)
    l = layers.Conv1D(32, 16, activation='relu')(l)
    l = layers.MaxPooling1D()(l)
    l = layers.Conv1D(32, 32, activation='relu')(l)
    l = layers.MaxPooling1D()(l)

    l = layers.Flatten()(l)

    l = layers.Dense(256, activation="sigmoid")(l)

    extractor = models.Model(inputs=[dummy_input], outputs=[l])

    # Intermediate models that reuse the extractor on each side; calling
    # extractor.call(...) on a fresh Input grafts the shared layers onto it.
    encoded_l = models.Model(inputs=[left_input],
                             outputs=[extractor.call(left_input)])
    encoded_r = models.Model(inputs=[right_input],
                             outputs=[extractor.call(right_input)])

    # Add a customized layer to compute the absolute difference between the encodings
    L1_layer = layers.Lambda(
        lambda tensors: tf.keras.backend.abs(tensors[0] - tensors[1]))
    L1_distance = L1_layer([encoded_l.output, encoded_r.output])

    # Add a dense layer with a sigmoid unit to generate the similarity score
    prediction = layers.Dense(1, activation='sigmoid')(L1_distance)

    # Connect the inputs with the outputs
    siamese_net = models.Model(inputs=[left_input, right_input],
                               outputs=prediction)

    # return the model
    return siamese_net
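A usage sketch; `waveform_len` is a module-level global the example assumes, and the value below is illustrative only:

waveform_len = 4096  # hypothetical; the original's value is not shown
model = getSiameseModel()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# model.fit([left_waveforms, right_waveforms], labels, ...)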
Example #6
def get_simplegru_model(num_units=64, num_layers=2, dropout=0.1):
    inputs = Input((5, 2), name='dust_input')
    inputs_expanded = layers.ZeroPadding1D((0, 1))(inputs)
    #region=Input((),name='region_input',dtype=tf.uint8)
    #month=Input((),name='month_input',dtype=tf.float32)
    #cells=[layers.GRUCell(num_units) for _ in range(num_layers-1)]
    #cells.append(layers.GRUCell(num_units,dropout=dropout))
    #multi_gru=layers.RNN(cells,name='multi-lstm')
    multi_gru = layers.LSTM(num_units, activation='relu')

    # batch X num_units
    output = multi_gru(inputs_expanded)

    #  batch X 6 X num_units
    repeated = layers.RepeatVector(6)(output)

    # batch X 6 X num_units
    lstm_seq = layers.LSTM(num_units, return_sequences=True,
                           activation='relu')(repeated)

    #flattened=layers.Flatten()(inputs)

    # (batch,num_units)
    #feature=layers.Dense(num_units,activations.relu)(output)
    #region_one=layers.Lambda(region_one_hot,name='region_one_hot')(region)
    #month_one=layers.Lambda(month_one_hot,name='month_one_hot')(month)
    # (batch,num_units+10+12)
    #feature_concat=layers.Concatenate()([feature,month_one])
    #feature_concat=layers.Dropout(0.1)(feature_concat)
    #dense_final=layers.Dense(16,activations.relu)(feature)

    #tiny=layers.Dense(1,name='tiny_dense')(dense_final)
    #micro=layers.Dense(1,name='micro_dense')(dense_final)

    #model=Model(inputs=[inputs,region,month],outputs=[tiny,micro])

    final = layers.TimeDistributed(layers.Dense(2))(lstm_seq)
    model = Model(inputs=inputs, outputs=final)

    return model
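A sketch of the shapes this model produces, plus a hypothetical compile call:

model = get_simplegru_model()
model.compile(optimizer='adam', loss='mse')
# dust_input (None, 5, 2) -> ZeroPadding1D((0, 1)) -> (None, 6, 2)
# encoder LSTM            -> (None, 64)
# RepeatVector(6)         -> (None, 6, 64)
# decoder LSTM + TimeDistributed(Dense(2)) -> (None, 6, 2)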
Example #7
def res_tcn_block(x,
                  n_filters,
                  kernel_size,
                  dilation,
                  dropout_rate=0.2,
                  l2=keras.regularizers.l2(0.),
                  use_batch_norm=True):
    global name_
    prev_x = x
    for _ in range(2):
        x = layers.ZeroPadding1D(((kernel_size - 1) * dilation, 0))(x)
        x = layers.Conv1D(
            n_filters,
            kernel_size=kernel_size,
            dilation_rate=dilation,
            kernel_regularizer=l2,
            bias_regularizer=l2,
            kernel_initializer=keras.initializers.Orthogonal())(x)

        if use_batch_norm:
            x = layers.BatchNormalization()(x)

        x = layers.Activation(tf.nn.relu)(x)

        x = layers.SpatialDropout1D(rate=dropout_rate)(x)

    # 1x1 conv to match the shapes (channel dimension).
    if prev_x.shape[-1] != n_filters:
        prev_x = layers.Conv1D(n_filters,
                               1,
                               padding='same',
                               activation='relu',
                               name='match_channel' + str(name_))(prev_x)
        name_ += 1

    res_x = keras.layers.add([x, prev_x])
    res_x = layers.Activation(tf.nn.relu)(res_x)

    return res_x  # , x
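The left-only pad of `(kernel_size - 1) * dilation` followed by a 'valid' dilated convolution is the standard causal-convolution trick; a quick length check with illustrative numbers:

# ZeroPadding1D(((k - 1) * d, 0)) adds (k - 1) * d steps on the left only;
# a 'valid' Conv1D with kernel k and dilation d then consumes exactly that many,
# so sequence length is preserved and no output position sees future steps.
k, d, T = 3, 4, 100
padded_len = T + (k - 1) * d        # 108
out_len = padded_len - d * (k - 1)  # 'valid' dilated conv, stride 1: 100
assert out_len == T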
Example #8
def get_layers(context=300, filt_length_conv_1=150, filt_length_conv_2=5,
               number_filters_conv_1=100, number_filters_conv_2=80, fc_cells=512):
    '''
    Returns the layers for a CNN-based model.
    Consider changing the default parameters as per application.

    Architecture: CNN->FC->Logits

    Reference:
    "Analysis of CNN-based Speech Recognition System using Raw Speech as Input"
    (https://ronan.collobert.com/pub/matos/2015_cnnspeech_interspeech.pdf)
    '''

    y_true = tf.keras.layers.Input((None,), name="y_true")
    y_true_length = tf.keras.layers.Input((1,), name="y_true_length")

    # Input shape: [batch_size, n_time, n_channels = 1]
    input_audio = layers.Input(shape=(None, 1), name="audio_input")

    # Append zeros in time for context at the beginning and end of the audio sample
    input_audio_padded = layers.ZeroPadding1D(padding=context, name='zero_padding_context')(input_audio)

    # Apply 1D filters: shape [batch_size, n_time, n_channels]
    filt_length_conv_1 = 2 * context + 1 + filt_length_conv_1
    conv = layers.Conv1D(number_filters_conv_1, filt_length_conv_1, strides=100, activation='relu', name="conv_1")(input_audio_padded)
    conv = layers.Conv1D(number_filters_conv_2, filt_length_conv_2, strides=1, activation='relu', name="conv_2")(conv)
    conv = layers.Conv1D(number_filters_conv_2, filt_length_conv_2, strides=1, activation='relu', name="conv_3")(conv)

    # Apply an FC layer to each time step
    fc = layers.TimeDistributed(layers.Dense(fc_cells), name='fc')(conv)
    fc = layers.ReLU()(fc)
    fc = layers.Dropout(rate=0.1)(fc)

    # Apply an FC layer to each time step to output a probability distribution across characters
    logits = layers.TimeDistributed(layers.Dense(29, activation='softmax'), name='logits')(fc)

    return logits, input_audio, y_true, y_true_length
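The function returns tensors rather than a compiled model; a sketch of wrapping them for inference (the `y_true`/`y_true_length` inputs suggest a CTC-style training wrapper, which is omitted here):

logits, input_audio, y_true, y_true_length = get_layers()
inference_model = tf.keras.Model(inputs=input_audio, outputs=logits)
inference_model.summary()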
Example #9
    def __call__(self, ip):
        with backend.name_scope('separable_conv_block_%s' % self.block_id):
            x = layers.Activation('relu')(ip)
            if self.strides == 2:
                x = layers.ZeroPadding1D(padding=self.kernel_size,
                                         name='separable_conv_1_pad_%s' %
                                         self.block_id)(x)
                conv_pad = 'valid'
            else:
                conv_pad = 'same'

            x = layers.SeparableConv1D(self.filters,
                                       self.kernel_size,
                                       strides=self.strides,
                                       name='separable_conv_1_%s' %
                                       self.block_id,
                                       padding=conv_pad,
                                       use_bias=False,
                                       kernel_initializer='he_normal')(x)
            x = layers.BatchNormalization(momentum=0.9997,
                                          epsilon=1e-3,
                                          name='separable_conv_1_bn_%s' %
                                          self.block_id)(x)
            x = layers.Activation('relu')(x)
            x = layers.SeparableConv1D(self.filters,
                                       self.kernel_size,
                                       name='separable_conv_2_%s' %
                                       self.block_id,
                                       padding='same',
                                       use_bias=False,
                                       kernel_initializer='he_normal')(x)
            x = layers.BatchNormalization(momentum=0.9997,
                                          epsilon=1e-3,
                                          name='separable_conv_2_bn_%s' %
                                          self.block_id)(x)
        return x
Example #10
def block3(x,
           filters,
           kernel_size=3,
           stride=1,
           groups=32,
           conv_shortcut=True,
           name=None):
    """A residual block.

  Arguments:
    x: input tensor.
    filters: integer, filters of the bottleneck layer.
    kernel_size: default 3, kernel size of the bottleneck layer.
    stride: default 1, stride of the first layer.
    groups: default 32, group size for grouped convolution.
    conv_shortcut: default True, use convolution shortcut if True,
        otherwise identity shortcut.
    name: string, block label.

  Returns:
    Output tensor for the residual block.
  """
    bn_axis = 2 if backend.image_data_format() == 'channels_last' else 1

    if conv_shortcut:
        shortcut = layers.Conv1D((64 // groups) * filters,
                                 1,
                                 strides=stride,
                                 use_bias=False,
                                 name=name + '_0_conv')(x)
        shortcut = layers.BatchNormalization(axis=bn_axis,
                                             epsilon=1.001e-5,
                                             name=name + '_0_bn')(shortcut)
    else:
        shortcut = x

    x = layers.Conv1D(filters, 1, use_bias=False, name=name + '_1_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_1_bn')(x)
    x = layers.Activation('relu', name=name + '_1_relu')(x)

    c = filters // groups
    x = layers.ZeroPadding1D(padding=(1, 1), name=name + '_2_pad')(x)
    x = layers.DepthwiseConv2D(kernel_size,
                               strides=stride,
                               depth_multiplier=c,
                               use_bias=False,
                               name=name + '_2_conv')(x)  # TODO
    x_shape = backend.int_shape(x)[1:-1]
    x = layers.Reshape(x_shape + (groups, c, c))(x)
    x = layers.Lambda(lambda x: sum(x[:, :, :, :, i] for i in range(c)),
                      name=name + '_2_reduce')(x)
    x = layers.Reshape(x_shape + (filters, ))(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_2_bn')(x)
    x = layers.Activation('relu', name=name + '_2_relu')(x)

    x = layers.Conv1D((64 // groups) * filters,
                      1,
                      use_bias=False,
                      name=name + '_3_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_3_bn')(x)

    x = layers.Add(name=name + '_add')([shortcut, x])
    x = layers.Activation('relu', name=name + '_out')(x)
    return x
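The `# TODO` on the depthwise step marks a real gap: `DepthwiseConv2D` requires rank-4 input, so it fails on the rank-3 tensors of a 1D pipeline. One hedged workaround, a sketch rather than the original author's fix, is to lift the sequence into a dummy width axis around the depthwise call:

# Hypothetical replacement for the '_2_pad'/'_2_conv' pair above:
# (batch, steps, C) -> (batch, steps, 1, C) -> depthwise 2D with a
# (kernel_size, 1) kernel -> squeeze back to (batch, steps', C * c).
x = layers.ZeroPadding1D(padding=(1, 1), name=name + '_2_pad')(x)
x = layers.Reshape((-1, 1, int(x.shape[-1])))(x)   # add dummy width axis
x = layers.DepthwiseConv2D((kernel_size, 1),
                           strides=(stride, 1),
                           depth_multiplier=c,
                           use_bias=False,
                           name=name + '_2_conv')(x)
x = layers.Reshape((-1, int(x.shape[-1])))(x)      # drop the dummy axis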
Example #11
def DenseNet(number,
             include_top=True,
             weights='hasc',
             input_shape=None,
             pooling=None,
             classes=6,
             classifier_activation='softmax'):
    if input_shape is None:
        input_shape = (256 * 3, 1)

    if weights in ['hasc', 'HASC'] and include_top and classes != 6:
        raise ValueError('If using `weights` as `"hasc"` with `include_top`'
                         ' as true, `classes` should be 6')

    # number of blocks
    if number == 121:
        blocks = [6, 12, 24, 16]
    elif number == 169:
        blocks = [6, 12, 32, 32]
    elif number == 201:
        blocks = [6, 12, 48, 32]
    else:
        raise ValueError('`number` should be 121, 169 or 201')

    inputs = layers.Input(shape=input_shape)

    x = layers.ZeroPadding1D(padding=(3, 3))(inputs)
    x = layers.Conv1D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
    x = layers.BatchNormalization(epsilon=1.001e-5, name='conv1/bn')(x)
    x = layers.Activation('relu', name='conv1/relu')(x)
    x = layers.ZeroPadding1D(padding=(1, 1))(x)
    x = layers.MaxPooling1D(3, strides=2, name='pool1')(x)

    x = DenseBlock(blocks[0], name='conv2')(x)
    x = TransitionBlock(0.5, name='pool2')(x)
    x = DenseBlock(blocks[1], name='conv3')(x)
    x = TransitionBlock(0.5, name='pool3')(x)
    x = DenseBlock(blocks[2], name='conv4')(x)
    x = TransitionBlock(0.5, name='pool4')(x)
    x = DenseBlock(blocks[3], name='conv5')(x)

    x = layers.BatchNormalization(epsilon=1.001e-5, name='bn')(x)
    x = layers.Activation('relu', name='relu')(x)

    x = layers.GlobalAveragePooling1D(name='avg_pool')(x)
    y = layers.Dense(classes,
                     activation=classifier_activation,
                     name='predictions')(x)

    # Create model.
    model_ = Model(inputs, y)

    if weights is not None:
        if weights in ['hasc', "HASC"]:
            weights = 'weights/densenet{}/densenet{}_hasc_weights_{}_{}.hdf5'.format(
                number, number, int(input_shape[0]), int(input_shape[1]))
        # initialize from the 'hasc' pretrained weights or a local weights file
        if os.path.exists(weights):
            print("Load weights from {}".format(weights))
            model_.load_weights(weights)
        else:
            print("Not exist weights: {}".format(weights))

    # when the top (classifier head) is not included
    if not include_top:
        if pooling is None:
            # remove the top
            model_ = Model(inputs=model_.input,
                           outputs=model_.layers[-3].output)
        elif pooling == 'avg':
            y = layers.GlobalAveragePooling1D()(model_.layers[-3].output)
            model_ = Model(inputs=model_.input, outputs=y)
        elif pooling == 'max':
            y = layers.GlobalMaxPooling1D()(model_.layers[-3].output)
            model_ = Model(inputs=model_.input, outputs=y)
        else:
            print("Not exist pooling option: {}".format(pooling))
            model_ = Model(inputs=model_.input,
                           outputs=model_.layers[-3].output)

    return model_
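A usage sketch, assuming the `DenseBlock` and `TransitionBlock` helpers referenced above are defined in this project; `weights=None` skips the HASC checkpoint lookup:

backbone = DenseNet(121, include_top=False, weights=None, pooling='avg')
classifier = DenseNet(169, weights=None)  # full 6-class head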
Example #12
    def __call__(self, ip, p):
        with backend.name_scope('normal_A_block_%s' % self.block_id):
            p = AdjustBlock(self.filters, self.block_id)(p, ip)

            h = layers.Activation('relu')(ip)
            h = layers.Conv1D(self.filters,
                              1,
                              strides=1,
                              padding='same',
                              name='normal_conv_1_%s' % self.block_id,
                              use_bias=False,
                              kernel_initializer='he_normal')(h)
            h = layers.BatchNormalization(momentum=0.9997,
                                          epsilon=1e-3,
                                          name='normal_bn_1_%s' %
                                          self.block_id)(h)

            with backend.name_scope('block_1'):
                x1_1 = SeparableConvBlock(self.filters,
                                          kernel_size=5,
                                          block_id='normal_left1_%s' %
                                          self.block_id)(h)
                x1_2 = SeparableConvBlock(self.filters,
                                          block_id='normal_right1_%s' %
                                          self.block_id)(p)

                x1_1, x1_2 = padding(x1_1, x1_2)

                x1 = layers.add([x1_1, x1_2],
                                name='normal_add_1_%s' % self.block_id)

            with backend.name_scope('block_2'):
                x2_1 = SeparableConvBlock(self.filters,
                                          kernel_size=5,
                                          block_id='normal_left2_%s' %
                                          self.block_id)(p)
                x2_2 = SeparableConvBlock(self.filters,
                                          kernel_size=3,
                                          block_id='normal_right2_%s' %
                                          self.block_id)(p)
                x2 = layers.add([x2_1, x2_2],
                                name='normal_add_2_%s' % self.block_id)

            with backend.name_scope('block_3'):
                x3 = layers.AveragePooling1D(3,
                                             strides=1,
                                             padding='same',
                                             name='normal_left3_%s' %
                                             self.block_id)(h)
                x3, p = padding(x3, p)

                x3 = layers.add([x3, p],
                                name='normal_add_3_%s' % self.block_id)

            with backend.name_scope('block_4'):
                x4_1 = layers.AveragePooling1D(3,
                                               strides=1,
                                               padding='same',
                                               name='normal_left4_%s' %
                                               self.block_id)(p)
                x4_2 = layers.AveragePooling1D(3,
                                               strides=1,
                                               padding='same',
                                               name='normal_right4_%s' %
                                               self.block_id)(p)
                x4 = layers.add([x4_1, x4_2],
                                name='normal_add_4_%s' % self.block_id)

            with backend.name_scope('block_5'):
                x5 = SeparableConvBlock(self.filters,
                                        block_id='normal_left5_%s' %
                                        self.block_id)(h)
                x5 = layers.add([x5, h],
                                name='normal_add_5_%s' % self.block_id)

            if x2.shape[-2] < p.shape[-2]:
                adds = p.shape[-2] - x2.shape[-2]
                x2 = layers.ZeroPadding1D((0, adds))(x2)

            x = layers.concatenate([p, x1, x2, x3, x4, x5],
                                   name='normal_concat_%s' % self.block_id)

        return x, ip
Example #13
    def f(x):
        y = layers.ZeroPadding1D(padding=1,
                                 name="padding{}{}_branch2a".format(
                                     stage_char, block_char))(x)

        y = layers.Conv1D(filters,
                          kernel_size,
                          strides=stride,
                          use_bias=False,
                          name="res{}{}_branch2a".format(
                              stage_char, block_char),
                          **parameters)(y)

        y = BatchNormalizationFreeze(axis=axis,
                                     epsilon=1e-5,
                                     freeze=freeze_bn,
                                     name="bn{}{}_branch2a".format(
                                         stage_char, block_char))(y)

        y = layers.Activation("relu",
                              name="res{}{}_branch2a_relu".format(
                                  stage_char, block_char))(y)

        y = layers.ZeroPadding1D(padding=1,
                                 name="padding{}{}_branch2b".format(
                                     stage_char, block_char))(y)

        y = layers.Conv1D(filters,
                          kernel_size,
                          use_bias=False,
                          name="res{}{}_branch2b".format(
                              stage_char, block_char),
                          **parameters)(y)

        y = BatchNormalizationFreeze(axis=axis,
                                     epsilon=1e-5,
                                     freeze=freeze_bn,
                                     name="bn{}{}_branch2b".format(
                                         stage_char, block_char))(y)

        if block == 0:
            shortcut = layers.Conv1D(filters,
                                     1,
                                     strides=stride,
                                     use_bias=False,
                                     name="res{}{}_branch1".format(
                                         stage_char, block_char),
                                     **parameters)(x)

            shortcut = BatchNormalizationFreeze(axis=axis,
                                                epsilon=1e-5,
                                                freeze=freeze_bn,
                                                name="bn{}{}_branch1".format(
                                                    stage_char,
                                                    block_char))(shortcut)
        else:
            shortcut = x

        y = layers.Add(name="res{}{}".format(stage_char, block_char))(
            [y, shortcut])

        y = layers.Activation("relu",
                              name="res{}{}_relu".format(
                                  stage_char, block_char))(y)

        return y
Example #14
def get_layers(context=300, filt_length_conv_1=150, filt_length_conv_2=5,
               number_filters_conv_1=40, number_filters_conv_2=256, LSTM_cells=832, fc_cells=512):
    '''
    Returns the layers for a CNN-based model.
    Consider changing the default parameters as per application.

    Architecture: CNN->CNN->LSTM->LSTM->LSTM->FC->Logits

    Reference:
    "Learning the Speech Front-end With Raw Waveform CLDNNs"
    (https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43960.pdf)
    '''

    y_true = tf.keras.layers.Input((None,), name="y_true")
    y_true_length = tf.keras.layers.Input((1,), name="y_true_length")

    # Input shape: [batch_size, n_time, n_channels = 1]
    input_audio = layers.Input(shape=(None, 1), name="audio_input")

    # Append zeros in time for context at the beginning and end of the audio sample
    input_audio_padded = layers.ZeroPadding1D(padding=context, name='zero_padding_context')(input_audio)

    # Apply 1D filters: shape [batch_size, n_time, n_channels]
    # (tf.glorot_normal_initializer is TF1-only; the tf.keras equivalent is used here)
    filt_length_conv_1 = 2 * context + 1 + filt_length_conv_1
    conv = layers.Conv1D(number_filters_conv_1,
                         filt_length_conv_1,
                         kernel_initializer=tf.keras.initializers.glorot_normal(),
                         strides=100,
                         activation='relu',
                         name="conv_1")(input_audio_padded)
    conv = layers.Conv1D(number_filters_conv_2,
                         filt_length_conv_2,
                         kernel_initializer=tf.keras.initializers.glorot_normal(),
                         strides=1,
                         activation='relu',
                         name="conv_2")(conv)

    # 3 layers of LSTMs
    rnn = tf.keras.layers.LSTM(LSTM_cells,
                               kernel_initializer=initializers.RandomUniform(minval=-0.02, maxval=0.02),
                               return_sequences=True,
                               name='rnn_1')(conv)
    rnn = tf.keras.layers.LSTM(LSTM_cells,
                               kernel_initializer=initializers.RandomUniform(minval=-0.02, maxval=0.02),
                               return_sequences=True,
                               name='rnn_2')(rnn)
    rnn = tf.keras.layers.LSTM(LSTM_cells,
                               kernel_initializer=initializers.RandomUniform(minval=-0.02, maxval=0.02),
                               return_sequences=True,
                               name='rnn_3')(rnn)
    # Apply an FC layer to each time step
    fc = layers.TimeDistributed(layers.Dense(fc_cells,
                                             kernel_initializer=tf.keras.initializers.glorot_normal()),
                                name='fc')(rnn)
    fc = layers.ReLU()(fc)
    fc = layers.Dropout(rate=0.1)(fc)

    # Apply an FC layer to each time step to output a probability distribution across characters
    logits = layers.TimeDistributed(layers.Dense(29,
                                                 kernel_initializer=tf.keras.initializers.glorot_normal(),
                                                 activation='softmax'),
                                    name='logits')(fc)


    return logits, input_audio, y_true, y_true_length
Example #15
    def call(self, inputs, training=None, mask=None):
        return layers.Concatenate(self.axis)(
            [inputs[0],
             layers.ZeroPadding1D(
                 (int(self.diff / 2 if self.diff % 2 == 0 else self.diff / 2 + 1),
                  int(self.diff / 2)))(inputs[1])])
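The split logic puts the extra padding step on the left when `self.diff` is odd; a quick arithmetic check with illustrative values:

# diff = 5: left = int(5 / 2 + 1) = 3, right = int(5 / 2) = 2 -> 5 steps total
# diff = 4: left = int(4 / 2) = 2,     right = int(4 / 2) = 2 -> 4 steps total
for diff in (4, 5):
    left = int(diff / 2 if diff % 2 == 0 else diff / 2 + 1)
    right = int(diff / 2)
    assert left + right == diff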
Example #16
    def __init__(self, kernel_size, num_filters):
        self.conv = layers.Conv1D(num_filters, kernel_size, activation='relu')
        self.conv_gate = layers.Conv1D(num_filters,
                                       kernel_size,
                                       activation='sigmoid')
        self.pad_input = layers.ZeroPadding1D(padding=(kernel_size - 1, 0))
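The `__call__` for this block is not shown; a sketch of how these three pieces are conventionally combined into a WaveNet-style gated causal convolution (an assumption, not the original author's code):

    def __call__(self, x):
        x = self.pad_input(x)  # left-only padding keeps the convolution causal
        # elementwise gate: relu feature branch modulated by a sigmoid gate
        return layers.Multiply()([self.conv(x), self.conv_gate(x)])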
Example #17
    def __call__(self, p, ip):
        ip_shape = ip.shape

        if p is not None:
            p_shape = p.shape

        with backend.name_scope('adjust_block'):
            if p is None:
                p = ip

            elif p_shape[-2] != ip_shape[-2]:
                with backend.name_scope('adjust_reduction_block_%s' %
                                        self.block_id):
                    p = layers.Activation('relu',
                                          name='adjust_relu_1_%s' %
                                          self.block_id)(p)
                    p1 = layers.AveragePooling1D(1,
                                                 strides=2,
                                                 padding='valid',
                                                 name='adjust_avg_pool_1_%s' %
                                                 self.block_id)(p)
                    p1 = layers.Conv1D(self.filters // 2,
                                       1,
                                       padding='same',
                                       use_bias=False,
                                       name='adjust_conv_1_%s' % self.block_id,
                                       kernel_initializer='he_normal')(p1)

                    p2 = layers.ZeroPadding1D((0, 1))(p)
                    p2 = layers.Cropping1D((1, 0))(p2)
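                    # The pad-right / crop-left pair above shifts the sequence one
                    # step, so this second pooled path samples offset positions.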
                    p2 = layers.AveragePooling1D(1,
                                                 strides=2,
                                                 padding='valid',
                                                 name='adjust_avg_pool_2_%s' %
                                                 self.block_id)(p2)
                    p2 = layers.Conv1D(self.filters // 2,
                                       1,
                                       padding='same',
                                       use_bias=False,
                                       name='adjust_conv_2_%s' % self.block_id,
                                       kernel_initializer='he_normal')(p2)

                    p = layers.concatenate([p1, p2], axis=-1)
                    p = layers.BatchNormalization(momentum=0.9997,
                                                  epsilon=1e-3,
                                                  name='adjust_bn_%s' %
                                                  self.block_id)(p)

            elif p_shape[-1] != self.filters:
                with backend.name_scope('adjust_projection_block_%s' %
                                        self.block_id):
                    p = layers.Activation('relu')(p)
                    p = layers.Conv1D(self.filters,
                                      1,
                                      strides=1,
                                      padding='same',
                                      name='adjust_conv_projection_%s' %
                                      self.block_id,
                                      use_bias=False,
                                      kernel_initializer='he_normal')(p)
                    p = layers.BatchNormalization(momentum=0.9997,
                                                  epsilon=1e-3,
                                                  name='adjust_bn_%s' %
                                                  self.block_id)(p)
        return p
Example #18
    def __call__(self, ip, p):
        with backend.name_scope('reduction_A_block_%s' % self.block_id):
            p = AdjustBlock(self.filters, self.block_id)(p, ip)

            h = layers.Activation('relu')(ip)
            h = layers.Conv1D(self.filters,
                              1,
                              strides=1,
                              padding='same',
                              name='reduction_conv_1_%s' % self.block_id)(h)
            h = layers.BatchNormalization(momentum=0.9997,
                                          epsilon=1e-3,
                                          name='reduction_bn_1_%s' %
                                          self.block_id)(h)
            h3 = layers.ZeroPadding1D(3,
                                      name='reduction_pad_1_%s' %
                                      self.block_id)(h)

            with backend.name_scope('block_1'):
                x1_1 = SeparableConvBlock(self.filters,
                                          5,
                                          strides=2,
                                          block_id='reduction_left1_%s' %
                                          self.block_id)(h)
                x1_2 = SeparableConvBlock(self.filters,
                                          7,
                                          strides=2,
                                          block_id='reduction_right1_%s' %
                                          self.block_id)(p)

                x1_1, x1_2 = padding(x1_1, x1_2)

                x1 = layers.add([x1_1, x1_2],
                                name='reduction_add_1_%s' % self.block_id)

            with backend.name_scope('block_2'):
                x2_1 = layers.MaxPooling1D(3,
                                           strides=2,
                                           padding='valid',
                                           name='reduction_left2_%s' %
                                           self.block_id)(h3)
                x2_2 = SeparableConvBlock(self.filters,
                                          7,
                                          strides=2,
                                          block_id='reduction_right2_%s' %
                                          self.block_id)(p)

                x2_1, x2_2 = padding(x2_1, x2_2)

                x2 = layers.add([x2_1, x2_2],
                                name='reduction_add_2_%s' % self.block_id)

            with backend.name_scope('block_3'):
                x3_1 = layers.AveragePooling1D(3,
                                               strides=2,
                                               padding='valid',
                                               name='reduction_left3_%s' %
                                               self.block_id)(h3)
                x3_2 = SeparableConvBlock(self.filters,
                                          5,
                                          strides=2,
                                          block_id='reduction_right3_%s' %
                                          self.block_id)(p)

                x3_1, x3_2 = padding(x3_1, x3_2)

                x3 = layers.add([x3_1, x3_2],
                                name='reduction_add3_%s' % self.block_id)

            with backend.name_scope('block_4'):
                x4 = layers.AveragePooling1D(3,
                                             strides=1,
                                             padding='same',
                                             name='reduction_left4_%s' %
                                             self.block_id)(x1)

                x2, x4 = padding(x2, x4)

                x4 = layers.add([x2, x4])

            with backend.name_scope('block_5'):
                x5_1 = SeparableConvBlock(self.filters,
                                          3,
                                          block_id='reduction_left4_%s' %
                                          self.block_id)(x1)
                x5_2 = layers.MaxPooling1D(3,
                                           strides=2,
                                           padding='valid',
                                           name='reduction_right5_%s' %
                                           self.block_id)(h3)

                x5_1, x5_2 = padding(x5_1, x5_2)

                x5 = layers.add([x5_1, x5_2],
                                name='reduction_add4_%s' % self.block_id)

            x3 = layers.ZeroPadding1D((0, 1))(x3)

            x = layers.concatenate([x2, x3, x4, x5],
                                   name='reduction_concat_%s' % self.block_id)

        return x, ip