Example #1
 def get_text_model(embedding_size=EMBEDDING_SIZE,
                    input_length=MAX_DOCUMENT_LENGTH):
     inputs = keras.Input(shape=(input_length, embedding_size))
     x = layers.Dropout(0.1)(inputs)
     x = layers.Convolution1D(
         16,
         kernel_size=4,
         activation='relu',
         strides=1,
         padding='same',
         kernel_constraint=constraints.MaxNorm(max_value=3))(x)
     x = layers.Dropout(0.5)(x)
     x = layers.Convolution1D(
         12,
         kernel_size=8,
         activation='relu',
         strides=2,
         padding='same',
         kernel_constraint=constraints.MaxNorm(max_value=3))(x)
     x = layers.Dropout(0.5)(x)
     x = layers.Convolution1D(
         8,
         kernel_size=16,
         activation='relu',
         strides=2,
         padding='same',
         kernel_constraint=constraints.MaxNorm(max_value=3))(x)
     x = layers.Dropout(0.5)(x)
     outputs = layers.Flatten()(x)
     # outputs = layers.Dense(2, activation='relu', kernel_constraint=constraints.MaxNorm(max_value=3))(x)
     return keras.Model(inputs, outputs)
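A minimal usage sketch, assuming get_text_model is reachable at module level, that keras, layers, and constraints alias the tensorflow.keras namespaces, and that the illustrative values 300 and 70 stand in for EMBEDDING_SIZE and MAX_DOCUMENT_LENGTH:

# Illustrative constants standing in for EMBEDDING_SIZE and MAX_DOCUMENT_LENGTH.
text_model = get_text_model(embedding_size=300, input_length=70)
text_model.summary()  # strides 1/2/2 shrink 70 steps to 18; Flatten yields 18 * 8 = 144 features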
Example #2
def create_cnn(model_params):
    """
    This function creates a Deep Convolutional Network based on model_params dictionary
    :param model_params: dict of model_params
    :return: keras model
    """
    # Add an Input Layer: a sequence of word indices (one per word), padded to length 70
    input_layer = layers.Input((70, ))

    # Add the word embedding Layer
    embedding_layer = layers.Embedding(len(word_index) + 1,
                                       300,
                                       weights=[embedding_matrix],
                                       trainable=True)(input_layer)
    embedding_layer = layers.SpatialDropout1D(
        model_params['spatial_dropout'])(embedding_layer)

    # Add the convolutional Layer
    for ly in range(model_params['num_conv_blocks']):
        if ly == 0:
            conv_layer = layers.Convolution1D(
                model_params['num_conv_filters'],
                model_params['filter_size'],
                activation=model_params['activation_func'])(embedding_layer)
        else:
            conv_layer = layers.Convolution1D(
                model_params['num_conv_filters'] * ly * 2,
                model_params['filter_size'],
                activation=model_params['activation_func'])(conv_layer)
    # Add the pooling Layer
    pooling_layer = layers.GlobalMaxPool1D()(conv_layer)

    # Add the output Layers
    for ly in range(model_params['num_dense_layers']):
        if ly == 0:
            output_layer1 = layers.Dense(
                model_params['num_dense_neurons'],
                activation=model_params['activation_func'])(pooling_layer)
            output_layer1 = layers.Dropout(
                model_params['dense_dropout'])(output_layer1)
        else:
            output_layer1 = layers.Dense(
                model_params['num_dense_neurons'],
                activation=model_params['activation_func'])(output_layer1)
            output_layer1 = layers.Dropout(
                model_params['dense_dropout'])(output_layer1)
    output_layer2 = layers.Dense(1, activation="sigmoid")(output_layer1)

    # Compile the model
    model = models.Model(inputs=input_layer, outputs=output_layer2)
    model.compile(optimizer=optimizers.Adam(
        lr=model_params['learning_rate'],
        decay=model_params['learning_rate'] / model_params['epochs']),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    return model
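The docstring only says model_params is a dict; below is a hedged sketch of one plausible configuration (the key names are taken from the lookups inside create_cnn, the values are illustrative, and the function additionally assumes word_index and embedding_matrix exist in the enclosing scope):

# Hypothetical hyper-parameters; only the key names are dictated by create_cnn.
model_params = {
    'spatial_dropout': 0.3,
    'num_conv_blocks': 2,
    'num_conv_filters': 64,
    'filter_size': 3,
    'activation_func': 'relu',
    'num_dense_layers': 1,
    'num_dense_neurons': 50,
    'dense_dropout': 0.25,
    'learning_rate': 1e-3,
    'epochs': 10,
}
cnn_model = create_cnn(model_params)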
Example #3
def create_cnn_model1():
    model = Sequential()
    # model.add(layers.Flatten(input_shape=(3000, 1)))
    model.add(layers.Input(shape=(3000, 1)))
    model.add(
        layers.Convolution1D(16,
                             kernel_size=5,
                             activation=activations.relu,
                             padding="valid"))
    model.add(
        layers.Convolution1D(16,
                             kernel_size=5,
                             activation=activations.relu,
                             padding="valid"))
    model.add(layers.MaxPool1D(pool_size=2))
    model.add(
        layers.Convolution1D(32,
                             kernel_size=3,
                             activation=activations.relu,
                             padding="valid"))
    model.add(
        layers.Convolution1D(32,
                             kernel_size=3,
                             activation=activations.relu,
                             padding="valid"))
    # model.add(layers.Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid"))
    model.add(layers.MaxPool1D(pool_size=2))
    model.add(layers.SpatialDropout1D(rate=0.01))
    model.add(
        layers.Convolution1D(32,
                             kernel_size=3,
                             activation=activations.relu,
                             padding="valid"))
    model.add(
        layers.Convolution1D(32,
                             kernel_size=3,
                             activation=activations.relu,
                             padding="valid"))
    model.add(layers.MaxPool1D(pool_size=2))
    model.add(layers.SpatialDropout1D(rate=0.01))
    model.add(
        layers.Convolution1D(256,
                             kernel_size=3,
                             activation=activations.relu,
                             padding="valid"))
    model.add(
        layers.Convolution1D(256,
                             kernel_size=3,
                             activation=activations.relu,
                             padding="valid"))
    model.add(layers.GlobalMaxPool1D())
    # model.add(layers.Flatten())
    model.add(Dense(64, activation='relu'))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(5, activation=activations.softmax))
    model.compile(optimizer=optimizers.Adam(0.001),
                  loss=losses.sparse_categorical_crossentropy,
                  metrics=['acc'])
    # model.summary()
    return model
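A quick smoke-test sketch on random stand-in data (a batch of two 3000-sample windows), assuming the module-level imports (Sequential, layers, Dense, activations, optimizers, losses) are in place:

import numpy as np

m = create_cnn_model1()
probs = m.predict(np.random.randn(2, 3000, 1).astype('float32'))
print(probs.shape)  # expected: (2, 5), one 5-class distribution per window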
Example #4
def cnn_base():
    model = Sequential(layers=[
        layers.Convolution1D(16,
                             kernel_size=5,
                             activation='relu',
                             padding='valid',
                             input_shape=(3000, 1)),
        layers.Convolution1D(
            16, kernel_size=5, activation='relu', padding='valid'),
        layers.MaxPool1D(pool_size=2),
        layers.SpatialDropout1D(rate=0.01),
        layers.Convolution1D(
            32, kernel_size=3, activation='relu', padding='valid'),
        layers.Convolution1D(
            32, kernel_size=3, activation='relu', padding='valid'),
        layers.MaxPool1D(pool_size=2),
        layers.SpatialDropout1D(rate=0.01),
        layers.Convolution1D(
            32, kernel_size=3, activation='relu', padding='valid'),
        layers.Convolution1D(
            32, kernel_size=3, activation='relu', padding='valid'),
        layers.MaxPool1D(pool_size=2),
        layers.Convolution1D(
            256, kernel_size=3, activation='relu', padding='valid'),
        layers.Convolution1D(
            256, kernel_size=3, activation='relu', padding='valid'),
        layers.GlobalMaxPool1D(),
        layers.Dropout(rate=0.01),
        layers.Dense(64, activation='relu'),
    ])
    model.compile(optimizer=optimizers.Adam(0.001),
                  loss=losses.sparse_categorical_crossentropy,
                  metrics=['acc'])  #,class_model='categorical'
    return model
Example #5
def create_cnn(total_words=1000,
               embedded_dimension=300,
               embedding_matrix=None,
               input_length=100,
               optimizer='adam'):
    # Add an Input Layer
    input_layer = layers.Input((input_length, ))

    # Add the word embedding Layer
    embedding_layer = layers.Embedding(total_words + 1,
                                       embedded_dimension,
                                       weights=[embedding_matrix],
                                       trainable=False)(input_layer)
    embedding_layer = layers.SpatialDropout1D(0.5)(embedding_layer)

    # Add the convolutional Layer
    conv_layer = layers.Convolution1D(100, 3,
                                      activation="relu")(embedding_layer)

    # Add the pooling Layer
    pooling_layer = layers.GlobalMaxPool1D()(conv_layer)

    # Add the output Layers
    output_layer1 = layers.Dense(50, activation="relu")(pooling_layer)
    output_layer1 = layers.Dropout(0.6)(output_layer1)
    output_layer2 = layers.Dense(1, activation="sigmoid")(output_layer1)

    # Compile the model
    model = models.Model(inputs=input_layer, outputs=output_layer2)
    model.compile(optimizer=optimizer, loss='binary_crossentropy')

    return model
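A minimal call sketch; the main requirement is that embedding_matrix has total_words + 1 rows and embedded_dimension columns (the random values below are placeholders only):

import numpy as np

vocab_size = 1000
embed_dim = 300
dummy_matrix = np.random.uniform(-0.1, 0.1, size=(vocab_size + 1, embed_dim))  # placeholder embeddings
model = create_cnn(total_words=vocab_size, embedded_dimension=embed_dim,
                   embedding_matrix=dummy_matrix, input_length=100)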
Example #6
def create_cnn_cnn():
    seq_input = layers.Input(shape=(None, 3000, 1))
    epoch_encoding_model = create_cnn_model1()
    encoded_sequence = layers.TimeDistributed(epoch_encoding_model)
    model = Sequential(layers=[
        seq_input, encoded_sequence,
        layers.Convolution1D(
            64, kernel_size=3, activation='relu', padding='same'),
        layers.Convolution1D(
            64, kernel_size=3, activation='relu', padding='same'),
        layers.Convolution1D(
            5, kernel_size=3, activation='softmax', padding='same')
    ])
    model.compile(optimizers.Adam(0.001),
                  losses.sparse_categorical_crossentropy,
                  metrics=['acc'])
    model.summary()
    return model
Example #7
    def conv_bn_relu_3_sandwich(x, filters, kernel_size):
        first_x = x
        for i in range(3):
            x = layers.Convolution1D(filters,
                                     kernel_size,
                                     padding='same',
                                     kernel_initializer=weightinit,
                                     kernel_regularizer=l2(regularization))(x)
            x = layers.BatchNormalization()(x)
            x = layers.ReLU()(x)

        # 1x1 projection of the original block input so the residual add matches the new channel count
        first_x = layers.Convolution1D(
            filters,
            kernel_size=1,
            padding='same',
            kernel_initializer=weightinit,
            kernel_regularizer=l2(regularization))(first_x)
        x = layers.Add()([x, first_x])
        return x
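A sketch of wiring the block into a small functional model, assuming the block is reachable at module level, that layers, models, and l2 are imported as in the surrounding examples, and that weightinit and regularization resolve to the illustrative values below:

# Hypothetical surrounding values; only the residual block itself comes from the example.
weightinit = 'he_normal'
regularization = 1e-4

inp = layers.Input(shape=(3000, 1))
out = conv_bn_relu_3_sandwich(inp, filters=32, kernel_size=5)
out = layers.GlobalMaxPool1D()(out)
out = layers.Dense(5, activation='softmax')(out)
res_model = models.Model(inp, out)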
Example #8
def cnn_cnn():
    seq_input = layers.Input(shape=(10, 3000, 1))
    base_model = cnn_base()
    model = Sequential(layers=[
        seq_input,
        layers.TimeDistributed(base_model),
        layers.Convolution1D(
            128, kernel_size=3, activation='relu', padding='same'),
        layers.SpatialDropout1D(rate=0.01),
        layers.Convolution1D(
            128, kernel_size=3, activation='relu', padding='same'),
        layers.Dropout(rate=0.05),
        layers.Convolution1D(
            5, kernel_size=3, activation='softmax', padding='same')
    ])
    model.compile(optimizers.Adam(0.001),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])  #class_model='categorical'
    model.summary()
    return model
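A hedged sketch of fitting cnn_cnn on random stand-in data; the shapes only illustrate the expected layout (sequences of 10 windows of 3000 samples each, one class id per window):

import numpy as np

x_dummy = np.random.randn(4, 10, 3000, 1).astype('float32')  # placeholder signals
y_dummy = np.random.randint(0, 5, size=(4, 10))               # one label per window

seq_model = cnn_cnn()
seq_model.fit(x_dummy, y_dummy, epochs=1, batch_size=2)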
Example #9
def cnn_v2(lr=0.005):
    model = Sequential(layers=[
        layers.Convolution1D(128,
                             kernel_size=50,
                             strides=25,
                             activation='relu',
                             padding='valid',
                             input_shape=(3000, 1)),
        # layers.Convolution1D(128, kernel_size=50, strides=25, activation='relu', padding='valid'),
        layers.MaxPool1D(pool_size=8, strides=8),
        layers.SpatialDropout1D(rate=0.1),
        layers.Convolution1D(
            128, kernel_size=8, strides=1, activation='relu', padding='valid'),
        layers.Convolution1D(
            128, kernel_size=8, strides=1, activation='relu', padding='valid'),
        layers.Convolution1D(
            128, kernel_size=8, strides=1, activation='relu', padding='valid'),
        layers.MaxPool1D(4, 4),
        layers.SpatialDropout1D(rate=0.5),
        layers.Flatten(),
        # layers.Convolution1D(32, kernel_size=3, activation='relu', padding='valid'),
        # layers.Convolution1D(32, kernel_size=3, activation='relu', padding='valid'),
        # layers.MaxPool1D(pool_size=2),
        # layers.SpatialDropout1D(rate=0.1),
        # layers.Convolution1D(32, kernel_size=3, activation='relu', padding='valid'),
        # layers.Convolution1D(32, kernel_size=3, activation='relu', padding='valid'),
        # layers.MaxPool1D(pool_size=2),
        # layers.Convolution1D(256, kernel_size=3, activation='relu', padding='valid'),
        # layers.Convolution1D(256, kernel_size=3, activation='relu', padding='valid'),
        # layers.GlobalMaxPool1D(),
        # layers.Dropout(rate=0.01),
        layers.Dense(64, activation='relu'),
        layers.Dropout(rate=0.1),
        # layers.Dense(64, activation='relu'),
        layers.Dropout(rate=0.5),
        layers.Dense(5, activation='softmax')
    ])
    model.compile(optimizer=optimizers.Adam(lr),
                  loss=losses.sparse_categorical_crossentropy,
                  metrics=['accuracy'])  #,class_model='categorical'
    return model
Example #10
    def __init__(self,
                 vocab_size: int,
                 embed_dim: int,
                 hidden_size: int = 128,
                 training: bool = False):
        super(MyAdvancedModel, self).__init__()

        self.num_classes = len(ID_TO_CLASS)

        self.decoder = layers.Dense(units=self.num_classes)
        self.embeddings = tf.Variable(tf.random.normal(
            (vocab_size, embed_dim)))

        self.cnn_layers = []  # List to keep all the cnn layers
        for kernel_size in [2, 3, 4]:  # Iterate over different kernel sizes
            # Create a cnn layer for each kernel size, dimension of hidden output is hidden_size (number of filters)
            cnn = layers.Convolution1D(hidden_size,
                                       input_shape=(None, embed_dim),
                                       kernel_size=kernel_size,
                                       activation='tanh')
            self.cnn_layers.append(cnn)  # Add the cnn layer to the list
        self.max_pooling = layers.GlobalMaxPooling1D()  # Max Pooling layer
        self.dropout_layer = layers.Dropout(rate=0.5)  # Dropout layer
Example #11
def create_cnn():
    # Add an Input Layer
    input_layer = layers.Input((70, ))

    # Add the word embedding Layer
    embedding_layer = layers.Embedding(len(word_index) + 1, 300, weights=[embedding_matrix], trainable=False)(input_layer)
    embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)

    # Add the convolutional Layer
    conv_layer = layers.Convolution1D(100, 3, activation="relu")(embedding_layer)

    # Add the pooling Layer
    pooling_layer = layers.GlobalMaxPool1D()(conv_layer)

    # Add the output Layers
    output_layer1 = layers.Dense(50, activation="relu")(pooling_layer)
    output_layer1 = layers.Dropout(0.25)(output_layer1)
    output_layer2 = layers.Dense(1, activation="sigmoid")(output_layer1)

    # Compile the model
    model = models.Model(inputs=input_layer, outputs=output_layer2)
    model.compile(optimizer=optimizers.Adam(), loss='binary_crossentropy')
    
    return model
Example #12
def create_point_net(input_shape,
                     output_size,
                     hidden_sizes=[512, 256],
                     use_lambda=False):
    """
    Creates a PointNet.

    See https://github.com/garyloveavocado/pointnet-keras/blob/master/train_cls.py

    Args:
        input_shape (shape): Input-shape.
        output_size (int): Output-size.

    Returns:
        Model: A model.
    """

    logger.info('Input Shape: %s', str(input_shape))

    num_points = input_shape[0]

    def mat_mul(A, B):
        result = tf.matmul(A, B)
        return result

    input_points = layers.Input(shape=input_shape)
    x = layers.Convolution1D(64, 1, activation='relu',
                             input_shape=input_shape)(input_points)
    x = layers.BatchNormalization()(x)
    x = layers.Convolution1D(128, 1, activation='relu')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Convolution1D(1024, 1, activation='relu')(x)
    x = layers.BatchNormalization()(x)
    x = layers.MaxPooling1D(pool_size=num_points)(x)
    x = layers.Dense(512, activation='relu')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dense(256, activation='relu')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dense(9,
                     weights=[
                         np.zeros([256, 9]),
                         np.array([1, 0, 0, 0, 1, 0, 0, 0,
                                   1]).astype(np.float32)
                     ])(x)
    input_T = layers.Reshape((input_shape[1], input_shape[1]))(x)

    # forward net
    if use_lambda:
        g = layers.Lambda(mat_mul, arguments={'B': input_T})(input_points)
    else:
        g = layers.dot([input_points, input_T], axes=-1, normalize=True)
    # Apply the first MLP convolution to the transformed points g rather than the raw input_points
    g = layers.Convolution1D(64, 1, input_shape=input_shape,
                             activation='relu')(g)
    g = layers.BatchNormalization()(g)
    g = layers.Convolution1D(64, 1, input_shape=input_shape,
                             activation='relu')(g)
    g = layers.BatchNormalization()(g)

    # feature transform net
    f = layers.Convolution1D(64, 1, activation='relu')(g)
    f = layers.BatchNormalization()(f)
    f = layers.Convolution1D(128, 1, activation='relu')(f)
    f = layers.BatchNormalization()(f)
    f = layers.Convolution1D(1024, 1, activation='relu')(f)
    f = layers.BatchNormalization()(f)
    f = layers.MaxPooling1D(pool_size=num_points)(f)
    f = layers.Dense(512, activation='relu')(f)
    f = layers.BatchNormalization()(f)
    f = layers.Dense(256, activation='relu')(f)
    f = layers.BatchNormalization()(f)
    f = layers.Dense(64 * 64,
                     weights=[
                         np.zeros([256, 64 * 64]),
                         np.eye(64).flatten().astype(np.float32)
                     ])(f)
    feature_T = layers.Reshape((64, 64))(f)

    # forward net
    if use_lambda:
        g = layers.Lambda(mat_mul, arguments={'B': feature_T})(g)
    else:
        g = layers.dot([g, feature_T], axes=-1, normalize=True)
    g = layers.Convolution1D(64, 1, activation='relu')(g)
    g = layers.BatchNormalization()(g)
    g = layers.Convolution1D(128, 1, activation='relu')(g)
    g = layers.BatchNormalization()(g)
    g = layers.Convolution1D(1024, 1, activation='relu')(g)
    g = layers.BatchNormalization()(g)

    # global_feature
    global_feature = layers.MaxPooling1D(pool_size=num_points)(g)

    # point_net_cls
    c = global_feature
    for hidden_size in hidden_sizes:
        c = layers.Dense(hidden_size, activation='relu')(c)
        c = layers.BatchNormalization()(c)
        c = layers.Dropout(rate=0.3)(c)

    c = layers.Dense(output_size, activation='linear')(c)
    prediction = layers.Flatten()(c)

    model = models.Model(inputs=input_points, outputs=prediction)
    return model
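An instantiation sketch with illustrative sizes (2048 points with 3 coordinates, 40 output classes), assuming the module-level logger, np, tf, layers, and models imports are configured:

pointnet = create_point_net(input_shape=(2048, 3), output_size=40)
pointnet.summary()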
Example #13
def get_1d_layer(type,
                 output_dim,
                 output_mul,
                 context_size,
                 stride=1,
                 dilation=1,
                 grouped=False,
                 group_size=1,
                 padding='same',
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
    if type.lower() == 'TimeDelayLayer1D'.lower():
        if padding == 'causal':
            padding = 'same'
        return vqkl.time_delay_layers.TimeDelayLayer1D(output_dim=output_dim,
                                                       context_size=context_size,
                                                       stride=stride,
                                                       dilation=dilation,
                                                       padding=padding,
                                                       activation=activation,
                                                       use_bias=use_bias,
                                                       kernel_initializer=kernel_initializer,
                                                       bias_initializer=bias_initializer,
                                                       kernel_regularizer=kernel_regularizer,
                                                       bias_regularizer=bias_regularizer,
                                                       activity_regularizer=activity_regularizer,
                                                       kernel_constraint=kernel_constraint,
                                                       bias_constraint=bias_constraint, **kwargs)
    elif type.lower() == 'DepthGroupwiseTimeDelayLayer1D'.lower():
        if padding == 'causal':
            padding = 'same'
        return vqkl.time_delay_layers.DepthGroupwiseTimeDelayLayer1D(output_mul=output_mul,
                                                                     context_size=context_size,
                                                                     stride=stride,
                                                                     dilation=dilation,
                                                                     padding=padding,
                                                                     activation=activation,
                                                                     use_bias=use_bias,
                                                                     grouped=grouped,
                                                                     group_size=group_size,
                                                                     kernel_initializer=kernel_initializer,
                                                                     bias_initializer=bias_initializer,
                                                                     kernel_regularizer=kernel_regularizer,
                                                                     bias_regularizer=bias_regularizer,
                                                                     activity_regularizer=activity_regularizer,
                                                                     kernel_constraint=kernel_constraint,
                                                                     bias_constraint=bias_constraint, **kwargs)
    elif type.lower() in ['Convolution1D'.lower(), 'Conv1D'.lower()]:
        return tfkl.Convolution1D(filters=output_dim,
                                  kernel_size=context_size,
                                  strides=stride,
                                  dilation_rate=dilation,
                                  padding=padding,
                                  activation=activation,
                                  use_bias=use_bias,
                                  # grouped / group_size only apply to the TimeDelay layers
                                  # above; plain Keras Conv1D does not accept them
                                  kernel_initializer=kernel_initializer,
                                  bias_initializer=bias_initializer,
                                  kernel_regularizer=kernel_regularizer,
                                  bias_regularizer=bias_regularizer,
                                  activity_regularizer=activity_regularizer,
                                  kernel_constraint=kernel_constraint,
                                  bias_constraint=bias_constraint, **kwargs)
    elif type.lower() in ['SeparableConv1D'.lower(), 'SeparableConvolution1D'.lower()]:
        return tfkl.SeparableConvolution1D(filters=output_dim,
                                           kernel_size=context_size,
                                           strides=stride,
                                           dilation_rate=dilation,
                                           padding=padding,
                                           activation=activation,
                                           use_bias=use_bias,
                                           # grouped / group_size only apply to the TimeDelay layers
                                           # above; SeparableConv1D does not accept them
                                           kernel_initializer=kernel_initializer,
                                           bias_initializer=bias_initializer,
                                           kernel_regularizer=kernel_regularizer,
                                           bias_regularizer=bias_regularizer,
                                           activity_regularizer=activity_regularizer,
                                           kernel_constraint=kernel_constraint,
                                           bias_constraint=bias_constraint, **kwargs)
Example #14
    def dilated_residual_block(self, data_x, res_block_i, layer_i, dilation,
                               stack_i):

        original_x = data_x

        # Data sub-block: the dilated (atrous) convolution approach
        '''data_out = layers.AtrousConvolution1D(2 * self.config['model']['filters']['depths']['res'],
                                                    self.config['model']['filters']['lengths']['res'],
                                                    atrous_rate=dilation, border_mode='same',
                                                    bias=False,
                                                    name='res_%d_dilated_conv_d%d_s%d' % (
                                                    res_block_i, dilation, stack_i),
                                                    activation=None)(data_x)'''
        # The positional order of the first two arguments here has not been fully verified; if training raises an error, check this first.
        data_out = layers.Convolution1D(
            2 * self.config['model']['filters']['depths']['res'],
            self.config['model']['filters']['lengths']['res'],
            dilation_rate=dilation,
            padding='same',
            use_bias=False)(data_x)

        data_out_1 = BinLayers.Slice(
            (Ellipsis,
             slice(0, self.config['model']['filters']['depths']['res'])),
            (self.input_length,
             self.config['model']['filters']['depths']['res']),
            name='res_%d_data_slice_1_d%d_s%d' %
            (self.num_residual_blocks, dilation, stack_i))(data_out)

        data_out_2 = BinLayers.Slice(
            (Ellipsis,
             slice(self.config['model']['filters']['depths']['res'],
                   2 * self.config['model']['filters']['depths']['res'])),
            (self.input_length,
             self.config['model']['filters']['depths']['res']),
            name='res_%d_data_slice_2_d%d_s%d' %
            (self.num_residual_blocks, dilation, stack_i))(data_out)

        # Condition sub-block
        #condition_out = tf.keras.layers.Dense(2 * self.config['model']['filters']['depths']['res'],
        #                                   name='res_%d_dense_condition_%d_s%d' % (res_block_i, layer_i, stack_i),
        #                                   use_bias=False)(condition_x)

        #condition_out = tf.keras.layers.Reshape((self.config['model']['filters']['depths']['res'], 2),
        #                                     name='res_%d_condition_reshape_d%d_s%d' % (
        #                                         res_block_i, dilation, stack_i))(condition_out)

        #condition_out_1 = BinLayers.Slice((Ellipsis, 0), (self.config['model']['filters']['depths']['res'],),
        #                                      name='res_%d_condition_slice_1_d%d_s%d' % (
        #                                          res_block_i, dilation, stack_i))(condition_out)

        #condition_out_2 = BinLayers.Slice((Ellipsis, 1), (self.config['model']['filters']['depths']['res'],),
        #                                      name='res_%d_condition_slice_2_d%d_s%d' % (
        #                                          res_block_i, dilation, stack_i))(condition_out)

        #condition_out_1 = tf.keras.layers.RepeatVector(self.input_length, name='res_%d_condition_repeat_1_d%d_s%d' % (
        #                                                res_block_i, dilation, stack_i))(condition_out_1)
        #condition_out_2 = tf.keras.layers.RepeatVector(self.input_length, name='res_%d_condition_repeat_2_d%d_s%d' % (
        #                                                res_block_i, dilation, stack_i))(condition_out_2)

        #data_out_1 = tf.keras.layers.Merge(mode='sum', name='res_%d_merge_1_d%d_s%d' %
        #                                                 (res_block_i, dilation, stack_i))([data_out_1, condition_out_1])
        #data_out_2 = tf.keras.layers.Merge(mode='sum', name='res_%d_merge_2_d%d_s%d' % (res_block_i, dilation, stack_i))\
        #    ([data_out_2, condition_out_2])

        #data_out_1 = tf.keras.layers.Add()([data_out_1, condition_out_1])
        #data_out_2 = tf.keras.layers.Add()([data_out_2, condition_out_2])

        tanh_out = tf.keras.layers.Activation('tanh')(data_out_1)
        sigm_out = tf.keras.layers.Activation('sigmoid')(data_out_2)

        # This operation is just an element-wise multiplication
        #data_x = keras.layers.Merge(mode='mul', name='res_%d_gated_activation_%d_s%d' % (res_block_i, layer_i, stack_i))(
        #    [tanh_out, sigm_out])
        data_x = layers.Multiply()([tanh_out, sigm_out])

        data_x = tf.keras.layers.Convolution1D(
            self.config['model']['filters']['depths']['res'] +
            self.config['model']['filters']['depths']['skip'],
            1,
            padding='same',
            use_bias=False)(data_x)

        res_x = BinLayers.Slice(
            (Ellipsis,
             slice(0, self.config['model']['filters']['depths']['res'])),
            (self.input_length,
             self.config['model']['filters']['depths']['res']),
            name='res_%d_data_slice_3_d%d_s%d' %
            (res_block_i, dilation, stack_i))(data_x)

        skip_x = BinLayers.Slice(
            (Ellipsis,
             slice(
                 self.config['model']['filters']['depths']['res'],
                 self.config['model']['filters']['depths']['res'] +
                 self.config['model']['filters']['depths']['skip'])),
            (self.input_length,
             self.config['model']['filters']['depths']['skip']),
            name='res_%d_data_slice_4_d%d_s%d' %
            (res_block_i, dilation, stack_i))(data_x)

        skip_x = BinLayers.Slice(
            (slice(self.samples_of_interest_indices[0],
                   self.samples_of_interest_indices[-1] + 1, 1), Ellipsis),
            (self.padded_target_field_length,
             self.config['model']['filters']['depths']['skip']),
            name='res_%d_keep_samples_of_interest_d%d_s%d' %
            (res_block_i, dilation, stack_i))(skip_x)

        #res_x = keras.layers.Merge(mode='sum')([original_x, res_x])
        res_x = layers.Add()([original_x, res_x])

        return res_x, skip_x
Example #15
    def build_model(self):
        # Here the input is converted into TensorFlow/Keras tensor form
        data_input = layers.Input(shape=(self.input_length, ),
                                  name='data_input')

        # Specifies the size of the condition input (condition_input_length == 5)
        #condition_input = layers.Input(shape=(self.condition_input_length,),
        #                                     name='condition_input')

        # This line should just expand the dimensions; it is a custom class inheriting from keras.layers, and it is not obvious which method gets invoked when it is called directly.
        data_expanded = BinLayers.AddSingletonDepth()(data_input)

        # Ellipsis is a Python constant (first time encountering it here).
        # This call cuts the input length down to padded_target_field_length;
        # it is only used again later, when the subtraction happens.
        # To use tf.slice directly, it would likewise have to be wrapped in a layer class.
        data_input_target_field_length = BinLayers.Slice(
            (slice(self.samples_of_interest_indices[0],
                   self.samples_of_interest_indices[-1] + 1, 1), Ellipsis),
            (self.padded_target_field_length, 1),
            name='data_input_target_field_length')(data_expanded)
        # The filter length is 3 and the convolution outputs 128 channels
        data_out = layers.Convolution1D(
            self.config['model']['filters']['depths']['res'],
            self.config['model']['filters']['lengths']['res'],
            padding='same',
            use_bias=False,
            name='initial_causal_conv')(data_expanded)

        # condition_input is originally represented with 5 values; to merge it with the input it must also be projected to 128 dimensions
        #condition_out = layers.Dense(self.config['model']['filters']['depths']['res'],
        #                                   name='initial_dense_condition',
        #                                   use_bias=False)(condition_input)
        # Repeat along the time axis so its length matches the input
        #condition_out = layers.RepeatVector(self.input_length,
        #                                          name='initial_condition_repeat')(condition_out)
        # The statement below is effectively just the add node
        #data_out = layers.Merge(mode='sum', name='initial_data_condition_merge')(
        #    [data_out, condition_out])
        #data_out = layers.Add()([data_out, condition_out])

        skip_connections = []
        res_block_i = 0
        # The three outer blocks (stacks).
        # If memory overflows, reducing the number of stacks or the dilations may help,
        # but the inputs and outputs would have to be adjusted accordingly.
        for stack_i in range(self.num_stacks):
            layer_in_stack = 0
            # The inner dilations: 1, 2, 4, ..., 512
            for dilation in self.dilations:
                res_block_i += 1
                data_out, skip_out = self.dilated_residual_block(
                    data_out, res_block_i, layer_in_stack, dilation,
                    stack_i)  #, condition_input
                if skip_out is not None:
                    skip_connections.append(skip_out)
                layer_in_stack += 1

        #data_out = layers.Merge(mode='sum')(skip_connections)
        data_out = layers.Add()(skip_connections)
        data_out = self.activation(data_out)
        #filter 3, output 2048
        data_out = layers.Convolution1D(
            self.config['model']['filters']['depths']['final'][0],
            self.config['model']['filters']['lengths']['final'][0],
            padding='same',
            use_bias=False)(data_out)
        # condition_input also needs to be projected to 2048 dimensions
        #condition_out = layers.Dense(self.config['model']['filters']['depths']['final'][0],
        #                                   use_bias=False,
        #                                   name='penultimate_conv_1d_condition')(condition_input)

        # Expand the length to padded_target_field_length
        #condition_out = layers.RepeatVector(self.padded_target_field_length,
        #                                          name='penultimate_conv_1d_condition_repeat')(condition_out)

        #data_out = layers.Merge(mode='sum', name='penultimate_conv_1d_condition_merge')([data_out, condition_out])
        #data_out = layers.Add()([data_out, condition_out])

        data_out = self.activation(data_out)
        # filter 3, output 256
        data_out = layers.Convolution1D(
            self.config['model']['filters']['depths']['final'][1],
            self.config['model']['filters']['lengths']['final'][1],
            padding='same',
            use_bias=False)(data_out)

        #condition_out = layers.Dense(self.config['model']['filters']['depths']['final'][1], use_bias=False,
        #                                   name='final_conv_1d_condition')(condition_input)

        #condition_out = layers.RepeatVector(self.padded_target_field_length,
        #                                          name='final_conv_1d_condition_repeat')(condition_out)

        #data_out = layers.Merge(mode='sum', name='final_conv_1d_condition_merge')([data_out, condition_out])
        #data_out = layers.Add()([data_out, condition_out])
        # Reduce to a single channel (the audio signal)
        data_out = layers.Convolution1D(1, 1)(data_out)

        data_out_speech = data_out
        data_out_noise = BinLayers.Subtract(name='subtract_layer')(
            [data_input_target_field_length, data_out_speech])

        # Reshape the outputs (squeeze out the channel axis)
        data_out_speech = layers.Lambda(
            lambda x: tf.keras.backend.squeeze(x, 2),
            output_shape=lambda shape: (shape[0], shape[1]),
            name='data_output_1')(data_out_speech)

        data_out_noise = layers.Lambda(
            lambda x: tf.keras.backend.squeeze(x, 2),
            output_shape=lambda shape: (shape[0], shape[1]),
            name='data_output_2')(data_out_noise)

        return tf.keras.models.Model(inputs=[data_input],
                                     outputs=[
                                         data_out_speech, data_out_noise
                                     ])  #inputs=[data_input, condition_input]
Example #16
 def __init__(self,
              model_name,
              klass_name,
              embedding_matrix,
              embedding_size=EMBEDDING_SIZE,
              input_length=MAX_DOCUMENT_LENGTH):
     self.klass_name = klass_name
     self.model = models.Sequential(name=f'{model_name}-model')
     self.model.add(
         layers.Embedding(
             embedding_matrix.shape[0],
             embedding_size,
             input_length=input_length,
             embeddings_initializer=initializers.Constant(embedding_matrix),
             trainable=False))
     # model.add(layers.Embedding(len(tokenizer.word_index)+1, embedding_size, input_length=MAX_DOCUMENT_LENGTH))  # for trainable embedding layer
     self.model.add(layers.Dropout(0.1))
     self.model.add(
         layers.Convolution1D(
             16,
             kernel_size=4,
             activation='relu',
             strides=1,
             padding='same',
             kernel_constraint=constraints.MaxNorm(max_value=3)))
     self.model.add(layers.Dropout(0.5))
     self.model.add(
         layers.Convolution1D(
             12,
             kernel_size=8,
             activation='relu',
             strides=2,
             padding='same',
             kernel_constraint=constraints.MaxNorm(max_value=3)))
     self.model.add(layers.Dropout(0.5))
     self.model.add(
         layers.Convolution1D(
             8,
             kernel_size=16,
             activation='relu',
             strides=2,
             padding='same',
             kernel_constraint=constraints.MaxNorm(max_value=3)))
     self.model.add(layers.Dropout(0.5))
     self.model.add(layers.Flatten())
     self.model.add(
         layers.Dense(128,
                      activation='relu',
                      kernel_constraint=constraints.MaxNorm(max_value=3)))
     self.model.add(layers.Dropout(0.5))
     self.model.add(
         layers.Dense(64,
                      activation='relu',
                      kernel_constraint=constraints.MaxNorm(max_value=3)))
     self.model.add(layers.Dropout(0.5))
     self.model.add(
         layers.Dense(2,
                      activation='softmax',
                      kernel_constraint=constraints.MaxNorm(max_value=3)))
     self.model.compile(
         optimizer=optimizers.Adam(),  #learning_rate=0.001), 
         loss=losses.CategoricalCrossentropy(from_logits=False),
         metrics=[
             metrics.CategoricalAccuracy(),
             metrics.Recall(class_id=0),
             metrics.Precision(class_id=0)
         ])
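A construction sketch, assuming this __init__ belongs to a wrapper class (called TextCnnWrapper below purely for illustration) and using a random placeholder embedding matrix:

import numpy as np

# Placeholder embeddings: a 5000-word vocabulary of 300-dimensional vectors.
dummy_embeddings = np.random.uniform(-0.05, 0.05, size=(5000, 300))
wrapper = TextCnnWrapper(model_name='news', klass_name='sports',
                         embedding_matrix=dummy_embeddings,
                         embedding_size=300, input_length=70)
wrapper.model.summary()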