Example #1
def build_model(nb_values):

    model = Sequential()

    model.add(BatchNormalization(input_shape=(nb_values, 1)))
    model.add(Conv1D(filters=32, kernel_size=5, strides=1))
    model.add(AveragePooling1D(pool_size=5))

    model.add(BatchNormalization())
    model.add(Conv1D(filters=16, kernel_size=5, strides=1))
    model.add(AveragePooling1D(pool_size=5))

    model.add(BatchNormalization())
    model.add(Conv1D(filters=8, kernel_size=5, strides=1))
    model.add(AveragePooling1D(pool_size=5))

    model.add(Flatten())

    model.add(Dense(64, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))

    # note: binary_crossentropy is the usual loss for a sigmoid output;
    # 'mse' is kept here as in the original
    model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    model.summary()

    return model
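A minimal usage sketch for build_model; the imports, sequence length, and random training data below are illustrative assumptions, not part of the original snippet.

# assumed imports for the snippet above
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (BatchNormalization, Conv1D,
                                     AveragePooling1D, Flatten, Dense)
import numpy as np

model = build_model(1000)                         # 1000 time steps, 1 channel
X = np.random.rand(32, 1000, 1).astype('float32')
y = np.random.randint(0, 2, size=(32, 1))
model.fit(X, y, epochs=1, batch_size=8)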
Example #2
    def __init__(self, img_h, img_w, batch_size):
        """
        :param img_h: image height
        :param img_w: image width
        :param batch_size: batch size

        Here we define the parameters of the Bspline model.
        """
        super(BsplineModel, self).__init__()
        self.img_h = img_h
        self.img_w = img_w
        self.batch_size = batch_size
        # self.OutputChannels = OutputChannels

        self.conv_1 = Conv1D(filters=32,
                             kernel_size=3,
                             padding='same',
                             kernel_initializer="he_normal")
        self.conv_2 = Conv1D(filters=32,
                             kernel_size=3,
                             padding='same',
                             kernel_initializer="he_normal")
        self.conv_3 = Conv1D(filters=64,
                             kernel_size=3,
                             padding='same',
                             kernel_initializer="he_normal")
        self.conv_4 = Conv1D(filters=64,
                             kernel_size=3,
                             padding='same',
                             kernel_initializer="he_normal")
        self.Pool_4_1 = AveragePooling1D(pool_size=3, strides=1)
        self.conv_5 = Conv1D(filters=128,
                             kernel_size=3,
                             padding='same',
                             kernel_initializer="he_normal")
        self.conv_6 = Conv1D(filters=128,
                             kernel_size=3,
                             padding='same',
                             kernel_initializer="he_normal")
        self.conv_7 = Conv1D(filters=256,
                             kernel_size=3,
                             padding='same',
                             kernel_initializer="he_normal")
        self.conv_8 = Conv1D(filters=256,
                             kernel_size=3,
                             padding='same',
                             kernel_initializer="he_normal")
        self.Pool_8_1 = AveragePooling1D(pool_size=3, strides=1)
        self.flatten = Flatten()
        self.flatten_2 = Flatten()
        self.dense_1 = Dense(1024, activation='relu')
        self.dense_1_2 = Dense(1024, activation='relu')
        self.dense_2 = Dense(2048, activation='relu')
        self.dense_2_2 = Dense(2048, activation='relu')
        self.dense_3 = Dense(1024, activation='relu')
        self.dense_3_2 = Dense(1024, activation='relu')
        self.dense_4 = Dense(512, activation='relu')
        self.dense_4_2 = Dense(512, activation='relu')
        self.dense_5 = Dense(256, activation='relu')
        self.dense_5_2 = Dense(128, activation='relu')
        self.dense_6 = Dense(10, activation='hard_sigmoid')
        self.dense_6_2 = Dense(10, activation='hard_sigmoid')
Example #3
def get_model(feature, depth):
    K.clear_session()
    model = models.Sequential()
    model.add(Input(shape=(feature, depth)))
    model.add(Masking(mask_value=-999.0))  # note: Conv1D does not consume masks, so this pattern depends on the TF version tolerating it
    model.add(Conv1D(filters=8, kernel_size=27, strides=1, padding='same'))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(AveragePooling1D(pool_size=3))
    
    model.add(Conv1D(filters=8, kernel_size=15, strides=1, padding='same'))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(AveragePooling1D(pool_size=3))
    
    model.add(Conv1D(filters=16, kernel_size=13, strides=1, padding='same'))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(AveragePooling1D(pool_size=3))
    
    model.add(Conv1D(filters=16, kernel_size=9, strides=1, padding='same'))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(AveragePooling1D(pool_size=3))
    
    model.add(Conv1D(filters=32, kernel_size=7, strides=1, padding='same'))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    
    model.add(GlobalAveragePooling1D())
    
    model.add(Dropout(0.05))
    model.add(Dense(1, activation='sigmoid'))
    
    return model
Example #4
    def call(self, x):

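        # U-Net-style forward pass: paired conv layers downsample via average
        # pooling, and pre-pooling activations are stored as skip connections
        # for the upsampling (deconv) path below.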
        skip_conn = []
        output = tf.one_hot(x, depth=20)

        output = self._conv_layers[0](output)
        output = self._conv_layers[1](output)
        skip_conn.append(output)
        output = AveragePooling1D(pool_size=2)(output)

        output = self._conv_layers[2](output)
        output = self._conv_layers[3](output)
        skip_conn.append(output)
        output = AveragePooling1D(pool_size=2)(output)

        output = self._conv_layers[4](output)
        output = self._conv_layers[5](output)
        output = self._deconv_layers[0](output)

        output = Concatenate()([output, skip_conn[-1]])

        output = self._conv_layers[6](output)
        output = self._conv_layers[7](output)
        output = self._deconv_layers[1](output)

        output = Concatenate()([output, skip_conn[-2]])

        output = self._conv_layers[8](output)
        output = self._conv_layers[9](output)
        output = self._conv_layers[10](output)

        return output
Example #5
def cnn_best(input_shape=(5000, 1), classes=256, lr=0.00001):
    # From VGG16 design
    img_input = Input(shape=input_shape)
    # Block 1
    x = Conv1D(64, 11, activation='relu', padding='same', name='block1_conv1')(img_input)
    x = AveragePooling1D(2, strides=2, name='block1_pool')(x)
    # Block 2
    x = Conv1D(128, 11, activation='relu', padding='same', name='block2_conv1')(x)
    x = AveragePooling1D(2, strides=2, name='block2_pool')(x)
    # Block 3
    x = Conv1D(256, 11, activation='relu', padding='same', name='block3_conv1')(x)
    x = AveragePooling1D(2, strides=2, name='block3_pool')(x)
    # Block 4
    x = Conv1D(512, 11, activation='relu', padding='same', name='block4_conv1')(x)
    x = AveragePooling1D(2, strides=2, name='block4_pool')(x)
    # Block 5
    x = Conv1D(512, 11, activation='relu', padding='same', name='block5_conv1')(x)
    x = AveragePooling1D(2, strides=2, name='block5_pool')(x)
    # Classification block
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(4096, activation='relu', name='fc2')(x)
    x = Dense(classes, activation='softmax', name='predictions')(x)

    inputs = img_input
    # Create model.
    model = Model(inputs, x, name='cnn_best')
    optimizer = RMSprop(learning_rate=lr)  # 'lr' was renamed to 'learning_rate' in tf.keras
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model
Example #6
def CNN_model(vocab_len, vec_len):
    model3 = Sequential()
    embedding_layer = Embedding(input_dim=vocab_len + 1, output_dim=256)
    inp_layer = Input(shape=(vec_len, ))
    model3.add(inp_layer)
    model3.add(embedding_layer)
    model3.add(
        Conv1D(64,
               4,
               activation='tanh',
               kernel_initializer='he_uniform'))
    model3.add(AveragePooling1D(4))
    model3.add(
        Conv1D(128, 4, activation='tanh', kernel_initializer='he_uniform'))
    model3.add(AveragePooling1D(4))
    model3.add(GlobalAveragePooling1D())
    model3.add(Dense(256, activation='relu'))
    model3.add(Dropout(0.2))
    model3.add(Dense(128, activation='relu'))
    model3.add(Dropout(0.2))
    model3.add(Dense(64, activation='relu'))
    model3.add(Dense(5, activation='softmax'))
    # a 5-way softmax output calls for categorical_crossentropy; the
    # original's binary_crossentropy silently miscomputes the loss
    model3.compile(loss="categorical_crossentropy",
                   optimizer='adam',
                   metrics=['accuracy'])
    return model3
Example #7
def build_model_2(nb_values):

    model = Sequential()

    model.add(BatchNormalization(input_shape=(nb_values, 1)))
    model.add(Conv1D(filters=64, kernel_size=5, padding='same'))
    model.add(AveragePooling1D(pool_size=5))

    model.add(BatchNormalization())
    model.add(Conv1D(filters=128, kernel_size=5, padding='same'))
    model.add(AveragePooling1D(pool_size=5))

    model.add(BatchNormalization())
    model.add(Conv1D(filters=128, kernel_size=5, padding='same'))
    model.add(AveragePooling1D(pool_size=8))

    model.add(Flatten())

    model.add(Dense(64, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()

    return model
Example #8
def cnn_1D(shape=(55, 300),
           layer_1=256,
           layer_2=256,
           layer_3=128,
           dropout=0.2,
           layer_4=16,
           layer_5=128,
           layer_6=64,
           activation_1='elu',
           activation_2='relu',
           optimizer='nadam',
           loss='mse'):

    # This returns a tensor
    inputs = Input(shape=shape)
    input_head = Input(shape=(6, ))

    # kernel_size is a required Conv1D argument that the original omitted;
    # 3 is an assumed value throughout this function
    x = Conv1D(layer_1, 3)(inputs)
    x = AveragePooling1D()(x)
    #x = GlobalAveragePooling1D()(x)
    x = Activation(activation_1)(x)  #  tfa.activations.mish

    x = Conv1D(layer_2, 3)(x)
    x = AveragePooling1D()(x)
    #x = GlobalAveragePooling1D()(x)
    x = Activation(activation_1)(x)

    x = Conv1D(layer_3, 3)(x)
    x = AveragePooling1D()(x)
    #x = GlobalAveragePooling1D()(x)
    x = BatchNormalization()(x)
    x = Activation(activation_1)(x)

    x = Dropout(dropout)(x)
    x = Flatten()(x)

    # a layer instance is callable on a tensor, and returns a tensor.
    # Conv1D needs a 3-D input, so give the 6-vector a channel axis first
    x2 = Reshape((6, 1))(input_head)
    x2 = Conv1D(layer_4, 3)(x2)
    #x2 = BatchNormalization()(x2)
    x2 = Activation(activation_1)(x2)
    x2 = Flatten()(x2)  # flatten to 2-D so it can be concatenated with the main branch

    x = concatenate([x, x2])
    x = Dense(layer_5)(x)
    x = BatchNormalization()(x)
    x = Activation(activation_1)(x)
    x = Dropout(dropout)(x)

    x = Dense(layer_6, activation=activation_2)(x)
    #x = Dense(128, activation='relu')(x)
    predictions = Dense(55, activation='linear')(x)

    # This creates a model that includes
    # the Input layer and three Dense layers
    model = Model(inputs=[inputs, input_head], outputs=predictions)
    # the loss/optimizer arguments were unused in the original; compile here
    model.compile(loss=loss, optimizer=optimizer)

    return model
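A hedged usage sketch for the two-input model above; the batch size and random inputs are illustrative only.

import numpy as np

model = cnn_1D()
seq_batch = np.random.rand(8, 55, 300).astype('float32')   # matches shape=(55, 300)
head_batch = np.random.rand(8, 6).astype('float32')        # matches shape=(6,)
preds = model.predict([seq_batch, head_batch])             # -> (8, 55)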
Example #9
def build_model_CNN(input_shape,
                    outputSize,
                    denseWidth,
                    denseLength,
                    denseGrowth,
                    convFilters,
                    convLength,
                    convGrowth,
                    convFilterSize,
                    poolSize,
                    padding,
                    dropout_val,
                    activation_function,
                    output_activation):

    # Specify input layer dimensions
    i = Input(shape=(1, input_shape[2],))

    # Build convolutional layers; the first block reads from the input layer i
    x = i
    for _ in range(convLength):
      x = Conv1D(convFilters, convFilterSize, activation=activation_function, padding=padding)(x)
      x = BatchNormalization()(x)
      x = Conv1D(convFilters, convFilterSize, activation=activation_function, padding=padding)(x)
      x = AveragePooling1D(poolSize, padding=padding, strides=1)(x)

      # Modify the number of conv filters in the next block by the growth factor (convGrowth)
      convFilters = convFilters * convGrowth

    # Global Average Pooling rather than max pooling *may* give better results on non-image data.
    x = GlobalAveragePooling1D()(x)

    # Build dense layers
    for k in range(denseLength):

      x = Dense(denseWidth, activation=activation_function, kernel_regularizer=l2(0.001))(x)
      x = Dropout(dropout_val)(x)
      denseWidth = denseWidth * denseGrowth

    # Specify output layer; output_activation='linear' would suit a regression task.
    x = Dense(outputSize, activation=output_activation)(x)

    # Declare model
    model = Model(i, x)

    return model
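The hyperparameters are easiest to read from an example call. Every value below is illustrative, and the function additionally assumes l2 has been imported from tensorflow.keras.regularizers.

model = build_model_CNN(input_shape=(None, 1, 128),
                        outputSize=1,
                        denseWidth=64, denseLength=2, denseGrowth=1,
                        convFilters=32, convLength=3, convGrowth=2,
                        convFilterSize=3, poolSize=2, padding='same',
                        dropout_val=0.2,
                        activation_function='relu',
                        output_activation='linear')
model.summary()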
Example #10
    def __call__(self,
                 input_tensor,
                 units=64,
                 kernel_size=2,
                 predict_seq_length=1):
        out_0, out_1, out_2, x = input_tensor
        x = UpSampling1D(4)(x)
        x = Concatenate()([x, out_2])
        x = conv_br(x, units * 3, kernel_size, 1, 1)

        x = UpSampling1D(2)(x)
        x = Concatenate()([x, out_1])
        x = conv_br(x, units * 2, kernel_size, 1, 1)

        x = UpSampling1D(2)(x)
        x = Concatenate()([x, out_0])
        x = conv_br(x, units, kernel_size, 1, 1)

        # regression
        x = Conv1D(1, kernel_size=kernel_size, strides=1, padding="same")(x)
        out = Activation("sigmoid")(x)
        out = Lambda(lambda x: 12 * x)(out)
        out = AveragePooling1D(strides=4)(
            out
        )  # TODO: a tricky way to map batch*input_seq*1 -> batch*output_seq*1; needs a more general approach

        return out
Example #11
def createResNetV1(inputShape=(5, 4), numClasses=2):
    inputs = Input(shape=inputShape)
    v = resLyr(inputs, lyrName='Inpt')
    v = resBlkV1(inputs=v,
                 numFilters=16,
                 numBlocks=3,
                 downsampleOnFirst=False,
                 names='Stg1')
    v = resBlkV1(inputs=v,
                 numFilters=16,
                 numBlocks=3,
                 downsampleOnFirst=True,
                 names='stg2')
    v = resBlkV1(inputs=v,
                 numFilters=16,
                 numBlocks=3,
                 downsampleOnFirst=True,
                 names='stg3')
    v = AveragePooling1D(pool_size=2, name="AvgPool")(v)
    v = Flatten()(v)
    outputs = Dense(numClasses,
                    activation='softmax',
                    kernel_initializer='he_normal')(v)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optmz,  # optmz: optimizer instance defined elsewhere in the module
                  metrics=['accuracy'])

    return model
Example #12
def createModel():
    filter_size = 64
    #**** Convolutional Network ****#
    #Create First Block
    X_input = Input(shape=(1,360))
    X = Conv1D(filter_size, (7), 3, padding='same', activation=tf.nn.relu)(X_input)
    X = BatchNormalization()(X)
    X = MaxPooling1D((3), 1, padding='same')(X)
    
    #Create Residual Blocks
    X = createResBlocks(X, filter_size)
    
    #Add last pooling layer of CNN
    X = AveragePooling1D((3), 1, padding='same')(X)
    X = Model(inputs=X_input, outputs=X)

    #**** Fully Connected Layers ****#    
    #Add a secondary input to hidden layer for target info
    Y_in = Input(shape=(1,2))
    Y = Lambda(lambda x: x)(Y_in)  # identity pass-through so the auxiliary input can be wrapped in a Model
    Y = Model(inputs=Y_in, outputs=Y) 
    
    #Combine new input with output of CNN
    combined = concatenate([X.output, Y.output], axis=2)

    #Add Fully Connected Layers
    Y = Dense(256, activation=tf.nn.relu)(combined)
    Y = Dense(256, activation=tf.nn.relu)(Y)
    Y = Dense(256)(Y)
    Y = Dense(2)(Y)

    model = Model(inputs=[X_input, Y_in], outputs=Y)

    return model
Example #13
def create_model_small_cnn(data_type,
                           labels=len(labels),
                           learning_rate=0.0001,
                           dense_units=200):
    if data_type == 'mfcc':
        input_shape = (98, 13)
    elif data_type == 'ssc':
        input_shape = (98, 26)
    else:
        raise ValueError('data_type must be "mfcc" or "ssc"')

    in1 = Input(shape=input_shape)
    conv = Conv1D(kernel_size=34,
                  strides=1,
                  filters=4,
                  activation='selu',
                  padding='same')(in1)
    bn = BatchNormalization()(conv)

    pool = AveragePooling1D(pool_size=2, strides=2, padding='same')(bn)
    flatten = Flatten()(pool)

    x = Dense(50, activation='selu',
              kernel_initializer='random_uniform')(flatten)

    x = Dense(50, activation='selu', kernel_initializer='random_uniform')(x)
    output = Dense(labels, activation='softmax')(x)
    model = Model(inputs=[in1], outputs=[output], name='cnn')
    optimizer = Adam(learning_rate=learning_rate)  # the original hardcoded 0.0005, ignoring this argument
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
Example #14
def create_model_conv(n_steps, n_features, dropout, loss, optimizer):
    model = Sequential()
    model.add(Conv1D(filters=32, kernel_size=2, activation='relu',
                     input_shape=(n_steps, n_features)))
    # model.add(Conv1D(filters=256, kernel_size=1))
    # model.add(Conv1D(filters=64, kernel_size=2, activation='relu'))
    # model.add(Dropout(0.5))
    model.add(AveragePooling1D(pool_size=2))

    # model.add(Flatten())
    #p = {2, 3, 5}

    # model.add(Conv1D(filters=256, kernel_size=1, activation='relu'))
    # model.add(AveragePooling1D(pool_size=2, strides=1))

    # model.add(Bidirectional(LSTM(256, return_sequences=True)))
    model.add(LSTM(32, return_sequences=True))
    model.add(Dropout(dropout))

    # model.add(LSTM(64, return_sequences=True))
    # model.add(Dropout(dropout))

    model.add(LSTM(64, return_sequences=False))
    model.add(Dropout(dropout))

    # model.add(Dense(64))
    # model.add(Dropout(0.2))

    model.add(Dense(3, activation=ACTIVATION_OUTPUT))  # ACTIVATION_OUTPUT and METRICS are module-level constants

    model.compile(loss=loss, metrics=METRICS, optimizer=optimizer)

    return model
Example #15
    def __init__(self, num_layers=4, c1=128, c2=192, c3=256, drop_rate=0.1, num_heads=8):
        super().__init__()
        self.input_dense = Dense(c1)
        self.sigma_ffn = ff_network(c1//4, 2048)
        self.enc1 = ConvSubLayer(c1, [1, 2])
        self.enc2 = ConvSubLayer(c2, [1, 2])
        self.enc3 = DecoderLayer(c2, 3, drop_rate, pos_factor=4)
        self.enc4 = ConvSubLayer(c3, [1, 2])
        self.enc5 = DecoderLayer(c3, 4, drop_rate, pos_factor=2)
        self.pool = AveragePooling1D(2)
        self.upsample = UpSampling1D(2)

        self.skip_conv1 = Conv1D(c2, 3, padding='same')
        self.skip_conv2 = Conv1D(c3, 3, padding='same')
        self.skip_conv3 = Conv1D(c2*2, 3, padding='same')
        self.text_style_encoder = Text_Style_Encoder(c2*2, c2*4)
        self.att_dense = Dense(c2*2)
        self.att_layers = [DecoderLayer(c2*2, 6, drop_rate)
                           for i in range(num_layers)]

        self.dec3 = ConvSubLayer(c3, [1, 2])
        self.dec2 = ConvSubLayer(c2, [1, 1])
        self.dec1 = ConvSubLayer(c1, [1, 1])
        self.output_dense = Dense(2)
        self.pen_lifts_dense = Dense(1, activation='sigmoid')
Example #16
def get_triangles_model(n_nodes, n_features):
    """
    Build and return neural network to classify TRIANGLES dataset using the
    neighborhood feature.

    :param n_nodes:     maximal number of nodes over all graphs in dataset
    :param n_features:  size of considered neighborhood in neighborhood node feature.
                        E.g. if neighborhood feature of a node is
                        of size 9x9 n_features would be 9.

    :return:            Tensorflow Neural Network
    """

    node_features_input_layer = tf.keras.Input(
        (n_nodes * n_features * n_features), dtype=tf.float32)
    # reshape input for average pool
    x = Reshape(
        (-1, n_nodes * n_features * n_features))(node_features_input_layer)
    # Avoid Overfitting by pooling and dropout
    x = AveragePooling1D(pool_size=6,
                         padding="valid",
                         data_format='channels_first')(x)
    x = Dense(units=60, activation='relu')(x)
    x = AveragePooling1D(pool_size=6,
                         padding="valid",
                         data_format='channels_first')(x)
    x = Dense(units=60, activation='relu')(x)
    x = AveragePooling1D(pool_size=6,
                         padding="valid",
                         data_format='channels_first')(x)
    x = Dense(units=32, activation='relu')(x)
    x = Dropout(0.1)(x)

    x = Dense(units=32, activation='relu')(x)
    #x = Dense(units= 32,activation='relu')(x)

    x = Reshape((32, ))(x)  # reshape to get appropriate prediction shape
    x = Dense(units=26, activation='softmax')(x)

    model = tf.keras.Model(inputs=node_features_input_layer, outputs=x)

    model.compile(
        optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.001),
        # the final Dense already applies softmax, so from_logits must be False
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
        metrics=['accuracy'])

    return model
Example #17
    def inception():
        u1 = rcompose(AveragePooling1D(pool_size=3, strides=1, padding='same'),
                      conv1D(48, 1))
        u2 = conv1D(48, 1)
        u3 = rcompose(conv1D(16, 1), conv1D(48, 3))
        u4 = rcompose(conv1D(16, 1), conv1D(48, 3), conv1D(48, 3))

        return rcompose(ljuxt(u1, u2, u3, u4), Concatenate(axis=2))
Example #18
 def pool(i):
     if pool_size > 1:
         # causal pooling: left-pad so the window at position t sees only x[:t+1]
         o = Lambda(lambda x: K.temporal_padding(x, (pool_size - 1, 0)))(i)
         o = AveragePooling1D(pool_size, strides=1, padding='valid')(o)
         # the first pool_size outputs averaged over zero padding;
         # rescale them so each is a true mean over the values seen so far
         scale = tf.reshape(pool_size / tf.range(1, pool_size + 1, dtype=o.dtype), (1, pool_size, 1))
         o = tf.concat([scale * o[:, :pool_size, :], o[:, pool_size:, :]], axis=1)
     else:
         o = i
     return o
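The rescaling is easy to sanity-check: with pool_size = 3 the output at position t should be the running mean of the inputs seen so far. A plain NumPy re-derivation of the same arithmetic (not the original Keras code):

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
pool_size = 3
padded = np.concatenate([np.zeros(pool_size - 1), x])    # left (causal) padding
pooled = np.array([padded[t:t + pool_size].mean() for t in range(len(x))])
pooled[:pool_size] *= pool_size / np.arange(1, pool_size + 1)
print(pooled)   # [1.  1.5 2.  3.  4. ] -- the running mean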
Example #19
def conv_model(n_features):
    model = Sequential()
    activation = 'relu'
    model.add(Conv1D(9, 9, input_shape=(n_features, 1), activation=activation))
    model.add(AveragePooling1D())
    model.add(BatchNormalization())
    model.add(Dropout(0.20))

    model.add(Conv1D(9, 7, activation=activation))
    model.add(AveragePooling1D())
    model.add(BatchNormalization())
    model.add(Dropout(0.25))

    model.add(Conv1D(18, 7, activation=activation))
    model.add(AveragePooling1D())
    model.add(BatchNormalization())
    model.add(Dropout(0.3))

    model.add(Conv1D(18, 5, activation=activation))
    model.add(AveragePooling1D())
    model.add(BatchNormalization())
    model.add(Dropout(0.35))

    model.add(
        Conv1D(36,
               3,
               activation=activation,
               kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4),
               bias_regularizer=regularizers.l2(1e-4),
               activity_regularizer=regularizers.l2(1e-5)))
    model.add(AveragePooling1D())
    model.add(BatchNormalization())
    model.add(Dropout(0.40))

    model.add(Conv1D(6, 1))
    model.add(GlobalAveragePooling1D())
    model.add(Dense(1))

    #optimizer = keras.optimizers.Adagrad(learning_rate = 0.001)
    optimizer = keras.optimizers.Adam(learning_rate=1e-3, decay=1e-3 / 100)  # 'lr' was renamed to 'learning_rate'
    #optimizer = keras.optimizers.RMSprop(0.001)
    model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse'])

    return model
Example #20
    def load_model(self, MAX_SEQUENCE_LENGTH, EMBEDDING_DIM):
        max_len = MAX_SEQUENCE_LENGTH
        emb_dim = EMBEDDING_DIM

        x1 = Input(shape=(max_len, ))
        w_embed = Embedding(NB_WEMBS + 1,
                            emb_dim,
                            input_length=max_len,
                            trainable=False)(x1)
        w_embed = Dropout(0.5)(Dense(64, activation='relu')(w_embed))
        h = Conv1D(filters=32,
                   kernel_size=2,
                   padding='valid',
                   activation='relu')(w_embed)
        h = Bidirectional(LSTM(32,
                               return_sequences=True,
                               recurrent_dropout=0.5),
                          merge_mode='concat')(h)
        h = AveragePooling1D(pool_size=2, strides=None, padding='valid')(h)
        h = Bidirectional(LSTM(16,
                               return_sequences=True,
                               recurrent_dropout=0.5),
                          merge_mode='concat')(h)
        h = AveragePooling1D(pool_size=2, strides=None, padding='valid')(h)
        h = Flatten()(h)
        preds_pol = Dense(2, activation='sigmoid')(h)

        model_pol = Model(inputs=[x1], outputs=preds_pol)
        model_pol.compile(loss='binary_crossentropy',
                          optimizer='nadam',
                          metrics=['accuracy'])
        #model_pol.summary()

        STAMP = 'test_sentita_lstm-cnn_wikiner_v1'
        #early_stopping = EarlyStopping(monitor='val_loss', patience=7)
        if 'SENTITA_MODEL_PATH' in os.environ:
            path_model = os.environ['SENTITA_MODEL_PATH']
        else:
            path_model = path
        bst_model_path = os.path.join(path_model, STAMP + '.h5')
        #checkpointer = ModelCheckpoint(bst_model_path, save_best_only=True, save_weights_only=True)
        model_pol.load_weights(bst_model_path)
        self.model_pol = model_pol
Example #21
def cnn_encoder(n_timesteps, n_features):
    input_ = Input((n_timesteps, n_features))  # the original hardcoded 12 input channels and never used n_features
    x = AveragePooling1D(4)(input_)
    x = Conv1D(32, 3, padding='same', activation='relu', name='encoder1')(x)
    x = AveragePooling1D(2)(x)
    x = Conv1D(64, 3, padding='same', activation='relu', name='encoder2')(x)
    x = AveragePooling1D(2)(x)
    x = Conv1D(64, 3, padding='same', activation='relu', name='encoder3')(x)
    x = AveragePooling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='encoder4')(x)
    x = AveragePooling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='encoder5')(x)
    x = AveragePooling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='encoder6')(x)
    x = AveragePooling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='encoder7')(x)
    # x = AveragePooling1D(2)(x)
    # x = Conv1D(128, 3, padding = 'same', activation = 'relu', name = 'encoder8')(x)
    # x = AveragePooling1D(2)(x)
    # x = Conv1D(128, 3, padding = 'same', activation = 'relu', name = 'encoder9')(x)

    x = AveragePooling1D(2, name='encoded')(x)

    # x = Conv1D(128, 3, padding = 'same', activation = 'relu', name = 'decoder10')(x)
    # x = UpSampling1D(2)(x)
    # x = Conv1D(128, 3, padding = 'same', activation = 'relu', name = 'decoder9')(x)
    # x = UpSampling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='decoder8')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='decoder7')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='decoder6')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='decoder5')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='decoder4')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='decoder3')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='decoder2')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(64, 3, padding='same', activation='relu', name='decoder1')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(64, 3, padding='same', activation='relu', name='decoder0')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(64, 3, padding='same', name='decoder-1')(x)
    x = Conv1D(n_features, 3, activation='sigmoid', padding='same', name='recover')(x)  # reconstruct the input channels (hardcoded 12 in the original)
    x = Flatten()(x)

    model = Model(inputs=input_, outputs=x)

    return model
Example #22
def Conv1D_ks(kernelsize=1, in_shape=time_series_len):  # time_series_len must be defined at module level
    model = Sequential()
    model.add(
        Conv1D(filters=1,
               kernel_size=kernelsize,
               input_shape=(in_shape, 1),
               data_format='channels_last'))
    model.add(AveragePooling1D(pool_size=2, padding='same'))
    model.add(LeakyReLU(0.1))
    model.add(BatchNormalization())
    return model
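Note that the in_shape default is evaluated once, when the function is defined, so time_series_len must already be bound at that point. A hedged usage sketch, assuming time_series_len = 100:

block = Conv1D_ks(kernelsize=3)
block.summary()   # (None, 100, 1) -> conv -> (None, 98, 1) -> pool -> (None, 49, 1)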
Example #23
def PoolingNet(input_shape=None, classes=6, classifier_activation='softmax'):

    inputs = Input(input_shape)
    x_max_1 = MaxPooling1D(pool_size=2, padding='same')(inputs)
    x_avg_1 = AveragePooling1D(pool_size=2, padding='same')(inputs)
    x = Concatenate(axis=1)([x_max_1, x_avg_1])

    x_max_2 = MaxPooling1D(pool_size=2, padding='same')(x)
    x_avg_2 = AveragePooling1D(pool_size=2, padding='same')(x)
    x = Concatenate(axis=1)([x_max_2, x_avg_2])

    x = Flatten()(x)
    x = Dense(1024, activation='relu', kernel_initializer='he_normal')(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu', kernel_initializer='he_normal')(x)
    x = Dropout(0.5)(x)
    outputs = Dense(classes, activation=classifier_activation)(x)

    model = Model(inputs=inputs, outputs=outputs)
    return model
Example #24
def get_tc_resnet_8(input_shape, num_classes, k):
    input_layer = Input(input_shape)
    x = Conv1D(int(16 * k), 3, strides=1, use_bias=False,
               padding='same')(input_layer)
    x = get_residual_block_type_two(x, 24, k)
    x = get_residual_block_type_two(x, 32, k)
    x = get_residual_block_type_two(x, 48, k)
    x = AveragePooling1D(3, 1)(x)
    x = Flatten()(x)
    x = Dropout(DROPOUT_RATE)(x)
    output_layer = Dense(num_classes, activation='softmax')(x)
    return Model(inputs=input_layer, outputs=output_layer)
Example #25
def vgg_att(n_class):
    inputs = Input(shape=(300, 40, 1))
    x = Conv2D(64, (3, 3), padding='same', name='block1_conv1', activation='relu')(inputs)
    x = Conv2D(64, (3, 3), padding='same', name='block1_conv2', activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)
    print(x.shape)

    x = Conv2D(128, (3, 3), padding='same', name='block2_conv1', activation='relu')(x)
    x = Conv2D(128, (3, 3), padding='same', name='block2_conv2', activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)
    print(x.shape)

    x = Conv2D(256, (3, 3), padding='same', name='block3_conv1', activation='relu')(x)
    x = Conv2D(256, (3, 3), padding='same', name='block3_conv2', activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same')(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)
    print(x.shape)

    x = Conv2D(512, (3, 3), padding='same', name='block4_conv1', activation='relu')(x)
    x = Conv2D(512, (3, 3), padding='same', name='block4_conv2', activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same')(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)
    print(x.shape)

    att = SelfAttention(n_hop=4, hidden_dim=1536)
    x = Reshape((x.shape[1], x.shape[2] * x.shape[3]))(x)
    print("after reshape")
    print(x.shape)
    x = att(x)
    print("after attention")
    print(x.shape)
    x = AveragePooling1D(pool_size=4, data_format="channels_last")(x)
    # x = GlobalMaxPooling2D()(x)
    print("after avgpool")
    print(x.shape)
    x = Flatten()(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.4)(x)
    output = Dense(n_class, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=output)

    model.compile(loss='categorical_crossentropy', optimizer='adam')  # needs hyperparameter tuning
    model.summary()
    return model
Example #26
 def pool(self, tensor):
     tensor = Conv1D(self.filters,
                     1,
                     1,
                     padding="same",
                     activation=self.activation,
                     kernel_regularizer=l2(0.00))(tensor)
     shape = tensor.shape.as_list()  # [batch, length, channels]
     scaler = AveragePooling1D(shape[1])(tensor)  # [batch, 1, channels]
     scaler = UpSampling1D(shape[1])(scaler)  # broadcast the global average back to full length
     return scaler
Example #27
def layer_1(input_data):

    lay_1 = Conv1D(filters=128, kernel_size=2, strides=1,
                   activation='relu')(input_data)

    #(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))

    lay_2 = Conv1D(filters=64, kernel_size=2, strides=1,
                   activation='relu')(lay_1)
    lay_2 = AveragePooling1D(2, padding='valid', strides=1)(lay_2)

    return lay_2
Example #28
    def get_model_conf(self):
        input_shape = (None, 6)
        inputs = Input(shape=input_shape, dtype='float32', name='xs')
        inner = inputs

        inner = tdnn_bn_relu(inner, 60, 7)
        inner = tdnn_bn_relu(inner, 90, 5)
        inner = tdnn_bn_relu(inner, 120, 5)
        inner = AveragePooling1D(pool_size=2)(inner)

        inner = tdnn_bn_relu(inner, 120, 3)
        inner = tdnn_bn_relu(inner, 160, 3)
        inner = tdnn_bn_relu(inner, 200, 3)

        inner = AveragePooling1D(pool_size=2)(inner)

        # No significant difference between gru and lstm
        inner = self.bi_rnn(inner, 60)
        inner = self.bi_rnn(inner, 60)
        inner = self.bi_rnn(inner, 60)
        inner = self.bi_rnn(inner, 60)

        inner = BatchNormalization()(inner)

        inner = Dense(DATA.CHARS_SIZE, kernel_initializer='he_normal')(inner)
        y_pred = Activation('softmax', name='softmax')(inner)

        # parameters for CTC loss, fed as network input
        labels = Input(name='ys', shape=[None], dtype='float32')
        input_length = Input(name='ypred_length', shape=[1], dtype='int64')
        label_length = Input(name='ytrue_length', shape=[1], dtype='int64')

        loss_out = Lambda(self.__ctc_lambda_func,
                          output_shape=(1, ),
                          name='ctc')(
                              [y_pred, labels, input_length, label_length])

        model = Model(inputs=[inputs, labels, input_length, label_length],
                      outputs=loss_out)
        return model
Example #29
def transition_block(x, nb_filter, compression=1.0, dropout_rate=None):
    eps = 1.1e-5

    x = BatchNormalization(epsilon=eps, axis=concat_axis)(x)  # concat_axis: channel axis defined at module level
    x = Activation('relu')(x)
    x = Conv1D(int(nb_filter * compression), 1, use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    x = AveragePooling1D(2, strides=2)(x)

    return x
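This is the DenseNet-style transition block (BN -> ReLU -> 1x1 bottleneck with compression -> stride-2 average pooling). A minimal sketch of calling it, assuming the snippet's layer imports are in scope and channels-last data so that concat_axis = -1:

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

concat_axis = -1   # channel axis for channels-last tensors
inp = Input(shape=(128, 64))   # (timesteps, channels); shapes are illustrative
out = transition_block(inp, nb_filter=64, compression=0.5, dropout_rate=0.2)
Model(inp, out).summary()      # -> (None, 64, 32): length halved, channels compressed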
Example #30
 def f(x):
     # theta: compression factor; pool_size and stride come from the enclosing scope
     num_transition_output_filters = int(int(x.shape[2]) * float(theta))
     x = BatchNormalization()(x)
     x = Activation("relu")(x)
     x = Conv1D(num_transition_output_filters,
                1,
                strides=1,
                padding="same",
                dilation_rate=1)(x)
     x = AveragePooling1D(pool_size=pool_size,
                          strides=stride,
                          padding="same")(x)
     return x