Example #1
    def __init__(self):
        model_m = Sequential()

        model_m.add(
            layers.Conv1D(25, 50, activation='relu', input_shape=(50000, 4)))
        model_m.add(layers.Conv1D(25, 50, activation='relu'))
        model_m.add(layers.MaxPooling1D(5, strides=2))

        model_m.add(layers.Conv1D(50, 25, activation='relu'))
        model_m.add(layers.MaxPooling1D(5, strides=2))

        model_m.add(layers.Conv1D(50, 25, activation='relu'))
        model_m.add(layers.MaxPooling1D(20, strides=4))

        model_m.add(layers.Conv1D(70, 20, activation='relu'))
        model_m.add(layers.MaxPooling1D(25, strides=4))

        # dilated layers
        model_m.add(layers.Conv1D(100, 15, activation='relu', dilation_rate=2))
        model_m.add(layers.Conv1D(100, 15, activation='relu', dilation_rate=2))
        model_m.add(layers.MaxPooling1D(25, strides=4))

        model_m.add(layers.Flatten())
        model_m.add(layers.Dense(2500, activation='linear'))

        # make model
        self.model = model_m
Example #2
    def learn(self, X, y, s, v_s):
        # construct the model
        self.net = models.Sequential()
        self.net.add(
            layers.Conv1D(self.params['filter1'],
                          3,
                          activation=self.params['act'],
                          input_shape=(None, s)))
        self.net.add(layers.MaxPooling1D(1))
        self.net.add(
            layers.Conv1D(self.params['filter1'],
                          3,
                          activation=self.params['act']))
        self.net.add(layers.MaxPooling1D(1))
        self.net.add(
            layers.Conv1D(self.params['filter1'],
                          3,
                          activation=self.params['act']))
        self.net.add(layers.GlobalMaxPooling1D())
        self.net.add(layers.Dense(1))
        #compile
        self.net.compile(optimizer='rmsprop', loss='mae')

        # training (fit() accepts generators; fit_generator is deprecated)
        his = self.net.fit(X,
                           steps_per_epoch=500,
                           epochs=self.params['epochs'],
                           validation_data=y,
                           validation_steps=v_s)

        return np.mean(his.history['val_loss'])
Example #3
def three_CNN_LSTM(learning_rate=0.001, INPUT_SHAPE=[1000, 4], KERNEL_SIZE=9, NUM_KERNEL=64, RNN_UNITS=40):
    params = locals()

    inp = Input(shape=INPUT_SHAPE)
    x = layers.Conv1D(NUM_KERNEL, kernel_size=KERNEL_SIZE, kernel_initializer=KERNEL_INITIAL)(inp)  # KERNEL_INITIAL is a module-level constant
    x = layers.Activation('relu')(x)
    # x = layers.BatchNormalization()(x)
    x = layers.Dropout(0.2)(x)
    x = layers.MaxPooling1D()(x)

    x = layers.Conv1D(NUM_KERNEL, kernel_size=KERNEL_SIZE, kernel_initializer=KERNEL_INITIAL)(x)
    x = layers.Activation('relu')(x)  # the duplicated relu lines were redundant; relu is idempotent
    # x = layers.BatchNormalization()(x)
    x = layers.Dropout(0.3)(x)
    x = layers.MaxPooling1D()(x)
    
    x = layers.Conv1D(NUM_KERNEL, kernel_size=KERNEL_SIZE, kernel_initializer=KERNEL_INITIAL)(x)
    x = layers.Activation('relu')(x)
    # x = layers.BatchNormalization()(x)
    x = layers.Dropout(0.3)(x)
    x = layers.MaxPooling1D()(x)

    # LSTM
    # HIDDEN_UNITS = 20
    x = layers.Bidirectional(layers.LSTM(RNN_UNITS, kernel_initializer=KERNEL_INITIAL, return_sequences=True, dropout=0.5))(x)
    x = layers.Flatten()(x)
    x = layers.Dense(1)(x)
    # a sigmoid classifier
    x = layers.Activation('sigmoid')(x)
    
    return models.Model(inp, x), params
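The builder returns the model together with its hyperparameters but leaves compilation to the caller; a minimal usage sketch, assuming binary labels and that KERNEL_INITIAL is defined at module level:

model, params = three_CNN_LSTM()
model.compile(optimizer=tf.keras.optimizers.Adam(params['learning_rate']),
              loss='binary_crossentropy',
              metrics=['accuracy'])
# model.fit(X, y, epochs=10, batch_size=32)  # X: (n, 1000, 4) one-hot sequences, y: 0/1 labels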
Example #4
def define_discriminator(input_shape, n_class=4):

    input_ecg = tf.keras.Input(input_shape)
    lyr = layers.Conv1D(32, 16, activation=layers.LeakyReLU(alpha=0.2), padding='same')(input_ecg)
    lyr = layers.BatchNormalization()(lyr)
    lyr = layers.MaxPooling1D()(lyr)

    lyr = layers.Conv1D(64, 16, activation=layers.LeakyReLU(alpha=0.2), padding='same')(lyr)
    lyr = layers.BatchNormalization()(lyr)
    lyr = layers.MaxPooling1D()(lyr)

    lyr = layers.Conv1D(128, 16, activation=layers.LeakyReLU(alpha=0.2), padding='same')(lyr)
    lyr = layers.BatchNormalization()(lyr)
    lyr = layers.MaxPooling1D()(lyr)
    lyr = layers.Flatten()(lyr)
    # lyr = layers.GRU(30)(lyr)
    # lyr = layers.Dropout(0.5)(lyr)
    # lyr = layers.GaussianNoise(0.2)(lyr)
    # lyr = layers.Dense(n_class)(lyr)
    
    # label output
    lb_out = layers.Dense(n_class, activation='softmax')(lyr)
    # true/fake output
    tf_out = layers.Dense(1, activation='sigmoid')(lyr)
    d_model = tf.keras.Model(input_ecg, [tf_out, lb_out])
    # compile both models
    opt = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)  # lr= is deprecated
    d_model.compile(loss=['binary_crossentropy', 'sparse_categorical_crossentropy'], optimizer=opt)
    c_model = tf.keras.Model(input_ecg, lb_out)
    c_model.compile(loss='sparse_categorical_crossentropy', optimizer=opt, metrics=['accuracy'])    
    return c_model, d_model
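Since c_model and d_model are built on the same layers, updating either updates the shared weights; a hedged sketch of one semi-supervised training step with placeholder shapes and data:

import numpy as np

c_model, d_model = define_discriminator(input_shape=(256, 1))  # placeholder ECG length
X_real = np.random.randn(8, 256, 1)                 # placeholder labeled batch
y_real = np.random.randint(0, 4, size=(8, 1))
c_model.train_on_batch(X_real, y_real)              # supervised step on labeled data
d_model.train_on_batch(X_real, [np.ones((8, 1)), y_real])  # unsupervised real/fake step (fakes analogous, with zeros)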
Example #5
    def __init__(self):
        super(MyCNN, self).__init__()
        self.embedding = layers.Embedding(num_words,
                                          embedding_len,
                                          weights=[embedding_matrix],
                                          input_length=max_review_len,
                                          trainable=True)

        self.cnn1 = layers.Conv1D(128,
                                  3,
                                  padding='same',
                                  strides=1,
                                  activation='relu')
        self.p1 = layers.MaxPooling1D(pool_size=28)
        self.cnn2 = layers.Conv1D(128,
                                  4,
                                  padding='same',
                                  strides=1,
                                  activation='relu')
        self.p2 = layers.MaxPooling1D(pool_size=27)
        self.cnn3 = layers.Conv1D(128,
                                  5,
                                  padding='same',
                                  strides=1,
                                  activation='relu')
        self.p3 = layers.MaxPooling1D(pool_size=26)
        self.cnn4 = layers.Conv1D(128,
                                  6,
                                  padding='same',
                                  strides=1,
                                  activation='relu')
        self.p4 = layers.MaxPooling1D(pool_size=25)
        self.f = layers.Flatten()  # flatten the feature maps for the fully connected layers
        self.d = layers.Dropout(0.3)
        self.outlayer = layers.Dense(5, activation='softmax')
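Only the constructor is shown; a plausible call method wiring these layers as a multi-kernel TextCNN (a guess at the omitted code, not the original):

    def call(self, inputs, training=None):
        # hypothetical forward pass: embed, run the four conv/pool branches, concatenate, classify
        x = self.embedding(inputs)
        b1 = self.p1(self.cnn1(x))
        b2 = self.p2(self.cnn2(x))
        b3 = self.p3(self.cnn3(x))
        b4 = self.p4(self.cnn4(x))
        x = layers.concatenate([b1, b2, b3, b4], axis=1)
        x = self.f(x)
        x = self.d(x, training=training)
        return self.outlayer(x)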
Example #6
 def cnn1d(self, in_shape=(16000, 1)):
     model = models.Sequential()
     model.add(
         layers.Conv1D(filters=256,
                       kernel_size=320,
                       activation='relu',
                       input_shape=in_shape))
     model.add(layers.MaxPooling1D(pool_size=2))
     model.add(layers.Conv1D(filters=64, kernel_size=160,
                             activation='relu'))
     model.add(layers.MaxPooling1D(pool_size=2))
     model.add(layers.Conv1D(filters=128, kernel_size=80,
                             activation='relu'))
     model.add(layers.MaxPooling1D(pool_size=2))
     model.add(layers.Flatten())
     model.add(layers.Dropout(0.5))
     model.add(layers.Dense(256, activation='relu'))
     model.add(layers.Dropout(0.5))
     model.add(layers.Dense(256, activation='relu'))
     model.add(layers.Dropout(0.5))
     model.add(layers.Dense(256, activation='relu'))
     model.add(layers.Dropout(0.5))
     model.add(layers.Dense(64, activation='relu'))
     model.add(layers.Dense(30, activation='softmax'))
     model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
                   loss='categorical_crossentropy',
                   metrics=['categorical_accuracy'])
     return model
Example #7
def build_cnn_cnn():
    global n_past, n_future, n_features
    inputA = keras.Input(shape=(n_past, int(n_features)), name="cA")
    inputD = keras.Input(shape=(n_past, int(n_features)), name="cD")
    # x is the CNN branch for the approximation coefficients (cA)
    x = layers.Conv1D(filters=64, kernel_size=2, activation='relu')(inputA)
    x = layers.MaxPooling1D(pool_size=2)(x)
    x = layers.Flatten()(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Dense(100, activation='relu')(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Dense(50)(x)
    # x = layers.BatchNormalization()(x)

    # y is the CNN branch for the detail coefficients (cD)
    y = layers.Conv1D(filters=64, kernel_size=2, activation='relu')(inputD)
    y = layers.MaxPooling1D(pool_size=2)(y)
    y = layers.Flatten()(y)
    y = layers.Dropout(0.3)(y)
    y = layers.Dense(100, activation='relu')(y)
    y = layers.Dropout(0.3)(y)
    y = layers.Dense(50)(y)
    # combine the two CNN branches
    com = layers.concatenate([x, y])
    z = layers.Dense(n_future)(com)

    model = keras.Model(inputs=[inputA, inputD], outputs=z)
    model.compile(loss='mse', optimizer='adam')
    return model
Example #8
 def build_model(self, hp):
     """
     Builds the model as specified in the model configuration file.
     """
     self.model = models.Sequential()
     self.model.add(
         layers.Conv1D(hp.Int('conv1d_1', 16, 128, 4),
                       kernel_size=2,
                       padding="same",
                       input_shape=(7, self.n_feats)))
     self.model.add(layers.LeakyReLU(alpha=0.001))
     self.model.add(layers.MaxPooling1D(pool_size=2))
     self.model.add(
         layers.Conv1D(hp.Int('conv1d_2', 16, 128, 4),
                       kernel_size=2,
                       padding="same"))
     self.model.add(layers.LeakyReLU(alpha=0.001))
     self.model.add(layers.MaxPooling1D(pool_size=2))
     self.model.add(layers.Flatten())
     self.model.add(layers.Dense(hp.Int('dense_1', 4, 100, 4)))
     self.model.add(layers.LeakyReLU(alpha=0.001))
     self.model.add(layers.Dropout(hp.Float('dropout', 0.01, 0.5, 0.01)))
     self.model.add(layers.Dense(1))
     self.model.compile(loss="huber",
                        optimizer=optimizers.Adam(
                            hp.Choice('learning_rate',
                                      values=[1e-2, 1e-3, 1e-4])),
                        metrics=[self.soft_acc, "mae"])
     return self.model
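Because build_model takes an hp argument, it is meant to be driven by KerasTuner; a hedged driver sketch (the wrapper object and data names are placeholders):

import keras_tuner as kt

tuner = kt.RandomSearch(wrapper.build_model,  # `wrapper` is the object that owns build_model
                        objective='val_loss',
                        max_trials=20,
                        overwrite=True)
tuner.search(X_train, y_train, epochs=30, validation_data=(X_val, y_val))
best_model = tuner.get_best_models(num_models=1)[0]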
Example #9
def build_cnn_cnn():
    global n_past, n_future, n_features
    inputA = keras.Input(shape=(n_past, int(n_features)), name="cA")
    inputD = keras.Input(shape=(n_past, int(n_features)), name="cD")
    # x is the CNN branch for the approximation coefficients (cA)
    x = layers.Conv1D(filters=64, kernel_size=2, activation='relu')(inputA)
    x = layers.MaxPooling1D(pool_size=2)(x)
    x = layers.Flatten()(x)
    x = layers.Dense(50, activation='relu')(x)

    # x = Model(inputs=inputA, outputs=x)
    # y is the CNN branch for the detail coefficients (cD)
    y = layers.Conv1D(filters=64, kernel_size=2, activation='relu')(inputD)
    y = layers.MaxPooling1D(pool_size=2)(y)
    y = layers.Flatten()(y)
    y = layers.Dense(50, activation='relu')(y)
    # y = Model(inputs=inputD, outputs=y)
    # combine the two CNN branches
    com = layers.concatenate([x, y])
    # z = LSTM(200, activation='relu', return_sequences=False)(com)
    z = layers.Dense(100, activation="relu")(com)
    z = layers.Dense(n_future, activation='relu')(z)

    model = keras.Model(inputs=[inputA, inputD], outputs=z)
    model.compile(loss='mse', optimizer=my_optimizer)  # my_optimizer is defined elsewhere in the module
    return model
Example #10
 def create_model(self):
     self.model = tf.keras.Sequential([
         layers.InputLayer(input_shape=(self.num_of_frames,
                                        self.frame_size)),
         layers.Conv1D(128, kernel_size=3),
         layers.Conv1D(128, kernel_size=3),
         layers.ReLU(),
         layers.Dropout(.5),
         layers.MaxPooling1D(),
         layers.Conv1D(256, kernel_size=3),
         layers.ReLU(),
         layers.Dropout(.5),
         layers.MaxPooling1D(),
         layers.Conv1D(512, kernel_size=3),
         layers.ReLU(),
         layers.Dropout(.5),
         layers.MaxPooling1D(),
         layers.Flatten(),
         layers.Dense(512),
         layers.ReLU(),
         layers.Dropout(.5),
         layers.Dense(256),
         layers.ReLU(),
         layers.Dropout(.5),
         layers.Dense(self.num_of_classes, activation='softmax')
     ])
Example #11
def create_model():
    model = tf.keras.Sequential([
        layers.Conv1D(64, 3, activation="relu", input_shape=(8192, 1)),
        # layers.Conv1D(64, 1, activation="relu", ),
        layers.BatchNormalization(),
        layers.MaxPooling1D(2),
        layers.Conv1D(32, 3, activation="relu"),
        # layers.Conv1D(32, 1, activation="relu"),
        layers.Conv1D(16, 3, activation="relu"),
        layers.Dropout(0.4),
        layers.BatchNormalization(),
        layers.MaxPooling1D(2),
        # layers.Conv1D(16, 1, activation="relu"),
        layers.Conv1D(8, 3, activation="relu"),
        # layers.GlobalMaxPool1D(),
        # layers.Conv1D(8, 1, activation="relu"),
        layers.BatchNormalization(),
        layers.GlobalMaxPool1D(),
        # tf.keras.layers.Flatten(),
        layers.Dense(8, activation="relu"),
        layers.Dense(4,
                     activation="relu",
                     kernel_regularizer=tf.keras.regularizers.l2(0.0001)),
        layers.Dense(2,
                     activation='softmax',
                     kernel_regularizer=tf.keras.regularizers.l2(0.0001))
    ])
    # Dense(2, kernel_initializer='he_normal', activation='softmax', kernel_regularizer=l2(0.0001))
    model.summary()
    return model
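create_model returns the network uncompiled; a minimal compile-and-train sketch, assuming one-hot labels for the two-way softmax:

model = create_model()
model.compile(optimizer='adam',
              loss='categorical_crossentropy',  # use sparse_categorical_crossentropy for integer labels
              metrics=['accuracy'])
# model.fit(X_train, y_train, epochs=20, validation_split=0.1)  # X_train: (n, 8192, 1)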
Example #12
def build_discriminator():

    discriminator = tf.keras.Sequential()
    discriminator.add(layers.Conv1D(filters=32, kernel_size=3, padding='same', input_shape=(inputLength, 1)))
    discriminator.add(layers.LeakyReLU(alpha=0.2))
    discriminator.add(layers.Conv1D(filters=32, kernel_size=3, padding='same'))
    discriminator.add(layers.LeakyReLU(alpha=0.2))
    discriminator.add(layers.MaxPooling1D(pool_size=2))
    discriminator.add(layers.Conv1D(filters=32, kernel_size=3, padding='same'))
    discriminator.add(layers.LeakyReLU(alpha=0.2))
    discriminator.add(layers.Conv1D(filters=32, kernel_size=3, padding='same'))
    discriminator.add(layers.LeakyReLU(alpha=0.2))
    discriminator.add(layers.MaxPooling1D(pool_size=2))
    discriminator.add(layers.Conv1D(filters=32, kernel_size=3, padding='same'))
    discriminator.add(layers.LeakyReLU(alpha=0.2))
    discriminator.add(layers.Conv1D(filters=32, kernel_size=3, padding='same'))
    discriminator.add(layers.LeakyReLU(alpha=0.2))
    discriminator.add(layers.MaxPooling1D(pool_size=2))
    discriminator.add(layers.Flatten())  # input_shape belongs only on the first layer
    discriminator.add(layers.Dense(64, dtype='float32'))
    discriminator.add(layers.Dropout(0.4))
    discriminator.add(layers.LeakyReLU(alpha=0.2))
    discriminator.add(layers.Dense(1, activation='tanh', dtype='float32'))

    return discriminator
Example #13
def cnn1D(inputSize,
          opt='adam',
          loss=SparseCategoricalCrossentropy(from_logits=True),
          classes=10):
    model = models.Sequential()
    # 81%
    model.add(
        layers.Conv1D(32, (5),
                      activation='relu',
                      padding="same",
                      input_shape=inputSize))
    # model.add(layers.BatchNormalization())
    model.add(layers.Dropout(rate=0.3))
    model.add(layers.MaxPooling1D((2)))
    model.add(layers.Conv1D(64, (3), padding="same", activation='relu'))
    # model.add(layers.BatchNormalization())
    model.add(layers.Dropout(rate=0.3))
    model.add(layers.MaxPooling1D((2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dropout(rate=0.5))
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dropout(rate=0.5))
    model.add(layers.Dense(classes))  # logits output layer was missing; the default loss uses from_logits=True
    model.compile(optimizer=opt, loss=loss, metrics=['accuracy'])
    return model
Example #14
 def createModel(self):
     print("creating model")
     self.model = models.Sequential()
     
     
     self.model.add(
         layers.Conv1D(8, (3), activation='relu', input_shape=(self.num_hourly_per_element, self.num_values_per_hour))
     )
     self.model.add(
         layers.MaxPooling1D((2))
     )
     self.model.add(
         layers.Conv1D(16, (3), activation='relu')
     )
     self.model.add(
         layers.MaxPooling1D((2))
     )
     self.model.add(
         layers.Conv1D(16, (3), activation='relu')
     )
     
     
     self.model.add(layers.Flatten())
     self.model.add(layers.Dense(16, activation='relu'))
     self.model.add(layers.Dense(8, activation='relu'))
     self.model.add(layers.Dense(1))
     
     print(self.model.layers[0].input_shape)
     self.model.summary()
     
     print("done creating model")
Example #15
def create_conv_model(input_num=input_num, addtion_num=input_num):
    inputs = keras.Input(shape=(input_num, ), name='main')
    # addition_inputs = keras.Input(shape=(addtion_num,), name='addition')

    x = inputs
    x = layers.Reshape(target_shape=(1, input_num))(x)
    x = layers.Conv1D(50, 10, activation='relu', strides=1, padding='same')(x)
    x = layers.Conv1D(50, 10, activation='relu', strides=1, padding='same')(x)
    x = layers.MaxPooling1D(5, padding='same')(x)
    x = layers.Conv1D(50, 5, activation='relu', strides=1, padding='same')(x)
    x = layers.MaxPooling1D(5, padding='same')(x)
    x = layers.Flatten()(x)

    # combine with addition_inputs
    # x = layers.Concatenate()([x, addition_inputs])
    x = layers.Concatenate()([x, inputs])

    x = layers.Dense(200, activation='relu')(x)
    x = layers.Dropout(0.2)(x)
    outputs = layers.Dense(output_num, activation='sigmoid')(x)

    # model = keras.Model(inputs=[inputs, addition_inputs], outputs=outputs, name='functional_group_model')
    model = keras.Model(inputs=inputs,
                        outputs=outputs,
                        name='functional_group_model')

    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=[
                      'binary_accuracy', 'Precision', 'Recall',
                      'TruePositives', 'FalseNegatives'
                  ])

    return model
Example #16
def residual_network(L_window):
    input_sig = kl.Input(shape=(L_window, 1))
    # First Layer
    conv0 = kl.Conv1D(64, kernel_size=128, padding="same")(input_sig)
    acti0 = kl.Activation("relu")(conv0)
    pool0 = kl.MaxPooling1D(pool_size=2, strides=2, padding="same")(acti0)
    # Block 1
    input1 = pool0
    BN1 = kl.BatchNormalization(momentum=0.8)(input1)
    acti1 = kl.Activation("relu")(BN1)
    conv1 = kl.Conv1D(64, kernel_size=6, padding="same")(acti1)
    pool1 = kl.MaxPooling1D(pool_size=2, strides=2,
                            padding="same")(kl.add([input1, conv1]))
    acti2 = kl.Activation("relu")(pool1)
    # Block 2
    input2 = acti2
    BN2 = kl.BatchNormalization(momentum=0.8)(input2)
    acti3 = kl.Activation("relu")(BN2)
    conv2 = kl.Conv1D(64, kernel_size=4, padding="same")(acti3)
    pool2 = kl.MaxPooling1D(pool_size=2, strides=2,
                            padding="same")(kl.add([input2, conv2]))
    acti4 = kl.Activation("relu")(pool2)
    # Block 3
    input3 = acti4
    BN3 = kl.BatchNormalization(momentum=0.8)(input3)
    acti5 = kl.Activation("relu")(BN3)
    conv3 = kl.Conv1D(64, kernel_size=4, padding="same")(acti5)
    pool3 = kl.MaxPooling1D(pool_size=2, strides=2,
                            padding="same")(kl.add([input3, conv3]))
    x = kl.GlobalAveragePooling1D()(pool3)
    x = kl.Dense(128, activation='relu')(x)
    x = kl.Dropout(0.3)(x)
    output_sig = kl.Dense(2, activation='sigmoid')(x)
    model = km.Model(input_sig, output_sig)
    return model
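residual_network also comes back uncompiled; a hedged compile sketch, assuming one-hot two-class labels to match the two sigmoid units:

model = residual_network(L_window=1024)  # placeholder window length
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()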
Example #17
    def build_model(self):

        # Model defn

        inputs = layers.Input(shape=(self.window_size, 1))

        # Route 1 - local detection
        r1 = layers.Conv1D(100, 10, activation='relu')(inputs)
        r1 = layers.MaxPooling1D(5)(r1)
        r1 = layers.Dropout(0.5)(r1)
        r1 = layers.Conv1D(100, 10, activation='relu')(r1)
        r1 = layers.Conv1D(100, 10, activation='relu')(r1)
        r1 = layers.Conv1D(100, 10, activation='relu')(r1)
        r1 = layers.MaxPooling1D(5)(r1)

        # Route 2
        r2 = layers.Conv1D(100, 100, activation='relu')(inputs)
        r2 = layers.MaxPooling1D(5)(r2)
        r2 = layers.Dropout(0.5)(r2)
        r2 = layers.Conv1D(100, 100, activation='relu')(r2)
        r2 = layers.MaxPooling1D(5)(r2)

        # Concatenation and classifying
        x = layers.Concatenate(axis=1)([r1, r2])
        x = layers.Dropout(0.5)(x)

        flatten = layers.Flatten()(x)
        softmax = layers.Dense(self.num_states * self.window_size,
                               activation='softmax')(flatten)
        finalReshape = layers.Reshape(
            (self.window_size, self.num_states))(softmax)

        model = Model(inputs, finalReshape, name='SplitCNN')

        return model
Example #18
def create_model(embedding_dim, embedding_matrix, vocab_size, maxlen):
    model = Sequential()
    model.add(layers.Embedding(
        vocab_size,
        embedding_dim,
        input_length=maxlen,
        weights=[embedding_matrix],
        trainable=False)
    )
    model.add(layers.Conv1D(256, 5, activation='relu'))
    model.add(layers.MaxPooling1D(5))
    model.add(layers.Dropout(0.5))
    model.add(layers.Bidirectional(layers.LSTM(128, return_sequences=True)))
    model.add(layers.Dropout(0.5))
    model.add(layers.Conv1D(256, 5, activation='relu'))
    model.add(layers.MaxPooling1D(5))
    model.add(layers.Dropout(0.5))
    model.add(layers.Bidirectional(layers.LSTM(128)))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(1, activation='sigmoid'))

    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    return model
Example #19
def model_split_channels():
    xs = []
    inputs = []
    feature_maps = []
    # for position in DataReader.smartphone_positions:
    for position in ['Hips']:
        for _, channel in DataReader.channels.items():
            ts = keras.Input(
                shape=(500, ), name=position + '_' + channel
            )  # 3D tensor with shape: (batch_size, steps, input_dim)
            x = layers.Reshape((500, 1))(ts)
            # xs.append(x)

            x = layers.Conv1D(filters=32,
                              kernel_size=13,
                              strides=2,
                              padding='valid',
                              activation='relu')(x)  # input_shape is unnecessary in the functional API
            x = layers.MaxPooling1D()(x)
            x = layers.BatchNormalization()(x)

            x = layers.Conv1D(filters=16,
                              kernel_size=15,
                              strides=2,
                              padding='valid',
                              activation='relu')(x)
            x = layers.MaxPooling1D()(x)
            x = layers.BatchNormalization()(x)

            x = layers.Conv1D(filters=8,
                              kernel_size=9,
                              strides=2,
                              padding='valid',
                              activation='relu')(x)
            x = layers.GlobalMaxPooling1D()(x)
            x = layers.BatchNormalization()(x)

            inputs.append(ts)
            feature_maps.append(x)

    x = layers.concatenate(feature_maps)

    x = layers.Dense(2048, activation='relu')(x)
    x = layers.Dropout(0.3)(x)
    class_output = layers.Dense(8, activation='softmax',
                                name='class_output')(x)

    model = keras.Model(inputs=inputs, outputs=class_output)

    keras.utils.plot_model(model,
                           'cnn_split_modalities_model.png',
                           show_shapes=True)

    model.compile(optimizer=keras.optimizers.Adam(1e-3, amsgrad=True),
                  loss=keras.losses.CategoricalCrossentropy(),  # the softmax outputs are probabilities, not logits
                  metrics=['acc'])

    print('[SHL Challenge] model compiled successfully')
    return model
Example #20
def set_convolution_layer():
    input_shape = (98 + k, 256)  # using Hocnnlb data, filename[1~31]
    # input_shape = (48 + k, 256)  # using Pyfeat data, filename[0]

    model = models.Sequential()
    model.add(layers.Conv1D(N, k, padding='valid', input_shape=input_shape))
    model.add(layers.Activation('relu'))
    model.add(layers.MaxPooling1D(pool_size=m))
    model.add(layers.Dropout(0.5))

    model.add(layers.Conv1D(N, int(k / 2), padding='valid'))
    model.add(layers.Activation('relu'))
    model.add(layers.MaxPooling1D(pool_size=m))
    model.add(layers.Dropout(0.25))

    model.add(layers.Flatten())
    model.add(layers.Dense(l, activation='relu'))
    model.add(layers.Dropout(0.25))

    model.add(layers.Dense(2))
    model.add(layers.Activation('softmax'))

    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    return model
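N, k, m, and l are module-level hyperparameters the snippet does not define; placeholder values so the function runs standalone (not the original settings):

N = 64   # number of conv filters
k = 8    # kernel size (also enters the input length, 98 + k)
m = 2    # pool size
l = 128  # dense units
model = set_convolution_layer()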
Example #21
def try_conv1d_weather():
    train_gen = tuto11.train_gen
    val_gen = tuto11.val_gen
    train_steps = tuto11.train_steps
    val_steps = tuto11.val_steps

    model = Sequential()
    model.add(
        layers.Conv1D(32,
                      5,
                      activation='relu',
                      input_shape=(None, tuto11.float_data.shape[-1])))
    model.add(layers.MaxPooling1D(3))
    model.add(layers.Conv1D(32, 5, activation='relu'))
    model.add(layers.MaxPooling1D(3))
    model.add(layers.Conv1D(32, 5, activation='relu'))
    model.add(layers.GlobalMaxPooling1D())
    model.add(layers.Dense(1))

    model.summary()

    model.compile(optimizer=RMSprop(), loss='mae')
    history = model.fit(train_gen,
                        steps_per_epoch=train_steps,
                        epochs=20,
                        validation_data=val_gen,
                        validation_steps=val_steps)

    loss = history.history['loss']
    val_loss = history.history['val_loss']

    plot_history(loss, val_loss, 20, 'loss')
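plot_history is an unshown helper; a minimal matplotlib sketch consistent with the call plot_history(loss, val_loss, 20, 'loss') (the signature is assumed):

import matplotlib.pyplot as plt

def plot_history(train_values, val_values, epochs, label):
    # hypothetical helper: plot training vs. validation curves
    xs = range(1, epochs + 1)
    plt.plot(xs, train_values, 'bo', label='Training ' + label)
    plt.plot(xs, val_values, 'b', label='Validation ' + label)
    plt.xlabel('Epochs')
    plt.ylabel(label)
    plt.legend()
    plt.show()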
Example #22
def make_classifier(optimizer=opt):
    # build the CNN model
    model = Sequential()
    model.add(
        layers.Conv1D(64,
                      kernel_size=(10),
                      activation='relu',
                      input_shape=(X_train.shape[1], 1)))
    model.add(
        layers.Conv1D(128,
                      kernel_size=(10),
                      activation='relu',
                      kernel_regularizer=l2(0.01),
                      bias_regularizer=l2(0.01)))
    model.add(layers.MaxPooling1D(pool_size=(8)))
    model.add(layers.Dropout(0.4))
    model.add(layers.Conv1D(128, kernel_size=(10), activation='relu'))
    model.add(layers.MaxPooling1D(pool_size=(8)))
    model.add(layers.Dropout(0.4))
    model.add(layers.Flatten())
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dropout(0.4))
    model.add(layers.Dense(8, activation='sigmoid'))  # softmax is the usual pairing with categorical_crossentropy
    opt = keras.optimizers.Adam(learning_rate=0.0001)  # note: overrides the optimizer argument
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
Example #23
def build_AlexNet(optimizer, init):
    model = models.Sequential()
    model.add(
        layers.Conv1D(32,
                      3,
                      kernel_initializer=init,
                      activation='relu',
                      input_shape=(265, 1)))

    model.add(layers.Conv1D(64, 3, kernel_initializer=init, activation='relu'))
    model.add(layers.MaxPooling1D(2))

    model.add(layers.Conv1D(64, 3, kernel_initializer=init, activation='relu'))
    model.add(layers.MaxPooling1D(2))

    model.add(layers.Conv1D(64, 3, kernel_initializer=init, activation='relu'))
    model.add(layers.Conv1D(64, 3, kernel_initializer=init, activation='relu'))
    model.add(layers.Conv1D(64, 3, kernel_initializer=init, activation='relu'))
    model.add(layers.MaxPooling1D(2))

    model.add(layers.Dense(64, kernel_initializer=init, activation='relu'))
    model.add(layers.Dense(64, kernel_initializer=init, activation='relu'))

    # what happens if you remove the flatten??
    model.add(layers.Flatten())
    model.add(layers.Dense(64, kernel_initializer=init, activation='sigmoid'))
    model.add(layers.Dense(1))

    model.compile(optimizer=optimizer,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
Example #24
def get_20_news_model():

    path = "/content/drive/My Drive/Datasets/"
    # path = ''
    embedding_matrix = pickle.load(
        open(path + "20_news_embedding_matrix.pl", "rb"))

    model = models.Sequential()
    model.add(
        layers.Embedding(
            20002,
            100,
            embeddings_initializer=tf.keras.initializers.Constant(
                embedding_matrix),
            trainable=False,
        ))

    model.add(layers.Conv1D(128, 5, activation="relu"))
    model.add(layers.MaxPooling1D(5))
    model.add(layers.Conv1D(128, 5, activation="relu"))
    model.add(layers.MaxPooling1D(5))
    model.add(layers.Conv1D(128, 5, activation="relu"))
    model.add(layers.GlobalMaxPooling1D())
    model.add(layers.Dense(128, activation="relu"))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(20, activation="softmax"))

    # model.summary()

    model.compile(loss="categorical_crossentropy",
                  optimizer="rmsprop",
                  metrics=["accuracy"])

    return model
Example #25
def build_CNN(train_ds, val_ds, test_ds):
    """Function to build a convolutional neural network with 2 convolutions and
       1 dense layer.
    """
    def add_dim(sample, label):
        """Add a dimension to the tensors in each dataset for the Conv1D layer.
        """
        sample = tf.expand_dims(sample, 1)
        return (sample, label)

    # Add a dimension to each dataset for compatibility with the Conv1D layer
    train_ds = train_ds.map(add_dim)
    val_ds = val_ds.map(add_dim)
    test_ds = test_ds.map(add_dim)

    # Build model
    model = keras.models.Sequential([
        layers.Conv1D(filters=16, kernel_size=1, padding='valid',
                      activation='relu', input_shape=(1, 410)),
        layers.MaxPooling1D(pool_size=1, strides=1),
        layers.Conv1D(filters=32, kernel_size=1, padding='valid',
                      activation='relu'),
        layers.MaxPooling1D(pool_size=1, strides=1),
        layers.Dropout(0.2),
        layers.Flatten(),
        layers.Dense(256, activation='relu'),
        layers.Dense(3, activation='softmax'),
    ])
    return (model, train_ds, val_ds, test_ds)
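Compilation is again left to the caller; a hedged usage sketch for the returned tuple (optimizer and loss are assumptions):

model, train_ds, val_ds, test_ds = build_CNN(train_ds, val_ds, test_ds)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',  # assumes one-hot labels for the 3-way softmax
              metrics=['accuracy'])
model.fit(train_ds, validation_data=val_ds, epochs=10)
model.evaluate(test_ds)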
Example #26
def build_mod2_cnn1d():
    global n_past, n_future, n_features
    inp = keras.Input(shape=(n_past, int(n_features)))  # renamed from `input` to avoid shadowing the builtin
    x = layers.Conv1D(filters=64, kernel_size=2, activation='relu')(inp)
    x = layers.Conv1D(filters=64, kernel_size=2, activation='relu')(x)
    x = layers.MaxPooling1D(pool_size=2)(x)
    x = layers.Conv1D(filters=64, kernel_size=2, activation='relu')(x)
    x = layers.Conv1D(filters=64, kernel_size=2, activation='relu')(x)
    x = layers.MaxPooling1D(pool_size=2)(x)
    x = layers.Flatten()(x)
    x = layers.Dropout(0.2)(x)
    x = layers.BatchNormalization()(x)  # added
    x = layers.Dense(1000, activation='relu')(x)
    x = layers.Dropout(0.2)(x)
    x = layers.Dense(500)(x)
    x = layers.Dense(200)(x)
    x = layers.Dense(n_future)(x)
    x = layers.LeakyReLU()(x)
    model = keras.Model(inputs=[inp], outputs=x)
    model.compile(optimizer='adam', loss='mse')
    model.summary()
    plot_model(model,
               to_file=save_path + 'modelCNN_{}.png'.format(syn),
               show_shapes=True)
    return model
Example #27
def multi_ouput_toy():
    """ p265 toy example: take a series of social-media posts and try to predict age, gender, and income.
        Currently also broken: it compiles just fine, so this is probably a sample issue again; come back with real-life data. """

    vocabulary_size = 50000
    num_income_groups = 10

    posts_input = Input(shape=(None,), dtype='int32', name='posts')
    embedded_posts = layers.Embedding(vocabulary_size, 256)(posts_input)  # Embedding(input_dim, output_dim); the arguments were swapped
    x = layers.Conv1D(128, 5, activation='relu')(embedded_posts)
    x = layers.MaxPooling1D(5)(x)
    x = layers.Conv1D(256, 5, activation='relu')(x)
    x = layers.Conv1D(256, 5, activation='relu')(x)
    x = layers.MaxPooling1D(5)(x)
    x = layers.Conv1D(256, 5, activation='relu')(x)
    x = layers.Conv1D(256, 5, activation='relu')(x)
    x = layers.GlobalMaxPooling1D()(x)
    x = layers.Dense(128, activation='relu')(x)

    age_prediction = layers.Dense(1, name='age')(x)
    income_prediction = layers.Dense(num_income_groups, activation='softmax', name='income')(x)
    gender_prediction = layers.Dense(1, activation='sigmoid', name='gender')(x)

    model = Model(posts_input, [age_prediction, income_prediction, gender_prediction])
    model.compile(optimizer='rmsprop', loss=['mse', 'categorical_crossentropy', 'binary_crossentropy'])
    # ==
    model.compile(optimizer='rmsprop',
                  loss={'age': 'mse', 'income': 'categorical_crossentropy', 'gender': 'binary_crossentropy'})

    # the loss scales can be unbalanced; weight them to compensate:
    # mse usually ranges around 3-5, categorical crossentropy around 1, binary crossentropy around 0.1
    model.compile(optimizer='rmsprop', loss=['mse', 'categorical_crossentropy', 'binary_crossentropy'],
                  loss_weights=[.25, 1., 10.])

    model.compile(optimizer='rmsprop',
                  loss={'age': 'mse', 'income': 'categorical_crossentropy', 'gender': 'binary_crossentropy'},
                  loss_weights={'age': .25, 'income': 1., 'gender': 10.})

    num_samples = 5000
    max_length = 200
    posts = np.random.randint(1, vocabulary_size, size=(num_samples, max_length))
    age_targets = np.random.randint(0, 100, size=num_samples)
    income_targets = np.random.randint(0, num_income_groups, size=num_samples)
    gender_targets = np.random.randint(0, 2, size=num_samples)  # randint(0, 1) would always return 0

    model.fit(posts, [age_targets, income_targets, gender_targets],
              epochs=10, batch_size=64)
    # ==
    model.fit(posts, {'age': age_targets,
                      'income': income_targets,
                      'gender': gender_targets},
              epochs=10, batch_size=64)


# p267

# see p270 for using the same layer on multiple inputs (e.g., for sentence comparison)
Example #28
def model_lstmEm(inp_shape=[1000, 4],
                 embed_n=4,
                 embed_dim=64,
                 kernel=9,
                 filters=64,
                 rnn_units=40):

    params = locals()

    inp = Input(shape=inp_shape)
    ''' Embedding layer or not '''
    if embed_dim > 0:
        x = layers.Lambda(tokenize,
                          arguments={
                              'n': embed_n,
                              'padding': 'valid'
                          })(inp)
        x = layers.Embedding(4**embed_n,
                             embed_dim,
                             input_length=1000 - embed_n + 1)(x)  # input_length expects an int, not a list
    else:
        if embed_n == 1:
            x = inp
        else:
            x = layers.Lambda(tokenize,
                              arguments={
                                  'n': embed_n,
                                  'padding': 'valid'
                              })(inp)
            x = layers.Lambda(one_hot_layer, arguments={'n': 4**embed_n})(x)

    x = layers.Conv1D(filters, kernel)(x)  # was (inp), which silently discarded the embedding branch
    x = layers.Activation('relu')(x)
    x = layers.Dropout(0.2)(x)
    x = layers.MaxPooling1D()(x)

    x = layers.Conv1D(filters, kernel)(x)
    x = layers.Activation('relu')(x)
    x = layers.Dropout(0.3)(x)
    x = layers.MaxPooling1D()(x)

    x = layers.Conv1D(filters, kernel)(x)
    x = layers.Activation('relu')(x)
    x = layers.Dropout(0.3)(x)
    x = layers.MaxPooling1D()(x)

    x = layers.Bidirectional(
        layers.LSTM(rnn_units, return_sequences=True, dropout=0.5))(x)

    x = layers.Flatten()(x)
    x = layers.Dense(1)(x)
    x = layers.Activation('sigmoid')(x)

    return models.Model(inp, x), params
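tokenize and one_hot_layer are unshown helpers; a speculative k-mer tokenizer over one-hot (L, 4) sequences, consistent with the 4**embed_n vocabulary above (an assumption, not the original code):

import tensorflow as tf

def tokenize(x, n=1, padding='valid'):
    # hypothetical helper: map one-hot (batch, L, 4) to overlapping n-mer ids in [0, 4**n)
    # `padding` is accepted for API compatibility; only 'valid' is implemented here
    idx = tf.argmax(x, axis=-1)  # (batch, L) base indices 0..3
    L = x.shape[1]
    windows = [idx[:, j:L - n + 1 + j] for j in range(n)]  # 'valid' gives L - n + 1 positions
    kmer = windows[0]
    for w in windows[1:]:
        kmer = kmer * 4 + w
    return kmer

def one_hot_layer(x, n=4):
    # hypothetical helper: expand token ids back to one-hot vectors of width n
    return tf.one_hot(x, depth=n)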
Example #29
def cnn_1(N):
    layer_in = tfkl.Input(shape=[N, 1])
    enc = tfkl.Conv1D(8,
                      8,
                      strides=1,
                      padding='SAME',
                      activation='relu',
                      use_bias=False)(layer_in)
    enc = tfkl.MaxPooling1D(2)(enc)
    enc = tfkl.Conv1D(16,
                      8,
                      strides=1,
                      padding='SAME',
                      activation='relu',
                      use_bias=False)(enc)
    enc = tfkl.MaxPooling1D(2)(enc)
    enc = tfkl.Conv1D(32,
                      8,
                      strides=1,
                      padding='SAME',
                      activation='relu',
                      use_bias=False)(enc)
    enc = tfkl.MaxPooling1D(2)(enc)
    enc = tfkl.Conv1D(64,
                      8,
                      strides=1,
                      padding='SAME',
                      activation='relu',
                      use_bias=False)(enc)
    dec = tfkl.UpSampling1D(2)(enc)
    dec = tfkl.Conv1D(32,
                      8,
                      strides=1,
                      padding='SAME',
                      activation='relu',
                      use_bias=False)(dec)
    dec = tfkl.UpSampling1D(2)(dec)
    dec = tfkl.Conv1D(16,
                      8,
                      strides=1,
                      padding='SAME',
                      activation='relu',
                      use_bias=False)(dec)
    dec = tfkl.UpSampling1D(2)(dec)
    dec = tfkl.Conv1D(8,
                      8,
                      strides=1,
                      padding='SAME',
                      activation='relu',
                      use_bias=False)(dec)
    out = tfkl.Conv1D(1, 8, strides=1, padding='SAME', use_bias=False)(dec)
    model = tf.keras.Model(inputs=[layer_in], outputs=[out])
    return model
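cnn_1 is a 1-D convolutional autoencoder (three pool/upsample stages, so N should be divisible by 8); a hedged training sketch assuming a denoising setup with placeholder data:

import numpy as np

model = cnn_1(N=1024)  # divisible by 2**3, matching the three pool/upsample stages
model.compile(optimizer='adam', loss='mse')
clean = np.random.randn(32, 1024, 1).astype('float32')  # placeholder signals
noisy = clean + 0.1 * np.random.randn(32, 1024, 1).astype('float32')
model.fit(noisy, clean, epochs=5, batch_size=8)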
Example #30
def seq_epi_20210105_v1_model(summary=True):
    seq_dim = (5000, 4)
    epi_dim = (200, 12)

    seq_input_one = layers.Input(shape=seq_dim, name="seq_feature_one")
    seq_input_two = layers.Input(shape=seq_dim, name="seq_feature_two")
    epi_input_one = layers.Input(shape=epi_dim, name="epi_feature_one")
    epi_input_two = layers.Input(shape=epi_dim, name="epi_feature_two")

    seq_path = layers.concatenate([seq_input_one, seq_input_two],
                                  axis=1,
                                  name="seq_concat")
    epi_path = layers.concatenate([epi_input_one, epi_input_two],
                                  axis=1,
                                  name="epi_concat")

    seq_path = layers.Conv1D(filters=128,
                             kernel_size=40,
                             padding='same',
                             activation='relu')(seq_path)
    epi_path = layers.Conv1D(filters=128,
                             kernel_size=10,
                             padding='same',
                             activation='relu')(epi_path)

    seq_path = layers.MaxPooling1D(50)(seq_path)
    epi_path = layers.MaxPooling1D(2)(epi_path)

    combined_path = layers.concatenate([seq_path, epi_path], axis=2)
    combined_path = layers.Dropout(0.5)(combined_path)

    combined_path = layers.Conv1D(filters=32,
                                  kernel_size=1,
                                  padding='same',
                                  activation='relu',
                                  name="bottleneck")(combined_path)
    combined_path = layers.Conv1D(filters=512,
                                  kernel_size=10,
                                  padding='same',
                                  activation='relu')(combined_path)
    combined_path = layers.MaxPooling1D(pool_size=100,
                                        padding="same")(combined_path)
    combined_path = layers.Flatten()(combined_path)
    combined_path = layers.Dropout(0.5)(combined_path)
    combined_path = layers.Dense(512, activation="relu")(combined_path)
    combined_path = layers.Dense(1, activation="sigmoid")(combined_path)

    model = Model(inputs=(seq_input_one, seq_input_two, epi_input_one,
                          epi_input_two),
                  outputs=combined_path)

    if summary:
        model.summary()

    return model
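The four-input model is returned uncompiled; a minimal compile sketch (optimizer and loss are assumptions consistent with the single sigmoid output):

model = seq_epi_20210105_v1_model()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])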