Example #1
def build_cnn_layers(layers, lstm_units, ksize, stride_1, filter_num, cnn_layers, single_lstm_layer):
    conv_stride = 2
    padding = "same"
    dropout = 0.5
    pool_size = 2

    branch = Sequential()

    branch.add(Bidirectional(LSTM(
        lstm_units,
        return_sequences=True),
        input_shape = (layers[1], layers[0])))
    branch.add(Dropout(dropout/2))

    branch.add(Bidirectional(LSTM(
        lstm_units,
        return_sequences=True)))
    branch.add(Dropout(dropout))

    # input_shape dropped here: Keras ignores it on non-first layers
    branch.add(Conv1D(
        filters = filter_num,
        kernel_size = ksize,
        strides = stride_1,
        padding = padding,
        activation = None))
    branch.add(BatchNormalization(axis=-1))
    branch.add(Activation('relu'))

    branch.add(MaxPooling1D(
        pool_size = pool_size))

    for x in range(1, cnn_layers):
        branch.add(Conv1D(
            filters = filter_num*int(math.pow(2,x)),
            kernel_size = ksize,
            strides = conv_stride,
            padding = padding,
            activation = None))
        branch.add(BatchNormalization(axis=-1))
        branch.add(Activation('relu'))
        branch.add(MaxPooling1D(
                pool_size = pool_size))

    if (not single_lstm_layer):
        branch.add(LSTM(
            lstm_units,
            return_sequences = True))
        branch.add(Dropout(dropout/2))

    branch.add(LSTM(
        lstm_units,
        return_sequences = False))
    branch.add(Dropout(dropout))

    branch.add(Dense(units=1))  # Keras 2: units, not output_dim
    branch.add(Activation('linear'))

    return branch
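A hedged usage sketch for build_cnn_layers: the imports and every hyperparameter value below are illustrative assumptions, with `layers` packing (features, timesteps) so that `input_shape=(layers[1], layers[0])` reads (50 timesteps, 1 feature).
import math
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Activation, BatchNormalization,
                                     Bidirectional, Conv1D, Dense, Dropout,
                                     LSTM, MaxPooling1D)

# Illustrative call; values are placeholders, not from the source
model = build_cnn_layers(layers=[1, 50], lstm_units=64, ksize=5, stride_1=2,
                         filter_num=32, cnn_layers=2, single_lstm_layer=True)
model.compile(loss='mse', optimizer='adam')  # Dense(1) + linear suits regression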
Example #2
def get_model():
    inp = Input(shape=(maxlen, ))
    x_3 = Embedding(max_features, EMBEDDING_DIM)(inp)
    cnn1 = Conv1D(128, 2, padding='same', strides=1, activation='relu')(x_3)
    cnn2 = Conv1D(128, 3, padding='same', strides=1, activation='relu')(x_3)
    cnn = keras.layers.concatenate([cnn1, cnn2], axis=-1)
    cnn1 = Conv1D(64, 2, padding='same', strides=1, activation='relu')(cnn)
    cnn1 = MaxPooling1D(pool_size=100)(cnn1)
    cnn2 = Conv1D(64, 3, padding='same', strides=1, activation='relu')(cnn)
    cnn2 = MaxPooling1D(pool_size=100)(cnn2)
    cnn = keras.layers.concatenate([cnn1, cnn2], axis=-1)
    flat = Flatten()(cnn)
    drop = Dropout(0.2)(flat)
    x = Dense(400, kernel_initializer='he_normal')(drop)
    x = PReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)
    x = Dense(6, activation="sigmoid")(x)
    adam = keras.optimizers.Adam(lr=0.001,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=1e-08)
    sgd = keras.optimizers.SGD(lr=0.001)  # defined but unused; the model compiles with adam
    model = Model(inputs=inp, outputs=x)
    model.compile(loss='binary_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy', 'binary_crossentropy'])
    return model
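get_model reads maxlen, max_features and EMBEDDING_DIM from module scope. A minimal sketch of the assumed surrounding setup (all values are placeholders):
import keras
from keras.layers import (Input, Embedding, Conv1D, MaxPooling1D, Flatten,
                          Dropout, Dense, PReLU, BatchNormalization)
from keras.models import Model

maxlen = 200          # padded sequence length (assumed)
max_features = 50000  # vocabulary size (assumed)
EMBEDDING_DIM = 300   # embedding width (assumed)

model = get_model()
model.summary()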
Example #3
def get_siamese_model(input_shape):
    # Define the tensors for the two input images
    left_input = Input(input_shape)
    right_input = Input(input_shape)

    # Convolutional Neural Network

    model = Sequential()

    model.add(
        Conv1D(filters=256, kernel_size=50, strides=50, activation='relu', weights=wghts_pretrained[0], padding='same',
               input_shape=input_shape))
    # model.add(MaxPooling1D(pool_size=2))
    model.add(
        Conv1D(filters=128, kernel_size=10, strides=1, activation='relu', weights=wghts_pretrained[1], padding='same'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(filters=128, kernel_size=5, strides=1, activation='sigmoid', weights=wghts_pretrained[2],
                     padding='same'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())

    # model.add(Dense((512),activation='sigmoid',kernel_regularizer=l2(1e-3),
    #                 kernel_initializer=initialize_weights, bias_initializer=initialize_bias))
    # model.add(Dropout(0.25))

    encoded_l = model(left_input)
    encoded_r = model(right_input)

    # Connect the inputs with the outputs
    siamese_net = Model(inputs=[left_input, right_input],
                        outputs=last_layer(encoded_l, encoded_r, lyr_name='L2'))  ## prediction and cosine_similarity
    return siamese_net
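wghts_pretrained and last_layer are defined elsewhere in the source. As a rough idea of what an 'L2' merge head could look like, here is a hypothetical stand-in, not the author's implementation:
from keras import backend as K
from keras.layers import Lambda, Dense

def last_layer(encoded_l, encoded_r, lyr_name='L2'):
    # hypothetical: L2 distance between the two encodings, then a sigmoid score
    dist = Lambda(
        lambda t: K.sqrt(K.maximum(K.sum(K.square(t[0] - t[1]),
                                         axis=-1, keepdims=True),
                                   K.epsilon())),
        name=lyr_name)([encoded_l, encoded_r])
    return Dense(1, activation='sigmoid')(dist)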
Example #4
def make_cnn(loss='categorical_crossentropy', optimizer='adam'):
    model = Sequential()
    model.add(
        Conv1D(1,
               19,
               strides=19,
               padding='same',
               input_shape=(n_components, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=2))
    # input_shape dropped below: only the first layer needs it
    model.add(Conv1D(32, 3, padding='same'))
    model.add(Activation('relu'))
    model.add(Conv1D(32, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Dropout(0.25))
    model.add(Conv1D(64, 3, padding='same'))
    model.add(Activation('relu'))
    model.add(Conv1D(64, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_cl))
    model.add(Activation('softmax'))
    model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
    return model
Example #5
def design_network(model_name):
    input1 = Input(shape=(input_length, 768))
    if model_name == 'CNN':
        lcov1 = Conv1D(filters=units, kernel_size=1, activation=relu)(input1)
        out = MaxPooling1D(pool_size=1)(lcov1)
    if model_name == 'BiLSTM':
        out = Bidirectional(LSTM(units))(input1)
    if 'Bert' in model_name or 'Xlnet' in model_name:
        convs = []
        for fsz in kernel_size:
            l_conv = Conv1D(filters=units, kernel_size=fsz,
                            activation=relu)(input1)
            lpool = MaxPooling1D(input_length - fsz + 1)(l_conv)
            convs.append(lpool)
        merge = concatenate(convs, axis=1)

        #   reshape = Reshape((units,3))(merge)
        permute = Permute((2, 1))(merge)
        if 'Att' in model_name:
            out = Bidirectional(LSTM(units, return_sequences=True))(permute)
            out = AttentionLayer(step_dim=units)(out)
        else:
            out = Bidirectional(LSTM(units))(permute)
    out = Dropout(keep_prob)(out)
    output = Dense(class_nums, activation=softmax)(out)
    model = Model(input1, output)
    model.compile(loss=losses.categorical_crossentropy,
                  optimizer=optimizers.Adam(lr=learning_rate),
                  metrics=['accuracy'])
    model.summary()
    return model
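AttentionLayer is imported from elsewhere. A common minimal implementation of this kind of per-timestep attention (score each step, softmax over time, weighted sum), offered as a sketch rather than the author's exact layer:
from keras import backend as K
from keras.layers import Layer

class AttentionLayer(Layer):
    # minimal additive attention; assumes input shape (batch, step_dim, features)
    def __init__(self, step_dim, **kwargs):
        self.step_dim = step_dim
        super(AttentionLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        self.W = self.add_weight(name='att_W', shape=(input_shape[-1], 1),
                                 initializer='glorot_uniform')
        self.b = self.add_weight(name='att_b', shape=(self.step_dim, 1),
                                 initializer='zeros')
        super(AttentionLayer, self).build(input_shape)

    def call(self, x):
        e = K.tanh(K.dot(x, self.W) + self.b)         # (batch, steps, 1)
        a = K.exp(e - K.max(e, axis=1, keepdims=True))
        a = a / K.sum(a, axis=1, keepdims=True)       # softmax over time
        return K.sum(x * a, axis=1)                   # (batch, features)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[-1])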
Example #6
class Model9:  # note: the model is built once, at class-definition time
    model = Sequential()
    model.add(
        Conv1D(1, 5, padding="same", activation="relu", input_shape=(1024, 1)))
    model.add(Conv1D(16, 5, padding="same", activation="relu"))
    model.add(MaxPooling1D(pool_size=5, padding="same"))
    model.add(Conv1D(16, 5, padding="same", activation="relu"))
    model.add(Conv1D(32, 5, padding="same", activation="relu"))
    model.add(MaxPooling1D(pool_size=5, padding="same"))
    model.add(Conv1D(32, 5, padding="same", activation="relu"))
    model.add(Conv1D(64, 5, padding="same", activation="relu"))
    model.add(MaxPooling1D(pool_size=5, padding="same"))
    model.add(Conv1D(64, 5, padding="same", activation="relu"))
    model.add(Conv1D(128, 5, padding="same", activation="relu"))
    model.add(MaxPooling1D(pool_size=5, padding="same"))
    model.add(Conv1D(128, 5, padding="same", activation="relu"))
    model.add(Conv1D(256, 5, padding="same", activation="relu"))
    model.add(MaxPooling1D(pool_size=5, padding="same"))
    # flatten the pooled feature map before the dense head; the original passed
    # input_dim to non-first layers, where Keras ignores it
    model.add(Flatten())
    model.add(Dense(256, activation="relu"))
    model.add(Dense(128, activation="relu"))
    model.add(Dense(64, activation="relu"))
    model.add(Dense(1, activation="sigmoid"))
    model.compile(loss="binary_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])
Example #7
def model_conv_3l_glob(data, paramdims):
    '''
    Conv1D stack, sized by paramdims:
        paramdims[0] filters x paramdims[1] kernel, relu
        paramdims[2] pooling
        paramdims[3] filters x paramdims[4] kernel
        paramdims[5] pooling
        paramdims[6] Dense
        GlobalAveragePooling
    '''
    input = Input(shape=data['input_1'].shape, name='input_1')
    layer = Conv1D(paramdims[0], kernel_size=(paramdims[1]),
                   activation='relu')(input)
    layer = BatchNormalization()(layer)

    layer = MaxPooling1D(pool_size=paramdims[2])(layer)

    layer = Conv1D(paramdims[3], kernel_size=(paramdims[4]),
                   activation='relu')(layer)
    layer = BatchNormalization()(layer)

    layer = MaxPooling1D(pool_size=paramdims[5])(layer)
    layer = Dense(paramdims[6])(layer)
    layer = Dropout(0.5)(layer)

    output = GlobalAveragePooling1D()(layer)
    return input, output
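model_conv_3l_glob returns input and output tensors rather than a compiled model. A sketch of how they might be wired up; the data layout and paramdims values are assumptions:
import numpy as np
from keras.models import Model

# assume data['input_1'] holds one sample of shape (timesteps, channels)
data = {'input_1': np.zeros((128, 16))}
inp, out = model_conv_3l_glob(data, paramdims=(32, 5, 2, 64, 3, 2, 10))
model = Model(inputs=inp, outputs=out)
model.compile(loss='categorical_crossentropy', optimizer='adam')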
Example #8
def make_cnn(loss='mean_absolute_error', optimizer='adam'):
    model = Sequential()
    model.add(Conv1D(1, 19, strides=19, padding='same',
                     input_shape=(n_components, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(32, 3, padding='same'))  # input_shape dropped: first layer only
    model.add(Activation('relu'))
    model.add(Conv1D(32, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Dropout(0.25))
    model.add(Conv1D(64, 3, padding='same'))
    model.add(Activation('relu'))
    model.add(Conv1D(64, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('relu'))
    model.compile(loss=loss, optimizer=optimizer, metrics=['mean_absolute_error'])
    return model
Example #9
def architect_model(input_shape, n_classes):
    inp = Input(shape=input_shape, dtype="float32")

    x = Conv1D(input_shape[1] * 2,
               kernel_size=4,
               padding="causal",
               activation="relu")(inp)
    x = MaxPooling1D(pool_size=4)(x)
    x = Dropout(0.25)(x)

    x = Conv1D(input_shape[1] * 2,
               kernel_size=4,
               padding="causal",
               activation="relu")(x)
    x = MaxPooling1D(pool_size=4)(x)
    x = Dropout(0.25)(x)

    x = Conv1D(input_shape[1] * 2,
               kernel_size=4,
               padding="causal",
               activation="relu")(x)
    x = MaxPooling1D(pool_size=4)(x)
    x = Dropout(0.25)(x)

    x = Concatenate()([GlobalAveragePooling1D()(x), GlobalMaxPooling1D()(x)])

    # Fully connected
    x = Dense(2048, activation="relu")(x)
    x = Dense(2048, activation="relu")(x)
    out = Dense(n_classes, activation="softmax")(x)

    return Model(inputs=inp, outputs=out)
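A quick smoke test for architect_model with synthetic data; the shapes and compile settings are illustrative assumptions, not from the source.
import numpy as np

model = architect_model(input_shape=(256, 8), n_classes=5)
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
X = np.random.rand(32, 256, 8).astype('float32')
y = np.eye(5)[np.random.randint(0, 5, size=32)]  # one-hot labels
model.fit(X, y, batch_size=8, epochs=1)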
Example #10
def __call__(self,
             window_size,
             n_channels=4,
             regression=False,
             dense=False):
    model = Sequential()
    model.add(
        Conv1D(64,
               7,
               padding='valid',
               input_shape=(window_size, n_channels),
               kernel_regularizer=l2(0.0001)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(2))
    model.add(Conv1D(64, 3, padding='valid',
                     kernel_regularizer=l2(0.0001)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(2))
    model.add(Conv1D(64, 3, padding='valid',
                     kernel_regularizer=l2(0.0001)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(2))
    model.add(Flatten())
    model.add(Dense(64, kernel_regularizer=l2(0.0001)))
    model.add(Activation('relu'))
    output_size = window_size if dense else 1
    model.add(Dense(output_size, kernel_regularizer=l2(0.0001)))
    if not regression:
        model.add(Activation('sigmoid'))
    return model
Example #11
def build_model():
    seq_input1 = Input(shape=(seq_size, dim), name='seq1')
    seq_input2 = Input(shape=(seq_size, dim), name='seq2')
    l1 = Conv1D(hidden_dim, 3)
    l2 = Conv1D(hidden_dim, 3)
    l3 = Conv1D(hidden_dim, 3)
    l4 = Conv1D(hidden_dim, 3)
    l5 = Conv1D(hidden_dim, 3)
    l6 = Conv1D(hidden_dim, 3)
    # apply the shared conv/pool stack to both inputs (layer weights are shared)
    def encode(seq_input):
        x = MaxPooling1D(2)(l1(seq_input))
        x = MaxPooling1D(2)(l2(x))
        x = MaxPooling1D(2)(l3(x))
        x = MaxPooling1D(2)(l4(x))
        x = MaxPooling1D(2)(l5(x))
        return GlobalAveragePooling1D()(l6(x))

    s1 = encode(seq_input1)
    s2 = encode(seq_input2)
    merge_text = multiply([s1, s2])
    x = Dense(hidden_dim, activation='linear')(merge_text)
    x = keras.layers.LeakyReLU(alpha=0.3)(x)
    x = Dense(int((hidden_dim + 7) / 2), activation='linear')(x)
    x = keras.layers.LeakyReLU(alpha=0.3)(x)
    main_output = Dense(2, activation='softmax')(x)
    merge_model = Model(inputs=[seq_input1, seq_input2], outputs=[main_output])
    return merge_model
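build_model relies on module-level seq_size, dim and hidden_dim; a hedged sketch of setting them and scoring a batch of sequence pairs (all values are placeholders, and the globals must live in the same module as build_model):
import numpy as np

seq_size, dim, hidden_dim = 512, 20, 50  # assumed globals

model = build_model()
model.compile(loss='categorical_crossentropy', optimizer='adam')
pair = [np.random.rand(4, seq_size, dim), np.random.rand(4, seq_size, dim)]
preds = model.predict(pair)  # (4, 2) softmax scores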
Example #12
def build_model(layers,
                cnn_layers,
                lstm_units=200,
                kernel_size=5,
                stride_1=2,
                filter_num=128):
    # parameters obtained from stock_model.py in Convolutional Neural Stock Market Technical Analyser
    dropout = 0.5
    conv_stride = 2
    ksize = kernel_size
    pool_size = 2
    padding = "same"

    model = Sequential()

    model.add(
        Conv1D(
            input_shape=(layers[1], layers[0]),  # (50, 1)
            filters=filter_num,
            kernel_size=ksize,
            strides=conv_stride,
            padding=padding,
            activation=None))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(MaxPooling1D(pool_size=pool_size))

    for x in range(1, cnn_layers):
        model.add(
            Conv1D(filters=filter_num * 2 * x,
                   kernel_size=ksize,
                   strides=conv_stride,
                   padding=padding,
                   activation=None))
        model.add(BatchNormalization(axis=-1))
        model.add(Activation('relu'))

        model.add(MaxPooling1D(pool_size=pool_size))

    model.add(
        GRU(lstm_units,  # input_shape dropped: Keras ignores it on non-first layers
            activation='tanh',
            return_sequences=True))
    model.add(Dropout(dropout / 2))

    model.add(GRU(lstm_units, activation='tanh', return_sequences=False))
    model.add(Dropout(dropout))

    model.add(Dense(units=1))  # linear output (Keras 2: units, not output_dim)
    model.add(Activation("linear"))

    print(model.summary())

    start = time.time()
    model.compile(loss="mse", optimizer="adadelta")
    print("> Compilation Time : ", time.time() - start)
    return model
Example #13
def build_model(layers, cnn_layers):
    # parameters obtained from stock_model.py in Convolutional Neural Stock Market Technical Analyser
    dropout = 0.5
    conv_size = 9
    conv_stride = 1
    ksize = 2
    pool_stride = 2
    filter_num = 64
    padding = "same"

    model = Sequential()

    model.add(
        Conv1D(
            input_shape=(layers[1], layers[0]),  # (50, 1)
            filters=filter_num,
            kernel_size=ksize,
            strides=conv_stride,
            padding=padding,
            activation=None))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(MaxPooling1D(pool_size=2))

    for x in range(1, cnn_layers):
        model.add(
            Conv1D(filters=filter_num * 2 * x,
                   kernel_size=ksize,
                   strides=conv_stride,
                   padding=padding,
                   activation=None))
        model.add(BatchNormalization(axis=-1))
        model.add(Activation('relu'))

        model.add(MaxPooling1D(pool_size=2))

    model.add(LSTM(
        #32,
        layers[1],  # 50 (Keras 2: positional units, not output_dim)
        return_sequences=True))
    model.add(Dropout(dropout / 2))

    model.add(
        LSTM(
            #16,
            #layers[2], # 100
            layers[1] * 2,  # 100
            return_sequences=False))
    model.add(Dropout(dropout))

    model.add(Dense(layers[3]))  # 1
    model.add(Activation("linear"))

    start = time.time()
    model.compile(loss="mse", optimizer="adadelta")
    print("> Compilation Time : ", time.time() - start)
    return model
Example #14
def model2(maxlen, batch_size, num_epochs, w2v, traindf, cvdf):
    def encoder(b):
        encoded_x, encoded_y = data_init.encode_w2v(df=b,
                                                    w2v=w2v,
                                                    maxlen=maxlen)
        return [[encoded_x, encoded_x, encoded_x], encoded_y]

    train_gen = batch_generator(df=traindf,
                                encoder=encoder,
                                batch_size=batch_size,
                                force_batch_size=True)
    cv_gen = batch_generator(df=cvdf, encoder=encoder, batch_size=batch_size)

    # creates the neural network consisting in 3 convolutional layers going to an LSTM layer
    model1 = Sequential()
    model1.add(
        Conv1D(50,
               6,
               padding='valid',
               activation='relu',
               input_shape=(maxlen, 300)))
    model1.add(MaxPooling1D(pool_size=3, padding='valid'))
    model1.add(Dropout(0.5))

    model2 = Sequential()
    model2.add(
        Conv1D(50,
               5,
               padding='valid',
               activation='relu',
               input_shape=(maxlen, 300)))
    model2.add(MaxPooling1D(pool_size=3, padding='valid'))
    model2.add(Dropout(0.5))

    model3 = Sequential()
    model3.add(
        Conv1D(50,
               4,
               padding='valid',
               activation='relu',
               input_shape=(maxlen, 300)))
    model3.add(MaxPooling1D(pool_size=3, padding='valid'))
    model3.add(Dropout(0.5))

    # Merge was removed in Keras 2; concatenate the branch outputs with the
    # functional API instead
    merged = concatenate([model1.output, model2.output, model3.output], axis=1)
    x = LSTM(60, return_sequences=True)(merged)
    x = Dropout(0.5)(x)
    x = LSTM(60)(x)
    x = Dropout(0.5)(x)
    out = Dense(1, activation='sigmoid')(x)
    model = Model(inputs=[model1.input, model2.input, model3.input], outputs=out)

    # compiles the model
    model.compile('adam', 'binary_crossentropy', metrics=['accuracy', 'mse'])

    nb_val_samples = len(cvdf)
    return model, train_gen, cv_gen, nb_val_samples
Example #15
def cnnModel(inputShape, CNNType):
    # model construction
    if CNNType == '1D':
        inputs = inputShape
        kernelSize = 3
        activFunc = 'relu'
        #activFunc = LeakyReLU()

        model = Sequential()
        model.add(Conv1D(32, kernelSize, input_shape=inputs))
        model.add(Activation(activFunc))
        model.add(BatchNormalization())
        model.add(MaxPooling1D(pool_size=2, strides=2))

        model.add(Conv1D(64, kernelSize))
        model.add(Activation(activFunc))
        model.add(BatchNormalization())
        model.add(MaxPooling1D(pool_size=2, strides=2))

        model.add(Conv1D(128, kernelSize))
        model.add(Activation(activFunc))
        model.add(BatchNormalization())

        model.add(GlobalMaxPooling1D())
        model.add(
            Dense(512,
                  kernel_initializer='glorot_normal',
                  bias_initializer='glorot_normal'))
        model.add(Activation(activFunc))
        model.add(Dropout(0.5))
        model.add(
            Dense(512,
                  kernel_initializer='glorot_normal',
                  bias_initializer='glorot_normal'))
        model.add(Activation(activFunc))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation=tf.nn.softmax))

        loss = keras.losses.categorical_crossentropy
        # loss=keras.losses.binary_crossentropy

        # Model optimizers and compilers
        #adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=1e-5)
        sgd = keras.optimizers.SGD(lr=0.001,
                                   decay=1e-5,
                                   momentum=0.9,
                                   nesterov=True)
        model.compile(loss=loss, optimizer=sgd, metrics=['accuracy'])
        cnn = model
        earlystop = keras.callbacks.EarlyStopping(monitor='val_loss',
                                                  min_delta=0,
                                                  patience=2,
                                                  verbose=0,
                                                  mode='auto')

        return cnn, earlystop
Example #16
def buildSplitModel(numClasses, seqLen, vecDim, contextDim, eventMap):
	"""
	Builds a GRU model on top of word embeddings, and doc2vec
	"""
	hidden = 512
	rnnHidden = 128
	denseDim = 256
	cl2 = .001
	drop = .5
	convSize = 2
	maxSize = 2
	shape = (seqLen, vecDim)

	left = Sequential()
	#left.add(GRU(rnnHidden, W_regularizer=l2(cl2), U_regularizer=l2(cl2), dropout_W=drop, dropout_U=drop, input_shape=shape, return_sequences=True))

	left.add(Conv1D(denseDim, convSize, kernel_regularizer=l2(cl2), input_shape=shape))
	left.add(MaxPooling1D(maxSize))

	#left.add(Conv1D(denseDim, convSize, W_regularizer=l2(cl2)))
	#left.add(MaxPooling1D(convSize))

	left.add(GRU(rnnHidden, kernel_regularizer=l2(cl2), recurrent_regularizer=l2(cl2), dropout=drop, recurrent_dropout=drop))
	
	right = Sequential()
	#right.add(GRU(rnnHidden, W_regularizer=l2(cl2), U_regularizer=l2(cl2), dropout_W=drop, dropout_U=drop, input_shape=shape, return_sequences=True))

	right.add(Conv1D(denseDim, convSize, kernel_regularizer=l2(cl2), input_shape=shape))
	right.add(MaxPooling1D(maxSize))

	#right.add(Conv1D(denseDim, convSize, W_regularizer=l2(cl2)))
	#right.add(MaxPooling1D(convSize))

	right.add(GRU(rnnHidden, kernel_regularizer=l2(cl2), recurrent_regularizer=l2(cl2), dropout=drop, recurrent_dropout=drop))

	context = Sequential()
	context.add(Dense(denseDim, input_shape=(contextDim,)))
	context.add(LeakyReLU(.01))
	context.add(Dropout(drop))
	
	#do nothing
	#context.add(Reshape((contextDim,), input_shape=(contextDim,)))

	# Merge was removed in Keras 2; concatenate the branches functionally
	merged = concatenate([left.output, right.output, context.output])

	x = Dense(denseDim, kernel_regularizer=l2(cl2))(merged)
	x = LeakyReLU(.01)(x)
	#x = MaxoutDense(denseDim, W_regularizer=l2(cl2))(x)  # MaxoutDense no longer exists in Keras 2
	x = Dropout(drop)(x)

	x = Dense(numClasses)(x)
	x = Activation("softmax")(x)

	model = Model(inputs=[left.input, right.input, context.input], outputs=x)
	model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy', microF1(eventMap)])

	return model
Example #17
def create_model2(num_task, input_len_l, input_len_r):
    K.clear_session()
    tf.set_random_seed(5005)
    left_dim = 4
    right_dim = 4
    num_units = 50
    input_l = input_len_l
    input_r = input_len_r

    nb_f_l = [90, 100]
    f_len_l = [7, 7]
    p_len_l = [4, 10]
    s_l = [2, 5]
    nb_f_r = [90, 100]
    f_len_r = [7, 7]
    p_len_r = [10, 10]
    s_r = [5, 5]

    left_input = Input(shape=(input_l, left_dim), name="left_input")
    right_input = Input(shape=(input_r, right_dim), name="right_input")

    left_conv1 = Conv1D(filters=nb_f_l[0],
                        kernel_size=f_len_l[0],
                        padding='valid',
                        activation="relu",
                        name="left_conv1")(left_input)
    left_pool1 = MaxPooling1D(pool_size=p_len_l[0],
                              strides=s_l[0],
                              name="left_pool1")(left_conv1)
    left_drop1 = Dropout(0.25, name="left_drop1")(left_pool1)

    right_conv1 = Conv1D(filters=nb_f_r[0],
                         kernel_size=f_len_r[0],
                         padding='valid',
                         activation="relu",
                         name="right_conv1")(right_input)
    right_pool1 = MaxPooling1D(pool_size=p_len_r[0],
                               strides=s_r[0],
                               name="right_pool1")(right_conv1)
    right_drop1 = Dropout(0.25, name="right_drop1")(right_pool1)

    merge = concatenate([left_drop1, right_drop1], name="merge", axis=-2)
    conv_merged = Conv1D(filters=100,
                         kernel_size=5,
                         padding='valid',
                         activation="relu",
                         name="conv_merged")(merge)
    #merged_pool = MaxPooling1D(pool_size=4, strides=2)(conv_merged)
    merged_pool = MaxPooling1D(pool_size=10, strides=5)(conv_merged)
    merged_drop = Dropout(0.25)(merged_pool)
    merged_flat = Flatten()(merged_drop)
    hidden1 = Dense(250, activation='relu', name="hidden1")(merged_flat)
    output = Dense(num_task, activation='sigmoid', name="output")(hidden1)
    model = Model(inputs=[left_input, right_input], outputs=output)
    print(model.summary())
    return model
Example #18
def get_model(numLabels, numConvLayers, numConvFilters, preLastLayerUnits,
              poolingDropout, learningRate, momentum, length):
    model = Sequential()
    l1_reg = 0.00001
    l2_reg = 0.00001
    l3_reg = 0.00001
    l4_reg = 0.00001
    dropout = 0.2  #0.2
    filter1 = 500
    filter2 = 250
    filter3 = 100
    conv1_layer = Conv1D(filters=filter1,
                         kernel_size=8,
                         input_shape=(length, 4),
                         padding="valid",
                         activation="relu",
                         use_bias=True,
                         kernel_regularizer=l2(l1_reg))
    # use_bias=True)
    model.add(conv1_layer)
    model.add(MaxPooling1D(pool_size=4))
    model.add(Dropout(dropout))

    convn_layer = Conv1D(padding="valid",
                         activation="relu",
                         kernel_size=4,
                         filters=filter2,
                         use_bias=True,
                         kernel_regularizer=l2(l2_reg))
    # use_bias=True)
    model.add(convn_layer)
    model.add(MaxPooling1D(pool_size=4))
    model.add(Dropout(dropout))

    convn_layer = Conv1D(padding="valid",
                         activation="relu",
                         kernel_size=4,
                         filters=filter3,
                         use_bias=True,
                         kernel_regularizer=l2(l3_reg))
    # use_bias=True)
    model.add(convn_layer)
    model.add(MaxPooling1D(pool_size=4))
    model.add(Dropout(dropout))

    model.add(Flatten())
    model.add(
        Dense(units=numLabels,
              activation='relu',
              use_bias=True,
              kernel_regularizer=l2(l4_reg)))
    print("Regularization values: ", l1_reg, l2_reg, l3_reg, l4_reg)
    print("Dropout values: ", dropout)
    print("Filter values: ", filter1, filter2, filter3)
    return model
Example #19
def createModel(label_num=3000):
    beishu = 2
    input_data = Input(name='features', shape=(None, 39))
    x1 = Conv1D(40 * beishu, 1, padding='same', activation='relu')(input_data)
    x1 = Conv1D(40 * beishu, 3, padding='same', activation='relu')(x1)
    x1 = Conv1D(40 * beishu, 3, padding='same')(x1)
    x1 = BatchNormalization()(x1)
    x1 = keras.layers.ReLU()(x1)
    x1 = MaxPooling1D(2)(x1)
    x1 = Conv1D(80 * beishu, 1, padding='same', activation='relu')(x1)
    x1 = Conv1D(80 * beishu, 3, padding='same', activation='relu')(x1)
    x1 = Conv1D(80 * beishu, 3, padding='same')(x1)
    x1 = BatchNormalization()(x1)
    x1 = keras.layers.ReLU()(x1)
    x1 = MaxPooling1D(2)(x1)
    x1 = Conv1D(160 * beishu, 1, padding='same', activation='relu')(x1)
    x1 = Conv1D(160 * beishu, 3, padding='same', activation='relu')(x1)
    x1 = Conv1D(160 * beishu, 3, padding='same', activation='relu')(x1)
    x1 = Conv1D(160 * beishu, 3, padding='same')(x1)
    # x1 = MaxPooling1D(1)(x1)
    x1 = BatchNormalization()(x1)
    x1 = keras.layers.ReLU()(x1)
    x1 = MaxPooling1D(2)(x1)
    x1 = Conv1D(320 * beishu, 1, padding='same', activation='relu')(x1)
    x1 = Conv1D(320 * beishu, 3, padding='same', activation='relu')(x1)
    x1 = Conv1D(320 * beishu, 3, padding='same', activation='relu')(x1)
    x1 = Conv1D(320 * beishu, 3, padding='same')(x1)
    # x1 = MaxPooling1D(1)(x1)
    x1 = BatchNormalization()(x1)
    x1 = keras.layers.ReLU()(x1)
    # # with tf.device('/cpu'):
    # x1 = GRU(100, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', dropout=0.3)(x1)
    # x2 = GRU(100, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', dropout=0.3)(x1)
    # x3 = add([x1, x2])
    x4 = Dense(1024, activation='relu')(x1)
    x4 = Dropout(0.3)(x4)
    preds = Dense(label_num)(x4)
    preds = Activation('softmax', name='Activation0')(preds)
    model_data = Model(input_data, outputs=preds)

    # ctc
    labels = Input(name='labels', shape=[None], dtype='float32')
    input_len = Input(name='input_len', shape=[1], dtype='int64')
    label_len = Input(name='label_len', shape=[1], dtype='int64')
    loss_out = Lambda(ctc_lambda, output_shape=(1, ),
                      name='ctc')([labels, preds, input_len, label_len])

    model = Model(inputs=[input_data, labels, input_len, label_len],
                  outputs=loss_out)
    model.summary()
    adam = Adam(lr=0.01)
    model.compile(loss={'ctc': lambda y_true, output: output}, optimizer=adam)

    print('model compile over')
    return model, model_data
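ctc_lambda is not shown here; the usual Keras pattern for such a Lambda wraps K.ctc_batch_cost, sketched below (the source's version may differ):
from keras import backend as K

def ctc_lambda(args):
    # argument order matches the Lambda call above:
    # [labels, preds, input_len, label_len]
    labels, y_pred, input_len, label_len = args
    return K.ctc_batch_cost(labels, y_pred, input_len, label_len)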
Example #20
def create_model(tokenizer=None):
  ## with tfidf input
  feat_nr = len(tokenizer.word_counts)+1

  S, F = 30, 300
  x1_input = Input(shape=(max_features,), dtype='int32', name='x1_input')
  x2_input = Input(shape=(max_features,), dtype='int32', name='x2_input')

  x1 = Embedding(input_dim=feat_nr, output_dim=64, trainable=False)(x1_input)
  x2 = Embedding(input_dim=feat_nr, output_dim=64, trainable=False)(x2_input)

  x1 = BatchNormalization()(x1)
  x1 = Conv1D(128, 3, activation='relu')(x1)
  x1 = MaxPooling1D(3)(x1)

  x2 = BatchNormalization()(x2)
  x2 = Conv1D(128, 3, activation='relu')(x2)
  x2 = MaxPooling1D(3)(x2)

  x = dot([x1, x2], -1, normalize=True)
#  x = Conv1D(128, 3, activation='relu')(x)
#  x = MaxPooling1D(3)(x)
#  x = Conv1D(128, 3, activation='relu')(x)
#  x = MaxPooling1D(3)(x)
#  x = Conv1D(128, 3, activation='relu')(x)
#  x = MaxPooling1D(3)(x)
  x = LSTM(128)(x)
#  x = Flatten()(x)
  x = Dense(128, kernel_initializer='uniform', activation='relu')(x)
  x = Dropout(0.3)(x)
  x = Dense(128, kernel_initializer='uniform', activation='relu')(x)
  x = Dropout(0.3)(x)
  x = Dense(128, kernel_initializer='uniform', activation='relu')(x)
  x = Dropout(0.3)(x)
  x = Dense(128, kernel_initializer='uniform', activation='relu')(x)
  x = Dropout(0.3)(x)
  x = Dense(128, kernel_initializer='uniform', activation='relu')(x)
  x = Dropout(0.3)(x)
  x = Dense(128, kernel_initializer='uniform', activation='relu')(x)
  x = Dropout(0.3)(x)
  x = Dense(128, kernel_initializer='uniform', activation='relu')(x)
  x = Dropout(0.3)(x)

  y = Dense(2, kernel_initializer='uniform', activation='softmax', name='output')(x)
  model = Model(inputs=[x1_input, x2_input], outputs=[y], name='final')
#  sgd = SGD(lr=learning_rate)
  opt = RMSprop(lr=learning_rate, rho=0.9, epsilon=1e-08, decay=0.00000000001)
  model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['acc','sparse_categorical_accuracy', 'binary_accuracy'])
   
  model.summary()
  plot_model(model, to_file='{}.png'.format(model_path), show_shapes=True, show_layer_names=True)
  print('model_path:{} steps:{} epochs:{}/{} batch_size:{} max_features:{}'.format(
       model_path, steps_per_epoch, init_epoch, total_epochs, batch_size, max_features))
  return model
Example #21
def create_model4(num_task, input_len_l, input_len_r):
    K.clear_session()
    tf.set_random_seed(5005)
    left_dim = 4
    right_dim = 4
    num_units = 60
    input_l = input_len_l
    input_r = input_len_r

    nb_f_l = [90, 100]
    f_len_l = [7, 7]
    p_len_l = [4, 10]
    s_l = [2, 5]
    nb_f_r = [90, 100]
    f_len_r = [7, 7]
    p_len_r = [10, 10]
    s_r = [5, 5]

    left_input = Input(shape=(input_l, left_dim), name="left_input")
    right_input = Input(shape=(input_r, right_dim), name="right_input")

    left_conv1 = Conv1D(filters=nb_f_l[0],
                        kernel_size=f_len_l[0],
                        padding='valid',
                        activation="relu",
                        name="left_conv1")(left_input)
    left_pool1 = MaxPooling1D(pool_size=p_len_l[0],
                              strides=s_l[0],
                              name="left_pool1")(left_conv1)
    left_drop1 = Dropout(0.25, name="left_drop1")(left_pool1)

    right_conv1 = Conv1D(filters=nb_f_r[0],
                         kernel_size=f_len_r[0],
                         padding='valid',
                         activation="relu",
                         name="right_conv1")(right_input)
    right_pool1 = MaxPooling1D(pool_size=p_len_r[0],
                               strides=s_r[0],
                               name="right_pool1")(right_conv1)
    right_drop1 = Dropout(0.25, name="right_drop1")(right_pool1)

    merge = concatenate([left_drop1, right_drop1], name="merge", axis=-2)

    gru = Bidirectional(GRU(num_units, return_sequences=True),
                        name="gru")(merge)
    #gru = Bidirectional(GRU(num_units),return_sequences=True,name="gru")(merged)
    flat = Flatten(name="flat")(gru)
    hidden1 = Dense(250, activation='relu', name="hidden1")(flat)
    output = Dense(num_task, activation='sigmoid', name="output")(hidden1)
    model = Model(inputs=[left_input, right_input], outputs=output)
    print(model.summary())
    return model
Example #22
def Model():  # note: this name shadows keras.models.Model
    model = Sequential()
    model.add(Conv1D(128, 5, activation='relu', input_shape=(300, 100)))
    model.add(MaxPooling1D(2))
    model.add(Conv1D(256, 5, activation='relu'))
    model.add(MaxPooling1D(2))
    model.add(Conv1D(512, 5, activation='relu'))
    model.add(LSTM(512))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['acc', f1score])
    return model
Example #23
def make_model_cnn(dir_name):
    mhc_in = Input(shape=(34, 20))
    mhc_branch = Conv1D(32, 5, kernel_initializer="he_uniform")(mhc_in)
    mhc_branch = PReLU()(mhc_branch)

    mhc_branch = Conv1D(32, 3, kernel_initializer="he_uniform")(mhc_branch)
    mhc_branch = PReLU()(mhc_branch)

    mhc_branch = MaxPooling1D(pool_size=POOL_SIZE)(mhc_branch)
    mhc_branch = Dropout(.2)(mhc_branch)

    pep_in = Input(shape=(9, 20))
    pep_branch = Conv1D(32, 5, kernel_initializer="he_uniform")(pep_in)
    pep_branch = PReLU()(pep_branch)

    pep_branch = Conv1D(32, 3, kernel_initializer="he_uniform")(pep_branch)
    pep_branch = PReLU()(pep_branch)

    pep_branch = MaxPooling1D(pool_size=POOL_SIZE)(pep_branch)
    pep_branch = Dropout(.2)(pep_branch)

    mhc_branch = Flatten()(mhc_branch)
    pep_branch = Flatten()(pep_branch)

    merged = concatenate([pep_branch, mhc_branch])
    merged = Dense(128, kernel_initializer="he_uniform")(merged)
    merged = Dropout(.3)(merged)
    merged = PReLU()(merged)

    merged = Dense(64, kernel_initializer="he_uniform")(merged)
    merged = Dropout(.3)(merged)
    merged = PReLU()(merged)

    merged = Dense(16, kernel_initializer="he_uniform")(merged)
    merged = Dropout(.3)(merged)
    merged = PReLU()(merged)

    merged = Dense(8, kernel_initializer="he_uniform")(merged)
    merged = Dropout(.3)(merged)
    merged = PReLU()(merged)

    merged = Dense(1)(merged)
    pred = PReLU()(merged)

    model = Model([mhc_in, pep_in], pred)
    model.compile(loss='mse', optimizer="nadam")

    with open(dir_name + "model.json", "w") as outf:
        outf.write(model.to_json())

    return model
Example #24
def regression_model(input_dim,
                     emb_mat,
                     seq_len,
                     conv_layers=1,
                     conv_filters=32,
                     filter_size=3,
                     lstm_dim=32,
                     fc_layers=1,
                     fc_units=64,
                     dropout=0.0,
                     metrics=None):
    """
    Compiles a model that learns representations from convolutional and
    recurrent layers. These representations are combined with auxiliary input,
    informing about the tweet context.
    """
    seqs = Input(shape=(seq_len, ), dtype='int32', name='text_input')
    emb = Embedding(emb_mat.shape[0],
                    emb_mat.shape[1],
                    weights=[emb_mat],
                    input_length=seq_len,
                    trainable=True,
                    name='word_embedding')(seqs)
    lstm = LSTM(lstm_dim, name='lstm_1')(emb)
    x = ZeroPadding1D(name='pad_1')(emb)
    x = Conv1D(conv_filters, filter_size, activation='relu', name='conv_1')(x)
    x = MaxPooling1D(name='pool_1')(x)
    for i in range(2, conv_layers + 1):
        pad_name = 'pad_' + str(i)
        conv_name = 'conv_' + str(i)
        pool_name = 'pool_' + str(i)
        x = ZeroPadding1D(name=pad_name)(x)
        x = Conv1D(conv_filters,
                   filter_size,
                   activation='relu',
                   name=conv_name)(x)
        x = MaxPooling1D(name=pool_name)(x)
    flatten = Flatten(name='flatten')(x)
    aux_input = Input(shape=(input_dim, ), name='aux_input')
    norm_inputs = BatchNormalization(name='bn_aux')(aux_input)
    x = concatenate([flatten, lstm, norm_inputs], name='comb_input')
    x = Dropout(dropout, name='dropout_1')(x)
    for i in range(1, fc_layers + 1):
        fc_name = 'fc_' + str(i)
        bn_name = 'bn_' + str(i)
        x = Dense(fc_units, activation='relu', name=fc_name)(x)
        x = BatchNormalization(name=bn_name)(x)
    output = Dense(1, activation='relu', name='output')(x)
    model = Model(inputs=[seqs, aux_input], outputs=[output])
    model.compile(optimizer='Adam', loss='mean_squared_error', metrics=metrics)
    return model
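A hedged example of instantiating regression_model with a random embedding matrix; all sizes and data are placeholders:
import numpy as np

emb_mat = np.random.rand(10000, 100).astype('float32')  # vocab x embedding dim
model = regression_model(input_dim=12, emb_mat=emb_mat, seq_len=50,
                         conv_layers=2, lstm_dim=32, fc_layers=2)
model.summary()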
Example #25
def cnn_melspect_urban_sound(input_shape):
    kernel_size = 4
    # activation_func = LeakyReLU()
    activation_func = Activation('relu')
    inputs = Input(input_shape)

    # Convolutional block_1
    conv1 = Conv1D(8, kernel_size)(inputs)
    act1 = activation_func(conv1)
    bn1 = BatchNormalization()(act1)
    pool1 = MaxPooling1D(pool_size=2, strides=2)(bn1)

    # Convolutional block_2
    conv2 = Conv1D(16, kernel_size)(pool1)
    act2 = activation_func(conv2)
    bn2 = BatchNormalization()(act2)
    pool2 = MaxPooling1D(pool_size=2, strides=2)(bn2)

    # Convolutional block_3
    conv3 = Conv1D(32, kernel_size)(pool2)
    act3 = activation_func(conv3)
    bn3 = BatchNormalization()(act3)

    # Convolutional block_4
    conv4 = Conv1D(64, kernel_size)(bn3)
    act4 = activation_func(conv4)
    bn4 = BatchNormalization()(act4)

    # Global Layers
    gmaxpl = GlobalMaxPooling1D()(bn4)
    gmeanpl = GlobalAveragePooling1D()(bn4)
    mergedlayer = concatenate([gmaxpl, gmeanpl], axis=1)

    # Regular MLP
    dense1 = Dense(512,
                   kernel_initializer='glorot_normal',
                   bias_initializer='glorot_normal')(mergedlayer)
    actmlp = activation_func(dense1)
    reg = Dropout(0.5)(actmlp)

    dense2 = Dense(512,
                   kernel_initializer='glorot_normal',
                   bias_initializer='glorot_normal')(reg)
    actmlp = activation_func(dense2)
    reg = Dropout(0.5)(actmlp)

    dense2 = Dense(10, activation='softmax')(reg)

    model = Model(inputs=[inputs], outputs=[dense2])
    return model, 'Kernel size 4 - 8/16/32/64'
Example #26
def create_conv1d_model(X_train):
    """

    """
    model = Sequential()
    # example here: https://gist.github.com/jkleint/1d878d0401b28b281eb75016ed29f2ee
    model.add(
        Conv1D(64,
               30,
               strides=1,
               padding='valid',
               kernel_initializer='glorot_normal',
               activation=None,
               input_shape=(X_train.shape[1], 1)))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    # https://github.com/fchollet/keras/issues/4403 note on TimeDistributed
    model.add(MaxPooling1D(pool_size=2, strides=2, padding='valid'))
    model.add(
        Conv1D(256,
               30,
               strides=1,
               padding='valid',
               kernel_initializer='glorot_normal',
               activation=None))  # input_shape dropped: only the first layer needs it
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    # https://github.com/fchollet/keras/issues/4403 note on TimeDistributed
    model.add(MaxPooling1D(pool_size=2, strides=2, padding='valid'))
    # model.add(Flatten())  # dimensions were too big with this
    model.add(GlobalAveragePooling1D())
    model.add(Dense(256, kernel_initializer='glorot_normal'))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    model.add(Dropout(0.5))
    model.add(Dense(128, kernel_initializer='glorot_normal'))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    model.add(Dropout(0.5))
    model.add(Dense(1))

    # build model using keras documentation recommended optimizer initialization
    optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)

    # compile the model
    model.compile(loss='mean_squared_error', optimizer=optimizer)

    return model
Example #27
def create_cnn_model(l_seq):
    inputs = Input(shape=(l_seq, in_out_neurons))
    x = Conv1D(32, 3, activation='relu', padding='valid')(inputs)
    x = Conv1D(32, 3, activation='relu', padding='valid')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(64, 3, activation='relu', padding='valid')(x)  # was (inputs), which discarded the first block
    x = Conv1D(64, 3, activation='relu', padding='valid')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Flatten()(x)
    x = Dense(128, activation='relu')(x)
    x = Dense(64, activation='relu')(x)
    predictions = Dense(in_out_neurons, activation='linear')(x)
    model = Model(inputs=inputs, outputs=predictions)
    model.compile(loss="mean_squared_error", optimizer="adam")
    return model
Example #28
    def model(self):
        input_tensor = Input(shape=self.get_input_shape(), name='input')
        cnn_layer_1 = Conv1D(kernel_initializer=self.initializer,
                             activation=self.activation,
                             padding="same",
                             filters=self.num_filters[0],
                             kernel_size=self.filter_len,
                             kernel_regularizer=l2(self.l2),
                             bias_regularizer=l2(self.l2))(input_tensor)
        cnn_layer_2 = Conv1D(kernel_initializer=self.initializer,
                             activation=self.activation,
                             padding="same",
                             filters=self.num_filters[1],
                             kernel_size=self.filter_len,
                             kernel_regularizer=l2(self.l2),
                             bias_regularizer=l2(self.l2))(cnn_layer_1)
        maxpool_1 = MaxPooling1D(pool_size=self.pool_length)(cnn_layer_2)
        flattener = Flatten()(maxpool_1)
        layer_3 = Dense(units=self.num_hidden[0],
                        kernel_initializer=self.initializer,
                        activation=self.activation,
                        kernel_regularizer=l2(self.l2),
                        bias_regularizer=l2(self.l2))(flattener)
        layer_4 = Dense(units=self.num_hidden[1],
                        kernel_initializer=self.initializer,
                        activation=self.activation,
                        kernel_regularizer=l2(self.l2),
                        bias_regularizer=l2(self.l2))(layer_3)
        mu, sigma = GaussianLayer(len(self.targetname),
                                  name='main_output')(layer_4)

        model = Model(inputs=input_tensor, outputs=mu)

        return model, sigma
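GaussianLayer is defined elsewhere; judging by its use it returns a (mu, sigma) pair for len(self.targetname) targets. One plausible minimal version, not the author's:
from keras import backend as K
from keras.layers import Layer

class GaussianLayer(Layer):
    # sketch: two parallel affine heads; softplus keeps sigma positive
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(GaussianLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        in_dim = input_shape[-1]
        self.w_mu = self.add_weight(name='w_mu', shape=(in_dim, self.output_dim),
                                    initializer='glorot_uniform')
        self.b_mu = self.add_weight(name='b_mu', shape=(self.output_dim,),
                                    initializer='zeros')
        self.w_sigma = self.add_weight(name='w_sigma', shape=(in_dim, self.output_dim),
                                       initializer='glorot_uniform')
        self.b_sigma = self.add_weight(name='b_sigma', shape=(self.output_dim,),
                                       initializer='zeros')
        super(GaussianLayer, self).build(input_shape)

    def call(self, x):
        mu = K.dot(x, self.w_mu) + self.b_mu
        sigma = K.softplus(K.dot(x, self.w_sigma) + self.b_sigma) + K.epsilon()
        return [mu, sigma]

    def compute_output_shape(self, input_shape):
        return [(input_shape[0], self.output_dim)] * 2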
Example #29
def VDCNN_model(input_shape, num_classes, num_words, emb_size, emb_matrix,
                num_filters=num_filters_default, top_k=8, emb_trainable=False):
    inputs = Input(shape=(input_shape,), dtype='int32', name='inputs')

    embedded_sent = Embedding(num_words, emb_size, weights=[emb_matrix],
                              trainable=emb_trainable, name='embs')(inputs)

    conv = Conv1D(filters=64, kernel_size=3, strides=2, padding="same")(
        embedded_sent)

    for i in range(len(num_filters)):
        conv = ConvBlockVDCNN(conv.get_shape().as_list()[1:], num_filters[i])(
            conv)
        conv = MaxPooling1D(pool_size=3, strides=2, padding="same")(conv)

    def k_max_pooling(x):
        x = tf.transpose(x, [0, 2, 1])
        k_max = tf.nn.top_k(x, k=top_k)
        return tf.reshape(k_max[0], (-1, num_filters[-1] * top_k))

    k_max = Lambda(k_max_pooling, output_shape=(num_filters[-1] * top_k,))(
        conv)

    # fully-connected layers
    fc1 = Dropout(0.2)(
        Dense(4096, activation='relu', kernel_initializer='he_normal')(k_max))
    fc2 = Dropout(0.2)(
        Dense(2048, activation='relu', kernel_initializer='he_normal')(fc1))
    fc3 = Dense(num_classes, activation='softmax')(fc2)

    model = Model(inputs=inputs, outputs=fc3)
    return model
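ConvBlockVDCNN comes from elsewhere in the source; in the VDCNN paper a convolutional block is two conv/batch-norm/ReLU stacks. A guess at the helper's shape (it is called with a shape, then applied to a tensor, so it must return a callable model):
from keras.layers import Activation, BatchNormalization, Conv1D, Input
from keras.models import Model

def ConvBlockVDCNN(input_shape, filters):
    # sketch only; the source's block may differ (e.g. shortcut connections)
    inp = Input(shape=input_shape)
    x = inp
    for _ in range(2):
        x = Conv1D(filters, 3, padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
    return Model(inputs=inp, outputs=x)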
Example #30
def build_sentence_model(input_set_size, vector_size, maxlen):
    word_inputs = Input(shape=(maxlen, ), dtype='int32')
    embed = Embedding(input_set_size, vector_size,
                      input_length=maxlen)(word_inputs)

    # 1 conv
    conv1_1 = Conv1D(128, 3, padding='same',
                     activation='relu')(embed)
    bn1 = BatchNormalization()(conv1_1)  # Keras 2 dropped BatchNormalization's mode argument
    pool1 = MaxPooling1D(pool_size=2)(bn1)
    drop1 = Dropout(0.3)(pool1)
    '''
	# 2 conv
	conv2_1 = Convolution1D(128, 3, border_mode='same', activation='relu')(drop1)
	bn2 = BatchNormalization(mode=1)(conv2_1)
	pool2 = MaxPooling1D(pool_length=2)(bn2)
	drop2 = Dropout(0.2)(bn2)
	'''

    #gru 256
    bgru = Bidirectional(GRU(192, return_sequences=False),
                         merge_mode='sum')(drop1)
    drop = Dropout(0.5)(bgru)

    drop_3d = Reshape((192, 1))(drop)
    att = TimeDistributed(Dense(1))(drop_3d)
    att = Activation(activation="softmax")(att)
    merg1 = Flatten()(multiply([drop_3d, att]))  # Keras 2: multiply replaces merge(mode='mul')

    sentence_model = Model(inputs=[word_inputs], outputs=merg1)
    sentence_model.summary()
    return sentence_model