def build(self):
    print('\nBuilding model...')
    # create the model
    embedding_vector_length = settings['EMBEDDING_VECTOR_LENGTH']
    self.model = Sequential()
    self.model.add(
        Embedding(self.top_words,
                  embedding_vector_length,
                  input_length=self.max_words_limit))
    self.model.add(
        Convolution1D(nb_filter=settings['CNN_NO_OF_FILTER'],
                      filter_length=settings['CNN_FILTER_LENGTH'],
                      border_mode='same',
                      activation='relu'))
    self.model.add(MaxPooling1D(pool_length=settings['CNN_POOL_LENGTH']))
    self.model.add(LSTM(settings['LSTM_CELLS_COUNT']))
    self.model.add(Dropout(settings['DROPOUT']))
    self.model.add(Dense(self.num_classes, activation='softmax'))
    print(self.model.summary())
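The method builds and summarizes the network but never compiles it. A minimal sketch of a plausible final step inside build(), assuming one-hot labels to match the softmax head (the optimizer choice is an assumption, not part of the original):

    # Assumed follow-up: compile the classifier so the softmax output
    # trains against one-hot labels.
    self.model.compile(loss='categorical_crossentropy',
                       optimizer='adam',
                       metrics=['accuracy'])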
Example #2
    def build_cnn_model(self, embedding_weights):
        model = Sequential()
        vocab_size = len(embedding_weights)
        # The embedding layer maps positive integers (word indices) to dense vectors of fixed size, e.g. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
        model.add(
            Embedding(
                input_dim=vocab_size,  # vocabulary size
                output_dim=EMBEDDING_SIZE,
                input_length=self.max_len,
                weights=[embedding_weights])
        )  # (None, MAX_SENTENCE_LENGTH, EMBEDDING_SIZE), where None is the batch dimension
        model.add(SpatialDropout1D(0.2))
        model.add(Convolution1D(filters=128, kernel_size=3, activation='relu'))
        model.add(GlobalMaxPool1D())  # global max pooling over the time dimension; MaxPooling1D instead pools over fixed-size windows
        model.add(Dense(1, activation="sigmoid"))
        # model.add(Dense(2, activation='softmax'))

        model.summary()
        return model
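A hedged call sketch: `clf` stands for an instance of the surrounding class with max_len set, and the random matrix is a placeholder for real pretrained vectors (e.g. from word2vec):

    import numpy as np

    EMBEDDING_SIZE = 100  # assumed to match the module-level constant
    weights = np.random.uniform(-0.05, 0.05, (5000, EMBEDDING_SIZE))
    model = clf.build_cnn_model(weights)  # `clf` is a hypothetical instance
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])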
Example #3
def get_feature_model(params):
    embedding_dims = params['embedding_dims']
    max_features = 8001
    model = Sequential()
    model.add(Embedding(
        max_features,
        embedding_dims,
        input_length=MAXLEN,
        dropout=params['embedding_dropout']))
    model.add(Convolution1D(
        nb_filter=params['nb_filter'],
        filter_length=params['filter_length'],
        border_mode='valid',
        activation='relu',
        subsample_length=1))
    model.add(MaxPooling1D(
        pool_length=params['pool_length'], stride=params['stride']))
    model.add(Flatten())
    return model
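Since border_mode='valid' and the explicit pool stride determine the flattened size, a quick sanity check of the shape arithmetic may help; the concrete numbers below are placeholders, not values from the surrounding project:

    # Assumed example values; the real ones come from `params` and MAXLEN.
    MAXLEN, filter_length, pool_length, stride = 100, 3, 2, 1
    conv_len = MAXLEN - filter_length + 1                # 'valid' Conv1D -> 98
    pooled_len = (conv_len - pool_length) // stride + 1  # MaxPooling1D -> 97
    # Flatten() then emits pooled_len * nb_filter features.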
Example #4
def build_graph(n_variables):
    '''
    Creates the Graph component of the model, i.e. the multi-width
    conv + bidirectional-GRU branches.
    '''
    nb_feature_maps = 64
    ngram_filters = [1, 2, 3, 4, 5]  #, 6, 7, 8]

    graph = Graph()
    graph.add_input(name='data', input_shape=(N_TRACKS, n_variables))

    for n_gram in ngram_filters:
        graph.add_node(Convolution1D(nb_feature_maps,
                                     n_gram,
                                     activation='relu',
                                     input_shape=(N_TRACKS, n_variables)),
                       name='conv_%s' % n_gram,
                       input='data')

        graph.add_node(
            GRU(25),
            name='gru_fwd_%s' % n_gram,
            input='conv_%s' % n_gram,
        )

        graph.add_node(
            GRU(25, go_backwards=True),
            name='gru_bwd_%s' % n_gram,
            input='conv_%s' % n_gram,
        )

        pass_thru = Lambda(lambda x: x)
        graph.add_node(pass_thru,
                       name='unit_{}'.format(n_gram),
                       inputs=['gru_fwd_%s' % n_gram,
                               'gru_bwd_%s' % n_gram])

    graph.add_node(Dropout(0.4),
                   name='dropout',
                   inputs=['unit_{}'.format(n) for n in ngram_filters],
                   create_output=True)

    return graph
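Graph was removed after the Keras 1.x line; for readers on newer Keras, a minimal functional-API sketch of the same topology (layer widths follow the snippet, while the track count and everything else are assumptions):

    from keras.layers import Input, Conv1D, GRU, Dropout, concatenate
    from keras.models import Model

    def build_graph_functional(n_variables, n_tracks=20):  # n_tracks assumed
        data = Input(shape=(n_tracks, n_variables))
        branches = []
        for n_gram in [1, 2, 3, 4, 5]:
            conv = Conv1D(64, n_gram, activation='relu')(data)
            fwd = GRU(25)(conv)
            bwd = GRU(25, go_backwards=True)(conv)
            branches.append(concatenate([fwd, bwd]))  # Graph's default merge
        out = Dropout(0.4)(concatenate(branches))
        return Model(inputs=data, outputs=out)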
Example #5
def get_feature_model():
    embedding_dims = 128
    max_features = 8001
    model = Sequential()
    model.add(
        Embedding(max_features,
                  embedding_dims,
                  input_length=MAXLEN,
                  dropout=0.2))
    model.add(
        Convolution1D(nb_filter=32,
                      filter_length=128,
                      border_mode='valid',
                      activation='relu',
                      subsample_length=1))
    model.add(MaxPooling1D(pool_length=64, stride=32))
    model.add(Flatten())
    model.summary()
    return model
Example #6
def get_conv_model(config):

    nclass = config.n_classes
    input_length = config.audio_length

    model = Sequential()
    model.add(
        Convolution1D(16,
                      9,
                      activation='relu',
                      padding="valid",
                      input_shape=(input_length, 1)))
    model.add(Convolution1D(16, 9, activation='relu', padding="valid"))
    model.add(MaxPooling1D(16))
    model.add(Dropout(rate=0.1))

    model.add(Convolution1D(32, 3, activation='relu', padding="valid"))
    model.add(Convolution1D(32, 3, activation='relu', padding="valid"))
    model.add(MaxPooling1D(4))
    model.add(Dropout(rate=0.1))

    model.add(Convolution1D(32, 3, activation='relu', padding="valid"))
    model.add(Convolution1D(32, 3, activation='relu', padding="valid"))
    model.add(MaxPooling1D(4))
    model.add(Dropout(rate=0.1))

    model.add(Convolution1D(256, 3, activation='relu', padding="valid"))
    model.add(Convolution1D(256, 3, activation='relu', padding="valid"))
    model.add(GlobalMaxPooling1D())
    model.add(Dropout(rate=0.2))

    model.add(Dense(64, activation='relu'))
    model.add(Dense(1028, activation='relu'))
    model.add(Dense(nclass, activation='softmax'))

    opt = optimizers.Adam(config.learning_rate)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['acc'])
    return model
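A hedged call sketch; the real config object comes from the surrounding project, so the namedtuple and every value below are placeholders:

    from collections import namedtuple

    Config = namedtuple('Config', 'n_classes audio_length learning_rate')
    model = get_conv_model(Config(n_classes=41, audio_length=16000,
                                  learning_rate=0.001))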
Example #7
def cnn_train(X_train, y_train, vocab_size):

    X_train = sequence.pad_sequences(X_train, maxlen=MAX_LEN)

    print('Build model...')
    model = Sequential()
    model.add(Embedding(vocab_size, EMBED_SIZE, input_length=MAX_LEN))

    model.add(Dropout(0.25))

    # we add a Convolution1D, which will learn nb_filter
    # word group filters of size filter_length:
    model.add(
        Convolution1D(nb_filter=nb_filter,
                      filter_length=filter_length,
                      border_mode='valid',
                      activation='relu',
                      subsample_length=1))
    # we use standard max pooling (halving the output of the previous layer):
    model.add(MaxPooling1D(pool_length=2))

    # We flatten the output of the conv layer,
    # so that we can add a vanilla dense layer:
    model.add(Flatten())

    # We add a vanilla hidden layer:
    model.add(Dense(HIDDEN_SIZE))
    model.add(Dropout(0.25))
    model.add(Activation('relu'))

    # We project onto a single unit output layer, and squash it with a sigmoid:
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy', optimizer='rmsprop')
    model.fit(X_train,
              y_train,
              batch_size=BATCH_SIZE,
              nb_epoch=EPOCHS,
              show_accuracy=True)

    return model
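show_accuracy dates from Keras 0.x and was dropped in later releases; on Keras 1.x and up, the usual equivalent is to request accuracy at compile time instead (a sketch of the substitution, with nothing else changed):

    model.compile(loss='binary_crossentropy', optimizer='rmsprop',
                  metrics=['accuracy'])
    model.fit(X_train, y_train, batch_size=BATCH_SIZE, nb_epoch=EPOCHS)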
Example #8
def model_relu6():

    batch_size = 32
    filter_length = 5
    nb_filter = 64
    pool_length = 4
    nb_epoch = 3

    inputDim = 26

    model = Sequential()
    # input vector dimension
    model.add(Embedding(inputDim * inputDim * inputDim, inputDim, dropout=0.2))

    model.add(Convolution1D(nb_filter=nb_filter,
                            filter_length=filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))

    model.add(MaxPooling1D(pool_length=pool_length))

    model.add(Bidirectional(LSTM(1024, return_sequences=True)))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(Bidirectional(LSTM(2048, return_sequences=True)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(Bidirectional(LSTM(512, return_sequences=True)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(Bidirectional(LSTM(256)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=[jacek_auc, discussion41015_auc])

    return model
Example #9
def build_lstm_model(top_words,
                     embedding_size,
                     max_input_length,
                     num_outputs,
                     internal_lstm_size=100,
                     embedding_matrix=None,
                     embedding_trainable=True):
    """ 
    Parameters
    top_words : int
        Size of the vocabulary
    embedding_size : int
        Number of dimensions of the word embedding. e.g. 300 for Google word2vec
    embedding_matrix: None, or `top_words` x `embedding_size` matrix
        Initial/pre-trained embeddings
    embedding_trainable : bool
        Whether we should train the word embeddings. Must be true if no embedding matrix provided
    """

    if not embedding_trainable:
        assert embedding_matrix is not None, "Must provide an embedding matrix if not training one"

    _weights = None
    if embedding_matrix is not None:
        _weights = [embedding_matrix]

    model = Sequential()
    model.add(
        Embedding(top_words,
                  embedding_size,
                  input_length=max_input_length,
                  weights=_weights,
                  trainable=embedding_trainable))
    model.add(
        Convolution1D(filters=32,
                      kernel_size=3,
                      padding='same',
                      activation='relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(LSTM(internal_lstm_size))
    model.add(Dense(num_outputs, activation='softmax'))
    return model
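A hedged usage sketch with frozen pre-trained embeddings; the random matrix and every dimension are placeholders standing in for real word2vec-style vectors:

    import numpy as np

    top_words, embedding_size = 5000, 300
    W = np.random.randn(top_words, embedding_size).astype('float32')
    model = build_lstm_model(top_words, embedding_size, max_input_length=500,
                             num_outputs=4, embedding_matrix=W,
                             embedding_trainable=False)
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])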
Example #10
    def create(self, charset_len, weights=None):
        latent_dim = 500
        max_len = 120

        inputs = Input(shape=(max_len, charset_len))

        #6 Convolutional Layers (All Conv)
        x = Convolution1D(charset_len * 2, 1, activation='relu',
                          name='conv_1')(inputs)
        x = Convolution1D(charset_len * 2,
                          1,
                          subsample_length=2,
                          activation='relu',
                          name='resh_1')(x)  # chain from conv_1, not from the raw inputs

        x = Convolution1D(charset_len, 1, activation='relu', name='conv_2')(x)
        x = Convolution1D(charset_len,
                          1,
                          subsample_length=2,
                          activation='relu',
                          name='resh_2')(x)

        x = Convolution1D(charset_len // 2,
                          1,
                          activation='relu',
                          name='resize_1')(x)
        x = Convolution1D(charset_len // 2,
                          1,
                          subsample_length=max_len // 4,
                          activation='relu',
                          name='resh_3')(x)

        x = Flatten()(x)

        x = Dropout(0.2)(x)
        x = Dense(240, name='dense_1')(x)
        x = Dense(charset_len, name='dense_2')(x)
        x = Dense(1, name='dense_3')(x)

        predictions = Activation('sigmoid')(x)

        self.sol = Model(input=inputs, output=predictions)

        if weights:
            self.sol.load_weights(weights)

        self.sol.compile(optimizer='adam',
                         loss='binary_crossentropy',
                         metrics=['mean_absolute_error'])
Example #11
def build_mixed_model(max_features, seqlen, nb_classes):
    print('build mixed model ...')
    model = Sequential()
    model.add(Embedding(max_features, 100, input_length=seqlen))
    model.add(Dropout(0.25))
    model.add(
        Convolution1D(nb_filter=200,
                      filter_length=10,
                      border_mode='valid',
                      activation='relu'))
    model.add(MaxPooling1D(pool_length=50))
    model.add(LSTM(100))
    model.add(Dropout(0.25))
    model.add(Activation('relu'))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=["accuracy"])
    return model
Example #12
def cnn(sequence_input):
    embedded = embedding_layer(sequence_input)
    embedded = Dropout(0.25)(embedded)
    # convolutional layer
    convolution = Convolution1D(filters=nb_filter,
                                kernel_size=kernel_size,
                                padding='valid',
                                activation='relu',
                                strides=1)(embedded)

    maxpooling = MaxPooling1D(pool_size=2)(convolution)
    maxpooling = Flatten()(maxpooling)

    # We add a vanilla hidden layer:
    dense = Dense(70)(maxpooling)  # best: 120
    dense = Dropout(0.25)(dense)  # best: 0.25
    dense = Activation('relu')(dense)
    output = Dense(2, activation='softmax')(dense)
    model = Model(inputs=sequence_input, outputs=output)
    return model
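A hedged call sketch; the surrounding module is assumed to define embedding_layer, nb_filter and kernel_size, and the input length below is a placeholder:

    from keras.layers import Input

    seq_in = Input(shape=(100,), dtype='int32')
    model = cnn(seq_in)
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])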
Example #13
def hierarchical_cnn(input_shape, aux_shape, targets=1, hidden=256,
                     multiclass=False, learn_rate=1e-4):
    x = Input(shape=input_shape, name='x')
    xx = Convolution1D(nb_filter=64, filter_length=3, border_mode='same',
                       activation='relu')(x)
    xx = MaxPooling1D(pool_length=3)(xx)

    xx = Bidirectional(LSTM(256, activation='relu'), merge_mode='concat')(xx)
    xx = Dropout(0.5)(xx)

    dx = Input(shape=aux_shape, name='aux')

    xx = concatenate([xx, dx])
    if multiclass:
        y = Dense(targets, activation='softmax')(xx)
        model = Model(inputs=[x, dx], outputs=[y])
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=learn_rate),
                      metrics=['categorical_accuracy'])
    else:
        y = Dense(targets, activation='sigmoid')(xx)
        model = Model(inputs=[x, dx], outputs=[y])
        model.compile(loss='binary_crossentropy',
                      optimizer=Adam(lr=learn_rate),
                      metrics=['accuracy'])
    return model
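A hedged call sketch with placeholder shapes: 1000 timesteps of 30 features plus a 12-dimensional auxiliary vector, binary target:

    model = hierarchical_cnn(input_shape=(1000, 30), aux_shape=(12,))
    model.fit([x_batch, aux_batch], y_batch)  # all three arrays are placeholders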
Example #14
def buildModel_max(vocab_size):
    model = Sequential()
    model.add(
        Embedding(vocab_size, EMBED_SIZE, input_length=MAX_LEN, dropout=0.2))
    # we add a Convolution1D, which will learn nb_filter
    # word group filters of size filter_length:
    model.add(
        Convolution1D(nb_filter=nb_filter,
                      filter_length=filter_length,
                      border_mode='valid',
                      activation='relu',
                      subsample_length=1))
    # we use standard max pooling (halving the output of the previous layer):
    model.add(MaxPooling1D(pool_length=2))

    # We flatten the output of the conv layer,
    # so that we can add a vanilla dense layer:
    model.add(Flatten())
    #    model.add(RepeatVector(HIDDEN_SIZE))
    return model
Example #15
def cnn_prediction(X_train, y_train, X_test, y_test, vocab_size):
    X_train = sequence.pad_sequences(X_train, maxlen=MAX_LEN)
    X_test = sequence.pad_sequences(X_test, maxlen=MAX_LEN)

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    print('Build model...')
    model = Sequential()
    model.add(
        Embedding(vocab_size, EMBED_SIZE, input_length=MAX_LEN, dropout=0.2))
    model.add(
        Convolution1D(nb_filter=nb_filter,
                      filter_length=filter_length,
                      border_mode='valid',
                      activation='relu',
                      subsample_length=1))

    model.add(MaxPooling1D(pool_length=2))

    model.add(Flatten())

    model.add(Dense(HIDDEN_SIZE))
    model.add(Dropout(0.25))
    model.add(Activation('relu'))

    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy', optimizer='rmsprop')
    model.fit(X_train,
              y_train,
              batch_size=BATCH_SIZE,
              nb_epoch=EPOCHS,
              show_accuracy=True,
              validation_data=(X_test, y_test))

    X_pred = model.predict(X_test)
    results = [result[0] for result in X_pred]

    return readResult(y_test, results)
Example #16
def set_up_model_up():
    print('building model')

    seq_input_shape = (2000, 4)
    nb_filter = 64
    filter_length = 6
    input_shape = (2000, 4)
    attentionhidden = 256

    seq_input = Input(shape=seq_input_shape, name='seq_input')
    convul1 = Convolution1D(filters=nb_filter,
                            kernel_size=filter_length,
                            padding='valid',
                            activation='relu',
                            kernel_constraint=maxnorm(3),
                            strides=1)  # Keras 2 name; subsample_length is the Keras 1 spelling

    pool_ma1 = MaxPooling1D(pool_size=3)
    dropout1 = Dropout(0.5977908689086315)
    dropout2 = Dropout(0.30131233477637737)
    decoder = Attention(hidden=attentionhidden, activation='linear')
    dense1 = Dense(1)
    dense2 = Dense(1)

    output_1 = pool_ma1(convul1(seq_input))
    output_2 = dropout1(output_1)
    att_decoder = decoder(output_2)
    output_3 = attention_flatten(output_2._keras_shape[2])(att_decoder)

    output_4 = dense1(dropout2(Flatten()(output_2)))
    all_outp = concatenate([output_3, output_4])  # legacy merge() was removed in Keras 2
    output_5 = dense2(all_outp)
    output_f = Activation('sigmoid')(output_5)

    model = Model(inputs=seq_input, outputs=output_f)
    model.compile(loss='binary_crossentropy',
                  optimizer='nadam',
                  metrics=['accuracy'])

    print(model.summary())
    return model
Example #17
def imdb_cnn(W=None):
    # Number of feature maps (outputs of convolutional layer)
    N_fm = 60
    # kernel size of convolutional layer
    kernel_size = 3
    dims = 300  # 300 dimension
    maxlen = 200  # maxlen of sentence
    max_features = W.shape[0]
    hidden_dims = 100
    print('Build model...')
    model = Sequential()

    # we start off with an efficient embedding layer which maps
    # our vocab indices into embedding_dims dimensions
    model.add(Embedding(max_features, dims, input_length=maxlen, weights=[W]))
    model.add(Dropout(0.5))

    # we add a Convolution1D, which will learn nb_filter
    # word group filters of size filter_length:
    model.add(Convolution1D(nb_filter=N_fm,
                            filter_length=kernel_size,
                            border_mode='valid',
                            activation='relu',
                            ))
    model.add(Dropout(0.5))
    # we use standard max pooling (halving the output of the previous layer):
    model.add(MaxPooling1D(pool_length=kernel_size*7))

    # We flatten the output of the conv layer,
    # so that we can add a vanilla dense layer:
    model.add(Flatten())

    # We add a vanilla hidden layer:
    model.add(Dense(hidden_dims))
    model.add(Dropout(0.5))
    model.add(Activation('relu'))

    # We project onto a single unit output layer, and squash it with a sigmoid:
    model.add(Dense(2))
    model.add(Activation('softmax'))
    return model
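For reference, the shape arithmetic of the model above, using the values the function itself defines:

    maxlen, kernel_size, N_fm = 200, 3, 60
    conv_len = maxlen - kernel_size + 1     # 'valid' conv -> 198 steps
    pooled = conv_len // (kernel_size * 7)  # MaxPooling1D(21) -> 9 steps
    flat = pooled * N_fm                    # Flatten() -> 540 features into Dense(100)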
Example #18
def get_model(
        go_id,
        max_features=5000,
        embedding_dims=100,
        nb_filters=250,
        hidden_dims=250,
        pool_length=2,
        filter_length=3):
    filepath = DATA_ROOT + 'level_1/models/' + go_id + '.hdf5'
    size = os.path.getsize(filepath)
    max_features = get_model_max_features(size)
    global go_model
    if go_id in go_model:
        return go_model[go_id]
    # length of APAAC
    maxlen = 20 + 6 * LAMBDA

    model = Sequential()
    model.add(Embedding(max_features, embedding_dims))
    model.add(Dropout(0.25))
    model.add(Convolution1D(
        input_dim=embedding_dims,
        nb_filter=nb_filters,
        filter_length=filter_length,
        border_mode='valid',
        activation='relu',
        subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(Flatten())
    output_size = nb_filters * (((maxlen - filter_length) // 1) + 1) // 2
    model.add(Dense(output_size, hidden_dims))
    model.add(Dropout(0.25))
    model.add(Activation('relu'))
    model.add(Dense(hidden_dims, 1))
    model.add(Activation('sigmoid'))
    model.compile(
        loss='binary_crossentropy', optimizer='adam', class_mode='binary')
    # Loading saved weights
    print('Loading weights for ' + go_id)
    model.load_weights(filepath)
    return model
Example #19
    def create(self):
        assert self._config.merge_mode in ['max', 'ave', 'sum'], \
                'Merge mode of this model is either max, ave or sum'

        model_list = [None] * self._config.language_cnn_views
        for j in xrange(1, self._config.language_cnn_views + 1):
            current_view = Sequential()
            self.textual_embedding(current_view, mask_zero=True)
            current_view.add(
                Convolution1D(nb_filter=self._config.language_cnn_filters,
                              filter_length=j,
                              border_mode='valid',
                              activation=self._config.language_cnn_activation,
                              subsample_length=1))
            self.temporal_pooling(current_view)
            model_list[j - 1] = current_view

        self.add(Merge(model_list, mode='concat'))
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax'))
Example #20
    def get_model(self):
        inputs_x = Input(shape=(self.sequence_len, ), dtype='int32')
        embedded_sequences = Embedding(self.nb_words,
                                       self.embedding_dim,
                                       weights=[self.embedding_matrix],
                                       input_length=self.sequence_len,
                                       trainable=False)(inputs_x)

        x = Convolution1D(256, 3, padding='same')(embedded_sequences)
        x = Activation('relu')(x)
        x = GlobalMaxPooling1D()(x)
        x = Dropout(0.25)(x)
        x = Dense(256)(x)  #128
        x = Activation('relu')(x)
        x = Dense(6)(x)

        outputs = Activation('sigmoid', name='outputs')(x)
        model = Model(inputs=[inputs_x], outputs=outputs)
        model.compile(loss='binary_crossentropy', optimizer='adamax')
        self.model = model
        return self.model
Example #21
def create_grouped():
    # np.random.seed(seed)
    feature_len = len(X[0, 0, :])
    season_len = len(X[0, :])
    model = Sequential()
    model.add(GaussianNoise(.05,
                            input_shape=(season_len,
                                         feature_len)))  # output:(None, 12, 9)
    model.add(Convolution1D(40, 1))  # (None, 12, 40)
    model.add(Flatten())  # (None, 480)
    model.add(Dense(40, init='normal', activation='relu'))  # (None, 40)
    model.add(Dropout(.25))
    model.add(Dense(40, init='normal', activation='relu', bias=True))
    model.add(Dropout(.25))
    model.add(Dense(20, init='normal', activation='tanh'))  # (None, 20)
    model.add(Dropout(.2))
    model.add(Dense(12, init='normal', activation='softmax'))  # (None, 12)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy', 'precision', 'recall', 'fmeasure'])
    return model
Example #22
def create_deep_cnn_model(nb_dim, nb_width):
    print("dim: " + str(nb_dim))
    model = Sequential()
    # model.add(Convolution2D(nb_dim,5,nb_dim, border_mode='valid',input_shape=(1, nb_width, nb_dim)))
    # model.add(Activation('tanh'))
    # model.add(MaxPooling2D(pool_size=( 46,1)))

    model.add(Convolution1D(nb_filter=128,
                            filter_length=3,
                            border_mode="valid",
                            activation="tanh",
                            input_shape=(nb_width, nb_dim)))
    model.add(MaxPooling1D(pool_length=48))

    model.add(Flatten())
    model.add(Dense(256, init='normal'))
    model.add(Activation('tanh'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_class, init='normal'))
    model.add(Activation('softmax'))
    return model
Example #23
    def __init__(self, output_dimension, vocab_size, dropout_rate, emb_dim, max_len, nb_filters, init_W=None):

        self.max_len = max_len
        max_features = vocab_size
        vanilla_dimension = 200

        self.filter_lengths = [3, 4, 5]

        '''Embedding Layer'''
        in_seq = Input(shape=(max_len,))
        if init_W is None:
            seq_emb = Embedding(max_features, emb_dim, input_length=max_len, trainable=True)(in_seq)
        else:
            seq_emb = Embedding(max_features, emb_dim, input_length=max_len, trainable=False, weights=[init_W / 20])(in_seq)

        '''Convolution Layer & Max Pooling Layer'''
        tmp_list = []
        for ws in self.filter_lengths:
#            cnn = Reshape((self.max_len, emb_dim, 1), input_shape=(self.max_len,))(seq_emb)
#            cnn = Convolution2D(nb_filters, ws, emb_dim, activation="relu")(cnn)
#            cnn = MaxPooling2D(pool_size=(self.max_len - ws + 1, 1))(cnn)
#            cnn = Flatten()(cnn)
            cnn = Convolution1D(nb_filters, ws, border_mode='valid', activation='relu', subsample_length=1)(seq_emb)
            cnn = MaxPooling1D(pool_length=self.max_len - ws + 1)(cnn)
            cnn = Flatten()(cnn)
            tmp_list.append(cnn)
        cnn_con = Concatenate()(tmp_list)
        
        '''Dropout Layer'''
        seq_dropout = Dense(vanilla_dimension, activation='tanh')(cnn_con)
        seq_dropout = Dropout(dropout_rate)(seq_dropout)
        
        '''Projection Layer & Output Layer'''
        out_seq = Dense(output_dimension, activation='tanh')(seq_dropout)

        # Output Layer
        self.model = Model(in_seq, out_seq)
        self.model.compile(optimizer='rmsprop', loss='mse')
        
        print(self.model.summary())
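A hedged construction sketch; the class name CNN_module and all dimensions are placeholders, only the argument list comes from the snippet:

    text_cnn = CNN_module(output_dimension=50, vocab_size=8000,
                          dropout_rate=0.2, emb_dim=200, max_len=300,
                          nb_filters=100)
    doc_vectors = text_cnn.model.predict(padded_seqs)  # padded_seqs assumed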
Example #24
    def baseModel(self, nb_filter=250, filter_length=3, hidden_dims=125):
        model = Sequential()

        # we start off with an efficient embedding layer which maps
        # our vocab indices into embedding_dims dimensions
        model.add(
            Embedding(self.max_words + self.index_from,
                      self.embedding_dims,
                      input_length=self.max_length))
        model.add(Dropout(0.25))

        # we add a Convolution1D, which will learn nb_filter
        # word group filters of size filter_length:

        # nb_filter/filter_length are the Keras 1 names for filters/kernel_size;
        # subsample_length corresponds to strides, like the step in a 2D CNN.
        model.add(
            Convolution1D(filters=nb_filter,
                          kernel_size=filter_length,
                          padding='valid',
                          activation='relu',
                          strides=1))
        # we use standard max pooling (halving the output of the previous layer):
        model.add(MaxPooling1D(pool_size=2))

        # We flatten the output of the conv layer,
        # so that we can add a vanilla dense layer:
        model.add(Flatten())

        # We add a vanilla hidden layer:
        model.add(Dense(hidden_dims))
        model.add(Dropout(0.25))
        model.add(Activation('relu'))

        # We project onto a single unit output layer, and squash it with a sigmoid:
        model.add(Dense(1))
        model.add(Activation('sigmoid'))

        model.compile(loss='binary_crossentropy', optimizer='rmsprop')

        return model
Example #25
def build_model(ncell, nmark, nfilter, coeff_l1, coeff_l2,
                k, dropout, dropout_p, regression, n_classes, lr=0.01):

    """ Builds the neural network architecture """

    # the input layer
    data_input = Input(shape=(ncell, nmark))

    # the filters
    conv = Convolution1D(nfilter, 1, activation='linear',
                         W_regularizer=l1l2(l1=coeff_l1, l2=coeff_l2),
                         name='conv1')(data_input)
    conv = Activation('relu')(conv) ### filter responses?
    # the cell grouping part
    pooled = Lambda(select_top, output_shape=(nfilter,), arguments={'k':k})(conv)

    # possibly add dropout
    if dropout or ((dropout == 'auto') and (nfilter > 5)):
        pooled = Dropout(p=dropout_p)(pooled)

    # network prediction output
    if not regression:
        output = Dense(n_classes, activation='softmax',
                       W_regularizer=l1l2(l1=coeff_l1, l2=coeff_l2),
                       name='output')(pooled)
    else:
        output = Dense(1, activation='linear', W_regularizer=l1l2(l1=coeff_l1, l2=coeff_l2),
                       name='output')(pooled)
    model = Model(input=data_input, output=output)


    if not regression:
        model.compile(optimizer=Adam(lr=lr),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
    else:
        model.compile(optimizer=Adam(lr=lr),
                      loss='mean_squared_error')
    return model
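A hedged call sketch for the cell-level classifier; every hyperparameter below is a placeholder:

    model = build_model(ncell=1000, nmark=10, nfilter=3, coeff_l1=0.0,
                        coeff_l2=1e-4, k=25, dropout=True, dropout_p=0.5,
                        regression=False, n_classes=2)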
Example #26
def cgru_model():
    m = Sequential()
    layers = [
        (FixedEmbedding if embedding_fixed else Embedding)\
            (*embeddings.shape, input_length=max_len, weights=[embeddings]),
        SimpleRNN(rnn_output, activation="relu", return_sequences=True),
        Convolution1D(
            nb_filter,
            filter_length,
            border_mode="valid",
            activation="relu"),
        MaxPooling1D(pool_length=max_len - filter_length + 1),
        Flatten(),
        Dropout(0.5),
        Dense(1, activation='sigmoid', W_constraint=maxnorm(W_constraint))
    ]
    for l in layers:
        m.add(l)
    m.compile(loss='binary_crossentropy',
              optimizer="adadelta",
              class_mode='binary')
    return m
Example #27
def cgru_model():
    m = Sequential()
    layers = [
        (FixedEmbedding if embedding_fixed else Embedding)\
            (*embeddings.shape, input_length=max_len, weights=[embeddings]),
        Dropout(0.25),
        Convolution1D(
            nb_filter,
            filter_length,
            border_mode="valid",
            activation="relu"),
        MaxPooling1D(pool_length=pool_length),
        GRU(rnn_output),
        # Lambda(lambda X: X.mean(axis=-2), output_shape=(rnn_output,)),
        Dense(1, activation='sigmoid', W_constraint=maxnorm(W_constraint))
    ]
    for l in layers:
        m.add(l)
    m.compile(loss='binary_crossentropy',
              optimizer="adadelta",
              class_mode='binary')
    return m
Example #28
def build_model(features, seq_len, out):
    model = Sequential()

    model.add(LSTM(100, input_shape=(seq_len, features),
                   return_sequences=True))
    model.add(Activation("tanh"))

    model.add(Convolution1D(50, 10, border_mode='valid'))
    model.add(Activation("relu"))

    model.add(Flatten())

    model.add(Dense(units=out))
    model.add(Activation("linear"))

    start = time.time()
    adam = Adam(lr=0.25, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    #model.compile(loss = "mean_absolute_percentage_error", optimizer = 'RMSprop')
    model.compile(loss="mean_absolute_percentage_error", optimizer=adam)
    print("> Compilation Time : ", time.time() - start)
    return model
Example #29
def model(sequence_length=None):
    graph = Graph()
    graph.add_input(name='input', input_shape=(sequence_length, embedding_dim))
    for fsz in filter_sizes:
        conv = Convolution1D(nb_filter=num_filters,
                             filter_length=fsz,
                             border_mode='valid',
                             activation='relu',
                             subsample_length=1,
                             input_dim=embedding_dim,
                             input_length=sequence_length)
        pool = MaxPooling1D(pool_length=sequence_length - fsz + 1)
        graph.add_node(conv, name='conv-%s' % fsz, input='input')
        graph.add_node(pool, name='maxpool-%s' % fsz, input='conv-%s' % fsz)
        graph.add_node(Flatten(),
                       name='flatten-%s' % fsz,
                       input='maxpool-%s' % fsz)

    if len(filter_sizes) > 1:
        graph.add_output(name='output',
                         inputs=['flatten-%s' % fsz for fsz in filter_sizes],
                         merge_mode='concat')
    else:
        graph.add_output(name='output', input='flatten-%s' % filter_sizes[0])

    # main sequential model
    model = Sequential()
    model.add(
        Embedding(vocab_size,
                  embedding_dim,
                  input_length=sequence_length,
                  weights=[embedding_weights]))
    model.add(
        Dropout(dropout_prob[0], input_shape=(sequence_length, embedding_dim)))
    model.add(graph)
    model.add(Dense(hidden_dims))
    model.add(Dropout(dropout_prob[1]))
    model.add(Activation('relu'))
    return model
Example #30
    def _simple_model(self, shape):
        filter_width = 8
        n_filters = 128
        timesteps = shape[1]
        features = shape[2]
        model = Sequential()
        # model.add(TimeDistributed(Dense(512), input_shape=(timesteps,features)))
        model.add(
            Convolution1D(n_filters,
                          filter_width,
                          input_shape=(timesteps, features)))
        timesteps -= filter_width - 1
        model.add(Dropout(self.dropout))
        model.add(AveragePooling1D(timesteps))
        model.add(Flatten())
        #model.add(Dense(128,activation='relu'))
        model.add(Dense(15))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])
        return model