Code example #1
# Keras 1.x API (Convolution1D / Merge / border_mode). Assumes the global
# longest_sentence_corpus (maximum sentence length) is defined before the call.
from keras.models import Sequential
from keras.layers import convolutional, Merge, Flatten, Dense, Dropout
from keras.optimizers import Adagrad


def build_rhodes():
    print("building rhodes")
    # Three parallel convolutional branches over 300-d word embeddings,
    # with 3-, 4-, and 5-gram filter windows.
    auth_model_3gram = Sequential()
    auth_model_3gram.add(convolutional.Convolution1D(100, 3, border_mode='same', input_shape=(longest_sentence_corpus, 300)))
    auth_model_3gram.add(convolutional.MaxPooling1D(2, stride=1, border_mode='same'))

    auth_model_4gram = Sequential()
    auth_model_4gram.add(convolutional.Convolution1D(100, 4, border_mode='same', input_shape=(longest_sentence_corpus, 300)))
    auth_model_4gram.add(convolutional.MaxPooling1D(2, stride=1, border_mode='same'))

    auth_model_5gram = Sequential()
    auth_model_5gram.add(convolutional.Convolution1D(100, 5, border_mode='same', input_shape=(longest_sentence_corpus, 300)))
    auth_model_5gram.add(convolutional.MaxPooling1D(2, stride=1, border_mode='same'))

    # Concatenate the three branches along the feature axis, then classify.
    global merged_model
    merged_model = Sequential()
    merged_model.add(Merge([auth_model_3gram, auth_model_4gram, auth_model_5gram], mode='concat', concat_axis=2))
    merged_model.add(Flatten())
    merged_model.add(Dense(200))
    merged_model.add(Dense(200, activation='relu'))
    merged_model.add(Dropout(0.5))
    merged_model.add(Dense(200))
    merged_model.add(Dense(2, activation='softmax'))

    merged_model.summary()

    ada = Adagrad(lr=0.0001, epsilon=1e-06)
    merged_model.compile(loss='categorical_crossentropy', optimizer=ada, metrics=['accuracy'])
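A minimal usage sketch for the function above, assuming Keras 1.x and hypothetical X_train / y_train arrays. Because the model starts with a Merge of three branches that each read the same embedded input, fit() must receive that input once per branch:

import numpy as np

longest_sentence_corpus = 50  # assumed value, for illustration only
build_rhodes()                # populates the global merged_model

X_train = np.random.rand(32, longest_sentence_corpus, 300).astype('float32')
y_train = np.eye(2)[np.random.randint(0, 2, size=32)]  # one-hot labels

# One copy of the input per branch of the Merge layer.
merged_model.fit([X_train, X_train, X_train], y_train, nb_epoch=2, batch_size=8)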
Code example #2
# Early Keras test (pre-1.0 layer API: layer.input / get_output still exist).
import numpy as np
from keras import backend as K
from keras.layers import convolutional


def test_maxpooling_1d():
    nb_samples = 9
    nb_steps = 7
    input_dim = 10

    # All-ones input of shape (samples, timesteps, features).
    input = np.ones((nb_samples, nb_steps, input_dim))
    for stride in [1, 2]:
        layer = convolutional.MaxPooling1D(stride=stride, border_mode='valid')
        layer.input = K.variable(input)
        for train in [True, False]:
            K.eval(layer.get_output(train))
        layer.get_config()  # smoke-test serialization as well
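The test above only checks that evaluation runs without error; the shapes it exercises follow the usual 'valid' pooling rule, output_steps = (nb_steps - pool_length) // stride + 1. A small NumPy reference sketch of that rule (my illustration, not Keras code; pool_length 2 assumed, matching the layer's default):

import numpy as np

def maxpool1d_valid(x, pool_length=2, stride=1):
    # x: (steps, features) -> 'valid' max pooling along the first axis.
    steps = (x.shape[0] - pool_length) // stride + 1
    return np.stack([x[i * stride:i * stride + pool_length].max(axis=0)
                     for i in range(steps)])

x = np.arange(70, dtype=float).reshape(7, 10)
print(maxpool1d_valid(x, stride=1).shape)  # (6, 10)
print(maxpool1d_valid(x, stride=2).shape)  # (3, 10)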
Code example #3
    # Method from a TestCase class in an older, Theano-only Keras test suite
    # (pre-backend API: ignore_border and theano.shared instead of K.variable).
    # Requires: import numpy as np; import theano; from keras.layers import convolutional
    def test_maxpooling_1d(self):
        nb_samples = 9
        nb_steps = 7
        input_dim = 10

        input = np.ones((nb_samples, nb_steps, input_dim))
        for ignore_border in [True, False]:
            for stride in [None, 2]:
                layer = convolutional.MaxPooling1D(stride=stride, ignore_border=ignore_border)
                layer.input = theano.shared(value=input)
                for train in [True, False]:
                    layer.get_output(train).eval()

                config = layer.get_config()
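The only switch this older variant adds over code example #2 is ignore_border, inherited from Theano's pooling op: with ignore_border=True a trailing partial window is dropped, with False it is kept (and stride=None defaults to the pool length). A NumPy sketch of that edge rule, my illustration with pool length 2 assumed:

import numpy as np

def maxpool1d(x, pool_length=2, stride=2, ignore_border=True):
    out, i = [], 0
    while i + pool_length <= x.shape[0]:
        out.append(x[i:i + pool_length].max(axis=0))
        i += stride
    if not ignore_border and i < x.shape[0]:
        out.append(x[i:].max(axis=0))  # keep the partial window at the edge
    return np.stack(out)

x = np.arange(7, dtype=float).reshape(7, 1)
print(maxpool1d(x, ignore_border=True).shape)   # (3, 1)
print(maxpool1d(x, ignore_border=False).shape)  # (4, 1)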
Code example #4
File: net.py  Project: chitrita/deepcpg_legacy
# Keras 1.x. The k* module aliases are assumed to be imported as follows:
from keras.layers import core as kcore
from keras.layers import convolutional as kconv
from keras.layers import normalization as knorm
from keras import regularizers as kr


def seq_layers(params):
    # Build a list of (name, layer) pairs for the DNA-sequence branch.
    layers = []
    if params.drop_in:
        layer = kcore.Dropout(params.drop_in)
        layers.append(('xd', layer))
    # One conv/pool block per entry in params.nb_filter.
    nb_layer = len(params.nb_filter)
    w_reg = kr.WeightRegularizer(l1=params.l1, l2=params.l2)
    for l in range(nb_layer):
        layer = kconv.Convolution1D(nb_filter=params.nb_filter[l],
                                    filter_length=params.filter_len[l],
                                    activation=params.activation,
                                    init='glorot_uniform',
                                    W_regularizer=w_reg,
                                    border_mode='same')
        layers.append(('c%d' % (l + 1), layer))
        layer = kconv.MaxPooling1D(pool_length=params.pool_len[l])
        layers.append(('p%d' % (l + 1), layer))

    layer = kcore.Flatten()
    layers.append(('f1', layer))
    if params.drop_out:
        layer = kcore.Dropout(params.drop_out)
        layers.append(('f1d', layer))
    if params.nb_hidden:
        # Linear Dense first, so BatchNormalization (if enabled) sits between
        # the affine transform and the nonlinearity.
        layer = kcore.Dense(output_dim=params.nb_hidden,
                            activation='linear',
                            init='glorot_uniform')
        layers.append(('h1', layer))
        if params.batch_norm:
            layer = knorm.BatchNormalization()
            layers.append(('h1b', layer))
        layer = kcore.Activation(params.activation)
        layers.append(('h1a', layer))
        if params.drop_out:
            layer = kcore.Dropout(params.drop_out)
            layers.append(('h1d', layer))
    return layers
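A usage sketch for seq_layers: it returns (name, layer) pairs rather than a model, so the caller assembles them (the names are used for bookkeeping elsewhere in deepcpg). The params object below is a hypothetical stand-in; its attribute names are taken from the function body, and the input shape (1001 one-hot bases over A/C/G/T) is assumed for illustration:

from types import SimpleNamespace
from keras.models import Sequential
from keras.layers import core as kcore

params = SimpleNamespace(drop_in=0.0, nb_filter=[32, 64], filter_len=[8, 4],
                         pool_len=[2, 2], activation='relu', l1=0.0, l2=1e-4,
                         drop_out=0.25, nb_hidden=128, batch_norm=False)

model = Sequential()
# Keras 1.x needs an input shape on the first layer; a linear Activation
# serves as a no-op input stub here.
model.add(kcore.Activation('linear', input_shape=(1001, 4)))
for name, layer in seq_layers(params):
    model.add(layer)
model.summary()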
Code example #5
File: CNN-a.py  Project: xypan1232/CNN-Att
# Keras 1.x functional API. Attention and myFlatten are custom layers defined
# elsewhere in the CNN-Att project; the remaining imports are assumed to be:
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Activation, Flatten, merge
from keras.layers import core, convolutional as conv


def set_cnn_model_attention(input_dim=4, input_length=2701):
    attention_reg_x = 0.25
    attention_reg_xr = 1
    attentionhidden_x = 16
    attentionhidden_xr = 8
    nbfilter = 16
    input = Input(shape=(input_length, input_dim))
    x = conv.Convolution1D(nbfilter, 10, border_mode="valid")(input)
    x = Dropout(0.5)(x)
    x = Activation('relu')(x)
    x = conv.MaxPooling1D(pool_length=3)(x)
    # Swap the time and feature axes so attention can also run "transposed".
    x_reshape = core.Reshape((x._keras_shape[2], x._keras_shape[1]))(x)

    x = Dropout(0.5)(x)
    x_reshape = Dropout(0.5)(x_reshape)

    decoder_x = Attention(hidden=attentionhidden_x,
                          activation='linear')  # success
    decoded_x = decoder_x(x)
    output_x = myFlatten(x._keras_shape[2])(decoded_x)

    decoder_xr = Attention(hidden=attentionhidden_xr, activation='linear')
    decoded_xr = decoder_xr(x_reshape)
    output_xr = myFlatten(x_reshape._keras_shape[2])(decoded_xr)

    # Concatenate both attention summaries with the raw pooled features.
    output = merge([output_x, output_xr, Flatten()(x)], mode='concat')
    #output = BatchNormalization()(output)
    output = Dropout(0.5)(output)
    print(output.shape)
    output = Dense(nbfilter * 10, activation="relu")(output)
    output = Dropout(0.5)(output)
    out = Dense(2, activation='softmax')(output)
    #output = BatchNormalization()(output)
    model = Model(input, out)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    return model
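A usage sketch, assuming the project-specific Attention and myFlatten layers are importable and that inputs are one-hot sequences of length 2701 over a 4-letter alphabet (random placeholder data below):

import numpy as np

model = set_cnn_model_attention()  # defaults: input_dim=4, input_length=2701
X = np.random.randint(0, 2, size=(16, 2701, 4)).astype('float32')
y = np.eye(2)[np.random.randint(0, 2, size=16)]  # one-hot binary labels

model.fit(X, y, nb_epoch=1, batch_size=4)
probs = model.predict(X)  # (16, 2) softmax probabilities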