Example #1
    def __init__(self):
        model = models.Sequential(name='ICP_model')
        # 1st convolutional layer: 32 kernels of size 3x3; the training images are 60x200
        model.add(
            layers.Conv2D(32, (3, 3),
                          activation='relu',
                          input_shape=(Height, Width, Channels)))
        model.add(layers.MaxPooling2D((2, 2)))
        # 2nd convolutional layer: 32 kernels of size 3x3
        model.add(layers.Conv2D(32, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D((2, 2)))
        # 3rd convolutional layer: 64 kernels of size 3x3
        model.add(layers.Conv2D(64, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D((2, 2)))
        # 4th convolutional layer: 128 kernels of size 3x3
        model.add(layers.Conv2D(128, (3, 3), activation='relu'))

        model.add(layers.Flatten())
        model.add(layers.Dense(6 * 62, activation='relu'))
        model.add(layers.Reshape([6, 62]))

        model.add(layers.Softmax())

        # Print a string summary of the network
        model.summary()

        self.model = model
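Example #1 only builds the network; how the (6, 62) softmax head is trained is not shown. Below is a minimal, self-contained sketch (shortened to a couple of layers, with an assumed 60x200x3 input and dummy data) of how such an output pairs with sparse integer labels of shape (samples, 6):

import numpy as np
from tensorflow.keras import layers, models

# Image size taken from the comment above; the channel count is an assumption.
Height, Width, Channels = 60, 200, 3

model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu',
                  input_shape=(Height, Width, Channels)),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(6 * 62, activation='relu'),
    layers.Reshape([6, 62]),   # 6 characters, each over a 62-symbol alphabet
    layers.Softmax(),
])

# With a (batch, 6, 62) softmax output, sparse_categorical_crossentropy
# accepts integer labels of shape (batch, 6) directly.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

x = np.random.rand(8, Height, Width, Channels).astype('float32')  # dummy images
y = np.random.randint(0, 62, size=(8, 6))                         # dummy labels
model.fit(x, y, epochs=1, batch_size=4)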
Example #2

from tensorflow.keras import models
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense


def build_network():
    network = models.Sequential()
    network.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
    network.add(MaxPooling2D((2, 2)))
    network.add(Conv2D(64, (3, 3), activation='relu'))
    network.add(MaxPooling2D((2, 2)))
    network.add(Conv2D(64, (3, 3), activation='relu'))
    network.add(Flatten())
    network.add(Dense(64, activation='relu'))
    # network.add(Dense(32, activation='relu'))
    network.add(Dense(10, activation='softmax'))

    network.compile(optimizer='adam',
                    loss='sparse_categorical_crossentropy',
                    metrics=['accuracy'])
    return network
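A possible way to use build_network() above, assuming the standard Keras MNIST workflow (the data handling below is an illustration, not part of the original example):

from tensorflow.keras.datasets import mnist

(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

# Scale to [0, 1] and add the channel dimension expected by Conv2D.
train_images = train_images.reshape((60000, 28, 28, 1)).astype('float32') / 255
test_images = test_images.reshape((10000, 28, 28, 1)).astype('float32') / 255

network = build_network()
network.fit(train_images, train_labels, epochs=5, batch_size=128)
test_loss, test_acc = network.evaluate(test_images, test_labels)
print('test accuracy:', test_acc)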
Example #3
import numpy as np
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence

# max_features and maxlen are assumed to be defined earlier in the original script.

print('Loading data...')
(input_train, y_train), (input_test, y_test) = imdb.load_data(num_words=max_features)
print(len(input_train), 'train sequences')
print(len(input_test), 'test sequences')

print('Pad sequences (samples x time)')
input_train = sequence.pad_sequences(input_train, maxlen=maxlen)
input_test = sequence.pad_sequences(input_test, maxlen=maxlen)
print('input_train shape:', input_train.shape)
print('input_test shape:', input_test.shape)


from tensorflow.keras import models
from tensorflow.keras import layers

network = models.Sequential()
network.add(layers.Embedding(max_features, 128))
network.add(layers.Conv1D(256,3,padding='valid',activation='relu',strides=1))
network.add(layers.MaxPooling1D())
network.add(layers.Bidirectional(layers.LSTM(128)))
network.add(layers.Dense(256, activation='relu'))
network.add(layers.Dense(64, activation='relu'))
network.add(layers.Dense(32, activation='relu'))
network.add(layers.Dense(16, activation='relu'))
network.add(layers.Dense(16, activation='relu'))
network.add(layers.Dense(1))
network.add(layers.Activation('sigmoid'))

# Re-split the data: 80% for training, 20% for testing
input_train = np.concatenate((input_train, input_test[:15000]))
input_test = input_test[15000:]
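Example #3 stops after re-splitting the inputs: the labels are never re-split, and the network is never compiled or trained. A plausible continuation, mirroring Example #4, might look like this (the optimizer, epoch count, and validation split are assumptions):

# Re-split the labels the same way as the inputs.
y_train = np.concatenate((y_train, y_test[:15000]))
y_test = y_test[15000:]

network.compile(optimizer='adam',
                loss='binary_crossentropy',
                metrics=['acc'])
network.summary()

history = network.fit(input_train, y_train,
                      epochs=3,
                      batch_size=64,
                      validation_split=0.1)

print(network.evaluate(input_test, y_test))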
Example #4
import numpy as np
from tensorflow.keras.preprocessing import sequence

# input_train/y_train, input_test/y_test, max_features and maxlen are assumed to be
# loaded and defined earlier, as in Example #3.

print('Pad sequences (samples x time)')
input_train = sequence.pad_sequences(input_train, maxlen=maxlen)
input_test = sequence.pad_sequences(input_test, maxlen=maxlen)
print('input_train shape:', input_train.shape)
print('input_test shape:', input_test.shape)
# Re-split the data: 80% for training, 20% for testing
x_train = np.concatenate((input_train, input_test[:15000]))
input_test = input_test[15000:]
y_train = np.concatenate((y_train, y_test[:15000]))
y_test = y_test[15000:]

from tensorflow.keras import models
from tensorflow.keras import layers

model = models.Sequential()
model.add(layers.Embedding(max_features, 32))
model.add(layers.SimpleRNN(32))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])

model.summary()
his = model.fit(x_train,
                y_train,
                epochs=3,
                batch_size=64,
                validation_split=0.025)

results = model.evaluate(input_test, y_test)
print(results)
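To inspect the run, the history returned by fit() can be plotted (an optional addition, not part of the original example; matplotlib is assumed to be available):

import matplotlib.pyplot as plt

# Keras versions report the metric key as either 'acc' or 'accuracy',
# so look it up defensively.
acc_key = 'acc' if 'acc' in his.history else 'accuracy'

plt.plot(his.history[acc_key], label='train accuracy')
plt.plot(his.history['val_' + acc_key], label='validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()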