Example #1
from keras.layers import Input, LSTM


def layer_outputs():
    input_a = Input(shape=(140, 256))
    input_b = Input(shape=(140, 256))
    lstm = LSTM(32)

    # After a single call the layer has one inbound node, so `.output` is well-defined.
    encoded_a = lstm(input_a)
    assert lstm.output == encoded_a

    # After a second call the layer has two nodes; query each output by node index.
    encoded_b = lstm(input_b)
    assert lstm.get_output_at(0) == encoded_a
    assert lstm.get_output_at(1) == encoded_b
Example #2
          {'main_output': labels, 'aux_output': labels},
          epochs=50, batch_size=32)
'''

# Shared layer
import keras
from keras.layers import Input, LSTM, Dense
from keras.models import Model

tweet_a = Input(shape=(140, 256))
tweet_b = Input(shape=(140, 256))

shared_lstm = LSTM(64)

encoded_a = shared_lstm(tweet_a)
encoded_b = shared_lstm(tweet_b)

# Inspect the shared layer's output at each call
assert shared_lstm.get_output_at(0) == encoded_a
assert shared_lstm.get_output_at(1) == encoded_b

merged_vector = keras.layers.concatenate([encoded_a, encoded_b], axis=-1)

predictions = Dense(1, activation='sigmoid')(merged_vector)

model = Model(inputs=[tweet_a, tweet_b], outputs=predictions)
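
To exercise this shared-LSTM model end to end, it could be compiled and fit on placeholder data. The arrays and hyperparameters below are illustrative assumptions, not part of the original snippet.

import numpy as np

# Assumed dummy data: 1000 pairs of "tweets", each encoded as a (140, 256) matrix.
data_a = np.random.random((1000, 140, 256))
data_b = np.random.random((1000, 140, 256))
labels = np.random.randint(0, 2, size=(1000, 1))

model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.fit([data_a, data_b], labels, epochs=10, batch_size=32)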
Example #3
def test_functional_guide():
    # MNIST
    import numpy as np
    from keras.layers import Input, Dense, LSTM, merge
    from keras.models import Model
    from keras.utils import np_utils

    # this returns a tensor
    inputs = Input(shape=(784, ))

    # a layer instance is callable on a tensor, and returns a tensor
    x = Dense(64, activation='relu')(inputs)
    x = Dense(64, activation='relu')(x)
    predictions = Dense(10, activation='softmax')(x)

    # this creates a model that includes
    # the Input layer and three Dense layers
    model = Model(input=inputs, output=predictions)
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # the data, shuffled and split between train and test sets
    X_train = np.random.random((100, 784))
    Y_train = np.random.random((100, 10))

    model.fit(X_train, Y_train, nb_epoch=2, batch_size=128)

    assert model.inputs == [inputs]
    assert model.outputs == [predictions]
    assert model.input == inputs
    assert model.output == predictions
    assert model.input_shape == (None, 784)
    assert model.output_shape == (None, 10)

    # try calling the sequential model
    inputs = Input(shape=(784, ))
    new_outputs = model(inputs)
    new_model = Model(input=inputs, output=new_outputs)
    new_model.compile(optimizer='rmsprop',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

    ##################################################
    # multi-io
    ##################################################
    tweet_a = Input(shape=(4, 25))
    tweet_b = Input(shape=(4, 25))
    # this layer can take as input a matrix
    # and will return a vector of size 64
    shared_lstm = LSTM(64)

    # when we reuse the same layer instance
    # multiple times, the weights of the layer
    # are also being reused
    # (it is effectively *the same* layer)
    encoded_a = shared_lstm(tweet_a)
    encoded_b = shared_lstm(tweet_b)

    # we can then concatenate the two vectors:
    merged_vector = merge([encoded_a, encoded_b],
                          mode='concat',
                          concat_axis=-1)

    # and add a logistic regression on top
    predictions = Dense(1, activation='sigmoid')(merged_vector)

    # we define a trainable model linking the
    # tweet inputs to the predictions
    model = Model(input=[tweet_a, tweet_b], output=predictions)

    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    data_a = np.random.random((1000, 4, 25))
    data_b = np.random.random((1000, 4, 25))
    labels = np.random.random((1000, ))
    model.fit([data_a, data_b], labels, nb_epoch=1)

    model.summary()
    assert model.inputs == [tweet_a, tweet_b]
    assert model.outputs == [predictions]
    assert model.input == [tweet_a, tweet_b]
    assert model.output == predictions

    assert model.input_shape == [(None, 4, 25), (None, 4, 25)]
    assert model.output_shape == (None, 1)

    assert shared_lstm.get_output_at(0) == encoded_a
    assert shared_lstm.get_output_at(1) == encoded_b
    assert shared_lstm.input_shape == (None, 4, 25)
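
This test targets the legacy Keras 1 API (Model(input=..., output=...), merge, nb_epoch). A rough sketch of the same shared-LSTM graph under the Keras 2 API is shown below; it is an equivalent rewrite for reference, not part of the original test.

from keras.layers import Input, Dense, LSTM, concatenate
from keras.models import Model

tweet_a = Input(shape=(4, 25))
tweet_b = Input(shape=(4, 25))
shared_lstm = LSTM(64)

# concatenate() replaces merge(..., mode='concat', concat_axis=-1)
merged_vector = concatenate([shared_lstm(tweet_a), shared_lstm(tweet_b)], axis=-1)
predictions = Dense(1, activation='sigmoid')(merged_vector)

model = Model(inputs=[tweet_a, tweet_b], outputs=predictions)
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
# model.fit([data_a, data_b], labels, epochs=1)  # `epochs` replaces `nb_epoch`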
Example #4
predict = Dense(units=10, activation="softmax")(x)

model = Model(inputs=inputs, outputs=predict)
model.compile(optimizer="rmsprop",
              loss="categorical_crossentropy",
              metrics=["accuracy"])

#---------------- Model visualization
# from keras.utils import plot_model
# plot_model(model, to_file= "1.jpg")

from tensorflow.examples.tutorials.mnist import input_data

train = input_data.read_data_sets("data/", one_hot=True).train
history = model.fit(train.images, train.labels, batch_size=50, epochs=1)
#
# #--------------- Reusing the trained model
# from keras.layers import TimeDistributed
# input_seq = Input(shape=(20, 784))
# Timemodel = TimeDistributed(model)(input_seq)

#---------------- Two inputs sharing one network
from keras.layers import LSTM
a = Input(shape=(5, 3))
b = Input(shape=(5, 3))

lstm = LSTM(10)
encode_a = lstm(a)
encode_b = lstm(b)

assert lstm.get_output_at(0) == encode_a
assert lstm.get_output_at(1) == encode_b
print(1)
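
The commented-out block above hints at reusing the trained model inside TimeDistributed. A minimal sketch of that idea, assuming the `model` fit above and an arbitrary sequence length of 20, might look like this:

from keras.layers import Input, TimeDistributed
from keras.models import Model

# Treat each sample as a sequence of 20 flattened MNIST images.
input_seq = Input(shape=(20, 784))
seq_output = TimeDistributed(model)(input_seq)  # output shape: (None, 20, 10)
seq_model = Model(inputs=input_seq, outputs=seq_output)
seq_model.summary()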
Example #5
model = Model(inputs=[tweet_a, tweet_b], outputs=predictions)
model.compile(optimizer="rmsprop",
              loss="binary_crossentropy",
              metrics=["accuracy"])
model.fit([data_a, data_b], labels, epochs=10)

a = Input(shape=(280, 256))
lstm = LSTM(32)
encoded_a = lstm(a)
assert lstm.output == encoded_a

a = Input(shape=(280, 256))
b = Input(shape=(280, 256))
lstm = LSTM(32)
encoded_a = lstm(a)
encoded_b = lstm(b)

lstm.get_output_at(0)
lstm.get_output_at(1)

a = Input(shape=(32, 32, 3))
b = Input(shape=(64, 64, 3))

conv = Conv2D(16, (3, 3), padding="same")
conved_a = conv(a)
conved_b = conv(b)

# conv.input_shape  # ill-defined now that conv has two inbound nodes
conv.get_input_shape_at(0)  # (None, 32, 32, 3)
conv.get_input_shape_at(1)  # (None, 64, 64, 3)
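
For reference, a self-contained version of the shape-querying pattern above, with the imports it needs; the asserted shapes follow directly from the input definitions.

from keras.layers import Input, Conv2D

a = Input(shape=(32, 32, 3))
b = Input(shape=(64, 64, 3))

conv = Conv2D(16, (3, 3), padding="same")
conved_a = conv(a)

# Only one inbound node so far, so `.input_shape` is unambiguous.
assert conv.input_shape == (None, 32, 32, 3)

conved_b = conv(b)
# With two inbound nodes, query each node's shape by index.
assert conv.get_input_shape_at(0) == (None, 32, 32, 3)
assert conv.get_input_shape_at(1) == (None, 64, 64, 3)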
Example #6
import keras
from keras.layers import Input, LSTM, Conv2D, MaxPooling2D

a = Input(shape=(280, 256))
lstm = LSTM(32)
encoded_a = lstm(a)

assert lstm.output == encoded_a
# If the same layer is called multiple times

a = Input(shape=(280, 256))
b = Input(shape=(280, 256))

lstm = LSTM(32)
encoded_a = lstm(a)
encoded_b = lstm(b)

# lstm.output  # raises: Layer lstm_1 has multiple inbound nodes, hence the notion of
# "layer output" is ill-defined. Use `get_output_at(node_index)` instead.

assert lstm.get_output_at(0) == encoded_a  # output of the first call
assert lstm.get_output_at(1) == encoded_b  # output of the second call

# The same applies to input_shape and output_shape.

# If a conv layer processes two inputs in turn, their shapes must also be queried per node.

a = Input(shape=(32, 32, 3))
b = Input(shape=(64, 64, 3))

conv = Conv2D(16, (3, 3), padding='same')
conved_a = conv(a)

# Only one input so far, the following will work:
assert conv.input_shape == (None, 32, 32, 3)
""" each layer is a node in the computation graph. With shared layer, this
layer will correpond to multiple nodes in the computation graph. To get the
output for each computation, you will need to supply an index.
"""
a = Input(shape=(140, 256))
lstm = LSTM(32)
encoded_a = lstm(a)
assert lstm.output == encoded_a

a = Input(shape=(140, 256))
b = Input(shape=(140, 256))
lstm = LSTM(32)
encoded_a = lstm(a)
encoded_b = lstm(b)
assert lstm.get_output_at(0) == encoded_a
assert lstm.get_output_at(1) == encoded_b

""" inception module (go deeper with convolution) """
input_img = Input(shape=(256,256,3))
tower_1 = Conv2D(64, (1,1), padding='same', activation='relu')(input_img)
tower_1 = Conv2D(64, (3,3), padding='same', activation='relu')(tower_1)

tower_2 = Conv2D(64, (1,1), padding='same', activation='relu')(input_img)
tower_2 = Conv2D(64, (5,5), padding='same', activation='relu')(tower_2)

tower_3 = MaxPooling2D((3,3), strides=(1,1), padding='same')(input_img)
tower_3 = Conv2D(64, (1,1), padding='same', activation='relu')(tower_3)

output = keras.layers.concatenate([tower_1, tower_2, tower_3], axis=1)
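
To make the inception block inspectable, it could be wrapped in a Model built from the tensors defined above; the variable name below is an illustrative assumption.

from keras.models import Model

# Assumed wrapper around the inception block defined above.
inception_model = Model(inputs=input_img, outputs=output)
inception_model.summary()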