Example #1
from keras.models import Sequential
from keras.layers import Conv2D, Activation, MaxPool2D, Flatten, Dense

# find_activation_layer is the function under test (in the keras-surgeon
# project it is provided by kerassurgeon.utils).


def test_find_activation_layer():
    conv1_filters = 1
    conv2_filters = 1
    dense_units = 1
    model = Sequential()
    model.add(
        Conv2D(conv1_filters, [3, 3],
               input_shape=(28, 28, 1),
               data_format="channels_last",
               name='conv_1'))
    model.add(Activation('relu', name='act_1'))
    model.add(MaxPool2D((2, 2), name='pool_1'))
    model.add(
        Conv2D(conv2_filters, [3, 3],
               data_format="channels_last",
               name='conv_2'))
    model.add(Activation('relu', name='act_2'))
    model.add(MaxPool2D((2, 2), name='pool_2'))
    model.add(Flatten(name='flat_1'))
    model.add(Dense(dense_units, name='dense_1'))
    model.add(Activation('relu', name='act_3'))
    model.add(Dense(10, name='dense_2'))
    model.add(Activation('softmax', name='act_4'))
    assert find_activation_layer(model.get_layer('conv_1'),
                                 0) == (model.get_layer('act_1'), 0)
    assert find_activation_layer(model.get_layer('conv_2'),
                                 0) == (model.get_layer('act_2'), 0)
    assert find_activation_layer(model.get_layer('dense_1'),
                                 0) == (model.get_layer('act_3'), 0)
    assert find_activation_layer(model.get_layer('dense_2'),
                                 0) == (model.get_layer('act_4'), 0)
Example #2
import scipy.io as sio
from keras.models import Model

# data_generator, image_size, BATCH_SIZE_TESTING, sgd, OBJECTIVE_FUNCTION,
# LOSS_METRICS and model are defined earlier in the original script.
test_generator = data_generator.flow_from_directory(
    directory='sketch/tx_000000000000',
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_TESTING,
    class_mode=None,
    shuffle=False,
    seed=123
)

# Need to compile layer[0] for extracting the 256-dim features.
# model.layers[1].compile(optimizer=sgd, loss=OBJECTIVE_FUNCTION, metrics=LOSS_METRICS)

test_generator.reset()
# pred = model.layers[1].predict_generator(test_generator, steps=len(test_generator), verbose=1)
f = Model(inputs=model.input, outputs=model.get_layer('dense').output)
f.compile(optimizer=sgd, loss=OBJECTIVE_FUNCTION, metrics=LOSS_METRICS)
for idx, layer in enumerate(f.layers):
    if layer.name == 'dense':
        print(idx, layer.name)
        # print(layer.get_weights())
        print("________________")

pred = f.predict_generator(test_generator, steps=len(test_generator), verbose=1)
# Predicted labels
# pred2 = model.predict_generator(test_generator, steps=len(test_generator), verbose=1)
# predicted_class_indices = np.argmax(pred2, axis=1)

fname = test_generator.filenames
sio.savemat('sketchmatnew.mat', mdict={'feature': pred, 'label': fname})
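
# A quick sanity check of the saved features: the .mat file can be loaded
# back with scipy.io (a minimal sketch, not part of the original snippet):
data = sio.loadmat('sketchmatnew.mat')
print(data['feature'].shape, data['label'].shape)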
Example #3
score = model.evaluate(x=X_train, y=y_train)

print('Train loss:', score[0])
print('Train accuracy:', score[1])

score = model.evaluate(x=X_test, y=y_test)

print('Test loss:', score[0])
print('Test accuracy:', score[1])

predictions = model.predict(x=X_test)

layer_name = "dense_3"
intermediate_layer_model = Model(inputs=model.input,
                                 outputs=model.get_layer(layer_name).output)

intermediate_output_train = intermediate_layer_model.predict(x=X_train)
intermediate_output_test = intermediate_layer_model.predict(x=X_test)

# get_3rd_layer_output = K.function([model.layers[0].input, K.learning_phase()], [model.layers[3].output])

# Testing:
# test = np.random.random(input_shape)[np.newaxis, ...]
# layer_out = get_3rd_layer_output([test, 1.])
# print(layer_out)
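
# A runnable version of the commented K.function approach above; a sketch,
# assuming a Keras backend where K.learning_phase() is a valid feed input
# (TF1-style graph mode):
from keras import backend as K

get_3rd_layer_output = K.function([model.layers[0].input, K.learning_phase()],
                                  [model.layers[3].output])
layer_out = get_3rd_layer_output([X_test, 0])[0]  # 0 = test/inference phase
print(layer_out.shape)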

print(intermediate_output_train.shape)
print(intermediate_output_test.shape)

final = []
Example #4
model.fit(
    x_train_pad,
    y_train,  # train model
    validation_split=0.05,
    epochs=3,
    batch_size=64)
# We can now use the trained model to predict the sentiment for these texts.

result_train = model.evaluate(x_train_pad, y_train)  # test model on train set
result_test = model.evaluate(x_test_pad, y_test)  # test model on test set
print("Accuracy: {0:.2%}".format(result_test[1]))
model.predict(x_test_pad)  # predict new texts
# A value close to 0.0 means a negative sentiment and a value close to 1.0 means a positive sentiment. These numbers will vary every time you train the model.
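
# A minimal sketch of turning the raw scores into class labels, using 0.5 as
# an assumed decision threshold (the threshold is not part of the original
# snippet):
y_pred = model.predict(x_test_pad)
pred_labels = ['negative' if p < 0.5 else 'positive' for p in y_pred.ravel()]
print(pred_labels[:10])
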
"""
The model cannot work on integer-tokens directly, because they are integer values that may range between 0 and the number of words in our vocabulary, e.g. 10000. 
So we need to convert the integer-tokens into vectors of values that are roughly between -1.0 and 1.0 which can be used as input to a neural network.
This mapping from integer-tokens to real-valued vectors is also called an "embedding". 
It is essentially just a matrix where each row contains the vector-mapping of a single token. 
This means we can quickly lookup the mapping of each integer-token by simply using the token as an index into the matrix. 
The embeddings are learned along with the rest of the model during training.
Ideally the embedding would learn a mapping where words that are similar in meaning also have similar embedding-values. Let us investigate if that has happened here.
First we need to get the embedding-layer from the model
"""
layer_embedding = model.get_layer('layer_embedding')
# We can then get the weights used for the mapping done by the embedding-layer.
weights_embedding = layer_embedding.get_weights()[0]
# Note that the weights are actually just a matrix with the number of words in the vocabulary times the vector length for each embedding.
# That's because it is basically just a lookup-matrix.
token_bad = tokenizer.word_index['bad']
print(weights_embedding[token_bad])  # embedding vector for the token 'bad'
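
# To investigate whether similar words got similar embeddings, we can compare
# a few embedding vectors with cosine similarity. A minimal sketch, assuming
# the words 'good' and 'terrible' are in the tokenizer vocabulary:
import numpy as np

def cosine_similarity(a, b):
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

token_good = tokenizer.word_index['good']
token_terrible = tokenizer.word_index['terrible']
# 'bad' should be closer to 'terrible' than to 'good'.
print(cosine_similarity(weights_embedding[token_bad], weights_embedding[token_terrible]))
print(cosine_similarity(weights_embedding[token_bad], weights_embedding[token_good]))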
Example #5
import random

# The original snippet appears to alias tensorflow as 'ft' and
# tensorflow.keras as 'k'; model, params and leiaImagens() are defined
# earlier in the original script.
import tensorflow as ft
from tensorflow import keras as k
from tensorflow.keras.layers import Dense, Dropout, Flatten

model.add(Dropout(rate=0.3))

model.add(Flatten())

# fc 1
model.add(Dense(32, activation=ft.nn.relu))
model.add(Dropout(rate=0.3))
# fc 2
model.add(Dense(1, activation=ft.nn.tanh))

model.summary()
model.compile(loss='mse',
              optimizer=k.optimizers.Adam(learning_rate=0.00005),
              metrics=[ft.keras.metrics.RootMeanSquaredError()])


model = k.models.load_model('seguidor2_gw3_com_mapa7_e_realinhamento.h5')
# Note: assigning kernel_regularizer to already-built layers does not by
# itself add the regularization loss; the model typically has to be rebuilt
# (e.g. saved and reloaded from its config) for the change to take effect.
model.get_layer('conv2d').kernel_regularizer = ft.keras.regularizers.l2(0.001)
model.get_layer('conv2d_1').kernel_regularizer = ft.keras.regularizers.l2(0.001)
model.get_layer('conv2d_2').kernel_regularizer = ft.keras.regularizers.l2(0.001)

for x in range(200):
    print("x = ", x)
    for y in range(5):
        random.shuffle(params)
        # particoes = list(chunks(params, 500))
        # for e, p in enumerate(particoes):
        #     print("part = ", e)
        #     inp, out = leiaImagens(p)
        inp, out = leiaImagens(params[:700])
        model.fit(inp, out, epochs=40)  # a,s,b
    model.save('seguidor2_gw3_com_mapa7_e_realinhamento.h5')