"""
Example #1
input_tensor = Input(shape=(2,))
hidden = Dense(2)(input_tensor)
relu_hid = K.relu(hidden)
dense_out = Dense(1)(relu_hid) # raw output, squashed by the sigmoid below
sigmoid_out = K.sigmoid(dense_out)

# inputs and outputs must be layer tensors, not raw tensors made with backend (K) ops
model2 = Model(inputs=input_tensor, outputs=sigmoid_out)

model2.summary() # print a layer-by-layer summary of the model
"""

#######################################################
# Get initial weights and set initial weights
#######################################################
model.get_weights()
init_weights = [
    np.array([[0.21884251, 0.37195587, 0.95793033, 0.37305808],
              [0.62441111, 0.79077578, -0.79303694, 0.8752284]],
             dtype='float32'),
    np.array([0., 0., 0., 0.], dtype='float32'),
    np.array([[1.0339992], [-0.56100774], [0.35092974], [-0.34449542]],
             dtype='float32'),
    np.array([0.], dtype='float32')
]
model.set_weights(init_weights)
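# Sanity check (a sketch): get_weights() returns alternating kernel/bias
# arrays, so for this 2-4-1 network we expect shapes (2, 4), (4,), (4, 1), (1,).
for w in model.get_weights():
    print(w.shape)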

## prepare a list for each node's weights
w_1_1_1 = []
w_1_1_2 = []
w_1_2_1 = []
### Both BatchNormalization and Dropout perform some basic operations before the actual normalizing/dropping; dive into the Keras source if you want the details.
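# At its core BatchNormalization applies the standard formula
# y = gamma * (x - mean) / sqrt(var + eps) + beta. A plain-numpy sketch of
# that formula (not the Keras implementation itself):
import numpy as np

def batch_norm_sketch(x, gamma=1.0, beta=0.0, eps=1e-3):
    mean = x.mean(axis=0)   # per-feature mean over the batch
    var = x.var(axis=0)     # per-feature variance over the batch
    return gamma * (x - mean) / np.sqrt(var + eps) + beta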

#### Access an intermediate layer's output when Dropout comes first, followed by BatchNormalization

model_seq = Sequential()
model_seq.add(Dropout(0.3, input_shape=(10, )))
model_seq.add(BatchNormalization())
model_seq.add(Dense(1))
# check out weights before training
# model_seq.get_weights()
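# Expected weight list for this stack (a sketch): Dropout has no weights;
# BatchNormalization contributes gamma (10,), beta (10,), moving_mean (10,)
# and moving_variance (10,); Dense(1) contributes kernel (10, 1) and bias (1,).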

# compile and train
model_seq.compile(optimizer='SGD', loss='mse')
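# input_array_small / target_small are defined earlier in these notes; a
# hypothetical stand-in so this block runs on its own would be:
# input_array_small = np.random.random((32, 10))
# target_small = np.random.random((32, 1))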

model_seq.fit(input_array_small, target_small, epochs=10)
model_seq.get_weights()
model_seq.save("to_delete.h5")
model_best = load_model("to_delete.h5")

###### compare the weight sets from two different training runs
# model_seq.get_weights()

###### check output
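# K.learning_phase() is fed as an extra input: 0 selects test mode, 1 selects
# train mode. Both calls read the BatchNormalization output (layers[-2]).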
batchNorm_test = K.function(
    [model_best.input, K.learning_phase()],
    [model_best.layers[-2].output])([input_array_small, 0])[0]
batchNorm_train = K.function(
    [model_best.input, K.learning_phase()],
    [model_best.layers[-2].output])([input_array_small, 1])[0]
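# The two phases should differ: in train mode Dropout masks inputs and BN
# normalizes with batch statistics, while in test mode Dropout is a no-op and
# BN uses its moving averages. A quick check (a sketch):
print(np.allclose(batchNorm_test, batchNorm_train))  # expect False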

# dropout_layer_behaviour
"""
Example #3
input_tensor = Input(shape=(2,))
hidden = Dense(2)(input_tensor)
relu_hid = K.relu(hidden)
dense_out = Dense(1)(relu_hid) # raw output, squashed by the sigmoid below
sigmoid_out = K.sigmoid(dense_out)

# inputs and outputs must be layer tensors, not raw tensors made with backend (K) ops
model2 = Model(inputs=input_tensor, outputs=sigmoid_out)

model2.summary() # print a layer-by-layer summary of the model
"""

#######################################################
# Get initial weights and set initial weights
#######################################################
model.get_weights()
init_weights = [
    np.array([[0.61500782, -0.8923322], [-0.83968955, -0.36485523]],
             dtype='float32'),
    np.array([-0.01146811, 0.], dtype='float32'),
    np.array([[1.19760191], [-0.74140114]], dtype='float32'),
    np.array([0.02545028], dtype='float32')
]
model.set_weights(init_weights)

# use SGD optimizer with learning rate 0.1
sgd = SGD(lr=0.1)
# compile with mse loss, reporting accuracy as a metric
model.compile(loss='mse', optimizer=sgd, metrics=['accuracy'])
# compile with binary_crossentropy loss, reporting accuracy as a metric
model1.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
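# A hypothetical toy run to compare the two losses (X_toy / y_toy are
# illustrative stand-ins, not data from the original notes; both compiled
# models are assumed to take the same 2-feature input as this example):
X_toy = np.random.random((100, 2)).astype('float32')
y_toy = (X_toy.sum(axis=1) > 1).astype('float32')
model.fit(X_toy, y_toy, epochs=5, verbose=0)
print(model.evaluate(X_toy, y_toy, verbose=0))  # [mse loss, accuracy]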
"""
If you need to load weights into a *different* architecture (with some layers in common), for instance for fine-tuning or transfer-learning, you can load weights by *layer name*:
"""
model_yaml.load_weights('to_delete_weights.h5', by_name=True)

# confirm the two models share the same weights: 202 parameters in total
(model_json.get_weights()[0] == model_yaml.get_weights()[0]).sum()
(model_json.get_weights()[1] == model_yaml.get_weights()[1]).sum()
"""
For example
Assume original model looks like this:
"""
model1 = Sequential()
model1.add(Dense(2, input_dim=3, name='dense_1'))
model1.add(Dense(3, name='dense_2'))
model1.save_weights("weights1.h5")

# check out the weights
model1.get_weights()

# new model
model2 = Sequential()
model2.add(Dense(2, input_dim=3, name='dense_1'))  # will be loaded
model2.add(Dense(10, name='new_dense'))  # will not be loaded

# load weights from first model; will only affect the first layer, dense_1.
model2.load_weights("weights1.h5", by_name=True)

# check out the weights
model2.get_weights()
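# Quick check (a sketch): dense_1's weights now match across the two models,
# while new_dense keeps its fresh initialization.
print((model1.get_weights()[0] == model2.get_weights()[0]).all())  # True
print(model2.get_weights()[2].shape)  # (2, 10) -- the untouched new_dense kernel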