data_dir = "D:\dev_root\dataset\original\\fer2013_224_NEW_CLEAN" target_data_dir = data_dir + "\Val" layer_name = "fc6_1" sample_files = [ r + '/' + file for r, d, files in os.walk(target_data_dir) for file in files ] num_samples = len(sample_files) model = load_model(emot_model) model.summary() model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output) features_dict = {} features = None for sample_file in sample_files: sample_file_base = os.path.basename(sample_file) img = load_img(sample_file, target_size=(224, 224)) x = img_to_array(img) x = x.reshape((1, ) + x.shape) features = model.predict_on_batch(x) features_dict[sample_file_base] = features[0] num_features = len(features)
import h5py as h5
from keras import backend as K
from keras.initializers import RandomNormal
from keras.layers import (Activation, Conv2D, Dense, Dropout, Flatten, Input,
                          MaxPooling2D)
from keras.models import Model


def Alexnet():
    inputs = Input(shape=(227, 227, 3))

    x = Conv2D(96, (11, 11), strides=(4, 4), padding='valid', name='conv1',
               kernel_initializer=RandomNormal(0.0, 0.01))(inputs)
    # x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True)(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)

    x = Conv2D(256, (5, 5), strides=(1, 1), name='conv2', padding='same',
               bias_initializer='ones',
               kernel_initializer=RandomNormal(0.0, 0.01))(x)
    # x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True)(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)

    x = Conv2D(384, (3, 3), strides=(1, 1), name='conv3', padding='same',
               kernel_initializer=RandomNormal(0.0, 0.01))(x)
    # x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True)(x)
    x = Activation('relu')(x)

    x = Conv2D(384, (3, 3), strides=(1, 1), name='conv4', padding='same',
               bias_initializer='ones',
               kernel_initializer=RandomNormal(0.0, 0.01))(x)
    # x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True)(x)
    x = Activation('relu')(x)

    x = Conv2D(256, (3, 3), strides=(1, 1), name='conv5', padding='same',
               bias_initializer='ones',
               kernel_initializer=RandomNormal(0.0, 0.01))(x)
    # x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True)(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)

    x = Flatten()(x)
    x = Dense(4096, kernel_initializer=RandomNormal(0.0, 0.01),
              bias_initializer='ones', name='fc6')(x)
    # x = BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, center=True, scale=True)(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)

    x = Dense(4096, kernel_initializer=RandomNormal(0.0, 0.01),
              bias_initializer='ones', name='fc7')(x)
    # x = BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, center=True, scale=True)(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)

    x = Dense(1000, kernel_initializer=RandomNormal(0.0, 0.01),
              bias_initializer='ones', name='fc8')(x)
    predictions = Activation('softmax')(x)

    model = Model(inputs=inputs, outputs=predictions)
    model.summary()
    # model.load_weights('again_model_10.hdf5', by_name=True)

    # Copy pretrained kernels and biases layer by layer from the saved HDF5 file.
    for i in ['conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'fc8', 'fc6', 'fc7']:
        layer = model.get_layer(name=i)
        # with h5.File('model_0.54.h5', mode='r') as f:
        with h5.File('model.h5', mode='r') as f:
            x1 = f['model_14/' + i + '_9/kernel:0'][()]  # .value was removed in h5py 3.x
            x2 = f['model_14/' + i + '_9/bias:0'][()]
        K.set_value(layer.weights[0], x1)  # set the weight values
        K.set_value(layer.weights[1], x2)
    # model.load_weights('F:/weight_point/epoch_10.h5py', by_name=True)
    return model
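# Hedged usage sketch (assumption, not from the original code): Alexnet() returns an
# uncompiled Keras model that has already pulled its weights from 'model.h5', so that
# file must exist for the call to succeed. The optimizer, loss, and dummy batch shape
# below are illustrative choices only.
import numpy as np

alexnet = Alexnet()
alexnet.compile(optimizer='sgd',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
dummy_batch = np.random.rand(2, 227, 227, 3).astype("float32")
probs = alexnet.predict(dummy_batch)  # shape (2, 1000): softmax over the fc8 logits
print(probs.shape)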
import numpy as np
from keras.applications.vgg16 import VGG16
from keras.layers import Conv2D, Conv2DTranspose, Input, MaxPooling2D, concatenate
from keras.models import Model


def get_vgg_7conv(ims, nchannels, n_cls):
    # Accept `nchannels` input bands (the original hard-coded 4, which only works
    # when nchannels == 4; the first-layer weight copy below already uses nchannels).
    input_shape_base = (None, None, nchannels)
    img_input = Input(input_shape_base)
    vgg16_base = VGG16(input_tensor=img_input, include_top=False, weights=None)
    # for l in vgg16_base.layers:
    #     l.trainable = True

    # Encoder: reuse the VGG16 blocks and extend them with three extra conv blocks.
    conv1 = vgg16_base.get_layer("block1_conv2").output
    conv2 = vgg16_base.get_layer("block2_conv2").output
    conv3 = vgg16_base.get_layer("block3_conv3").output
    pool3 = vgg16_base.get_layer("block3_pool").output

    conv4 = Conv2D(384, (3, 3), activation="relu", padding='same',
                   kernel_initializer="he_normal", name="block4_conv1")(pool3)
    conv4 = Conv2D(384, (3, 3), activation="relu", padding='same',
                   kernel_initializer="he_normal", name="block4_conv2")(conv4)
    pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(conv4)

    conv5 = Conv2D(512, (3, 3), activation="relu", padding='same',
                   kernel_initializer="he_normal", name="block5_conv1")(pool4)
    conv5 = Conv2D(512, (3, 3), activation="relu", padding='same',
                   kernel_initializer="he_normal", name="block5_conv2")(conv5)
    pool5 = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(conv5)

    conv6 = Conv2D(512, (3, 3), activation="relu", padding='same',
                   kernel_initializer="he_normal", name="block6_conv1")(pool5)
    conv6 = Conv2D(512, (3, 3), activation="relu", padding='same',
                   kernel_initializer="he_normal", name="block6_conv2")(conv6)
    pool6 = MaxPooling2D((2, 2), strides=(2, 2), name='block6_pool')(conv6)

    conv7 = Conv2D(512, (3, 3), activation="relu", padding='same',
                   kernel_initializer="he_normal", name="block7_conv1")(pool6)
    conv7 = Conv2D(512, (3, 3), activation="relu", padding='same',
                   kernel_initializer="he_normal", name="block7_conv2")(conv7)

    # Decoder: transpose-convolve and concatenate with the matching encoder map.
    # padding='same' keeps each upsampled map the same size as its skip connection
    # (the original used padding='valid' on the first upsample, which breaks the concat).
    up8 = concatenate([Conv2DTranspose(384, (3, 3), activation="relu",
                                       kernel_initializer="he_normal",
                                       strides=(2, 2), padding='same')(conv7),
                       conv6], axis=3)
    conv8 = Conv2D(384, (3, 3), activation="relu",
                   kernel_initializer="he_normal", padding='same')(up8)
    up9 = concatenate([Conv2DTranspose(256, (3, 3), activation="relu",
                                       kernel_initializer="he_normal",
                                       strides=(2, 2), padding='same')(conv8),
                       conv5], axis=3)
    conv9 = Conv2D(256, (3, 3), activation="relu",
                   kernel_initializer="he_normal", padding='same')(up9)
    up10 = concatenate([Conv2DTranspose(192, (3, 3), activation="relu",
                                        kernel_initializer="he_normal",
                                        strides=(2, 2), padding='same')(conv9),
                        conv4], axis=3)
    conv10 = Conv2D(192, (3, 3), activation="relu",
                    kernel_initializer="he_normal", padding='same')(up10)
    up11 = concatenate([Conv2DTranspose(128, (3, 3), activation="relu",
                                        kernel_initializer="he_normal",
                                        strides=(2, 2), padding='same')(conv10),
                        conv3], axis=3)
    conv11 = Conv2D(128, (3, 3), activation="relu",
                    kernel_initializer="he_normal", padding='same')(up11)
    up12 = concatenate([Conv2DTranspose(64, (3, 3), activation="relu",
                                        kernel_initializer="he_normal",
                                        strides=(2, 2), padding='same')(conv11),
                        conv2], axis=3)
    conv12 = Conv2D(64, (3, 3), activation="relu",
                    kernel_initializer="he_normal", padding='same')(up12)
    up13 = concatenate([Conv2DTranspose(32, (3, 3), activation="relu",
                                        kernel_initializer="he_normal",
                                        strides=(2, 2), padding='same')(conv12),
                        conv1], axis=3)
    conv13 = Conv2D(32, (3, 3), activation="relu",
                    kernel_initializer="he_normal", padding='same')(up13)
    # conv13 = BatchNormalization(mode=0, axis=1)(conv13)
    conv13 = Conv2D(n_cls, (1, 1), activation='sigmoid')(conv13)
    # conv13 = Conv2D(1, (1, 1))(conv13); conv13 = Activation("sigmoid")(conv13)
    model = Model(img_input, conv13)

    # Recalculate the first-layer weights: copy the pretrained RGB kernels into the
    # first three input channels and leave any extra channels zero-initialized.
    conv1_weights = np.zeros((3, 3, nchannels, 64), dtype="float32")
    vgg = VGG16(include_top=False, input_shape=(ims, ims, 3))
    conv1_weights[:, :, :3, :] = vgg.get_layer("block1_conv1").get_weights()[0]
    bias = vgg.get_layer("block1_conv1").get_weights()[1]
    model.get_layer('block1_conv1').set_weights((conv1_weights, bias))
    return model
import numpy as np
from keras import backend as K
from keras import callbacks
from scipy.optimize import linear_sum_assignment

# `model`, `args`, `x_train`, `labels_train`, `x_test`, `y_test`, `p_test`, and
# PREDICT_VALUES are defined earlier in the script.
history = model.fit(x_train, labels_train,
                    batch_size=args.batch_size,
                    epochs=args.epochs,
                    verbose=1,
                    validation_split=0.1,
                    callbacks=[callbacks.EarlyStopping(min_delta=0.00001, verbose=1),
                               callbacks.ReduceLROnPlateau(verbose=1)])

# Switch the backend to inference mode before extracting intermediate outputs.
K.set_learning_phase(0)

print("********************************************************************")
N = 5
print(N, "prediction samples...")

# Backend function that returns the output of the "sinkhorn" layer for a given input.
get_layer_output = K.function([model.layers[0].input],
                              [model.get_layer(name="sinkhorn").output])

np.set_printoptions(precision=3)
for (orig, pred), (true, perm) in zip(zip(x_test[:N], model.predict(x_test)[:N]),
                                      zip(y_test[:N], p_test)):
    layer_output = get_layer_output([[orig]])[0]
    # The Sinkhorn output is a doubly-stochastic matrix; recover a hard permutation
    # with the Hungarian algorithm (negated because linear_sum_assignment minimizes).
    assignment = linear_sum_assignment(-layer_output[0])
    print("---------------------------------------------------------------")
    print("True permutation: \t", perm)
    print("Predicted permutation:\t", assignment[1])
    print()

if PREDICT_VALUES: