def Face(train=False):
    base_model = VGGFace(input_shape=(200, 200, 3),
                         include_top=False,
                         model='resnet50',
                         weights=None,
                         pooling='avg')
    # load pre-trained weights if used for fine-tuning
    if train:
        base_model.load_weights(BASE_WEIGHTS_PATH)
        for layer in base_model.layers[:len(base_model.layers) - 50]:
            layer.trainable = False
    base_output = base_model.output
    # age 1~93, treated as a classification task
    output_a = Dense(93, activation='softmax', name='predications_age')(base_output)
    # gender 0 or 1
    output_g = Dense(2, activation='softmax', name='predications_gender')(base_output)
    # race 0~4
    output_r = Dense(5, activation='softmax', name='predications_race')(base_output)
    new_model = Model(inputs=base_model.input,
                      outputs=[output_a, output_g, output_r],
                      name='network_based_vggface')
    return new_model
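# --- Usage sketch (added; not part of the original snippet) ---
# Assumes BASE_WEIGHTS_PATH points at the pre-trained resnet50 VGGFace weights
# referenced above, and that age/gender/race labels are one-hot encoded.
# With a multi-output Keras model, per-output losses are keyed by the output
# layer names defined in Face().
model = Face(train=True)
model.compile(optimizer='adam',
              loss={'predications_age': 'categorical_crossentropy',
                    'predications_gender': 'categorical_crossentropy',
                    'predications_race': 'categorical_crossentropy'},
              metrics=['accuracy'])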
def id_loss(images, renders, vggpath):
    # identity loss: MSE between the fc7 VGGFace embeddings of the input
    # images and of the rendered faces
    model = VGGFace(vggpath, False)
    inputs = tf.concat([images, renders], axis=0)
    layers, _, _ = model.encoder(inputs, False)
    z = layers['fc7']
    z_images, z_renders = tf.split(z, 2, axis=0)
    loss = tf.reduce_mean(tf.square(z_images - z_renders), name='id_loss')
    return loss
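# --- Hypothetical wiring of id_loss (added sketch, not from the original repo) ---
# Assumes TF1 graph mode as in the snippet above; 'vggface_weights' is a
# placeholder path, and images/renders are assumed to be preprocessed the way
# the VGGFace encoder expects.
images_ph = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')
renders_ph = tf.placeholder(tf.float32, [None, 224, 224, 3], name='renders')
identity_loss = id_loss(images_ph, renders_ph, vggpath='vggface_weights')
# the resulting scalar is typically added to the full reconstruction objective,
# e.g. total_loss = photo_loss + w_id * identity_loss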
def get_embeddings(filenames):
    # extract faces
    faces = [extract_face(f) for f in filenames]
    # convert into an array of samples
    samples = np.asarray(faces, 'float32')
    # prepare the faces for the model, e.g. center pixels
    samples = preprocess_input(samples, version=2)
    # resnet50 VGGFace2 model that outputs one pooled embedding per face
    model = VGGFace(model='resnet50', include_top=False,
                    input_shape=(224, 224, 3), pooling='avg')
    pred = model.predict(samples)
    return pred
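# --- Verification sketch (added; not part of the original snippet) ---
# Compares two of the pooled resnet50 embeddings with cosine distance; the 0.5
# threshold is only a common starting point and should be tuned per dataset.
# The filenames below are hypothetical examples.
from scipy.spatial.distance import cosine

def is_match(known_embedding, candidate_embedding, thresh=0.5):
    score = cosine(known_embedding, candidate_embedding)
    return score <= thresh

embeddings = get_embeddings(['person_a_1.jpg', 'person_a_2.jpg'])
print('same person?', is_match(embeddings[0], embeddings[1]))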
class CoarseModel(object):

    def __init__(self, vggpath='', basis3dmm=None, trainable=True):
        self.basis3dmm = basis3dmm
        self.model = VGGFace(vggpath, trainable)

    def encoder3DMM(self, imgs, reuse=False):
        with tf.variable_scope('CoarseModel', reuse=tf.AUTO_REUSE):
            layers, _, _ = self.model.encoder(imgs, reuse)
            z = tf.reshape(layers['fc7'], [-1, 4096])
            with tf.variable_scope('decoder', reuse=tf.AUTO_REUSE) as scope:
                para_shape = tf.layers.dense(
                    z, self.basis3dmm['bases_shape'].shape[0],
                    use_bias=False,
                    kernel_initializer=tf.truncated_normal_initializer(stddev=0.002),
                    name='para_shape')
                para_exp = tf.layers.dense(
                    z, self.basis3dmm['bases_exp'].shape[0],
                    use_bias=False,
                    kernel_initializer=tf.zeros_initializer(),
                    name='para_exp')
                para_tex = tf.layers.dense(
                    z, self.basis3dmm['bases_tex'].shape[0],
                    use_bias=False,
                    kernel_initializer=tf.truncated_normal_initializer(stddev=0.002),
                    name='para_tex')
                para_pose = tf.layers.dense(
                    z, 6,
                    use_bias=False,
                    kernel_initializer=tf.zeros_initializer(),
                    name='para_pose')
                para_illum = tf.layers.dense(
                    z, 27,
                    use_bias=False,
                    kernel_initializer=tf.zeros_initializer(),
                    name='para_illum')
        return para_shape, para_exp, para_tex, para_pose, para_illum
def extract_features(arguments, dataset_list):
    PATH = str(arguments.path)
    DATASET = str(arguments.file)
    DESCRIPTOR = str(arguments.desc)
    IMG_WIDTH = int(arguments.width)
    IMG_HEIGHT = int(arguments.height)
    KNOWN_SET_SIZE = float(arguments.known_set_size)
    TRAIN_SET_SIZE = float(arguments.train_set_size)

    matrix_x = []
    matrix_y = []
    matrix_z = []

    vgg_model = None
    if DESCRIPTOR == 'df':
        from vggface import VGGFace
        vgg_model = VGGFace()

    counterA = 0
    for sample in dataset_list:
        sample_path = sample[0]
        sample_name = sample[1]
        subject_path = PATH + sample_path
        subject_image = cv.imread(subject_path, cv.IMREAD_COLOR)
        if DESCRIPTOR == 'hog':
            subject_image = cv.resize(subject_image, (IMG_HEIGHT, IMG_WIDTH))
            feature_vector = Descriptor.get_hog(subject_image)
        elif DESCRIPTOR == 'df':
            feature_vector = Descriptor.get_deep_feature(subject_image, vgg_model, layer_name='fc6')
        matrix_x.append(feature_vector)
        matrix_y.append(sample_name)
        matrix_z.append(sample_path)
        counterA += 1
        print(counterA, sample_path, sample_name)
    return matrix_z, matrix_y, matrix_x
def extract_features(arguments, dataset_list):
    PATH = str(arguments.path)
    DATASET = str(arguments.file)
    DESCRIPTOR = str(arguments.desc)
    IMG_WIDTH = int(arguments.width)
    IMG_HEIGHT = int(arguments.height)

    matrix_x = []
    matrix_y = []
    matrix_z = []

    vgg_model = None
    if DESCRIPTOR == 'df':
        from vggface import VGGFace
        vgg_model = VGGFace()

    counterA = 0
    for sample in dataset_list:
        try:
            sample_path = sample[0]
            sample_name = sample[1]
            subject_path = PATH + sample_path
            subject_image = cv.imread(subject_path, cv.IMREAD_COLOR)
            if DESCRIPTOR == 'hog':
                subject_image = cv.resize(subject_image, (IMG_HEIGHT, IMG_WIDTH))
                feature_vector = Descriptor.get_hog(subject_image)
            elif DESCRIPTOR == 'df':
                feature_vector = Descriptor.get_deep_feature(subject_image, vgg_model, layer_name='fc6')
            matrix_x.append(feature_vector)
            matrix_y.append(sample_name)
            matrix_z.append(sample_path)
            print(counterA, sample_path, sample_name, len(feature_vector))
        except Exception as e:
            print(counterA, sample_path + ' not loaded', str(e))
        counterA += 1
def model_fn(lam, dropout):
    """Build a Keras model: VGGFace (VGG16) backbone with a 101-way softmax head."""
    input_tensor = Input(shape=(img_rows, img_cols, 3))
    vggface16 = VGGFace(include_top=True, model='vgg16', weights='vggface',
                        input_tensor=input_tensor)
    # drop the original classifier layers
    vggface16.layers.pop()
    vggface16.layers.pop()
    # vgg16.outputs = [vgg16.layers[-1].output]
    # vgg16.layers[-1].outbound_nodes = []
    # freeze the layers up to just before the last conv layer
    # vgg16.output_shape = vgg16.layers[-1].output_shape
    # top_model = Flatten()(vgg16.output)
    # top_model = Dense(1024, activation='relu', name='last_2', kernel_initializer=he_normal(seed))(top_model)
    print(vggface16.layers[-1].output)
    top_model = Dropout(dropout)(vggface16.layers[-1].output)
    top_model = Dense(101, activation='softmax',
                      kernel_initializer=glorot_normal(seed), name='last')(top_model)
    model = Model(inputs=vggface16.input, outputs=top_model)
    # for layer in model.layers[:18]:
    #     layer.trainable = False
    # compile_model(model, learning_rate)
    return model
def train_face_model(finetune=True):
    # =============== custom parameters =============== #
    hidden_dim = 512
    img_width, img_height = 224, 224
    nb_class = 16
    One_Class_Train_MAX = 30
    One_Class_Valid_MAX = 10
    nb_train_samples = nb_class * One_Class_Train_MAX
    nb_validation_samples = nb_class * One_Class_Valid_MAX
    nb_epoch = 10
    batch_size = 16
    train_data_dir = 'data/train'
    validation_data_dir = 'data/validation'
    save_model_path = './faceDB/face-model.json'
    save_model_h5 = './faceDB/face-model.h5'
    save_face_index = './faceDB/face-index.json'

    # =============== NN =============== #
    vgg_model = VGGFace(include_top=False, input_shape=(224, 224, 3))
    # print('----------------After Add finetune layers----------------')
    # for l in vgg_model.layers:
    #     print('Name ', l.name, 'trainable', l.trainable)
    last_layer = vgg_model.get_layer('pool5').output
    x = Flatten(name='flatten')(last_layer)
    x = Dense(hidden_dim, activation='relu', name='fc6')(x)
    x = Dense(hidden_dim, activation='relu', name='fc7')(x)
    out = Dense(nb_class, activation='softmax', name='fc8')(x)
    custom_vgg_model = Model(vgg_model.input, out)

    if finetune:
        # print('----------------After Disable Trainable----------------')
        all_layers = custom_vgg_model.layers
        pool5_index = custom_vgg_model.layers.index(custom_vgg_model.get_layer('pool5'))
        for ind, l in enumerate(all_layers):
            if ind <= pool5_index:
                l.trainable = False
        # all_layers[:pool5_index].trainable = False
        # for ind, l in enumerate(all_layers):
        #     print('Name ', l.name, 'trainable', l.trainable, 'index', ind)

    # Train your model as usual.
    # You can try different optimizers
    # opt = optimizers.SGD(lr=1e-5, decay=1e-6)  # OK
    # adagrad = optimizers.Adagrad(decay=1e-6)
    # opt = optimizers.Adadelta()
    opt = optimizers.Adam(lr=1e-5, decay=1e-6)
    custom_vgg_model.compile(optimizer=opt,
                             loss='categorical_crossentropy',
                             metrics=['accuracy'])
    custom_vgg_model.summary()

    X_train, Y_train, X_valid, Y_valid, Face_Label_Dic = load_face_data('data/')
    ftool = FaceTool()
    ftool.write_json(save_face_index, Face_Label_Dic)

    # Start fine-tuning
    custom_vgg_model.fit(
        X_train, Y_train,
        batch_size=batch_size,
        nb_epoch=nb_epoch,
        shuffle=True,
        verbose=1,
        validation_data=(X_valid, Y_valid),
    )

    # Make predictions
    predictions_valid = custom_vgg_model.predict(X_valid, batch_size=batch_size, verbose=1)
    # Cross-entropy loss score
    score = log_loss(Y_valid, predictions_valid)

    # =============== Save Model =============== #
    print("Saved model to disk")
    model_json = custom_vgg_model.to_json()
    with open(save_model_path, "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    custom_vgg_model.save_weights(save_model_h5)

    # =============== Test =============== #
    face_index = prdict_one_face(custom_vgg_model, 'data/test/1.jpg')
    print(Face_Label_Dic[face_index])
    face_index = prdict_one_face(custom_vgg_model, 'data/test/2.jpg')
    print(Face_Label_Dic[face_index])
    face_index = prdict_one_face(custom_vgg_model, 'data/test/3.jpg')
    print(Face_Label_Dic[face_index])
def svm_oneclass(args):
    PATH = str(args.path)
    DATASET = str(args.file)
    DESCRIPTOR = str(args.desc)
    NUM_HASH = int(args.hash)
    IMG_WIDTH = int(args.width)
    IMG_HEIGHT = int(args.height)

    matrix_x = []
    matrix_y = []
    models = []
    splits = []
    nmatrix_x = []
    nmatrix_y = []
    x_train = []
    y_train = []
    nx_train = []
    ny_train = []
    plotting_labels = []
    plotting_scores = []

    vgg_model = None
    if DESCRIPTOR == 'df':
        from vggface import VGGFace
        vgg_model = VGGFace()

    print('>> EXPLORING DATASET')
    dataset_list = load_txt_file(PATH + DATASET)
    known_tuples, unknown_tuples = split_known_unknown_sets(dataset_list, known_set_size=0.5)
    known_train, known_test = split_train_test_sets(known_tuples, train_set_size=0.5)
    print(known_train)

    counterA = 0
    for gallery_sample in known_train:
        sample_path = gallery_sample[0]
        sample_name = gallery_sample[1]
        gallery_path = PATH + sample_path
        gallery_image = cv.imread(gallery_path, cv.IMREAD_COLOR)
        if DESCRIPTOR == 'hog':
            gallery_image = cv.resize(gallery_image, (IMG_HEIGHT, IMG_WIDTH))
            feature_vector = Descriptor.get_hog(gallery_image)
        elif DESCRIPTOR == 'df':
            feature_vector = Descriptor.get_deep_feature(gallery_image, vgg_model, layer_name='fc6')
        matrix_x.append(feature_vector)
        matrix_y.append(sample_name)
        counterA += 1
        print(counterA, sample_path, sample_name)

    print('>> GENERATING FILES TO SVM')
    counterSVM = 0
    for feature in matrix_x:
        y_train.insert(counterSVM, 1)
        x_train.insert(counterSVM, {})
        count_inner = 0
        for pos in feature:
            x_train[counterSVM].update({count_inner: pos})
            count_inner += 1
        counterSVM += 1

    print('>> GENERATING THE SVM MODEL')
    x_train_total = x_train + nx_train
    y_train_total = y_train + ny_train
    besthit = 0
    bestn = 0
    bestg = 0
    for n in range(1, 50):
        for g in range(-15, 3):
            nu = n / 100.0  # float division so nu is not truncated under Python 2
            gamma = pow(2, g)
            parameters = '-s 2 -t 2'
            parameters = parameters + ' -g ' + str(gamma) + ' -n ' + str(nu)
            m = svm_train(y_train_total, x_train_total, parameters)
            hits = 0

            # print('>> LOADING KNOWN PROBE: {0} samples'.format(len(known_test)))
            counterB = 0
            for probe_sample in known_test:
                sample_path = probe_sample[0]
                sample_name = probe_sample[1]
                query_path = PATH + sample_path
                query_image = cv.imread(query_path, cv.IMREAD_COLOR)
                if DESCRIPTOR == 'hog':
                    query_image = cv.resize(query_image, (IMG_HEIGHT, IMG_WIDTH))
                    feature_vector = Descriptor.get_hog(query_image)
                elif DESCRIPTOR == 'df':
                    feature_vector = Descriptor.get_deep_feature(query_image, vgg_model)
                count_inner = 0
                x_teste = []
                y_teste = []
                y_teste.insert(0, 1)
                x_teste.insert(0, {})
                for pos in feature_vector:
                    x_teste[0].update({count_inner: pos})
                    count_inner += 1
                p_label, p_acc, p_val = svm_predict(y_teste, x_teste, m)
                counterB += 1
                # Getting known set plotting relevant information
                plotting_labels.append([(sample_name, 1)])
                plotting_scores.append([(sample_name, p_label[0])])
                if p_label[0] == 1:
                    hits = hits + 1

            print('>> LOADING UNKNOWN PROBE: {0} samples'.format(len(unknown_tuples)))
            counterC = 0
            for probe_sample in unknown_tuples:
                sample_path = probe_sample[0]
                sample_name = probe_sample[1]
                query_path = PATH + sample_path
                query_image = cv.imread(query_path, cv.IMREAD_COLOR)
                if DESCRIPTOR == 'hog':
                    query_image = cv.resize(query_image, (IMG_HEIGHT, IMG_WIDTH))
                    feature_vector = Descriptor.get_hog(query_image)
                elif DESCRIPTOR == 'df':
                    feature_vector = Descriptor.get_deep_feature(query_image, vgg_model)
                count_inner = 0
                x_teste = []
                y_teste = []
                y_teste.insert(0, -1)
                x_teste.insert(0, {})
                for pos in feature_vector:
                    x_teste[0].update({count_inner: pos})
                    count_inner += 1
                p_label, p_acc, p_val = svm_predict(y_teste, x_teste, m)
                counterC += 1
                # Getting unknown set plotting relevant information
                plotting_labels.append([(sample_name, -1)])
                plotting_scores.append([(sample_name, p_label[0])])
                if p_label[0] == -1:
                    hits = hits + 1

            if hits > besthit:
                besthit = hits
                bestn = nu
                bestg = gamma

    # cmc_score_norm = np.divide(cmc_score, counterA)
    # generate_cmc_curve(cmc_score_norm, DATASET + '_' + str(NUM_HASH) + '_' + DESCRIPTOR)
    print(besthit)
    print(bestn)
    print(bestg)
    pr = generate_precision_recall(plotting_labels, plotting_scores)
    roc = generate_roc_curve(plotting_labels, plotting_scores)
    return pr, roc
def plshface(args):
    PATH = str(args.path)
    DATASET = str(args.file)
    DESCRIPTOR = str(args.desc)
    NUM_HASH = int(args.hash)
    IMG_WIDTH = int(args.width)
    IMG_HEIGHT = int(args.height)
    TRAIN_SET_SIZE = float(args.train_set_size)

    matrix_x = []
    matrix_y = []
    splits = []
    plotting_labels = []
    plotting_scores = []

    vgg_model = None
    if DESCRIPTOR == 'df':
        vgg_model = VGGFace()

    print('>> EXPLORING DATASET')
    dataset_list = load_txt_file(PATH + DATASET)
    known_train, known_test = split_train_test_sets(dataset_list, train_set_size=TRAIN_SET_SIZE)

    print('>> LOADING GALLERY: {0} samples'.format(len(known_train)))
    counterA = 0
    for gallery_sample in known_train:
        sample_path = gallery_sample[0]
        sample_name = gallery_sample[1]
        gallery_path = PATH + sample_path
        gallery_image = cv.imread(gallery_path, cv.IMREAD_COLOR)
        if DESCRIPTOR == 'hog':
            gallery_image = cv.resize(gallery_image, (IMG_HEIGHT, IMG_WIDTH))
            feature_vector = Descriptor.get_hog(gallery_image)
        elif DESCRIPTOR == 'df':
            feature_vector = Descriptor.get_deep_feature(gallery_image, vgg_model, layer_name='fc6')
        matrix_x.append(feature_vector)
        matrix_y.append(sample_name)
        counterA += 1
        print(counterA, sample_path, sample_name)

    print('>> SPLITTING POSITIVE/NEGATIVE SETS')
    individuals = list(set(matrix_y))
    cmc_score = np.zeros(len(individuals))
    for index in range(0, NUM_HASH):
        splits.append(generate_pos_neg_dict(individuals))

    print('>> LEARNING PLS MODELS:')
    input_list = itertools.izip(splits, itertools.repeat((matrix_x, matrix_y)))
    models = Parallel(n_jobs=1, verbose=11, backend='threading')(
        map(delayed(learn_plsh_model), input_list))

    print('>> LOADING KNOWN PROBE: {0} samples'.format(len(known_test)))
    counterB = 0
    for probe_sample in known_test:
        sample_path = probe_sample[0]
        sample_name = probe_sample[1]
        query_path = PATH + sample_path
        query_image = cv.imread(query_path, cv.IMREAD_COLOR)
        if DESCRIPTOR == 'hog':
            query_image = cv.resize(query_image, (IMG_HEIGHT, IMG_WIDTH))
            feature_vector = Descriptor.get_hog(query_image)
        elif DESCRIPTOR == 'df':
            feature_vector = Descriptor.get_deep_feature(query_image, vgg_model)

        vote_dict = dict(map(lambda vote: (vote, 0), individuals))
        for model in models:
            pos_list = [key for key, value in model[1].iteritems() if value == 1]
            response = model[0].predict_confidence(feature_vector)
            for pos in pos_list:
                vote_dict[pos] += response
        result = vote_dict.items()
        result.sort(key=lambda tup: tup[1], reverse=True)

        for outer in range(len(individuals)):
            for inner in range(outer + 1):
                if result[inner][0] == sample_name:
                    cmc_score[outer] += 1
                    break

        counterB += 1
        denominator = np.absolute(np.mean([result[1][1], result[2][1]]))
        if denominator > 0:
            output = result[0][1] / denominator
        else:
            output = result[0][1]
        print(counterB, sample_name, result[0][0], output)

        # Getting known set plotting relevant information
        plotting_labels.append([(sample_name, 1)])
        plotting_scores.append([(sample_name, output)])

    cmc_score_norm = np.divide(cmc_score, counterA)
    return cmc_score_norm
def main(args):
    # lr_decay = decay_lr(10, 0.5)
    # opt = Adam(lr=1e-4, decay=1e-5)

    # read labels
    train_csv = pd.read_csv(args.train_csv)
    train_label = [i for i in train_csv['lianxing']]
    # read images
    train_data = np.load(open(args.train, 'rb'))
    train_data = train_data / 255.0
    train_data = np.transpose(train_data, (0, 3, 1, 2))
    print('train data and label shape: ', train_data.shape, np.shape(train_label))
    print('Train Data is done!')
    train_label = to_categorical(train_label, num_classes=args.num_classes)

    if args.val:
        val_csv = pd.read_csv(args.val_csv)
        val_label = [i for i in val_csv['lianxing']]
        val_data = np.load(open(args.val, 'rb'))
        val_data = val_data / 255.0
        val_data = np.transpose(val_data, (0, 3, 1, 2))
        val_label = to_categorical(val_label, num_classes=args.num_classes)
        print('val data and label shape: ', val_data.shape, np.shape(val_label))
        print('Val Data is done!')

    # model
    print('get model....')
    model_name = args.model
    model = VGGFace(include_top=False,
                    model=model_name,
                    weights='vggface',
                    pooling='avg',
                    input_shape=(224, 224, 3),
                    classes=args.num_classes)
    # if you want to change the layers of the model:
    # fc5 = model.layers[-8].output
    # fc6 = Flatten()(fc5)
    # fc7_1 = Dense(256, activation='relu', name='fc7_1')(fc6)
    # dropout7_1 = Dropout(0.3)(fc7_1)
    # fc7_2 = Dense(128, activation='relu', name='fc7_2')(dropout7_1)
    # prediction = Dense(classes, activation='softmax')(fc7_2)
    # model = Model(inputs=model.input, outputs=prediction)
    model.summary()
    model.compile(optimizer=args.opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy', precision, recall, fmeasure])

    # callbacks
    filepath = args.out_dir + model_name + '_model/' + model_name + "-weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
    model_checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True)
    tensorboard_log = TensorBoard(args.out_dir + model_name + '_tensorboard/', write_graph=True, histogram_freq=0)
    csv = CSVLogger(args.out_dir + model_name + '_csv/' + model_name + '.csv')

    # train
    print('Training model......')
    if args.val:
        model.fit(train_data, train_label,
                  batch_size=args.batch_size,
                  epochs=args.epochs,
                  callbacks=[model_checkpoint, csv, tensorboard_log],
                  verbose=1,
                  shuffle=True,
                  validation_data=(val_data, val_label))
    else:
        model.fit(train_data, train_label,
                  batch_size=args.batch_size,
                  epochs=args.epochs,
                  callbacks=[model_checkpoint, csv, tensorboard_log],
                  verbose=1,
                  shuffle=True)
if __name__ == '__main__':
    # Initialize parameters
    root = '/Users/azinasgarian/Documents/Data/UNBC/Images'
    path = './data/high_pain.pkl'
    output_path = '/Users/azinasgarian/Desktop/test/'
    rotation_degree = 15
    seq_len = 15
    shear_x = 8
    shear_y = 8

    print("Building VGG Model ... ")
    # pooling: None, avg or max
    model_conv = VGGFace(model='vgg16', include_top=False,
                         input_shape=(224, 224, 3), pooling='avg')
    print("VGG Model is built! ")

    print("Reading and extracting vgg features ...")
    Data = read_data(path, root, rotation_degree, shear_x, shear_y, seq_len, output_path)
    print("Features are extracted!")

    print("Saving data into .h5 file.")
    dd.io.save('tmp.h5', Data)
    print("Data is saved!")
    print("All Done!")
from vggface import VGGFace
from scipy import misc
import copy
import numpy as np

if __name__ == '__main__':
    model = VGGFace(weights=None)
    model.load_weights('../temp/weight/rcmalli_vggface_tf_weights_tf_ordering.h5')
    print('model loaded.')

    im = misc.imread('../image/ak2.jpg')
    im = misc.imresize(im, (224, 224)).astype(np.float32)
    # swap RGB to BGR channel order
    aux = copy.copy(im)
    im[:, :, 0] = aux[:, :, 2]
    im[:, :, 2] = aux[:, :, 0]
    # Remove image mean (per-channel VGGFace training means)
    im[:, :, 0] -= 93.5940
    im[:, :, 1] -= 104.7624
    im[:, :, 2] -= 129.1863
    im = np.expand_dims(im, axis=0)

    res = model.predict(im)
    print(np.argmax(res[0]))