def load_model(weights=AGE_WEIGHTS):
    stage_num = [3, 3, 3]
    lambda_local = 1
    lambda_d = 1
    model = SSR_net(IMG_SIZE, stage_num, lambda_local, lambda_d)()
    model.load_weights(weights)
    return model
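# Usage sketch (illustrative, not from the original source): assumes the
# module-level IMG_SIZE and AGE_WEIGHTS constants referenced by load_model()
# are in scope, and that the net expects a (N, IMG_SIZE, IMG_SIZE, 3) batch.
def _demo_load_model():
    import numpy as np
    model = load_model()
    batch = np.zeros((1, IMG_SIZE, IMG_SIZE, 3), dtype='float32')
    age = float(model.predict(batch).squeeze())  # scalar age estimate in years
    print('predicted age: {:.1f}'.format(age))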
def model_init(args):
    K.set_learning_phase(0)  # make sure it is testing mode

    # load model and weights
    stage_num = [3, 3, 3]
    lambda_local = 1
    lambda_d = 1
    model_age = SSR_net(args.image_size, stage_num, lambda_local, lambda_d)()
    model_age.load_weights(args.age_model)
    model_gender = SSR_net_general(args.image_size, stage_num, lambda_local, lambda_d)()
    model_gender.load_weights(args.gender_model)
    return model_gender, model_age
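# Illustrative call for model_init() (hypothetical weight paths; the namespace
# only needs the three attributes the function reads):
def _demo_model_init():
    from types import SimpleNamespace
    args = SimpleNamespace(image_size=64,
                           age_model='ssrnet_age.h5',        # hypothetical path
                           gender_model='ssrnet_gender.h5')  # hypothetical path
    model_gender, model_age = model_init(args)
    return model_gender, model_age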
def main():
    weight_file = "./ssrnet_3_3_3_64_1.0_1.0.h5"

    # load model and weights
    img_size = 64
    stage_num = [3, 3, 3]
    lambda_local = 1
    lambda_d = 1
    model = SSR_net(img_size, stage_num, lambda_local, lambda_d)()
    model.load_weights(weight_file)
    model.save('ssrnet.h5')
    model.summary()

    coreml_model = coremltools.converters.keras.convert(
        model,
        input_names="image",
        image_input_names="image",
        output_names="output",
        add_custom_layers=True,
        custom_conversion_functions={"Lambda": convert_lambda})

    # Look at the layers in the converted Core ML model.
    print("\nLayers in the converted model:")
    for i, layer in enumerate(coreml_model._spec.neuralNetwork.layers):
        if layer.HasField("custom"):
            print("Layer %d = %s --> custom layer = %s" % (i, layer.name, layer.custom.className))
        else:
            print("Layer %d = %s" % (i, layer.name))

    coreml_model.save('ssrnet.mlmodel')
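# convert_lambda() is referenced above but defined elsewhere. With
# add_custom_layers=True, coremltools invokes it for every Keras Lambda layer
# and expects a CustomLayerParams proto back. A minimal sketch; the className
# 'SSRNetLambda' is a hypothetical Swift-side class name:
from coremltools.proto import NeuralNetwork_pb2

def convert_lambda(layer):
    params = NeuralNetwork_pb2.CustomLayerParams()
    params.className = 'SSRNetLambda'
    params.description = 'Stand-in for the Keras Lambda layer ' + layer.name
    return params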
def load_models():
    weight_file = os.path.join(
        ssrnet_dir,
        '../pre-trained/morph2/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5')
    weight_file_gender = os.path.join(
        ssrnet_dir,
        '../pre-trained/wiki_gender_models/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5')

    # load model and weights
    stage_num = [3, 3, 3]
    lambda_local = 1
    lambda_d = 1
    model = SSR_net(img_size, stage_num, lambda_local, lambda_d)()
    model.load_weights(weight_file)
    model_gender = SSR_net_general(img_size, stage_num, lambda_local, lambda_d)()
    model_gender.load_weights(weight_file_gender)
    return model, model_gender
def main():
    logger.info('Load InceptionV3 model')
    inceptionv3 = AgenderNetInceptionV3()
    inceptionv3.setWeight('trainweight/inceptionv3_2/model.16-3.7887-0.9004-6.6744.h5')

    logger.info('Load MobileNetV2 model')
    mobilenetv2 = AgenderNetMobileNetV2()
    mobilenetv2.setWeight('trainweight/mobilenetv2/model.10-3.8290-0.8965-6.9498.h5')

    logger.info('Load SSRNet model')
    ssrnet = SSRNet(64, [3, 3, 3], 1.0, 1.0)
    ssrnet.setWeight('trainweight/ssrnet/model.37-7.3318-0.8643-7.1952.h5')

    logger.info('Load pretrain imdb model')
    imdb_model = SSR_net(64, [3, 3, 3], 1.0, 1.0)()
    imdb_model.load_weights("tes_ssrnet/imdb_age_ssrnet_3_3_3_64_1.0_1.0.h5")
    imdb_model_gender = SSR_net_general(64, [3, 3, 3], 1.0, 1.0)()
    imdb_model_gender.load_weights("tes_ssrnet/imdb_gender_ssrnet_3_3_3_64_1.0_1.0.h5")

    logger.info('Load pretrain wiki model')
    wiki_model = SSR_net(64, [3, 3, 3], 1.0, 1.0)()
    wiki_model.load_weights("tes_ssrnet/wiki_age_ssrnet_3_3_3_64_1.0_1.0.h5")
    wiki_model_gender = SSR_net_general(64, [3, 3, 3], 1.0, 1.0)()
    wiki_model_gender.load_weights("tes_ssrnet/wiki_gender_ssrnet_3_3_3_64_1.0_1.0.h5")

    logger.info('Load pretrain morph model')
    morph_model = SSR_net(64, [3, 3, 3], 1.0, 1.0)()
    morph_model.load_weights("tes_ssrnet/morph_age_ssrnet_3_3_3_64_1.0_1.0.h5")
    morph_model_gender = SSR_net_general(64, [3, 3, 3], 1.0, 1.0)()
    morph_model_gender.load_weights("tes_ssrnet/morph_gender_ssrnet_3_3_3_64_1.0_1.0.h5")

    utk = pd.read_csv('dataset/UTKface.csv')
    fgnet = pd.read_csv('dataset/FGNET.csv')
    utk_paths = utk['full_path'].values
    fgnet_paths = fgnet['full_path'].values

    logger.info('Read UTKface aligned images')
    utk_images = [cv2.imread('UTKface_aligned/' + path) for path in tqdm(utk_paths)]
    logger.info('Read FGNET aligned images')
    fgnet_images = [cv2.imread('FGNET_aligned/' + path) for path in tqdm(fgnet_paths)]
    utk_X = np.array(utk_images)
    fgnet_X = np.array(fgnet_images)

    utk_pred_age = dict()
    utk_pred_gender = dict()
    fgnet_pred_age = dict()

    logger.info('Predict with InceptionV3')
    start = time.time()
    utk_pred_gender['inceptionv3'], utk_pred_age['inceptionv3'] = get_result(inceptionv3, utk_X)
    _, fgnet_pred_age['inceptionv3'] = get_result(inceptionv3, fgnet_X)
    elapsed = time.time() - start
    logger.info('Time elapsed {:.2f} sec'.format(elapsed))
    del utk_X, fgnet_X

    logger.info('Resize image to 96 for MobileNetV2')
    utk_images = [cv2.resize(image, (96, 96), interpolation=cv2.INTER_CUBIC) for image in tqdm(utk_images)]
    fgnet_images = [cv2.resize(image, (96, 96), interpolation=cv2.INTER_CUBIC) for image in tqdm(fgnet_images)]
    utk_X = np.array(utk_images)
    fgnet_X = np.array(fgnet_images)

    logger.info('Predict with MobileNetV2')
    start = time.time()
    utk_pred_gender['mobilenetv2'], utk_pred_age['mobilenetv2'] = get_result(mobilenetv2, utk_X)
    _, fgnet_pred_age['mobilenetv2'] = get_result(mobilenetv2, fgnet_X)
    elapsed = time.time() - start
    logger.info('Time elapsed {:.2f} sec'.format(elapsed))
    del utk_X, fgnet_X

    logger.info('Resize image to 64 for SSR-Net')
    utk_images = [cv2.resize(image, (64, 64), interpolation=cv2.INTER_CUBIC) for image in tqdm(utk_images)]
    fgnet_images = [cv2.resize(image, (64, 64), interpolation=cv2.INTER_CUBIC) for image in tqdm(fgnet_images)]
    utk_X = np.array(utk_images)
    fgnet_X = np.array(fgnet_images)

    logger.info('Predict with SSR-Net')
    start = time.time()
    utk_pred_gender['ssrnet'], utk_pred_age['ssrnet'] = get_result(ssrnet, utk_X)
    _, fgnet_pred_age['ssrnet'] = get_result(ssrnet, fgnet_X)
    elapsed = time.time() - start
    logger.info('Time elapsed {:.2f} sec'.format(elapsed))

    logger.info('Predict with IMDB_SSR-Net')
    start = time.time()
    utk_pred_gender['ssrnet-imdb'] = np.around(imdb_model_gender.predict(utk_X).squeeze()).astype('int')
    utk_pred_age['ssrnet-imdb'] = imdb_model.predict(utk_X).squeeze()
    fgnet_pred_age['ssrnet-imdb'] = imdb_model.predict(fgnet_X).squeeze()
    elapsed = time.time() - start
    logger.info('Time elapsed {:.2f} sec'.format(elapsed))

    logger.info('Predict with Wiki_SSR-Net')
    start = time.time()
    utk_pred_gender['ssrnet-wiki'] = np.around(wiki_model_gender.predict(utk_X).squeeze()).astype('int')
    utk_pred_age['ssrnet-wiki'] = wiki_model.predict(utk_X).squeeze()
    fgnet_pred_age['ssrnet-wiki'] = wiki_model.predict(fgnet_X).squeeze()
    elapsed = time.time() - start
    logger.info('Time elapsed {:.2f} sec'.format(elapsed))

    logger.info('Predict with Morph_SSR-Net')
    start = time.time()
    utk_pred_gender['ssrnet-morph'] = np.around(morph_model_gender.predict(utk_X).squeeze()).astype('int')
    utk_pred_age['ssrnet-morph'] = morph_model.predict(utk_X).squeeze()
    fgnet_pred_age['ssrnet-morph'] = morph_model.predict(fgnet_X).squeeze()
    elapsed = time.time() - start
    logger.info('Time elapsed {:.2f} sec'.format(elapsed))

    utk_pred_age = pd.DataFrame.from_dict(utk_pred_age)
    utk_pred_gender = pd.DataFrame.from_dict(utk_pred_gender)
    fgnet_pred_age = pd.DataFrame.from_dict(fgnet_pred_age)

    utk_pred_age = pd.concat([utk['age'], utk_pred_age], axis=1)
    utk_pred_gender = pd.concat([utk['gender'], utk_pred_gender], axis=1)
    fgnet_pred_age = pd.concat([fgnet['age'], fgnet_pred_age], axis=1)

    utk_pred_age.to_csv('result/utk_age_prediction.csv', index=False)
    utk_pred_gender.to_csv('result/utk_gender_prediction.csv', index=False)
    fgnet_pred_age.to_csv('result/fgnet_age_prediction.csv', index=False)
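# Possible follow-up (not in the original): summarize the age MAE per model
# from the CSV written above; the 'age' column holds the ground truth.
def report_utk_age_mae():
    df = pd.read_csv('result/utk_age_prediction.csv')
    for col in df.columns.drop('age'):
        mae = (df[col] - df['age']).abs().mean()
        print('{:>14s} MAE: {:.2f}'.format(col, mae))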
def main():
    K.set_learning_phase(0)  # make sure it is testing mode
    weight_file = "../pre-trained/wiki/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5"

    # for face detection
    # detector = dlib.get_frontal_face_detector()
    detector = MTCNN()
    try:
        os.mkdir('./img')
    except OSError:
        pass

    # load model and weights
    img_size = 64
    stage_num = [3, 3, 3]
    lambda_local = 1
    lambda_d = 1
    model = SSR_net(img_size, stage_num, lambda_local, lambda_d)()
    model.load_weights(weight_file)

    clip = VideoFileClip(sys.argv[1])  # can be gif or movie

    # python version
    pyFlag = ''
    if len(sys.argv) < 3:
        pyFlag = '2'  # default: use moviepy to show the result; this works on Python 2.7 and 3.5
    elif len(sys.argv) == 3:
        pyFlag = sys.argv[2]  # python version
    else:
        print('Wrong input!')
        sys.exit()

    img_idx = 0
    detected = ''  # make this not a local variable
    time_detection = 0
    time_network = 0
    time_plot = 0
    ad = 0.4
    skip_frame = 5  # run detection and the network forward pass once every 5 frames

    for img in clip.iter_frames():
        img_idx = img_idx + 1
        input_img = img  # with Python 2.7 and moviepy the image is shown without a channel flip

        if pyFlag == '3':
            input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        img_h, img_w, _ = np.shape(input_img)
        input_img = cv2.resize(input_img, (1024, int(1024 * img_h / img_w)))
        img_h, img_w, _ = np.shape(input_img)

        if img_idx == 1 or img_idx % skip_frame == 0:
            # detect faces using the MTCNN detector
            start_time = timeit.default_timer()
            detected = detector.detect_faces(input_img)
            elapsed_time = timeit.default_timer() - start_time
            time_detection = time_detection + elapsed_time

            faces = np.empty((len(detected), img_size, img_size, 3))
            for i, d in enumerate(detected):
                print(i)
                print(d['confidence'])
                if d['confidence'] > 0.95:
                    x1, y1, w, h = d['box']
                    x2 = x1 + w
                    y2 = y1 + h
                    xw1 = max(int(x1 - ad * w), 0)
                    yw1 = max(int(y1 - ad * h), 0)
                    xw2 = min(int(x2 + ad * w), img_w - 1)
                    yw2 = min(int(y2 + ad * h), img_h - 1)
                    cv2.rectangle(input_img, (x1, y1), (x2, y2), (255, 0, 0), 2)
                    # cv2.rectangle(img, (xw1, yw1), (xw2, yw2), (255, 0, 0), 2)
                    faces[i, :, :, :] = cv2.resize(input_img[yw1:yw2 + 1, xw1:xw2 + 1, :], (img_size, img_size))

            start_time = timeit.default_timer()
            if len(detected) > 0:
                # predict ages of the detected faces
                results = model.predict(faces)
                predicted_ages = results

            # draw results
            for i, d in enumerate(detected):
                if d['confidence'] > 0.95:
                    x1, y1, w, h = d['box']
                    label = "{}".format(int(predicted_ages[i]))
                    draw_label(input_img, (x1, y1), label)
            elapsed_time = timeit.default_timer() - start_time
            time_network = time_network + elapsed_time

            start_time = timeit.default_timer()
            if pyFlag == '2':
                img_clip = ImageClip(input_img)
                img_clip.show()
                cv2.imwrite('img/' + str(img_idx) + '.png', cv2.cvtColor(input_img, cv2.COLOR_RGB2BGR))
            elif pyFlag == '3':
                cv2.imshow("result", input_img)
                cv2.imwrite('img/' + str(img_idx) + '.png', cv2.cvtColor(input_img, cv2.COLOR_RGB2BGR))
            elapsed_time = timeit.default_timer() - start_time
            time_plot = time_plot + elapsed_time
        else:
            for i, d in enumerate(detected):
                if d['confidence'] > 0.95:
                    x1, y1, w, h = d['box']
                    x2 = x1 + w
                    y2 = y1 + h
                    xw1 = max(int(x1 - ad * w), 0)
                    yw1 = max(int(y1 - ad * h), 0)
                    xw2 = min(int(x2 + ad * w), img_w - 1)
                    yw2 = min(int(y2 + ad * h), img_h - 1)
                    cv2.rectangle(input_img, (x1, y1), (x2, y2), (255, 0, 0), 2)
                    # cv2.rectangle(img, (xw1, yw1), (xw2, yw2), (255, 0, 0), 2)
                    faces[i, :, :, :] = cv2.resize(input_img[yw1:yw2 + 1, xw1:xw2 + 1, :], (img_size, img_size))

            # draw results
            for i, d in enumerate(detected):
                if d['confidence'] > 0.95:
                    x1, y1, w, h = d['box']
                    label = "{}".format(int(predicted_ages[i]))
                    draw_label(input_img, (x1, y1), label)

            start_time = timeit.default_timer()
            if pyFlag == '2':
                img_clip = ImageClip(input_img)
                img_clip.show()
            elif pyFlag == '3':
                cv2.imshow("result", input_img)
            elapsed_time = timeit.default_timer() - start_time
            time_plot = time_plot + elapsed_time

        # Show the time cost (fps)
        print('avefps_time_detection:', img_idx / time_detection)
        print('avefps_time_network:', img_idx / time_network)
        print('avefps_time_plot:', img_idx / time_plot)
        print('===============================')
        if pyFlag == '3':
            key = cv2.waitKey(30)
            if key == 27:
                break
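# draw_label() is used above but defined elsewhere in the original demo; a
# minimal stand-in with the same call shape (OpenCV text on a filled box):
def draw_label(image, point, label, font=cv2.FONT_HERSHEY_SIMPLEX,
               font_scale=1, thickness=2):
    size = cv2.getTextSize(label, font, font_scale, thickness)[0]
    x, y = point
    cv2.rectangle(image, (x, y - size[1]), (x + size[0], y), (255, 0, 0), cv2.FILLED)
    cv2.putText(image, label, (x, y), font, font_scale, (255, 255, 255), thickness)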
async def main():
    weight_file = "../pre-trained/megaface_asian/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5"
    weight_file_gender = "../pre-trained/wiki_gender_models/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5"

    mtcnn = False
    if mtcnn:
        detector = MTCNN()
    else:
        detector = cv2.CascadeClassifier('lbpcascade_frontalface_improved.xml')

    # load model and weights
    img_size = 64
    stage_num = [3, 3, 3]
    lambda_local = 1
    lambda_d = 1
    model = SSR_net(img_size, stage_num, lambda_local, lambda_d)()
    model.load_weights(weight_file)
    model_gender = SSR_net_general(img_size, stage_num, lambda_local, lambda_d)()
    model_gender.load_weights(weight_file_gender)

    # capture video
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1024)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 768)

    detected = ''
    time_detection = 0
    time_network = 0
    time_plot = 0
    ad = 0.5
    img_idx = 0
    skip_frame = 10
    sleep = 1
    send_post_time = time.time() + sleep

    while True:
        # get video frame
        img_idx = img_idx + 1
        ret, input_img = cap.read()
        img_h, img_w, _ = np.shape(input_img)

        if img_idx == 1 or img_idx % skip_frame == 0:
            time_detection = 0
            time_network = 0
            time_plot = 0

            # detect faces (MTCNN or LBP cascade)
            start_time = timeit.default_timer()
            gray_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
            if mtcnn:
                detected = detector.detect_faces(input_img)
            else:
                detected = detector.detectMultiScale(gray_img, 1.1)
            elapsed_time = timeit.default_timer() - start_time
            time_detection = time_detection + elapsed_time
            faces = np.empty((len(detected), img_size, img_size, 3))

        input_img, time_network, time_plot, resultados = show_results(
            detected, input_img, faces, ad, img_size, img_w, img_h, model,
            model_gender, time_detection, time_network, time_plot, mtcnn)

        # Show the time cost (fps)
        # print('time_detection:', time_detection)
        # print('time_network:', time_network)
        # print('time_plot:', time_plot)
        # print('===============================')
        cv2.waitKey(1)

        if send_post_time < time.time():
            send_post_time = time.time() + sleep
            await nested(resultados)
def main(): weight_file = "../pre-trained/morph2/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5" weight_file_gender = "../pre-trained/wiki_gender_models/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5" face_cascade = cv2.CascadeClassifier('lbpcascade_frontalface_improved.xml') try: os.mkdir('./img') except OSError: pass # load model and weights img_size = 64 stage_num = [3, 3, 3] lambda_local = 1 lambda_d = 1 model = SSR_net(img_size, stage_num, lambda_local, lambda_d)() model.load_weights(weight_file) model_gender = SSR_net_general(img_size, stage_num, lambda_local, lambda_d)() model_gender.load_weights(weight_file_gender) # capture video cap = cv2.VideoCapture(0) cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1024 * 1) cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 768 * 1) img_idx = 0 detected = '' #make this not local variable time_detection = 0 time_network = 0 time_plot = 0 skip_frame = 5 # every 5 frame do 1 detection and network forward propagation ad = 0.5 while True: # get video frame ret, input_img = cap.read() img_idx = img_idx + 1 img_h, img_w, _ = np.shape(input_img) if img_idx == 1 or img_idx % skip_frame == 0: time_detection = 0 time_network = 0 time_plot = 0 # detect faces using LBP detector gray_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY) start_time = timeit.default_timer() detected = face_cascade.detectMultiScale(gray_img, 1.1) elapsed_time = timeit.default_timer() - start_time time_detection = time_detection + elapsed_time faces = np.empty((len(detected), img_size, img_size, 3)) input_img, time_network, time_plot = draw_results( detected, input_img, faces, ad, img_size, img_w, img_h, model, model_gender, time_detection, time_network, time_plot) cv2.imwrite('img/' + str(img_idx) + '.png', input_img) else: input_img, time_network, time_plot = draw_results( detected, input_img, faces, ad, img_size, img_w, img_h, model, model_gender, time_detection, time_network, time_plot) #Show the time cost (fps) print('avefps_time_detection:', 1 / time_detection) print('avefps_time_network:', skip_frame / time_network) print('avefps_time_plot:', skip_frame / time_plot) print('===============================') key = cv2.waitKey(1)
def main():
    # dynamically allocate GPU memory
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    K.tensorflow_backend.set_session(sess)

    print('[LOAD DATA]')
    images, ageLabel, genderLabel = prepData(64)

    n_fold = 1
    img_size = 64
    stage_num = [3, 3, 3]
    lambda_local = 1
    lambda_d = 1

    imdb_model = SSR_net(img_size, stage_num, lambda_local, lambda_d)()
    imdb_model.compile(optimizer='adam', loss="mae", metrics=["mae"])
    imdb_model.load_weights("imdb_age_ssrnet_3_3_3_64_1.0_1.0.h5")
    imdb_model_gender = SSR_net_general(img_size, stage_num, lambda_local, lambda_d)()
    imdb_model_gender.compile(optimizer='adam', loss="mae", metrics=["binary_accuracy"])
    imdb_model_gender.load_weights("imdb_gender_ssrnet_3_3_3_64_1.0_1.0.h5")

    wiki_model = SSR_net(img_size, stage_num, lambda_local, lambda_d)()
    wiki_model.compile(optimizer='adam', loss="mae", metrics=["mae"])
    wiki_model.load_weights("wiki_age_ssrnet_3_3_3_64_1.0_1.0.h5")
    wiki_model_gender = SSR_net_general(img_size, stage_num, lambda_local, lambda_d)()
    wiki_model_gender.compile(optimizer='adam', loss="mae", metrics=["binary_accuracy"])
    wiki_model_gender.load_weights("wiki_gender_ssrnet_3_3_3_64_1.0_1.0.h5")

    morph_model = SSR_net(img_size, stage_num, lambda_local, lambda_d)()
    morph_model.compile(optimizer='adam', loss="mae", metrics=["mae"])
    morph_model.load_weights("morph_age_ssrnet_3_3_3_64_1.0_1.0.h5")
    morph_model_gender = SSR_net_general(img_size, stage_num, lambda_local, lambda_d)()
    morph_model_gender.compile(optimizer='adam', loss="mae", metrics=["binary_accuracy"])
    morph_model_gender.load_weights("morph_gender_ssrnet_3_3_3_64_1.0_1.0.h5")

    print('[K-FOLD] Started...')
    kf = KFold(n_splits=10, shuffle=True, random_state=1)
    kf_split = kf.split(ageLabel)
    for _, test_idx in kf_split:
        print('[K-FOLD] Fold {}'.format(n_fold))
        testImages = images[test_idx]
        testAge = ageLabel[test_idx]
        testGender = genderLabel[test_idx]

        scores = evaluate(imdb_model, testImages, testAge)
        print('imdb Age score:', scores)
        scores = evaluate(wiki_model, testImages, testAge)
        print('wiki Age score:', scores)
        scores = evaluate(morph_model, testImages, testAge)
        print('morph Age score:', scores)

        scores = evaluate(imdb_model_gender, testImages, testGender)
        print('imdb Gender score:', scores)
        scores = evaluate(wiki_model_gender, testImages, testGender)
        print('wiki Gender score:', scores)
        scores = evaluate(morph_model_gender, testImages, testGender)
        print('morph Gender score:', scores)

        n_fold += 1
        del testImages, testAge, testGender, scores
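# evaluate() is referenced above but not shown; given the compile() calls, a
# plausible sketch is a thin wrapper over Keras' own evaluation:
def evaluate(model, images, labels):
    # returns [loss, metric] as configured in model.compile (mae / binary_accuracy)
    return model.evaluate(images, labels, batch_size=128, verbose=0)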
def main():
    args = get_args()
    input_path1 = args.input1
    input_path2 = args.input2
    db_name = args.db
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    netType1 = args.netType1
    netType2 = args.netType2

    logging.debug("Loading training data...")
    image1, age1, image_size = load_data_npz(input_path1)
    logging.debug("Loading testing data...")
    image2, age2, image_size = load_data_npz(input_path2)

    start_decay_epoch = [30, 60]
    optMethod = Adam()

    stage_num = [3, 3, 3]
    lambda_local = 0.25 * (netType1 % 5)
    lambda_d = 0.25 * (netType2 % 5)

    model = SSR_net(image_size, stage_num, lambda_local, lambda_d)()
    save_name = 'ssrnet_%d_%d_%d_%d_%s_%s' % (stage_num[0], stage_num[1], stage_num[2],
                                              image_size, lambda_local, lambda_d)
    model.compile(optimizer=optMethod, loss=["mae"], metrics={'pred_a': 'mae'})

    if db_name == "megaage":
        weight_file = "./pre-trained/wiki/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5"
        # weight_file = "./pre-trained/imdb/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5"
        model.load_weights(weight_file)

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    logging.debug("Saving model...")
    mk_dir(db_name + "_models")
    mk_dir(db_name + "_models/batch_size_%d/" % (batch_size))
    mk_dir(db_name + "_models/batch_size_%d/" % (batch_size) + save_name)
    mk_dir(db_name + "_checkpoints")
    mk_dir(db_name + "_checkpoints/batch_size_%d/" % (batch_size))
    plot_model(model,
               to_file=db_name + "_models/batch_size_%d/" % (batch_size) + save_name + "/" + save_name + ".png")

    with open(os.path.join(db_name + "_models/batch_size_%d/" % (batch_size) + save_name,
                           save_name + '.json'), "w") as f:
        f.write(model.to_json())

    decaylearningrate = TYY_callbacks.DecayLearningRate(start_decay_epoch)
    callbacks = [
        ModelCheckpoint(db_name + "_checkpoints/batch_size_%d/" % (batch_size) +
                        "weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto"),
        decaylearningrate
    ]

    logging.debug("Running training...")

    data_num = len(image1) + len(image2)
    indexes1 = np.arange(len(image1))
    indexes2 = np.arange(len(image2))
    np.random.shuffle(indexes1)
    np.random.shuffle(indexes2)
    x_train = image1[indexes1]
    x_test = image2[indexes2]
    y_train_a = age1[indexes1]
    y_test_a = age2[indexes2]
    train_num = len(image1)

    hist = model.fit_generator(generator=data_generator_reg(X=x_train, Y=y_train_a, batch_size=batch_size),
                               steps_per_epoch=train_num // batch_size,
                               validation_data=(x_test, [y_test_a]),
                               epochs=nb_epochs,
                               verbose=1,
                               callbacks=callbacks)

    logging.debug("Saving weights...")
    model.save_weights(os.path.join(db_name + "_models/batch_size_%d/" % (batch_size) + save_name,
                                    save_name + '.h5'),
                       overwrite=True)
    pd.DataFrame(hist.history).to_hdf(os.path.join(db_name + "_models/batch_size_%d/" % (batch_size) + save_name,
                                                   'history_' + save_name + '.h5'),
                                      "history")
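# data_generator_reg() comes from the original SSR-Net training utilities; a
# minimal equivalent (shuffled batches, no augmentation) might look like:
def data_generator_reg(X, Y, batch_size):
    n = len(X)
    while True:
        idx = np.random.permutation(n)
        for i in range(0, n - batch_size + 1, batch_size):
            b = idx[i:i + batch_size]
            yield X[b], Y[b]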
def main(): K.set_learning_phase(0) # make sure its testing mode weight_file = "../pre-trained/wiki/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5" weight_file_gender = "../pre-trained/wiki_gender_models/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5" # for face detection # detector = dlib.get_frontal_face_detector() detector = MTCNN() try: os.mkdir('./img') except OSError: pass # load model and weights img_size = 64 stage_num = [3, 3, 3] lambda_local = 1 lambda_d = 1 model = SSR_net(img_size, stage_num, lambda_local, lambda_d)() model.load_weights(weight_file) model_gender = SSR_net_general(img_size, stage_num, lambda_local, lambda_d)() model_gender.load_weights(weight_file_gender) clip = VideoFileClip(sys.argv[1]) # can be gif or movie #python version pyFlag = '' if len(sys.argv) < 3: pyFlag = '2' #default to use moviepy to show, this can work on python2.7 and python3.5 elif len(sys.argv) == 3: pyFlag = sys.argv[2] #python version else: print('Wrong input!') sys.exit() img_idx = 0 detected = '' #make this not local variable time_detection = 0 time_network = 0 time_plot = 0 ad = 0.4 skip_frame = 1 # every 5 frame do 1 detection and network forward propagation for img in clip.iter_frames(): img_idx = img_idx + 1 input_img = img #using python2.7 with moivepy to show th image without channel flip if pyFlag == '3': input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img_h, img_w, _ = np.shape(input_img) input_img = cv2.resize(input_img, (1024, int(1024 * img_h / img_w))) img_h, img_w, _ = np.shape(input_img) if img_idx == 1 or img_idx % skip_frame == 0: # detect faces using dlib detector start_time = timeit.default_timer() detected = detector.detect_faces(input_img) elapsed_time = timeit.default_timer() - start_time time_detection = time_detection + elapsed_time faces = np.empty((len(detected), img_size, img_size, 3)) input_img, time_network, time_plot = draw_results( detected, input_img, faces, ad, img_size, img_w, img_h, model, model_gender, time_detection, time_network, time_plot) #Show the time cost (fps) # print('avefps_time_detection:',img_idx/time_detection) # print('avefps_time_network:',img_idx/time_network) # print('avefps_time_plot:',img_idx/time_plot) # print('===============================') if pyFlag == '3': key = cv2.waitKey() if key == 27: break
age_net = None

# Load age and gender models
if age_gender_kind == 'ssrnet':
    # Setup global parameters
    face_size = 64
    face_padding_ratio = 0.10
    # Default parameters for SSR-Net
    stage_num = [3, 3, 3]
    lambda_local = 1
    lambda_d = 1
    # Initialize gender net
    gender_net = SSR_net_general(face_size, stage_num, lambda_local, lambda_d)()
    gender_net.load_weights('age_gender_ssrnet_models/ssrnet_gender_3_3_3_64_1.0_1.0.h5')
    # Initialize age net
    age_net = SSR_net(face_size, stage_num, lambda_local, lambda_d)()
    age_net.load_weights('age_gender_ssrnet_models/ssrnet_age_3_3_3_64_1.0_1.0.h5')
else:
    # Setup global parameters
    face_size = 227
    face_padding_ratio = 0.0
    # Initialize gender detector
    gender_net = cv.dnn.readNetFromCaffe('gender_deploy.prototxt', 'gender_net.caffemodel')
    # Initialize age detector
    age_net = cv.dnn.readNetFromCaffe('age_deploy.prototxt', 'age_net.caffemodel')

# Output labels for gender_net and age_net
Genders = ['Male', 'Female']
Ages = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']


def calculateParameters(height_orig, width_orig):
    sess = K.get_session()
    from tensorflow.python.framework import graph_util, graph_io
    init_graph = sess.graph.as_graph_def()
    main_graph = graph_util.convert_variables_to_constants(sess, init_graph, out_nodes)
    graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False)
    if log_tensorboard:
        from tensorflow.python.tools import import_pb_to_tensorboard
        import_pb_to_tensorboard.import_to_tensorboard(
            os.path.join(output_dir, model_name), output_dir)


# weight_file = "../pre-trained/morph2/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5"
weight_file = "../pre-trained/wiki/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5"
# weight_file = "../pre-trained/imdb/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5"

img_size = 64
stage_num = [3, 3, 3]
lambda_local = 1
lambda_d = 1
model = SSR_net(img_size, stage_num, lambda_local, lambda_d)()
model.load_weights(weight_file)

output_dir = os.path.join(os.getcwd(), "out")
keras_to_tensorflow(model, output_dir=output_dir, model_name="ssr_model.pb")
print("MODEL SAVED")
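# Quick sanity check (illustrative, not from the original): reload the frozen
# graph with the TF1 API used above and count its ops.
import tensorflow as tf

def check_frozen_graph(pb_path):
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(pb_path, 'rb') as f:
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name='')
    print('loaded {} ops from {}'.format(len(graph.get_operations()), pb_path))

check_frozen_graph(os.path.join(output_dir, 'ssr_model.pb'))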
def check_inference_time():
    age_layer = 'age_prediction'
    gender_layer = 'gender_prediction'

    logger.info('Load InceptionV3 model')
    inceptionv3 = AgenderNetInceptionV3()
    inceptionv3.setWeight('trainweight/inceptionv3_2/model.16-3.7887-0.9004-6.6744.h5')
    inceptionv3_age = Model(inputs=inceptionv3.input, outputs=inceptionv3.get_layer(age_layer).output)
    inceptionv3_gender = Model(inputs=inceptionv3.input, outputs=inceptionv3.get_layer(gender_layer).output)

    logger.info('Load MobileNetV2 model')
    mobilenetv2 = AgenderNetMobileNetV2()
    mobilenetv2.setWeight('trainweight/mobilenetv2/model.10-3.8290-0.8965-6.9498.h5')
    mobilenetv2_age = Model(inputs=mobilenetv2.input, outputs=mobilenetv2.get_layer(age_layer).output)
    mobilenetv2_gender = Model(inputs=mobilenetv2.input, outputs=mobilenetv2.get_layer(gender_layer).output)

    logger.info('Load SSRNet model')
    ssrnet = SSRNet(64, [3, 3, 3], 1.0, 1.0)
    ssrnet.setWeight('trainweight/agender_ssrnet/model.31-7.5452-0.8600-7.4051.h5')
    ssrnet_age = Model(inputs=ssrnet.input, outputs=ssrnet.get_layer(age_layer).output)
    ssrnet_gender = Model(inputs=ssrnet.input, outputs=ssrnet.get_layer(gender_layer).output)

    logger.info('Load pretrain imdb model')
    imdb_model = SSR_net(64, [3, 3, 3], 1.0, 1.0)()
    imdb_model.load_weights("tes_ssrnet/imdb_age_ssrnet_3_3_3_64_1.0_1.0.h5")
    imdb_model_gender = SSR_net_general(64, [3, 3, 3], 1.0, 1.0)()
    imdb_model_gender.load_weights("tes_ssrnet/imdb_gender_ssrnet_3_3_3_64_1.0_1.0.h5")

    images = cv2.imread('UTKface_aligned/part1/34_1_0_20170103183147490.jpg')
    image = cv2.resize(images, (64, 64), interpolation=cv2.INTER_CUBIC)
    X = image.astype('float16')
    X = np.expand_dims(X, axis=0)

    logger.info('Predict age and gender with SSR-Net')
    wrapped = wrapper(predictone, ssrnet, X)
    logger.info(proces_time(wrapped))
    logger.info('Predict age with SSR-Net')
    wrapped = wrapper(predictone, ssrnet_age, X)
    logger.info(proces_time(wrapped))
    logger.info('Predict gender with SSR-Net')
    wrapped = wrapper(predictone, ssrnet_gender, X)
    logger.info(proces_time(wrapped))
    logger.info('Predict age with IMDB_SSR-Net')
    wrapped = wrapper(predictone, imdb_model, X)
    logger.info(proces_time(wrapped))
    logger.info('Predict gender with IMDB_SSR-Net')
    wrapped = wrapper(predictone, imdb_model_gender, X)
    logger.info(proces_time(wrapped))
    del X

    image = cv2.resize(images, (96, 96), interpolation=cv2.INTER_CUBIC)
    X = image.astype('float16')
    X = np.expand_dims(X, axis=0)
    logger.info('Predict age and gender with MobileNetV2')
    wrapped = wrapper(predictone, mobilenetv2, X)
    logger.info(proces_time(wrapped))
    logger.info('Predict age with MobileNetV2')
    wrapped = wrapper(predictone, mobilenetv2_age, X)
    logger.info(proces_time(wrapped))
    logger.info('Predict gender with MobileNetV2')
    wrapped = wrapper(predictone, mobilenetv2_gender, X)
    logger.info(proces_time(wrapped))
    del X

    X = images.astype('float16')
    X = np.expand_dims(X, axis=0)
    logger.info('Predict age and gender with InceptionV3')
    wrapped = wrapper(predictone, inceptionv3, X)
    logger.info(proces_time(wrapped))
    logger.info('Predict age with InceptionV3')
    wrapped = wrapper(predictone, inceptionv3_age, X)
    logger.info(proces_time(wrapped))
    logger.info('Predict gender with InceptionV3')
    wrapped = wrapper(predictone, inceptionv3_gender, X)
    logger.info(proces_time(wrapped))
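# wrapper() and proces_time() are timing helpers referenced above; a common
# timeit-based pattern they plausibly follow (exact semantics are an assumption):
import timeit

def wrapper(func, *args, **kwargs):
    def wrapped():
        return func(*args, **kwargs)
    return wrapped

def proces_time(wrapped, number=10):
    # average seconds per call over `number` runs
    return timeit.timeit(wrapped, number=number) / number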
lambda_local = 0.25 * (netType1 % 5)
lambda_d = 0.25 * (netType2 % 5)

logging.debug("Loading testing data...")
image2, age2, image_size = load_data_npz(test_file)

mk_dir('Results_csv')
model_file = 'megaage_models/batch_size_50/ssrnet_%d_%d_%d_%d_%s_%s/ssrnet_%d_%d_%d_%d_%s_%s.h5' % (
    stage_num[0], stage_num[1], stage_num[2], image_size, lambda_local, lambda_d,
    stage_num[0], stage_num[1], stage_num[2], image_size, lambda_local, lambda_d)
save_name = 'Results_csv/ssrnet_%d_%d_%d_%d_%s_%s_age.csv' % (
    stage_num[0], stage_num[1], stage_num[2], image_size, lambda_local, lambda_d)

# ------------------------- load data -------------------------
model = SSR_net(image_size, stage_num, lambda_local, lambda_d)()
logging.debug("Loading model file...")
model.load_weights(model_file)
age_p = model.predict(image2)

# ------------------------- prediction -------------------------
age_p2 = age_p
pred = [['MAE'],
        [str(MAE(age2[age2 >= -1], age_p2[age2 >= -1]))],
        ['CA3', 'CA5'],
        ['0', '0'],
        ['ID', 'age', 'age_p', 'error']]
CA3 = 0
CA5 = 0
for i in range(0, len(image2)):
def main():
    args = get_args()
    input_path = args.input
    db_name = args.db
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    validation_split = args.validation_split
    netType1 = args.netType1
    netType2 = args.netType2

    logging.debug("Loading data...")
    image, gender, age, image_size = load_data_npz(input_path)
    x_data = image
    y_data_a = age

    start_decay_epoch = [30, 60]
    optMethod = Adam()

    stage_num = [3, 3, 3]
    lambda_local = 0.25 * (netType1 % 5)
    lambda_d = 0.25 * (netType2 % 5)

    model = SSR_net(image_size, stage_num, lambda_local, lambda_d)()
    save_name = 'ssrnet_%d_%d_%d_%d_%s_%s' % (stage_num[0], stage_num[1], stage_num[2],
                                              image_size, lambda_local, lambda_d)
    model.compile(optimizer=optMethod, loss=["mae"], metrics={'pred_a': 'mae'})

    if db_name == "wiki":
        weight_file = f'imdb_models/{save_name}/{save_name}.h5'
        if os.path.isfile(weight_file):  # silently skip if the weight file does not exist
            print("previous weight loading...")
            model.load_weights(weight_file)
    elif db_name == "morph":
        weight_file = f'wiki_models/{save_name}/{save_name}.h5'
        if os.path.isfile(weight_file):
            print("previous weight loading...")
            model.load_weights(weight_file)

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    logging.debug("Saving model...")
    mk_dir(db_name + "_models")
    mk_dir(db_name + "_models/" + save_name)
    mk_dir(db_name + "_checkpoints")
    plot_model(model, to_file=db_name + "_models/" + save_name + "/" + save_name + ".png")
    with open(os.path.join(db_name + "_models/" + save_name, save_name + '.json'), "w") as f:
        f.write(model.to_json())

    decaylearningrate = TYY_callbacks.DecayLearningRate(start_decay_epoch)
    callbacks = [ModelCheckpoint(db_name + "_checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                                 monitor="val_loss",
                                 verbose=1,
                                 save_best_only=True,
                                 mode="auto"),
                 decaylearningrate]

    logging.debug("Running training...")

    data_num = len(x_data)
    indexes = np.arange(data_num)
    np.random.shuffle(indexes)
    x_data = x_data[indexes]
    y_data_a = y_data_a[indexes]
    train_num = int(data_num * (1 - validation_split))
    x_train = x_data[:train_num]
    x_test = x_data[train_num:]
    y_train_a = y_data_a[:train_num]
    y_test_a = y_data_a[train_num:]

    hist = model.fit_generator(generator=data_generator_reg(X=x_train, Y=y_train_a, batch_size=batch_size),
                               steps_per_epoch=train_num // batch_size,
                               validation_data=(x_test, [y_test_a]),
                               epochs=nb_epochs,
                               verbose=1,
                               callbacks=callbacks)

    logging.debug("Saving weights...")
    model.save_weights(os.path.join(db_name + "_models/" + save_name, save_name + '.h5'), overwrite=True)
    pd.DataFrame(hist.history).to_hdf(os.path.join(db_name + "_models/" + save_name,
                                                   'history_' + save_name + '.h5'), "history")
emotion_target_size = emotion_classifier.input_shape[1:3]

# starting lists for calculating modes
emotion_window = []

# starting video streaming
cv2.namedWindow('window_frame')
video_capture = cv2.VideoCapture(0)

# load model and weights
img_size = 64
stage_num = [3, 3, 3]
lambda_local = 1
lambda_d = 1
model = SSR_net(img_size, stage_num, lambda_local, lambda_d)()
model.load_weights(weight_file)
model_gender = SSR_net_general(img_size, stage_num, lambda_local, lambda_d)()
model_gender.load_weights(weight_file_gender)

# Select video or webcam feed
cap = None
if USE_WEBCAM:
    cap = cv2.VideoCapture(0)  # Webcam source
else:
    cap = cv2.VideoCapture('./Emotion/demo/TGOP.mp4')  # Video file source

while cap.isOpened():
    ret, bgr_image = cap.read()