def model_init(args):
    K.set_learning_phase(0)  # make sure it's in testing (inference) mode
    # load model and weights
    stage_num = [3, 3, 3]
    lambda_local = 1
    lambda_d = 1
    # SSR_net's __call__ builds and returns the Keras model
    model_age = SSR_net(args.image_size, stage_num, lambda_local, lambda_d)()
    model_age.load_weights(args.age_model)

    model_gender = SSR_net_general(args.image_size, stage_num, lambda_local,
                                   lambda_d)()
    model_gender.load_weights(args.gender_model)
    return model_gender, model_age
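
A minimal usage sketch for model_init; the argument container and weight paths below are hypothetical stand-ins for whatever the real script builds with argparse:

from types import SimpleNamespace

# Hypothetical args object; attribute names follow model_init's usage above.
args = SimpleNamespace(
    image_size=64,
    age_model='pre-trained/morph2/ssrnet_3_3_3_64_1.0_1.0.h5',  # assumed path
    gender_model='pre-trained/wiki_gender_models/ssrnet_3_3_3_64_1.0_1.0.h5')  # assumed path
model_gender, model_age = model_init(args)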
Code Example #2
def load_models():
    weight_file = os.path.join(
        ssrnet_dir,
        '../pre-trained/morph2/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5'
    )
    weight_file_gender = os.path.join(
        ssrnet_dir,
        '../pre-trained/wiki_gender_models/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5'
    )
    # load model and weights
    stage_num = [3, 3, 3]
    lambda_local = 1
    lambda_d = 1
    model = SSR_net(img_size, stage_num, lambda_local, lambda_d)()
    model.load_weights(weight_file)

    model_gender = SSR_net_general(img_size, stage_num, lambda_local,
                                   lambda_d)()
    model_gender.load_weights(weight_file_gender)
    return model, model_gender
Code Example #3
def check_inference_time():
    age_layer = 'age_prediction'
    gender_layer = 'gender_prediction'

    logger.info('Load InceptionV3 model')
    inceptionv3 = AgenderNetInceptionV3()
    inceptionv3.setWeight('trainweight/inceptionv3_2/model.16-3.7887-0.9004-6.6744.h5')

    inceptionv3_age = Model(inputs=inceptionv3.input,
                            outputs=inceptionv3.get_layer(age_layer).output)
    inceptionv3_gender = Model(inputs=inceptionv3.input,
                               outputs=inceptionv3.get_layer(gender_layer).output)

    logger.info('Load MobileNetV2 model')
    mobilenetv2 = AgenderNetMobileNetV2()
    mobilenetv2.setWeight('trainweight/mobilenetv2/model.10-3.8290-0.8965-6.9498.h5')

    mobilenetv2_age = Model(inputs=mobilenetv2.input,
                            outputs=mobilenetv2.get_layer(age_layer).output)
    mobilenetv2_gender = Model(inputs=mobilenetv2.input,
                               outputs=mobilenetv2.get_layer(gender_layer).output)

    logger.info('Load SSRNet model')
    ssrnet = SSRNet(64, [3, 3, 3], 1.0, 1.0)
    ssrnet.setWeight('trainweight/agender_ssrnet/model.31-7.5452-0.8600-7.4051.h5')

    ssrnet_age = Model(inputs=ssrnet.input,
                       outputs=ssrnet.get_layer(age_layer).output)
    ssrnet_gender = Model(inputs=ssrnet.input,
                          outputs=ssrnet.get_layer(gender_layer).output)

    logger.info('Load pretrained IMDB model')
    imdb_model = SSR_net(64, [3, 3, 3], 1.0, 1.0)()
    imdb_model.load_weights("tes_ssrnet/imdb_age_ssrnet_3_3_3_64_1.0_1.0.h5")
    
    imdb_model_gender = SSR_net_general(64, [3, 3, 3], 1.0, 1.0)()
    imdb_model_gender.load_weights("tes_ssrnet/imdb_gender_ssrnet_3_3_3_64_1.0_1.0.h5")

    images = cv2.imread('UTKface_aligned/part1/34_1_0_20170103183147490.jpg')
    image = cv2.resize(images, (64, 64), interpolation=cv2.INTER_CUBIC)
    X = image.astype('float16')
    X = np.expand_dims(X, axis=0)

    logger.info('Predict age and gender with SSR-Net')
    wrapped = wrapper(predictone, ssrnet, X)
    logger.info(proces_time(wrapped))
    logger.info('Predict age with SSR-Net')
    wrapped = wrapper(predictone, ssrnet_age, X)
    logger.info(proces_time(wrapped))
    logger.info('Predict gender with SSR-Net')
    wrapped = wrapper(predictone, ssrnet_gender, X)
    logger.info(proces_time(wrapped))

    logger.info('Predict age with IMDB_SSR-Net')
    wrapped = wrapper(predictone, imdb_model, X)
    logger.info(proces_time(wrapped))
    logger.info('Predict gender with IMDB_SSR-Net')
    wrapped = wrapper(predictone, imdb_model_gender, X)
    logger.info(proces_time(wrapped))

    del X
    image = cv2.resize(images, (96, 96), interpolation=cv2.INTER_CUBIC)
    X = image.astype('float16')
    X = np.expand_dims(X, axis=0)
    logger.info('Predict age and gender with MobileNetV2')
    wrapped = wrapper(predictone, mobilenetv2, X)
    logger.info(proces_time(wrapped))
    logger.info('Predict age with MobileNetV2')
    wrapped = wrapper(predictone, mobilenetv2_age, X)
    logger.info(proces_time(wrapped))
    logger.info('Predict gender with MobileNetV2')
    wrapped = wrapper(predictone, mobilenetv2_gender, X)
    logger.info(proces_time(wrapped))
    
    del X
    X = images.astype('float16')
    X = np.expand_dims(X, axis=0)
    logger.info('Predict age and gender with InceptionV3')
    wrapped = wrapper(predictone, inceptionv3, X)
    logger.info(proces_time(wrapped))
    logger.info('Predict age with InceptionV3')
    wrapped = wrapper(predictone, inceptionv3_age, X)
    logger.info(proces_time(wrapped))
    logger.info('Predict gender with InceptionV3')
    wrapped = wrapper(predictone, inceptionv3_gender, X)
    logger.info(proces_time(wrapped))
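
wrapper, proces_time and predictone are helpers not shown in this snippet. A plausible timeit-based sketch consistent with how they are called above (an assumption, not the project's confirmed implementation):

import timeit

def wrapper(func, *args, **kwargs):
    # Bind the arguments so timeit can call the function without parameters.
    def wrapped():
        return func(*args, **kwargs)
    return wrapped

def predictone(model, X):
    # Single forward pass over one batch.
    return model.predict(X)

def proces_time(wrapped, number=10):
    # Average wall-clock seconds per call over `number` repetitions.
    return timeit.timeit(wrapped, number=number) / number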
Code Example #4
def main():
    logger.info('Load InceptionV3 model')
    inceptionv3 = AgenderNetInceptionV3()
    inceptionv3.setWeight('trainweight/inceptionv3_2/model.16-3.7887-0.9004-6.6744.h5')
    
    logger.info('Load MobileNetV2 model')
    mobilenetv2 = AgenderNetMobileNetV2()
    mobilenetv2.setWeight('trainweight/mobilenetv2/model.10-3.8290-0.8965-6.9498.h5')

    logger.info('Load SSRNet model')
    ssrnet = SSRNet(64, [3, 3, 3], 1.0, 1.0)
    ssrnet.setWeight('trainweight/ssrnet/model.37-7.3318-0.8643-7.1952.h5')

    logger.info('Load pretrained IMDB model')
    imdb_model = SSR_net(64, [3, 3, 3], 1.0, 1.0)()
    imdb_model.load_weights("tes_ssrnet/imdb_age_ssrnet_3_3_3_64_1.0_1.0.h5")
    
    imdb_model_gender = SSR_net_general(64, [3, 3, 3], 1.0, 1.0)()
    imdb_model_gender.load_weights("tes_ssrnet/imdb_gender_ssrnet_3_3_3_64_1.0_1.0.h5")

    logger.info('Load pretrained Wiki model')
    wiki_model = SSR_net(64, [3, 3, 3], 1.0, 1.0)()
    wiki_model.load_weights("tes_ssrnet/wiki_age_ssrnet_3_3_3_64_1.0_1.0.h5")
    
    wiki_model_gender = SSR_net_general(64, [3, 3, 3], 1.0, 1.0)()
    wiki_model_gender.load_weights("tes_ssrnet/wiki_gender_ssrnet_3_3_3_64_1.0_1.0.h5")

    logger.info('Load pretrained Morph model')
    morph_model = SSR_net(64, [3, 3, 3], 1.0, 1.0)()
    morph_model.load_weights("tes_ssrnet/morph_age_ssrnet_3_3_3_64_1.0_1.0.h5")
    
    morph_model_gender = SSR_net_general(64, [3, 3, 3], 1.0, 1.0)()
    morph_model_gender.load_weights("tes_ssrnet/morph_gender_ssrnet_3_3_3_64_1.0_1.0.h5")

    utk = pd.read_csv('dataset/UTKface.csv')
    fgnet = pd.read_csv('dataset/FGNET.csv')

    utk_paths = utk['full_path'].values
    fgnet_paths = fgnet['full_path'].values

    logger.info('Read UTKface aligned images')
    utk_images = [cv2.imread('UTKface_aligned/'+path) for path in tqdm(utk_paths)]

    logger.info('Read FGNET aligned images')
    fgnet_images = [cv2.imread('FGNET_aligned/'+path) for path in tqdm(fgnet_paths)]
    
    utk_X = np.array(utk_images)
    fgnet_X = np.array(fgnet_images)

    utk_pred_age = dict()
    utk_pred_gender = dict()
    fgnet_pred_age = dict()

    logger.info('Predict with InceptionV3')
    start = time.time()
    utk_pred_gender['inceptionv3'], utk_pred_age['inceptionv3'] = get_result(inceptionv3, utk_X)
    _, fgnet_pred_age['inceptionv3'] = get_result(inceptionv3, fgnet_X)
    elapsed = time.time() - start
    logger.info('Time elapsed {:.2f} sec'.format(elapsed))

    del utk_X, fgnet_X
    logger.info('Resize images to 96x96 for MobileNetV2')
    utk_images = [cv2.resize(image, (96, 96), interpolation=cv2.INTER_CUBIC) for image in tqdm(utk_images)]
    fgnet_images = [cv2.resize(image, (96, 96), interpolation=cv2.INTER_CUBIC) for image in tqdm(fgnet_images)]
    utk_X = np.array(utk_images)
    fgnet_X = np.array(fgnet_images)
    
    logger.info('Predict with MobileNetV2')
    start = time.time()
    utk_pred_gender['mobilenetv2'], utk_pred_age['mobilenetv2'] = get_result(mobilenetv2, utk_X)
    _, fgnet_pred_age['mobilenetv2'] = get_result(mobilenetv2, fgnet_X)
    elapsed = time.time() - start
    logger.info('Time elapsed {:.2f} sec'.format(elapsed))

    del utk_X, fgnet_X
    logger.info('Resize images to 64x64 for SSR-Net')
    utk_images = [cv2.resize(image, (64, 64), interpolation=cv2.INTER_CUBIC) for image in tqdm(utk_images)]
    fgnet_images = [cv2.resize(image, (64, 64), interpolation=cv2.INTER_CUBIC) for image in tqdm(fgnet_images)]
    utk_X = np.array(utk_images)
    fgnet_X = np.array(fgnet_images)

    logger.info('Predict with SSR-Net')
    start = time.time()
    utk_pred_gender['ssrnet'], utk_pred_age['ssrnet'] = get_result(ssrnet, utk_X)
    _, fgnet_pred_age['ssrnet'] = get_result(ssrnet, fgnet_X)
    elapsed = time.time() - start
    logger.info('Time elapsed {:.2f} sec'.format(elapsed))

    logger.info('Predict with IMDB_SSR-Net')
    start = time.time()
    utk_pred_gender['ssrnet-imdb'] = np.around(imdb_model_gender.predict(utk_X).squeeze()).astype('int')
    utk_pred_age['ssrnet-imdb'] = imdb_model.predict(utk_X).squeeze()
    fgnet_pred_age['ssrnet-imdb'] = imdb_model.predict(fgnet_X).squeeze()
    elapsed = time.time() - start
    logger.info('Time elapsed {:.2f} sec'.format(elapsed))

    logger.info('Predict with Wiki_SSR-Net')
    start = time.time()
    utk_pred_gender['ssrnet-wiki'] = np.around(wiki_model_gender.predict(utk_X).squeeze()).astype('int')
    utk_pred_age['ssrnet-wiki'] = wiki_model.predict(utk_X).squeeze()
    fgnet_pred_age['ssrnet-wiki'] = wiki_model.predict(fgnet_X).squeeze()
    elapsed = time.time() - start
    logger.info('Time elapsed {:.2f} sec'.format(elapsed))

    logger.info('Predict with Morph_SSR-Net')
    start = time.time()
    utk_pred_gender['ssrnet-morph'] = np.around(morph_model_gender.predict(utk_X).squeeze()).astype('int')
    utk_pred_age['ssrnet-morph'] = morph_model.predict(utk_X).squeeze()
    fgnet_pred_age['ssrnet-morph'] = morph_model.predict(fgnet_X).squeeze()
    elapsed = time.time() - start
    logger.info('Time elapsed {:.2f} sec'.format(elapsed))

    utk_pred_age = pd.DataFrame.from_dict(utk_pred_age)
    utk_pred_gender = pd.DataFrame.from_dict(utk_pred_gender)
    fgnet_pred_age = pd.DataFrame.from_dict(fgnet_pred_age)

    utk_pred_age = pd.concat([utk['age'], utk_pred_age], axis=1)
    utk_pred_gender = pd.concat([utk['gender'], utk_pred_gender], axis=1)
    fgnet_pred_age = pd.concat([fgnet['age'], fgnet_pred_age], axis=1)

    utk_pred_age.to_csv('result/utk_age_prediction.csv', index=False)
    utk_pred_gender.to_csv('result/utk_gender_prediction.csv', index=False)
    fgnet_pred_age.to_csv('result/fgnet_age_prediction.csv', index=False)
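
Since each saved CSV puts the ground truth next to every model's predictions, per-model error can be computed directly from the files written above. A minimal follow-up sketch (filenames assumed from the code above):

import pandas as pd

df = pd.read_csv('result/utk_age_prediction.csv')
# Every column except 'age' holds one model's predictions; MAE per model:
mae = df.drop(columns=['age']).sub(df['age'], axis=0).abs().mean()
print(mae)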
Code Example #5
def main():
    args = get_args()
    input_path = args.input
    db_name = args.db
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    validation_split = args.validation_split
    netType1 = args.netType1
    netType2 = args.netType2

    logging.debug("Loading data...")
    image, gender, age, image_size = load_data_npz(input_path)
    # image, age, image_size = load_data_npz_megaface(input_path)

    x_data = image
    # y_data_a = age
    y_data_g = gender

    start_decay_epoch = [30, 60]

    optMethod = Adam()

    stage_num = [3, 3, 3]
    lambda_local = 0.25 * (netType1 % 5)
    lambda_d = 0.25 * (netType2 % 5)

    model = SSR_net_general(image_size, stage_num, lambda_local, lambda_d)()
    save_name = 'ssrnet_%d_%d_%d_%d_%s_%s' % (stage_num[0], stage_num[1],
                                              stage_num[2], image_size,
                                              lambda_local, lambda_d)
    model.compile(optimizer=optMethod, loss=["mae"], metrics={'pred': 'mae'})

    if db_name == "wiki":
        weight_file = "imdb_gender_models/" + save_name + "/" + save_name + ".h5"
        model.load_weights(weight_file)
    elif db_name == "morph":
        weight_file = "wiki_gender_models/" + save_name + "/" + save_name + ".h5"
        model.load_weights(weight_file)
    # elif db_name == "megaface_asian":
    #     weight_file = "wiki_models/"+save_name+"/"+save_name+".h5"
    #     model.load_weights(weight_file)

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    logging.debug("Saving model...")
    mk_dir(db_name + "_gender_models")
    mk_dir(db_name + "_gender_models/" + save_name)
    mk_dir(db_name + "_checkpoints")
    plot_model(model,
               to_file=db_name + "_gender_models/" + save_name + "/" +
               save_name + ".png")

    with open(
            os.path.join(db_name + "_gender_models/" + save_name,
                         save_name + '.json'), "w") as f:
        f.write(model.to_json())

    decaylearningrate = TYY_callbacks.DecayLearningRate(start_decay_epoch)

    callbacks = [
        ModelCheckpoint(db_name +
                        "_checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto"), decaylearningrate
    ]

    logging.debug("Running training...")

    data_num = len(x_data)
    indexes = np.arange(data_num)
    np.random.shuffle(indexes)
    x_data = x_data[indexes]
    # y_data_a = y_data_a[indexes]
    y_data_g = y_data_g[indexes]

    train_num = int(data_num * (1 - validation_split))

    x_train = x_data[:train_num]
    x_test = x_data[train_num:]
    # y_train_a = y_data_a[:train_num]
    # y_test_a = y_data_a[train_num:]
    y_train_g = y_data_g[:train_num]
    y_test_g = y_data_g[train_num:]

    hist = model.fit_generator(generator=data_generator_reg(
        X=x_train, Y=y_train_g, batch_size=batch_size),
                               steps_per_epoch=train_num // batch_size,
                               validation_data=(x_test, [y_test_g]),
                               epochs=nb_epochs,
                               verbose=1,
                               callbacks=callbacks)

    logging.debug("Saving weights...")
    model.save_weights(os.path.join(db_name + "_gender_models/" + save_name,
                                    save_name + '.h5'),
                       overwrite=True)
    pd.DataFrame(hist.history).to_hdf(
        os.path.join(db_name + "_gender_models/" + save_name,
                     'history_' + save_name + '.h5'), "history")
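
data_generator_reg is not shown in this snippet. A minimal sketch of what such a generator typically looks like for fit_generator (an assumption, not the project's confirmed implementation):

import numpy as np

def data_generator_reg(X, Y, batch_size):
    # Infinite generator of shuffled mini-batches, as Keras fit_generator expects.
    n = len(X)
    while True:
        idx = np.random.permutation(n)
        for start in range(0, n - batch_size + 1, batch_size):
            batch = idx[start:start + batch_size]
            yield X[batch], Y[batch]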
Code Example #6
async def main():
    weight_file = "../pre-trained/megaface_asian/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5"
    weight_file_gender = "../pre-trained/wiki_gender_models/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5"

    mtcnn = False

    if mtcnn:
        detector = MTCNN()
    else:
        detector = cv2.CascadeClassifier('lbpcascade_frontalface_improved.xml')


    # load model and weights
    img_size = 64
    stage_num = [3, 3, 3]
    lambda_local = 1
    lambda_d = 1
    model = SSR_net(img_size, stage_num, lambda_local, lambda_d)()
    model.load_weights(weight_file)
    model_gender = SSR_net_general(img_size, stage_num, lambda_local,
                                   lambda_d)()
    model_gender.load_weights(weight_file_gender)

    # capture video
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1024 * 1)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 768 * 1)

    detected = ''
    time_detection = 0
    time_network = 0
    time_plot = 0
    ad = 0.5
    img_idx = 0
    skip_frame = 10
    sleep = 1
    send_post_time = time.time() + sleep

    while True:
        # get video frame
        img_idx = img_idx + 1
        ret, input_img = cap.read()
        img_h, img_w, _ = np.shape(input_img)

        if img_idx == 1 or img_idx % skip_frame == 0:
            time_detection = 0
            time_network = 0
            time_plot = 0
            # detect faces using LBP detector
            start_time = timeit.default_timer()
            gray_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
            if mtcnn:
                detected = detector.detect_faces(input_img)
            else:
                detected = detector.detectMultiScale(gray_img, 1.1)
            elapsed_time = timeit.default_timer() - start_time
            time_detection = time_detection + elapsed_time
            faces = np.empty((len(detected), img_size, img_size, 3))

        input_img, time_network, time_plot, resultados = show_results(
            detected, input_img, faces, ad, img_size, img_w, img_h, model,
            model_gender, time_detection, time_network, time_plot, mtcnn)

        # Show the time cost (fps)
        # print('time_detection:', time_detection)
        # print('time_network:', time_network)
        # print('time_plot:', time_plot)
        # print('===============================')
        cv2.waitKey(1)

        if send_post_time < time.time():
            send_post_time = time.time() + sleep
            await nested(resultados)
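
nested(resultados) is awaited but never defined in this snippet; given the send_post_time throttle, one plausible reading is an async HTTP POST of the detection results. A hypothetical sketch using aiohttp (the endpoint URL is invented):

import aiohttp

async def nested(resultados):
    # Hypothetical: POST the latest results to a collector endpoint.
    async with aiohttp.ClientSession() as session:
        await session.post('http://localhost:8000/results', json=resultados)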
Code Example #7
def main():

    weight_file = "../pre-trained/morph2/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5"
    weight_file_gender = "../pre-trained/wiki_gender_models/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5"

    face_cascade = cv2.CascadeClassifier('lbpcascade_frontalface_improved.xml')
    try:
        os.mkdir('./img')
    except OSError:
        pass

    # load model and weights
    img_size = 64
    stage_num = [3, 3, 3]
    lambda_local = 1
    lambda_d = 1
    model = SSR_net(img_size, stage_num, lambda_local, lambda_d)()
    model.load_weights(weight_file)

    model_gender = SSR_net_general(img_size, stage_num, lambda_local,
                                   lambda_d)()
    model_gender.load_weights(weight_file_gender)

    # capture video
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1024 * 1)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 768 * 1)

    img_idx = 0
    detected = ''  # keep this outside the loop so it persists across frames
    time_detection = 0
    time_network = 0
    time_plot = 0
    skip_frame = 5  # run detection and a network forward pass once every 5 frames
    ad = 0.5

    while True:
        # get video frame
        ret, input_img = cap.read()

        img_idx = img_idx + 1
        img_h, img_w, _ = np.shape(input_img)

        if img_idx == 1 or img_idx % skip_frame == 0:
            time_detection = 0
            time_network = 0
            time_plot = 0

            # detect faces using LBP detector
            gray_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
            start_time = timeit.default_timer()
            detected = face_cascade.detectMultiScale(gray_img, 1.1)
            elapsed_time = timeit.default_timer() - start_time
            time_detection = time_detection + elapsed_time
            faces = np.empty((len(detected), img_size, img_size, 3))

            input_img, time_network, time_plot = draw_results(
                detected, input_img, faces, ad, img_size, img_w, img_h, model,
                model_gender, time_detection, time_network, time_plot)
            cv2.imwrite('img/' + str(img_idx) + '.png', input_img)

        else:
            input_img, time_network, time_plot = draw_results(
                detected, input_img, faces, ad, img_size, img_w, img_h, model,
                model_gender, time_detection, time_network, time_plot)

        # Show the time cost (fps)
        print('avefps_time_detection:', 1 / time_detection)
        print('avefps_time_network:', skip_frame / time_network)
        print('avefps_time_plot:', skip_frame / time_plot)
        print('===============================')
        cv2.waitKey(1)
Code Example #8
def main():
    K.set_learning_phase(0)  # make sure it's in testing (inference) mode
    weight_file = "../pre-trained/wiki/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5"
    weight_file_gender = "../pre-trained/wiki_gender_models/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5"

    # for face detection
    # detector = dlib.get_frontal_face_detector()
    detector = MTCNN()
    try:
        os.mkdir('./img')
    except OSError:
        pass
    # load model and weights
    img_size = 64
    stage_num = [3, 3, 3]
    lambda_local = 1
    lambda_d = 1
    model = SSR_net(img_size, stage_num, lambda_local, lambda_d)()
    model.load_weights(weight_file)

    model_gender = SSR_net_general(img_size, stage_num, lambda_local,
                                   lambda_d)()
    model_gender.load_weights(weight_file_gender)
    clip = VideoFileClip(sys.argv[1])  # can be gif or movie

    # Python version flag
    pyFlag = ''
    if len(sys.argv) < 3:
        pyFlag = '2'  # default: use moviepy to show; works on Python 2.7 and 3.5
    elif len(sys.argv) == 3:
        pyFlag = sys.argv[2]  # explicit Python version
    else:
        print('Wrong input!')
        sys.exit()

    img_idx = 0
    detected = ''  # keep this outside the loop so it persists across frames
    time_detection = 0
    time_network = 0
    time_plot = 0
    ad = 0.4
    skip_frame = 1  # run detection and a network forward pass every skip_frame frames
    for img in clip.iter_frames():
        img_idx = img_idx + 1

        input_img = img  # with Python 2.7 and moviepy, show the image without a channel flip

        if pyFlag == '3':
            input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        img_h, img_w, _ = np.shape(input_img)
        input_img = cv2.resize(input_img, (1024, int(1024 * img_h / img_w)))
        img_h, img_w, _ = np.shape(input_img)

        if img_idx == 1 or img_idx % skip_frame == 0:

            # detect faces using the MTCNN detector
            start_time = timeit.default_timer()
            detected = detector.detect_faces(input_img)
            elapsed_time = timeit.default_timer() - start_time
            time_detection = time_detection + elapsed_time
            faces = np.empty((len(detected), img_size, img_size, 3))
            input_img, time_network, time_plot = draw_results(
                detected, input_img, faces, ad, img_size, img_w, img_h, model,
                model_gender, time_detection, time_network, time_plot)

        # Show the time cost (fps)
        # print('avefps_time_detection:',img_idx/time_detection)
        # print('avefps_time_network:',img_idx/time_network)
        # print('avefps_time_plot:',img_idx/time_plot)
        # print('===============================')
        if pyFlag == '3':
            key = cv2.waitKey()
            if key == 27:
                break
Code Example #9
File: agenderFromImage.py  Project: DiazArce/IA
gender_net = None
age_net = None

# Load age and gender models
if age_gender_kind == 'ssrnet':
    # Setup global parameters
    face_size = 64
    face_padding_ratio = 0.10
    # Default parameters for SSR-Net
    stage_num = [3, 3, 3]
    lambda_local = 1
    lambda_d = 1
    # Initialize gender net
    gender_net = SSR_net_general(face_size, stage_num, lambda_local, lambda_d)()
    gender_net.load_weights('age_gender_ssrnet_models/ssrnet_gender_3_3_3_64_1.0_1.0.h5')
    # Initialize age net
    age_net = SSR_net(face_size, stage_num, lambda_local, lambda_d)()
    age_net.load_weights('age_gender_ssrnet_models/ssrnet_age_3_3_3_64_1.0_1.0.h5')
else:
    # Setup global parameters
    face_size = 227
    face_padding_ratio = 0.0
    # Initialize gender detector
    gender_net = cv.dnn.readNetFromCaffe('gender_deploy.prototxt', 'gender_net.caffemodel')
    # Initialize age detector
    age_net = cv.dnn.readNetFromCaffe('age_deploy.prototxt', 'age_net.caffemodel')
    # Mean values (standard values published with these Caffe models) and class labels
    MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
    Genders = ['Male', 'Female']
    Ages = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
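
The Caffe branch above only loads the networks. For reference, a minimal sketch of how they are typically run on a cropped BGR face (face_bgr is an assumed variable; the call pattern is standard OpenCV DNN usage, not code from this project):

blob = cv.dnn.blobFromImage(face_bgr, 1.0, (face_size, face_size),
                            MODEL_MEAN_VALUES, swapRB=False)
gender_net.setInput(blob)
gender = Genders[gender_net.forward().argmax()]
age_net.setInput(blob)
age = Ages[age_net.forward().argmax()]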
Code Example #10
def main():
    # dynamically allocate GPU memory
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    K.tensorflow_backend.set_session(sess)
    print('[LOAD DATA]')
    images, ageLabel, genderLabel = prepData(64)
    n_fold = 1
    
    img_size = 64
    stage_num = [3, 3, 3]
    lambda_local = 1
    lambda_d = 1
    imdb_model = SSR_net(img_size, stage_num, lambda_local, lambda_d)()
    imdb_model.compile(optimizer='adam', loss="mae", metrics=["mae"])
    imdb_model.load_weights("imdb_age_ssrnet_3_3_3_64_1.0_1.0.h5")

    imdb_model_gender = SSR_net_general(img_size, stage_num, lambda_local, lambda_d)()
    imdb_model_gender.compile(optimizer='adam', loss="mae", metrics=["binary_accuracy"])
    imdb_model_gender.load_weights("imdb_gender_ssrnet_3_3_3_64_1.0_1.0.h5")

    wiki_model = SSR_net(img_size, stage_num, lambda_local, lambda_d)()
    wiki_model.compile(optimizer='adam', loss="mae", metrics=["mae"])
    wiki_model.load_weights("wiki_age_ssrnet_3_3_3_64_1.0_1.0.h5")

    wiki_model_gender = SSR_net_general(img_size, stage_num, lambda_local, lambda_d)()
    wiki_model_gender.compile(optimizer='adam', loss="mae", metrics=["binary_accuracy"])
    wiki_model_gender.load_weights("wiki_gender_ssrnet_3_3_3_64_1.0_1.0.h5")

    morph_model = SSR_net(img_size, stage_num, lambda_local, lambda_d)()
    morph_model.compile(optimizer='adam', loss="mae", metrics=["mae"])
    morph_model.load_weights("morph_age_ssrnet_3_3_3_64_1.0_1.0.h5")

    morph_model_gender = SSR_net_general(img_size, stage_num, lambda_local, lambda_d)()
    morph_model_gender.compile(optimizer='adam', loss="mae", metrics=["binary_accuracy"])
    morph_model_gender.load_weights("morph_gender_ssrnet_3_3_3_64_1.0_1.0.h5")

   
    print('[K-FOLD] Started...')
    kf = KFold(n_splits=10, shuffle=True, random_state=1)
    kf_split = kf.split(ageLabel)
    for _, test_idx in kf_split:
        print('[K-FOLD] Fold {}'.format(n_fold))
        testImages = images[test_idx]
        testAge = ageLabel[test_idx]
        testGender = genderLabel[test_idx]

        scores = evaluate(imdb_model, testImages, testAge)
        print('imdb Age score:', scores)
        scores = evaluate(wiki_model, testImages, testAge)
        print('wiki Age score:', scores)
        scores = evaluate(morph_model, testImages, testAge)
        print('morph Age score:', scores)

        scores = evaluate(imdb_model_gender, testImages, testGender)
        print('imdb Gender score:', scores)
        scores = evaluate(wiki_model_gender, testImages, testGender)
        print('wiki Gender score:', scores)
        scores = evaluate(morph_model_gender, testImages, testGender)
        print('morph Gender score:', scores)

        n_fold += 1
        del testImages, testAge, testGender, scores
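
evaluate is not shown in this snippet; since every model above is compiled with a loss and metrics, a plausible sketch is a thin wrapper over Keras model.evaluate (the batch size is an assumption):

def evaluate(model, images, labels):
    # Returns [loss, metric] as compiled above (MAE or binary accuracy).
    return model.evaluate(images, labels, batch_size=128, verbose=0)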
Code Example #11
# starting video streaming

cv2.namedWindow('window_frame')
video_capture = cv2.VideoCapture(0)

# load model and weights
img_size = 64
stage_num = [3, 3, 3]
lambda_local = 1
lambda_d = 1
model = SSR_net(img_size, stage_num, lambda_local, lambda_d)()
model.load_weights(weight_file)

model_gender = SSR_net_general(img_size, stage_num, lambda_local, lambda_d)()
model_gender.load_weights(weight_file_gender)

# Select video or webcam feed
cap = None
if USE_WEBCAM:
    cap = cv2.VideoCapture(0)  # Webcam source
else:
    cap = cv2.VideoCapture('./Emotion/demo/TGOP.mp4')  # Video file source

while cap.isOpened():
    ret, bgr_image = cap.read()

    #bgr_image = video_capture.read()[1]

    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)