Example #1
def train_face():
    train_images = help_functions.load_all_images(
        config.chokepoint_cropped_train, file_type='.pgm', preprocess=help_functions.prepare_face_random_resize)

    # Hyper-parameters
    people_count = 8
    iterations = 25000
    checkpoint = 20
    save_checkpoint = 10000

    backend.clear_session()
    model = get_face_model((config.face_image_resize[1], config.face_image_resize[0], 1))
    f = open(os.path.join('model_history', 'face_perf.txt'), 'a')
    # print(model.summary())
    for i in range(1, iterations+1):
        inputs, targets = get_image_pairs(train_images, people_count)
        (loss, acc) = model.train_on_batch(inputs, targets)
        if i % checkpoint == 0:
            logging.info('Iteration: {}'.format(i))
            logging.info('Loss: {}'.format(loss))
            logging.info('Accuracy: {}'.format(acc))
            f.write(str(i) + ' ' + str(loss) + ' ' + str(acc) + '\n')
        if i % save_checkpoint == 0:
            model.save_weights(os.path.join('model_history', 'base_face_weights_it{}.h5'.format(i)))
            f.flush()
    model.save_weights(os.path.join('model_history', 'base_face_weights.h5'))
    f.close()
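# Note: get_image_pairs is imported from train_help_functions and is not part
# of this snippet. The sketch below is only an assumption of what such a
# siamese pair generator could look like; make_image_pairs and images_by_person
# are hypothetical names, not the project's actual helper.
import numpy as np

def make_image_pairs(images_by_person, pair_count=8):
    # images_by_person: dict mapping a person id to an array of preprocessed
    # face images shaped (H, W, 1)
    people = list(images_by_person.keys())
    left, right, labels = [], [], []
    for _ in range(pair_count):
        if np.random.rand() < 0.5:
            # positive pair: two images of the same person, label 1
            person = np.random.choice(people)
            images = images_by_person[person]
            first, second = np.random.randint(0, len(images), size=2)
            left.append(images[first])
            right.append(images[second])
            labels.append(1)
        else:
            # negative pair: one image from each of two different people, label 0
            person_a, person_b = np.random.choice(people, size=2, replace=False)
            left.append(images_by_person[person_a][np.random.randint(len(images_by_person[person_a]))])
            right.append(images_by_person[person_b][np.random.randint(len(images_by_person[person_b]))])
            labels.append(0)
    return [np.array(left), np.array(right)], np.array(labels)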
Example #2
def test_face_oneshot(model_file, iterations=10, versus=4):
    test_images = help_functions.load_all_images(
        config.chokepoint_cropped_test,
        file_type='.pgm',
        preprocess=help_functions.prepare_face)
    model = get_face_model(
        (config.face_image_resize[1], config.face_image_resize[0], 1))
    model.load_weights(filepath=model_file)

    matched = 0
    for i in range(iterations):
        inputs, targets = train_help_functions.get_oneshot_pair(
            test_images, versus)
        result = model.predict_on_batch(inputs)
        matched += np.argmax(result) == np.argmax(targets)
    print('Oneshot face:', float(matched) / float(iterations), 'vs', versus)
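# Example invocation (an assumption, not part of the original snippet): evaluate
# the weights written by train_face() in Example #1 over 100 one-shot trials,
# with 'versus' passed through to get_oneshot_pair as in the function above.
test_face_oneshot('model_history/base_face_weights.h5', iterations=100, versus=4)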
Example #3
def improve_faces():
    # the same person may have a different ID in different folders, which would cause false negatives if the folders were merged
    train_images = []
    # load images from each folder
    for group_name in config.improve_camera_groups:
        images = help_functions.load_all_images(
            os.path.join(config.improve_folder, group_name, 'faces'),
            file_type='.jpg',
            preprocess=help_functions.prepare_face)
        train_images.append(images)

    # hyper-parameters
    people_count = 8
    iterations = 8000
    checkpoint = 2
    save_checkpoint = 5000

    model = siamese_network.get_face_model(
        (config.face_image_resize[1], config.face_image_resize[0], 1))

    # are we improving the base model or an already improved model?
    # load the weights for the model we are improving
    if config.learning_start:
        model.load_weights(filepath=config.base_face_model)
    elif config.learning_improving:
        model.load_weights(filepath=config.improved_face_model)

    f = open(os.path.join('model_history', 'face_improve_perf.txt'), 'a')
    logging.info('IMPROVING: Starting to improve model for faces')
    for i in range(1, iterations + 1):
        inputs, targets = get_image_pairs(
            train_images[np.random.randint(0, len(train_images))],
            people_count)
        (loss, acc) = model.train_on_batch(inputs, targets)
        if i % checkpoint == 0:
            logging.info('Iteration: {}'.format(i))
            logging.info('Loss: {}'.format(loss))
            logging.info('Accuracy: {}'.format(acc))
            f.write(str(i) + ' ' + str(loss) + ' ' + str(acc) + '\n')
        if i % save_checkpoint == 0:
            model.save_weights(os.path.join('model_history', str(i) + 'FI.h5'))
            f.flush()
    model.save_weights(config.improved_face_model)
    f.close()
Example #4
def test_face(model_file,
              image_folder=config.chokepoint_cropped_test,
              file_type='.pgm'):
    test_images = help_functions.load_all_images(
        image_folder,
        file_type=file_type,
        preprocess=help_functions.prepare_face)
    model = get_face_model(
        (config.face_image_resize[1], config.face_image_resize[0], 1))
    model.load_weights(filepath=model_file)
    print('face')
    rates = np.array([0, 0, 0, 0])
    for i in range(100):
        inputs, targets = train_help_functions.get_image_pairs(test_images, 10)
        if targets.shape[0] == 0:
            continue
        predicted = model.predict_on_batch(inputs)
        rates += calc_rates(predicted, targets)
    print_rates(rates)
Example #5
import numpy as np
import logging
import cv2

import siamese_network

import config

body_model = siamese_network.get_body_model()
face_model = siamese_network.get_face_model()

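# load the weights that match the current run mode: base weights when starting
# fresh, previously improved weights when continuing to improve or in production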
if config.learning_start:
    body_model.load_weights(filepath=config.base_body_model)
    face_model.load_weights(filepath=config.base_face_model)
elif config.learning_improving or config.production:
    body_model.load_weights(filepath=config.improved_body_model)
    face_model.load_weights(filepath=config.improved_face_model)


def confirm_match(candidate_track, same_track):
    while True:
        face1 = candidate_track.get_face_images()
        face2 = same_track.get_face_images()
        body1 = candidate_track.get_body_images()
        body2 = same_track.get_body_images()
        showed = False
        if body1.shape[0] > 0 and body2.shape[0] > 0:
            body1 = body1[np.random.randint(0, body1.shape[0])]
            body2 = body2[np.random.randint(0, body2.shape[0])]
            cv2.imshow('candidate', body1.astype(np.uint8))
            cv2.imshow('matched to', body2.astype(np.uint8))