# --- Example 1 ---
def train_face():
    """Train the siamese face model on the ChokePoint cropped training set.

    Logs loss/accuracy every ``checkpoint`` iterations to
    ``model_history/face_perf.txt``, snapshots weights every
    ``save_checkpoint`` iterations, and saves the final weights to
    ``model_history/base_face_weights.h5``.
    """
    train_images = help_functions.load_all_images(
        config.chokepoint_cropped_train, file_type='.pgm', preprocess=help_functions.prepare_face_random_resize)

    # Hyper-parameters
    people_count = 8          # distinct people sampled per training batch
    iterations = 25000
    checkpoint = 20           # log metrics every N iterations
    save_checkpoint = 10000   # snapshot weights every N iterations

    backend.clear_session()
    model = get_face_model((config.face_image_resize[1], config.face_image_resize[0], 1))
    # Context manager guarantees the history file is closed even if a
    # training step raises (the original leaked the handle on error).
    with open(os.path.join('model_history', 'face_perf.txt'), 'a') as f:
        for i in range(1, iterations + 1):
            inputs, targets = get_image_pairs(train_images, people_count)
            (loss, acc) = model.train_on_batch(inputs, targets)
            if i % checkpoint == 0:
                logging.info('Iteration: {}'.format(i))
                logging.info('Loss: {}'.format(loss))
                logging.info('Accuracy: {}'.format(acc))
                f.write(str(i) + ' ' + str(loss) + ' ' + str(acc) + '\n')
            if i % save_checkpoint == 0:
                model.save_weights(os.path.join('model_history', 'base_face_weights_it{}.h5'.format(i)))
                f.flush()
        model.save_weights(os.path.join('model_history', 'base_face_weights.h5'))
# --- Example 2 ---
def test_body(model_file, image_folder=config.test_body_folder):
    """Evaluate a trained siamese body model over 100 random pair batches.

    Args:
        model_file: path to the saved weights (.h5) to load.
        image_folder: folder of test images; defaults to
            ``config.test_body_folder``.

    Accumulates TP/TN/FP/FN-style counts via ``calc_rates`` and prints
    them with ``print_rates``.
    """
    test_images = help_functions.load_all_images(
        image_folder, preprocess=help_functions.prepare_body)
    model = get_body_model(
        (config.body_image_resize[1], config.body_image_resize[0], 3))
    model.load_weights(filepath=model_file)
    print('body')
    rates = np.array([0, 0, 0, 0])
    for i in range(100):
        inputs, targets = train_help_functions.get_image_pairs(test_images, 10)
        # Skip empty batches — same guard the sibling test_face() uses;
        # predicting on an empty batch would corrupt the rate counts.
        if targets.shape[0] == 0:
            continue
        predicted = model.predict_on_batch(inputs)
        rates += calc_rates(predicted, targets)
    print_rates(rates)
# --- Example 3 ---
def test_face_oneshot(model_file, iterations=10, versus=4):
    """Run one-shot face verification trials and print the match rate.

    Args:
        model_file: path to the saved weights (.h5) to load.
        iterations: number of one-shot trials to run.
        versus: number of candidate images the target competes against.
    """
    test_images = help_functions.load_all_images(
        config.chokepoint_cropped_test,
        file_type='.pgm',
        preprocess=help_functions.prepare_face)
    model = get_face_model(
        (config.face_image_resize[1], config.face_image_resize[0], 1))
    model.load_weights(filepath=model_file)

    matched = 0
    for i in range(iterations):
        inputs, targets = train_help_functions.get_oneshot_pair(
            test_images, versus)
        # Skip empty trials — same guard the sibling test_body_oneshot()
        # uses; argmax on an empty array raises.
        if targets.shape[0] == 0:
            continue
        result = model.predict_on_batch(inputs)
        matched += np.argmax(result) == np.argmax(targets)
    # Note: skipped trials still count in the denominator (matches the
    # behaviour of test_body_oneshot).
    print('Oneshot face:', float(matched) / float(iterations), 'vs', versus)
# --- Example 4 ---
def test_body_oneshot(model_file, iterations=10, versus=4):
    """Run one-shot body re-identification trials and print the match rate.

    Args:
        model_file: path to the saved weights (.h5) to load.
        iterations: number of one-shot trials to run.
        versus: number of candidate images the target competes against.
    """
    test_images = help_functions.load_all_images(
        config.test_body_folder, preprocess=help_functions.prepare_body)
    input_shape = (config.body_image_resize[1], config.body_image_resize[0], 3)
    model = get_body_model(input_shape)
    model.load_weights(filepath=model_file)

    matched = 0
    for _ in range(iterations):
        inputs, targets = train_help_functions.get_oneshot_pair(
            test_images, versus)
        # Empty trials contribute nothing (but still count in the denominator).
        if targets.shape[0] != 0:
            scores = model.predict_on_batch(inputs)
            matched += np.argmax(scores) == np.argmax(targets)
    print('Oneshot body:', float(matched) / float(iterations), 'vs', versus)
# --- Example 5 ---
def improve_bodies():
    """Fine-tune the siamese body model on per-camera-group image folders.

    Each camera group is trained on independently (batches are drawn from
    one randomly chosen group per iteration) because the same person may
    carry different IDs in different folders, and mixing them would create
    false negatives. Writes metrics to
    ``model_history/body_improve_perf.txt`` and saves the result to
    ``config.improved_body_model``.
    """
    # same person in different folders might have different ID, causing false negatives if we connected them
    train_images = []
    # load images from each folder
    for group_name in config.improve_camera_groups:
        images = help_functions.load_all_images(
            os.path.join(config.improve_folder, group_name, 'bodies'),
            file_type='.jpg',
            preprocess=help_functions.prepare_body)
        train_images.append(images)

    # hyper-parameters
    people_count = 8          # distinct people sampled per training batch
    iterations = 8000
    checkpoint = 2            # log metrics every N iterations
    save_checkpoint = 5000    # snapshot weights every N iterations

    model = siamese_network.get_body_model(
        (config.body_image_resize[1], config.body_image_resize[0], 3))

    # are we improving base model of already improved model?
    # load weight for model we are improving
    if config.learning_start:
        model.load_weights(filepath=config.base_body_model)
    elif config.learning_improving:
        model.load_weights(filepath=config.improved_body_model)

    logging.info('IMPROVING: Starting to improve model for bodies')
    # Context manager guarantees the history file is closed even if a
    # training step raises (the original leaked the handle on error).
    with open(os.path.join('model_history', 'body_improve_perf.txt'), 'a') as f:
        for i in range(1, iterations + 1):
            inputs, targets = get_image_pairs(
                train_images[np.random.randint(0, len(train_images))],
                people_count)
            (loss, acc) = model.train_on_batch(inputs, targets)
            if i % checkpoint == 0:
                logging.info('Iteration: {}'.format(i))
                logging.info('Loss: {}'.format(loss))
                logging.info('Accuracy: {}'.format(acc))
                f.write(str(i) + ' ' + str(loss) + ' ' + str(acc) + '\n')
            if i % save_checkpoint == 0:
                model.save_weights(os.path.join('model_history',
                                                str(i) + 'FBI.h5'))
                f.flush()
        model.save_weights(config.improved_body_model)
# --- Example 6 ---
def test_face(model_file,
              image_folder=config.chokepoint_cropped_test,
              file_type='.pgm'):
    """Evaluate a trained siamese face model over 100 random pair batches.

    Args:
        model_file: path to the saved weights (.h5) to load.
        image_folder: folder of test images; defaults to
            ``config.chokepoint_cropped_test``.
        file_type: image extension filter passed to the loader.

    Accumulates rate counts via ``calc_rates`` and prints them with
    ``print_rates``.
    """
    test_images = help_functions.load_all_images(
        image_folder,
        file_type=file_type,
        preprocess=help_functions.prepare_face)
    input_shape = (config.face_image_resize[1], config.face_image_resize[0], 1)
    model = get_face_model(input_shape)
    model.load_weights(filepath=model_file)
    print('face')
    rates = np.array([0, 0, 0, 0])
    for _ in range(100):
        inputs, targets = train_help_functions.get_image_pairs(test_images, 10)
        # Empty batches contribute nothing to the counts.
        if targets.shape[0] != 0:
            scores = model.predict_on_batch(inputs)
            rates += calc_rates(scores, targets)
    print_rates(rates)
# --- Example 7 ---
def build_known_people():
    """Seed the tracker with pre-labelled face/body images of known people.

    Reads images from ``config.keep_track_targeted_files`` whose filenames
    encode the sample type and person name, creates a PersonTrack per
    unique name, and registers its samples.

    NOTE(review): this function appears to have been fused with a fragment
    of an unrelated texture-pattern routine (from ``pattern = np.empty(...)``
    to ``return pattern``) — `height`, `width`, `points`, `threshold`,
    `scale` and `image` at that point are not defined here; verify against
    the original sources.
    """
    # not necessary to know people
    if config.learning_start or config.learning_improving:
        return
    images = help_functions.load_all_images(config.keep_track_targeted_files,
                                            '', help_functions.identity)
    # one id for one name, there may be multiple files with same them
    # maps name -> (track_id, sample count); count is used to spread
    # samples across cameras
    name_to_id = {}

    # make IDs negative so we don't have to worry about collisions with centroid tracker
    # TODO: hack-y way, instead make centroid tracker next_id larger so there are no collision, but this works OK
    next_id = -1
    for image, file in images:
        # name format F_person name_.... for faces, B_person name for bodies
        match = re.search('([fFbB])_([a-zA-Z0-9 ]+)(_.*)?', file)
        # ignore wrong files
        if match is None:
            logging.warning(
                'KNOWN PEOPLE: File {} has wrong format'.format(file))
            continue
        name = match.group(2)
        # try to get track if we have seen this person before and how many time we have seen him
        track_id, count = name_to_id.get(name, (None, 0))
        # first time we see picture of this person
        if track_id is None:
            track_id = next_id
            next_id -= 1
            # n_cameras and known_objects are presumably module-level —
            # not visible in this chunk; TODO confirm
            track = PersonTrack(track_id, n_cameras)
            track.reid()  # not necessary but good to have
            track.identify(name)
            known_objects[track_id] = track
            name_to_id[name] = (track_id, count + 1)
        else:
            # NOTE(review): the stored count is never incremented here, so
            # every later sample of the same person lands on the same
            # camera index — looks like a bug; confirm intent.
            track = known_objects.get(track_id, None)

        # distribute pictures to different cameras, but it doesn't really matter that much
        if match.group(1).lower() == 'f':  # face
            track.add_face_sample(image, math.inf, count % n_cameras, True)
        elif match.group(1).lower() == 'b':  # body
            track.add_body_sample(image, math.inf, count % n_cameras, True)
    # NOTE(review): everything below belongs to a different routine (an
    # LBP-like per-pixel neighbourhood encoding) — see docstring note.
    pattern = np.empty((height, width), int)
    for y in range(height):
        for x in range(width):
            sum_pixel = 0
            for i, (py, px) in enumerate(points):
                x_ = math.floor(x + px + 0.5)
                y_ = math.floor(y + py + 0.5)
                # skip neighbours that fall outside the image bounds
                if x_ < 0 or x_ >= width or y_ < 0 or y_ >= height:
                    continue
                sum_pixel += scale(image[y][x], image[y_][x_],
                                   threshold) * math.pow(3, i)
            pattern[y][x] = sum_pixel
    return pattern


if __name__ == '__main__':
    # Extract and persist retinex body features for each dataset split.
    splits = (
        (config.train_body_folder, config.train_body_features),
        (config.test_body_folder, config.test_body_features),
        (config.query_body_folder, config.query_body_features),
    )
    for image_folder, features_file in splits:
        images = help_functions.load_all_images(
            image_folder,
            preprocess=help_functions.prepare_body_retinex)
        save_features(images, features_file)