Code Example #1
def main(samples_dir, test_dir, output_dir):
    face_descriptors, class_names = load_samples_descriptors(samples_dir)
    # save2pickle(face_descriptors, class_names, "wg_colleagues.pkl")
    # face_descriptors, class_names = load_from_pickle('wg_colleagues.pkl')

    image_files = images_in_dir(test_dir)
    labels, names = _labeled(class_names)
    classifier = SVMClassifier()
    classifier.train(face_descriptors, labels, names)

    for im_f in image_files:
        f_name = filename(im_f)
        im = cv2.imread(im_f)
        start = time.time()
        faces = detect_faces(im)
        print('{} faces detected'.format(len(faces)))
        for face in faces:
            descriptor = compute_face_descriptor(im, face)
            results = classifier.predict([descriptor])

            for r in results:
                label = '{}'.format(r)
                print('label:', label)

                # Save the frame into a per-label subdirectory, numbering
                # the file after the images already present there. Use a
                # separate name so the source file's name is still
                # available for the log line below.
                subdir = _subdir(output_dir, label)
                images_count = len(glob.glob('{}/*.jpg'.format(subdir)))
                out_name = '{}_{:04d}.jpg'.format(label, images_count)
                output_path = os.path.join(subdir, out_name)
                cv2.imwrite(output_path, im)

        print('{} done, time spent: {}'.format(f_name, time.time() - start))
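Example #1 leans on two helpers that the listing does not show, _labeled and _subdir. A minimal sketch of what they might look like, inferred from how they are called (the implementations below are assumptions, not the project's actual code):

import os


def _labeled(class_names):
    # Assumed behaviour: map each sample's class name to an integer label
    # and return the labels plus an index -> name lookup table.
    names = sorted(set(class_names))
    name_to_label = {name: i for i, name in enumerate(names)}
    labels = [name_to_label[name] for name in class_names]
    return labels, names


def _subdir(output_dir, label):
    # Assumed behaviour: return (and create if needed) a per-label
    # output subdirectory.
    path = os.path.join(output_dir, label)
    os.makedirs(path, exist_ok=True)
    return path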
Code Example #2
def main(samples_dir, test_images_dir, output_dir):
    face_descriptors, class_names = load_samples_descriptors(samples_dir)
    # save2pickle(face_descriptors, class_names, 'wg_merged.pkl')
    # face_descriptors, class_names = load_from_pickle('wg_merged.pkl')
    print(class_names)
    labels, names = _labeled(class_names)
    classifier = SVMClassifier()

    print([names[i] for i in labels])

    classifier.train(face_descriptors, labels, names)

    # classifier.load("classifier_2018-05-15 13:30:06.213832.pkl")
    image_files = images_in_dir(test_images_dir)
    for im_f in image_files:
        output_path = os.path.join(output_dir, filename(im_f))
        im = cv2.imread(im_f)
        faces = detect_faces(im)
        for face in faces:
            descriptor = compute_face_descriptor(im, face)
            results = classifier.predict([descriptor])
            for r in results:
                # Draw the predicted label on the image and log it.
                txt = '{}'.format(r)
                put_text(im, txt, font_face=cv2.FONT_HERSHEY_SIMPLEX)
                print('{}: {}'.format(im_f, r))
        cv2.imwrite(output_path, im)
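The SVMClassifier used in Examples #1 and #2 exposes train, predict, and load. A possible wrapper around scikit-learn's SVC with that interface; this is a sketch based on how the class is called, not the project's actual implementation, and it assumes the descriptors are plain array-like rows:

import pickle

from sklearn.svm import SVC


class SVMClassifier:
    def __init__(self):
        self._clf = SVC(kernel='linear', probability=True)
        self._names = []

    def train(self, descriptors, labels, names):
        # Fit the SVM on the face descriptors (assumed array-like rows).
        self._names = names
        self._clf.fit(descriptors, labels)

    def predict(self, descriptors):
        # Return the predicted class name for each descriptor.
        return [self._names[label] for label in self._clf.predict(descriptors)]

    def load(self, path):
        # Restore a previously pickled (classifier, names) pair.
        with open(path, 'rb') as f:
            self._clf, self._names = pickle.load(f)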
Code Example #3
def process_one_image(image_file, output_dir):
    start = time.time()
    im = cv2.imread(image_file)
    file_name = filename(image_file)

    persons, bounding_boxes, heat_map = predict(im)
    # visualization(im, persons, bounding_boxes, heat_map)
    save_results(im, bounding_boxes, output_dir, file_name)
    print('{} done, time spent: {}'.format(file_name, time.time() - start))
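Example #3 calls predict and save_results, neither of which appears in the listing. One way save_results could be written, assuming bounding_boxes is a list of (x1, y1, x2, y2) tuples (an assumption, not the project's code):

import os

import cv2


def save_results(im, bounding_boxes, output_dir, file_name):
    # Hypothetical helper: draw each detected box and write the
    # annotated image into the output directory.
    for (x1, y1, x2, y2) in bounding_boxes:
        cv2.rectangle(im, (x1, y1), (x2, y2), (0, 255, 0), 2)
    os.makedirs(output_dir, exist_ok=True)
    cv2.imwrite(os.path.join(output_dir, file_name), im)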
Code Example #4
def predict(images_dir):
    if not os.path.exists(OUTPUT_DIR):
        os.makedirs(OUTPUT_DIR)

    image_files = images_in_dir(images_dir)
    for im_f in image_files:
        f_name = filename(im_f)
        im = cv2.imread(im_f)
        face = extract(im_f)
        # Rank the known faces by similarity to the extracted face and
        # draw the closest matches onto the image.
        results = find_top_n_closest(face)
        im = overlay(im, results)
        cv2.imwrite(os.path.join(OUTPUT_DIR, f_name), im)
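find_top_n_closest is not shown either. A sketch of the likely idea, nearest-neighbour ranking by Euclidean distance over stored descriptors; in the listing the gallery appears to live in module-level state, whereas here it is passed in explicitly, so both the signature and the return format are assumptions:

import numpy as np


def find_top_n_closest(face, descriptors, names, n=5):
    # Distance from the query descriptor to every stored descriptor,
    # then return the n closest (name, distance) pairs.
    dists = np.linalg.norm(np.asarray(descriptors) - np.asarray(face), axis=1)
    order = np.argsort(dists)[:n]
    return [(names[i], float(dists[i])) for i in order]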
Code Example #5
def main(images_dir, output_dir):
    image_files = images_in_dir(images_dir=images_dir)
    for im_f in image_files:
        im = cv2.imread(im_f)
        start = time.time()
        faces = detect_faces(im)
        end = time.time()
        print('{}: {}'.format(im_f, end - start))

        for face in faces:
            x1, y1, x2, y2 = to_rectangle(face)
            cv2.rectangle(im, (x1, y1), (x2, y2), (0, 255, 0), 2)

        cv2.imwrite(os.path.join(output_dir, filename(im_f)), im)
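to_rectangle in Example #5 presumably converts a dlib detection into the corner coordinates cv2.rectangle expects. Assuming detect_faces returns dlib.rectangle objects, it could be as small as:

def to_rectangle(face):
    # dlib.rectangle exposes left/top/right/bottom accessors.
    return face.left(), face.top(), face.right(), face.bottom()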
Code Example #6
def main(samples_dir, test_dir, output_dir):
    # face_descriptors, class_names = load_samples_descriptors(samples_dir)
    # save2pickle(face_descriptors, class_names, "wg_colleagues.pkl")
    face_descriptors, class_names = load_from_pickle('../wg_colleagues.pkl')
    print(face_descriptors[0])
    print("len face_descriptors: {}".format(len(face_descriptors)))
    print("len class_names: {}".format(len(class_names)))
    print("class_names: {}".format(class_names))
    image_files = images_in_dir(test_dir)

    f_nums = {}

    for im_f in image_files:
        f_name = filename(im_f)
        im = cv2.imread(im_f)
        faces = detect_faces(im)
        start = time.time()
        for face in faces:
            descriptor = compute_face_descriptor(im, face)
            idx, distance = closest_one(face_descriptors, descriptor)

            # Previous approach: recount the matching files on disk for
            # every detected face.
            # f_num = len(glob.glob('{}/{}*.jpg'.format(output_dir, class_names[idx])))
            # f_name = '{}_{}.jpg'.format(class_names[idx], '{0:04d}'.format(f_num))

            # Cache the per-class file count instead of re-globbing each time.
            f_nums.setdefault(
                class_names[idx],
                len(
                    glob.glob('{}/{}*.jpg'.format(output_dir,
                                                  class_names[idx]))))
            f_name = '{}_{:04d}.jpg'.format(class_names[idx],
                                            f_nums[class_names[idx]])
            f_nums[class_names[idx]] += 1

            # txt = '{}:{}'.format(class_names[idx], distance)
            # put_text(im, txt, font_face=cv2.FONT_HERSHEY_SIMPLEX)
            print('{}: {}, distance: {}'.format(im_f, f_name, distance))
        end = time.time()
        print('time:', end - start)
        output_path = os.path.join(output_dir, f_name)
        cv2.imwrite(output_path, im)
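closest_one in Examples #6 and #7 returns the index of the best-matching sample and its distance. A minimal NumPy sketch of that lookup, assuming Euclidean distance over the stored descriptors (the project's actual metric is not shown):

import numpy as np


def closest_one(face_descriptors, descriptor):
    # Index and distance of the nearest stored descriptor.
    dists = np.linalg.norm(
        np.asarray(face_descriptors) - np.asarray(descriptor), axis=1)
    idx = int(np.argmin(dists))
    return idx, float(dists[idx])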
Code Example #7
File: sample_face_rec.py, Project: abhiTronix/fr_dlib
def main(samples_dir, test_dir, output_dir):
    # face_descriptors, class_names = load_samples_descriptors(samples_dir)
    # save2pickle(face_descriptors, class_names, "wg_colleagues.pkl")
    face_descriptors, class_names = load_from_pickle('wg_colleagues.pkl')
    print(face_descriptors[0])
    print("len face_descriptors: {}".format(len(face_descriptors)))
    print("len class_names: {}".format(len(class_names)))
    print("class_names: {}".format(class_names))
    image_files = images_in_dir(test_dir)
    for im_f in image_files:
        output_path = os.path.join(output_dir, filename(im_f))
        im = cv2.imread(im_f)
        faces = detect_faces(im)
        for face in faces:
            descriptor = compute_face_descriptor(im, face)
            idx, distance = closest_one(face_descriptors, descriptor)
            txt = '{}:{}'.format(class_names[idx], distance)
            put_text(im, txt, font_face=cv2.FONT_HERSHEY_SIMPLEX)
            print('{}: {}, distance: {}'.format(im_f, class_names[idx],
                                                distance))
        cv2.imwrite(output_path, im)
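put_text is only ever called with an image, a label string, and a font face. A plausible wrapper around cv2.putText; the fixed position, scale, and colour below are assumptions:

import cv2


def put_text(im, txt, font_face=cv2.FONT_HERSHEY_SIMPLEX):
    # Draw the label near the top-left corner of the image.
    cv2.putText(im, txt, (10, 30), font_face, 1.0, (0, 255, 0), 2)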
Code Example #8
    args = parser.parse_args()
    input_images = args.images
    outputs = args.output
    keras_weights_file = args.model

    print('start processing...')

    # Load the model. The authors of the original model don't apply
    # VGG normalization (mean subtraction) to the input images.
    model = get_testing_model()
    model.load_weights(keras_weights_file)

    # load config
    params, model_params = config_reader()
    print('params:\n {}'.format(params))
    print('model_params:\n {}'.format(model_params))
    # generate image with body parts
    image_files = images_in_dir(input_images)
    for im_f in image_files:
        tic = time.time()
        canvas = process(im_f, params, model_params)
        toc = time.time()
        print('processing time is %.5f' % (toc - tic))

        cv2.imwrite(os.path.join(outputs, filename(im_f)), canvas)

    cv2.destroyAllWindows()
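Example #8 starts after the argument parser has been built, so only args.images, args.output, and args.model are visible. A parser consistent with those attributes might look like this (the flag names and help text are assumptions):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--images', required=True,
                    help='directory of input images')
parser.add_argument('--output', required=True,
                    help='directory for the rendered results')
parser.add_argument('--model', required=True,
                    help='path to the Keras weights file')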