        landmarks.append(landmark)
    _prx_t = time.time() - _start_t

    if len(bboxes):
        for landmark in landmarks:
            for point in landmark:
                cv.circle(image, tuple(point), 3, (0, 255, 0), -1)

        image = vis.plotBBoxes(image, bboxes, len(bboxes) * ['face'], scores)

    image = vis.plotInfo(image, 'Raspberry Pi - FPS: {:.3f}'.format(1/_prx_t))
    image = cv.cvtColor(np.asarray(image), cv.COLOR_BGR2RGB)

    cv.imshow(image_path, image)
    key = cv.waitKey(0)

    cv.destroyAllWindows()


def main(args):
    app(args.image)


if __name__ == '__main__':
    LOG.info('Raspberry Pi: Face Detection\n')

    args = helper.getArgs()
    main(args)

    LOG.info('Process done')
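The top of app() is cut off above. Below is a minimal sketch of what it likely does, reusing the face_detector.getFaces and landmarker.getLandmark calls that appear in the other examples of this collection; the def_score threshold is copied from the emotion-recognition example, and the coordinate shift is an assumption:

import time

import cv2 as cv
import numpy as np

# face_detector and landmarker stand in for the pre-built model wrappers
# the original script initializes elsewhere

def app(image_path):
    image = cv.imread(image_path)

    _start_t = time.time()
    scores, bboxes = face_detector.getFaces(image, def_score=0.5)

    landmarks = []
    for x1, y1, x2, y2 in bboxes:
        landmark = landmarker.getLandmark(image[y1:y2, x1:x2])
        landmark += np.array([x1, y1])  # shift back to full-image coordinates
        landmarks.append(landmark)
    ...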
Example #2
        if record:
            writer.write(frm)

        if show:
            cv.imshow(video_name, frm)
            key = cv.waitKey(1)
            if key in [27, ord('q')]:
                LOG.info('Interrupted by user')
                break

    if record:
        writer.release()
    cap.release()
    cv.destroyAllWindows()


def main(args):
    video_link = args.video if args.video else 0
    app(video_link, args.name, args.show, args.record, args.flip_hor,
        args.flip_ver)


if __name__ == '__main__':
    LOG.info('Raspberry Pi: Object Detection')

    args = helper.getArgs()
    main(args)

    LOG.info('Process done')
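The loop above starts mid-function: cap, writer, frm, and the flip flags come from a setup block that was cut off. A minimal sketch of that setup using OpenCV's stock VideoCapture/VideoWriter API; the XVID fourcc and the '.avi' output name are assumptions:

def app(video_link, video_name, show, record, flip_hor, flip_ver):
    cap = cv.VideoCapture(video_link)
    fps = cap.get(cv.CAP_PROP_FPS) or 30  # fall back to 30 if unknown
    size = (int(cap.get(cv.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv.CAP_PROP_FRAME_HEIGHT)))

    if record:
        fourcc = cv.VideoWriter_fourcc(*'XVID')
        writer = cv.VideoWriter(video_name + '.avi', fourcc, fps, size)

    while cap.isOpened():
        ret, frm = cap.read()
        if not ret:
            break
        if flip_hor:
            frm = cv.flip(frm, 1)  # mirror horizontally
        if flip_ver:
            frm = cv.flip(frm, 0)  # mirror vertically
        ...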
Example #3
        frm = cv.cvtColor(np.asarray(frm), cv.COLOR_BGR2RGB)

        if record:
            writer.write(frm)

        if show:
            cv.imshow(video_name, frm)
            key = cv.waitKey(1)
            if key in [27, ord('q')]:
                LOG.info('Interrupted by user')
                break

    if record:
        writer.release()
    cap.release()
    cv.destroyAllWindows()


def main(args):
    video_link = args.video if args.video else 0
    app(video_link, args.name, args.show, args.record)


if __name__ == '__main__':
    LOG.info('Raspberry Pi: Head Pose\n')

    args = helper.getArgs()
    main(args)

    LOG.info('Process done')
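As in the previous example, the per-frame processing was cut off. A minimal sketch of the head-pose step it likely performs; headposer.getHeadPose is a hypothetical name modeled on the getFaces/getLandmark calls used elsewhere in this collection:

        scores, bboxes = face_detector.getFaces(frm, def_score=0.5)
        for x1, y1, x2, y2 in bboxes:
            # hypothetical call returning Euler angles in degrees
            yaw, pitch, roll = headposer.getHeadPose(frm[y1:y2, x1:x2])
            frm = vis.plotInfo(frm, 'yaw {:.1f} pitch {:.1f} roll {:.1f}'
                               .format(yaw, pitch, roll))

Example #4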
    face_image2 = image2[y1:y2, x1:x2]
    lm2 = landmarker.getLandmark(face_image2)
    aligned_face2 = aligner.align(face_image2, lm2)
    # ================================================================

    emb1 = embedder.getEmb(face_image1)
    LOG.info('emb1 shape: {}'.format(emb1.shape))

    emb2 = embedder.getEmb(aligned_face2)  # feed the aligned crop computed above (it was otherwise unused)
    LOG.info('emb2 shape: {}'.format(emb2.shape))

    dist = getDistance(emb1, emb2)
    LOG.info('distance: {:.4f}'.format(dist))
    # ================================================================


def main(args):
    image1_path = args.image1
    image2_path = args.image2

    app(image1_path, image2_path)


if __name__ == '__main__':
    LOG.info('Raspberry Pi: Face Identification\n')

    args = helper.getArgs()
    main(args)

    LOG.info('Process done')
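getDistance() itself is not shown above. A minimal sketch of a compatible implementation; whether the original uses Euclidean or cosine distance, and whether it normalizes first, are assumptions:

import numpy as np

def getDistance(emb1, emb2):
    # L2-normalize, then take the Euclidean distance; on normalized
    # embeddings this is monotonic in cosine similarity, and smaller
    # values mean more similar faces
    emb1 = emb1 / np.linalg.norm(emb1)
    emb2 = emb2 / np.linalg.norm(emb2)
    return float(np.linalg.norm(emb1 - emb2))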
Example #5
                              IDs_lst[i], i)
            fw.write(line)


def app(data_path, data_name, verbal=True):
    image_fnames, image_shapes, bboxes_lst, IDs_lst = \
        processDataFolder(data_path)

    train_cnt = int(cfg.TRAIN_RATIO * len(image_fnames))
    val_cnt = len(image_fnames) - train_cnt

    createLSTFile(image_fnames[:train_cnt], image_shapes[:train_cnt],
                  bboxes_lst[:train_cnt], IDs_lst[:train_cnt],
                  'train-' + data_name)
    createLSTFile(image_fnames[train_cnt:], image_shapes[train_cnt:],
                  bboxes_lst[train_cnt:], IDs_lst[train_cnt:],
                  'val-' + data_name)


def main(args):
    app(args.data, args.name, args.verbal)


if __name__ == '__main__':
    LOG.info('Task: Generate REC data\n')

    args = getArgs()
    main(args)

    LOG.info('Process done')
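createLSTFile() is only partially visible above. A minimal sketch of a version consistent with its call sites, writing the tab-separated detection .lst layout used by MXNet's im2rec tool (index, header widths, image size, then per-box class ID and normalized corners, then the path); the exact field order in the original is an assumption:

def createLSTFile(fnames, shapes, bboxes_lst, IDs_lst, name):
    with open(name + '.lst', 'w') as fw:
        for i, fname in enumerate(fnames):
            h, w = shapes[i][:2]
            fields = [str(i), '4', '5', str(w), str(h)]
            for (x1, y1, x2, y2), ID in zip(bboxes_lst[i], IDs_lst[i]):
                # class ID plus box corners normalized to [0, 1]
                fields += [str(ID), str(x1 / w), str(y1 / h),
                           str(x2 / w), str(y2 / h)]
            fw.write('\t'.join(fields + [fname]) + '\n')

Example #6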
    _prx_t = time.time() - _start_t

    if len(bboxes):
        for landmark in landmarks:
            for point in landmark:
                cv.circle(image, tuple(point), 3, (0, 255, 0), -1)

        image = vis.plotBBoxes(image, bboxes, len(bboxes) * ['face'], scores)

    image = vis.plotInfo(image,
                         'Raspberry Pi - FPS: {:.3f}'.format(1 / _prx_t))
    image = cv.cvtColor(np.asarray(image), cv.COLOR_BGR2RGB)

    cv.imshow(image_path, image)
    key = cv.waitKey(0)

    cv.destroyAllWindows()


def main(args):
    app(args.image)


if __name__ == '__main__':
    LOG.info('Raspberry Pi: Face Alignment\n')

    args = helper.getArgs()
    main(args)

    LOG.info('Process done')
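This example mirrors Example #1 but also runs the aligner. A minimal sketch of an align() in the spirit of the aligner.align(face_image, lm) call from the face-identification example: rotate the crop so the eyes sit level. The eye landmark indices are assumptions:

def align(face_image, landmark, left_eye=0, right_eye=1):
    (x1, y1), (x2, y2) = landmark[left_eye], landmark[right_eye]
    angle = np.degrees(np.arctan2(y2 - y1, x2 - x1))  # tilt of the eye line
    center = ((x1 + x2) / 2.0, (y1 + y2) / 2.0)
    M = cv.getRotationMatrix2D(center, angle, 1.0)
    h, w = face_image.shape[:2]
    return cv.warpAffine(face_image, M, (w, h))

Example #7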
    frm = cv.imread(image_path)
    _start_t = time.time()
    scores, bboxes = face_detector.getFaces(frm, def_score=0.5)
    for bbox in bboxes:
        x1, y1, x2, y2 = bbox
        face_img = frm[y1:y2, x1:x2]
        emo_idx = emotioner.getEmotion(face_img)
        LOG.info('emotion: {}'.format(emo_idx))
    _prx_t = time.time() - _start_t

    # if len(bboxes):
    #     frm = vis.plotBBoxes(frm, bboxes, len(bboxes) * ['face'], scores)
    # frm = vis.plotInfo(frm, 'Raspberry Pi - FPS: {:.3f}'.format(1/_prx_t))
    # frm = cv.cvtColor(np.asarray(frm), cv.COLOR_BGR2RGB)

    cv.destroyAllWindows()


def main(args):
    app(args.image)


if __name__ == '__main__':
    LOG.info('Raspberry Pi: Emotion Recognition\n')

    args = helper.getArgs()
    main(args)

    LOG.info('Process done')
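The crop at the top of this example indexes frm directly with the detector's corners. A defensive variant (an addition, not in the original) casts them to int and clamps to the frame bounds before slicing:

def cropFace(frm, bbox):
    # hypothetical helper: clamp bbox corners to the frame before slicing
    h, w = frm.shape[:2]
    x1, y1, x2, y2 = [int(v) for v in bbox]
    x1, y1 = max(0, x1), max(0, y1)
    x2, y2 = min(w, x2), min(h, y2)
    return frm[y1:y2, x1:x2]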