Esempio n. 1
0
def main():
    """Run live webcam face detection with a three-stage MTCNN detector.

    Grabs frames from camera 0, converts BGR->RGB for the detector, draws
    the detected boxes, and prints a running FPS estimate until 'q' is
    pressed or the camera stops delivering frames.
    """
    fps = 0.0
    minsize = 20                   # minimum face size (pixels) passed to the detector
    threshold = [0.06, 0.6, 0.6]   # per-stage (P/R/O-net) confidence thresholds
    factor = 0.709                 # image-pyramid scale factor

    Pnet, Rnet, Onet = detector.initFaceDetector()
    video_capture = cv2.VideoCapture(0)

    while True:
        t1 = time.time()

        ret, img = video_capture.read()
        if not ret:
            # Frame grab failed (camera unplugged / stream ended): stop
            # cleanly instead of crashing on a None frame below.
            break
        cv2.imshow("origin", img)
        cv2.waitKey(10)

        # OpenCV delivers BGR; swap channels 0 and 2 to get RGB for the detector.
        img_matlab = img.copy()
        tmp = img_matlab[:, :, 2].copy()
        img_matlab[:, :, 2] = img_matlab[:, :, 0]
        img_matlab[:, :, 0] = tmp

        boundingboxes = detector.detect_face(img_matlab, minsize, Pnet, Rnet,
                                             Onet, threshold, False, factor)
        detector.drawBoxes(img, boundingboxes)
        cv2.imshow("Pnet", img)

        # Smoothed FPS: average of previous estimate and instantaneous rate.
        fps = (fps + (1. / (time.time() - t1))) / 2
        print("fps= %f" % (fps))
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    cv2.destroyAllWindows()
Esempio n. 2
0
def main(use_gpu=False):
    """Run face-attribute recognition on every frame of every video under
    ``frame_root`` and write one result file per video under ``res_root``.

    Each output line is "<frame id> <p1> <p2> ..." where the probabilities
    are formatted to four decimals, or "NaN" per attribute when no face is
    detected in the frame.

    :param use_gpu: forwarded to ``recognize_attributes``.
    """
    frame_root = 'test/data/3.0'
    res_root = 'test/res/3.0/face_attribute'
    if not os.path.exists(res_root):
        os.makedirs(res_root)

    for vid in tqdm(sorted(os.listdir(frame_root))):
        video_frame_root = os.path.join(frame_root, vid)
        fids, images = get_input(video_frame_root)
        results = []
        for image in images:
            face_box = detect_face(image, det_net)
            if face_box is None:
                # No face found: placeholder for every attribute.
                res = ['NaN'] * len(attr_names)
            else:
                face_image = crop_face(face_box, image, 2.4)
                attr_probs = recognize_attributes(face_image, rec_net, use_gpu)
                res = ['%.4f' % scr for scr in attr_probs]
            results.append(res)

        res_path = os.path.join(res_root, vid + '.txt')
        with open(res_path, 'w') as f:
            for fid, res in zip(fids, results):
                # Frame id is the filename without its extension.
                f.write('%s %s\n' % (fid.split('.')[0], ' '.join(res)))
Esempio n. 3
0
def detect(im):
    """Detect the first face in *im*, crop it with a margin, and return the
    normalized 160x160 face image plus its integer bounding box.

    Returns None when the detector finds no face.
    """
    boxes, _ = detector.detect_face(im, minsize, pnet, rnet, onet, threshold, factor)

    if len(boxes) < 1:
        return None

    face = np.squeeze(boxes[0, 0:4])
    half = margin / 2

    # Expand the box by half the margin on each side, clamped to the image.
    box = np.zeros(4, dtype=np.int32)
    box[0] = np.maximum(face[0] - half, 0)
    box[1] = np.maximum(face[1] - half, 0)
    box[2] = np.minimum(face[2] + half, im.shape[1])
    box[3] = np.minimum(face[3] + half, im.shape[0])

    crop = im[box[1]:box[3], box[0]:box[2], :]
    out = Image.fromarray(crop, 'RGB')
    out = out.resize((160, 160))
    out = ops.normalize_image(out)
    return out, box
Esempio n. 4
0
def main(video_list_path, res_root, use_gpu=False):
    """Run face-attribute recognition for each video listed in
    ``video_list_path`` and write one result file per video to ``res_root``.

    The list file has one "<label> <frame dir>" pair per line; only the
    second field (the frame directory) is used. Videos whose result file
    already has one line per frame are skipped, so the job can be resumed.

    :param video_list_path: path to the video-list text file.
    :param res_root: output directory for per-video .txt results.
    :param use_gpu: forwarded to ``recognize_attributes``.
    """
    global logger

    with open(video_list_path) as f:
        video_list = [line.strip().split()[1] for line in f.readlines()]

    if not os.path.exists(res_root):
        os.makedirs(res_root)

    for frame_dir in tqdm(video_list):
        fids, images = get_input(frame_dir)

        vid = frame_dir.split('/')[-1]
        res_path = os.path.join(res_root, vid + '.txt')
        if os.path.exists(res_path):
            # Resume support: a complete result file has one line per frame.
            with open(res_path, 'r') as f:
                lines = f.readlines()
            if len(lines) == len(fids):
                continue

        results = []
        for image in images:
            face_box = detect_face(image, det_net)
            if face_box is None:
                # No face found: placeholder for every attribute.
                res = ['NaN'] * len(attr_names)
            else:
                face_image = crop_face(face_box, image, 2.4)
                attr_probs = recognize_attributes(face_image, rec_net, use_gpu)
                res = ['%.4f' % scr for scr in attr_probs]
            results.append(res)

        with open(res_path, 'w') as f:
            for fid, res in zip(fids, results):
                # Frame id is the filename without its extension.
                f.write('%s %s\n' % (fid.split('.')[0], ' '.join(res)))

        logger.write('%s\n' % vid)
        logger.flush()

    logger.write('%s done!' % video_list_path)
    logger.flush()
    logger.close()
Esempio n. 5
0
def mark(requests):
    """Detect the first face in the uploaded image and return its bounding box.

    :param requests: HTTP request carrying the image (read via ``ops.read_image``).
    :return: HttpResponse with "x1 y1 x2 y2" of the margin-expanded box,
        or an error message when no face is detected.
    """
    im = ops.read_image(requests)

    bounding_boxes, _ = detector.detect_face(im, minsize, pnet, rnet, onet, threshold, factor)

    if len(bounding_boxes) < 1:
        return HttpResponse('no face detected.')

    det = np.squeeze(bounding_boxes[0, 0:4])
    # Expand the box by half the margin on each side, clamped to the image.
    bb = np.zeros(4, dtype=np.int32)
    bb[0] = np.maximum(det[0] - margin / 2, 0)
    bb[1] = np.maximum(det[1] - margin / 2, 0)
    bb[2] = np.minimum(det[2] + margin / 2, im.shape[1])
    bb[3] = np.minimum(det[3] + margin / 2, im.shape[0])

    return HttpResponse(' '.join(map(str, bb)))
Esempio n. 6
0
def main(body):
    """Decode an image from the request body, run face detection and
    attribute recognition, and return a dict with the result, a success
    flag, and the captured log output.
    """
    logger, log_stream = get_logger()

    payload = None
    img = None
    data = None
    success = False

    # Each stage runs only if the previous one produced a value; every
    # failure is logged and reported via the returned log stream.
    try:
        payload = process_body(body)
    except Exception:
        logger.error('bad request body!', exc_info=True, stack_info=True)

    if payload is not None:
        try:
            img = decode_img_from_b64(payload)
        except Exception:
            logger.error('bad image!', exc_info=True, stack_info=True)

    if img is not None:
        try:
            box = detect_face(img, det_net)
            if box is None:
                logger.error('no face detected!')
            else:
                face = crop_face(box, img, 2.4)
                probs = recognize_attributes(face, rec_net)
                data = ' '.join('%.4f' % prob for prob in probs)
                success = True
        except Exception:
            logger.error('model runtime error!',
                         exc_info=True,
                         stack_info=True)

    return {'data': data, 'success': success, 'log': log_stream.getvalue()}
Esempio n. 7
0
# for debugging
# statusLight = gpiozero.LED(4)
# statusLightSwitch = 1

while True:
    # indicate if the program is still running
    #     statusLightSwitch *= -1
    #     if statusLightSwitch > 0:
    #         statusLight.on()
    #     else:
    #         statusLight.off()

    # default for the program is to detect every 6 seconds
    time.sleep(6)  # call detector every 6 seconds
    s = detector.detect_face()  # detection results: true for face detected
    timeline.pop(0)
    if s:
        timeline.append(1)
    else:
        timeline.append(0)

    # more calculation, but simplifies the design logic
    sit = sum(timeline)
    if sit >= 500:
        led.on()
    else:
        led.off()

    # if leave for 5 minutes, reset timeline
    if sum(timeline[-50:]) == 0:  # -50