Example No. 1
import os
import time
from pathlib import Path

import cv2
from django.shortcuts import render
from faced import FaceDetector
from faced.utils import annotate_image


def test(request):
    # Get the uploaded image from the request
    img = request.FILES.get('img')
    # Get the uploaded image's file name
    img_name = img.name

    # Get the file extension
    ext = os.path.splitext(img_name)[1]
    # Append a timestamp to avoid name collisions
    now = time.time()
    # Rebuild the image name, keeping the original extension
    img_name = f'imgs{now}{ext}'
    # Image save path
    # img_path = os.path.join(settings.IMG_ROOT, img_name)
    BASE_DIR = Path(__file__).resolve().parent.parent
    img_path = os.path.join(BASE_DIR, "imgs/input", img_name)

    # Write the uploaded image's content to disk
    with open(img_path, 'wb') as fp:
        # For large uploads, chunks() yields the file in pieces
        for chunk in img.chunks():
            fp.write(chunk)
    face_detector = FaceDetector()
    print(img_path)
    img = cv2.imread(img_path)
    rgb_img = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2RGB)
    bboxes = face_detector.predict(rgb_img, 0.8)
    ann_img = annotate_image(img, bboxes)
    cv2.imwrite('C:/Users/Administrator/Desktop/HelloWorld/static/test1.png',
                ann_img)
    return render(request, 'face.html')


def Detection_Faces(img_path):
    face_detector = FaceDetector()
    img = cv2.imread(img_path)
    rgb_img = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2RGB)
    thresh = 0.85
    bboxes = face_detector.predict(rgb_img, thresh)
    return bboxes
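
For reference, `Detection_Faces` returns the same (x_center, y_center, width, height, prob) tuples described in the examples below. A minimal sketch of converting them to corner coordinates for cropping (the file name is a placeholder, not from the original):

import cv2

bboxes = Detection_Faces('photo.jpg')        # hypothetical input image
img = cv2.imread('photo.jpg')
for (x, y, w, h, prob) in bboxes:
    x1, y1 = int(x - w / 2), int(y - h / 2)  # top-left corner
    x2, y2 = int(x + w / 2), int(y + h / 2)  # bottom-right corner
    face = img[y1:y2, x1:x2]                 # cropped face region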
Example No. 3
class FaceTrackServer(object):

    def __init__(self, down_scale_factor=0.25):
        assert 0 <= down_scale_factor <= 1
        self.down_scale_factor = down_scale_factor
        self.face_detector = FaceDetector()
        # Per-instance detection state
        self.faces = []
        self.face_locations = []
        self.face_relative_locations = []
        self.cam_h = None
        self.cam_w = None
        self.camera_address = None

    def get_cam_info(self):
        return {
            'camera': {
                'width': self.cam_w,
                'height': self.cam_h,
                'address': self.camera_address
            }
        }

    def reset(self):
        self.face_relative_locations = []
        self.face_locations = []
        self.faces = []

    def process(self, frame):
        self.reset()
        self.cam_h, self.cam_w, _ = frame.shape
        # Note: the frame is passed to the detector at full resolution;
        # down_scale_factor is stored but never applied in this example.

        # Convert the image from BGR color (which OpenCV uses) to RGB color
        rgb_img = cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2RGB)
        self.face_locations = self.face_detector.predict(rgb_img)
        # This server tracks a single face; drop ambiguous frames with
        # more than one detection.
        if len(self.face_locations) > 1:
            self.face_locations = []

        for x, y, w, h, _ in self.face_locations:
            # Convert the center-based box to corner coordinates, nudging
            # the top-left 10% toward the origin and the bottom-right 10%
            # away from it to pad the crop.
            x1 = int((x - int(w / 2)) * (1 - 0.1))
            y1 = int((y - int(h / 2)) * (1 - 0.1))
            x2 = int((x + int(w / 2)) * (1 + 0.1))
            y2 = int((y + int(h / 2)) * (1 + 0.1))

            _face_area = frame[y1:y2, x1:x2, :]

            if _face_area.size != 0:
                self.faces.append(_face_area)

        print('[FaceTracker Server] Found {} faces!'.format(len(self.faces)))
        return self.faces

    def get_faces_loc(self):
        return self.face_locations

    def get_faces(self):
        return self.faces
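
A minimal usage sketch for this class, assumed rather than taken from the original source (the webcam index and window handling are placeholders):

import cv2

server = FaceTrackServer()
cap = cv2.VideoCapture(0)  # assumed device index
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    faces = server.process(frame)          # list of cropped face images
    for i, face in enumerate(faces):
        cv2.imshow('face-%d' % i, face)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()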
Example No. 4
    def facedect(self):

        face_detector = FaceDetector()
        self.progressbar.setValue(0)

        if self.videoname is not None:
            cap = cv2.VideoCapture(self.videoname)

            frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            filename = self.videoname
            filename = filename.split('/')
            filename = filename[-1].split('.')

            fourcc = cv2.VideoWriter_fourcc(*'DIVX')
            out = cv2.VideoWriter(
                '{0}/masked_{1}.avi'.format(self.outputpath, filename[0]),
                fourcc, 25.0, (frame_width, frame_height))

            ret = True
            frames = []
            num_frame = 0
            # Buffer every frame in memory first so the total is known
            while ret:
                ret, frame = cap.read()

                if ret:
                    frames.append(frame)
                    num_frame += 1
            step = 1

            for i, frame in enumerate(frames):

                rgb_img = cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2RGB)

                # Receives RGB numpy image (HxWxC) and
                # returns (x_center, y_center, width, height, prob) tuples.
                bboxes = face_detector.predict(rgb_img, 0.8)

                # Use this utils function to annotate the image.
                ann_img = annotate_image(frame, bboxes)
                out.write(ann_img)

                if (i / num_frame * 100) - step > 0:
                    step += 1
                    print(step)
                    self.progressbar.setValue(step)

            cap.release()
            out.release()
Example No. 5
class FADetector:
    def __init__(self):
        self.fa = FaceDetector()
        self.fa2 = dlib.get_frontal_face_detector()
        self.fa3 = cv2.dnn.readNetFromCaffe(
            "./models/deploy.prototxt.txt",
            "./models/res10_300x300_ssd_iter_140000.caffemodel")
        self.fa4 = dlib.cnn_face_detection_model_v1(
            "./models/mmod_human_face_detector.dat")
        self.thresh = 0.5

    # OpenCV DNN face detection (ResNet-10 SSD), run on CPU
    def cv2dnn_facedetection(self, rgb, pad=20):  # pad is currently unused
        h, w = rgb.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(rgb, (300, 300)), 1.0,
                                     (300, 300), (103.93, 116.77, 123.68))
        self.fa3.setInput(blob)
        detections = self.fa3.forward()

        #get driver bounding box based on rightmost position
        rightmost = -1
        for i in range(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            if confidence > 0.7 and box[0] > rightmost:
                rightmost = box[0]
                box = box.astype("int")
                bbox = dlib.rectangle(box[0], box[1], box[2], box[3])
        if rightmost == -1: return

        return rgb[bbox.top():bbox.bottom(), bbox.left():bbox.right()]

    # YOLO face detection from https://github.com/iitzco/faced
    def yolo_facedetection(self, rgb):
        bbox = self.fa.predict(rgb, self.thresh)
        box = bbox[0]
        # faced returns (x_center, y_center, width, height, prob);
        # convert the center-based box to corner coordinates for slicing.
        l = box[0] - box[2] // 2
        t = box[1] - box[3] // 2
        r = box[0] + box[2] // 2
        b = box[1] + box[3] // 2
        return rgb[t:b, l:r]

    # CNN face detection from dlib
    def dlibcnn_facedetection(self, rgb, save=False):
        dets = self.fa4(rgb, 0)
        if len(dets) == 0:
            return
        d = dets[0]
        return rgb[d.rect.top():d.rect.bottom(), d.rect.left():d.rect.right()]
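
A short usage sketch, assuming the model files under ./models exist as laid out in __init__ (the input file name is a placeholder):

import cv2

detector = FADetector()
img = cv2.imread('driver.jpg')           # hypothetical input image
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
face = detector.yolo_facedetection(rgb)  # cropped face, RGB order
if face is not None and face.size:
    cv2.imwrite('face_crop.jpg', cv2.cvtColor(face, cv2.COLOR_RGB2BGR))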
Example No. 6
def process_and_encode(dataset):
    print("[LOG] Collecting images ...")
    images = []
    for direc, _, files in tqdm(os.walk(dataset)):
        for file in files:
            if file.endswith("jpg"):
                images.append(os.path.join(direc, file))
    # initialize the list of known encodings and known names
    known_encodings = []
    known_names = []
    print("[LOG] Encoding faces ...")
    model = vggface()
    vgg_face_descriptor = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
    face_detector = FaceDetector()
    face_alignment_predictor = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False)
    for image_path in tqdm(images):
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        boxes = face_detector.predict(image)
        if not boxes:
            warnings.warn('system could not detect a face in image %s' % image_path)
            continue
        (x, y, w, h, prob) = boxes[0]
        #TODO align faces
        x1 = int(x - w/2)
        y1 = int(y - h/2)
        x2 = int(x + w/2)
        y2 = int(y + h/2)
        part_image = image[y1:y2,x1:x2]
        landmarks = face_alignment_predictor.get_landmarks(part_image)
        if landmarks:  # get_landmarks returns None when nothing is found
            part_image = execute_alignment(part_image, landmarks)
        part_image = preprocess_image(part_image)
        encoding = vgg_face_descriptor.predict(part_image)[0, :]
        # the person's name is the name of the folder where the image comes from
        name = image_path.split(os.path.sep)[-2]
        if len(encoding) > 0:
            known_encodings.append(encoding)
            known_names.append(name)
    np.savez('data/encodings/encoding_vggface.npz', encodings=known_encodings, names=known_names)
    return 
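
The archive written above can be read back with np.load; a minimal sketch using the same key names as the savez call:

import numpy as np

data = np.load('data/encodings/encoding_vggface.npz')
known_encodings = data['encodings']
known_names = data['names']
print('loaded %d encodings' % len(known_names))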
Example No. 7
def face_finder():
    fps = 12
    #camera_addr="/dev/video0"
    camera_ip = "10.32.89.135"
    camera_addr = "rtsp://*****:*****@" + camera_ip
    #camera_addr='0'
    camera_addr = "/dev/video1"

    print("Connecting to " + camera_ip + " ...")
    vcap = cv2.VideoCapture(camera_addr)
    vcap.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # keep only one buffered frame
    # The camera itself also has to be configured for this frame rate
    vcap.set(cv2.CAP_PROP_FPS, 60)
    vcap.set(cv2.CAP_PROP_FRAME_WIDTH, 720)
    vcap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    #vcap.set(cv2.CV_CAP_PROP_FPS, 60)
    vcap.set(cv2.CAP_PROP_EXPOSURE, -10)
    # Read one frame so the capture's properties become available
    ret, frame = vcap.read()
    #print (frame.shape)
    frame_size_x = vcap.get(cv2.CAP_PROP_FRAME_WIDTH)  # store the frame size
    frame_size_y = vcap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    print("Successfully connected :D")

    actors = init_actors_multi()
    #print('initial actors shape' + str(actors['pos'].shape))
    boxes = init_boxes()
    face_detector = FaceDetector()
    counter = 0
    centers = []
    while True:
        start = time.time()
        vcap.grab()  # grab a frame from vcap
        ret, frame = vcap.retrieve(0)
        boxes = np.empty([0, 4], dtype=int)
        if ret:  # make sure a frame was actually received
            #print("vcap worked")
            #frame_small = cv2.resize(frame, (int(frame_size_x/factor), int(frame_size_y/factor)))
            frame_ann = frame
            rgb_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            bboxes = face_detector.predict(rgb_img, thresh=0.8)
            #frame_ann = annotate_image(frame, bboxes)
            # transform boxes into x, y, w, h starting at the top-left corner
            for bb in bboxes:
                x, y, w, h = bb[0:4]
                x = x - w / 2
                y = y - h / 2
                boxes = np.vstack([boxes, [int(x), int(y), int(w), int(h)]])
            #print('boxes: '+str(boxes))
        actors_new = scene_multi(boxes, actors, frame_size_x, frame_size_y)

        appeared = init_actors_multi()

        for i in range(actors_new['id'].shape[0]):
            unique = True
            box = actors_new['pos'][i, 0]
            center_x, center_y = get_box_center(box)
            plot_traj(frame_ann, centers, center_x, center_y)
            x, y, w, h = expand_box(box, 2, frame_size_x, frame_size_y)
            cv2.rectangle(frame_ann, (x, y), (x + w, y + h), (0, 255, 0), 1)
            for j in range(actors['id'].shape[0]):
                #check if it existed already, else, append it to the appeared array
                if (actors_new['id'][i] == actors['id'][j]):
                    unique = False
            if unique:
                appeared['id'] = np.vstack(
                    [appeared['id'], actors_new['id'][i]])
                appeared['pos'] = np.vstack(
                    [appeared['pos'], [actors_new['pos'][i]]])

                print(x, y, w, h)
                face_crop = frame[y:y + h, x:x + w]
                cv2.imwrite('findings/' + str(time.time()) + '.jpg', face_crop)
                cv2.rectangle(frame_ann, (x, y), (x + w, y + h), (0, 255, 255),
                              4)
                counter += 1
                #print("Finding " + actors_new['id'][i][0] + ' saved')
        if len(centers) > 0 and actors_new['id'].shape[0] == 0:
            centers.pop(0)
        try:
            cv2.imshow("test", frame_ann)
        except Exception:
            # frame_ann may not exist yet if no frame has been received
            pass
        cv2.waitKey(1)

        print('appeared: ' + str(appeared['id']))
        actors = actors_new
        leisure_time = max(1 / fps - (time.time() - start), 0)
        #print("leisure_time at " + str(fps) + "fps = " + str(leisure_time))
        print("counter", counter)
        time.sleep(leisure_time)

    return True
Example No. 8
# filename = 'obama.jpg'
model = 'faced'
scale = 1
path = '../data/29--Students_Schoolkids/'

# Create the detector once instead of once per image
detector = FaceDetector()

for fn in os.listdir(path):
    filename = fn
    raw_img = cv2.imread(os.path.join(path, filename))
    # predict() expects an RGB image; cv2.imread returns BGR
    rgb_img = cv2.cvtColor(raw_img, cv2.COLOR_BGR2RGB)
    name = fn.split('.')[0]
    out_file = os.path.join('../data', name + '.txt')
    t0 = time.time()
    print('start')
    face_locations = detector.predict(rgb_img, 0.5)
    t1 = time.time()
    print(f'took {round(t1-t0, 3)} to get {len(face_locations)} faces')

    for (x, y, w, h, prob) in face_locations:
        x1 = x - int(w / 2)
        x2 = x + int(w / 2)
        y1 = y - int(h / 2)
        y2 = y + int(h / 2)
        # print(x1, y1, x2-x1, y2-y1)
        # cv2.rectangle(raw_img, (x - int(w / 2), y - int(h / 2)), (x + int(w / 2), y + int(h / 2)), (80, 18, 236), 2)
        with open(out_file, 'a') as f:
            f.write("%s %g %d %d %d %d\n" %
                    ('face', prob, x1, y1, x2 - x1, y2 - y1))
    # while True:
    #     cv2.imshow('IMG', raw_img)
Example No. 9
face_detector = FaceDetector()
video_capture = cv2.VideoCapture(webcam_index)

fps = 0.0
while True:
    ret, frame = video_capture.read()  # frame shape 640*480*3
    if not ret:  # stop once no frame can be read
        break

    t1 = time.time()

    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # bboxes = face_detector.predict(rgb_frame, thresh)
    bboxes = face_detector.predict(rgb_frame)

    bg = np.zeros((map_height, map_width, 3))
    
    for box in bboxes:
        actual_face_size = math.sqrt(box[2] * box[3]) # use area of the face size as a measure of distance
        distance = max_face_size - actual_face_size # closest faces should be lowest values

        x = box[0] / max_x # scale to 0-1
        y = distance / max_y # scale to 0-1
        # print(str(x) + ", " + str(y))

        cv2.circle(bg, (int(map_width - (map_width * x)), int((map_height - map_height * y))), 8, (255, 255, 255), -1)

    if show_map:
        cv2.imshow('map', bg)
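
This fragment relies on configuration defined elsewhere in its source file. Purely as labeled assumptions, a plausible set of definitions that makes it self-contained:

# Assumed configuration, not part of the original snippet
webcam_index = 0                   # capture device index
map_width, map_height = 640, 480   # size of the top-down map image
max_face_size = 300                # largest expected face size, in pixels
max_x = 640                        # frame width used to normalize x
max_y = 300                        # distance range used to normalize y
show_map = True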
Example No. 10
    else:
        if _type == 1:
            return x + offset
        else:
            return x - offset


while video.isOpened():
    ret, frame = video.read()

    if ret is False:
        break

    # Detect on an RGB copy; keep the original BGR frame for blurring/writing
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    bboxes = face_detector.predict(rgb_frame, FACED_ACC)

    for (x, y, w, h, acc) in bboxes:
        x = x - (w // 2)
        y = y - (h // 2)
        # Clamp the padded box to the frame, then blur that region in place
        y1 = limit(y, y_offset, frame_h, 0)
        y2 = limit(y + h, y_offset, frame_h, 1)
        x1 = limit(x, x_offset, frame_w, 0)
        x2 = limit(x + w, x_offset, frame_w, 1)
        sub_face = frame[y1:y2, x1:x2]
        sub_face = cv2.GaussianBlur(sub_face, (BLUR, BLUR), 30)
        frame[y1:y2, x1:x2] = sub_face

    video_output.write(frame)
    if DEBUG:
Example No. 11
import cv2
from faced import FaceDetector
from faced.utils import annotate_image
from time import process_time

#___________________________________________________For Image______________________________________________________
face_detector = FaceDetector()

img = cv2.imread("face_det.jpg")

rgb_img = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2RGB)

# Receives RGB numpy image (HxWxC) and
# returns (x_center, y_center, width, height, prob) tuples.
bboxes = face_detector.predict(rgb_img, 0.7)

# Use this utils function to annotate the image.
ann_img = annotate_image(img, bboxes)

#save img
cv2.imwrite('face_detd.jpg', ann_img)

# Show the image
cv2.imshow('Result', ann_img)
cv2.waitKey(0)
cv2.destroyAllWindows()

#____________________________________________________For Video_______________________________________________________
video = 'Vid.mp4'
cap = cv2.VideoCapture(video)
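
The snippet ends after opening the capture. A minimal continuation following the same predict/annotate pattern used for the image above (an assumed sketch, not the original author's code):

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    bboxes = face_detector.predict(rgb, 0.7)
    cv2.imshow('Result', annotate_image(frame, bboxes))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()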
Example No. 12
}



face_detector = FaceDetector()

while True:
    ret, frame = vcap.read()
    

    boxes = []
    if ret:  # make sure a frame was received so nothing breaks
        print("vcap worked")
        frame_small = cv2.resize(frame, (int(frame_size_x/factor), int(frame_size_y/factor)))
        rgb_img = cv2.cvtColor(frame_small, cv2.COLOR_BGR2RGB)
        bboxes = face_detector.predict(rgb_img, thresh=0.6)
        #print(bboxes)
        for i in bboxes:
            x, y, w, h = i[0:4]
            boxes.append([x, y, w, h])
        # Scale all boxes from the downscaled frame back to full-frame
        # coordinates once, after collecting them
        boxes = np.multiply(boxes, factor)
    try:
        n_a = actors['pos'].shape[0]
    except:
        n_a = 0
    if n_a > 0:
        for x, y, h, w in actors['pos']:  # there are actors; draw them
            #print(x,y,h,w)
            #cv2.rectangle(frame,(x,y),(x+w,y+h),(0, 255, 0), 1)
            pass
      
Example No. 13
def _download_fec_data(tmp_dir, meta, target_shape):

    nonfailed = [None for _ in range(len(meta))]
    last_idx = 0

    detector = FaceDetector()
    detect_threshold = 0.7

    # For now just download all images and consider filtering for presence
    # of faces a secondary step
    for i, item in enumerate(meta):

        failed = False
        face_data = {}
        for case in ["a", "b", "c"]:
            url = item[case]["url"]
            # Use the URL as a filename to avoid collisions for
            # different images with the same filename
            filename = url.replace("/", "-")

            try:
                generator_utils.maybe_download(tmp_dir, filename, url)

                image_path = os.path.join(tmp_dir, filename)
                img = _read_image(image_path)

                bounds = item[case]["bounds"]
                cropped = _crop_to_fractional_xy_bbox(img, bounds)
                if cropped.shape[0] != cropped.shape[1]:
                    failed = True

                #cropped = _normalize_dimensions(cropped, target_shape)

                face_data[case] = cropped

                # Give the cropped image a unique filename: a primary image
                # may contain several faces, so writing different cropped
                # regions under the source filename would overwrite and mix
                # up faces. (Hashing the image content would work as well.)
                string_bounds = "-".join([str(thing) for thing in bounds])
                cropped_filename = "cropped@" + string_bounds + "#" + filename
                item[case]["cropped_filename"] = cropped_filename

                predictions = detector.predict(cropped, detect_threshold)
                has_face = len(predictions) > 0

                if not has_face:
                    failed = True

            except Exception:
                tf.logging.info("Exception case.")
                failed = True

        # End of for case in ["a", "b", "c"]
        if not failed:
            # Faces were detected in all three cases: write the cropped
            # images and keep this item.
            for case in ["a", "b", "c"]:
                out_path = os.path.join(tmp_dir,
                                        item[case]["cropped_filename"])
                cv2.imwrite(out_path, face_data[case])

            nonfailed[last_idx] = item
            last_idx += 1

    nonfailed = nonfailed[:last_idx]

    nonfailed_file = os.path.join(tmp_dir, "nonfailed.json")
    with tf.gfile.Open(nonfailed_file, "w") as f:
        f.write(json.dumps(nonfailed))

    return nonfailed
Example No. 14
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(asctime)s %(message)s')

# for local testing
model_path = "../faced/models/"
face_detector = FaceDetector()

img_path = "faces.jpg"
thresh = 0.8
bgr_img = cv2.imread(img_path)
rgb_img = cv2.cvtColor(bgr_img.copy(), cv2.COLOR_BGR2RGB)

# Receives RGB numpy image (HxWxC) and
# returns (x_center, y_center, width, height, prob) tuples.
logger.info('starting face detection ...')
start_time = time()
face_detection_list = face_detector.predict(frame=rgb_img, thresh=thresh)
detected_faces = len(face_detection_list)
end_time = time()
duration = end_time - start_time
logger.info(f'face detection took {duration:.2f} seconds')
logger.info(f'found boxes: {detected_faces}')

increase_box_percentage = 0.5
for i in range(detected_faces):
    logger.info(face_detection_list[i])

ann_img = utils.annotate_image(bgr_img, face_detection_list)
cv2.imwrite("found_boxes.jpg", ann_img)
Example No. 15
def run(mode, localPath):
    global font
    global success
    global totalInferenceDuration
    print("CUDE usage status : " + str(dlib.DLIB_USE_CUDA))
    #faced
    face_detector = FaceDetector()
    startTS = time.time()
    """ Load models """
    predictor_path = "assets/shape_predictor_5_face_landmarks.dat"
    face_rec_model_path = "assets/dlib_face_recognition_resnet_model_v1.dat"
    facerec = dlib.face_recognition_model_v1(face_rec_model_path)
    sp = dlib.shape_predictor(predictor_path)
    """ Check local/stream availability """
    if (mode == "stream"):
        # initialize the video stream and allow the cammera sensor to warmup
        print("[INFO] starting video stream...")
        vs = VideoStream(src=0).start()
        w = int(vs.get(3))
        h = int(vs.get(4))
        time.sleep(2.0)
    elif (mode == "local"):
        vidcap = cv2.VideoCapture(localPath)
        success, frame = vidcap.read()
        fps = vidcap.get(cv2.CAP_PROP_FPS)
        frameCtr = 0
        w = int(vidcap.get(3))
        h = int(vidcap.get(4))

    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    out = cv2.VideoWriter('output.avi', fourcc, 15, (w, h))

    while success:
        processStartTs = time.time()
        """ Acquire the next frame """
        if (mode == "stream"):
            frame = vs.read()

        elif (mode == "local"):
            success, frame = vidcap.read()

            frameCtr += 1
        """ grab the frame from the threaded video stream and resize it
		 to have a maximum width of 400 pixels """
        try:
            frame = imutils.resize(frame, width=400)
        except AttributeError:
            continue
        try:
            rgb_img = cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2RGB)
        except Exception:
            # frame is None once the local video is exhausted
            break
        inferenceStartTs = time.time()
        # faced (a thresh argument can be passed; defaults to 0.85)
        bboxes = face_detector.predict(rgb_img)
        inferenceEndTs = time.time()
        totalInferenceDuration += inferenceEndTs - inferenceStartTs

        helpers.min_clusters = len(bboxes)
        if (mode == "stream"):
            timestamp = calendar.timegm(time.gmtime())
        elif (mode == "local"):
            timestamp = float(frameCtr / fps)

        for x, y, w, h, p in bboxes:
            # With image coordinates growing downward, "top" here is the
            # larger y value and "bottom" the smaller one, so the
            # (left, bottom, right, top) order used below matches
            # dlib.rectangle's (left, top, right, bottom).
            top = int(y + h / 2)
            left = int(x - w / 2)
            bottom = int(y - h / 2)
            right = int(x + w / 2)
            cv2.rectangle(frame, (left, bottom), (right, top), (99, 44, 255),
                          1)
            cv2.putText(frame, str(p), (left, top + 5), font, 0.2,
                        (255, 255, 255), 1, cv2.LINE_AA)
            shape = sp(frame, dlib.rectangle(left, bottom, right, top))
            # Compute the 128D vector that describes the face identified by shape
            face_descriptor = facerec.compute_face_descriptor(frame, shape)
            bestIndex = cluster_faces.match(face_descriptor)
            if (bestIndex >= 0):
                cv2.putText(frame,
                            str(helpers.unique_persons[bestIndex]["uuid"]),
                            (left, top + 10), font, 0.2, (0, 255, 255), 1,
                            cv2.LINE_AA)
                data = [{
                    "uuid": helpers.unique_persons[bestIndex]["uuid"],
                    "timestamp": timestamp
                }]
                helpers.individual_stats.extend(data)
            else:
                cv2.putText(frame, "Learning...", (left, top + 10), font, 0.2,
                            (0, 255, 255), 1, cv2.LINE_AA)
                data = [{
                    "label": 0,
                    "timestamp": timestamp,
                    "encoding": face_descriptor
                }]
                helpers.candidate_persons.extend(data)

        try:
            frame = imutils.resize(frame, width=720)
        except AttributeError:
            continue

        cv2.putText(frame,
                    "FPS : " + str(int(1 / (time.time() - processStartTs))),
                    (20, 30), font, 1, (0, 255, 0), 3, cv2.LINE_AA, False)
        out.write(frame)

        #cv2.imshow("Frame", frame)
        if (len(helpers.candidate_persons) >=
            (helpers.MIN_FACES_PER_CLUSTER * helpers.min_clusters)):
            cluster_faces.cluster()
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
    # do a bit of cleanup
    if (mode == "stream"):
        vs.stop()
    endTS = time.time()
    out.release()
    print("Total number of unique faces = ", len(helpers.unique_persons))
    print("Total duration")
    print(endTS - startTS)
    print("Total inference duration")
    print(totalInferenceDuration)
Example No. 16
    def on_created(self, event):
        print(f'event type: {event.event_type}  path : {event.src_path}')
        time.sleep(2)

        try:
            # "on_created" also fires for partially uploaded files;
            # cut everything after the real extension, e.g. '.png':
            #/var/nextcloud_data/c4p/files/camera_footage/Ko-retina.png.ocTransferId1983807786.part
            # /camera_footage/camera_1/raw_footage
            # /camera_footage/camera_1/anonymized_footage

            # todo : anonymizing more than 1 face
            # todo : put face over face

            sucessful_anonymization = False

            filetype = find_filetype(event.src_path)
            print("filetype", filetype)

            path_to_file = event.src_path.split(filetype, 1)[0] + filetype
            print("path to file", path_to_file)

            camera_folder = get_camera_folder(path_to_file)
            print("camera_id", camera_folder)

            picture_id = get_picture_id(path_to_file, camera_folder, filetype)
            print("picture_id", picture_id)

            an_path = get_path_for_anonymous_pic(anonymous_folder,
                                                 camera_folder, picture_id,
                                                 filetype)
            print("path to anonymous file", an_path)

            face_detector = FaceDetector()

            print("reading image", path_to_file)
            img = cv2.imread(path_to_file)
            rgb_img = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2RGB)

            if thresh:
                bboxes = face_detector.predict(rgb_img, thresh)
            else:
                bboxes = face_detector.predict(rgb_img)

            # anonymize
            if bboxes:
                try:
                    print("bboxes containing face", bboxes)

                    print("creating anonymous picture")
                    ann_img = annotate_image(img, bboxes)

                    print("write anonymized version to anonymous folder")
                    cv2.imwrite(an_path, ann_img)

                    sucessful_anonymization = True

                except Exception as ex:
                    print(ex)
                    print("Anonymizing failed")
                    print("writing anonymized version failed")
                    sucessful_anonymization = False

                # delete the original if it was successfully anonymized
                if sucessful_anonymization:
                    if os.path.exists(path_to_file):
                        os.remove(path_to_file)
                    else:
                        print("Tried deleting, but the file does not exist",
                              path_to_file)

            # no faces found, picture is already anonymous
            else:
                print("no face found")
                if os.path.exists(path_to_file):
                    os.rename(path_to_file, an_path)

            print("refreshing owncloud")
            subprocess.call(cwd + "/refresh_nextcloud.sh", shell=True)

        except Exception as e:
            print(e)
            print("Anonymizing failed")
cap = cv2.VideoCapture(0)

cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("window", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

now = time.time()
while cap.isOpened():
    now = time.time()
    # Capture frame-by-frame
    ret, frame = cap.read()

    if not ret:  # stop once no frame can be read
        break

    bboxes = face_detector.predict(frame)
    gray_img = cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2GRAY)
    # faced returns (x_center, y_center, width, height, prob)
    for x, y, w, h, _ in bboxes:
        crop = gray_img[int(y - h / 2):int(y + h / 2),
                        int(x - h / 3):int(x + h / 3)]
        roi = cv2.resize(crop, (28, 28))
        roi = roi.astype('float') / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)
        neutral, smile = smile_model.predict(roi)[0]
        label = 'Smiling' if smile > neutral else "Not Smiling"
        if label == "Smiling":
            cv2.putText(frame, "{:.4f}".format(smile) + " :)",
                        (x - 30, int(y + h / 2 + 45)), cv2.FONT_HERSHEY_DUPLEX,
                        0.45, (0, 255, 0), 1)
        else: