def Detection_Faces(img_path):
    face_detector = FaceDetector()
    img = cv2.imread(img_path)
    rgb_img = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2RGB)
    thresh = 0.85
    bboxes = face_detector.predict(rgb_img, thresh)
    return bboxes
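The tuples returned above follow faced's (x_center, y_center, width, height, prob) convention noted in the comments of the examples below. A minimal sketch (not part of the faced API, names are hypothetical) of cropping the detected faces out of the original image from those center-format boxes:

import cv2

def crop_detected_faces(img_path):
    # Hypothetical helper: reuse Detection_Faces and convert each
    # center-format box to corner coordinates before slicing the BGR image.
    img = cv2.imread(img_path)
    crops = []
    for (x, y, w, h, prob) in Detection_Faces(img_path):
        x1, y1 = max(int(x - w / 2), 0), max(int(y - h / 2), 0)
        x2, y2 = int(x + w / 2), int(y + h / 2)
        crops.append(img[y1:y2, x1:x2])
    return crops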
Example #2
def test(request):
    # Get the image uploaded with the request
    img = request.FILES.get('img')
    # Get the uploaded image's file name
    img_name = img.name

    # Get the file extension
    ext = os.path.splitext(img_name)[1]
    # Add a timestamp to avoid duplicate file names
    now = time.time()
    # Rebuild the image name, keeping the image type
    img_name = f'imgs{now}{ext}'
    # Path where the image will be saved
    # img_path = os.path.join(settings.IMG_ROOT, img_name)
    BASE_DIR = Path(__file__).resolve().parent.parent
    img_path = os.path.join(os.path.join(BASE_DIR, "imgs/input"), img_name)

    # Write the uploaded image's content to disk
    with open(img_path, 'ab') as fp:
        # If the uploaded image is very large, img.chunks() splits it into pieces that are written one by one
        for chunk in img.chunks():
            fp.write(chunk)
    face_detector = FaceDetector()
    print(img_path)
    img = cv2.imread(img_path)
    rgb_img = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2RGB)
    bboxes = face_detector.predict(rgb_img, 0.8)
    ann_img = annotate_image(img, bboxes)
    cv2.imwrite('C:/Users/Administrator/Desktop/HelloWorld/static/test1.png',
                ann_img)
    return render(request, 'face.html')
Example #3
 def __init__(self):
     self.fa = FaceDetector()
     self.fa2 = dlib.get_frontal_face_detector()
     self.fa3 = cv2.dnn.readNetFromCaffe(
         "./models/deploy.prototxt.txt",
         "./models/res10_300x300_ssd_iter_140000.caffemodel")
     self.fa4 = dlib.cnn_face_detection_model_v1(
         "./models/mmod_human_face_detector.dat")
     self.thresh = 0.5
Example #4
    def facedect(self):

        face_detector = FaceDetector()
        self.progressbar.setValue(0)

        if self.videoname is not None:
            cap = cv2.VideoCapture(self.videoname)

            frame_width = int(cap.get(3))
            frame_height = int(cap.get(4))

            filename = self.videoname
            filename = filename.split('/')
            filename = filename[-1].split('.')

            fourcc = cv2.VideoWriter_fourcc(*'DIVX')
            out = cv2.VideoWriter(
                '{0}/masked_{1}.avi'.format(self.outputpath, filename[0]),
                fourcc, 25.0, (frame_width, frame_height))

            ret = True
            i = 0
            frames = []
            num_frame = 0
            while ret:
                ret, frame = cap.read()

                if ret:
                    frames.append(frame)
                    num_frame += 1
            step = 1

            for i, frame in enumerate(frames):

                rgb_img = cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2RGB)

                # Receives RGB numpy image (HxWxC) and
                # returns (x_center, y_center, width, height, prob) tuples.
                bboxes = face_detector.predict(rgb_img, 0.8)

                # Use this utils function to annotate the image.
                ann_img = annotate_image(frame, bboxes)
                out.write(ann_img)

                if (i / num_frame * 100) - step > 0:
                    step += 1
                    print(step)
                    self.progressbar.setValue(step)

            cap.release()
            out.release()
Example #5
    def __init__(self):
        file_paths, configs = read_config()
        if Camera.detector is None:
            print('[INFO] loading face detector...')
            Camera.detector = FaceDetector()

        if Camera.embedder is None:
            # load our serialized face embedding model from disk
            print('[INFO] loading embedder from {}'.format(
                file_paths['embedder_path']))
            Camera.embedder = cv2.dnn.readNetFromTorch(
                file_paths['embedder_path'])

        if Camera.recognizer is None:
            # load the actual face recognition model along with the label encoder
            print('[INFO] loading face recognizer from {}'.format(
                file_paths['recognizer_path']))
            Camera.recognizer = pickle.loads(
                open('output/recognizer.pickle', 'rb').read())

        if Camera.le is None:
            print('[INFO] loading le from {}'.format(file_paths['le_path']))
            Camera.le = pickle.loads(open('output/le.pickle', 'rb').read())

        print('[INFO] Confidence value is set to {}'.format(
            configs['confidence']))
        Camera.confidence = float(configs['confidence'])

        Camera.max_retry_count = int(configs['max_retry_count'])
Example #6
class FaceTrackServer(object):

    faces = []
    face_locations = []
    face_relative_locations = []
    cam_h = None
    cam_w = None
    camera_address = None

    def __init__(self, down_scale_factor=0.25):
        assert 0 <= down_scale_factor <= 1
        self.down_scale_factor = down_scale_factor
        self.face_detector = FaceDetector()

    def get_cam_info(self):
        return {
            'camera': {
                'width': self.cam_w,
                'height': self.cam_h,
                'address': self.camera_address
            }
        }

    def reset(self):
        self.face_relative_locations = []
        self.face_locations = []
        self.faces = []

    def process(self, frame):
        self.reset()
        self.cam_h, self.cam_w, _ = frame.shape
        # Resize frame of video to 1/4 size for faster face recognition processing

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_img = cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2RGB)
        self.face_locations = self.face_detector.predict(rgb_img)
        # Display the results
        if len(self.face_locations) > 1:
            self.face_locations = []

        for x, y, w, h, _ in self.face_locations:
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            x1 = int((x - int(w / 2)) * (1 - 0.1))
            y1 = int((y - int(h / 2)) * (1 - 0.1))
            x2 = int((x + int(w / 2)) * (1 + 0.1))
            y2 = int((y + int(h / 2)) * (1 + 0.1))

            _face_area = frame[y1:y2, x1:x2, :]

            if _face_area.size != 0:
                self.faces.append(_face_area)

        print('[FaceTracker Server] Found {} faces!'.format(len(self.faces)))
        return self.faces

    def get_faces_loc(self):
        return self.face_locations

    def get_faces(self):
        return self.faces
Example #7
 def createFaceDetector(self):
     '''Instantiate the face detection network.'''
     start = datetime.now()
     fdet_network = FaceDetector()
     elapsed = datetime.now() - start
     # Assign the attributes
     self.fdet_network = fdet_network
     self.t_face_det = elapsed
Example #8
def process_and_encode(dataset):
    print("[LOG] Collecting images ...")
    images = []
    for direc, _, files in tqdm(os.walk(dataset)):
        for file in files:
            if file.endswith("jpg"):
                images.append(os.path.join(direc,file))
    # initialize the list of known encodings and known names
    known_encodings = []
    known_names = []
    print("[LOG] Encoding faces ...")
    model=vggface()
    vgg_face_descriptor = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
    face_detector = FaceDetector()
    face_alignment_predictor = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D,flip_input=False)
    for image_path in tqdm(images):
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        boxes = face_detector.predict(image)
        if(boxes==[]):
            warnings.warn('system could not detect face in this image %s'%(image_path))
            continue
        (x,y,w,h,prob)=boxes[0]
        #TODO align faces
        x1 = int(x - w/2)
        y1 = int(y - h/2)
        x2 = int(x + w/2)
        y2 = int(y + h/2)
        part_image = image[y1:y2,x1:x2]
        landmarks=face_alignment_predictor.get_landmarks(part_image)
        if(landmarks!=[] and landmarks!=None):
            part_image=execute_alignment(part_image,landmarks)
        part_image=preprocess_image(part_image)
        encoding = vgg_face_descriptor.predict(part_image)[0,:]
        # the person's name is the name of the folder where the image comes from
        name = image_path.split(os.path.sep)[-2]
        if len(encoding) > 0 : 
            known_encodings.append(encoding)
            known_names.append(name)
    np.savez('data/encodings/encoding_vggface.npz',encodings=known_encodings,names=known_names)
    return 
Example #9
class FADetector():
    def __init__(self):
        self.fa = FaceDetector()
        self.fa2 = dlib.get_frontal_face_detector()
        self.fa3 = cv2.dnn.readNetFromCaffe(
            "./models/deploy.prototxt.txt",
            "./models/res10_300x300_ssd_iter_140000.caffemodel")
        self.fa4 = dlib.cnn_face_detection_model_v1(
            "./models/mmod_human_face_detector.dat")
        self.thresh = 0.5

    #CPU FACE DETECTION WITH OPENCV DNN (ResNet-10 SSD CAFFE MODEL)
    def cv2dnn_facedetection(self, rgb, pad=20):
        h, w = rgb.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(rgb, (300, 300)), 1.0,
                                     (300, 300), (103.93, 116.77, 123.68))
        self.fa3.setInput(blob)
        detections = self.fa3.forward()

        #get driver bounding box based on rightmost position
        rightmost = -1
        for i in range(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            if confidence > 0.7 and box[0] > rightmost:
                rightmost = box[0]
                box = box.astype("int")
                bbox = dlib.rectangle(box[0], box[1], box[2], box[3])
        if rightmost == -1: return

        return rgb[bbox.top():bbox.bottom(), bbox.left():bbox.right()]

    #YOLO FACE DETECTION FROM https://github.com/iitzco/faced
    def yolo_facedetection(self, rgb):
        bbox = self.fa.predict(rgb, self.thresh)
        box = bbox[0]
        l = box[1] - box[2] // 2
        t = box[0] - box[3] // 2
        r = box[1] + box[2] // 2
        b = box[0] + box[3] // 2
        return rgb[t:b, l:r]

    #CNN FACE DETECTION FROM DLIB
    def dlibcnn_facedetection(self, rgb, save=False):
        dets = self.fa4(rgb, 0)
        d = dets[0]
        return rgb[d.rect.top():d.rect.bottom(), d.rect.left():d.rect.right()]
Example #10
 def __init__(self, down_scale_factor=0.25):
     assert 0 <= down_scale_factor <= 1
     self.down_scale_factor = down_scale_factor
     self.face_detector = FaceDetector()
Example #11
def run(mode, localPath):
    global font
    global success
    global totalInferenceDuration
    print("CUDE usage status : " + str(dlib.DLIB_USE_CUDA))
    #faced
    face_detector = FaceDetector()
    startTS = time.time()
    """ Load models """
    predictor_path = "assets/shape_predictor_5_face_landmarks.dat"
    face_rec_model_path = "assets/dlib_face_recognition_resnet_model_v1.dat"
    facerec = dlib.face_recognition_model_v1(face_rec_model_path)
    sp = dlib.shape_predictor(predictor_path)
    """ Check local/stream availability """
    if (mode == "stream"):
        # initialize the video stream and allow the camera sensor to warm up
        print("[INFO] starting video stream...")
        vs = VideoStream(src=0).start()
        w = int(vs.get(3))
        h = int(vs.get(4))
        time.sleep(2.0)
    elif (mode == "local"):
        vidcap = cv2.VideoCapture(localPath)
        success, frame = vidcap.read()
        fps = vidcap.get(cv2.CAP_PROP_FPS)
        frameCtr = 0
        w = int(vidcap.get(3))
        h = int(vidcap.get(4))

    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    out = cv2.VideoWriter('output.avi', fourcc, 15, (w, h))

    while success:
        processStartTs = time.time()
        """ Acquire the next frame """
        if (mode == "stream"):
            frame = vs.read()

        elif (mode == "local"):
            success, frame = vidcap.read()

            frameCtr += 1
        """ grab the frame from the threaded video stream and resize it
		 to have a maximum width of 400 pixels """
        try:
            frame = imutils.resize(frame, width=400)
        except AttributeError:
            continue
        try:
            rgb_img = cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2RGB)
        except:
            break
        inferenceStartTs = time.time()
        # faced (a thresh argument can be passed; it defaults to 0.85)
        bboxes = face_detector.predict(rgb_img)
        inferenceEndTs = time.time()
        totalInferenceDuration += inferenceEndTs - inferenceStartTs

        helpers.min_clusters = len(bboxes)
        if (mode == "stream"):
            timestamp = calendar.timegm(time.gmtime())
        elif (mode == "local"):
            timestamp = float(frameCtr / fps)

        for x, y, w, h, p in bboxes:
            top = int(y + h / 2)
            left = int(x - w / 2)
            bottom = int(y - h / 2)
            right = int(x + w / 2)
            cv2.rectangle(frame, (left, bottom), (right, top), (99, 44, 255),
                          1)
            cv2.putText(frame, str(p), (left, top + 5), font, 0.2,
                        (255, 255, 255), 1, cv2.LINE_AA)
            shape = sp(frame, dlib.rectangle(left, bottom, right, top))
            # Compute the 128D vector that describes the face in img identified by
            face_descriptor = facerec.compute_face_descriptor(frame, shape)
            bestIndex = cluster_faces.match(face_descriptor)
            if (bestIndex >= 0):
                cv2.putText(frame,
                            str(helpers.unique_persons[bestIndex]["uuid"]),
                            (left, top + 10), font, 0.2, (0, 255, 255), 1,
                            cv2.LINE_AA)
                data = [{
                    "uuid": helpers.unique_persons[bestIndex]["uuid"],
                    "timestamp": timestamp
                }]
                helpers.individual_stats.extend(data)
            else:
                cv2.putText(frame, "Learning...", (left, top + 10), font, 0.2,
                            (0, 255, 255), 1, cv2.LINE_AA)
                data = [{
                    "label": 0,
                    "timestamp": timestamp,
                    "encoding": face_descriptor
                }]
                helpers.candidate_persons.extend(data)

        try:
            frame = imutils.resize(frame, width=720)
        except AttributeError:
            continue

        cv2.putText(frame,
                    "FPS : " + str(int(1 / (time.time() - processStartTs))),
                    (20, 30), font, 1, (0, 255, 0), 3, cv2.LINE_AA, False)
        out.write(frame)

        #cv2.imshow("Frame", frame)
        if (len(helpers.candidate_persons) >=
            (helpers.MIN_FACES_PER_CLUSTER * helpers.min_clusters)):
            cluster_faces.cluster()
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
    # do a bit of cleanup
    if (mode == "stream"):
        vs.stop()
    endTS = time.time()
    out.release()
    print("Total number of unique faces = ", len(helpers.unique_persons))
    print("Total duration")
    print(endTS - startTS)
    print("Total inference duration")
    print(totalInferenceDuration)
Example #12
class FaceTrackServer(object):
    face_detector = FaceDetector()
    faces = []
    face_locations = []
    face_relative_locations = []
    cam_h = None
    cam_w = None
    camera_address = None

    def __init__(self, down_scale_factor=0.25):
        assert 0 <= down_scale_factor <= 1
        self.down_scale_factor = down_scale_factor

    def get_cam_info(self):
        return {
            'camera': {
                'width': self.cam_w,
                'height': self.cam_h,
                'address': self.camera_address
            }
        }

    def reset(self):
        self.face_relative_locations = []
        self.face_locations = []
        self.faces = []

    def process(self, frame, called_from_encode=False):
        self.reset()
        self.cam_h, self.cam_w, _ = frame.shape
        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0),
                                 fx=self.down_scale_factor,
                                 fy=self.down_scale_factor)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]
        face_annotations = self.face_detector.predict(rgb_small_frame)
        # Display the results
        for (x, y, w, h, prob) in face_annotations:
            x1 = int(x - w / 2)
            y1 = int(y - h / 2)
            x2 = int(x + w / 2)
            y2 = int(y + h / 2)
            self.face_locations.append((y1, x2, y2, x1))
        for y1_sm, x2_sm, y2_sm, x1_sm in self.face_locations:
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            x1 = int(x1_sm / self.down_scale_factor)
            x2 = int(x2_sm / self.down_scale_factor)
            y1 = int(y1_sm / self.down_scale_factor)
            y2 = int(y2_sm / self.down_scale_factor)

            x1_rltv = x1 / self.cam_w
            x2_rltv = x2 / self.cam_w
            y1_rltv = y1 / self.cam_h
            y2_rltv = y2 / self.cam_h

            _face_area = frame[y1:y2, x1:x2, :]
            if _face_area.size == 0:
                continue
            self.faces.append(_face_area)
            self.face_relative_locations.append(
                [x1_rltv, y1_rltv, x2_rltv, y2_rltv])
            # cv2.imshow('faces', frame[y1:y2, x1:x2, :])
            # cv2.waitKey(0)
        if (called_from_encode == False):
            print('[FaceTracker Server] Found {} faces!'.format(len(
                self.faces)))
        return self.faces

    def get_faces_loc(self, relative=True):
        if relative:
            return self.face_relative_locations
        else:
            return self.face_locations

    def get_faces(self):
        return self.faces
Example #13
def face_finder():
    fps = 12
    #camera_addr="/dev/video0"
    camera_ip = "10.32.89.135"
    camera_addr = "rtsp://*****:*****@" + camera_ip
    #camera_addr='0'
    camera_addr = "/dev/video1"

    print("Connecting to " + camera_ip + " ...")
    vcap = cv2.VideoCapture(camera_addr)
    vcap.set(20, 1)  #buffer size = 1
    vcap.set(
        cv2.CAP_PROP_FPS,
        60)  # camera.fps = fps; this also has to be configured on the camera itself
    vcap.set(cv2.CAP_PROP_FRAME_WIDTH, 720)
    vcap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    #vcap.set(cv2.CV_CAP_PROP_FPS, 60)
    vcap.set(15, -10)
    ret, frame = vcap.read(
    )  # read one frame first so the capture's properties can be inspected
    #print (frame.shape)
    frame_size_x = vcap.get(3)  # store the frame size
    frame_size_y = vcap.get(4)
    print("Successfully connected :D")

    actors = init_actors_multi()
    #print('initial actors shape' + str(actors['pos'].shape))
    boxes = init_boxes()
    face_detector = FaceDetector()
    counter = 0
    centers = []
    while True:
        start = time.time()
        vcap.grab()  # grab an image from vcap
        ret, frame = vcap.retrieve(0)
        boxes = np.empty([0, 4], dtype=int)
        if ret:  # check that an image was actually received
            #print("vcap worked")
            #frame_small = cv2.resize(frame, (int(frame_size_x/factor), int(frame_size_y/factor)))
            frame_ann = frame
            rgb_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            bboxes = face_detector.predict(rgb_img, thresh=0.8)
            #frame_ann = annotate_image(frame, bboxes)
            #transform boxes into x,y,w,h starting in corner
            for bb in bboxes:
                x, y, w, h = bb[0:4]
                x = x - w / 2
                y = y - h / 2
                boxes = np.vstack([boxes, [int(x), int(y), int(w), int(h)]])
            #print('boxes: '+str(boxes))
        actors_new = scene_multi(boxes, actors, frame_size_x, frame_size_y)

        appeared = init_actors_multi()

        for i in range(actors_new['id'].shape[0]):
            unique = True
            box = actors_new['pos'][i, 0]
            center_x, center_y = get_box_center(box)
            plot_traj(frame_ann, centers, center_x, center_y)
            x, y, w, h = expand_box(box, 2, frame_size_x, frame_size_y)
            cv2.rectangle(frame_ann, (x, y), (x + w, y + h), (0, 255, 0), 1)
            for j in range(actors['id'].shape[0]):
                #check if it existed already, else, append it to the appeared array
                if (actors_new['id'][i] == actors['id'][j]):
                    unique = False
            if unique:
                appeared['id'] = np.vstack(
                    [appeared['id'], actors_new['id'][i]])
                appeared['pos'] = np.vstack(
                    [appeared['pos'], [actors_new['pos'][i]]])

                print(x, y, w, h)
                face_crop = frame[y:y + h, x:x + w]
                cv2.imwrite('findings/' + str(time.time()) + '.jpg', face_crop)
                cv2.rectangle(frame_ann, (x, y), (x + w, y + h), (0, 255, 255),
                              4)
                counter += 1
                #print("Finding " + actors_new['id'][i][0] + ' saved')
        if len(centers) > 0 and actors_new['id'].shape[0] == 0:
            centers.pop(0)
        try:
            cv2.imshow("test", frame_ann)
        except:
            pass
        cv2.waitKey(1)

        print('appeared: ' + str(appeared['id']))
        actors = actors_new
        leisure_time = max(1 / fps - (time.time() - start), 0)
        #print("leisure_time at " + str(fps) + "fps = " + str(leisure_time))
        print("counter", counter)
        time.sleep(leisure_time)

    return True
Example #14
import time

import cv2

from faced import FaceDetector
from faced.utils import annotate_image

# Config
webcam_index = 0 # 0 for built-in webcam, 1 for external webcam (generally)
max_face_size = 220 # based on the actual_face_size of people very close to the camera
max_x = 640 # basically the webcam frame width
max_y = 200 # max distance at which we detect people (based on the actual_face_size)

print_fps = True # print FPS to stdout
show_webcam = True
show_map = True # show a map of the people in window while running
map_width = 400
map_height = 400

face_detector = FaceDetector()
video_capture = cv2.VideoCapture(webcam_index)

fps = 0.0
while True:
    ret, frame = video_capture.read() # frame shape 640*480*3
    if not ret:
        break

    t1 = time.time()

    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # bboxes = face_detector.predict(rgb_frame, thresh)
    bboxes = face_detector.predict(rgb_frame)
Example #15
import cv2
import os
import sys
from faced import FaceDetector
from faced.utils import annotate_image

if len(sys.argv) < 3:
    print('video paths were not provided')
    exit(-1)

face_detector = FaceDetector()

DEBUG = bool(int(0 if os.getenv('DEBUG') is None else os.getenv('DEBUG')))
BLUR = int(23 if os.getenv('BLUR') is None else os.getenv('BLUR'))
FACED_ACC = float(
    0.85 if os.getenv('FACED_ACC') is None else os.getenv('FACED_ACC'))

video = cv2.VideoCapture(sys.argv[1])

x_offset = 50
y_offset = 50

frame_index = 0

frame_w = int(video.get(3))
frame_h = int(video.get(4))

(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
if int(major_ver) < 3:
    fps = video.get(cv2.cv.CV_CAP_PROP_FPS)
    print("Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".
Example #16
import os

print(os.getcwd())
os.chdir(".../faced-master/")

import cv2
from faced import FaceDetector
from faced.utils import annotate_image
from time import process_time

#___________________________________________________For Image______________________________________________________
face_detector = FaceDetector()

img = cv2.imread("face_det.jpg")

rgb_img = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2RGB)

# Receives RGB numpy image (HxWxC) and
# returns (x_center, y_center, width, height, prob) tuples.
bboxes = face_detector.predict(rgb_img, 0.7)

# Use this utils function to annotate the image.
ann_img = annotate_image(img, bboxes)

#save img
cv2.imwrite('face_detd.jpg', ann_img)

# Show the image
cv2.imshow('Result', ann_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #17
vcap.set(20,1)
vcap.set(5,6)
ret, frame = vcap.read() # read once so the frame's properties can be inspected
print (frame.shape)
frame_size_x = vcap.get(3) # store the frame size
frame_size_y = vcap.get(4)
print("Successfully connected :D")

actors={
    'id':np.empty([0,1], dtype = str),
    'pos':np.empty([0,4], dtype = int)
}



face_detector = FaceDetector()

while True:
    ret, frame = vcap.read()
    

    boxes = []
    if ret: # check that an image was received so the code doesn't crash
        print("vcap worked")
        frame_small = cv2.resize(frame, (int(frame_size_x/factor), int(frame_size_y/factor)))
        rgb_img = cv2.cvtColor(frame_small, cv2.COLOR_BGR2RGB)
        bboxes = face_detector.predict(rgb_img, thresh=0.6)
        #print(bboxes)
        for i in bboxes:
            x,y,w,h = i[0:4]
            boxes.append([x,y,w,h])
Example #18
import cv2
import numpy as np

from faced import FaceDetector


from get_landmark import get_landmark

face_detector = FaceDetector()  # load from faced


def getFaceCoordinate(bboxes):
    """
    Return bbox from faced to face coordinate in ( x , y , x1 , y1 ) format and confidence

    Parameter
    -----------
    bboxes: tuple
        bbox from faced's predict. ( x , y , w , h , c )

    Returns
    -----------
    coordinate: np array
        [ x , y , x1 , y1 ]
    confidence: float
        0.00 ~ 1.00
    """

    converted_bboxes = []
    c = 0
    for bbox in bboxes:
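The listing above is cut off mid-loop. A minimal sketch of the conversion the docstring describes, assuming the (x, y, w, h, c) center-format tuples that faced's predict returns (hypothetical completion, not the author's original code):

import numpy as np

def get_face_coordinate_sketch(bboxes):
    # Convert faced's (x_center, y_center, width, height, confidence) boxes
    # into [x, y, x1, y1] corner coordinates plus the last box's confidence.
    converted_bboxes = []
    confidence = 0.0
    for (x, y, w, h, c) in bboxes:
        left = int(x - w / 2)
        top = int(y - h / 2)
        right = int(x + w / 2)
        bottom = int(y + h / 2)
        converted_bboxes.append([left, top, right, bottom])
        confidence = c
    return np.array(converted_bboxes), confidence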
Example #19
def _download_fec_data(tmp_dir, meta, target_shape):

    nonfailed = [None for _ in range(len(meta))]
    last_idx = 0

    detector = FaceDetector()
    detect_threshold = 0.7

    # For now just download all images and consider filtering for presence
    # of faces a secondary step
    for i, item in enumerate(meta):

        failed = False
        face_data = {}
        for case in ["a", "b", "c"]:
            url = item[case]["url"]
            # Use the URL as a filename to avoid collisions for
            # different images with the same filename
            filename = url.replace("/", "-")

            try:
                generator_utils.maybe_download(tmp_dir, filename, url)

                image_path = os.path.join(tmp_dir, filename)
                img = _read_image(image_path)

                bounds = item[case]["bounds"]
                cropped = _crop_to_fractional_xy_bbox(img, bounds)
                if cropped.shape[0] != cropped.shape[1]:
                    failed = True

                #cropped = _normalize_dimensions(cropped, target_shape)

                face_data[case] = cropped

                # For now this should be fine. But alternatively could
                # hash the image content.
                # This being to give unique filename to which to write the
                # cropped image, given primary images may have multiple faces
                # within them thus we will be over-writing and mixing up faces
                # if we write different cropped regions of a primary image to the
                # same file.
                string_bounds = "-".join([str(thing) for thing in bounds])
                cropped_filename = "cropped@" + string_bounds + "#" + filename
                item[case]["cropped_filename"] = cropped_filename

                predictions = detector.predict(cropped, detect_threshold)
                has_face = len(predictions) > 0

                if not has_face:
                    failed = True

            except:
                tf.logging.info("Exception case.")
                failed = True

        # End of for case in ["a", "b", "c"]
        if not failed:
            # If we have detected faces in all three cases let's build and write an
            # example.
            for case in ["a", "b", "c"]:
                out_path = os.path.join(tmp_dir,
                                        item[case]["cropped_filename"])
                cv2.imwrite(out_path, face_data[case])

        if not failed:
            nonfailed[last_idx] = item
            last_idx += 1

    nonfailed = nonfailed[:last_idx]

    nonfailed_file = os.path.join(tmp_dir, "nonfailed.json")
    with tf.gfile.Open(nonfailed_file, "w") as f:
        f.write(json.dumps(nonfailed))

    return nonfailed
Example #20
def main(mode: int = 0,
         device: int = 0,
         size: int = 480,
         cfg: str = 'YOLOv3-cfg',
         weights: str = 'YOLOv3-wider_16000.weights',
         ratio: float = 1.0,
         facemarks: bool = False,
         mosaic: bool = False) -> None:
    #
    # detection algorithm [0:hog, 1:haar, 2:YOLO, 3:faced]
    #
    import cv2
    if mode == 0:
        print(f'detecting faces: dlib hog/cnn.')

    elif mode == 1:
        print(f'detecting faces: AdaBoost using haarcascade.')
        global face_cascade
        face_cascade = cv2.CascadeClassifier(
            'haarcascade_frontalface_default.xml')

    elif mode == 2:
        print(f'detecting faces: YOLOv3.')
        global YOLOnet
        YOLOnet = cv2.dnn.readNetFromDarknet(cfg, weights)
        YOLOnet.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
        YOLOnet.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

    elif mode == 3:
        print(f'detecting faces: faced(FaceDetector).')
        global face_detector
        face_detector = FaceDetector()

    else:
        print(f'detecting faces: unknown={mode}.')
        return

    #
    # window for video capture
    #
    # import cv2
    windowName = 'Video'
    cv2.namedWindow(windowName)
    # cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
    cv2.setWindowProperty(windowName, cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)
    cv2.waitKey(10)
    cv2.setWindowProperty(windowName, cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_NORMAL)

    #
    # video capture
    # Macbook12: fps=30.0, w=848.0, h=480.0
    # iMac5K   : fps=29.97002997002997, w=960.0, h=544.0, detecting faces: YOLOv3.
    #
    print(f'video capture device: {device}')
    video_capture = cv2.VideoCapture(device)
    if not video_capture.isOpened():
        print(f"Couldn't open video file or webcam, device={device}.")
        raise ImportError(
            f"Couldn't open video file or webcam, device={device}.")

    video_capture.set(cv2.CAP_PROP_FPS, 60)  # set the camera FPS to 60
    video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, size)  # set the camera frame width to 1280 (640)
    # video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, size)  # set the camera frame height to 720 (480)
    print(f"video capture device: fps={video_capture.get(cv2.CAP_PROP_FPS)}")
    print(
        f"video capture device: w={video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)}"
    )
    print(
        f"video capture device: h={video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)}"
    )

    #
    # count
    #
    count = 0

    #
    # down sampling ratio
    #
    down_sampling_ratio = 1.0 / ratio

    #
    # fps
    #
    tm = cv2.TickMeter()
    tm.start()
    fps_count = 0
    fps_count_max = 10
    fps_number = 0

    #
    # capture video frame
    #
    while (video_capture.isOpened() == True):
        #
        # grab a single frame of video
        #
        ret, frame = video_capture.read()
        if not ret:
            print(f"-")
            continue
        # print( f"." )

        #
        # flip
        #
        frame = cv2.flip(frame, 1)  # Flip camera horizontally

        #
        # Resize frame of video to 1/4 size for faster face recognition processing
        # frame_buffer = imutils.resize(frame, width=480)
        #
        if ratio > 1.0:
            # frame_buffer = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
            frame_buffer = cv2.resize(frame, (0, 0),
                                      fx=down_sampling_ratio,
                                      fy=down_sampling_ratio)
        else:
            frame_buffer = frame

        #
        # convert the image from BGR color (which OpenCV uses) to RGB color (which faced uses)
        # rgb_frame = frame[:, :, ::-1]
        # rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        #
        rgb_frame = cv2.cvtColor(frame_buffer, cv2.COLOR_BGR2RGB)

        #
        # Find all the faces and face encodings in the frame of video
        #
        face_locations = []
        if mode == 0:
            name = "hog"  # hog or cnn
            face_locations = faced.face_locations(
                rgb_frame, number_of_times_to_upsample=1, model=name)
        elif mode == 1:
            # global	face_cascade
            face_locations = face_locations_by_haarcascade(
                rgb_frame, face_cascade)

        elif mode == 2:
            # global	YOLOnet
            face_locations = face_locations_by_YOLO(rgb_frame, YOLOnet)

        elif mode == 3:
            face_locations = face_locations_by_FaceDetector(rgb_frame)

        else:
            print(f'detecting faces: unknown={mode}.')
            return

        #
        # check faces: (top, right, bottom, left)
        #
        if len(face_locations) > 0:
            x = face_locations[0][3]  # x		: left
            y = face_locations[0][0]  # y		: top
            w = face_locations[0][1] - x  # x + w	: right
            h = face_locations[0][2] - y  # y + h : bottom
            # print( f"face_locations={face_locations}, 0:x={x:3d}, y={y:3d}, w={w:3d}, h={h:3d}", end="")
            print(
                f'face_locations:{len(face_locations):2d} persons, x={x:3d}, y={y:3d}, w={w:3d}, h={h:3d} : ',
                end='\n')

        #
        # face encoding features(vector)
        #
        face_encodings = faced.face_encodings(rgb_frame, face_locations)

        #
        # Loop through each face in this frame of video
        #
        for (top, right, bottom,
             left), face_encoding in zip(face_locations, face_encodings):
            #
            # See if the face is a match for the known face(s)
            #
            matches = faced.compare_faces(known_face_encodings,
                                          face_encoding,
                                          tolerance=0.6)

            #
            # If a match was found in known_face_encodings, just use the first one.
            # if True in matches:
            #     first_match_index = matches.index(True)
            #     name = known_face_names[first_match_index]
            # Or instead, use the known face with the smallest distance to the new face
            #
            name = "Unknown"
            face_distances = faced.face_distance(known_face_encodings,
                                                 face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]
                # print( f" !!!found...{name} d({best_match_index})={face_distances[best_match_index]}", end="\n" )
                # print( f"{name}\td[{best_match_index:2d}]={face_distances[best_match_index]:6.4f},\t", end="" )
                print(
                    f"{name}:d[{best_match_index:2d}]={face_distances[best_match_index]:6.4f},\t",
                    end="\n")
                #
                # face landmarks
                #
                if facemarks == True:
                    rgb_frame = draw_face_landmarks(rgb_frame, top, right,
                                                    bottom, left, name)
                    pass
            else:
                # print( f"1 {name}\td[{best_match_index:2d}]={face_distances[best_match_index]:6.4f},\t", end="\n" )
                pass

            # if facemarks == True:
            # 	rgb_frame = draw_face_landmarks(rgb_frame, top, right, bottom, left, name)

            #
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            #
            if ratio > 1.0:
                top *= int(ratio)
                right *= int(ratio)
                bottom *= int(ratio)
                left *= int(ratio)

            #
            # mosaic
            #
            if mosaic == True:
                rgb_frame = mosaic_area(rgb_frame,
                                        left,
                                        top,
                                        right - left,
                                        bottom - top,
                                        ratio=0.05)

            #
            # Draw a box,label(name) around the face
            #
            print(f'facemarks={facemarks}, name={name}')
            if facemarks == False:
                cv2.rectangle(rgb_frame, (left, top), (right, bottom),
                              (0, 0, 255), 2)
                cv2.rectangle(rgb_frame, (left, bottom - 35), (right, bottom),
                              (0, 0, 255), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(rgb_frame, name, (left + 6, bottom - 6), font, 1.0,
                            (255, 255, 255), 1, cv2.LINE_AA)
            else:
                x = left
                y = top
                w = right - left
                h = bottom - top
                # img = cv2.line(img,(0,0),(511,511),(255,0,0),5)
                s = [(x, y + int(h / 3)), (x, y), (x + int(w / 3), y)]
                cv2.line(rgb_frame,
                         s[0],
                         s[1],
                         color=(0, 255, 0),
                         thickness=3,
                         lineType=cv2.LINE_AA)
                cv2.line(rgb_frame,
                         s[1],
                         s[2],
                         color=(0, 255, 0),
                         thickness=3,
                         lineType=cv2.LINE_AA)

                s = [(x + w - int(w / 3), y), (x + w, y),
                     (x + w, y + int(h / 3))]
                cv2.line(rgb_frame,
                         s[0],
                         s[1],
                         color=(0, 255, 0),
                         thickness=3,
                         lineType=cv2.LINE_AA)
                cv2.line(rgb_frame,
                         s[1],
                         s[2],
                         color=(0, 255, 0),
                         thickness=3,
                         lineType=cv2.LINE_AA)

                s = [(x, y + h - int(h / 3)), (x, y + h),
                     (x + int(w / 3), y + h)]
                cv2.line(rgb_frame,
                         s[0],
                         s[1],
                         color=(0, 255, 0),
                         thickness=3,
                         lineType=cv2.LINE_AA)
                cv2.line(rgb_frame,
                         s[1],
                         s[2],
                         color=(0, 255, 0),
                         thickness=3,
                         lineType=cv2.LINE_AA)

                s = [(x + w - int(w / 3), y + h), (x + w, y + h),
                     (x + w, y + h - int(h / 3))]
                cv2.line(rgb_frame,
                         s[0],
                         s[1],
                         color=(0, 255, 0),
                         thickness=3,
                         lineType=cv2.LINE_AA)
                cv2.line(rgb_frame,
                         s[1],
                         s[2],
                         color=(0, 255, 0),
                         thickness=3,
                         lineType=cv2.LINE_AA)

                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(rgb_frame, name, (left + 6, bottom - 6), font, 1.0,
                            (0, 255, 0), 1, cv2.LINE_AA)

        if len(face_locations) > 0:
            print(f'.')

        #
        # fps
        #
        if fps_count == fps_count_max:
            tm.stop()
            fps_number = fps_count_max / tm.getTimeSec()
            tm.reset()
            tm.start()
            fps_count = 0

        cv2.putText(rgb_frame,
                    'FPS:{:.2f}'.format(fps_number), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1.0, (0, 255, 0),
                    thickness=2,
                    lineType=cv2.LINE_AA)
        fps_count += 1

        # Display the resulting image
        if ret:
            frame = cv2.cvtColor(rgb_frame, cv2.COLOR_RGB2BGR)
            cv2.imshow(windowName, frame)
            pass

        #
        # keyboard operation
        #
        global key_save
        global key_facelandmarks
        global key_mosaic
        global key_quit

        c = cv2.waitKey(1) & 0xff

        if c == ord(key_quit) or c == 27:
            break

        elif c == ord(key_save):  # save on the space key (32)
            # cv2.imwrite( './filename_' + str(count) + '.jpg', frame ) # save with sequential numbering from 001
            bewrite = True
            filename = "screenshot_" + str(count) + ".jpg"
            while os.path.exists(filename):
                print('skip:' + filename)
                count += 1
                filename = "screenshot_" + str(count) + ".jpg"
                if count >= 100:
                    bewrite = False

            if bewrite:
                cv2.imwrite(filename, frame)  # save with sequential numbering from 001
                count += 1
                print('save done:' + filename)

        elif c == ord(key_facelandmarks):
            facemarks = not facemarks

        elif c == ord(key_mosaic):
            mosaic = not mosaic

        elif c == ord(key_facelandmarks_ext):
            global facelandmarks_ext
            facelandmarks_ext += 1
            _, facelandmarks_ext = divmod(facelandmarks_ext, 3)

    #
    # release handle to the webcam
    #
    video_capture.release()
    cv2.destroyWindow(windowName)
    cv2.destroyAllWindows()
Example #21
# @Last Modified time: 2019-10-31 11:46:21
import os
import cv2
import time
from faced import FaceDetector

# filename = 'obama.jpg'
model = 'faced'
scale = 1
path = '../data/29--Students_Schoolkids/'

for fn in os.listdir(path):
    filename = fn
    raw_img = cv2.imread(os.path.join(path, filename))
    out_file = '../data'
    detector = FaceDetector()
    name = fn.split('.')
    name = name[0]
    out_file = os.path.join(out_file, name.replace('jpg', 'txt'))
    t0 = time.time()
    print('start')
    face_locations = detector.predict(raw_img, 0.5)
    t1 = time.time()
    print(f'took {round(t1-t0, 3)} to get {len(face_locations)} faces')

    for (x, y, w, h, _) in face_locations:
        x1 = x - int(w / 2)
        x2 = x + int(w / 2)
        y1 = y - int(h / 2)
        y2 = y + int(h / 2)
        # print(x1, y1, x2-x1, y2-y1)
Example #22
import youtube_dl
import cv2
import face_recognition
import sklearn
from sklearn.datasets import fetch_lfw_people

from faced import FaceDetector
from faced.utils import annotate_image

lfw_people = fetch_lfw_people()

face_detector = FaceDetector()


def process_video(vidfile):
    # start processing video
    input_movie = cv2.VideoCapture(vidfile)
    length = int(input_movie.get(cv2.CAP_PROP_FRAME_COUNT))
    # Write the resulting image to the output video file
    codec = int(input_movie.get(cv2.CAP_PROP_FOURCC))
    fps = int(input_movie.get(cv2.CAP_PROP_FPS))
    frame_width = int(input_movie.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(input_movie.get(cv2.CAP_PROP_FRAME_HEIGHT))
    output_movie = cv2.VideoWriter('output.mp4', codec, fps,
                                   (frame_width, frame_height))
    frame_num = 0

    while frame_num < length:
        ret, frame = input_movie.read()
        frame_num += 1
        if not ret:
Example #23
import cv2
from time import time
from faced import FaceDetector
from faced import utils
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(asctime)s %(message)s')

# for local testing
model_path = "../faced/models/"
face_detector = FaceDetector()

img_path = "faces.jpg"
thresh = 0.8
bgr_img = cv2.imread(img_path)
rgb_img = cv2.cvtColor(bgr_img.copy(), cv2.COLOR_BGR2RGB)

# Receives RGB numpy image (HxWxC) and
# returns (x_center, y_center, width, height, prob) tuples.
logger.info('starting face detection ...')
start_time = time()
face_detection_list = face_detector.predict(frame=rgb_img, thresh=thresh)
detected_faces = len(face_detection_list)
end_time = time()
duration = end_time - start_time
logger.info(f'face detection took {duration:.2f} seconds')
logger.info(f'found boxes: {detected_faces}')

increase_box_percentage = 0.5
for i in range(detected_faces):
Example #24
    def on_created(self, event):
        print(f'event type: {event.event_type}  path : {event.src_path}')
        time.sleep(2)

        try:
            # the "on_created" event is called by a partially upload file
            # cut excess filename after '.png'
            #/var/nextcloud_data/c4p/files/camera_footage/Ko-retina.png.ocTransferId1983807786.part
            # /camera_footage/camera_1/raw_footage
            # /camera_footage/camera_1/anonymized_footage

            # todo : anonymizing more than 1 face
            # todo : put face over face

            sucessful_anonymization = False

            filetype = find_filetype(event.src_path)
            print("filetype", filetype)

            path_to_file = event.src_path.split(filetype, 1)[0] + filetype
            print("path to file", path_to_file)

            camera_folder = get_camera_folder(path_to_file)
            print("camera_id", camera_folder)

            picture_id = get_picture_id(path_to_file, camera_folder, filetype)
            print("picture_id", picture_id)

            an_path = get_path_for_anonymous_pic(anonymous_folder,
                                                 camera_folder, picture_id,
                                                 filetype)
            print("path to anonymous file", an_path)

            face_detector = FaceDetector()

            print("reading image", path_to_file)
            img = cv2.imread(path_to_file)
            rgb_img = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2RGB)

            if thresh:
                bboxes = face_detector.predict(rgb_img, thresh)
            else:
                bboxes = face_detector.predict(rgb_img)

            # anonymize
            if not bboxes == []:
                try:
                    print("bboxes containing face", bboxes)

                    print("creating anonymous picture")
                    ann_img = annotate_image(img, bboxes)

                    print("write anonymized version to anonymous folder")
                    cv2.imwrite(an_path, ann_img)

                    sucessful_anonymization = True

                except Exception as ex:
                    print(ex)
                    print("Anonymizing failed")
                    print("writing anonymized version failed")
                    sucessful_anonymization = False

                # delete original if sucessfully anonymized
                if sucessful_anonymization:
                    if os.path.exists(path_to_file):
                        os.remove(path_to_file)
                    else:
                        print("Tried deleting, but the file does not exist",
                              path_to_file)

            # no faces found, picture is already anonymous
            else:
                print("no face found")
                if os.path.exists(path_to_file):
                    os.rename(path_to_file, an_path)

            print("refreshing owncloud")
            subprocess.call(cwd + "/refresh_nextcloud.sh", shell=True)

        except Exception as e:
            print(e)
            print("Anonymizing failed")