Example n. 1
0
def compute_similarities():
    """Recompute the face-similarity table from every stored descriptor.

    Loads all descriptors from the DB, builds the full pairwise Euclidean
    distance matrix, and re-populates the similarity table with one row per
    ordered pair of distinct faces whose distance is below
    ``args.similarity_threshold``. Prints a one-line progress summary.
    """
    timer = Timer()
    rows = db.get_all_descriptors()
    print("Faces: %d" % len(rows), end='')
    if len(rows) < 2:
        # nothing to compare; finish the status line and bail out
        print()
        return

    # descriptors are stored as JSON arrays in column 1
    vecs = np.array([json.loads(row[1]) for row in rows])
    sq_norms = np.sum(np.square(vecs), axis=-1)
    # ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y; clamp tiny negatives
    # introduced by floating-point rounding before the sqrt
    sq_dists = sq_norms[:, np.newaxis] + sq_norms[np.newaxis] - 2 * np.dot(vecs, vecs.T)
    dists = np.sqrt(np.maximum(sq_dists, 0))

    db.delete_similarities()
    num_similarities = 0
    # both (i, j) and (j, i) are inserted for each close pair; self-pairs skipped
    for i, j in zip(*np.where(dists < args.similarity_threshold)):
        if i == j:
            continue
        db.insert_similarity([rows[i][0], rows[j][0], dists[i, j]])
        num_similarities += 1
    db.commit()
    print(", Similarities: %d, Time: %.2fs" % (num_similarities, timer.total()))
Example n. 2
0
def compute_similarities(data_dir, similarity_threshold=0.6, identity_threshold=0.4, criminal_fraction=0.1, **kwargs):
    """Recompute face similarities and identity clusters for the whole DB.

    Loads every stored face descriptor, rewrites the similarity table with
    all pairs closer than ``similarity_threshold`` (Euclidean distance),
    clusters the faces with dlib's Chinese Whispers algorithm at
    ``identity_threshold``, persists the cluster labels, optionally
    symlinks saved face crops into per-cluster directories, and removes
    clusters whose criminal fraction exceeds ``criminal_fraction``.

    Args:
        data_dir: root data directory holding saved faces / cluster dirs.
        similarity_threshold: max descriptor distance for a similar pair.
        identity_threshold: threshold passed to chinese_whispers_clustering.
        criminal_fraction: clusters exceeding this fraction of criminals
            (per db.get_clusters_with_criminals) are removed.
        **kwargs: ignored; lets callers pass a superset of options.

    Returns:
        Tuple ``(num_faces, num_similarities, num_clusters)``;
        ``(num_faces, 0, 0)`` when fewer than two faces exist.
    """
    t = Timer()
    all_descriptors = db.get_all_descriptors()
    # rows are (face_id, descriptor_json, ...)
    descriptors = [json.loads(f[1]) for f in all_descriptors]
    face_ids = [f[0] for f in all_descriptors]
    num_faces = len(all_descriptors)
    #print("get_all_descriptors():", t)
    #print("Faces: %d" % len(all_descriptors), end='')
    if num_faces < 2:
        #print()
        return num_faces, 0, 0

    # full pairwise Euclidean distance matrix via
    # ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y; negatives from rounding
    # are clamped to 0 before the sqrt
    X = Y = np.array(descriptors)
    #print("convert to array:", t)
    X2 = Y2 = np.sum(np.square(X), axis=-1)
    dists = np.sqrt(np.maximum(X2[:, np.newaxis] + Y2[np.newaxis] - 2 * np.dot(X, Y.T), 0))
    #print("calculate dists:", t)

    db.delete_similarities()
    #print("delete similarities:", t)
    num_similarities = 0
    # inserts both (i, j) and (j, i) for each close pair, skipping self-pairs
    for i, j in zip(*np.where(dists < float(similarity_threshold))):
        if i != j:
            db.insert_similarity([face_ids[i], face_ids[j], dists[i, j]])
            num_similarities += 1
    #print("save similarities:", t)

    # cluster faces and update labels
    descriptors_dlib = [dlib.vector(d) for d in descriptors]
    clusters = dlib.chinese_whispers_clustering(descriptors_dlib, float(identity_threshold))
    db.update_labels(zip(clusters, face_ids))
    num_clusters = len(set(clusters))

    if args.save_clusters:
        # symlink each saved face crop into its cluster directory
        for cluster_num, face_id in zip(clusters, face_ids):
            facefile = os.path.realpath(os.path.join(data_dir, args.save_faces, "face_%05d.jpg" % face_id))
            clusterdir = os.path.join(data_dir, args.save_clusters, str(cluster_num))
            makedirs(clusterdir)
            # symlink under a temp name, then rename over the target so an
            # existing link is replaced instead of raising FileExistsError
            os.symlink(facefile, os.path.join(clusterdir, 'tmpfile'))
            os.rename(os.path.join(clusterdir, 'tmpfile'), os.path.join(clusterdir, "face_%05d.jpg" % face_id))

    # remove clusters with more than given amount of criminals
    criminal_clusters = db.get_clusters_with_criminals(criminal_fraction)
    for cluster in criminal_clusters:
        db.remove_cluster(cluster['cluster_num'])

    db.commit()
    #print("commit:", t)
    #print(", Similarities: %d, Time: %.2fs" % (num_similarities, t.total()))
    return num_faces, num_similarities, num_clusters
Example n. 3
0
def process_image(data_dir, relpath, img, image_type, image_source, frame_num=None, exif_data=None, 
        gps_lat=None, gps_lon=None, camera_side=None, rotate=0, timestamp=None, **kwargs):
    """Detect faces in one image and persist image + face rows to the DB.

    Optionally resizes the image, runs the dlib detector and landmark
    predictor, computes all face descriptors in one batched call, stores
    the image row and one face row per detection (geometry normalized to
    [0, 1], landmarks and descriptor JSON-encoded), and can draw faces on
    the resized image and save individual face crops.

    Args:
        data_dir: root output directory for saved faces / resized images.
        relpath: image path relative to the data root (stored in the DB).
        img: BGR image array as loaded by OpenCV.
        image_type: type tag stored with the image row.
        image_source: source tag stored with the image row.
        frame_num: video frame number, or None for still images.
        exif_data: EXIF metadata blob stored with the image row.
        gps_lat, gps_lon, camera_side, rotate, timestamp: extra metadata
            stored verbatim with the image row.
        **kwargs: ignored; lets callers pass a superset of options.

    Returns:
        Tuple ``(num_faces, res)`` where ``res`` is a dict describing the
        image and its detected faces.
    """
    #if int(rotate) != 0:
    #    assert int(rotate) in [0, 90, 180, 270]
    #    img = cv2.rotate(img, int(rotate) // 90 - 1)
    #    print("Rotating", rotate)

    image_height, image_width, _ = img.shape
    resizepath, resized_height, resized_width = None, image_height, image_width

    if args.resize:
        img = resize_image(img, args.resize)
        resized_height, resized_width, _ = img.shape
        if args.save_resized:
            filename = os.path.basename(relpath)
            resizepath = os.path.join(args.save_resized, filename)
            basepath, ext = os.path.splitext(resizepath)
            # force a .jpg extension when the source had none or when the
            # frame number must be embedded in the filename
            if ext == '' or frame_num is not None:
                resizepath = basepath
                if frame_num is not None:
                    resizepath += "_%04d" % frame_num
                resizepath += '.jpg'

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = detector(gray, args.upscale)

    image_id = db.insert_image([image_type, image_source, relpath, image_width, image_height, 
        resizepath, resized_width, resized_height, frame_num, exif_data, len(faces),
        gps_lat, gps_lon, camera_side, rotate, timestamp])

    poses = dlib.full_object_detections()
    rects = []
    confs = []
    for rect in faces:
        # the CNN detector returns mmod_rects carrying a confidence score;
        # the HOG detector returns plain rectangles (confidence fixed at 1)
        if args.detector == 'cnn':
            confs.append(rect.confidence)
            rect = rect.rect
        else:
            confs.append(1.)
        pose = predictor(gray, rect)
        poses.append(pose)
        rects.append(rect)

    # do batched computation of face descriptors
    img_rgb = img[...,::-1] # BGR to RGB
    descriptors = facerec.compute_face_descriptor(img_rgb, poses, args.jitter)

    faceres = []
    for i, (rect, conf, pose, descriptor) in enumerate(zip(rects, confs, poses, descriptors)):
        # face geometry normalized to [0, 1] of the (possibly resized) image
        face_left = float(rect.left()) / resized_width
        face_top = float(rect.top()) / resized_height
        face_right = float(rect.right()) / resized_width
        face_bottom = float(rect.bottom()) / resized_height
        face_width = face_right - face_left
        face_height = face_bottom - face_top
        landmarks = [[float(p.x) / resized_width, float(p.y) / resized_height] for p in pose.parts()]
        descriptor = list(descriptor)
        
        face_id = db.insert_face([image_id, i, face_left, face_top, face_right, face_bottom, face_width, face_height, conf, json.dumps(landmarks), json.dumps(descriptor)])

        # draw faces on resized images
        if args.draw_faces:
            cv2.rectangle(img, (rect.left(), rect.top()), (rect.right(), rect.bottom()), (255, 0, 255), 2)
            for p in pose.parts():
                cv2.circle(img, (p.x, p.y), 1, color=(0, 255, 255), thickness=-1)

        if args.save_faces:
            # clamp top/left to 0 so negative detector coordinates do not
            # wrap around as negative slice indices
            facepath = os.path.join(data_dir, args.save_faces, "face_%05d.jpg" % face_id)
            cv2.imwrite(facepath, img[max(rect.top(), 0):rect.bottom(), max(rect.left(), 0):rect.right()])

        # pose_coef: presumably a proxy for head position in the frame,
        # derived from the face's distance to the bottom edge -- TODO confirm
        faceres.append({'face_id': face_id, 'face_num': i, 'left': face_left, 'top': face_top, 'right': face_right, 'bottom': face_bottom, 'width': face_width, 'height': face_height,
            'confidence': conf, 'landmarks': landmarks, 'pose_coef': (1 - face_bottom) / face_height})

    db.commit()

    # save resized image after drawing faces
    if args.resize and args.save_resized:
        cv2.imwrite(os.path.join(data_dir, resizepath), img)

    res = {'relpath': relpath, 'frame_num': frame_num, 'resizepath': resizepath, 'image_id': image_id, 'image_type': image_type, 'num_faces': len(faces), 'faces': faceres}
    return len(faces), res
Example n. 4
0
def process_queue():
    """Run batched face detection on the queued images and persist results.

    Consumes the module-level queues filled by the caller (``images_queue``,
    ``grays_queue``, ``data_queue``): performs one batched detector call,
    computes landmarks and face descriptors per image, inserts image and
    face rows into the DB, optionally saves face crops, then clears the
    queues and prints a running progress line.
    """
    global images_queue
    global grays_queue
    global data_queue
    global num_images
    global num_faces
    global num_files

    # one batched detector call over every queued grayscale image
    faces_queue = detector(grays_queue,
                           args.upscale,
                           batch_size=len(grays_queue))

    for faces, img, gray, data in zip(faces_queue, images_queue, grays_queue,
                                      data_queue):
        # the image metadata row was pre-built by the caller; append face count
        image_id = db.insert_image(data + [len(faces)])

        poses = dlib.full_object_detections()
        rects = []
        for face in faces:
            pose = predictor(gray, face.rect)
            poses.append(pose)
            rects.append(face.rect)

        # do batched computation of face descriptors (one call per image,
        # not per face)
        img_rgb = img[..., ::-1]  # BGR to RGB
        descriptors = facerec.compute_face_descriptor(img_rgb, poses,
                                                      args.jitter)

        # resized dimensions travel in the queued metadata row at
        # positions 5 and 6 -- assumed layout; confirm against the caller
        resized_width = data[5]
        resized_height = data[6]
        for i, (rect, pose,
                descriptor) in enumerate(zip(rects, poses, descriptors)):
            # face geometry normalized to [0, 1] of the (resized) image
            face_left = float(rect.left()) / resized_width
            face_top = float(rect.top()) / resized_height
            face_right = float(rect.right()) / resized_width
            face_bottom = float(rect.bottom()) / resized_height
            face_width = face_right - face_left
            face_height = face_bottom - face_top
            landmarks = [[
                float(p.x) / resized_width,
                float(p.y) / resized_height
            ] for p in pose.parts()]
            descriptor = list(descriptor)

            face_id = db.insert_face([
                image_id, i, face_left, face_top, face_right, face_bottom,
                face_width, face_height,
                json.dumps(landmarks),
                json.dumps(descriptor)
            ])

            if args.save_faces:
                facepath = os.path.join(args.save_faces,
                                        "face_%02d.jpg" % face_id)
                # clamp top/left to 0: the detector can report negative
                # coordinates, and negative slice indices would wrap around
                # and produce an empty or wrong crop
                cv2.imwrite(
                    facepath, img[max(rect.top(), 0):rect.bottom(),
                                  max(rect.left(), 0):rect.right()])

        num_images += 1
        num_faces += len(faces)

    db.commit()

    images_queue.clear()
    grays_queue.clear()
    data_queue.clear()

    # progress line; guard the rate against a zero elapsed time
    elapsed = time.time() - start_time
    rate = num_images / elapsed if elapsed > 0 else 0.0
    print(
        "\rFiles: %d, Images: %d, Faces: %d, Elapsed: %.2fs, Images/s: %.1f" %
        (num_files, num_images, num_faces, elapsed, rate),
        end='')
Example n. 5
0
def process_image(filepath, img, image_type, frame_num=None, exif_data=None):
    """Detect faces in one image and persist image + face rows to the DB.

    Optionally resizes the image first (and saves the resized copy),
    detects faces, computes landmarks and descriptors in a single batched
    call, stores normalized face geometry plus JSON-encoded landmarks and
    descriptor per face, and optionally writes out individual face crops.

    Args:
        filepath: source path of the image; stored in the DB and used to
            derive the resized-image filename.
        img: BGR image array as loaded by OpenCV.
        image_type: type tag stored with the image row.
        frame_num: video frame number, or None for still images.
        exif_data: EXIF metadata blob stored with the image row.

    Returns:
        Number of faces detected.
    """
    image_height, image_width, _ = img.shape
    resizepath, resized_height, resized_width = None, image_height, image_width

    if args.resize:
        img = resize_image(img, args.resize)
        # BUGFIX: update the resized dimensions whenever the image was
        # resized, not only when it is also saved -- the normalized face
        # coordinates below divide by them, and the old placement left them
        # at the ORIGINAL dimensions when save_resized was off
        resized_height, resized_width, _ = img.shape
        if args.save_resized:
            filename = os.path.basename(filepath)
            resizepath = os.path.join(args.save_resized, filename)
            basepath, ext = os.path.splitext(resizepath)
            # force a .jpg extension when the source had none or when the
            # frame number must be embedded in the filename
            if ext == '' or frame_num is not None:
                resizepath = basepath
                if frame_num is not None:
                    resizepath += "_%04d" % frame_num
                resizepath += '.jpg'
            cv2.imwrite(resizepath, img)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = detector(gray, args.upscale)

    image_id = db.insert_image([
        image_type, filepath, image_width, image_height, resizepath,
        resized_width, resized_height, frame_num, exif_data,
        len(faces)
    ])

    poses = dlib.full_object_detections()
    rects = []
    for rect in faces:
        # the CNN detector wraps the rectangle in an mmod_rect; unwrap it
        if args.detector == 'cnn':
            rect = rect.rect
        pose = predictor(gray, rect)
        poses.append(pose)
        rects.append(rect)

    # do batched computation of face descriptors
    img_rgb = img[..., ::-1]  # BGR to RGB
    descriptors = facerec.compute_face_descriptor(img_rgb, poses, args.jitter)

    for i, (rect, pose, descriptor) in enumerate(zip(rects, poses,
                                                     descriptors)):
        # face geometry normalized to [0, 1] of the (possibly resized) image
        face_left = float(rect.left()) / resized_width
        face_top = float(rect.top()) / resized_height
        face_right = float(rect.right()) / resized_width
        face_bottom = float(rect.bottom()) / resized_height
        face_width = face_right - face_left
        face_height = face_bottom - face_top
        landmarks = [[float(p.x) / resized_width,
                      float(p.y) / resized_height] for p in pose.parts()]
        descriptor = list(descriptor)

        face_id = db.insert_face([
            image_id, i, face_left, face_top, face_right, face_bottom,
            face_width, face_height,
            json.dumps(landmarks),
            json.dumps(descriptor)
        ])

        if args.save_faces:
            facepath = os.path.join(args.save_faces, "face_%02d.jpg" % face_id)
            # clamp top/left to 0: the detector may return negative
            # coordinates and negative slice indices would wrap around
            cv2.imwrite(
                facepath, img[max(rect.top(), 0):rect.bottom(),
                              max(rect.left(), 0):rect.right()])

    db.commit()
    return len(faces)