Example #1
    def extract(self, im, detection):
        import numpy as np
        from keras.preprocessing import image
        from keras_vggface import utils

        rect = pt.rect_proto2pv(detection.location).rescale(1.5)
        tile = im.crop(rect)
        tile = tile.resize((224, 224))

        img = tile.asOpenCV2()
        img = img[:, :, ::-1]  # Convert BGR to RGB
        img = image.img_to_array(img)
        img = np.expand_dims(img, axis=0)  # add batch dimension: (1, 224, 224, 3)
        img = utils.preprocess_input(img, version=1)  # version=1 for VGG16 weights, version=2 for RESNET50/SENET50

        # Needed in multithreaded applications
        with self.graph.as_default():
            tmp = self.recognizer.predict(img)

        return pv.meanUnit(tmp.flatten())
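pv.meanUnit returns a unit-length vector, so two templates produced by this extractor can be scored with a plain dot product. A minimal sketch under that assumption (the function name is hypothetical):

import numpy as np

def template_similarity(template_a, template_b):
    # Both inputs are mean-unit vectors from extract(), so the dot
    # product is their cosine similarity.
    return float(np.dot(template_a, template_b))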
Example #2
    def extract(self, img, face_records):
        '''Extract a template that allows the face to be matched.'''
        # Compute the 512-D vector that describes the face in img.

        for face_record in face_records.face_records:
            rect = pt.rect_proto2pv(face_record.detection.location)
            x, y, w, h = rect.asTuple()

            # Extract view
            cx, cy = x + 0.5 * w, y + 0.5 * h
            tmp = 1.5 * max(w, h)
            cw, ch = tmp, tmp
            crop = pv.AffineFromRect(pv.CenteredRect(cx, cy, cw, ch),
                                     (256, 256))

            pvim = pv.Image(img[:, :, ::-1])  # convert rgb to bgr
            pvim = crop(pvim)
            view = pt.image_pv2proto(pvim)
            face_record.view.CopyFrom(view)

            tile = pvim.resize((112, 112))  # the embedding model expects a 112x112 tile

            face_im = tile.asOpenCV2()
            face_im = face_im[:, :, ::-1]  # Convert BGR to RGB

            features = self.fr_model.get_embedding(face_im)
            face_descriptor = pv.meanUnit(features.flatten())

            face_record.template.data.CopyFrom(
                pt.vector_np2proto(face_descriptor))
Example #3
    def locate(self, img, face_records, options):
        '''Locate facial features.'''
        # Get the landmarks/parts for the face in box d.
        for face_record in face_records.face_records:
            rect = pt.rect_proto2pv(face_record.detection.location)
            x, y, w, h = rect.asTuple()
            l, t, r, b = [int(tmp) for tmp in [x, y, x + w, y + h]]
            d = dlib.rectangle(l, t, r, b)
            shape = self.shape_pred(img, d)
            print('s', shape)
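self.shape_pred here is a dlib shape predictor. A minimal sketch of how it might be initialized, assuming the standard 68-point landmark model from the dlib model zoo (the file path is an assumption):

import dlib

# Hypothetical setup: the .dat model file must be downloaded and unpacked
# separately before this call.
shape_pred = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')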
Example #4
    def extract(self, img, face_records):
        '''Extract a template that allows the face to be matched.'''
        # Compute the 128D vector that describes the face in img identified by
        # shape.  In general, if two face descriptor vectors have a Euclidean
        # distance between them less than 0.6 then they are from the same
        # person, otherwise they are from different people.

        # TODO: Make this an option
        JITTER_COUNT = 5

        for face_record in face_records.face_records:
            rect = pt.rect_proto2pv(face_record.detection.location)
            x, y, w, h = rect.asTuple()

            # Extract view
            cx, cy = x + 0.5 * w, y + 0.5 * h
            tmp = 1.5 * max(w, h)
            cw, ch = tmp, tmp
            crop = pv.AffineFromRect(pv.CenteredRect(cx, cy, cw, ch), (256, 256))
            pvim = pv.Image(img[:, :, ::-1])  # convert rgb to bgr
            pvim = crop(pvim)
            view = pt.image_pv2proto(pvim)
            face_record.view.CopyFrom(view)

            # Extract landmarks
            l, t, r, b = [int(tmp) for tmp in [x, y, x + w, y + h]]
            d = dlib.rectangle(l, t, r, b)
            shape = self.shape_pred(img, d)

            for i in range(len(shape.parts())):
                loc = shape.parts()[i]
                landmark = face_record.landmarks.add()
                landmark.landmark_id = "point_%02d" % i
                landmark.location.x = loc.x
                landmark.location.y = loc.y

            face_descriptor = self.face_rec.compute_face_descriptor(img, shape, JITTER_COUNT)
            face_descriptor = np.array(face_descriptor)

            vec = face_descriptor.flatten()
            face_record.template.data.CopyFrom(pt.vector_np2proto(vec))
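The comment above quotes dlib's rule of thumb: descriptors within Euclidean distance 0.6 usually belong to the same person. A minimal matching sketch over two stored templates (the function name is hypothetical):

import numpy as np

def is_same_person(vec_a, vec_b, threshold=0.6):
    # vec_a and vec_b are 128-D descriptors as produced by extract() above.
    return np.linalg.norm(vec_a - vec_b) < threshold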
Example #5
    def extract(self, img, face_records):
        '''Extract a template that allows the face to be matched.'''
        # Compute the 512-D vector that describes the face in img.
        img = img[:, :, ::-1]  # convert RGB to BGR; get_embedding converts back internally

        for face_record in face_records.face_records:
            if face_record.detection.score != -1:
                landmarks = np.zeros((5, 2), dtype=np.float64)
                for i in range(len(face_record.landmarks)):
                    vals = face_record.landmarks[i]
                    landmarks[i, 0] = vals.location.x
                    landmarks[i, 1] = vals.location.y

                _img = self.preprocess.norm_crop(img, landmark=landmarks)
                embedding = self.fr_model.get_embedding(_img).flatten()
                embedding_norm = np.linalg.norm(embedding)
                normed_embedding = embedding / embedding_norm

                # Extract view
                x, y, w, h = pt.rect_proto2pv(
                    face_record.detection.location).asTuple()
                cx, cy = x + 0.5 * w, y + 0.5 * h
                tmp = 1.5 * max(w, h)
                cw, ch = tmp, tmp
                crop = pv.AffineFromRect(pv.CenteredRect(cx, cy, cw, ch), (256, 256))
                pvim = pv.Image(img[:, :, ::-1])  # undo the earlier BGR conversion for the view
                pvim = crop(pvim)
                view = pt.image_pv2proto(pvim)
                face_record.view.CopyFrom(view)

            else:
                normed_embedding = np.zeros(512, dtype=float)

            face_record.template.data.CopyFrom(
                pt.vector_np2proto(normed_embedding))
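Because the stored embedding is L2-normalized, cosine similarity reduces to a dot product, and the Euclidean distance between two unit vectors satisfies d**2 = 2 - 2*cos. A short sketch of both scores (function names are hypothetical):

import numpy as np

def cosine_score(normed_a, normed_b):
    # Unit vectors: the dot product is the cosine similarity in [-1, 1].
    return float(np.dot(normed_a, normed_b))

def euclidean_score(normed_a, normed_b):
    # For unit vectors this equals sqrt(2 - 2 * cosine_score(a, b)).
    return float(np.linalg.norm(normed_a - normed_b))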
Example #6
def processDetections(each):
    im, results, options = each
    if results.done():
        recs = results.result().face_records
        i = 0

        for face in recs:
            global FACE_COUNT
            FACE_COUNT += 1
            # Filter faces based on min size
            size = min(face.detection.location.width,
                       face.detection.location.height)
            if size < options.min_size:
                continue

            # Filter faces based on attributes
            if not processAttributeFilter(face, options):
                continue

            # Process Detections
            if options.detections_csv is not None:
                global DETECTIONS_CSV
                global DETECTIONS_FILE
                import csv
                if DETECTIONS_CSV is None:
                    DETECTIONS_FILE = open(options.detections_csv, 'w')
                    DETECTIONS_CSV = csv.writer(DETECTIONS_FILE)
                    DETECTIONS_CSV.writerow([
                        'source', 'frame', 'detect_id', 'type', 'score', 'x',
                        'y', 'w', 'h'
                    ])

                DETECTIONS_CSV.writerow([
                    face.source, face.frame, i, face.detection.detection_class,
                    face.detection.score, face.detection.location.x,
                    face.detection.location.y, face.detection.location.width,
                    face.detection.location.height
                ])
                DETECTIONS_FILE.flush()

            # Process Detections
            if options.attributes_csv is not None:
                global ATTRIBUTES_CSV
                global ATTRIBUTES_FILE
                import csv

                if ATTRIBUTES_CSV is None:
                    ATTRIBUTES_FILE = open(options.attributes_csv, 'w')
                    ATTRIBUTES_CSV = csv.writer(ATTRIBUTES_FILE)
                    ATTRIBUTES_CSV.writerow([
                        'source', 'frame', 'detect_id', 'type', 'score', 'x',
                        'y', 'w', 'h', 'attribute', 'value'
                    ])

                attributes = list(face.attributes)
                attributes.sort(key=lambda x: x.key)
                for attribute in attributes:
                    key = attribute.key
                    value = attribute.fvalue
                    ATTRIBUTES_CSV.writerow([
                        face.source,
                        face.frame,
                        i,
                        face.detection.detection_class,
                        face.detection.score,
                        face.detection.location.x,
                        face.detection.location.y,
                        face.detection.location.width,
                        face.detection.location.height,
                        key,
                        value,
                    ])
                ATTRIBUTES_FILE.flush()

            if options.face_log:
                os.makedirs(options.face_log, exist_ok=True)

                rect = pt.rect_proto2pv(face.detection.location)
                rect = rect.rescale(1.5)
                affine = pv.AffineFromRect(rect, (128, 128))
                base_name, ext = os.path.splitext(os.path.basename(face.source))
                try:
                    pvim = pv.Image(im[:, :, ::-1])
                    view = affine(pvim)
                    out_path = os.path.join(
                        options.face_log,
                        base_name + '_face_%03d' % face.detection.detection_id + ext)
                    view.save(out_path)
                    print('Saving face:', out_path)
                except Exception:
                    print("WARNING: Image not processed correctly:", face.source)

                out_path = os.path.join(options.face_log, base_name + '_orig' + ext)
                if not os.path.lexists(out_path):
                    os.symlink(os.path.abspath(face.source), out_path)
            i += 1

        return False
    return True
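processDetections treats results as a concurrent.futures.Future (it calls done() and result()) and returns True while the future is still pending. A minimal polling driver under that assumption (the function name is hypothetical):

import time

def drainDetections(pending):
    # pending holds (im, future, options) tuples; re-check the ones whose
    # future has not resolved, dropping each one once it is processed.
    while pending:
        pending = [each for each in pending if processDetections(each)]
        time.sleep(0.01)  # avoid a busy-wait loop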
Example #7
    def extract(self, img, face_records):
        '''Extract a template that allows the face to be matched.'''
        # Compute a descriptor vector for the face in img and store it as the
        # matching template.
        from keras_vggface import utils
        from keras.preprocessing import image

        for face_record in face_records.face_records:
            rect = pt.rect_proto2pv(face_record.detection.location)
            x, y, w, h = rect.asTuple()

            # Extract view
            cx, cy = x + 0.5 * w, y + 0.5 * h
            tmp = 1.5 * max(w, h)
            cw, ch = tmp, tmp
            crop = pv.AffineFromRect(pv.CenteredRect(cx, cy, cw, ch),
                                     (256, 256))

            pvim = pv.Image(img[:, :, ::-1])  # convert rgb to bgr
            pvim = crop(pvim)
            view = pt.image_pv2proto(pvim)
            face_record.view.CopyFrom(view)

            # Extract landmarks
            l, t, r, b = [int(tmp) for tmp in [x, y, x + w, y + h]]
            d = dlib.rectangle(l, t, r, b)
            shape = self.shape_pred(img, d)

            for i in range(len(shape.parts())):
                loc = shape.parts()[i]
                landmark = face_record.landmarks.add()
                landmark.landmark_id = "point_%02d" % i
                landmark.location.x = loc.x
                landmark.location.y = loc.y

            # Resize the view crop for the recognizer
            tile = pvim.resize((224, 224))

            face_im = tile.asOpenCV2()
            face_im = face_im[:, :, ::-1]  # Convert BGR to RGB

            face_im = image.img_to_array(face_im)
            face_im = np.expand_dims(face_im, axis=0)  # add batch dimension
            face_im = utils.preprocess_input(face_im, version=2)  # version=2 for RESNET50/SENET50 weights

            # Needed in multithreaded applications
            with self.graph.as_default():
                tmp = self.recognizer.predict(face_im)

            face_descriptor = pv.meanUnit(tmp.flatten())

            face_record.template.data.CopyFrom(
                pt.vector_np2proto(face_descriptor))