Code example #1
import numpy

# project-specific modules referenced below
import face_features
import imutils
import settings


def group_feature_extractor(image_list):
    """
        Body of the thread that runs the face feature extraction for
        a list of images
        Arguments:
            image_list: List of images to be processed. Each item in the list corresponds to a dictionary with
                        at least two keys: "path" and "roi". The "path" should contain the full path to the image
                        file to be processed and "roi" the coordinates of the bounding-box of a face detected on
                        the image.
    """
    list_of_feats = []
    if len(image_list) > 0:
        try:
            # init feature extractor
            feature_extractor = face_features.FaceFeatureExtractor()
            for image in image_list:
                # read image
                theim = imutils.acquire_image(image["path"])
                det = image["roi"]
                # crop image to face detection bounding-box
                crop_img = theim[det[1]:det[3], det[0]:det[2], :]
                # extract features
                feat = feature_extractor.feature_compute(crop_img)
                # reshape for compatibility with ranking function
                feat = numpy.reshape(feat, (1, settings.FEATURES_VECTOR_SIZE))
                # add to list of features to be returned
                list_of_feats.append(feat)
        except Exception as e:
            print 'Exception in group_feature_extractor', e
            list_of_feats = []
    return list_of_feats
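
A minimal usage sketch for group_feature_extractor, based on its docstring; the image paths and ROI coordinates below are placeholders, and the function is called directly here even though the docstring describes it as the body of a worker thread.

# Usage sketch (paths and ROI values are placeholders).
# Each entry carries "path" and "roi" as required by the docstring above;
# the ROI is the face bounding-box given as [x1, y1, x2, y2].
image_list = [
    {"path": "/data/images/person_01.jpg", "roi": [34, 50, 178, 210]},
    {"path": "/data/images/person_02.jpg", "roi": [12, 20, 150, 190]},
]
feats = group_feature_extractor(image_list)
print '%d feature vectors computed' % len(feats)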
Code example #2
        face_detector = face_detection_facenet.FaceDetectorFacenetMTCNN()
    # import and create face feature extractor
    import face_features
    feature_extractor = face_features.FaceFeatureExtractor()

    # Compute features for all image paths in args.images_list
    all_feats = {'paths': [], 'rois': [], 'feats': []}
    with open(args.images_list) as fin:
        for img_path in fin:
            img_path = img_path.replace('\n', '')
            if len(img_path) > 0:
                full_path = os.path.join(args.dataset_base_path, img_path)
                print 'Computing features for file %s' % (full_path)

                # read image
                img = imutils.acquire_image(full_path)

                # run face detector
                detections = face_detector.detect_faces(img)

                if numpy.all(detections != None):

                    for det in detections:

                        # The coordinates should already be integers, but some basic
                        # conversion is needed for compatibility with all face detectors.
                        # Plus we have to get rid of the detection score det[4]
                        det = [int(det[0]), int(det[1]), int(det[2]), int(det[3])]

                        # crop image to detected face area.
                        crop_img = img[det[1]:det[3], det[0]:det[2], :]
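                        # --- plausible continuation (not part of the original excerpt) ---
                        # Mirroring code example #1 and the 'all_feats' dictionary initialised
                        # above, the cropped face would typically be passed through the feature
                        # extractor and stored together with its path and ROI.
                        # settings.FEATURES_VECTOR_SIZE is assumed from code example #1.
                        feat = feature_extractor.feature_compute(crop_img)
                        feat = numpy.reshape(feat, (1, settings.FEATURES_VECTOR_SIZE))
                        all_feats['paths'].append(img_path)
                        all_feats['rois'].append(det)
                        all_feats['feats'].append(feat)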
Code example #3
    def addTrs(self, req_params, pos=True):
        """
            Adds a training image for the classification process.
            Parameters:
                req_params: JSON object with at least the fields:
                            - query_id: the id of the query
                            - impath: full path to the training image
                            Other fields include:
                            - featpath: full path to the feature file associated with the query
                            - training_started: boolean indicating that the training step has already
                                                started and that therefore the image can be discarded
                            - extra_params: another dictionary with the fields:
                                * from_dataset: boolean indicating whether the training image
                                                is part of the dataset or not
                                * uri: unique resource identifier
                                * roi: coordinates of a bounding-box defined on the image
            Returns:
                JSON-formatted string with the 'success' field set to 'False'
                in case of any problem, and set to 'True' otherwise.
        """
        # check query id is present
        if 'query_id' in req_params:
            query_id = req_params['query_id']
        else:
            return self.prepare_success_json_str_(False)

        # check whether the training step has already started
        if str(query_id) in self.query_data and self.query_data[str(query_id)]["training_started"]:
            # discard the image and return as if nothing happened
            if 'impath' in req_params:
                print "Training already started. Skipping", os.path.basename(req_params['impath'])
            else:
                print "Training already started. Skipping image."
            return self.prepare_success_json_str_(True)

        # check image path is present
        if 'impath' in req_params:
            impath = req_params['impath']
        else:
            return self.prepare_success_json_str_(False)

        # get the path to the feature file. Not used at the moment.
        if 'featpath' in req_params:
            featpath = req_params['featpath']

        # check for extra parameters
        if 'extra_params' in req_params:

            if 'from_dataset' in req_params['extra_params']:
                from_dataset = req_params['extra_params']['from_dataset']
            else:
                from_dataset = False

            if 'uri' in req_params['extra_params']:
                uri = req_params['extra_params']['uri']
            else:
                uri = -1

            if 'roi' in req_params['extra_params']:
                # if request specifies a ROI ...
                roi = req_params['extra_params']['roi']
                roi = numpy.array([int(x) for x in roi]).reshape(len(roi)/2, 2)
                xl, yl = roi.min(axis=0)
                xu, yu = roi.max(axis=0)
                roi = [xl, yl, xu, yu]
                print 'Request specifies ROI', roi
                # ... check there is a face on the roi
                theim = imutils.acquire_image(impath)
                crop_img = theim[yl:yu, xl:xu, :]
                det = self.face_detector.detect_faces(crop_img, return_best=True)
                if numpy.all(det == None):
                    print 'No detection found in specified ROI'
                    return self.prepare_success_json_str_(False)
                else:
                    # If found, replace the previous with a more accurate one
                    det = det[0]
                    # The coordinates should already be integers, but some basic
                    # conversion is needed for compatibility with all face detectors.
                    # Plus we have to get rid of the detection score det[4]
                    det = [int(det[0]), int(det[1]), int(det[2]), int(det[3])]
                    roi = [det[0]+xl, det[1]+yl, det[2]+xl, det[3]+yl]
                    print 'Automatically adjusting ROI to more accurate region', roi
            else:
                roi = None
        else:
            from_dataset = False
            roi = None
            uri = -1

        # create empty dictionary for the image information
        img = dict()

        # if the image is brand new ..
        if uri == -1:

            # and no roi was specified ...
            if roi is None:

                # read image
                theim = imutils.acquire_image(impath)
                # run face detector, but only get the best detection.
                # multiple detections are not supported for on-the-fly training images
                det = self.face_detector.detect_faces(theim, return_best=True)

                if numpy.all(det != None):

                    # if a face is found, save it
                    print 'Single ROI detected'
                    # The coordinates should already be integers, but some basic
                    # conversion is needed for compatibility with all face detectors.
                    # Plus we have to get rid of the detection score det[4]
                    det = [int(det[0][0]), int(det[0][1]), int(det[0][2]), int(det[0][3])]
                    print 'final det', det

                    img["path"] = impath
                    img["roi"] = det
                    if pos:
                        img["anno"] = 1
                    else:
                        img["anno"] = -1
                else:
                    print 'No detection found'
                    return self.prepare_success_json_str_(True)

            else:
                # just save the image along with the specified roi
                img["path"] = impath
                img["roi"] = roi
                if pos:
                    img["anno"] = 1
                else:
                    img["anno"] = -1
        else:
            # just save the image with default values
            # for the roi (if not specified) and the annotation type
            img["path"] = impath
            if roi is None:
                img["roi"] = [0, 0, 0, 0]
            else:
                img["roi"] = roi
            if pos:
                img["anno"] = 1
            else:
                img["anno"] = -1

        # save unique identifier (even if it is -1)
        img["uri"] = uri

        # save the image information, if we are still accepting training images
        if str(query_id) in self.query_data:
            if not self.query_data[str(query_id)]["training_started"]:
                self.query_data[str(query_id)]["images"].append(img)
            else:
                print "Training already started. Skipping", os.path.basename(impath)
        else:
            print "Query already finished. Skipping", os.path.basename(impath)

        # return with success==True
        return self.prepare_success_json_str_(True)
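
A minimal, hypothetical call illustrating the request format documented in the addTrs docstring; 'engine' is a placeholder name for whatever object exposes the method, and all field values are examples rather than values taken from the original code.

# Hypothetical request built from the fields documented in the docstring above.
# 'engine' is a placeholder for the object exposing addTrs; all values are examples.
req_params = {
    'query_id': 42,
    'impath': '/data/training/face_001.jpg',
    'extra_params': {
        'from_dataset': False,
        'uri': -1,
        # flat list of (x, y) points; addTrs collapses them to a bounding-box
        'roi': [30, 40, 180, 40, 180, 220, 30, 220]
    }
}
response = engine.addTrs(req_params, pos=True)
print response  # JSON string whose 'success' field indicates the outcome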