Example #1
 def detect_all_faces(self):
     """Detect faces in every image in self.image_list."""
     bbox_landmark_dict = {}
     for img_path in self.image_list:
         print(img_path)
         try:
             cur_img = Image.open(img_path).convert('RGB')
         except Exception:
             # Skip files that cannot be opened or decoded as images
             continue
         bbox, landmark = detect_faces(cur_img)
         bbox_landmark_dict[img_path] = [bbox, landmark]
     return bbox_landmark_dict
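Across these examples, detect_faces appears to return two arrays: bounding boxes as rows of (xmin, ymin, xmax, ymax, confidence), and landmarks as rows of ten values holding the five x-coordinates followed by the five y-coordinates. A minimal sketch consuming that output, assuming a PIL RGB image as above and an illustrative file name:

# Sketch of consuming detect_faces() output; the row layouts are inferred
# from the unpacking done in the examples on this page, not from library docs.
from PIL import Image

img = Image.open('test.jpg').convert('RGB')  # illustrative file name
bounding_boxes, landmarks = detect_faces(img)
for (xmin, ymin, xmax, ymax, confidence), p in zip(bounding_boxes, landmarks):
    print('box:', xmin, ymin, xmax, ymax, 'score:', confidence)
    points = [(p[i], p[i + 5]) for i in range(5)]  # x-coords first, then y-coords
    print('landmark points:', points)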
Example #2
def main():
    # Default to Haar cascades; switch to MTCNN if requested on the CLI
    method = "haar"
    if len(sys.argv) == 5 and sys.argv[4] == "mtcnn":
        method = "mtcnn"
    f = fetch_file_for_image(sys.argv[1], sys.argv[2], sys.argv[3])
    if method == "haar":
        import haar
        faces = haar.detect_faces(f)
    else:
        import mtcnn
        faces = mtcnn.detect_faces(f)
    upload_face_segments(sys.argv[1], sys.argv[3], faces)
    os.remove(f)
Example #3
def emotion():
    model = Face_Emotion_CNN()
    model.load_state_dict(torch.load(
        '/home/cpz/Desktop/AlphaPose/emotion/models/FER_trained_model.pt',
        map_location=lambda storage, loc: storage),
                          strict=False)
    emotion_dict = {
        0: 'neutral',
        1: 'happiness',
        2: 'surprise',
        3: 'sadness',
        4: 'anger',
        5: 'disgust',
        6: 'fear'
    }
    val_transform = transforms.Compose([transforms.ToTensor()])

    img = cv2.imread('../input/2.jpg')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    det_frame = Image.fromarray(img)
    bounding_boxes, landmarks = detect_faces(det_frame)

    for (xmin, ymin, xmax, ymax, confidence) in bounding_boxes:
        ymin, xmin, ymax, xmax = int(ymin), int(xmin), int(ymax), int(xmax)

        cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (255, 0, 0), 2)
        resize_frame = cv2.resize(gray[ymin:ymax, xmin:xmax], (48, 48))

        # ToTensor() already rescales pixel values to [0, 1],
        # so no manual normalization is needed here
        X = Image.fromarray(resize_frame)
        X = val_transform(X).unsqueeze(0)
        with torch.no_grad():
            model.eval()
            log_ps = model.cpu()(X)
            ps = torch.exp(log_ps)
            top_p, top_class = ps.topk(1, dim=1)
            pred = emotion_dict[int(top_class.numpy())]
        cv2.putText(img, pred, (xmin, ymin), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                    (0, 255, 0), 1)

    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.grid(False)
    plt.axis('off')
    plt.show()
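The torch.exp call above implies the network's last layer yields log-probabilities (e.g. a log-softmax). A sketch of the per-face prediction factored into a helper under that assumption; predict_emotion is a hypothetical name, reusing the model, emotion_dict, and val_transform defined above:

def predict_emotion(model, face_gray, emotion_dict, transform):
    # Hypothetical helper mirroring the loop body above; assumes the model
    # returns log-probabilities, hence the torch.exp below.
    x = transform(Image.fromarray(face_gray)).unsqueeze(0)
    model.eval()
    with torch.no_grad():
        ps = torch.exp(model.cpu()(x))        # log-probs -> probabilities
        top_p, top_class = ps.topk(1, dim=1)
    return emotion_dict[int(top_class)], float(top_p)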
Example #4
 def get_features(self, img):
     """
     Detects faces in the image and computes a feature vector for each detection
     :param img: numpy.ndarray, dimensions: (height, width, 3)
     :return: numpy.ndarray, an array of features, dimensions: (num_faces, 1024)
     """
     features = []
     try:
         bboxes, landmarks = detect_faces(img)
         if len(bboxes) != 0:
             faces = []
             for box_landmarks in landmarks:
                 if img.ndim == 3:
                     # The frame may arrive as RGB or already grayscale;
                     # the classifier expects a 2D image, so convert when needed
                     img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
                 face_img = self._frontalize_face(img, box_landmarks)
                 face_img = self._process_image(face_img)
                 faces.append(face_img)
             faces = np.vstack(faces)
             features = self._get_features_for_batch(faces)
     except Exception as err:
         print(f'\033[93mException: {err} --> skipping the frame classification\033[0m')
     return features
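A brief usage sketch for get_features; FeatureExtractor is a hypothetical name for the class that owns the method, and the file name is illustrative:

import cv2

extractor = FeatureExtractor()  # hypothetical owner class of get_features()
frame = cv2.cvtColor(cv2.imread('frame.jpg'), cv2.COLOR_BGR2RGB)
features = extractor.get_features(frame)
for vec in features:  # one 1024-dimensional vector per detected face
    print(vec.shape)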
Example #5
import cv2
import argparse
import numpy as np  # needed for the timing average below

# detect_faces is assumed to be importable from the MTCNN implementation under test

parser = argparse.ArgumentParser()
parser.add_argument('--with_draw', help='draw detections on the image?', default='True')
args = parser.parse_args()

bgr_img = cv2.imread('test.jpg', 1)
print(bgr_img.shape)

### detection
list_time = []
for idx in range(10):  # run detection 10 times to measure latency
    rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
    start = cv2.getTickCount()
    bounding_boxes, landmarks = detect_faces(rgb_img)
    time = (cv2.getTickCount() - start) / cv2.getTickFrequency() * 1000
    list_time.append(time)

# the first run is treated as warm-up and excluded from the average
print('mtcnn average time: %.3f ms' % np.array(list_time[1:]).mean())

### draw rectangle bbox
if args.with_draw == 'True':
    for b in bounding_boxes:
        b = [int(round(value)) for value in b]
        cv2.rectangle(bgr_img, (b[0], b[1]), (b[2], b[3]), (0, 255, 0), 2)

    for p in landmarks:
        # each landmark row stores the five x-coordinates, then the five y-coordinates
        for i in range(5):
            cv2.circle(bgr_img, (int(p[i]), int(p[i + 5])), 3, (255, 0, 0), -1)
Example #6
                        image_key = f'name={name_formatted}&video_date={video_date}&frame={frame}'

                        if image_key in file_map:
                            # Image already exists
                            image_path = path.join(conf.DATASET,
                                                   name_formatted,
                                                   file_map[image_key])
                            region = image_path.split(
                                '&region=')[1][:-4].split('_')
                            bbox = [float(x) for x in region]
                        else:
                            # Load the frame
                            im = get_frame(video, frame)

                            # 8) Get bboxes and landmarks
                            bboxes, _ = detect_faces(im)
                            bbox_i = get_bbox_i_by_IoU(bboxes,
                                                       rect,
                                                       threshold=0)
                            if bbox_i == -1:
                                bbox = []
                            else:
                                bbox = bboxes[bbox_i]
                                bbox = np.round(bbox, decimals=2)
                                bbox = bbox.tolist()

                        detection_dict['orig'] = rect
                        detection_dict['mtcnn'] = bbox
                        mtcnn_file.write(json.dumps(detection_dict) + '\n')

                except Exception as e:
Example #7
def mtcnn_detector(img):
    # Convert MTCNN (xmin, ymin, xmax, ymax, score) boxes into
    # integer (x, y, width, height) tuples
    bounding_boxes, _ = detect_faces(img)
    faces = [(int(round(box[0])), int(round(box[1])),
              int(round(box[2] - box[0])), int(round(box[3] - box[1])))
             for box in bounding_boxes]
    return faces
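A brief usage sketch for the wrapper above; the input is assumed to be a PIL RGB image, as in the other examples here, and the file names are illustrative:

import cv2
import numpy as np
from PIL import Image

img = Image.open('group.jpg').convert('RGB')        # illustrative file name
frame = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
for (x, y, w, h) in mtcnn_detector(img):
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imwrite('group_boxes.jpg', frame)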
Example #8
            try:
                # 6) Iterate over detections which belong to the name,
                #    keeping only every N-th one
                for detection in annotations[name]['detections']:
                    if N_counter != N:
                        N_counter += 1
                        continue
                    N_counter = 1

                    frame = detection['frame']
                    rect = detection['rect']

                    # Load the frame
                    im = get_frame(video, frame)

                    # 8) Get bboxes and landmarks
                    bboxes, landmarks = detect_faces(im)
                    bbox_i = get_bbox_i_by_IoU(bboxes, rect)

                    # 9) Skip the image if no bbox was selected
                    if bbox_i == -1:
                        with open(log_path, 'a') as f:
                            f.write(
                                '{"video_name": '
                                f'"{video_name}", "name": "{name}", "frame": {frame}, "rect": {rect}'
                                '}\n')
                        not_found_counter += 1
                        continue

                    face_landmarks = landmarks[bbox_i]
                    face_bbox = np.round(bboxes[bbox_i], decimals=2)
                    serialized_bbox = '_'.join(str(v) for v in face_bbox)