Example #1
                rgb_file = os.path.join(rgb_path, rgb_file)
                thr_file = os.path.join(thr_path, thr_file)
                #print(rgb_file)
                #print(thr_file)

                # load rgb and thermal images
                rgb_image = cv2.imread(rgb_file)
                thr_image = cv2.imread(thr_file)

                # detect faces in the rgb image and return
                # corresponding bounding boxes
                if args["model"] != "dnn":
                    rgb_boxes = face_recognition.face_locations(
                        rgb_image, model=args["model"])
                else:
                    rgb_boxes = face_region_extractor(face_net, rgb_image,
                                                      args["confidence"])

                # if at least one face is detected
                if len(rgb_boxes):
                    # assume that only one person was detected and extract the
                    # corner coordinates of its bounding box; face_recognition
                    # returns boxes as (top, right, bottom, left)
                    (startY, endX, endY, startX) = rgb_boxes[0]

                    # crop the detected faces
                    rgb_face = rgb_image[startY:endY, startX:endX]
                    thr_face = thr_image[startY:endY, startX:endX]

                    if args["show"]:
                        # make a copy of the rgb image then replace its RED channel with
                        # the RED channel of the thermal image
                        rgb_copy = rgb_image.copy()
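
The snippet stops right after copying the RGB frame. A minimal sketch of the RED-channel swap described in the final comment, continuing inside the if args["show"] branch, might look like the following; the window name and key handling are assumptions, and since cv2.imread loads images in BGR order the red channel is index 2.

                        # replace the RED channel (index 2 in BGR) of the copy
                        # with the RED channel of the thermal image
                        rgb_copy[:, :, 2] = thr_image[:, :, 2]

                        # show the overlay for a quick visual alignment check
                        cv2.imshow("RGB with thermal red channel", rgb_copy)
                        cv2.waitKey(0)
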
Example #2
        # to align with the thermal image
        rgb = cv2.warpPerspective(rgb,
                                  H, (W_thr, H_thr),
                                  flags=cv2.INTER_LINEAR,
                                  borderMode=cv2.BORDER_REPLICATE)
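        # NOTE (assumption, not part of the original script): H is presumably a
        # 3x3 rgb-to-thermal homography estimated beforehand, for example from
        # manually matched point pairs:
        #   H, _ = cv2.findHomography(rgb_pts, thr_pts, cv2.RANSAC)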

        # adjust the alignment if there is still
        # some misalignment along the x and y axes
        rgb = rgb[dy:H_thr, dx:W_thr]
        thr = thr[0:H_thr - dy, 0:W_thr - dx]

        # feed the rgb and thermal images to the face detector
        # and get the bounding box of a face as output
        (startX, startY, endX,
         endY) = face_region_extractor(face_net, rgb, thr, args["confidence"])

        # if the face is not detected
        # then skip the current image
        if startX is None:
            continue

        # crop the detected faces
        rgb_face = rgb[startY:endY, startX:endX]
        thr_face = thr[startY:endY, startX:endX]

        if args["show"]:
            # make a copy of the rgb image
            # then replace its RED channel with
            # the RED channel of the thermal image
            rgb_copy = rgb.copy()
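
Both examples rely on a face_region_extractor helper that is not shown on this page. Below is a minimal sketch of what such a helper could look like, assuming face_net is an OpenCV DNN face detector such as the res10 SSD Caffe model, and matching the call signature of Example #2 (the thr argument is accepted but unused here).

import cv2
import numpy as np


def face_region_extractor(face_net, rgb, thr, confidence):
    # build a 300x300 blob from the rgb frame and run the detector
    (h, w) = rgb.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(rgb, (300, 300)), 1.0,
                                 (300, 300), (104.0, 177.0, 123.0))
    face_net.setInput(blob)
    detections = face_net.forward()

    # keep the most confident detection if it clears the threshold
    if detections.shape[2] > 0:
        i = np.argmax(detections[0, 0, :, 2])
        if detections[0, 0, i, 2] > confidence:
            # scale the normalized box back to image coordinates
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            return box.astype("int")

    # no face passed the confidence test
    return (None, None, None, None)

Callers unpack four values either way, so the (None, None, None, None) return matches the "if startX is None: continue" check in Example #2.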