Example No. 1
class ToDCT(object):
    def __init__(self, channels=192):
        self.channels = channels
        self.jpeg = TurboJPEG('/usr/lib/libturbojpeg.so')
        self.subset_channel_index = dct_channel_index
        self.subset_y = self.subset_channel_index[channels][0]
        self.subset_cb = self.subset_channel_index[channels][1]
        self.subset_cr = self.subset_channel_index[channels][2]   

    def __call__(self, results):
        img = np.ascontiguousarray(results['img'], dtype="uint8")
        img_encode = self.jpeg.encode(img, quality=100, jpeg_subsample=2)
        dct_y, dct_cb, dct_cr = loads(img_encode)  # per-channel DCT coefficient arrays
        chroma_w, chroma_h = dct_cb.shape[:-1]

        dct_cb_up = cv2.resize(dct_cb, dsize=(chroma_h*2, chroma_w*2), interpolation=cv2.INTER_LINEAR)
        dct_cr_up = cv2.resize(dct_cr, dsize=(chroma_h*2, chroma_w*2), interpolation=cv2.INTER_LINEAR)
           
        if self.channels == 192:
            results['img'] = np.concatenate((dct_y, dct_cb_up, dct_cr_up), axis=2).astype('float32')
        else:
            results['img'] = np.concatenate((dct_y[:, :, self.subset_y], dct_cb_up[:, :, self.subset_cb],
                                             dct_cr_up[:, :, self.subset_cr]), axis=2).astype('float32')
                      
        return results
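
A minimal usage sketch for the transform above (hypothetical, not from the original project): it assumes PyTurboJPEG and the jpeg2dct package are installed (`loads` comes from jpeg2dct.numpy), that the libturbojpeg path matches your system, and it stubs the project-specific dct_channel_index table, which suffices when channels=192 because the full-channel branch is taken; the stub must live in the same module as the class.

# Hypothetical usage sketch -- requires PyTurboJPEG, jpeg2dct and OpenCV.
import numpy as np

# identity stub for the project-specific lookup table (only key 192 is touched)
dct_channel_index = {192: (list(range(64)), list(range(64)), list(range(64)))}

transform = ToDCT(channels=192)
results = {'img': np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)}
results = transform(results)
print(results['img'].shape)  # (28, 28, 192): each 8x8 JPEG block yields 64 coefficients per channel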
Example No. 2
def do_mem_profile_compress():
    jpeg = TurboJPEG()
    quality = 100
    out_img = 'jpegs/single_v0.jpg'
    in_img = 'imgs/output_fwd_v0.png'
    bgr_array = cv2.imread(in_img)
    with open(out_img, 'wb') as out_file:
        out_file.write(jpeg.encode(bgr_array, quality=quality))
Example No. 3
class TurboJpegHandler(object):
    """The object handling JPEG compression/decompression"""
    def __init__(self, jpeg_quality):
        self.jpeg_quality = jpeg_quality
        self.jpeg = TurboJPEG()

    def compress(self, cv2_img):
        return self.jpeg.encode(cv2_img, quality=self.jpeg_quality)

    def decompress(self, img_buffer):
        return self.jpeg.decode(img_buffer)
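
A quick, hypothetical round-trip check for the handler above (assumes PyTurboJPEG with its default library path; the image path is illustrative):

import cv2

handler = TurboJpegHandler(jpeg_quality=90)
img = cv2.imread('input.png')       # any BGR image (hypothetical path)
buf = handler.compress(img)         # JPEG bytes
restored = handler.decompress(buf)  # back to a BGR ndarray
print(len(buf), restored.shape)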
Example No. 4
class ToDCT(object):
    def __init__(self):
        self.jpeg = TurboJPEG('/usr/lib/libturbojpeg.so')

    def __call__(self, results):
        img = np.ascontiguousarray(results['img'], dtype="uint8")
        img_encode = self.jpeg.encode(img, quality=100, jpeg_subsample=2)
        dct_y, dct_cb, dct_cr = loads(img_encode)  # per-channel DCT coefficient arrays
        results['dct_y'] = dct_y
        results['dct_cb'] = dct_cb
        results['dct_cr'] = dct_cr
        return results
Example No. 5
def do_benchmarking_compress(bgr_array, quality, out_img):
    timeval = []
    jpeg = TurboJPEG()
    for j in range(100):
        for i in range(100):
            with open(out_img, 'wb') as out_file:
                start = time.process_time()
                out_file.write(jpeg.encode(bgr_array, quality=quality))
                end = time.process_time()
            timeval.append((end - start) * 1000)  # msec
    return timeval
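
The function above returns 10,000 per-encode timings in milliseconds. One possible way to summarize them (the image path reuses Example No. 2's layout and is an assumption):

import cv2
import numpy as np

bgr_array = cv2.imread('imgs/output_fwd_v0.png')  # assumed test image
timings = np.asarray(do_benchmarking_compress(bgr_array, 100, 'jpegs/bench.jpg'))
print("mean {0:.3f} ms, median {1:.3f} ms, p95 {2:.3f} ms over {3} runs".format(
    timings.mean(), np.median(timings), np.percentile(timings, 95), timings.size))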
Example No. 6
def main():
    input_img = "rabbit.jpeg"
    output_name = 'output.jpg'
    jpeg = TurboJPEG()

    # print("=============Read Image=============")
    img = cv2.imread(input_img)

    with open(input_img, 'rb') as infile:
        img = jpeg.decode(infile.read())

    # print("=============Write Image=============")

    cv2.imwrite(output_name, img)

    with open(output_name, 'wb') as outfile:
        outfile.write(jpeg.encode(img, quality=30))
    base1 = jpeg.encode(img, quality=30)
    b64_str = base64.b64encode(base1).decode("ascii")  # JPEG bytes as a Base64 string
    # print("=============Python Utils=============")

    base2 = compress_encode_image(img)
    decode_decompress_image(base2)
Example No. 7
def test_turbo():
    img = np.load(op.join(op.dirname(__file__), 'data', 'mona_lisa.npy'))
    turbo = TurboJPEG()

    encoded = turbo.encode(img,
                           quality=95,
                           pixel_format=TJPF.BGR,
                           jpeg_subsample=TJSAMP.YUV420)
    assert len(img.data) > len(encoded)
    assert encoded == turbo.encode(img,
                                   quality=95,
                                   pixel_format=TJPF.BGR,
                                   jpeg_subsample=TJSAMP.YUV420)
    assert turbo.info(encoded) == (341, 229, 'YUV420', 'BGR')

    decoded = turbo.decode(encoded)
    np.testing.assert_equal(decoded, turbo.decode(encoded))
    assert not np.array_equal(
        decoded, turbo.decode(encoded, fast_dct=True, fast_upsample=False))
    assert not np.array_equal(
        decoded, turbo.decode(encoded, fast_dct=False, fast_upsample=True))
    assert not np.array_equal(
        decoded, turbo.decode(encoded, fast_dct=True, fast_upsample=True))
    assert phash_compare(img, decoded) <= 5
Example No. 8
class VideoStream():
    def __init__(self):
        # initialize the video stream from the bundled footage file
        video_url_path = os.path.join("face_mask_detect", "static", "footages", "cottonbro_2.mp4")
        self.vs = FileVideoStream(path=video_url_path).start()

        # Client list
        self.stream_list = []

        # Initialize video server
        self.video_server = StreamServer(
            5001, self.stream_list, "image/jpeg")
        self.video_server.activate()

        # Initialize json server
        self.js_server = ResponseServer(
            5003, "application/json")
        self.js_server.activate()

        # turbo jpeg initialization
        self.jpeg = TurboJPEG()


    def get_frame(self):
        frame = self.vs.read()
        return frame

    def reset(self):
        self.end_process()
        video_url_path = os.path.join("face_mask_detect", "static", "footages", "cottonbro_2.mp4")
        self.vs = FileVideoStream(path=video_url_path).start()

    def end_process(self):
        self.vs.stop()

    def send_image(self, queue_list, image, ts):
        encoded_image = self.jpeg.encode(image, quality=80)
        # Put image into queue for each server thread
        for q in queue_list:
            try:
                block = (ts, encoded_image)
                q.put(block, True, 0.02)
            except queue.Full:
                pass
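
A hypothetical client for the stream served above, assuming StreamServer exposes a standard multipart MJPEG endpoint on port 5001 (OpenCV builds with FFmpeg can read such streams directly):

import cv2

cap = cv2.VideoCapture('http://localhost:5001/')  # assumed endpoint
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('stream', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()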
Example No. 9
def unpack_4fr(file_path):
    jpeg_encoder = TurboJPEG('turbojpeg.dll')

    with open(file_path, 'rb') as f:
        data = f.read()

    header = data[:8]
    geo_size, texture_size = struct.unpack('II', header)

    geo_buffer = data[8:8 + geo_size]
    texture_buffer = data[8 + geo_size:]

    # obj
    geo_buffer = lz4framed.decompress(geo_buffer)
    point_list = np.frombuffer(geo_buffer, dtype=np.float32)
    point_list = point_list.reshape(-1, 5)

    pos_list = point_list[:, 0:3]
    uv_list = point_list[:, 3:5]

    uv_list = np.array(uv_list, np.float32)
    uv_list -= [0, 1.0]
    uv_list *= [1, -1]

    pos_strings = [f'v {x} {y} {z}' for x, y, z in pos_list]
    uv_strings = [f'vt {u} {v}' for u, v in uv_list]
    face_strings = [
        f'f {f}/{f} {f + 1}/{f + 1} {f + 2}/{f + 2}'
        for f in range(1, point_list.shape[0], 3)
    ]

    obj_data = ['g'] + pos_strings + uv_strings + ['g'] + face_strings
    obj_data = '\n'.join(obj_data)

    with open(file_path.replace('4dr', 'obj'), 'w') as f:
        f.write(obj_data)

    # jpg
    with open(file_path.replace('4dr', 'jpg'), 'wb') as f:
        im = jpeg_encoder.decode(texture_buffer, TJPF_RGB)
        f.write(jpeg_encoder.encode(im))

    return file_path
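
From the reader above, the file layout is: an 8-byte header of two uint32 sizes (native byte order, matching the struct.unpack('II', ...) call), an lz4framed-compressed block of float32 (x, y, z, u, v) records, then a raw JPEG texture. A hypothetical inverse for producing test files (the name pack_4fr is invented):

import struct

import lz4framed
import numpy as np


def pack_4fr(out_path, point_list, jpeg_bytes):
    # point_list: (N, 5) float32 array of x, y, z, u, v records
    geo_raw = np.ascontiguousarray(point_list, dtype=np.float32).tobytes()
    geo_buffer = lz4framed.compress(geo_raw)
    # the header stores the compressed geometry size and the texture size
    header = struct.pack('II', len(geo_buffer), len(jpeg_bytes))
    with open(out_path, 'wb') as f:
        f.write(header + geo_buffer + jpeg_bytes)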
Example No. 10
class ToDCTUpscaledStatic(object):
    def __init__(self, channels=None, is_test=False, interpolation='BILINEAR'):
        self.jpeg = TurboJPEG('/usr/lib/libturbojpeg.so')
        self.channels = channels
        self.is_test = is_test
        self.interpolation = interpolation

        if channels and channels != 192:
            self.subset_channel_index = dct_channel_index
            self.subset_y = self.subset_channel_index[channels][0]
            self.subset_cb = self.subset_channel_index[channels][1]
            self.subset_cr = self.subset_channel_index[channels][2]

    def __call__(self, results):
        h, w = results['img'].shape[:-1]
        if self.is_test:
            results['img_raw'] = results['img']
        img_raw_4x = cv2.resize(results['img'],
                                dsize=(w * 2, h * 2),
                                interpolation=INTER_MODE[self.interpolation])
        img_raw_8x = cv2.resize(results['img'],
                                dsize=(w * 4, h * 4),
                                interpolation=INTER_MODE[self.interpolation])
        img_4x = np.ascontiguousarray(img_raw_4x, dtype="uint8")
        img_8x = np.ascontiguousarray(img_raw_8x, dtype="uint8")
        img_encode_4x = self.jpeg.encode(img_4x, quality=100, jpeg_subsample=2)
        img_encode_8x = self.jpeg.encode(img_8x, quality=100, jpeg_subsample=2)
        dct_y, _, _ = loads(img_encode_4x)  # luma DCT coefficients from the 2x-upscaled image
        _, dct_cb, dct_cr = loads(img_encode_8x)  # chroma DCT coefficients from the 4x-upscaled image

        plot_dct(dct_y, results['img_info']['filename'])

        if self.channels == 192:
            results['img'] = np.concatenate((dct_y, dct_cb, dct_cr), axis=2)
        else:
            results['img'] = np.concatenate(
                (dct_y[:, :, self.subset_y], dct_cb[:, :, self.subset_cb],
                 dct_cr[:, :, self.subset_cr]),
                axis=2)

        return results
Example No. 11
class SocialDistancing:
    colors = [(0, 255, 0), (0, 0, 255)]

    nd_color = [(153, 0, 51), (153, 0, 0), (153, 51, 0), (153, 102, 0),
                (153, 153, 0), (102, 153, 0), (51, 153, 0), (0, 153, 0),
                (0, 102, 153), (0, 153, 51), (0, 153, 102), (0, 153, 153),
                (0, 102, 153), (0, 51, 153), (0, 0, 153), (153, 0, 102),
                (102, 0, 153), (153, 0, 153), (102, 0, 153), (0, 0, 153),
                (0, 0, 153), (0, 0, 153), (0, 153, 153), (0, 153, 153),
                (0, 153, 153)]

    connections = [(0, 16), (0, 15), (16, 18), (15, 17), (0, 1), (1, 2),
                   (2, 3), (3, 4), (1, 5), (5, 6), (6, 7), (1, 8), (8, 9),
                   (9, 10), (10, 11), (8, 12), (12, 13), (13, 14), (11, 24),
                   (11, 22), (22, 23), (14, 21), (14, 19), (19, 20)]
    '''
        Initialize Object
    '''
    def __init__(self, args):
        # Ratio params
        horizontal_ratio = float(args[0].horizontal_ratio)
        vertical_ratio = float(args[0].vertical_ratio)

        # Check video
        if args[0].video != "enabled" and args[0].video != "disabled":
            print("Error: set correct video mode, enabled or disabled")
            sys.exit(-1)

        # Check image
        if args[0].image != "enabled" and args[0].image != "disabled":
            print("Error: set correct image mode, enabled or disabled")
            sys.exit(-1)

        # Convert args to boolean
        self.use_video = args[0].video == "enabled"

        self.use_image = args[0].image == "enabled"

        self.use_preview = args[0].preview == "enabled"

        # Unable to use video and image mode at same time
        if self.use_video and self.use_image:
            print(
                "Error: unable to use video and image mode at the same time!")
            sys.exit(-1)

        # Require that at least one of video or image mode is enabled
        if not self.use_video and not self.use_image:
            print("Error: enable either video or image mode!")
            sys.exit(-1)

        self.streaming = args[0].streaming == "enabled"

        if self.use_video:
            # Open video capture
            self.cap = cv2.VideoCapture(args[0].stream_in)

            if not self.cap.isOpened():
                print("Error: Opening video stream or file {0}".format(
                    args[0].stream_in))
                sys.exit(-1)

            # Get input size
            width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            if not self.streaming:
                # Open video output (if output is not an image)
                self.out = cv2.VideoWriter(args[0].stream_out,
                                           cv2.VideoWriter_fourcc(*'XVID'),
                                           int(self.cap.get(cv2.CAP_PROP_FPS)),
                                           (width, height))

                if self.out is None:
                    print("Error: Unable to open output video file {0}".format(
                        args[0].stream_out))
                    sys.exit(-1)

            # Get image size
            im_size = (width, height)

        if self.use_image:
            self.image = cv2.imread(args[0].image_in)
            if self.image is None:
                print("Error: Unable to open input image file {0}".format(
                    args[0].image_in))
                sys.exit(-1)

            self.image_out = args[0].image_out

            # Get image size
            im_size = (self.image.shape[1], self.image.shape[0])

        # Compute homography
        self.homography_matrix = self.compute_homography(
            horizontal_ratio, vertical_ratio, im_size)

        self.background_masked = False
        # Open background image, if necessary
        if args[0].masked == "enabled":
            # Set masked flag
            self.background_masked = True

            # Load static background
            self.background_image = cv2.imread(args[0].background_in)

            # Close, if no background, but required
            if self.background_image is None:
                print("Error: Unable to load background image (flag enabled)")
                sys.exit(-1)

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()

        # Openpose params

        # Model path
        params["model_folder"] = args[0].openpose_folder

        # Face disabled
        params["face"] = False

        # Hand disabled
        params["hand"] = False

        # Net Resolution
        params["net_resolution"] = args[0].net_size

        # Gpu number
        params["num_gpu"] = 1  # Set GPU number

        # Gpu Id
        # Set GPU start id (not considering previous)
        params["num_gpu_start"] = 0

        # Starting OpenPose
        self.opWrapper = op.WrapperPython()
        self.opWrapper.configure(params)
        self.opWrapper.start()

        # Process Image
        self.datum = op.Datum()

        # Json server
        self.dt_vector = {}

        # Client list
        self.stream_list = []

        if self.streaming:
            # Initialize video server
            self.video_server = StreamServer(int(args[0].video_port),
                                             self.stream_list, "image/jpeg")
            self.video_server.activate()

            # Initialize json server
            self.js_server = ResponseServer(int(args[0].js_port),
                                            "application/json")
            self.js_server.activate()

        # turbo jpeg initialization
        self.jpeg = TurboJPEG()

        # Height calibration value
        self.calibrate = float(args[0].calibration)

        # Actually unused
        self.ellipse_angle = 0

        # Body confidence threshold
        self.body_th = float(args[0].body_threshold)

        # Show confidence
        self.show_confidence = args[0].show_confidence == "enabled"

    '''
        Draw Skeleton
    '''

    def draw_skeleton(self, frame, keypoints, colour):

        for keypoint_id1, keypoint_id2 in self.connections:
            x1, y1 = keypoints[keypoint_id1]
            x2, y2 = keypoints[keypoint_id2]

            if 0 in (x1, y1, x2, y2):
                continue

            pt1 = int(round(x1)), int(round(y1))
            pt2 = int(round(x2)), int(round(y2))

            cv2.circle(frame,
                       center=pt1,
                       radius=4,
                       color=self.nd_color[keypoint_id2],
                       thickness=-1)
            cv2.line(frame,
                     pt1=pt1,
                     pt2=pt2,
                     color=self.nd_color[keypoint_id2],
                     thickness=2)

    '''
        Compute skeleton bounding box
    '''

    def compute_simple_bounding_box(self, skeleton):
        x = skeleton[::2]
        x = np.where(x == 0.0, np.nan, x)
        left, right = int(round(np.nanmin(x))), int(round(np.nanmax(x)))
        y = skeleton[1::2]
        y = np.where(y == 0.0, np.nan, y)
        top, bottom = int(round(np.nanmin(y))), int(round(np.nanmax(y)))
        return left, right, top, bottom

    '''
        Compute Homography
    '''

    def compute_homography(self, H_ratio, V_ratio, im_size):
        rationed_height = im_size[1] * V_ratio
        rationed_width = im_size[0] * H_ratio
        src = np.array([[0, 0], [0, im_size[1]], [im_size[0], im_size[1]],
                        [im_size[0], 0]])
        dst = np.array([[0 + rationed_width / 2, 0 + rationed_height],
                        [0, im_size[1]], [im_size[0], im_size[1]],
                        [im_size[0] - rationed_width / 2, 0 + rationed_height]],
                       np.int32)
        h, status = cv2.findHomography(src, dst)
        return h

    '''
        Compute overlap
    '''

    def compute_overlap(self, rect_1, rect_2):
        x_overlap = max(0,
                        min(rect_1[1], rect_2[1]) - max(rect_1[0], rect_2[0]))
        y_overlap = max(0,
                        min(rect_1[3], rect_2[3]) - max(rect_1[2], rect_2[2]))
        return x_overlap * y_overlap > 0

    '''
        Trace results
    '''

    def trace(self, image, skeletal_coordinates, draw_ellipse_requirements,
              is_skeletal_overlapped):
        bodys = []

        # Trace ellipses and bodies on the target image; enumerate keeps the
        # index aligned with the probability list even when a body is skipped
        for i, skeletal_coordinate in enumerate(skeletal_coordinates[0]):
            if float(skeletal_coordinates[1][i]) < self.body_th:
                continue

            # Trace ellipse
            cv2.ellipse(image, (int(draw_ellipse_requirements[i][0]),
                                int(draw_ellipse_requirements[i][1])),
                        (int(draw_ellipse_requirements[i][2]),
                         int(draw_ellipse_requirements[i][3])), 0, 0, 360,
                        self.colors[int(is_skeletal_overlapped[i])], 3)

            # Trace skeleton
            skeletal_coordinate = np.array(skeletal_coordinate)
            self.draw_skeleton(image, skeletal_coordinate.reshape(-1, 2),
                               (255, 0, 0))

            if int(skeletal_coordinate[2]) != 0 and int(
                    skeletal_coordinate[3]) != 0 and self.show_confidence:
                cv2.putText(
                    image, "{0:.2f}".format(skeletal_coordinates[1][i]),
                    (int(skeletal_coordinate[2]), int(skeletal_coordinate[3])),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

            # Append json body data, joints coordinates, ground ellipses
            bodys.append([[round(x) for x in skeletal_coordinate],
                          draw_ellipse_requirements[i],
                          int(is_skeletal_overlapped[i])])

        self.dt_vector["bodys"] = bodys

    '''
        Evaluate skeleton height
    '''

    def evaluate_height(self, skeletal_coordinate):
        # Calculate skeleton height
        calculated_height = 0

        # Left side body joints from top to bottom
        #joint_set = [0, 1, 8, 12, 13, 14]

        # Left leg
        joint_set = [12, 13, 14]

        # Check if the left leg is complete
        left_leg = True
        for k in joint_set:
            x = int(skeletal_coordinate[k * 2])
            y = int(skeletal_coordinate[k * 2 + 1])
            if x == 0 or y == 0:
                # No left leg, try the right leg instead
                left_leg = False
                break

        if not left_leg:
            joint_set = [9, 10, 11]
            # Check if leg is complete
            for k in joint_set:
                x = int(skeletal_coordinate[k * 2])
                y = int(skeletal_coordinate[k * 2 + 1])
                if x == 0 or y == 0:
                    # No left leg, no right leg, unable to evaluate ellipse
                    return 0

        # Evaluate leg height
        pointer = -1
        for k in joint_set[:-1]:
            pointer += 1
            if skeletal_coordinate[joint_set[pointer]*2]\
                    and skeletal_coordinate[joint_set[pointer+1]*2]\
                    and skeletal_coordinate[joint_set[pointer]*2+1]\
                    and skeletal_coordinate[joint_set[pointer+1]*2+1]:
                calculated_height = calculated_height +\
                    math.sqrt(((skeletal_coordinate[joint_set[pointer]*2] -
                                skeletal_coordinate[joint_set[pointer+1]*2])**2) +
                              ((skeletal_coordinate[joint_set[pointer]*2+1] -
                                skeletal_coordinate[joint_set[pointer+1]*2+1])**2))

        return calculated_height * self.calibrate

    '''
        Evaluate overlapping
    '''

    def evaluate_overlapping(self, ellipse_boxes, is_skeletal_overlapped,
                             ellipse_pool):
        # checks for overlaps between people's ellipses, to determine risky or not
        for ind1, ind2 in itertools.combinations(
                list(range(0, len(ellipse_pool))), 2):

            is_overlap = cv2.bitwise_and(ellipse_pool[ind1],
                                         ellipse_pool[ind2])

            if is_overlap.any() and (not is_skeletal_overlapped[ind1]
                                     or not is_skeletal_overlapped[ind2]):
                is_skeletal_overlapped[ind1] = 1
                is_skeletal_overlapped[ind2] = 1

    '''
        Create Joint Array
    '''

    def create_joint_array(self, skeletal_coordinates):
        # Get joints sequence
        bodys_sequence = []
        bodys_probability = []
        for body in skeletal_coordinates:
            body_sequence = []
            body_probability = 0.0
            # For each joint, put it in the vector list
            for joint in body:
                body_sequence.append(joint[0])
                body_sequence.append(joint[1])

                # Sum joints probability to find body probability
                body_probability += joint[2]

            body_probability = body_probability / len(body)

            # Add body sequence to list
            bodys_sequence.append(body_sequence)
            bodys_probability.append(body_probability)

        # Assign coordinates sequence
        return [bodys_sequence, bodys_probability]

    '''
        Evaluate ellipse shadows, one per body
    '''

    def evaluate_ellipses(self, skeletal_coordinates,
                          draw_ellipse_requirements, ellipse_boxes,
                          ellipse_pool):
        for skeletal_coordinate in skeletal_coordinates:
            # Evaluate skeleton bounding box
            left, right, top, bottom = self.compute_simple_bounding_box(
                np.array(skeletal_coordinate))

            bb_center = np.array([(left + right) / 2, (top + bottom) / 2],
                                 np.int32)

            calculated_height = self.evaluate_height(skeletal_coordinate)

            # computing how the height of the circle varies in perspective
            pts = np.array([[bb_center[0], top], [bb_center[0], bottom]],
                           np.float32)

            pts1 = pts.reshape(-1, 1, 2).astype(np.float32)  # (n, 1, 2)

            dst1 = cv2.perspectiveTransform(pts1, self.homography_matrix)

            # vertical semi-axis of the ellipse in perspective
            width = int(dst1[1, 0][1] - dst1[0, 0][1])

            # Bounding box surrounding the ellipses, used to test whether two ellipses overlap
            ellipse_bbx = [
                bb_center[0] - calculated_height,
                bb_center[0] + calculated_height, bottom - width,
                bottom + width
            ]

            # Add bounding box to ellipse list
            ellipse_boxes.append(ellipse_bbx)

            ellipse = [
                int(bb_center[0]),
                int(bottom),
                int(calculated_height),
                int(width)
            ]

            mask_copy = self.mask.copy()

            ellipse_pool.append(
                cv2.ellipse(mask_copy, (bb_center[0], bottom),
                            (int(calculated_height), width), 0, 0, 360,
                            (255, 255, 255), -1))

            draw_ellipse_requirements.append(ellipse)

    '''
        Analyze image and evaluate distances
    '''

    def distances_evaluate(self, image, background):
        ellipse_boxes = []

        draw_ellipse_requirements = []

        ellipse_pool = []

        # Assign input image to openpose
        self.datum.cvInputData = image

        # Start wrapper
        self.opWrapper.emplaceAndPop([self.datum])

        # Get openpose coordinates (rounding values)
        skeletal_coordinates = self.datum.poseKeypoints.tolist()

        # Trace on background
        if self.background_masked:
            image = background

        self.dt_vector['ts'] = int(round(time.time() * 1000))
        self.dt_vector['bodys'] = []

        if type(skeletal_coordinates) is list:
            # Remove probability from joints and get a joint position list
            skeletal_coordinates = self.create_joint_array(
                skeletal_coordinates)

            # Initialize overlapped buffer
            is_skeletal_overlapped = np.zeros(
                np.shape(skeletal_coordinates[0])[0])

            # Evaluate ellipses for each body detected by openpose
            self.evaluate_ellipses(skeletal_coordinates[0],
                                   draw_ellipse_requirements, ellipse_boxes,
                                   ellipse_pool)

            # Evaluate overlapping
            self.evaluate_overlapping(ellipse_boxes, is_skeletal_overlapped,
                                      ellipse_pool)

            # Trace results over output image
            self.trace(image, skeletal_coordinates, draw_ellipse_requirements,
                       is_skeletal_overlapped)

        if self.streaming:
            # Send video to client queues
            self.send_image(self.stream_list, image, int(self.dt_vector['ts']))

            # Make json vector available to REST requests
            self.js_server.put(bytes(json.dumps(self.dt_vector), "UTF-8"))

        return image

    '''
        Send image over queue list and then over http mjpeg stream
    '''

    def send_image(self, queue_list, image, ts):

        encoded_image = self.jpeg.encode(image, quality=80)
        # Put image into queue for each server thread
        for q in queue_list:
            try:
                block = (ts, encoded_image)
                q.put(block, True, 0.02)
            except queue.Full:
                pass

    '''
        Analyze video
    '''

    def analyze_video(self):
        while self.cap.isOpened():
            # Capture from image/video
            ret, image = self.cap.read()

            # Check image
            if image is None or not ret:
                os._exit(0)

            self.mask = np.zeros(image.shape, dtype=np.uint8)

            # Get openpose output
            if self.background_masked:
                background = self.background_image.copy()
            else:
                background = image

            image = self.distances_evaluate(image, background)

            # Write image
            if not self.streaming:
                self.out.write(image)

            # Show image and wait some time
            if self.use_preview:
                cv2.imshow('Social Distance', image)
                cv2.waitKey(1)

    '''
        Analyze image
    '''

    def analyze_image(self):

        # Get openpose output
        if self.background_masked:
            background = self.background_image.copy()
        else:
            background = self.image

        self.mask = np.zeros(self.image.shape, dtype=np.uint8)

        self.image = self.distances_evaluate(self.image, background)

        # Write image
        cv2.imwrite(self.image_out, self.image)

        # Show image and wait some time
        if self.use_preview:
            cv2.imshow('Social Distance', self.image)
            cv2.waitKey(1000)

    '''
        Analyze image/video
    '''

    def analyze(self):
        if self.use_image:
            self.analyze_image()

        if self.use_video:
            self.analyze_video()
Example No. 12
class ProcessSource:
    '''
        Initialize
    '''
    def __init__(self, args):

        # Social Distancing arguments
        arguments = {}

        # Arguments
        arguments["horizontal_ratio"] = args[0].horizontal_ratio
        arguments["vertical_ratio"] = args[0].vertical_ratio
        arguments["calibration"] = args[0].calibration
        arguments["body_threshold"] = args[0].body_threshold
        arguments["show_confidence"] = args[0].show_confidence
        arguments["show_sketch"] = args[0].show_sketch

        # Initialize social distancing
        self.social_distancing = SocialDistancing(arguments)

        # Initialize Openpose
        self.initialize_openpose(args)

        # Initialize file opening/writing and streaming
        self.initialize_others(args)

    '''
        Initialize openpose
    '''

    def initialize_openpose(self, args):
        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()

        # Openpose params

        # Model path
        params["model_folder"] = args[0].openpose_folder

        # Face disabled
        params["face"] = False

        # Hand disabled
        params["hand"] = False

        # Net Resolution
        params["net_resolution"] = args[0].net_size

        # Gpu number
        params["num_gpu"] = 1  # Set GPU number

        # Gpu Id
        # Set GPU start id (not considering previous)
        params["num_gpu_start"] = 0

        # Starting OpenPose
        self.opWrapper = op.WrapperPython()
        self.opWrapper.configure(params)
        self.opWrapper.start()

        self.datum = op.Datum()

    '''
        Initialize acquisition methods (video, mjpeg, preprocessed json, jetson, etc.), sockets, and output files
    '''

    def initialize_others(self, args):
        # Convert args to boolean
        self.use_video = args[0].video == "enabled"

        # Process image
        self.use_image = args[0].image == "enabled"

        # Use preview
        self.use_preview = args[0].preview == "enabled"

        # Jetson internal camera enabled
        self.jetson_video = args[0].jetson_video == "enabled"

        # Mjpeg video reader
        self.use_mjpeg = args[0].use_mjpeg == "enabled"

        # Enable streaming option
        self.streaming = args[0].streaming == "enabled"

        # Use json as input
        self.use_js = args[0].use_js == "enabled"

        # Json input file
        self.js_in = args[0].js_in

        if self.use_video:
            # Open video capture
            if not self.jetson_video:
                # Use standard cv2 capture library
                self.cap = cv2.VideoCapture(args[0].stream_in)
            else:
                # Connect Standard cv2 capture library to gstreamer
                print(gstreamer_pipeline(flip_method=0))
                self.cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0),
                                            cv2.CAP_GSTREAMER)

            if not self.cap.isOpened():
                print("Error: Opening video stream or file {0}".format(
                    args[0].stream_in),
                      flush=True)
                sys.exit(-1)

            # Get input size
            width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            self.mask_in = cv2.imread(args[0].mask_in)

            if not self.streaming:
                # Open video output (if output is not an image)
                self.out = cv2.VideoWriter(
                    args[0].stream_out,
                    cv2.VideoWriter_fourcc(*args[0].encoding_codec), 25,
                    (width, height))

                if self.out is None:
                    print("Error: Unable to open output video file {0}".format(
                        args[0].stream_out),
                          flush=True)
                    sys.exit(-1)

            # Get image size
            im_size = (width, height)

        if self.use_image:
            self.mask_in = cv2.imread(args[0].mask_in)

            self.image = cv2.imread(args[0].image_in)

            if self.image is None:
                print("Error: Unable to open input image file {0}".format(
                    args[0].image_in),
                      flush=True)
                sys.exit(-1)

            self.image_out = args[0].image_out

            # Get image size
            im_size = (self.image.shape[1], self.image.shape[0])

        if self.use_mjpeg:
            # Create mjpeg reader
            self.mjpeg_reader = MjpegReader(args[0].stream_in)

            # Read first image to get image size
            image = self.mjpeg_reader.get_image()

            if not self.mjpeg_reader.is_opened():
                print("Error: Unable to open input mjpeg file {0}".format(
                    args[0].stream_in),
                      flush=True)
                sys.exit(-1)

            # Get input size
            width = int(image.shape[1])
            height = int(image.shape[0])

            if not self.streaming:
                # Open video output (if output is not an image)
                self.out = cv2.VideoWriter(
                    args[0].stream_out,
                    cv2.VideoWriter_fourcc(*args[0].encoding_codec),
                    int(args[0].dummy_fps), (width, height))

                if self.out is None:
                    print("Error: Unable to open output video file {0}".format(
                        args[0].stream_out),
                          flush=True)
                    sys.exit(-1)

            print("Mjpeg multipart file:{0}x{1}".format(width, height))

            # Get image size
            im_size = (width, height)

        if not self.use_js:
            # Compute homography
            self.social_distancing.compute_homography(im_size)

        self.background_masked = False
        # Open background image, if necessary
        if args[0].masked == "enabled":
            # Set masked flag
            self.background_masked = True

            # Load static background
            self.background_image = cv2.imread(args[0].background_in)

            # Close, if no background, but required
            if self.background_image is None:
                print("Error: Unable to load background image (flag enabled)",
                      flush=True)
                sys.exit(-1)

        if self.use_js:
            im_size = (self.background_image.shape[1],
                       self.background_image.shape[0])

            self.out = cv2.VideoWriter(
                args[0].stream_out,
                cv2.VideoWriter_fourcc(*args[0].encoding_codec),
                int(args[0].dummy_fps), im_size)

            self.social_distancing.compute_homography(im_size)

        # Json server
        self.dt_vector = {}

        # Client list
        self.stream_list = []
        self.js_list = []

        if self.streaming:
            # Initialize video server
            self.video_server = StreamServer(int(args[0].video_port),
                                             self.stream_list, "image/jpeg")
            self.video_server.activate()

            # Initialize stream server
            self.stream_server = StreamServer(int(args[0].stream_port),
                                              self.js_list, "application/json")
            self.stream_server.activate()

            # Initialize json server
            self.js_server = ResponseServer(int(args[0].js_port),
                                            "application/json")
            self.js_server.activate()

        # turbo jpeg initialization
        self.jpeg = TurboJPEG()

        # Json recorder
        self.js_recording = False
        if args[0].js_out != "":
            self.js_recording = True
            self.js_out = open(args[0].js_out, "w")

        # Mjpeg recorder
        self.mjpeg_recorder = False
        if args[0].mjpeg_out != "":
            self.mjpeg_recorder = True
            self.mjpeg_out = open(args[0].mjpeg_out, "wb")

        # Json version
        self.dt_vector["vs"] = 1

        # Fps evaluation init
        self.millis = 0
        self.frames = 0

    '''
        Process source and save to image/video/js file, distribute over the network
    '''

    def process_source(self, source, background):
        start = round(time.time() * 1000)

        if self.mask_in is not None:
            source = cv2.bitwise_and(source, self.mask_in)

        # Check if pre-processed json is used
        if not self.use_js:
            # Assign input image to openpose
            self.datum.cvInputData = source

            # Use Openpose to extract poses
            self.opWrapper.emplaceAndPop([self.datum])

            # Get openpose coordinates (rounding values)
            skeletals = np.around(
                np.array(self.datum.poseKeypoints).tolist(), 2).tolist()
        else:
            # Copy json data
            skeletals = source

        # Trace on background
        if self.background_masked:
            source = background

        if type(skeletals) is not list:
            return background

        # Evaluate distances, draw body and ellipses and get json bodies and ellipses list
        image, bodies, ellipses = self.social_distancing.distances_calculate(
            source, skeletals, [1 for k in range(len(skeletals))])

        # Save data to json vector
        self.dt_vector["bodies"] = bodies
        self.dt_vector["ellipses"] = ellipses

        if self.streaming:
            # Send video to client queues
            self.send_image(self.stream_list, image, int(self.dt_vector['ts']))

            # Make json vector available to REST requests
            self.js_server.put(bytes(json.dumps(self.dt_vector), "UTF-8"))

            # Send json vector to streaming clients
            self.send_js(self.js_list,
                         bytes(json.dumps(self.dt_vector), "UTF-8"),
                         int(self.dt_vector['ts']))

        # Write json data
        if self.js_recording:
            self.js_out.write(json.dumps(self.dt_vector) + "\n")

        stop = round(time.time() * 1000)

        if self.millis > 1000:
            print("Analyzing at {0} Fps".format(self.frames),
                  end="\r",
                  flush=True)
            self.millis = 0
            self.frames = 0

        self.millis += stop - start
        self.frames += 1

        return image

    '''
        Send image over queue list and then over http mjpeg stream
    '''

    def send_image(self, queue_list, image, ts):

        encoded_image = self.jpeg.encode(image, quality=80)
        # Put image into queue for each server thread
        for q in queue_list:
            try:
                block = (ts, encoded_image)
                q.put(block, True, 0.02)
            except queue.Full:
                pass

    '''
        Send json over queue list and then over http multipart stream
    '''

    def send_js(self, queue_list, js, ts):

        # Put json into queue for each server thread
        for q in queue_list:
            try:
                block = (ts, js)
                q.put(block, True, 0.02)
            except queue.Full:
                pass

    '''
        Analyze video
    '''

    def analyze_video(self):
        counter = 0
        while self.cap.isOpened():
            # Get a global image ts
            self.dt_vector['ts'] = int(round(time.time() * 1000))

            # Capture from image/video
            ret, image = self.cap.read()

            # Check image
            if image is None or not ret:
                os._exit(0)

            # Record image
            if self.mjpeg_recorder:
                encoded_image = self.jpeg.encode(image, quality=80)

                header = "--myboundary\r\n" \
                    "X-TimeStamp: " + str(self.dt_vector['ts']) + "\r\n" \
                    "Content-Type: image/jpeg\r\n" \
                    "Content-Length: " + \
                    str(len(encoded_image)) + "\r\n\r\n"

                self.mjpeg_out.write(bytes(header, "UTF-8"))
                self.mjpeg_out.write(encoded_image)

            # Get openpose output
            if self.background_masked:
                background = self.background_image.copy()
            else:
                background = image

            image = self.process_source(image, background)

            # Write image
            if not self.streaming:
                self.out.write(image)

            # Show image and wait some time
            if self.use_preview:
                cv2.imshow('Social Distance', image)
                cv2.waitKey(1)

            #print(counter, end="\r", flush=True)
            counter += 1

    '''
        Analyze image
    '''

    def analyze_image(self):

        # Get openpose output
        if self.background_masked:
            background = self.background_image.copy()
        else:
            background = self.image

        self.image = self.process_source(self.image, background)

        # Write image
        cv2.imwrite(self.image_out, self.image)

        # Show image and wait some time
        if self.use_preview:
            cv2.imshow('Social Distance', self.image)
            cv2.waitKey(1000)

    '''
        Analyze json data
    '''

    def analyze_js(self):
        # Read json files
        lines = open(self.js_in, "r").read().split("\n")

        # While there are lines
        for line in lines[:-1]:
            js_line = json.loads(line)

            # Create
            background = self.background_image.copy()

            if 'vs' in js_line.keys():
                self.image = self.process_source(js_line['bodies'], background)
            else:
                self.image = self.process_source(js_line['bodys'], background)

            # Write image
            if not self.streaming:
                self.out.write(self.image)

            # Show image and wait some time
            if self.use_preview:
                cv2.imshow('Social Distance', self.image)
                cv2.waitKey(1)

    '''
        Analyze mjpeg (timestamped jpeg sequence)
    '''

    def analyze_mjpeg(self):
        counter = 0

        old_timestamp = self.mjpeg_reader.get_ts()
        while True:
            # Capture from image/video
            image = self.mjpeg_reader.get_image()

            # Get a global image ts
            self.dt_vector['ts'] = self.mjpeg_reader.get_ts()

            # Check image
            if image is None:
                os._exit(0)

            # Record image
            if self.mjpeg_recorder:
                encoded_image = self.jpeg.encode(image, quality=80)

                header = "--myboundary\r\n" \
                    "X-TimeStamp: " + str(self.dt_vector['ts']) + "\r\n" \
                    "Content-Type: image/jpeg\r\n" \
                    "Content-Length: " + \
                    str(len(encoded_image)) + "\r\n\r\n"

                self.mjpeg_out.write(bytes(header, "UTF-8"))
                self.mjpeg_out.write(encoded_image)

            # Get openpose output
            if self.background_masked:
                background = self.background_image.copy()
            else:
                background = image

            image = self.process_source(image, background)

            # Write image
            if not self.streaming:
                self.out.write(image)

            # Show image and wait some time
            if self.use_preview:
                cv2.imshow('Social Distance', image)
                cv2.waitKey(1)

            # Wait timestamp difference
            time.sleep((self.mjpeg_reader.get_ts() - old_timestamp) / 1000)

            # print(counter, end = "\n", flush=True)

            # Store old timestamp
            old_timestamp = self.mjpeg_reader.get_ts()

            counter += 1

    '''
        Analyze image/video/json/mjpeg
    '''

    def analyze(self):
        if self.use_image:
            self.analyze_image()

        if self.use_video:
            self.analyze_video()

        if self.use_js:
            self.analyze_js()

        if self.use_mjpeg:
            self.analyze_mjpeg()
Example No. 13
@profile
def encode(img: np.ndarray):
    pass  # stub measured by the memory/line profiler


def decode():
    pass  # stub measured by the memory/line profiler


if __name__ == '__main__':
    cfg = CFG()
    jpeg = TurboJPEG()
    input_img = 'rabbit.jpeg'
    with open(input_img, 'rb') as infile:
        bgr_array = jpeg.decode(infile.read())


    img_enc = jpeg.encode(bgr_array, quality=20)
    print(type(img_enc))
    print(sys.getsizeof(bgr_array))
    print(sys.getsizeof(img_enc))

    print(cfg.url)
    try:
        response = requests.post(cfg.url, data=img_enc)
        if response.status_code == 200:
            print("Success")
        else:
            raise RuntimeError(
                "Unexpected status code: {0}".format(response.status_code))
    except requests.RequestException as exc:
        raise RuntimeError("fail") from exc

Example No. 14
 im_cuda = jetson.utils.cudaFromNumpy(img)
 detections = net.Detect(im_cuda, w, h, 'none')
 # process detections (if anything was found)
 if len(detections) > 0:
     # build dictionary of detection info for this frame
     analytics = {'frame': nf, 'detections': []}
     for i, d in enumerate(detections):
         # generate info for the current detection in the current frame
         left = int(py_clip(d.Left, 0, w - 1))
         top = int(py_clip(d.Top, 0, h - 1))
         right = int(py_clip(d.Right, 0, w - 1))
         bottom = int(py_clip(d.Bottom, 0, h - 1))
         # extract and resize from original frame (without alpha)
         chip = cv2.resize(frame[top:bottom, left:right, :], (160, 160),
                           interpolation=cv2.INTER_AREA).copy()
         jchip = jpeg.encode(chip)
         # debug: uncomment the following 2 lines to write chips - should be 160x160
         #fn = 'frame{0}.det{1}.jpg'.format(nf,i)
         #with open(fn,'wb') as f: f.write(jchip)
          # convert chip to a character string using Base64 encoding
         b64jchip = base64.b64encode(jchip)
         # assemble dictionary info for this detection
         an = {
             'index': i,
             'conf': d.Confidence,
             'left': d.Left,
             'right': d.Right,
             'top': d.Top,
             'bottom': d.Bottom,
             'chip': b64jchip,
              'classID': d.ClassID,
          }
          analytics['detections'].append(an)
Example No. 15
class AprilTagDetector(DTROS):

    def __init__(self):
        super(AprilTagDetector, self).__init__(
            node_name='apriltag_detector_node',
            node_type=NodeType.PERCEPTION
        )
        # get static parameters
        self.family = rospy.get_param('~family', 'tag36h11')
        self.ndetectors = rospy.get_param('~ndetectors', 1)
        self.nthreads = rospy.get_param('~nthreads', 1)
        self.quad_decimate = rospy.get_param('~quad_decimate', 1.0)
        self.quad_sigma = rospy.get_param('~quad_sigma', 0.0)
        self.refine_edges = rospy.get_param('~refine_edges', 1)
        self.decode_sharpening = rospy.get_param('~decode_sharpening', 0.25)
        self.tag_size = rospy.get_param('~tag_size', 0.065)
        self.rectify_alpha = rospy.get_param('~rectify_alpha', 0.0)
        # dynamic parameter
        self.detection_freq = DTParam(
            '~detection_freq',
            default=-1,
            param_type=ParamType.INT,
            min_value=-1,
            max_value=30
        )
        self._detection_reminder = DTReminder(frequency=self.detection_freq.value)
        # camera info
        self._camera_parameters = None
        self._mapx, self._mapy = None, None
        # create detector object
        self._detectors = [Detector(
            families=self.family,
            nthreads=self.nthreads,
            quad_decimate=self.quad_decimate,
            quad_sigma=self.quad_sigma,
            refine_edges=self.refine_edges,
            decode_sharpening=self.decode_sharpening
        ) for _ in range(self.ndetectors)]
        self._renderer_busy = False
        # create a CV bridge object
        self._jpeg = TurboJPEG()
        # create subscribers
        self._img_sub = rospy.Subscriber(
            '~image',
            CompressedImage,
            self._img_cb,
            queue_size=1,
            buff_size='20MB'
        )
        self._cinfo_sub = rospy.Subscriber(
            '~camera_info',
            CameraInfo,
            self._cinfo_cb,
            queue_size=1
        )
        # create publisher
        self._tag_pub = rospy.Publisher(
            '~detections',
            AprilTagDetectionArray,
            queue_size=1,
            dt_topic_type=TopicType.PERCEPTION,
            dt_help='Tag detections',
        )
        self._img_pub = rospy.Publisher(
            '~detections/image/compressed',
            CompressedImage,
            queue_size=1,
            dt_topic_type=TopicType.VISUALIZATION,
            dt_help='Camera image with tag detections superimposed',
        )
        # create thread pool
        self._workers = ThreadPoolExecutor(self.ndetectors)
        self._tasks = [None] * self.ndetectors
        # create TF broadcaster
        self._tf_bcaster = tf.TransformBroadcaster()

    def on_shutdown(self):
        self.loginfo('Shutting down workers pool')
        self._workers.shutdown()

    def _cinfo_cb(self, msg):
        # create mapx and mapy
        H, W = msg.height, msg.width
        # create new camera info
        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(msg)
        # find optimal rectified pinhole camera
        with self.profiler('/cb/camera_info/get_optimal_new_camera_matrix'):
            rect_K, _ = cv2.getOptimalNewCameraMatrix(
                self.camera_model.K,
                self.camera_model.D,
                (W, H),
                self.rectify_alpha
            )
            # store new camera parameters
            self._camera_parameters = (rect_K[0, 0], rect_K[1, 1], rect_K[0, 2], rect_K[1, 2])
        # create rectification map
        with self.profiler('/cb/camera_info/init_undistort_rectify_map'):
            self._mapx, self._mapy = cv2.initUndistortRectifyMap(
                self.camera_model.K,
                self.camera_model.D,
                None,
                rect_K,
                (W, H),
                cv2.CV_32FC1
            )
        # once we got the camera info, we can stop the subscriber
        self.loginfo('Camera info message received. Unsubscribing from camera_info topic.')
        # noinspection PyBroadException
        try:
            self._cinfo_sub.shutdown()
        except BaseException:
            pass

    def _detect(self, detector_id, msg):
        # turn image message into grayscale image
        with self.profiler('/cb/image/decode'):
            img = self._jpeg.decode(msg.data, pixel_format=TJPF_GRAY)
        # run input image through the rectification map
        with self.profiler('/cb/image/rectify'):
            img = cv2.remap(img, self._mapx, self._mapy, cv2.INTER_NEAREST)
        # detect tags
        with self.profiler('/cb/image/detection'):
            tags = self._detectors[detector_id].detect(
                img, True, self._camera_parameters, self.tag_size)
        # pack detections into a message
        tags_msg = AprilTagDetectionArray()
        tags_msg.header.stamp = msg.header.stamp
        tags_msg.header.frame_id = msg.header.frame_id
        for tag in tags:
            # turn rotation matrix into quaternion
            q = _matrix_to_quaternion(tag.pose_R)
            p = tag.pose_t.T[0]
            # create single tag detection object
            detection = AprilTagDetection(
                transform=Transform(
                    translation=Vector3(
                        x=p[0],
                        y=p[1],
                        z=p[2]
                    ),
                    rotation=Quaternion(
                        x=q[0],
                        y=q[1],
                        z=q[2],
                        w=q[3]
                    )
                ),
                tag_id=tag.tag_id,
                tag_family=str(tag.tag_family),
                hamming=tag.hamming,
                decision_margin=tag.decision_margin,
                homography=tag.homography.flatten().astype(np.float32).tolist(),
                center=tag.center.tolist(),
                corners=tag.corners.flatten().tolist(),
                pose_error=tag.pose_err
            )
            # add detection to array
            tags_msg.detections.append(detection)
            # publish tf
            self._tf_bcaster.sendTransform(
                p.tolist(),
                q.tolist(),
                msg.header.stamp,
                'tag/{:s}'.format(str(tag.tag_id)),
                msg.header.frame_id
            )
        # publish detections
        self._tag_pub.publish(tags_msg)
        # update healthy frequency metadata
        self._tag_pub.set_healthy_freq(self._img_sub.get_frequency())
        self._img_pub.set_healthy_freq(self._img_sub.get_frequency())
        # render visualization (if needed)
        if self._img_pub.anybody_listening() and not self._renderer_busy:
            self._renderer_busy = True
            Thread(target=self._render_detections, args=(msg, img, tags)).start()

    def _img_cb(self, msg):
        # make sure we have received camera info
        if self._camera_parameters is None:
            return
        # make sure we have a rectification map available
        if self._mapx is None or self._mapy is None:
            return
        # make sure somebody wants this
        if (not self._img_pub.anybody_listening()) and (not self._tag_pub.anybody_listening()):
            return
        # make sure this is a good time to detect (always keep this as last check)
        if not self._detection_reminder.is_time(frequency=self.detection_freq.value):
            return
        # make sure we are still running
        if self.is_shutdown:
            return
        # ---
        # find the first available worker (if any)
        for i in range(self.ndetectors):
            if self._tasks[i] is None or self._tasks[i].done():
                # submit this image to the pool
                self._tasks[i] = self._workers.submit(self._detect, i, msg)
                break

    def _render_detections(self, msg, img, detections):
        with self.profiler('/publishs_image'):
            # get a color buffer from the BW image
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
            # draw each tag
            for detection in detections:
                for idx in range(len(detection.corners)):
                    cv2.line(
                        img,
                        tuple(detection.corners[idx - 1, :].astype(int)),
                        tuple(detection.corners[idx, :].astype(int)),
                        (0, 255, 0)
                    )
                # draw the tag ID
                cv2.putText(
                    img,
                    str(detection.tag_id),
                    org=(
                        detection.corners[0, 0].astype(int) + 10,
                        detection.corners[0, 1].astype(int) + 10
                    ),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.8,
                    color=(0, 0, 255)
                )
            # pack image into a message
            img_msg = CompressedImage()
            img_msg.header.stamp = msg.header.stamp
            img_msg.header.frame_id = msg.header.frame_id
            img_msg.format = 'jpeg'
            img_msg.data = self._jpeg.encode(img)
        # ---
        self._img_pub.publish(img_msg)
        self._renderer_busy = False
Example No. 16
import numpy as np
from turbojpeg import TurboJPEG, TJSAMP_420

jpeg = TurboJPEG()

w = 3264
h = 2448

imgdata = np.zeros((h, w, 3), dtype=np.uint8)  # uint8: values above 127 would overflow int8
for y in range(h):
    # luminance ramp from 24 to 226 across the image height
    val = 24 + int(np.floor(((226.0 - 24.0) * y) / h))
    for x in range(w):
        if x < w / 4:
            imgdata[y][x] = [val, val, val]   # gray ramp
        elif x < w / 2:
            imgdata[y][x] = [val, 24, 24]     # blue ramp (BGR order)
        elif x < 3 * w / 4:
            imgdata[y][x] = [24, val, 24]     # green ramp
        else:
            imgdata[y][x] = [24, 24, val]     # red ramp

with open('multihue.jpg', 'wb') as outfile:
    outfile.write(jpeg.encode(imgdata, quality=99, jpeg_subsample=TJSAMP_420))
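
# A minimal sanity check (assuming the same jpeg instance as above): decode the
# file we just wrote and confirm the pixel buffer has the expected shape.
with open('multihue.jpg', 'rb') as infile:
    decoded = jpeg.decode(infile.read())
assert decoded.shape == (h, w, 3)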
Ejemplo n.º 17
0
class AiThermometer:
    '''
        Initialize parameters
    '''
    def __init__(self, args):
        # Create configurator
        config = configparser.ConfigParser()

        # Read configuration
        try:
            config.read(args[0].config_file)
        except Exception:
            print("Unable to read configuration file", flush=True)
            sys.exit(-1)

        # Print configuration
        print("Configuration:", flush=True)

        for key in config:
            print("[{0}]".format(key), flush=True)
            for argument in config[key]:
                print("{0} = {1}".format(argument, config[key][argument]),
                      flush=True)

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()

        # Openpose params

        # Model path
        params["model_folder"] = config['openpose']['models']

        # Face disabled
        params["face"] = False

        # Hand disabled
        params["hand"] = False

        # Net Resolution
        params["net_resolution"] = config['openpose']['network']

        # Gpu number
        params["num_gpu"] = 1  # Set GPU number

        # Gpu Id
        params[
            "num_gpu_start"] = 0  # Set GPU start id (not considering previous)

        # Starting OpenPose
        self.opWrapper = op.WrapperPython()
        self.opWrapper.configure(params)
        self.opWrapper.start()

        # Process Image
        self.datum = op.Datum()

        # Continue recordings until this param is True
        self.continue_recording = True

        # Listen thermal port
        self.thermal_port = int(config['network']['thermal_port'])

        # Listen openpose port
        self.openpose_port = int(config['network']['openpose_port'])

        # Listen json port
        self.js_port = int(config['network']['js_port'])

        # Listen image port
        self.image_port = int(config['network']['image_port'])

        # Create a queue list to store client queues (thermal images)
        self.thermal_list = []

        # Create a queue list to store client queues (openpose images)
        self.openpose_list = []

        # Create a queue list to store client queues (json frames)
        self.js_list = []

        # Minimum temperature (image reconstruction)
        self.min_temperature = float(config['thermal']['min_temperature'])

        # Maximum temperature (image reconstruction)
        self.max_temperature = float(config['thermal']['max_temperature'])

        # Set camera id (multiple camera available)
        self.id = int(config['thermal']['id'])

        # Set face min size x
        self.min_sizex = int(config['face']['min_sizex'])

        # Set face min size y
        self.min_sizey = int(config['face']['min_sizey'])

        # Set font size
        self.font_scale = float(config['face']['font_scale'])

        # Set alarm temperature
        self.alarm_temperature = float(config['face']['alarm_temperature'])

        # Camera resolution x
        self.resolution_x = int(config['thermal']['resolution_x'])

        # Camera resolution y
        self.resolution_y = int(config['thermal']['resolution_y'])

        # Reflected temperature
        self.reflected_temperature = float(
            config['thermal']['reflected_temperature'])

        # Atmospheric temperature
        self.atmospheric_temperature = float(
            config['thermal']['atmospheric_temperature'])

        # Object distance
        self.object_distance = float(config['thermal']['object_distance'])

        # Object emissivity
        self.object_emissivity = float(config['thermal']['object_emissivity'])

        # Relative humidity
        self.relative_humidity = float(config['thermal']['relative_humidity'])

        # ext_optics_temperature
        self.extoptics_temperature = float(
            config['thermal']['extoptics_temperature'])

        # ext_optics_transmission
        self.extoptics_transmission = float(
            config['thermal']['extoptics_transmission'])

        # estimated_transmission
        self.estimated_transmission = float(
            config['thermal']['estimated_transmission'])

        # Lines to be removed to correct a camera error on the retrieved image
        self.unused_lines = int(config['thermal']['unused_lines'])

        # Set compression
        self.compression = int(config['mjpeg']['compression'])

        # Show video
        self.show_video = True if int(
            config['debug']['show_video']) == 1 else False

        # Min detected temperature
        self.min_detection_temperature = int(
            config['face']['min_detection_temperature'])

        # Max detected temperature
        self.max_detection_temperature = int(
            config['face']['max_detection_temperature'])

        # Temperature compensation delta
        self.delta_temperature = float(config['face']['delta_temperature'])

        # Record sequence
        self.record_image = True if int(
            config['debug']['record_image']) == 1 else False

        # Record dir
        self.record_dir = config['debug']['record_dir']

        # Record csv
        self.record_csv = True if int(
            config['debug']['record_csv']) == 1 else False

        # Record csv filename
        self.filename_csv = config['debug']['filename_csv']

        # Debug flag
        self.debug = True if int(config['debug']['debug']) == 1 else False

        # Record raw
        self.recorder_raw = True if int(
            config['debug']['recorder_raw']) == 1 else False

        # Player raw
        self.player_raw = True if int(
            config['debug']['player_raw']) == 1 else False

        # Record raw filename
        self.filename_raw = config['debug']['filename_raw']

        # DEBUG
        try:
            # Open recorder file
            if self.recorder_raw:
                self.raw = open(config['debug']['filename_raw'], "wb")

            # Open player file
            if self.player_raw:
                self.raw = open(config['debug']['filename_raw'], "rb")
        except:
            print("Unable to open {0} local file!".format(
                config['debug']['filename_raw']),
                  flush=True)
            os._exit(-1)

        try:
            # Create target Directory
            os.mkdir(self.record_dir)
            print("Directory ", self.record_dir, " Created ", flush=True)
        except FileExistsError:
            print("Directory ", self.record_dir, " already exists", flush=True)

        # Initialize thermal server
        self.thermal_server = StreamServer(self.thermal_port,
                                           self.thermal_list, "image/jpeg")

        # Initialize openpose server
        self.openpose_server = StreamServer(self.openpose_port,
                                            self.openpose_list, "image/jpeg")

        # Initialize json server
        self.js_server = ResponseServer(self.js_port, "application/json")

        # Initialize image server
        self.image_server = ResponseServer(self.image_port, "image/jpeg")

        # Initialize temperature FIFO length and array
        self.max_t_fifo = []
        self.fifo_size = 15

        self.max_t_face_fifo = []
        self.fifo_face_size = 15

        # Initializing the mask
        self.mask = cv2.imread(config['thermal']['mask_filename'], 1)
        self.mask = cv2.cvtColor(self.mask, cv2.COLOR_BGR2GRAY)
        self.mask = self.mask > 100

        # Create jpeg object
        self.jpeg = TurboJPEG()
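
        # A hypothetical minimal configuration matching the keys read above
        # (section/option names are the ones this __init__ expects; the values
        # are placeholders):
        #
        #   [openpose]
        #   models = /path/to/openpose/models
        #   network = -1x256
        #
        #   [network]
        #   thermal_port = 8080
        #   openpose_port = 8081
        #   js_port = 8082
        #   image_port = 8083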

    '''
        Connect to thermal camera gigE
    '''

    def connect(self):
        # DEBUG: select file if debug and player are selected
        if self.player_raw:
            print("Read images from file, skip camera connect")
            return True

        # Retrieve singleton reference to system object
        self.system = PySpin.System.GetInstance()

        # Get current library version
        version = self.system.GetLibraryVersion()
        print('Spinnaker Library version: %d.%d.%d.%d' %
              (version.major, version.minor, version.type, version.build))

        # Retrieve list of cameras from the system
        cam_list = self.system.GetCameras()

        # Get camera number
        num_cameras = cam_list.GetSize()

        # Print detected camera number
        print('Number of cameras detected: %d' % num_cameras)

        # Finish if there are no cameras
        if num_cameras == 0:

            # Clear camera list before releasing system
            cam_list.Clear()

            # Release system instance
            self.system.ReleaseInstance()

            print('Not enough cameras!')

            return False

        # Use the camera selected by id (a single camera is used)
        self.camera = cam_list[self.id]

        # Clear camera list before releasing system
        cam_list.Clear()

        return True

    '''
        Acquire data from remote thermal camera
    '''

    def acquire_process(self):
        if self.player_raw:
            return self.player()

        return self.run_camera()

    '''
        Disconnect from camera and close all
    '''

    def disconnect(self):
        try:
            # Stop data recording
            self.continue_recording = False

            time.sleep(1)

            # Thermal server
            self.thermal_server.disconnect()

            # Openpose server
            self.openpose_server.disconnect()

            # js server
            self.js_server.disconnect()

            # image server
            self.image_server.disconnect()

            # DEBUG: reading raw
            if self.player_raw:
                return

            # Stopping acquisition
            self.camera.EndAcquisition()

            # Wait some time to stop recording
            time.sleep(5)

            # Deinitialize camera
            self.camera.DeInit()

            # Wait some time
            time.sleep(1)

            # Delete camera
            del self.camera

            # Release system instance
            self.system.ReleaseInstance()
        except PySpin.SpinnakerException as ex:
            print('Error: %s' % ex)

    '''
        Get images from the camera, analyze them with OpenPose, and evaluate the temperature inside a box over each face
    '''

    def acquire_images(self, cam, nodemap, nodemap_tldevice):

        sNodemap = cam.GetTLStreamNodeMap()

        # Change bufferhandling mode to NewestOnly
        node_bufferhandling_mode = PySpin.CEnumerationPtr(
            sNodemap.GetNode('StreamBufferHandlingMode'))
        if not PySpin.IsAvailable(
                node_bufferhandling_mode) or not PySpin.IsWritable(
                    node_bufferhandling_mode):
            print('Unable to set stream buffer handling mode... Aborting...')
            return False

        # Retrieve entry node from enumeration node
        node_newestonly = node_bufferhandling_mode.GetEntryByName('NewestOnly')
        if not PySpin.IsAvailable(node_newestonly) or not PySpin.IsReadable(
                node_newestonly):
            print('Unable to set stream buffer handling mode... Aborting...')
            return False

        # Retrieve integer value from entry node
        node_newestonly_mode = node_newestonly.GetValue()

        # Set integer value from entry node as new value of enumeration node
        node_bufferhandling_mode.SetIntValue(node_newestonly_mode)

        print('*** IMAGE ACQUISITION ***\n')

        try:
            # Get acquisition mode
            node_acquisition_mode = PySpin.CEnumerationPtr(
                nodemap.GetNode('AcquisitionMode'))
            if not PySpin.IsAvailable(
                    node_acquisition_mode) or not PySpin.IsWritable(
                        node_acquisition_mode):
                print(
                    'Unable to set acquisition mode to continuous (enum retrieval). Aborting...'
                )
                return False

            # Retrieve entry node from enumeration node
            node_acquisition_mode_continuous = node_acquisition_mode.GetEntryByName(
                'Continuous')
            if not PySpin.IsAvailable(
                    node_acquisition_mode_continuous) or not PySpin.IsReadable(
                        node_acquisition_mode_continuous):
                print(
                    'Unable to set acquisition mode to continuous (entry retrieval). Aborting...'
                )
                return False

            # Retrieve integer value from entry node
            acquisition_mode_continuous = node_acquisition_mode_continuous.GetValue(
            )

            # Set integer value from entry node as new value of enumeration node
            node_acquisition_mode.SetIntValue(acquisition_mode_continuous)

            print('Acquisition mode set to continuous...')

            #  Begin acquiring images
            cam.BeginAcquisition()

            print('Acquiring images...')

            #  Retrieve device serial number for filename
            device_serial_number = ''
            node_device_serial_number = PySpin.CStringPtr(
                nodemap_tldevice.GetNode('DeviceSerialNumber'))

            if PySpin.IsAvailable(
                    node_device_serial_number) and PySpin.IsReadable(
                        node_device_serial_number):
                device_serial_number = node_device_serial_number.GetValue()
                print('Device serial number retrieved as %s...' %
                      device_serial_number)

            # Retrieve and display images
            while (self.continue_recording):
                #  Retrieve next received image
                image_result = cam.GetNextImage()

                #  Ensure image completion
                if image_result.IsIncomplete():
                    print('Image incomplete with image status %d ...' %
                          image_result.GetImageStatus())
                    image_result.Release()
                    continue

                # DEBUG: record image on raw sequence
                if self.recorder_raw:
                    self.rec_image(image_result)

                # Analyze image
                self.analyze_image(image_result)

                #  Release image
                image_result.Release()

            #  End acquisition
            cam.EndAcquisition()

            # DEBUG: Close csv file
            if self.record_csv:
                self.csv.flush()
                self.csv.close()

            # DEBUG: Close raw file
            if self.recorder_raw:
                self.raw.flush()
                self.raw.close()

        except PySpin.SpinnakerException as ex:
            print('Error: %s' % ex, flush=True)
            return False

        return True

    '''
        Configure selected camera
    '''

    def run_camera(self):
        try:
            nodemap_tldevice = self.camera.GetTLDeviceNodeMap()

            # Initialize camera
            self.camera.Init()

            # Retrieve GenICam nodemap
            nodemap = self.camera.GetNodeMap()

            # Retrieve IRFormat node
            node_irformat_mode = PySpin.CEnumerationPtr(
                nodemap.GetNode("IRFormat"))

            # Check if param is available and writable
            if PySpin.IsAvailable(node_irformat_mode) and PySpin.IsWritable(
                    node_irformat_mode):
                # Set IRFormat to radiometric temperature-linear, 0.01 K resolution
                node_irformat_mode.SetIntValue(2)

                # Read value from IRFormat node
                print("IRFormat:{0}".format(node_irformat_mode.GetIntValue()))

            time.sleep(0.1)
            # Retrieve Width node
            node_width = PySpin.CIntegerPtr(nodemap.GetNode("Width"))
            # Check if param is available and writable
            if PySpin.IsAvailable(node_width) and PySpin.IsWritable(
                    node_width):
                # Width
                node_width.SetValue(self.resolution_x)

                # Read back the width value
                print("Image width:{0}".format(node_width.GetValue()))

            time.sleep(0.1)
            # Retrieve Height node
            node_height = PySpin.CIntegerPtr(nodemap.GetNode("Height"))
            # Check if param is available and writable
            if PySpin.IsAvailable(node_height) and PySpin.IsWritable(
                    node_height):
                # Set Height
                node_height.SetValue(
                    self.resolution_y
                )  # 246 gives wrong temperatures (6 black lines), while 240 generates incomplete images

                # Read back the height value
                print("Image height:{0}".format(node_height.GetValue()))

            time.sleep(0.1)
            # Retrieve PixelFormat node
            node_pixelformat = PySpin.CEnumerationPtr(
                nodemap.GetNode("PixelFormat"))
            # Check if param is available and writable
            if PySpin.IsAvailable(node_pixelformat) and PySpin.IsWritable(
                    node_pixelformat):
                # Set Mono16
                node_pixelformat.SetIntValue(
                    node_pixelformat.GetEntryByName("Mono16").GetValue())

                # Print pixel format
                print("PixelFormat:{0}".format(node_pixelformat.GetIntValue()),
                      flush=True)

            time.sleep(0.1)
            # Retrieve ReflectedTemperature node
            node_reflected_temperature = PySpin.CFloatPtr(
                nodemap.GetNode("ReflectedTemperature"))
            # Check if param is available and writable
            if PySpin.IsAvailable(
                    node_reflected_temperature) and PySpin.IsWritable(
                        node_reflected_temperature):
                # Set Value
                node_reflected_temperature.SetValue(self.reflected_temperature)

                # Print Reflected Temperature
                print("ReflectedTemperature:{0}".format(
                    node_reflected_temperature.GetValue()),
                      flush=True)

            # Retrieve AtmosphericTemperature node
            node_atmospheric_temperature = PySpin.CFloatPtr(
                nodemap.GetNode("AtmosphericTemperature"))
            # Check if param is available and writable
            if PySpin.IsAvailable(
                    node_atmospheric_temperature) and PySpin.IsWritable(
                        node_atmospheric_temperature):
                # Set Value
                node_atmospheric_temperature.SetValue(
                    self.atmospheric_temperature)

                # Print Atmospheric Temperature
                print("AtmosphericTemperature:{0}".format(
                    node_atmospheric_temperature.GetValue()),
                      flush=True)

            time.sleep(0.1)
            # Retrieve ObjectEmissivity node
            node_object_emissivity = PySpin.CFloatPtr(
                nodemap.GetNode("ObjectEmissivity"))
            # Check if param is available and writable
            if PySpin.IsAvailable(
                    node_object_emissivity) and PySpin.IsWritable(
                        node_object_emissivity):
                # Set Value
                node_object_emissivity.SetValue(self.object_emissivity)

                # Print Object Emissivity
                print("ObjectEmissivity:{0}".format(
                    node_object_emissivity.GetValue()),
                      flush=True)

            time.sleep(0.1)
            # Retrieve and change RelativeHumidity node
            node_relative_humidity = PySpin.CFloatPtr(
                nodemap.GetNode("RelativeHumidity"))
            # Check if param is available and writable
            if PySpin.IsAvailable(
                    node_relative_humidity) and PySpin.IsWritable(
                        node_relative_humidity):
                # Set Value
                node_relative_humidity.SetValue(self.relative_humidity)

                # Print Relative Humidity
                print("Changed RelativeHumidity To {0}".format(
                    node_relative_humidity.GetValue()),
                      flush=True)

            time.sleep(0.1)
            # Retrieve and change ExtOpticsTemperature node
            node_extoptics_temperature = PySpin.CFloatPtr(
                nodemap.GetNode("ExtOpticsTemperature"))

            # Check if param is available and writable
            if PySpin.IsAvailable(
                    node_extoptics_temperature) and PySpin.IsWritable(
                        node_extoptics_temperature):
                # Set Value
                node_extoptics_temperature.SetValue(self.extoptics_temperature)

                # Print ExtOpticsTemperature
                print("Changed ExtOpticsTemperature To {0}".format(
                    node_extoptics_temperature.GetValue()),
                      flush=True)

            time.sleep(0.1)
            # Retrieve and change ExtOpticsTransmission node
            node_extoptics_transmission = PySpin.CFloatPtr(
                nodemap.GetNode("ExtOpticsTransmission"))

            # Check if param is available and writable
            if PySpin.IsAvailable(
                    node_extoptics_transmission) and PySpin.IsWritable(
                        node_extoptics_transmission):

                # Set Value
                node_extoptics_transmission.SetValue(
                    self.extoptics_transmission)

                # Print ExtOpticsTransmission
                print("ExtOpticsTransmission:{0}".format(
                    node_extoptics_transmission.GetValue()),
                      flush=True)

            time.sleep(0.1)
            # Retrieve and change EstimatedTransmission node
            node_estimated_transmission = PySpin.CFloatPtr(
                nodemap.GetNode("EstimatedTransmission"))
            if PySpin.IsAvailable(
                    node_estimated_transmission) and PySpin.IsWritable(
                        node_estimated_transmission):

                # Set Value
                node_estimated_transmission.SetValue(
                    self.estimated_transmission)

                # Print EstimatedTransmission
                print("EstimatedTransmission:{0}".format(
                    node_estimated_transmission.GetValue()),
                      flush=True)

            time.sleep(1)

            # Start video servers
            self.thermal_server.activate()

            self.openpose_server.activate()

            # Start json server
            self.js_server.activate()

            # Start image server
            self.image_server.activate()

            # Acquire images
            self.acquire_images(self.camera, nodemap, nodemap_tldevice)

        except PySpin.SpinnakerException as ex:
            print('Error: %s' % ex)

    '''
        Helper function for recovering the temperature (°C) from a raw pixel value
        Reference: https://graftek.biz/system/files/15690/original/FLIR_Genicam.pdf?1571772310
    '''

    def get_temperature(self, pixel_value):
        temperature = pixel_value * 0.01 - 273.15
        return temperature
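        # Worked example: in temperature-linear mode a raw value of 31000
        # maps to 31000 * 0.01 - 273.15 = 36.85 °C (0.01 K per count).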

    '''
        Helper function for updating a FIFO
    '''

    def update_fifo(self, fifo, fifo_size, new_value):
        if (len(fifo) > fifo_size):
            fifo.pop(0)

        fifo.append(new_value)
        return fifo

    '''
        Helper function for collecting average FIFO value
    '''

    def get_fifo_avg(self, fifo, min_len_to_compute=0):
        if (len(fifo) > min_len_to_compute):
            return np.average(fifo)
        else:
            return 0
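
    # Usage sketch for the two helpers above (illustrative):
    #   fifo = self.update_fifo(fifo, self.fifo_size, new_max)   # push with a capped window
    #   smooth = self.get_fifo_avg(fifo, self.fifo_size)         # 0 until enough samples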

    '''
        Analyze images (and send over network queues)
    '''

    def analyze_image(self, image_result):

        # Get image dimensions
        width = image_result.GetWidth()
        height = image_result.GetHeight()

        # Getting the image data as a numpy array
        image_data = image_result.GetNDArray()

        ## MBMB ->
        # Updating the FIFO
        min_image_temperature = self.get_temperature(
            np.amin(image_data)) + self.delta_temperature

        max_image_temperature = self.get_temperature(
            np.amax(image_data * self.mask)) + self.delta_temperature

        if max_image_temperature > 25:
            self.max_t_fifo = self.update_fifo(self.max_t_fifo, self.fifo_size,
                                               max_image_temperature)
        else:
            self.max_t_fifo = []

        temp_smooth = self.get_fifo_avg(self.max_t_fifo, self.fifo_size)
        ## <- MBMB
        '''
            Calculate image to send via mjpeg to remote client and openpose compliant image
        '''
        # Convert image to BGR, using threshold temperatures (manual parameters)
        in_img = image_result.GetData().reshape((height, width))

        temp_max_thr = self.max_temperature  # Max temperature
        temp_min_thr = self.min_temperature  # Min temperature

        # Calculate thresholds
        pixel_max_thr = int((temp_max_thr + 273.15) / 0.01)
        pixel_min_thr = int((temp_min_thr + 273.15) / 0.01)
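        # (inverse of get_temperature: pixel = (T_celsius + 273.15) / 0.01)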

        # Threshold image
        in_img_rw = copy.deepcopy(in_img)
        in_img_rw[in_img_rw > pixel_max_thr] = pixel_max_thr
        in_img_rw[in_img_rw < pixel_min_thr] = pixel_min_thr
        in_img_rw[0, 0] = pixel_max_thr
        in_img_rw[0, 1] = pixel_min_thr

        # Normalize image
        raw_frame = cv2.normalize(in_img_rw,
                                  None,
                                  0,
                                  255,
                                  cv2.NORM_MINMAX,
                                  dtype=cv2.CV_8U)

        # Crop the unused lines at the bottom of the frame (single 2D slice)
        raw_frame = raw_frame[0:self.resolution_y - self.unused_lines,
                              0:self.resolution_x]

        # Invert levels
        gray_inverted = cv2.bitwise_not(raw_frame)

        # Convert inverted grayscale to Color RGB format (openpose input)
        image_openpose = cv2.cvtColor(gray_inverted, cv2.COLOR_GRAY2BGR)

        # Colorize Image (Use it to write geometry and send to streaming)
        to_send_image = cv2.applyColorMap(raw_frame, cv2.COLORMAP_JET)

        ## MBMB ->
        # DEBUG: Plotting the location of the max temperature
        if self.debug:
            # Trace circles on minimal and maximum temperature
            coords_max = np.unravel_index(
                np.argmax(image_data * self.mask, axis=None), image_data.shape)
            cv2.circle(to_send_image, (coords_max[1], coords_max[0]),
                       radius=10,
                       color=(255, 255, 255),
                       thickness=2)

            coords_min = np.unravel_index(np.argmin(image_data, axis=None),
                                          image_data.shape)
            cv2.circle(to_send_image, (coords_min[1], coords_min[0]),
                       radius=10,
                       color=(0, 0, 0),
                       thickness=2)

            # Print Temperatures
            if temp_smooth > 0:
                text_str = 'Max T: {:.2f}C - Min T: {:.2f}C - Smooth: {:.2f}C'.format(
                    self.get_temperature(np.amax(image_data * self.mask)),
                    self.get_temperature(np.amin(image_data)), temp_smooth)
            else:
                text_str = 'Max T: {:.2f}C - Min T: {:.2f}C'.format(
                    self.get_temperature(np.amax(image_data * self.mask)),
                    self.get_temperature(np.amin(image_data)))

            font_temperature = cv2.FONT_HERSHEY_DUPLEX
            font_scale = self.font_scale
            font_thickness = 1
            color = (255, 255, 255)

            text_w, text_h = cv2.getTextSize(text_str, font_temperature,
                                             font_scale, font_thickness)[0]

            px = int(5)
            py = int(5)

            # Draw text rectangle
            cv2.rectangle(to_send_image, (px - 5, py - 5),
                          (px + text_w + 5, py + text_h + 5), color, -1)

            # Draw Text
            cv2.putText(to_send_image, text_str, (px, py + text_h),
                        font_temperature, font_scale, (0, 0, 0),
                        font_thickness, cv2.LINE_AA)
        ## <- MBMB

        # Set image to openpose video
        self.datum.cvInputData = image_openpose

        self.opWrapper.emplaceAndPop([self.datum])

        # Get openpose output (convert to int all value)
        bodys = np.array(self.datum.poseKeypoints).astype(int).tolist()

        # record only one thermal shot
        one_snapshot = True

        # Open csv
        if self.record_csv:
            self.csv = open(self.filename_csv, "a")

        # Face geometry and temperature container
        js_packet = {}

        # Json dataset
        body_packet = []

        # If a body is recognized
        if type(bodys) is list:
            # Remove probability from joints
            temporary_bodys = []
            for body in bodys:
                temporary_bodys.append([reduced[0:2] for reduced in body])

            bodys = temporary_bodys

            for body in bodys:
                # Face points (0, 15, 16, 17, 18) referring to the OpenPose BODY_25 format
                face = [
                    [int(body[0][0]), int(body[0][1])],  # Nose
                    [int(body[15][0]), int(body[15][1])],  # Right eye
                    [int(body[16][0]), int(body[16][1])],  # Left eye
                    [int(body[17][0]), int(body[17][1])],  # Right ear
                    [int(body[18][0]), int(body[18][1])]
                ]  # Left ear

                # Select the best face size
                if 0 not in face[0] and 0 not in face[1] and 0 not in face[2]:
                    # Get line values from eyes line to neck
                    if (0 not in face[4]) and (
                            0 not in face[3]):  # Both ears visible
                        size_x = int(abs(face[3][0] - face[4][0]) / 2)
                        size_y = int(abs(face[3][0] - face[4][0]) / 2)
                    elif (0 in face[4]) and (
                            0 not in face[3]):  # Right Ear, no Left Ear
                        size_x = int(abs(face[3][0] - face[2][0]) / 2)
                        size_y = int(abs(face[3][0] - face[2][0]) / 2)
                    elif (0 not in face[4]) and (
                            0 in face[3]):  # Left Ear, no Right Ear
                        size_x = int(abs(face[1][0] - face[4][0]) / 2)
                        size_y = int(abs(face[1][0] - face[4][0]) / 2)
                    else:  # Left and Right ears are not available
                        size_x = int(abs(face[1][0] - face[2][0]) / 2)
                        size_y = int(abs(face[1][0] - face[2][0]) / 2)

                    # Set min face size x
                    min_sx = self.min_sizex

                    # Set min face size y
                    min_sy = self.min_sizey

                    # If the face is too small, fall back to the minimum size
                    size_x = size_x if size_x > min_sx else min_sx
                    size_y = size_y if size_y > min_sy else min_sy

                    # Set face center
                    reference_x = face[0][0]
                    reference_y = face[0][1]

                    offset_x = 0
                    offset_y = 0

                    # Calculate average values in face rect
                    counter = 0
                    average = 0
                    max_temperature = 0.0
                    for y in range(reference_x - size_x + offset_x,
                                   reference_x + size_x + offset_x):
                        for x in range(reference_y - size_y + offset_y,
                                       reference_y + size_y + offset_y):
                            # Get temperature https://graftek.biz/system/files/15690/original/FLIR_Genicam.pdf?1571772310
                            if x < self.resolution_y and y < self.resolution_x:
                                temperature = self.get_temperature(
                                    image_data[x][y])

                                # Find max temperature
                                if temperature > max_temperature:
                                    max_temperature = temperature

                                average += temperature
                                counter += 1

                    # Calculate average
                    if counter != 0:
                        temperature = average / counter

                    # Compensate uncalibrated temperature
                    temperature += self.delta_temperature
                    max_temperature += self.delta_temperature

                    # Filter too low temperature face detection error
                    if temperature < self.min_detection_temperature:
                        continue

                    # Filter out face detections with implausibly high temperature
                    if temperature > self.max_detection_temperature:
                        continue

                    # json alarm flag
                    alarm = 0

                    # Alarm temperature show red color rectangle
                    if max_temperature > self.alarm_temperature:
                        color = (0, 0, 255)
                        alarm = 1
                    else:
                        color = (255, 0, 0)
                        alarm = 0

                    # Draw face Rectangle
                    cv2.rectangle(to_send_image,
                                  (reference_x - size_x + offset_x,
                                   reference_y - size_y + offset_y),
                                  (reference_x + size_x + offset_x,
                                   reference_y + size_y + offset_y), color, 5)

                    # Write temperature
                    ## MBMB

                    text_str = '{0:.2f}C'.format(max_temperature)
                    font_temperature = cv2.FONT_HERSHEY_DUPLEX
                    font_scale = self.font_scale
                    font_thickness = 1

                    text_w, text_h = cv2.getTextSize(text_str,
                                                     font_temperature,
                                                     font_scale,
                                                     font_thickness)[0]

                    px = int(reference_x)
                    py = int(reference_y + size_y / 2)

                    # Draw text rectangle
                    cv2.rectangle(to_send_image, (px, py),
                                  (px + text_w, py - text_h), color, -1)

                    # Draw Text
                    cv2.putText(to_send_image, text_str, (px, py),
                                font_temperature, font_scale, (255, 255, 255),
                                font_thickness, cv2.LINE_AA)

                    # Get right eye temperature from the thermal image
                    righteye_temperature = self.get_temperature(
                        image_data[face[1][1]][face[1][0]]) + self.delta_temperature

                    cv2.circle(to_send_image, (face[1][0], face[1][1]), 2,
                               color, 2)

                    cv2.putText(to_send_image,
                                "{0:.2f}".format(righteye_temperature),
                                (face[1][0], face[1][1]), font_temperature,
                                font_scale / 2, (255, 255, 255),
                                font_thickness, cv2.LINE_AA)

                    # Get left eye temperature from the thermal image
                    lefteye_temperature = self.get_temperature(
                        image_data[face[2][1]][face[2][0]]) + self.delta_temperature

                    cv2.circle(to_send_image, (face[2][0], face[2][1]), 2,
                               color, 2)

                    cv2.putText(to_send_image,
                                "{0:.2f}".format(lefteye_temperature),
                                (face[2][0], face[2][1]), font_temperature,
                                font_scale / 2, (255, 255, 255),
                                font_thickness, cv2.LINE_AA)

                    # Print data
                    ts = int(round(time.time() * 1000))

                    dt_string = "{0:.2f},{1:.2f},{2:.2f},{3:.2f},{4:.2f},{5:.2f},{6:.2f},{7}\n".format(
                        temperature, max_temperature, min_image_temperature,
                        max_image_temperature, lefteye_temperature,
                        righteye_temperature, temp_smooth, ts)

                    print(dt_string, end="", flush=True)

                    if self.record_image and one_snapshot:
                        f = open(self.record_dir + "/" + str(ts) + ".raw",
                                 "wb")
                        f.write(image_result.GetData())
                        f.close()
                        one_snapshot = False

                    if self.record_csv:
                        self.csv.write(dt_string)
                        self.csv.flush()

                    body_packet.append([
                        body, reference_x, reference_y, size_x, size_y,
                        "{0:.2f}".format(max_temperature), alarm
                    ])

        # Store face geometry
        js_packet["geometries"] = body_packet

        if self.show_video:
            # Show openpose
            cv2.imshow("Openpose output", self.datum.cvOutputData)

            # Show mjpeg output
            cv2.imshow("Mjpeg colorized", to_send_image)

            # Handle signals and wait some time
            cv2.waitKey(1)

        # Get timestamp
        ts = int(round(time.time() * 1000))

        # Store timestamp
        js_packet["ts"] = ts

        # Put thermal image into queue for each server thread
        self.send_image(self.thermal_list, to_send_image, ts)

        # Put openpose image into queue for each server thread
        self.send_image(self.openpose_list, self.datum.cvOutputData, ts)

        # Put json into instant locked memory
        self.js_server.put(bytes(json.dumps(js_packet), "UTF-8"))

        # Put image into instant locked memory
        self.image_server.put(
            self.jpeg.encode(to_send_image, quality=self.compression))

    '''
        Send image over queue list and then over http mjpeg stream
    '''

    def send_image(self, queue_list, image, ts):

        encoded_image = self.jpeg.encode(image, quality=self.compression)
        # Put thermal image into queue for each server thread
        for q in queue_list:
            try:
                block = (ts, encoded_image)
                q.put(block, True, 0.02)
            except queue.Full:
                pass

    '''
        Send block over queue list and then over http mjpeg stream
    '''

    def send_jsdata(self, queue_list, js_data, ts):
        for q in queue_list:
            try:
                block = (ts, js_data)
                q.put(block, True, 0.02)
            except queue.Full:
                pass

    '''
        DEBUG: Record images on raw stream
    '''

    def rec_image(self, image_result):
        self.raw.write(image_result.GetData())  # raw Mono16 pixel buffer

    '''
        DEBUG: Play images directly from raw file
    '''

    def player(self):
        # Start video servers
        self.thermal_server.activate()

        self.openpose_server.activate()

        # Start json server
        self.js_server.activate()

        # Start image server
        self.image_server.activate()

        time.sleep(1)

        while self.continue_recording:
            # Read image from file raw
            image_result = self.raw.read(self.resolution_x *
                                         self.resolution_y * 2)

            # If file is eof, rewind
            if len(image_result) != self.resolution_x * self.resolution_y * 2:
                print("Eof, Rewind!", flush=True)
                self.raw.seek(0)
                continue

            # Create Image
            image = PySpin.Image.Create(self.resolution_x, self.resolution_y,
                                        0, 0, PySpin.PixelFormat_Mono16,
                                        np.array(image_result))

            # Analyze image
            self.analyze_image(image)

            # Simulate real acquisition
            time.sleep(0.02)

        self.raw.close()
Ejemplo n.º 18
0
from PIL import Image
from time import perf_counter
import numpy as np
from mmap import mmap
from turbojpeg import TurboJPEG, TJSAMP_420
mm = mmap(-1, 2**24, tagname='SharedMemory')  # Windows-only named shared memory


def timing(n, f, initial):
    t = 0
    for i in range(n):
        initial()
        start = perf_counter()
        f(i)
        t += perf_counter() - start
    return t


initial = lambda: mm.seek(0)
jpeg = TurboJPEG("C:/Users/lotress/MoePhoto/test/libturbojpeg.dll")
filePath = r'C:\Users\lotress\Documents\福州轨道交通线路图(2050+)@chinho.jpg'
imgFile = open(filePath, 'rb')
imgBuf = imgFile.read()
imgFile.seek(0)
img1 = jpeg.decode(imgBuf)
img2 = Image.fromarray(np.array(Image.open(imgFile)))
imgFile.close()
f1 = lambda kwargs: lambda _: mm.write(jpeg.encode(img1, **kwargs))
f2 = lambda _: img2.save(mm, 'jpeg')
print('Timing JPEG encoding by libjpeg-turbo: ',
      timing(1, f1({'jpeg_subsample': TJSAMP_420}), initial))
print('Timing JPEG encoding by Pillow: ', timing(1, f2, initial))
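
A symmetric decode comparison can reuse the same timing harness; a minimal sketch under the same assumptions (jpeg, imgBuf, filePath, and initial as defined above):

g1 = lambda _: jpeg.decode(imgBuf)
g2 = lambda _: np.array(Image.open(filePath))
print('Timing JPEG decoding by libjpeg-turbo: ', timing(1, g1, initial))
print('Timing JPEG decoding by Pillow: ', timing(1, g2, initial))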
Ejemplo n.º 19
0
from turbojpeg import TurboJPEG
import requests

url = "https://upload.wikimedia.org/wikipedia/commons/9/98/03-bryone-dioique.jpg"
r = requests.get(url, allow_redirects=True)

jpeg = TurboJPEG()

with open("03-bryone-dioique.jpg", "wb") as test_file:
    test_file.write(r.content)

with open("03-bryone-dioique.jpg", "rb") as in_file:
    bgr_array = jpeg.decode(in_file.read())

with open("output.jpg", "wb") as out_file:
    out_file.write(jpeg.encode(bgr_array))
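
The temporary file is only needed to keep a copy on disk; assuming the same jpeg instance, the downloaded bytes can also be decoded straight from memory:

bgr_array = jpeg.decode(r.content)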
Ejemplo n.º 20
0
class DatasetFolderDCT(VisionDataset):
    def __init__(self,
                 root,
                 loader,
                 extensions=None,
                 transform=None,
                 target_transform=None,
                 is_valid_file=None,
                 subset=0):
        super(DatasetFolderDCT, self).__init__(root)
        self.transform = transform
        self.target_transform = target_transform
        classes, class_to_idx = self._find_classes(self.root)
        samples = make_dataset(self.root, class_to_idx, extensions,
                               is_valid_file)
        if len(samples) == 0:
            raise RuntimeError("Found 0 files in subfolders of: " +
                               self.root + "\n"
                               "Supported extensions are: " +
                               ",".join(extensions))

        self.loader = loader
        self.extensions = extensions

        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples
        self.targets = [s[1] for s in samples]
        self.jpeg = TurboJPEG('/usr/lib/libturbojpeg.so')
        self.subset = list(map(int, subset.split(','))) if subset else []

    def _find_classes(self, dir):
        if sys.version_info >= (3, 5):
            # Faster and available in Python 3.5 and above
            classes = [d.name for d in os.scandir(dir) if d.is_dir()]
        else:
            classes = [
                d for d in os.listdir(dir)
                if os.path.isdir(os.path.join(dir, d))
            ]
        classes.sort()
        class_to_idx = {classes[i]: i for i in range(len(classes))}
        return classes, class_to_idx

    def __getitem__(self, index):
        path, target = self.samples[index]
        sample = self.loader(path)

        if self.transform is not None:
            sample = self.transform(sample)

        # PIL to numpy
        sample = np.asarray(sample)
        # RGB to BGR
        sample = sample[:, :, ::-1]
        # JPEG Encode
        sample = np.ascontiguousarray(sample, dtype="uint8")
        sample = self.jpeg.encode(sample, quality=100, jpeg_subsample=2)
        dct_y, dct_cb, dct_cr = loads(sample)  # 28

        dct_y, dct_cb, dct_cr = torch.from_numpy(dct_y).permute(2, 0, 1).float(), \
                                torch.from_numpy(dct_cb).permute(2, 0, 1).float(), \
                                torch.from_numpy(dct_cr).permute(2, 0, 1).float()


        if self.subset:
            dct_y, dct_cb, dct_cr = dct_y[self.subset[0]:self.subset[1]], dct_cb[self.subset[0]:self.subset[1]], \
                                    dct_cr[self.subset[0]:self.subset[1]]

        if self.target_transform is not None:
            dct_y = self.target_transform[0](dct_y)
            dct_cb = self.target_transform[1](dct_cb)
            dct_cr = self.target_transform[2](dct_cr)

        return dct_y, dct_cb, dct_cr, target

    def __len__(self):
        return len(self.samples)
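
The core of __getitem__ above is an RGB-image-to-DCT-coefficient pipeline: flip RGB to BGR, JPEG-encode at quality 100 with 4:2:0 subsampling, parse the coefficients with loads() (presumably from jpeg2dct), and lift them into CHW float tensors. A minimal standalone sketch of that pipeline (the function name is illustrative):

def image_to_dct_tensors(pil_img, jpeg):
    img = np.ascontiguousarray(np.asarray(pil_img)[:, :, ::-1], dtype="uint8")  # RGB -> BGR
    buf = jpeg.encode(img, quality=100, jpeg_subsample=2)  # 2 == 4:2:0 chroma subsampling
    dct_y, dct_cb, dct_cr = loads(buf)  # luma blocks are (H/8, W/8, 64); chroma are subsampled
    to_chw = lambda a: torch.from_numpy(a).permute(2, 0, 1).float()
    return to_chw(dct_y), to_chw(dct_cb), to_chw(dct_cr)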
Ejemplo n.º 21
0
class RectifierNode(DTROS):
    def __init__(self, node_name):
        super().__init__(node_name, node_type=NodeType.PERCEPTION)

        # parameters
        self.publish_freq = DTParam("~publish_freq", -1)
        self.alpha = DTParam("~alpha", 0.0)

        # utility objects
        self.jpeg = TurboJPEG()
        self.reminder = DTReminder(frequency=self.publish_freq.value)
        self.camera_model = None
        self.rect_camera_info = None
        self.mapx, self.mapy = None, None

        # subscribers
        self.sub_img = rospy.Subscriber("~image_in",
                                        CompressedImage,
                                        self.cb_image,
                                        queue_size=1,
                                        buff_size=10 * 1024 ** 2)  # buff_size is in bytes (int), not a string
        self.sub_camera_info = rospy.Subscriber("~camera_info_in",
                                                CameraInfo,
                                                self.cb_camera_info,
                                                queue_size=1)

        # publishers
        self.pub_img = rospy.Publisher(
            "~image/compressed",
            CompressedImage,
            queue_size=1,
            dt_topic_type=TopicType.PERCEPTION,
            dt_healthy_freq=self.publish_freq.value,
            dt_help=
            "Rectified image (i.e., image with no distortion effects from the lens).",
        )
        self.pub_camera_info = rospy.Publisher(
            "~camera_info",
            CameraInfo,
            queue_size=1,
            dt_topic_type=TopicType.PERCEPTION,
            dt_healthy_freq=self.publish_freq.value,
            dt_help="Camera parameters for the (virtual) rectified camera.",
        )

    def cb_camera_info(self, msg):
        # unsubscribe from camera_info
        self.loginfo(
            "Camera info message received. Unsubscribing from camera_info topic."
        )
        # noinspection PyBroadException
        try:
            self.sub_camera_info.shutdown()
        except BaseException:
            pass
        # ---
        H, W = msg.height, msg.width
        # create new camera info
        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(msg)
        # find optimal rectified pinhole camera
        with self.profiler("/cb/camera_info/get_optimal_new_camera_matrix"):
            rect_camera_K, _ = cv2.getOptimalNewCameraMatrix(
                self.camera_model.K, self.camera_model.D, (W, H),
                self.alpha.value)
        # create rectification map
        with self.profiler("/cb/camera_info/init_undistort_rectify_map"):
            self.mapx, self.mapy = cv2.initUndistortRectifyMap(
                self.camera_model.K, self.camera_model.D, None, rect_camera_K,
                (W, H), cv2.CV_32FC1)
        # pack rectified camera info into a CameraInfo message
        self.rect_camera_info = CameraInfo(
            width=W,
            height=H,
            K=rect_camera_K.flatten().tolist(),
            R=np.eye(3).flatten().tolist(),
            P=np.zeros((3, 4)).flatten().tolist(),
        )

    def cb_image(self, msg):
        # make sure this matters to somebody
        if not self.pub_img.anybody_listening(
        ) and not self.pub_camera_info.anybody_listening():
            return
        # make sure we have a map to use
        if self.mapx is None or self.mapy is None:
            return
        # make sure the node is not switched off
        if not self.switch:
            return
        # make sure this is a good time to publish (always keep this as last check)
        if not self.reminder.is_time(frequency=self.publish_freq.value):
            return
        # turn 'compressed distorted image message' into 'raw distorted image'
        with self.profiler("/cb/image/decode"):
            dist_img = self.jpeg.decode(msg.data)
        # run input image through the lens map
        with self.profiler("/cb/image/rectify"):
            rect_img = cv2.remap(dist_img, self.mapx, self.mapy,
                                 cv2.INTER_NEAREST)
        # turn 'raw rectified image' into 'compressed rectified image message'
        with self.profiler("/cb/image/encode"):
            rect_img_msg = CompressedImage(format="jpeg",
                                           data=self.jpeg.encode(rect_img))
        # maintain original header
        rect_img_msg.header.stamp = msg.header.stamp
        rect_img_msg.header.frame_id = msg.header.frame_id
        self.rect_camera_info.header.stamp = msg.header.stamp
        self.rect_camera_info.header.frame_id = msg.header.frame_id
        # publish image
        self.pub_img.publish(rect_img_msg)
        # publish camera info
        self.pub_camera_info.publish(self.rect_camera_info)
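
Outside ROS the same rectification boils down to three OpenCV calls; a minimal sketch with hypothetical calibration values (K, D, and the frame below are placeholders, not from the node):

import cv2
import numpy as np

K = np.array([[300.0, 0.0, 320.0], [0.0, 300.0, 240.0], [0.0, 0.0, 1.0]])  # hypothetical intrinsics
D = np.array([-0.25, 0.05, 0.0, 0.0, 0.0])                                 # hypothetical distortion
img = np.zeros((480, 640, 3), dtype=np.uint8)                              # placeholder frame

H, W = img.shape[:2]
new_K, _ = cv2.getOptimalNewCameraMatrix(K, D, (W, H), 0.0)
mapx, mapy = cv2.initUndistortRectifyMap(K, D, None, new_K, (W, H), cv2.CV_32FC1)
rect = cv2.remap(img, mapx, mapy, cv2.INTER_NEAREST)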
Ejemplo n.º 22
0
class TinyDatasetFolder(VisionDataset):
    def __init__(self,
                 root,
                 loader,
                 extensions=None,
                 transform=None,
                 target_transform=None,
                 is_valid_file=None,
                 quality=None):
        super(TinyDatasetFolder, self).__init__(root)
        self.transform = transform
        self.target_transform = target_transform
        classes, class_to_idx = self._find_classes(self.root)
        samples = make_dataset(self.root, class_to_idx, extensions,
                               is_valid_file)
        if len(samples) == 0:
            raise RuntimeError("Found 0 files in subfolders of: " +
                               self.root + "\n"
                               "Supported extensions are: " +
                               ",".join(extensions))
        self.jpeg = TurboJPEG('/home/kai.x/work/local/lib/libturbojpeg.so')

        self.loader = loader
        self.extensions = extensions

        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples
        self.targets = [s[1] for s in samples]
        self.quality = quality

    def _find_classes(self, dir):
        if sys.version_info >= (3, 5):
            # Faster and available in Python 3.5 and above
            classes = [d.name for d in os.scandir(dir) if d.is_dir()]
        else:
            classes = [
                d for d in os.listdir(dir)
                if os.path.isdir(os.path.join(dir, d))
            ]
        classes.sort()
        class_to_idx = {classes[i]: i for i in range(len(classes))}
        return classes, class_to_idx

    def __getitem__(self, index):
        path, target = self.samples[index]
        sample = self.loader(path)

        # RGB -> BGR
        img = np.asarray(sample)
        img = img[:, :, ::-1]
        # Convert to uint8, this is critical
        img = np.ascontiguousarray(img, dtype="uint8")

        encoded_img = self.jpeg.encode(img, quality=self.quality)
        decoded_img = self.jpeg.decode(encoded_img)  # BGR

        # BGR -> RGB
        sample = decoded_img[:, :, ::-1]
        sample = Image.fromarray(sample)

        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)

        return sample, target

    def __len__(self):
        return len(self.samples)
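
Usage sketch for the dataset above (hypothetical paths and arguments; pil_loader comes from torchvision.datasets.folder, and the hard-coded libturbojpeg path in __init__ is assumed to resolve). Each sample is JPEG re-compressed at the given quality before the usual transforms, which is useful for studying robustness to compression artifacts:

from torchvision.datasets.folder import pil_loader
from torchvision import transforms

dataset = TinyDatasetFolder('/data/tiny-imagenet/train', pil_loader,
                            extensions=('.jpeg',), transform=transforms.ToTensor(),
                            quality=20)
sample, target = dataset[0]  # sample has been degraded at JPEG quality 20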
Ejemplo n.º 23
0
	out_img = 'jpegs/single_v0.jpg'
	in_file = open(out_img, 'rb')
	bgr_array = jpeg.decode(in_file.read())
	in_file.close()
	return None
	
if __name__ == "__main__":
	args = parse_options()
	if args.time_compress_decompress:
		jpeg = TurboJPEG()
		quality = 100
		out_img = 'jpegs/single_v0.jpg'
		in_img = 'imgs/output_fwd_v0.png'
		bgr_array = cv2.imread(in_img)
		out_file = open(out_img, 'wb')
		out_file.write(jpeg.encode(bgr_array, quality=quality))
		out_file.close()
		####### decompress #########
		print('Decompress:\n')
		timeval = do_benchmarking_decompress(out_img)
		out_csv = 'chrono/runtime_decompress_v0_single_py.csv'
		f = open(out_csv, 'w')
		f.write('v0\n')
		for value in timeval:
			f.write(str(value))
			f.write('\n')
		f.close()
		####### compress #########
		print('Compress:\n')
		timeval = do_benchmarking_compress(bgr_array, quality, out_img)
		out_csv = 'chrono/runtime_compress_v0_single_py.csv'