Example #1
class ImageFolder(torchvision.datasets.ImageFolder):
    """A generic data loader where the images are arranged in this way: ::

        root/dog/xxx.png
        root/dog/xxy.png
        root/dog/[...]/xxz.png

        root/cat/123.png
        root/cat/nsdf3.png
        root/cat/[...]/asd932_.png

    Args:
        root (string): Root directory path.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.

    Attributes:
        classes (list): List of the class names sorted alphabetically.
        class_to_idx (dict): Dict with items (class_name, class_index).
        imgs (list): List of (image path, class_index) tuples
    """

    def __init__(
            self,
            root: str,
            transform: Optional[Callable] = None,
            target_transform: Optional[Callable] = None
    ):
        super(ImageFolder, self).__init__(root, transform, target_transform)
        self.jpeg: Optional[TurboJPEG] = None

    def read_image_to_bytes(self, path: str) -> bytes:
        with open(path, 'rb') as fd:
            return fd.read()

    def decode_img_libjpeg_turbo(self, img_str: bytes):
        if self.jpeg is None:
            self.jpeg = TurboJPEG(lib_path=local_libturbo_path)
        bgr_array = self.jpeg.decode(img_str)
        return bgr_array

    def __getitem__(self, idx: int):
        path = self.imgs[idx][0]
        label = self.imgs[idx][1]

        if path.endswith((".jpg", ".jpeg")):
            img_str = self.read_image_to_bytes(path)
            img = self.decode_img_libjpeg_turbo(img_str)
        else:
            img = cv2.imread(path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        if self.transform:
            img = self.transform(img)

        img = img.numpy()
        return img.astype('float32'), label
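If RGB output is acceptable, PyTurboJPEG can decode straight to RGB and the separate cv2.cvtColor pass above becomes unnecessary. A minimal sketch, assuming the same local_libturbo_path as the example (lib_path is only needed when the library is off the default search path):

from turbojpeg import TurboJPEG, TJPF_RGB

jpeg = TurboJPEG()  # or TurboJPEG(lib_path=local_libturbo_path)

def load_rgb(path: str):
    # decode directly to RGB, skipping the BGR -> RGB conversion
    with open(path, 'rb') as fd:
        return jpeg.decode(fd.read(), pixel_format=TJPF_RGB)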
Example #2
    from nvidia.dali.plugin.pytorch import feed_ndarray
    from nvidia.dali.plugin.pytorch import DALIGenericIterator

    @pipeline_def(batch_size=1,
                  num_threads=cfg['datasets']['train']['pipeline_threads'],
                  device_id=0)
    def pipe_gds():
        data = fn.readers.numpy(
            device='gpu', file_root=cfg['datasets']['train']['dataroot_HR'])
        return data


if cfg['datasets']['train']['loading_backend'] == "turboJPEG" or cfg[
        'datasets']['val']['loading_backend'] == "turboJPEG":
    from turbojpeg import TurboJPEG
    jpeg_reader = TurboJPEG()


class VimeoTriplet(Dataset):
    def __init__(self, data_root):
        upper_folders = glob.glob(data_root + "/*/")

        self.samples = []

        # getting subfolders of folders
        for f in upper_folders:
            self.samples.append(glob.glob(f + "/*/"))

        # flatten the per-folder lists into a single sample list
        self.samples = [item for sublist in self.samples for item in sublist]
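For reference, the two-level walk above can be collapsed into a single glob over both directory levels; a sketch, assuming the same root/<sequence>/<clip>/ layout:

import glob
import os

def list_clip_dirs(data_root: str):
    # matches root/<sequence>/<clip>/ in one pass; the trailing empty
    # component makes the pattern match directories only
    return sorted(glob.glob(os.path.join(data_root, "*", "*", "")))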
Example #3
import cv2
import numpy as np
from turbojpeg import TurboJPEG

from .consts import jpg_file

jpeg = TurboJPEG()
with open(jpg_file, 'rb') as in_file:
    imgbytes = in_file.read()


def check(img):
    assert isinstance(img, np.ndarray)
    assert img.shape == (300, 400, 3)
    assert img.dtype == np.uint8
    assert (img[0, 0] == [4, 3, 5]).all()
    assert (img == run_opencv()).all()


def run_opencv():
    img_np = np.frombuffer(imgbytes, np.uint8)
    img = cv2.imdecode(img_np, flags=cv2.IMREAD_COLOR)
    return img


def run_turbojpeg():
    img = jpeg.decode(imgbytes)
    return img
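A small timeit harness over the two decoders defined above makes the comparison concrete (numbers depend on the test image and the installed libjpeg-turbo build):

import timeit

if __name__ == '__main__':
    n = 100
    t_cv = timeit.timeit(run_opencv, number=n)
    t_tj = timeit.timeit(run_turbojpeg, number=n)
    print("OpenCV:    {0:.2f} ms/decode".format(t_cv / n * 1e3))
    print("TurboJPEG: {0:.2f} ms/decode".format(t_tj / n * 1e3))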
Example #4
class SocialDistancing:
    colors = [(0, 255, 0), (0, 0, 255)]

    nd_color = [(153, 0, 51), (153, 0, 0), (153, 51, 0), (153, 102, 0),
                (153, 153, 0), (102, 153, 0), (51, 153, 0), (0, 153, 0),
                (0, 102, 153), (0, 153, 51), (0, 153, 102), (0, 153, 153),
                (0, 102, 153), (0, 51, 153), (0, 0, 153), (153, 0, 102),
                (102, 0, 153), (153, 0, 153), (102, 0, 153), (0, 0, 153),
                (0, 0, 153), (0, 0, 153), (0, 153, 153), (0, 153, 153),
                (0, 153, 153)]

    connections = [(0, 16), (0, 15), (16, 18), (15, 17), (0, 1), (1, 2),
                   (2, 3), (3, 4), (1, 5), (5, 6), (6, 7), (1, 8), (8, 9),
                   (9, 10), (10, 11), (8, 12), (12, 13), (13, 14), (11, 24),
                   (11, 22), (22, 23), (14, 21), (14, 19), (19, 20)]
    '''
        Initialize Object
    '''
    def __init__(self, args):
        # Ratio params
        horizontal_ratio = float(args[0].horizontal_ratio)
        vertical_ratio = float(args[0].vertical_ratio)

        # Check video
        if args[0].video != "enabled" and args[0].video != "disabled":
            print("Error: set correct video mode, enabled or disabled",
                  flush=True)
            sys.exit(-1)

        # Check image
        if args[0].image != "enabled" and args[0].image != "disabled":
            print("Error: set correct image mode, enabled or disabled",
                  flush=True)
            sys.exit(-1)

        # Convert args to boolean
        self.use_video = args[0].video == "enabled"

        self.use_image = args[0].image == "enabled"

        self.use_preview = args[0].preview == "enabled"

        # Unable to use video and image mode at same time
        if self.use_video and self.use_image:
            print(
                "Error: unable to use video and image mode at the same time!",
                flush=True)
            sys.exit(-1)

        # Either video or image mode must be enabled
        if not self.use_video and not self.use_image:
            print("Error: enable either video or image mode!", flush=True)
            sys.exit(-1)

        self.streaming = args[0].streaming == "enabled"

        if self.use_video:
            # Open video capture
            self.cap = cv2.VideoCapture(args[0].stream_in)

            if not self.cap.isOpened():
                print("Error: Opening video stream or file {0}".format(
                    args[0].stream_in),
                      flush=True)
                sys.exit(-1)

            # Get input size
            width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            if not self.streaming:
                # Open video output (if output is not an image)
                self.out = cv2.VideoWriter(
                    args[0].stream_out,
                    cv2.VideoWriter_fourcc(*args[0].encoding_codec),
                    int(self.cap.get(cv2.CAP_PROP_FPS)), (width, height))

                if self.out is None:
                    print("Error: Unable to open output video file {0}".format(
                        args[0].stream_out),
                          flush=True)
                    sys.exit(-1)

            # Get image size
            im_size = (width, height)

        if self.use_image:
            self.image = cv2.imread(args[0].image_in)
            if self.image is None:
                print("Error: Unable to open input image file {0}".format(
                    args[0].image_in),
                      flush=True)
                sys.exit(-1)

            self.image_out = args[0].image_out

            # Get image size
            im_size = (self.image.shape[1], self.image.shape[0])

        # Compute homography
        self.homography_matrix = self.compute_homography(
            horizontal_ratio, vertical_ratio, im_size)

        self.background_masked = False
        # Open background image, if necessary
        if args[0].masked == "enabled":
            # Set masked flag
            self.background_masked = True

            # Load static background
            self.background_image = cv2.imread(args[0].background_in)

            # Close, if no background, but required
            if self.background_image is None:
                print("Error: Unable to load background image (flag enabled)",
                      flush=True)
                sys.exit(-1)

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()

        # Openpose params

        # Model path
        params["model_folder"] = args[0].openpose_folder

        # Face disabled
        params["face"] = False

        # Hand disabled
        params["hand"] = False

        # Net Resolution
        params["net_resolution"] = args[0].net_size

        # Gpu number
        params["num_gpu"] = 1  # Set GPU number

        # Gpu Id
        # Set GPU start id (not considering previous)
        params["num_gpu_start"] = 0

        # Starting OpenPose
        self.opWrapper = op.WrapperPython()
        self.opWrapper.configure(params)
        self.opWrapper.start()

        # Process Image
        self.datum = op.Datum()

        # Json server
        self.dt_vector = {}

        # Client list
        self.stream_list = []

        if self.streaming:
            # Initialize stream server
            self.video_server = StreamServer(int(args[0].video_port),
                                             self.stream_list, "image/jpeg")
            self.video_server.activate()

            # Initialize json server
            self.js_server = ResponseServer(int(args[0].js_port),
                                            "application/json")
            self.js_server.activate()

        # turbo jpeg initialization
        self.jpeg = TurboJPEG()

        # Calibration value for height estimation
        self.calibrate = float(args[0].calibration)

        # Actually unused
        self.ellipse_angle = 0

        # Body confidence threshold
        self.body_th = float(args[0].body_threshold)

        # Show confidence
        self.show_confidence = args[0].show_confidence == "enabled"

        # Set mask precision (mask division factor)
        self.overlap_precision = int(args[0].overlap_precision)

        # Clamp user value to the valid range [1, 16]
        # (values below 1 would break the mask downscaling divisions)
        self.overlap_precision = max(1, min(16, self.overlap_precision))

    '''
        Draw Skeleton
    '''

    def draw_skeleton(self, frame, keypoints, colour):

        for keypoint_id1, keypoint_id2 in self.connections:
            x1, y1 = keypoints[keypoint_id1]
            x2, y2 = keypoints[keypoint_id2]

            if 0 in (x1, y1, x2, y2):
                continue

            pt1 = int(round(x1)), int(round(y1))
            pt2 = int(round(x2)), int(round(y2))

            cv2.circle(frame,
                       center=pt1,
                       radius=4,
                       color=self.nd_color[keypoint_id2],
                       thickness=-1)
            cv2.line(frame,
                     pt1=pt1,
                     pt2=pt2,
                     color=self.nd_color[keypoint_id2],
                     thickness=2)

    '''
        Compute skeleton bounding box
    '''

    def compute_simple_bounding_box(self, skeleton):
        x = skeleton[::2]
        x = np.where(x == 0.0, np.nan, x)
        left, right = int(round(np.nanmin(x))), int(round(np.nanmax(x)))
        y = skeleton[1::2]
        y = np.where(y == 0.0, np.nan, y)
        top, bottom = int(round(np.nanmin(y))), int(round(np.nanmax(y)))
        return left, right, top, bottom

    '''
        Compute Homography
    '''

    def compute_homography(self, H_ratio, V_ratio, im_size):
        rationed_height = im_size[1] * V_ratio
        rationed_width = im_size[0] * H_ratio
        src = np.array([[0, 0], [0, im_size[1]], [im_size[0], im_size[1]],
                        [im_size[0], 0]])
        dst = np.array([[0 + rationed_width / 2, 0 + rationed_height],
                        [0, im_size[1]], [im_size[0], im_size[1]],
                        [im_size[0] - rationed_width / 2, 0 + rationed_height]],
                       np.int32)
        h, status = cv2.findHomography(src, dst)
        return h

    '''
        Compute overlap
    '''

    def compute_overlap(self, rect_1, rect_2):
        x_overlap = max(0,
                        min(rect_1[1], rect_2[1]) - max(rect_1[0], rect_2[0]))
        y_overlap = max(0,
                        min(rect_1[3], rect_2[3]) - max(rect_1[2], rect_2[2]))
        # Overlap exists only if both axis projections intersect
        return x_overlap * y_overlap > 0

    '''
        Trace results
    '''

    def trace(self, image, skeletal_coordinates, draw_ellipse_requirements,
              is_skeletal_overlapped):
        bodys = []

        # Trace ellipses and bodies on the target image; enumerate keeps the
        # index aligned with the probability/ellipse lists even when a body
        # is skipped
        for i, skeletal_coordinate in enumerate(skeletal_coordinates[0]):
            if float(skeletal_coordinates[1][i]) < self.body_th:
                continue

            # Trace ellipse
            cv2.ellipse(image, (int(draw_ellipse_requirements[i][0]),
                                int(draw_ellipse_requirements[i][1])),
                        (int(draw_ellipse_requirements[i][2]),
                         int(draw_ellipse_requirements[i][3])), 0, 0, 360,
                        self.colors[int(is_skeletal_overlapped[i])], 3)

            # Trace skeleton
            skeletal_coordinate = np.array(skeletal_coordinate)
            self.draw_skeleton(image, skeletal_coordinate.reshape(-1, 2),
                               (255, 0, 0))

            if int(skeletal_coordinate[2]) != 0 and int(
                    skeletal_coordinate[3]) != 0 and self.show_confidence:
                cv2.putText(
                    image, "{0:.2f}".format(skeletal_coordinates[1][i]),
                    (int(skeletal_coordinate[2]), int(skeletal_coordinate[3])),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

            # Append json body data, joints coordinates, ground ellipses
            bodys.append([[round(x) for x in skeletal_coordinate],
                          draw_ellipse_requirements[i],
                          int(is_skeletal_overlapped[i])])

        self.dt_vector["bodys"] = bodys

    '''
        Evaluate skeleton height
    '''

    def evaluate_height(self, skeletal_coordinate):
        # Calculate skeleton height
        calculated_height = 0
        pointer = -1

        # Left leg
        joint_set = [12, 13, 14]

        # Check if leg is complete
        left_leg = True
        for k in joint_set:
            x = int(skeletal_coordinate[k * 2])
            y = int(skeletal_coordinate[k * 2 + 1])
            if x == 0 or y == 0:
                # No left leg, try right_leg
                joint_set = [9, 10, 11]
                left_leg = False
                break

        if not left_leg:
            joint_set = [9, 10, 11]
            # Check if leg is complete
            for k in joint_set:
                x = int(skeletal_coordinate[k * 2])
                y = int(skeletal_coordinate[k * 2 + 1])
                if x == 0 or y == 0:
                    # No left leg, no right leg, then body
                    joint_set = [0, 1, 8]
                    break

        # Evaluate leg height
        pointer = -1
        for k in joint_set[:-1]:
            pointer += 1
            if skeletal_coordinate[joint_set[pointer]*2]\
                    and skeletal_coordinate[joint_set[pointer+1]*2]\
                    and skeletal_coordinate[joint_set[pointer]*2+1]\
                    and skeletal_coordinate[joint_set[pointer+1]*2+1]:
                calculated_height = calculated_height +\
                    math.sqrt(((skeletal_coordinate[joint_set[pointer]*2] -
                                skeletal_coordinate[joint_set[pointer+1]*2])**2) +
                              ((skeletal_coordinate[joint_set[pointer]*2+1] -
                                skeletal_coordinate[joint_set[pointer+1]*2+1])**2))

        # Set parameter (calibrate) to optimize settings (camera dependent)
        return calculated_height * self.calibrate

    '''
        Evaluate overlapping
    '''

    def evaluate_overlapping(self, ellipse_boxes, is_skeletal_overlapped,
                             ellipse_pool):
        # checks for overlaps between people's ellipses, to determine risky or not
        for ind1, ind2 in itertools.combinations(
                list(range(0, len(ellipse_pool))), 2):

            is_overlap = cv2.bitwise_and(ellipse_pool[ind1],
                                         ellipse_pool[ind2])

            if is_overlap.any() and (not is_skeletal_overlapped[ind1]
                                     or not is_skeletal_overlapped[ind2]):
                is_skeletal_overlapped[ind1] = 1
                is_skeletal_overlapped[ind2] = 1

    '''
        Create Joint Array
    '''

    def create_joint_array(self, skeletal_coordinates):
        # Get joints sequence
        bodys_sequence = []
        bodys_probability = []
        for body in skeletal_coordinates:
            body_sequence = []
            body_probability = 0.0
            # For each joint, append its coordinates to the vector list
            for joint in body:
                body_sequence.append(joint[0])
                body_sequence.append(joint[1])

                # Sum joints probability to find body probability
                body_probability += joint[2]

            body_probability = body_probability / len(body)

            # Add body sequence to list
            bodys_sequence.append(body_sequence)
            bodys_probability.append(body_probability)

        # Return coordinate sequences and per-body probabilities
        return [bodys_sequence, bodys_probability]

    '''
        Evaluate ellipses shadow, for each body
    '''

    def evaluate_ellipses(self, skeletal_coordinates,
                          draw_ellipse_requirements, ellipse_boxes,
                          ellipse_pool):
        for skeletal_coordinate in skeletal_coordinates:
            # Evaluate skeleton bounding box
            left, right, top, bottom = self.compute_simple_bounding_box(
                np.array(skeletal_coordinate))

            bb_center = np.array([(left + right) / 2, (top + bottom) / 2],
                                 np.int32)

            calculated_height = self.evaluate_height(skeletal_coordinate)

            # computing how the height of the circle varies in perspective
            pts = np.array([[bb_center[0], top], [bb_center[0], bottom]],
                           np.float32)

            pts1 = pts.reshape(-1, 1, 2).astype(np.float32)  # (n, 1, 2)

            dst1 = cv2.perspectiveTransform(pts1, self.homography_matrix)

            # vertical extent of the ellipse after the homography,
            # used as the ellipse's minor axis
            width = int(dst1[1, 0][1] - dst1[0, 0][1])

            # Bounding box surrounding the ellipse, used to check
            # whether two ellipses overlap
            ellipse_bbx = [
                bb_center[0] - calculated_height,
                bb_center[0] + calculated_height, bottom - width,
                bottom + width
            ]

            # Add bounding box to ellipse list
            ellipse_boxes.append(ellipse_bbx)

            ellipse = [
                int(bb_center[0]),
                int(bottom),
                int(calculated_height),
                int(width)
            ]

            mask_copy = self.mask.copy()

            ellipse_pool.append(
                cv2.ellipse(mask_copy,
                            (int(bb_center[0] / self.overlap_precision),
                             int(bottom / self.overlap_precision)),
                            (int(calculated_height / self.overlap_precision),
                             int(width / self.overlap_precision)), 0, 0, 360,
                            (255, 255, 255), -1))

            draw_ellipse_requirements.append(ellipse)

    '''
        Analyze image and evaluate distances
    '''

    def distances_evaluate(self, image, background):
        ellipse_boxes = []

        draw_ellipse_requirements = []

        ellipse_pool = []

        # Assign input image to openpose
        self.datum.cvInputData = image

        # Start WrapperPython
        self.opWrapper.emplaceAndPop([self.datum])

        # Get openpose coordinates (rounding values)
        skeletal_coordinates = self.datum.poseKeypoints.tolist()

        # Trace on background
        if self.background_masked:
            image = background

        self.dt_vector['ts'] = int(round(time.time() * 1000))
        self.dt_vector['bodys'] = []

        if isinstance(skeletal_coordinates, list):
            # Remove probability from joints and get a joint position list
            skeletal_coordinates = self.create_joint_array(
                skeletal_coordinates)

            # Initialize overlapped buffer
            is_skeletal_overlapped = np.zeros(
                np.shape(skeletal_coordinates[0])[0])

            # Evaluate ellipses for each body detected by openpose
            self.evaluate_ellipses(skeletal_coordinates[0],
                                   draw_ellipse_requirements, ellipse_boxes,
                                   ellipse_pool)

            # Evaluate overlapping
            self.evaluate_overlapping(ellipse_boxes, is_skeletal_overlapped,
                                      ellipse_pool)

            # Trace results over output image
            self.trace(image, skeletal_coordinates, draw_ellipse_requirements,
                       is_skeletal_overlapped)

        if self.streaming:
            # Send video to client queues
            self.send_image(self.stream_list, image, int(self.dt_vector['ts']))

            # Make json vector available to REST requests
            self.js_server.put(bytes(json.dumps(self.dt_vector), "UTF-8"))

        return image

    '''
        Send image over queue list and then over http mjpeg stream
    '''

    def send_image(self, queue_list, image, ts):

        encoded_image = self.jpeg.encode(image, quality=80)
        # Put image into queue for each server thread
        for q in queue_list:
            try:
                block = (ts, encoded_image)
                q.put(block, True, 0.02)
            except queue.Full:
                pass

    '''
        Analyze video
    '''

    def analyze_video(self):
        first_frame = True
        while self.cap.isOpened():
            # Capture from image/video
            ret, image = self.cap.read()

            # Check image
            if image is None or not ret:
                os._exit(0)

            # create a mask from image
            if first_frame:
                self.mask = np.zeros(
                    (int(image.shape[0] / self.overlap_precision),
                     int(image.shape[1] / self.overlap_precision),
                     image.shape[2]),
                    dtype=np.uint8)
                first_frame = False

            # Get openpose output
            if self.background_masked:
                background = self.background_image.copy()
            else:
                background = image

            image = self.distances_evaluate(image, background)

            # Write image
            if not self.streaming:
                self.out.write(image)

            # Show image and wait some time
            if self.use_preview:
                cv2.imshow('Social Distance', image)
                cv2.waitKey(1)

    '''
        Analyze image
    '''

    def analyze_image(self):

        # Get openpose output
        if self.background_masked:
            background = self.background_image.copy()
        else:
            background = self.image

        # Create mask from image
        self.mask = np.zeros(
            (int(self.image.shape[0] / self.overlap_precision),
             int(self.image.shape[1] / self.overlap_precision),
             self.image.shape[2]),
            dtype=np.uint8)

        self.image = self.distances_evaluate(self.image, background)

        # Write image
        cv2.imwrite(self.image_out, self.image)

        # Show image and wait some time
        if self.use_preview:
            cv2.imshow('Social Distance', self.image)
            cv2.waitKey(1000)

    '''
        Analyze image/video
    '''

    def analyze(self):
        if self.use_image:
            self.analyze_image()

        if self.use_video:
            self.analyze_video()
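The mask-based overlap test used by evaluate_ellipses and evaluate_overlapping can be exercised on its own; a standalone sketch with two synthetic ellipse masks:

import cv2
import numpy as np

mask = np.zeros((120, 160, 3), dtype=np.uint8)

# Rasterize two filled ellipses the same way evaluate_ellipses does
e1 = cv2.ellipse(mask.copy(), (60, 60), (30, 12), 0, 0, 360,
                 (255, 255, 255), -1)
e2 = cv2.ellipse(mask.copy(), (80, 60), (30, 12), 0, 0, 360,
                 (255, 255, 255), -1)

# Same check as evaluate_overlapping: any shared pixel means overlap
print("overlap:", bool(cv2.bitwise_and(e1, e2).any()))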
Example #5
def img_decode(fname):
    # Decode a JPEG file and resize it; return None on failure
    try:
        with open(fname, 'rb') as in_file:
            bgr_array = jpeg.decode(in_file.read())
        bgr_array = cv2.resize(bgr_array, (224, 224))
    except Exception:
        bgr_array = None
    return bgr_array


parser = argparse.ArgumentParser()
parser.add_argument("--numproc", type=int, default=8)
parser.add_argument("--numiter", type=int, default=1)
parser.add_argument("--dir", type=str)
parser.add_argument("--lib", type=str, default="./lib")
args = parser.parse_args()
# load libturbojpeg from the directory given by --lib
jpeg = TurboJPEG(args.lib + "/libturbojpeg.so")

file_dir = absoluteFilePaths(args.dir)
flist = []
for i in range(args.numiter):
    flist += file_dir

p = mp.Pool(processes=args.numproc)
elapsed = timeit.default_timer()
for i in flist:
    p.apply_async(img_decode, args=(i, ))

p.close()
p.join()
print("%g img/s\n" % (len(flist) / (timeit.default_timer() - elapsed)))
Example #6
    def __init__(self, args):
        # Create configurator
        config = configparser.ConfigParser()

        # Read configuration
        try:
            config.read(args[0].config_file)
        except Exception:
            print("unable to find configuration file", flush=True)
            sys.exit(-1)

        # Print configuration
        print("Configuration:", flush=True)

        for key in config:
            print("[{0}]".format(key), flush=True)
            for argument in config[key]:
                print("{0} = {1}".format(argument, config[key][argument]),
                      flush=True)

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()

        # Openpose params

        # Model path
        params["model_folder"] = config['openpose']['models']

        # Face disabled
        params["face"] = False

        # Hand disabled
        params["hand"] = False

        # Net Resolution
        params["net_resolution"] = config['openpose']['network']

        # Gpu number
        params["num_gpu"] = 1  # Set GPU number

        # Gpu Id
        params[
            "num_gpu_start"] = 0  # Set GPU start id (not considering previous)

        # Starting OpenPose
        self.opWrapper = op.WrapperPython()
        self.opWrapper.configure(params)
        self.opWrapper.start()

        # Process Image
        self.datum = op.Datum()

        # Continue recording while this flag is True
        self.continue_recording = True

        # Listen thermal port
        self.thermal_port = int(config['network']['thermal_port'])

        # Listen openpose port
        self.openpose_port = int(config['network']['openpose_port'])

        # Listen json port
        self.js_port = int(config['network']['js_port'])

        # Listen image port
        self.image_port = int(config['network']['image_port'])

        # Create a queue list to store client queues (thermal images)
        self.thermal_list = []

        # Create a queue list to store client queues (openpose images)
        self.openpose_list = []

        # Create a queue list to store client queues (json frames)
        self.js_list = []

        # Minimal temperature (image reconstruction)
        self.min_temperature = float(config['thermal']['min_temperature'])

        # Maximum temperature (image reconstruction)
        self.max_temperature = float(config['thermal']['max_temperature'])

        # Set camera id (multiple camera available)
        self.id = int(config['thermal']['id'])

        # Set face min size x
        self.min_sizex = int(config['face']['min_sizex'])

        # Set face min size y
        self.min_sizey = int(config['face']['min_sizey'])

        # Set font size
        self.font_scale = float(config['face']['font_scale'])

        # Set alarm temperature
        self.alarm_temperature = float(config['face']['alarm_temperature'])

        # Camera resolution x
        self.resolution_x = int(config['thermal']['resolution_x'])

        # Camera resolution y
        self.resolution_y = int(config['thermal']['resolution_y'])

        # Reflected temperature
        self.reflected_temperature = float(
            config['thermal']['reflected_temperature'])

        # Atmospheric temperature
        self.atmospheric_temperature = float(
            config['thermal']['atmospheric_temperature'])

        # Object distance
        self.object_distance = float(config['thermal']['object_distance'])

        # Object emissivity
        self.object_emissivity = float(config['thermal']['object_emissivity'])

        # Relative humidity
        self.relative_humidity = float(config['thermal']['relative_humidity'])

        # ext_optics_temperature
        self.extoptics_temperature = float(
            config['thermal']['extoptics_temperature'])

        # ext_optics_transmission
        self.extoptics_transmission = float(
            config['thermal']['extoptics_transmission'])

        # estimated_transmission
        self.estimated_transmission = float(
            config['thermal']['estimated_transmission'])

        # Lines to be removed to correct a camera error on the retrieved image
        self.unused_lines = int(config['thermal']['unused_lines'])

        # Set compression
        self.compression = int(config['mjpeg']['compression'])

        # Show video
        self.show_video = int(config['debug']['show_video']) == 1

        # Min detected temperature
        self.min_detection_temperature = int(
            config['face']['min_detection_temperature'])

        # Max detected temperature
        self.max_detection_temperature = int(
            config['face']['max_detection_temperature'])

        # Delta temperature
        self.delta_temperature = float(config['face']['delta_temperature'])

        # Record sequence
        self.record_image = int(config['debug']['record_image']) == 1

        # Record dir
        self.record_dir = config['debug']['record_dir']

        # Record csv
        self.record_csv = int(config['debug']['record_csv']) == 1

        # Record csv filename
        self.filename_csv = config['debug']['filename_csv']

        # Debug flag
        self.debug = int(config['debug']['debug']) == 1

        # Record raw
        self.recorder_raw = int(config['debug']['recorder_raw']) == 1

        # Play raw
        self.player_raw = int(config['debug']['player_raw']) == 1

        # Record raw filename
        self.filename_raw = config['debug']['filename_raw']

        # DEBUG
        try:
            # Open recorder file
            if self.recorder_raw:
                self.raw = open(config['debug']['filename_raw'], "wb")

            # Open player file
            if self.player_raw:
                self.raw = open(config['debug']['filename_raw'], "rb")
        except OSError:
            print("Unable to open {0} local file!".format(
                config['debug']['filename_raw']),
                  flush=True)
            os._exit(-1)

        try:
            # Create target Directory
            os.mkdir(self.record_dir)
            print("Directory ", self.record_dir, " Created ", flush=True)
        except FileExistsError:
            print("Directory ", self.record_dir, " already exists", flush=True)

        # Initialize thermal server
        self.thermal_server = StreamServer(self.thermal_port,
                                           self.thermal_list, "image/jpeg")

        # Initialize openpose server
        self.openpose_server = StreamServer(self.openpose_port,
                                            self.openpose_list, "image/jpeg")

        # Initialize json server
        self.js_server = ResponseServer(self.js_port, "application/json")

        # Initialize image server
        self.image_server = ResponseServer(self.image_port, "image/jpeg")

        # Initialize temperature FIFO length and array
        self.max_t_fifo = []
        self.fifo_size = 15

        self.max_t_face_fifo = []
        self.fifo_face_size = 15

        # Initializing the mask
        self.mask = cv2.imread(config['thermal']['mask_filename'], 1)
        self.mask = cv2.cvtColor(self.mask, cv2.COLOR_BGR2GRAY)
        self.mask = self.mask > 100

        # Create jpeg object
        self.jpeg = TurboJPEG()
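A minimal sketch of a matching configuration file, covering a subset of the sections and keys read above (all values are placeholders; the remaining thermal/face/debug keys follow the same pattern):

[openpose]
models = /path/to/openpose/models
network = -1x368

[network]
thermal_port = 8090
openpose_port = 8091
js_port = 8092
image_port = 8093

[thermal]
min_temperature = 20.0
max_temperature = 40.0
id = 0
mask_filename = mask.png

[mjpeg]
compression = 80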
Example #7
                             target_transform=target_transform,
                             is_valid_file=is_valid_file,
                             subset=subset)
        self.imgs = self.samples


if __name__ == '__main__':
    dataset = 'imagenet'

    import torch
    import datasets.cvtransforms as transforms
    import matplotlib.pyplot as plt
    from sklearn.preprocessing import minmax_scale

    # jpeg_encoder = TurboJPEG('/home/kai.x/work/local/lib/libturbojpeg.so')
    jpeg_encoder = TurboJPEG('/usr/lib/libturbojpeg.so')
    if dataset == 'imagenet':
        input_normalize = []
        input_normalize_y = transforms.Normalize(mean=train_y_mean_resized,
                                                 std=train_y_std_resized)
        input_normalize_cb = transforms.Normalize(mean=train_cb_mean_resized,
                                                  std=train_cb_std_resized)
        input_normalize_cr = transforms.Normalize(mean=train_cr_mean_resized,
                                                  std=train_cr_std_resized)
        input_normalize.append(input_normalize_y)
        input_normalize.append(input_normalize_cb)
        input_normalize.append(input_normalize_cr)
        val_loader = torch.utils.data.DataLoader(
            # ImageFolderDCT('/mnt/ssd/kai.x/dataset/ILSVRC2012/val', transforms.Compose([
            ImageFolderDCT(
                '/storage-t1/user/kaixu/datasets/ILSVRC2012/val',
Example #8
# use the URL: http://127.0.0.1:8080/basics/jpg

app = Flask(__name__)
app.config.update(FAKE_LATENCY_BEFORE=1)
app.debug = True

font = cv2.FONT_HERSHEY_SIMPLEX
boundary = 'frame'  # multipart boundary

root_dir = dirname(dirname(realpath(__file__)))
turbo_jpeg = None
try:
    if platform.system() == "Windows":
        try:
            turbo_jpeg = TurboJPEG(
                os.path.join(root_dir, "turbojpeg", "turbojpeg.dll"))
        except OSError:
            turbo_jpeg = TurboJPEG(
                os.path.join(root_dir, "turbojpeg", "turbojpeg64.dll"))
except (FileNotFoundError, OSError):
    pass

W, H = 1920, 1440
SSP = 240  # screen size pixels


def draw_cubenet() -> np.ndarray:
    img = np.zeros((H, W, 3), np.uint8)
    cv2.rectangle(img, (0, 0), (W, H), (200, 255, 255), -1)
    cv2.putText(img,
                'Hello World!', (10, 600),
Example #9
def printErase(arg):
    try:
        tsiz = tsize()[0]
        print("\r{}{}\n".format(
            arg, " " * (tsiz * math.ceil(len(arg) / tsiz) - len(arg) - 1)),
              end="",
              flush=True)
    except Exception:
        # raise
        pass


# note that these are all 64 bit libraries since factorio doesn't support 32 bit.
if os.name == "nt":
    jpeg = TurboJPEG("mozjpeg/turbojpeg.dll")
# elif _platform == "darwin":						# I'm not actually sure if mac can run linux libraries or not.
# 	jpeg = TurboJPEG("mozjpeg/libturbojpeg.dylib")	# If anyone on mac has problems with the line below please make an issue :)
else:
    jpeg = TurboJPEG("mozjpeg/libturbojpeg.so")


def saveCompress(img, path, inpath=None):
    if maxQuality:  # do not waste any time compressing the image
        return img.save(path, subsampling=0, quality=100)

    # PIL gives RGB; reverse the channels to BGR, TurboJPEG's default
    with open(path, 'wb') as out_file:
        out_file.write(jpeg.encode(numpy.array(img)[:, :, ::-1].copy()))

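The [:, :, ::-1] copy above exists because TurboJPEG encodes BGR by default; PyTurboJPEG's pixel_format argument can take PIL's RGB buffer directly, as in this sketch (Linux library path assumed, as in the example):

import numpy
from turbojpeg import TurboJPEG, TJPF_RGB

jpeg_rgb = TurboJPEG("mozjpeg/libturbojpeg.so")

def saveCompressRGB(img, path):
    # encode the PIL image's RGB buffer without the channel-reversal copy
    with open(path, 'wb') as out_file:
        out_file.write(jpeg_rgb.encode(numpy.array(img), pixel_format=TJPF_RGB))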
Example #10
def printErase(arg):
    try:
        tsiz = tsize()[0]
        print("\r{}{}\n".format(
            arg, " " * (tsiz * math.ceil(len(arg) / tsiz) - len(arg) - 1)),
              end="",
              flush=True)
    except Exception:
        # raise
        pass


# note that these are all 64 bit libraries since factorio doesn't support 32 bit.
if os.name == "nt":
    jpeg = TurboJPEG(
        Path(__file__, "..", "mozjpeg/turbojpeg.dll").resolve().as_posix())
# elif _platform == "darwin":						# I'm not actually sure if mac can run linux libraries or not.
# 	jpeg = TurboJPEG("mozjpeg/libturbojpeg.dylib")	# If anyone on mac has problems with the line below please make an issue :)
else:
    jpeg = TurboJPEG(
        Path(__file__, "..", "mozjpeg/libturbojpeg.so").resolve().as_posix())


def saveCompress(img, path: Path):
    if maxQuality:  # do not waste any time compressing the image
        return img.save(path, subsampling=0, quality=100)

    # PIL gives RGB; reverse the channels to BGR, TurboJPEG's default
    with path.open("wb") as outFile:
        outFile.write(jpeg.encode(numpy.array(img)[:, :, ::-1].copy()))
Example #11
from cheroot.wsgi import Server, PathInfoDispatcher
from flask import Flask, send_file, Response, request, abort
import numpy as np
import cv2
import io
import platform
from turbojpeg import TurboJPEG
import threading
import time
import sys
from picamera.array import PiRGBArray
from picamera import PiCamera
stream_framerate = 0
stream_target_size = (0, 99999)

jpeg = TurboJPEG(lib_path='turbojpeg.dll' if platform.system() ==
                 'Windows' else '/opt/libjpeg-turbo/lib32/libturbojpeg.so.0')

app = Flask(__name__, static_url_path='', template_folder='static')

new_img_ev = threading.Condition()


@app.route('/now.jpg')
def img_now():
    if cur_image is None:
        abort(500)
    buf = jpeg.encode(cur_image)
    return send_file(io.BytesIO(buf), mimetype='image/jpeg')


@app.route('/new.jpg')
Example #12
class RSNADataset(Dataset):
    def __init__(
        self,
        csv_path: str,
        img_dir: str,
        file_extension: str = 'dcm',
        mode: str = 'train',
        fold: int = 0,
        k_fold: int = 5,
        transform=None,
        network_type: str = 'cnn',
        max_sequence: int = 1083,
    ):
        assert mode in ['train', 'val', 'test']
        assert network_type in ['cnn', 'rnn', 'cnn_rnn']
        assert -1 <= fold < 5
        assert 15 % k_fold == 0

        self.transform = transform
        self.csv_path = Path(csv_path)
        self.img_dir = Path(img_dir)
        self.file_extension = file_extension
        self.mode = mode
        self.network_type = network_type
        self.max_sequence = max_sequence

        if network_type == 'cnn':
            self.target_cols = [
                'pe_present_on_image',
            ]
        elif network_type == 'rnn':
            self.target_cols = [
                'negative_exam_for_pe', 
                'indeterminate',
                'chronic_pe', 'acute_and_chronic_pe',           # not indeterminate. Only One is true.
                'central_pe', 'leftsided_pe', 'rightsided_pe',  # not indeterminate. At least One is true.
                'rv_lv_ratio_gte_1', 'rv_lv_ratio_lt_1',        # not indeterminate. Only One is true.
                'pe_present_on_image',
            ]
        
        if self.file_extension == 'jpg':
            self.jpeg_reader = TurboJPEG()

        df = pd.read_csv(self.csv_path)
        df["file_name"] = df.SOPInstanceUID + '.' + self.file_extension
        if self.file_extension != 'dcm':
            df.z_pos_order = df.z_pos_order.map(lambda x: f'{x:04}')
            df.file_name = df.z_pos_order + '_' + df.file_name
        df["image_name"] = str(self.img_dir) + '/' + \
            df.StudyInstanceUID + '/' +  df.SeriesInstanceUID + '/' +  df.file_name
        self.df = df if fold == -1 else self._make_fold(df, fold, k_fold, mode=mode)

        if self.network_type == 'rnn' or self.network_type == 'cnn_rnn':
            self.df["path_to_series_id"] = str(self.img_dir) + '/' + \
                self.df.StudyInstanceUID + '/' + self.df.SeriesInstanceUID
            self.path_to_series_id = self.df["path_to_series_id"].unique()
        
    def __len__(self):
        if self.network_type == 'cnn':
            return len(self.df)
        elif self.network_type == 'rnn':
            return len(self.path_to_series_id)
        elif self.network_type == 'cnn_rnn':
            return len(self.path_to_series_id)

    def __getitem__(self, index):
        if self.network_type == 'cnn':
            return self._get_single_image(index)
        else:
            return self._get_series(index)
    
    def _get_single_image(self, index):
        data = self.df.iloc[index]

        return self._get_img_label(data)
    
    def _get_series(self, index):
        if self.network_type == 'rnn':
            return self._read_embeddings(index)
        elif self.network_type == 'cnn_rnn':
            if self.file_extension == 'npz':
                return self._read_series_npz(index)
            else:
                return self._read_series_images(index)
    
    def _read_embeddings(self, index):
        data_path = self.path_to_series_id[index]
        data_path = Path(data_path).with_suffix('.npz')
        data = np.load(data_path)
        embeddings = data['embeddings']
        labels = data['labels']
        sequence_length, _ = embeddings.shape
        
        embeddings = self._padding_sequence(sequence_length, embeddings, 0)
        labels = self._padding_sequence(sequence_length, labels, -1)
        
        return embeddings, labels, sequence_length

    def _read_series_npz(self, index):
        data_path = Path(self.path_to_series_id[index]).with_suffix('.npz')
        data = np.load(data_path)
        imgs = data['imgs']      # (sequence, 3, h, w)
        labels = data['labels']  # (sequence, n_class)
        if self.transform is not None:
            imgs = imgs.transpose(0, 2, 3, 1)
            imgs = [self.transform(image=img).transpose( 
                2, 0, 1) for img in imgs]
            imgs = np.stack(imgs)

        imgs = imgs.astype('float32')
        labels = labels.astype('float32')

        return imgs, labels

    def _read_series_images(self, index):
        # used only at inference time.
        data_path = self.path_to_series_id[index]
        dicoms, dicom_files = self._load_dicom_array(data_path)
        imgs = self._get_three_windowing_image(dicoms)
        if self.transform is not None:
            imgs = imgs.transpose(0, 2, 3, 1)
            imgs = [self.transform(image=img).transpose( 
                2, 0, 1) for img in imgs]
            imgs = np.stack(imgs)

        imgs = imgs.astype('float32')

        exam_level_name, image_level_name = self._get_file_names(dicom_files)
        
        return imgs, exam_level_name, image_level_name

    def _get_img_label(self, data):
        if self.file_extension == 'jpg':
            with open(data.image_name, "rb") as binary:
                # second argument is the pixel format (0 == TJPF_RGB)
                img = self.jpeg_reader.decode(binary.read(), 0)
        elif self.file_extension == 'dcm':
            raise NotImplementedError
        if self.transform is not None:
            img = self.transform(image=img)
        img = img.transpose(2, 0, 1).astype('float32')

        labels = data[self.target_cols].values.astype('float32')

        return img, labels

    def _make_fold(self, df, fold, k_fold, mode='train'):
        df_new = df.copy()
        offset = 15 // k_fold
        target = [i + fold * offset for i in range(offset)]

        if mode == 'train':
            df_new = df_new.query(f'fold not in {target}')
        else:
            df_new = df_new.query(f'fold in {target}')
        
        return df_new
    
    def _padding_sequence(self, sequence_length, target, value):
        pad_len = self.max_sequence - sequence_length
        assert pad_len >= 0
            
        if pad_len > 0:
            padding = [np.full_like(target[0], value)] * pad_len
            target = np.concatenate([target, padding])
        
        return target

    def _load_dicom_array(self, path_to_series_id):
        dicom_files = list(Path(path_to_series_id).glob('*.dcm'))
        dicoms = [pydicom.dcmread(d) for d in dicom_files]
        slope = float(dicoms[0].RescaleSlope)
        intercept = float(dicoms[0].RescaleIntercept)
        # Assume all images are axial
        z_pos = [float(d.ImagePositionPatient[-1]) for d in dicoms]
        dicoms = np.asarray([d.pixel_array for d in dicoms])
        dicoms = dicoms[np.argsort(z_pos)]
        dicoms = dicoms * slope
        dicoms = dicoms + intercept

        dicom_files = np.array(dicom_files)[np.argsort(z_pos)]

        return dicoms, dicom_files
    
    def _windowing(self, img, window_length, window_width):
        upper = window_length + window_width // 2
        lower = window_length - window_width // 2
        x = np.clip(img.copy(), lower, upper)
        x = x - np.min(x)
        x = x / np.max(x)
        x = (x * 255.0).astype('uint8')

        return x
    
    def _get_three_windowing_image(self, dicoms):
        img_lung = np.expand_dims(
            self._windowing(dicoms, -600, 1500), axis=1)
        img_mediastinal = np.expand_dims(
            self._windowing(dicoms, 40, 400), axis=1)
        img_pe_specific = np.expand_dims(
            self._windowing(dicoms, 100, 700), axis=1)
        
        return np.concatenate([
            img_lung, img_pe_specific, img_mediastinal], axis=1)
    
    def _get_file_names(self, dicom_files):
        exam_level_name = str(dicom_files[0].parent.parent.stem)
        dicom_files = dicom_files.tolist()
        image_level_name = list(map(lambda x: str(x.stem), dicom_files))
        
        return exam_level_name, image_level_name
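The _windowing arithmetic above (clip to level ± width/2, then rescale to uint8) can be checked standalone; a sketch with synthetic Hounsfield values and the lung window used in _get_three_windowing_image:

import numpy as np

def windowing(img, window_length, window_width):
    # same arithmetic as RSNADataset._windowing
    upper = window_length + window_width // 2
    lower = window_length - window_width // 2
    x = np.clip(img.copy(), lower, upper)
    x = x - np.min(x)
    x = x / np.max(x)
    return (x * 255.0).astype('uint8')

hu = np.array([[-1000.0, -600.0], [40.0, 400.0]])  # synthetic HU values
print(windowing(hu, -600, 1500))                   # lung window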
Example #13
    def decode_img_libjpeg_turbo(self, img_str: bytes):
        if self.jpeg is None:
            self.jpeg = TurboJPEG(lib_path=local_libturbo_path)
        bgr_array = self.jpeg.decode(img_str)
        return bgr_array
Example #14
from turbojpeg import TurboJPEG
import requests

url = "https://raw.githubusercontent.com/libjpeg-turbo/libjpeg-turbo/main/testimages/testorig.jpg"
r = requests.get(url, allow_redirects=True)
r.raise_for_status()

jpeg = TurboJPEG()

with open("testorig.jpg", "wb") as test_file:
    test_file.write(r.content)

with open("testorig.jpg", "rb") as in_file:
    bgr_array = jpeg.decode(in_file.read())

with open("output.jpg", "wb") as out_file:
    out_file.write(jpeg.encode(bgr_array))
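PyTurboJPEG also supports decode-time downscaling and encode-time quality control; a sketch reusing testorig.jpg from above (scaling_factor must be one of libjpeg-turbo's supported fractions):

from turbojpeg import TurboJPEG

jpeg = TurboJPEG()

with open("testorig.jpg", "rb") as in_file:
    half = jpeg.decode(in_file.read(), scaling_factor=(1, 2))  # half resolution

with open("output_half.jpg", "wb") as out_file:
    out_file.write(jpeg.encode(half, quality=80))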
Example #15
class ImageLoader(multiprocessing.Process):


	def __init__(self, filename_queue, parser_queue, *args, **kwargs):
		super().__init__(*args, **kwargs)
		self.parser_queue = parser_queue
		self.jpeg_loader = TurboJPEG()
		self.num_images = 0
		self.file_size_sum = 0
		self.pixel_sum = 0
		self.filename_queue = filename_queue
		self.is_running = multiprocessing.Value(ctypes.c_bool, True)

	def read_png(self, buf):
		x = np.frombuffer(buf, dtype = np.uint8)
		img_np = cv2.imdecode(x, cv2.IMREAD_UNCHANGED)
		if img_np is not None:
			if img_np.dtype == np.uint16 and img_np.max() > 255:
				img_np = (img_np // 256).astype(np.uint8)
		return img_np

	def read_jpeg(self, buf):
		with warnings.catch_warnings():
			warnings.simplefilter("ignore")
			return self.jpeg_loader.decode(buf)

	def read_image(self, filename, buf):

		if filename.endswith(".png"):
			bgr_array = self.read_png(buf)
		else:
			try:
				bgr_array = self.read_jpeg(buf)
			except OSError:
				bgr_array = None
		if bgr_array is not None:
			if len(bgr_array.shape) > 2 and bgr_array.shape[2] == 4:
				# return None
				# print("need to realign memory")
				bgr_array = np.ascontiguousarray(bgr_array[:,:,:3])
			if len(bgr_array.shape) == 2:
				new_array = np.zeros(bgr_array.shape + (3,), dtype = np.uint8)
				for i in range(3):
					new_array[:,:,i] = bgr_array
				bgr_array = new_array
				# print(bgr_array.shape)
		return bgr_array

	def print_stats(self, i, t0, t1, cl0, cl1):
		mp = self.pixel_sum / 1024**2
		mb = self.file_size_sum / 1024**2
		mp_per_second = mp / (t1 - t0)
		mb_per_second = mb / (t1 - t0)
		print(f"\r{i:4d}", end = "\t", flush = True)
		print(f"{mp_per_second:8.1f}MP/s", end = "\t", flush = True)
		print(f"{mb_per_second:.2f}MB/s", end = "\t")
		print(f"({(cl1-cl0) * 1e3:6.1f}ms) ({mp:7.1f}MP)", end = "")

	def load_single_image(self, filename, in_file):
		cl0 = clock()
		buf = in_file.read()
		bgr_array = self.read_image(filename, buf)
		if bgr_array is None:
			return
		assert bgr_array.dtype == np.uint8
		self.parser_queue.put(bgr_array)
		# self.image_parser.add_image(bgr_array)
		cl1 = clock()
		self.file_size_sum += os.path.getsize(directory + filename)
		self.pixel_sum += bgr_array.size // 3
		# print(f"{filename} parsed")
		# t1 = time.time()
		# self.print_stats(i, t0, t1, cl0, cl1)


	def load_all_images(self):
		t0 = time.time()

		# for i, filename in enumerate(os.listdir(directory)):
			# with open(directory + filename, 'rb') as in_file:
				
		print()
		self.finalize_parser(t0)

	def run(self):
		while self.is_running.value:
		# while True:
			if self.parser_queue.qsize() > 100:
				# print(self.parser_queue.qsize())
				time.sleep(.1)
				continue
			try:
				image_data = self.filename_queue.get(True, 1)
			except queue.Empty:
				continue
			filename = f"{image_data['filename']}.{image_data['filetype']}"
			with open(directory + filename, 'rb') as in_file:
				self.load_single_image(filename, in_file)
			# time.sleep(1)
			# print(f"Completed {image_data}")
		print("worker finished", self.filename_queue.qsize(), self.parser_queue.qsize())

	@property
	def color_buffer(self):
		return self._color_buffer
Example #16
            # Protected by a lock,
            # as the main thread may ask to access the buffer
            self.lock.acquire()
            self.buffer = self.jpeg_handler.compress(img)
            self.lock.release()


if __name__ == '__main__':

    jpeg_quality = 100

    grabber = VideoGrabber(jpeg_quality, jpeg_lib='turbo')
    grabber.start()
    time.sleep(1)

    turbo_jpeg = TurboJPEG()

    cv2.namedWindow("Image")

    keep_running = True
    idx = 0
    t0 = time.time()

    while keep_running:
        data = grabber.get_buffer()
        if data is None:
            time.sleep(1)
            continue
        img = turbo_jpeg.decode(data)
        cv2.imshow("Image", img)
        keep_running = not (cv2.waitKey(1) & 0xFF == ord('q'))
Example #17
dimx, dimy = (218+subx-1)//subx, (178+suby-1)//suby
print(dimx)
print(dimy)
data = np.zeros((len(files), dimx*dimy), dtype=np.float32)

def worker(num, num2):
    print('spawning: ' + str(num) + ' ' + str(num2) + '\n')
    for i in range(num, num2 + 1):
        with open(dirname + files[i], 'rb') as in_file:
            gray = jpeg.decode(in_file.read(), pixel_format=TJPF_GRAY,
                               scaling_factor=(1, 4))
        data[i] = gray.ravel()
        # data[i] = np.mean(imread(dirname+files[i])[::4,::4,:], axis=2).ravel()
    return

jpeg = TurboJPEG('/usr/lib/x86_64-linux-gnu/libturbojpeg.so')

nr_threads = 8
imgcount = len(files)
batches = imgcount // nr_threads
threads = []
idxstart = 0
idxstop = batches - 1
print('images: ' + str(imgcount) + ' in batches of size: ' + str(batches) +
      ' with ' + str(nr_threads) + ' threads')

for i in range(nr_threads):
    t = threading.Thread(target=worker, args=[idxstart, idxstop])
    threads.append(t)
    t.start()
    idxstart += batches
    idxstop += batches
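The loop above never joins its threads, and when imgcount is not divisible by nr_threads the trailing images are never decoded; a sketch of the missing tail:

# Wait for all decode threads, then process any remainder images
for t in threads:
    t.join()

if imgcount % nr_threads:
    worker(nr_threads * batches, imgcount - 1)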
Example #18
    # print help information and exit:
    print(err)  # will print something like "option -a not recognized"
    sys.exit(-1)

for o, a in opts:
    print('processing option {0} and arg {1}'.format(o, a))
    if o == '-n': netname = a
    elif o == '-c': conf_thresh = float(a)
    elif o == '-u': uri = a
    elif o == '-p': port = int(a)

#print('Using ZMQ port {0}'.format(port))

net = jetson.inference.detectNet(netname, [], conf_thresh)

jpeg = TurboJPEG()

# get the video stream
# the URL below can be as short as the ID (e.g., 'iYhCn0jf46U')
# if the URL is a 'file://' URL, it is handled as a special case.

up = urllib.parse.urlparse(uri)
if up.scheme in ['file', 'rtsp']:
    cap_url = up.path
else:
    vPafy = pafy.new(uri)
    my_stream = vPafy.getbest()
    cap_url = my_stream.url
cap = cv2.VideoCapture(cap_url)

nf = 0
Example #19
class RectifierNode(DTROS):
    def __init__(self, node_name):
        super().__init__(node_name, node_type=NodeType.PERCEPTION)

        # parameters
        self.publish_freq = DTParam("~publish_freq", -1)
        self.alpha = DTParam("~alpha", 0.0)

        # utility objects
        self.jpeg = TurboJPEG()
        self.reminder = DTReminder(frequency=self.publish_freq.value)
        self.camera_model = None
        self.rect_camera_info = None
        self.mapx, self.mapy = None, None

        # subscribers
        self.sub_img = rospy.Subscriber("~image_in",
                                        CompressedImage,
                                        self.cb_image,
                                        queue_size=1,
                                        buff_size="10MB")
        self.sub_camera_info = rospy.Subscriber("~camera_info_in",
                                                CameraInfo,
                                                self.cb_camera_info,
                                                queue_size=1)

        # publishers
        self.pub_img = rospy.Publisher(
            "~image/compressed",
            CompressedImage,
            queue_size=1,
            dt_topic_type=TopicType.PERCEPTION,
            dt_healthy_freq=self.publish_freq.value,
            dt_help="Rectified image (i.e., image with no distortion effects from the lens).",
        )
        self.pub_camera_info = rospy.Publisher(
            "~camera_info",
            CameraInfo,
            queue_size=1,
            dt_topic_type=TopicType.PERCEPTION,
            dt_healthy_freq=self.publish_freq.value,
            dt_help="Camera parameters for the (virtual) rectified camera.",
        )

    def cb_camera_info(self, msg):
        # unsubscribe from camera_info
        self.loginfo(
            "Camera info message received. Unsubscribing from camera_info topic."
        )
        # noinspection PyBroadException
        try:
            self.sub_camera_info.shutdown()
        except BaseException:
            pass
        # ---
        H, W = msg.height, msg.width
        # create new camera info
        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(msg)
        # find optimal rectified pinhole camera
        with self.profiler("/cb/camera_info/get_optimal_new_camera_matrix"):
            rect_camera_K, _ = cv2.getOptimalNewCameraMatrix(
                self.camera_model.K, self.camera_model.D, (W, H),
                self.alpha.value)
        # create rectification map
        with self.profiler("/cb/camera_info/init_undistort_rectify_map"):
            self.mapx, self.mapy = cv2.initUndistortRectifyMap(
                self.camera_model.K, self.camera_model.D, None, rect_camera_K,
                (W, H), cv2.CV_32FC1)
        # pack rectified camera info into a CameraInfo message
        self.rect_camera_info = CameraInfo(
            width=W,
            height=H,
            K=rect_camera_K.flatten().tolist(),
            R=np.eye(3).flatten().tolist(),
            P=np.zeros((3, 4)).flatten().tolist(),
        )

    def cb_image(self, msg):
        # make sure this matters to somebody
        if not self.pub_img.anybody_listening() and \
                not self.pub_camera_info.anybody_listening():
            return
        # make sure we have a map to use
        if self.mapx is None or self.mapy is None:
            return
        # make sure the node is not switched off
        if not self.switch:
            return
        # make sure this is a good time to publish (always keep this as last check)
        if not self.reminder.is_time(frequency=self.publish_freq.value):
            return
        # turn 'compressed distorted image message' into 'raw distorted image'
        with self.profiler("/cb/image/decode"):
            dist_img = self.jpeg.decode(msg.data)
        # run input image through the lens map
        with self.profiler("/cb/image/rectify"):
            rect_img = cv2.remap(dist_img, self.mapx, self.mapy,
                                 cv2.INTER_NEAREST)
        # turn 'raw rectified image' into 'compressed rectified image message'
        with self.profiler("/cb/image/encode"):
            # rect_img_msg = self.bridge.cv2_to_compressed_imgmsg(rect_img)
            rect_img_msg = CompressedImage(format="jpeg",
                                           data=self.jpeg.encode(rect_img))
        # maintain original header
        rect_img_msg.header.stamp = msg.header.stamp
        rect_img_msg.header.frame_id = msg.header.frame_id
        self.rect_camera_info.header.stamp = msg.header.stamp
        self.rect_camera_info.header.frame_id = msg.header.frame_id
        # publish image
        self.pub_img.publish(rect_img_msg)
        # publish camera info
        self.pub_camera_info.publish(self.rect_camera_info)
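For reference, a minimal non-ROS sketch of the same rectification pipeline; the intrinsics K, distortion D, image size, and file names below are hypothetical stand-ins for a real calibration:

import cv2
import numpy as np
from turbojpeg import TurboJPEG

jpeg = TurboJPEG()

# hypothetical pinhole intrinsics and distortion coefficients
K = np.array([[320.0, 0.0, 320.0],
              [0.0, 320.0, 240.0],
              [0.0, 0.0, 1.0]])
D = np.array([-0.3, 0.1, 0.0, 0.0, 0.0])
W, H = 640, 480

# same three steps as the node above: optimal matrix, rectify map, remap
new_K, _ = cv2.getOptimalNewCameraMatrix(K, D, (W, H), 0.0)
mapx, mapy = cv2.initUndistortRectifyMap(K, D, None, new_K, (W, H),
                                         cv2.CV_32FC1)

with open("distorted.jpg", "rb") as f:
    dist_img = jpeg.decode(f.read())
rect_img = cv2.remap(dist_img, mapx, mapy, cv2.INTER_NEAREST)

with open("rectified.jpg", "wb") as f:
    f.write(jpeg.encode(rect_img))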
Example #20
class AiThermometer:
    '''
        Initialize parameters
    '''
    def __init__(self, args):
        # Create configurator
        config = configparser.ConfigParser()

        # Read configuration (ConfigParser.read returns the list of files it
        # parsed, so an empty result means the file was not found)
        if not config.read(args[0].config_file):
            print("unable to find configuration file", flush=True)
            sys.exit(-1)

        # Print configuration
        print("Configuration:", flush=True)

        for key in config:
            print("[{0}]".format(key), flush=True)
            for argument in config[key]:
                print("{0} = {1}".format(argument, config[key][argument]),
                      flush=True)

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()

        # Openpose params

        # Model path
        params["model_folder"] = config['openpose']['models']

        # Face disabled
        params["face"] = False

        # Hand disabled
        params["hand"] = False

        # Net Resolution
        params["net_resolution"] = config['openpose']['network']

        # Gpu number
        params["num_gpu"] = 1  # Set GPU number

        # Gpu Id
        params["num_gpu_start"] = 0  # Set GPU start id (not considering previous ones)

        # Starting OpenPose
        self.opWrapper = op.WrapperPython()
        self.opWrapper.configure(params)
        self.opWrapper.start()

        # Process Image
        self.datum = op.Datum()

        # Continue recording while this flag is True
        self.continue_recording = True

        # Listen thermal port
        self.thermal_port = int(config['network']['thermal_port'])

        # Listen openpose port
        self.openpose_port = int(config['network']['openpose_port'])

        # Listen json port
        self.js_port = int(config['network']['js_port'])

        # Listen image port
        self.image_port = int(config['network']['image_port'])

        # Create a queue list to store client queues (thermal images)
        self.thermal_list = []

        # Create a queue list to store client queues (openpose images)
        self.openpose_list = []

        # Create a queue list to store client queues (json frames)
        self.js_list = []

        # Minimum temperature (image reconstruction)
        self.min_temperature = float(config['thermal']['min_temperature'])

        # Maximum temperature (image reconstruction)
        self.max_temperature = float(config['thermal']['max_temperature'])

        # Set camera id (multiple camera available)
        self.id = int(config['thermal']['id'])

        # Set face min size x
        self.min_sizex = int(config['face']['min_sizex'])

        # Set face min size y
        self.min_sizey = int(config['face']['min_sizey'])

        # Set font size
        self.font_scale = float(config['face']['font_scale'])

        # Set alarm temperature
        self.alarm_temperature = float(config['face']['alarm_temperature'])

        # Camera resolution x
        self.resolution_x = int(config['thermal']['resolution_x'])

        # Camera resolution y
        self.resolution_y = int(config['thermal']['resolution_y'])

        # Reflected temperature
        self.reflected_temperature = float(
            config['thermal']['reflected_temperature'])

        # Atmospheric temperature
        self.atmospheric_temperature = float(
            config['thermal']['atmospheric_temperature'])

        # Object distance
        self.object_distance = float(config['thermal']['object_distance'])

        # Object emissivity
        self.object_emissivity = float(config['thermal']['object_emissivity'])

        # Relative humidity
        self.relative_humidity = float(config['thermal']['relative_humidity'])

        # ext_optics_temperature
        self.extoptics_temperature = float(
            config['thermal']['extoptics_temperature'])

        # ext_optics_transmission
        self.extoptics_transmission = float(
            config['thermal']['extoptics_transmission'])

        # estimated_transmission
        self.estimated_transmission = float(
            config['thermal']['estimated_transmission'])

        # Lines to be removed to correct a camera error on the retrieved image
        self.unused_lines = int(config['thermal']['unused_lines'])

        # Set compression
        self.compression = int(config['mjpeg']['compression'])

        # Show video
        self.show_video = int(config['debug']['show_video']) == 1

        # Min detected temperature
        self.min_detection_temperature = int(
            config['face']['min_detection_temperature'])

        # Max detected temperature
        self.max_detection_temperature = int(
            config['face']['max_detection_temperature'])

        # Delta temperature compensation (uncalibrated sensor offset)
        self.delta_temperature = float(config['face']['delta_temperature'])

        # Record sequence
        self.record_image = int(config['debug']['record_image']) == 1

        # Record dir
        self.record_dir = config['debug']['record_dir']

        # Record csv
        self.record_csv = int(config['debug']['record_csv']) == 1

        # Record csv filename
        self.filename_csv = config['debug']['filename_csv']

        # Debug flag
        self.debug = int(config['debug']['debug']) == 1

        # Record raw
        self.recorder_raw = int(config['debug']['recorder_raw']) == 1

        # Player raw
        self.player_raw = int(config['debug']['player_raw']) == 1

        # Record raw filename
        self.filename_raw = config['debug']['filename_raw']

        # DEBUG
        try:
            # Open recorder file
            if self.recorder_raw:
                self.raw = open(config['debug']['filename_raw'], "wb")

            # Open player file
            if self.player_raw:
                self.raw = open(config['debug']['filename_raw'], "rb")
        except OSError:
            print("Unable to open {0} local file!".format(
                config['debug']['filename_raw']),
                  flush=True)
            os._exit(-1)

        try:
            # Create target Directory
            os.mkdir(self.record_dir)
            print("Directory ", self.record_dir, " Created ", flush=True)
        except FileExistsError:
            print("Directory ", self.record_dir, " already exists", flush=True)

        # Initialize thermal server
        self.thermal_server = StreamServer(self.thermal_port,
                                           self.thermal_list, "image/jpeg")

        # Initialize openpose server
        self.openpose_server = StreamServer(self.openpose_port,
                                            self.openpose_list, "image/jpeg")

        # Initialize json server
        self.js_server = ResponseServer(self.js_port, "application/json")

        # Initialize image server
        self.image_server = ResponseServer(self.image_port, "image/jpeg")

        # Initialize temperature FIFO length and array
        self.max_t_fifo = []
        self.fifo_size = 15

        self.max_t_face_fifo = []
        self.fifo_face_size = 15

        # Initializing the mask
        self.mask = cv2.imread(config['thermal']['mask_filename'], 1)
        self.mask = cv2.cvtColor(self.mask, cv2.COLOR_BGR2GRAY)
        self.mask = self.mask > 100

        # Create jpeg object
        self.jpeg = TurboJPEG()

    '''
        Connect to thermal camera gigE
    '''

    def connect(self):
        # DEBUG: select file if debug and player are selected
        if self.player_raw:
            print("Read images from file, skip camera connect")
            return True

        # Retrieve singleton reference to system object
        self.system = PySpin.System.GetInstance()

        # Get current library version
        version = self.system.GetLibraryVersion()
        print('Spinnaker Library version: %d.%d.%d.%d' %
              (version.major, version.minor, version.type, version.build))

        # Retrieve list of cameras from the system
        cam_list = self.system.GetCameras()

        # Get camera number
        num_cameras = cam_list.GetSize()

        # Print detected camera number
        print('Number of cameras detected: %d' % num_cameras)

        # Finish if there are no cameras
        if num_cameras == 0:

            # Clear camera list before releasing system
            cam_list.Clear()

            # Release system instance
            self.system.ReleaseInstance()

            print('Not enough cameras!')

            return False

        # Select the configured camera (only one camera is used)
        self.camera = cam_list[self.id]

        # Clear camera list before releasing system
        cam_list.Clear()

        return True

    '''
        Acquire data from remote thermal camera
    '''

    def acquire_process(self):
        if self.player_raw:
            return self.player()

        return self.run_camera()

    '''
        Disconnect from camera and close all
    '''

    def disconnect(self):
        try:
            # Stop data recording
            self.continue_recording = False

            time.sleep(1)

            # Thermal server
            self.thermal_server.disconnect()

            # Openpose server
            self.openpose_server.disconnect()

            # js server
            self.js_server.disconnect()

            # image server
            self.image_server.disconnect()

            # DEBUG: reading raw
            if self.player_raw:
                return

            # Stopping acquisition
            self.camera.EndAcquisition()

            # Wait some time to stop recording
            time.sleep(5)

            # Deinitialize camera
            self.camera.DeInit()

            # Wait some time
            time.sleep(1)

            # Delete camera
            del self.camera

            # Release system instance
            self.system.ReleaseInstance()
        except PySpin.SpinnakerException as ex:
            print('Error: %s' % ex)

    '''
        Get images from the camera, analyze them with OpenPose, and evaluate the temperature inside a box over each face
    '''

    def acquire_images(self, cam, nodemap, nodemap_tldevice):

        sNodemap = cam.GetTLStreamNodeMap()

        # Change bufferhandling mode to NewestOnly
        node_bufferhandling_mode = PySpin.CEnumerationPtr(
            sNodemap.GetNode('StreamBufferHandlingMode'))
        if not PySpin.IsAvailable(
                node_bufferhandling_mode) or not PySpin.IsWritable(
                    node_bufferhandling_mode):
            print('Unable to set stream buffer handling mode. Aborting...')
            return False

        # Retrieve entry node from enumeration node
        node_newestonly = node_bufferhandling_mode.GetEntryByName('NewestOnly')
        if not PySpin.IsAvailable(node_newestonly) or not PySpin.IsReadable(
                node_newestonly):
            print('Unable to set stream buffer handling mode. Aborting...')
            return False

        # Retrieve integer value from entry node
        node_newestonly_mode = node_newestonly.GetValue()

        # Set integer value from entry node as new value of enumeration node
        node_bufferhandling_mode.SetIntValue(node_newestonly_mode)

        print('*** IMAGE ACQUISITION ***\n')

        try:
            # Get acquisition mode
            node_acquisition_mode = PySpin.CEnumerationPtr(
                nodemap.GetNode('AcquisitionMode'))
            if not PySpin.IsAvailable(
                    node_acquisition_mode) or not PySpin.IsWritable(
                        node_acquisition_mode):
                print(
                    'Unable to set acquisition mode to continuous (enum retrieval). Aborting...'
                )
                return False

            # Retrieve entry node from enumeration node
            node_acquisition_mode_continuous = node_acquisition_mode.GetEntryByName(
                'Continuous')
            if not PySpin.IsAvailable(
                    node_acquisition_mode_continuous) or not PySpin.IsReadable(
                        node_acquisition_mode_continuous):
                print(
                    'Unable to set acquisition mode to continuous (entry retrieval). Aborting...'
                )
                return False

            # Retrieve integer value from entry node
            acquisition_mode_continuous = node_acquisition_mode_continuous.GetValue()

            # Set integer value from entry node as new value of enumeration node
            node_acquisition_mode.SetIntValue(acquisition_mode_continuous)

            print('Acquisition mode set to continuous...')

            #  Begin acquiring images
            cam.BeginAcquisition()

            print('Acquiring images...')

            #  Retrieve device serial number for filename
            device_serial_number = ''
            node_device_serial_number = PySpin.CStringPtr(
                nodemap_tldevice.GetNode('DeviceSerialNumber'))

            if PySpin.IsAvailable(
                    node_device_serial_number) and PySpin.IsReadable(
                        node_device_serial_number):
                device_serial_number = node_device_serial_number.GetValue()
                print('Device serial number retrieved as %s...' %
                      device_serial_number)

            # Retrieve and display images
            while self.continue_recording:
                #  Retrieve next received image
                image_result = cam.GetNextImage()

                #  Ensure image completion
                if image_result.IsIncomplete():
                    print('Image incomplete with image status %d ...' %
                          image_result.GetImageStatus())
                    image_result.Release()
                    continue

                # DEBUG: record image on raw sequence
                if self.recorder_raw:
                    self.rec_image(image_result)

                # Analyze image
                self.analyze_image(image_result)

                #  Release image
                image_result.Release()

            #  End acquisition
            cam.EndAcquisition()

            # DEBUG: Close csv file
            if self.record_csv:
                self.csv.flush()
                self.csv.close()

            # DEBUG: Close raw file
            if self.recorder_raw:
                self.raw.flush()
                self.raw.close()

        except PySpin.SpinnakerException as ex:
            print('Error: %s' % ex, flush=True)
            return False

        return True

    '''
        Configure the selected camera and start acquisition
    '''

    def run_camera(self):
        try:
            nodemap_tldevice = self.camera.GetTLDeviceNodeMap()

            # Initialize camera
            self.camera.Init()

            # Retrieve GenICam nodemap
            nodemap = self.camera.GetNodeMap()

            # Retrieve IRFormat node
            node_irformat_mode = PySpin.CEnumerationPtr(
                nodemap.GetNode("IRFormat"))

            # Check if param is available and writable
            if PySpin.IsAvailable(node_irformat_mode) and PySpin.IsWritable(
                    node_irformat_mode):
                # Switch to IR radiation temperature linear mode, 0.01 K resolution
                node_irformat_mode.SetIntValue(2)

                # Read value from IRFormat node
                print("IRFormat:{0}".format(node_irformat_mode.GetIntValue()))

            time.sleep(0.1)
            # Retrieve Width node
            node_width = PySpin.CIntegerPtr(nodemap.GetNode("Width"))
            # Check if param is available and writable
            if PySpin.IsAvailable(node_width) and PySpin.IsWritable(
                    node_width):
                # Width
                node_width.SetValue(self.resolution_x)

                # Print image width
                print("Image width:{0}".format(node_width.GetValue()))

            time.sleep(0.1)
            # Retrieve Height node
            node_height = PySpin.CIntegerPtr(nodemap.GetNode("Height"))
            # Check if param is available and writable
            if PySpin.IsAvailable(node_height) and PySpin.IsWritable(
                    node_height):
                # Set Height
                node_height.SetValue(
                    self.resolution_y
                )  # 246 gives wrong temperatures (6 black lines), while 240 generates incomplete images

                # Print image height
                print("Image height:{0}".format(node_height.GetValue()))

            time.sleep(0.1)
            # Retrieve PixelFormat node
            node_pixelformat = PySpin.CEnumerationPtr(
                nodemap.GetNode("PixelFormat"))
            # Check if param is available and writable
            if PySpin.IsAvailable(node_pixelformat) and PySpin.IsWritable(
                    node_pixelformat):
                # Set Mono16
                node_pixelformat.SetIntValue(
                    node_pixelformat.GetEntryByName("Mono16").GetValue())

                # Print pixel format
                print("PixelFormat:{0}".format(node_pixelformat.GetIntValue()),
                      flush=True)

            time.sleep(0.1)
            # Retrieve Reflected Temperature node
            node_reflected_temperature = PySpin.CFloatPtr(
                nodemap.GetNode("ReflectedTemperature"))
            # Check if param is available and writable
            if PySpin.IsAvailable(
                    node_reflected_temperature) and PySpin.IsWritable(
                        node_reflected_temperature):
                # Set Value
                node_reflected_temperature.SetValue(self.reflected_temperature)

                # Print Reflected Temperature
                print("ReflectedTemperature:{0}".format(
                    node_reflected_temperature.GetValue()),
                      flush=True)

            # Retrieve Atmospheric Temperature node
            node_atmospheric_temperature = PySpin.CFloatPtr(
                nodemap.GetNode("AtmosphericTemperature"))
            # Check if param is available and writable
            if PySpin.IsAvailable(
                    node_atmospheric_temperature) and PySpin.IsWritable(
                        node_atmospheric_temperature):
                # Set Value
                node_atmospheric_temperature.SetValue(
                    self.atmospheric_temperature)

                # Print Atmospheric Temperature
                print("AtmosphericTemperature:{0}".format(
                    node_atmospheric_temperature.GetValue()),
                      flush=True)

            time.sleep(0.1)
            # Retrive Object Emissivity node
            node_object_emissivity = PySpin.CFloatPtr(
                nodemap.GetNode("ObjectEmissivity"))
            # Check if param is available and writable
            if PySpin.IsAvailable(
                    node_object_emissivity) and PySpin.IsWritable(
                        node_object_emissivity):
                # Set Value
                node_object_emissivity.SetValue(self.object_emissivity)

                # Print Object Emissivity
                print("ObjectEmissivity:{0}".format(
                    node_object_emissivity.GetValue()),
                      flush=True)

            time.sleep(0.1)
            # Retrieve and change RelativeHumidity node
            node_relative_humidity = PySpin.CFloatPtr(
                nodemap.GetNode("RelativeHumidity"))
            # Check if param is available and writable
            if PySpin.IsAvailable(
                    node_relative_humidity) and PySpin.IsWritable(
                        node_relative_humidity):
                # Set Value
                node_relative_humidity.SetValue(self.relative_humidity)

                # Print Relative Humidity
                print("Changed RelativeHumidity To {0}".format(
                    node_relative_humidity.GetValue()),
                      flush=True)

            time.sleep(0.1)
            # Retrieve and change ExtOpticsTemperature
            node_extoptics_temperature = PySpin.CFloatPtr(
                nodemap.GetNode("ExtOpticsTemperature"))

            # Check if param is available and writable
            if PySpin.IsAvailable(
                    node_extoptics_temperature) and PySpin.IsWritable(
                        node_extoptics_temperature):
                # Set Value
                node_extoptics_temperature.SetValue(self.extoptics_temperature)

                # Print ExtOpticsTemperature
                print("Changed ExtOpticsTemperature To {0}".format(
                    node_extoptics_temperature.GetValue()),
                      flush=True)

            time.sleep(0.1)
            # Retrieve and change ExtOpticsTransmission
            node_extoptics_transmission = PySpin.CFloatPtr(
                nodemap.GetNode("ExtOpticsTransmission"))

            # Check if param is available and writable
            if PySpin.IsAvailable(
                    node_extoptics_transmission) and PySpin.IsWritable(
                        node_extoptics_transmission):

                # Set Value
                node_extoptics_transmission.SetValue(
                    self.extoptics_transmission)

                # Print ExtOpticsTransmission
                print("ExtOpticsTransmission:{0}".format(
                    node_extoptics_transmission.GetValue()),
                      flush=True)

            time.sleep(0.1)
            # Retrieve and change Estimated Transmission
            node_estimated_transmission = PySpin.CFloatPtr(
                nodemap.GetNode("EstimatedTransmission"))
            if PySpin.IsAvailable(
                    node_estimated_transmission) and PySpin.IsWritable(
                        node_estimated_transmission):

                # Set Value
                node_estimated_transmission.SetValue(
                    self.estimated_transmission)

                # Print EstimatedTransmission
                print("EstimatedTransmission:{0}".format(
                    node_estimated_transmission.GetValue()),
                      flush=True)

            time.sleep(1)

            # Start video servers
            self.thermal_server.activate()

            self.openpose_server.activate()

            # Start json server
            self.js_server.activate()

            # Start image server
            self.image_server.activate()

            # Acquire images
            self.acquire_images(self.camera, nodemap, nodemap_tldevice)

        except PySpin.SpinnakerException as ex:
            print('Error: %s' % ex)

    '''
        Helper function for recovering temperature from raw pixel value
        Get temperature https://graftek.biz/system/files/15690/original/FLIR_Genicam.pdf?1571772310
    '''

    def get_temperature(self, pixel_value):
        temperature = pixel_value * 0.01 - 273.15
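        # Example: a raw Mono16 value of 30965 (centi-Kelvin) corresponds to
        # 30965 * 0.01 - 273.15 = 36.5 degrees Celsius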
        return temperature

    '''
        Helper function for updating a FIFO
    '''

    def update_fifo(self, fifo, fifo_size, new_value):
        if len(fifo) > fifo_size:
            fifo.pop(0)

        fifo.append(new_value)
        return fifo

    '''
        Helper function for collecting average FIFO value
    '''

    def get_fifo_avg(self, fifo, min_len_to_compute=0):
        if len(fifo) > min_len_to_compute:
            return np.average(fifo)
        else:
            return 0

    '''
        Analyze images (and send over network queues)
    '''

    def analyze_image(self, image_result):

        # Get image dimensions
        width = image_result.GetWidth()
        height = image_result.GetHeight()

        # Getting the image data as a numpy array
        image_data = image_result.GetNDArray()

        ## MBMB ->
        # Updating the FIFO
        min_image_temperature = self.get_temperature(
            np.amin(image_data)) + self.delta_temperature

        max_image_temperature = self.get_temperature(
            np.amax(image_data * self.mask)) + self.delta_temperature

        if max_image_temperature > 25:
            self.max_t_fifo = self.update_fifo(self.max_t_fifo, self.fifo_size,
                                               max_image_temperature)
        else:
            self.max_t_fifo = []

        temp_smooth = self.get_fifo_avg(self.max_t_fifo, self.fifo_size)
        ## <- MBMB
        '''
            Calculate image to send via mjpeg to remote client and openpose compliant image
        '''
        # Convert image to BGR, using threshold temperatures (manual parameters)
        in_img = image_result.GetData().reshape((height, width))

        temp_max_thr = self.max_temperature  # Max temperature
        temp_min_thr = self.min_temperature  # Min temperature

        # Calculate thresholds
        pixel_max_thr = int((temp_max_thr + 273.15) / 0.01)
        pixel_min_thr = int((temp_min_thr + 273.15) / 0.01)

        # Threshold image
        in_img_rw = copy.deepcopy(in_img)
        in_img_rw[in_img_rw > pixel_max_thr] = pixel_max_thr
        in_img_rw[in_img_rw < pixel_min_thr] = pixel_min_thr
        in_img_rw[0, 0] = pixel_max_thr
        in_img_rw[0, 1] = pixel_min_thr
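        # The two anchor pixels above pin the array's min and max, so the
        # cv2.normalize call below always maps the same temperature window
        # onto the full 0-255 range, frame after frame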

        # Normalize image
        raw_frame = cv2.normalize(in_img_rw,
                                  None,
                                  0,
                                  255,
                                  cv2.NORM_MINMAX,
                                  dtype=cv2.CV_8U)

        # Crop the frame: drop the unused lines (rows) at the bottom
        raw_frame = raw_frame[0:self.resolution_y - self.unused_lines,
                              0:self.resolution_x]

        # Invert levels
        gray_inverted = cv2.bitwise_not(raw_frame)

        # Convert inverted grayscale to Color RGB format (openpose input)
        image_openpose = cv2.cvtColor(gray_inverted, cv2.COLOR_GRAY2BGR)

        # Colorize Image (Use it to write geometry and send to streaming)
        to_send_image = cv2.applyColorMap(raw_frame, cv2.COLORMAP_JET)

        ## MBMB ->
        # DEBUG: Plotting the location of the max temperature
        if self.debug:
            # Trace circles on minimal and maximum temperature
            coords_max = np.unravel_index(
                np.argmax(image_data * self.mask, axis=None), image_data.shape)
            cv2.circle(to_send_image, (coords_max[1], coords_max[0]),
                       radius=10,
                       color=(255, 255, 255),
                       thickness=2)

            coords_min = np.unravel_index(np.argmin(image_data, axis=None),
                                          image_data.shape)
            cv2.circle(to_send_image, (coords_min[1], coords_min[0]),
                       radius=10,
                       color=(0, 0, 0),
                       thickness=2)

            # Print Temperatures
            if temp_smooth > 0:
                text_str = 'Max T: {:.2f}C - Min T: {:.2f}C - Smooth: {:.2f}C'.format(
                    self.get_temperature(np.amax(image_data * self.mask)),
                    self.get_temperature(np.amin(image_data)), temp_smooth)
            else:
                text_str = 'Max T: {:.2f}C - Min T: {:.2f}C'.format(
                    self.get_temperature(np.amax(image_data * self.mask)),
                    self.get_temperature(np.amin(image_data)))

            font_temperature = cv2.FONT_HERSHEY_DUPLEX
            font_scale = self.font_scale
            font_thickness = 1
            color = (255, 255, 255)

            text_w, text_h = cv2.getTextSize(text_str, font_temperature,
                                             font_scale, font_thickness)[0]

            px = int(5)
            py = int(5)

            # Draw text rectangle
            cv2.rectangle(to_send_image, (px - 5, py - 5),
                          (px + text_w + 5, py + text_h + 5), color, -1)

            # Draw Text
            cv2.putText(to_send_image, text_str, (px, py + text_h),
                        font_temperature, font_scale, (0, 0, 0),
                        font_thickness, cv2.LINE_AA)
        ## <- MBMB

        # Set image to openpose video
        self.datum.cvInputData = image_openpose

        self.opWrapper.emplaceAndPop([self.datum])

        # Get openpose output (convert to int all value)
        bodys = np.array(self.datum.poseKeypoints).astype(int).tolist()

        # record only one thermal shot
        one_snapshot = True

        # Open csv
        if self.record_csv:
            self.csv = open(self.filename_csv, "a")

        # Face geometry and temperature container
        js_packet = {}

        # Json dataset
        body_packet = []

        # If a body is recognized
        if type(bodys) is list:
            # Remove probability from joints
            temporary_bodys = []
            for body in bodys:
                temporary_bodys.append([reduced[0:2] for reduced in body])

            bodys = temporary_bodys

            for body in bodys:
                # Face points (0, 15, 16, 17, 18) referred to the BODY_25 OpenPose format
                face = [
                    [int(body[0][0]), int(body[0][1])],  # Nose
                    [int(body[15][0]), int(body[15][1])],  # Right eye
                    [int(body[16][0]), int(body[16][1])],  # Left eye
                    [int(body[17][0]), int(body[17][1])],  # Right ear
                    [int(body[18][0]), int(body[18][1])]
                ]  # Left ear

                # Select the best face size
                if 0 not in face[0] and 0 not in face[1] and 0 not in face[2]:
                    # Estimate the face half-size from the available keypoints
                    if (0 not in face[4]) and (
                            0 not in face[3]):  # Both ears visible
                        size_x = int(abs(face[3][0] - face[4][0]) / 2)
                        size_y = int(abs(face[3][0] - face[4][0]) / 2)
                    elif (0 in face[4]) and (
                            0 not in face[3]):  # Right ear visible, no left ear
                        size_x = int(abs(face[3][0] - face[2][0]) / 2)
                        size_y = int(abs(face[3][0] - face[2][0]) / 2)
                    elif (0 not in face[4]) and (
                            0 in face[3]):  # Left ear visible, no right ear
                        size_x = int(abs(face[1][0] - face[4][0]) / 2)
                        size_y = int(abs(face[1][0] - face[4][0]) / 2)
                    else:  # Neither ear is visible
                        size_x = int(abs(face[1][0] - face[2][0]) / 2)
                        size_y = int(abs(face[1][0] - face[2][0]) / 2)

                    # Set min face size x
                    min_sx = self.min_sizex

                    # Set min face size y
                    min_sy = self.min_sizey

                    # If the face is too small, use the minimum size
                    size_x = size_x if size_x > min_sx else min_sx
                    size_y = size_y if size_y > min_sy else min_sy

                    # Set face center
                    reference_x = face[0][0]
                    reference_y = face[0][1]

                    offset_x = 0
                    offset_y = 0

                    # Calculate average values in face rect
                    counter = 0
                    average = 0
                    max_temperature = 0.0
                    for y in range(reference_x - size_x + offset_x,
                                   reference_x + size_x + offset_x):
                        for x in range(reference_y - size_y + offset_y,
                                       reference_y + size_y + offset_y):
                            # Get temperature https://graftek.biz/system/files/15690/original/FLIR_Genicam.pdf?1571772310
                            if x < self.resolution_y and y < self.resolution_x:
                                temperature = self.get_temperature(
                                    image_data[x][y])

                                # Find max temperature
                                if temperature > max_temperature:
                                    max_temperature = temperature

                                average += temperature
                                counter += 1

                    # Calculate average
                    if counter != 0:
                        temperature = average / counter

                    # Compensate uncalibrated temperature
                    temperature += self.delta_temperature
                    max_temperature += self.delta_temperature

                    # Filter out faces whose temperature is too low (detection error)
                    if temperature < self.min_detection_temperature:
                        continue

                    # Filter out faces whose temperature is too high (detection error)
                    if temperature > self.max_detection_temperature:
                        continue

                    # json alarm flag
                    alarm = 0

                    # Alarm temperature show red color rectangle
                    if max_temperature > self.alarm_temperature:
                        color = (0, 0, 255)
                        alarm = 1
                    else:
                        color = (255, 0, 0)
                        alarm = 0

                    # Draw face Rectangle
                    cv2.rectangle(to_send_image,
                                  (reference_x - size_x + offset_x,
                                   reference_y - size_y + offset_y),
                                  (reference_x + size_x + offset_x,
                                   reference_y + size_y + offset_y), color, 5)

                    # Write temperature
                    ## MBMB

                    text_str = '{0:.2f}C'.format(max_temperature)
                    font_temperature = cv2.FONT_HERSHEY_DUPLEX
                    font_scale = self.font_scale
                    font_thickness = 1

                    text_w, text_h = cv2.getTextSize(text_str,
                                                     font_temperature,
                                                     font_scale,
                                                     font_thickness)[0]

                    px = int(reference_x)
                    py = int(reference_y + size_y / 2)

                    # Draw text rectangle
                    cv2.rectangle(to_send_image, (px, py),
                                  (px + text_w, py - text_h), color, -1)

                    # Draw Text
                    cv2.putText(to_send_image, text_str, (px, py),
                                font_temperature, font_scale, (255, 255, 255),
                                font_thickness, cv2.LINE_AA)

                    # Get right eye temperature from the thermal image
                    righteye_temperature = self.get_temperature(
                        image_data[face[1][1]][
                            face[1][0]]) + self.delta_temperature

                    cv2.circle(to_send_image, (face[1][0], face[1][1]), 2,
                               color, 2)

                    cv2.putText(to_send_image,
                                "{0:.2f}".format(righteye_temperature),
                                (face[1][0], face[1][1]), font_temperature,
                                font_scale / 2, (255, 255, 255),
                                font_thickness, cv2.LINE_AA)

                    # Get left eye temperature from the thermal image
                    lefteye_temperature = self.get_temperature(
                        image_data[face[2][1]][
                            face[2][0]]) + self.delta_temperature

                    cv2.circle(to_send_image, (face[2][0], face[2][1]), 2,
                               color, 2)

                    cv2.putText(to_send_image,
                                "{0:.2f}".format(lefteye_temperature),
                                (face[2][0], face[2][1]), font_temperature,
                                font_scale / 2, (255, 255, 255),
                                font_thickness, cv2.LINE_AA)

                    # Print data
                    ts = int(round(time.time() * 1000))

                    dt_string = "{0:.2f},{1:.2f},{2:.2f},{3:.2f},{4:.2f},{5:.2f},{6:.2f},{7}\n".format(
                        temperature, max_temperature, min_image_temperature,
                        max_image_temperature, lefteye_temperature,
                        righteye_temperature, temp_smooth, ts)

                    print(dt_string, end="", flush=True)

                    if self.record_image and one_snapshot:
                        f = open(self.record_dir + "/" + str(ts) + ".raw",
                                 "wb")
                        f.write(image_result.GetData())
                        f.close()
                        one_snapshot = False

                    if self.record_csv:
                        self.csv.write(dt_string)
                        self.csv.flush()

                    body_packet.append([
                        body, reference_x, reference_y, size_x, size_y,
                        "{0:.2f}".format(max_temperature), alarm
                    ])

        # Store face geometry
        js_packet["geometries"] = body_packet

        if self.show_video:
            # Show openpose
            cv2.imshow("Openpose output", self.datum.cvOutputData)

            # Show mjpeg output
            cv2.imshow("Mjpeg colorized", to_send_image)

            # Handle signals and wait some time
            cv2.waitKey(1)

        # Get timestamp
        ts = int(round(time.time() * 1000))

        # Store timestamp
        js_packet["ts"] = ts

        # Put thermal image into queue for each server thread
        self.send_image(self.thermal_list, to_send_image, ts)

        # Put openpose image into queue for each server thread
        self.send_image(self.openpose_list, self.datum.cvOutputData, ts)

        # Put json into instant locked memory
        self.js_server.put(bytes(json.dumps(js_packet), "UTF-8"))

        # Put image into instant locked memory
        self.image_server.put(
            self.jpeg.encode(to_send_image, quality=self.compression))

    '''
        Send image over queue list and then over http mjpeg stream
    '''

    def send_image(self, queue_list, image, ts):

        encoded_image = self.jpeg.encode(image, quality=self.compression)
        # Put thermal image into queue for each server thread
        for q in queue_list:
            try:
                block = (ts, encoded_image)
                q.put(block, True, 0.02)
            except queue.Full:
                pass

    '''
        Send block over queue list and then over http mjpeg stream
    '''

    def send_jsdata(self, queue_list, js_data, ts):
        for q in queue_list:
            try:
                block = (ts, js_data)
                q.put(block, True, 0.02)
            except queue.Full:
                pass

    '''
        DEBUG: Record images on raw stream
    '''

    def rec_image(self, image_result):
        # Write the raw pixel buffer, not the PySpin image object itself
        self.raw.write(image_result.GetData())

    '''
        DEBUG: Play images directly from raw file
    '''

    def player(self):
        # Start video servers
        self.thermal_server.activate()

        self.openpose_server.activate()

        # Start json server
        self.js_server.activate()

        # Start image server
        self.image_server.activate()

        time.sleep(1)

        while self.continue_recording:
            # Read image from file raw
            image_result = self.raw.read(self.resolution_x *
                                         self.resolution_y * 2)

            # If file is eof, rewind
            if len(image_result) != self.resolution_x * self.resolution_y * 2:
                print("Eof, Rewind!", flush=True)
                self.raw.seek(0)
                continue

            # Create a PySpin image from the raw Mono16 buffer
            image = PySpin.Image.Create(self.resolution_x, self.resolution_y,
                                        0, 0, PySpin.PixelFormat_Mono16,
                                        np.frombuffer(image_result, np.uint16))

            # Analyze image
            self.analyze_image(image)

            # Simulate real acquisition
            time.sleep(0.02)

        self.raw.close()
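The thermal-to-visual conversion inside analyze_image boils down to three steps: clamp the Mono16 centi-Kelvin values to a fixed temperature window, scale to 8 bits, and apply a colormap. A standalone sketch on synthetic data (frame contents and window bounds are hypothetical):

import cv2
import numpy as np

# synthetic Mono16 frame in centi-Kelvin (roughly 25 C to 40 C)
frame = np.random.randint(29815, 31315, size=(240, 320), dtype=np.uint16)

t_min, t_max = 20.0, 40.0                    # display window in Celsius
p_min = int((t_min + 273.15) / 0.01)         # window bounds in raw units
p_max = int((t_max + 273.15) / 0.01)

# clamp to the window and scale linearly onto 0-255
clipped = np.clip(frame.astype(np.float32), p_min, p_max)
gray = ((clipped - p_min) * (255.0 / (p_max - p_min))).astype(np.uint8)

# same colormap as the example above
colorized = cv2.applyColorMap(gray, cv2.COLORMAP_JET)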
Example #21
import multiprocessing as mp
import os
import time

import cv2
from turbojpeg import TurboJPEG

# jpeg = TurboJPEG('/usr/local/lib/libturbojpeg.dylib')  # macOS variant
def absoluteFilePaths(directory):
    dirlist = []
    for dirpath, _, filenames in os.walk(directory):
        for f in filenames:
            dirlist.append(os.path.abspath(os.path.join(dirpath, f)))

    return dirlist


# decoding input.jpg to BGR array
def img_decode(f):
    in_file = open(f, 'rb')
    bgr_array = jpeg.decode(in_file.read())
    bgr_array = cv2.resize(bgr_array, (224, 224))
    in_file.close()
    return bgr_array


# explicit library path (use TurboJPEG() when the library is installed system-wide)
jpeg = TurboJPEG('../libjpeg-turbo/lib/libturbojpeg.so')

file_dir = absoluteFilePaths("/tmp/ilsvrc12_img_val/")
elapsed = time.time()
numiter = 50
num_proc = mp.cpu_count()  # assumed worker count: one process per CPU core
p = mp.Pool(processes=num_proc)
for i in range(numiter):
    result = p.map(img_decode, file_dir)

print("time:", (time.time() - elapsed) / (len(file_dir) * numiter))
Example #22
File: tools.py Project: Zepyhrus/hie
jpeg = TurboJPEG()


def imread(image):
    try:
        # decode must be called on a TurboJPEG instance, not on the class
        with open(image, 'rb') as _i:
            return jpeg.decode(_i.read())
    except Exception:
        # non-JPEG input or decode failure: fall back to OpenCV
        return cv2.imread(image)
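A hypothetical usage of the helper above (file names are placeholders): JPEG files go through libjpeg-turbo, while anything else, or any decode failure, falls back to cv2.imread:

img = imread('photo.jpg')   # decoded by TurboJPEG as a BGR array
png = imread('mask.png')    # decode raises, so OpenCV reads it instead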
Example #23
 def __init__(self):
     self.jpeg = TurboJPEG('/usr/lib/libturbojpeg.so')
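The explicit path is only needed when libturbojpeg lives in a non-standard location; called with no argument, TurboJPEG() searches the platform's default library paths. The paths below are examples and vary per system:

from turbojpeg import TurboJPEG

jpeg = TurboJPEG()  # search default locations
# jpeg = TurboJPEG('/usr/lib/x86_64-linux-gnu/libturbojpeg.so.0')       # e.g. Debian/Ubuntu
# jpeg = TurboJPEG('/usr/local/opt/jpeg-turbo/lib/libturbojpeg.dylib')  # e.g. Homebrew on macOS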
Example #24
    def __init__(self, args):
        # Ratio params
        horizontal_ratio = float(args[0].horizontal_ratio)
        vertical_ratio = float(args[0].vertical_ratio)

        # Check video
        if args[0].video != "enabled" and args[0].video != "disabled":
            print("Error: set correct video mode, enabled or disabled",
                  flush=True)
            sys.exit(-1)

        # Check image
        if args[0].image != "enabled" and args[0].image != "disabled":
            print("Error: set correct image mode, enabled or disabled",
                  flush=True)
            sys.exit(-1)

        # Convert args to boolean
        self.use_video = args[0].video == "enabled"

        self.use_image = args[0].image == "enabled"

        self.use_preview = args[0].preview == "enabled"

        # Unable to use video and image mode at same time
        if self.use_video and self.use_image:
            print(
                "Error: unable to use video and image mode at the same time!",
                flush=True)
            sys.exit(-1)

        # At least one of video or image mode must be enabled
        if not self.use_video and not self.use_image:
            print("Error: enable either video or image mode!", flush=True)
            sys.exit(-1)

        self.streaming = args[0].streaming == "enabled"

        if self.use_video:
            # Open video capture
            self.cap = cv2.VideoCapture(args[0].stream_in)

            if not self.cap.isOpened():
                print("Error: Opening video stream or file {0}".format(
                    args[0].stream_in),
                      flush=True)
                sys.exit(-1)

            # Get input size
            width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            if not self.streaming:
                # Open video output (if output is not an image)
                self.out = cv2.VideoWriter(
                    args[0].stream_out,
                    cv2.VideoWriter_fourcc(*args[0].encoding_codec),
                    int(self.cap.get(cv2.CAP_PROP_FPS)), (width, height))

                if self.out is None:
                    print("Error: Unable to open output video file {0}".format(
                        args[0].stream_out),
                          flush=True)
                    sys.exit(-1)

            # Get image size
            im_size = (width, height)

        if self.use_image:
            self.image = cv2.imread(args[0].image_in)
            if self.image is None:
                print("Error: Unable to open input image file {0}".format(
                    args[0].image_in),
                      flush=True)
                sys.exit(-1)

            self.image_out = args[0].image_out

            # Get image size
            im_size = (self.image.shape[1], self.image.shape[0])

        # Compute homography
        self.homography_matrix = self.compute_homography(
            horizontal_ratio, vertical_ratio, im_size)

        self.background_masked = False
        # Open the background image, if necessary
        if args[0].masked == "enabled":
            # Set masked flag
            self.background_masked = True

            # Load static background
            self.background_image = cv2.imread(args[0].background_in)

            # Exit if the background is required but could not be loaded
            if self.background_image is None:
                print("Error: Unable to load background image (flag enabled)",
                      flush=True)
                sys.exit(-1)

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()

        # Openpose params

        # Model path
        params["model_folder"] = args[0].openpose_folder

        # Face disabled
        params["face"] = False

        # Hand disabled
        params["hand"] = False

        # Net Resolution
        params["net_resolution"] = args[0].net_size

        # Gpu number
        params["num_gpu"] = 1  # Set GPU number

        # Gpu Id
        # Set GPU start id (not considering previous)
        params["num_gpu_start"] = 0

        # Starting OpenPose
        self.opWrapper = op.WrapperPython()
        self.opWrapper.configure(params)
        self.opWrapper.start()

        # Process Image
        self.datum = op.Datum()

        # Json server
        self.dt_vector = {}

        # Client list
        self.stream_list = []

        if self.streaming:
            # Initialize stream server
            self.video_server = StreamServer(int(args[0].video_port),
                                             self.stream_list, "image/jpeg")
            self.video_server.activate()

            # Initialize json server
            self.js_server = ResponseServer(int(args[0].js_port),
                                            "application/json")
            self.js_server.activate()

        # turbo jpeg initialization
        self.jpeg = TurboJPEG()

        # Calibrate height value
        self.calibrate = float(args[0].calibration)

        # Actually unused
        self.ellipse_angle = 0

        # Body confidence threshold
        self.body_th = float(args[0].body_threshold)

        # Show confidence
        self.show_confidence = args[0].show_confidence == "enabled"

        # Set mask precision (mask division factor)
        self.overlap_precision = int(args[0].overlap_precision)

        # Clamp the user value to the supported range [1, 16]
        self.overlap_precision = 16 if self.overlap_precision > 16 else self.overlap_precision
        self.overlap_precision = 1 if self.overlap_precision < 1 else self.overlap_precision
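compute_homography here is project-specific, but the general pattern of building a plane-to-plane mapping can be illustrated with OpenCV; the point correspondences below are purely hypothetical:

import cv2
import numpy as np

# map the corners of a 640x480 image onto a scaled-down quadrilateral
src = np.float32([[0, 0], [639, 0], [639, 479], [0, 479]])
dst = np.float32([[0, 0], [639 * 0.7, 0], [639 * 0.7, 479 * 0.5], [0, 479 * 0.5]])

H, _ = cv2.findHomography(src, dst)

# warp a single point (or a whole image, via cv2.warpPerspective) through H
pt = cv2.perspectiveTransform(np.float32([[[320, 240]]]), H)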
Example #25
import sys
import numpy as np
import CanonLib
from ui_canon_test import Ui_MainWindow
from PyQt5 import QtCore, QtGui, QtWidgets
import cv2
import time
from turbojpeg import TurboJPEG
jpeg = TurboJPEG("turbojpeg.dll")
from ThorlabsKST101 import *
from threading import Timer
import extraLib

global liveImage
global Z_axis, pos_Z


class GUIMainWindow(Ui_MainWindow, QtWidgets.QMainWindow):
    def __init__(self):
        # bind the module-level Z_axis; without this, Z_axis below is local
        global Z_axis
        super().__init__()
        self.setupUi(self)
        self.label_image_show.setScaledContents(True)
        self.show()
        self.init_UI()

        Z_axis = Motor('26000236')
        Z_axis.connect()
        time.sleep(0.5)
        Z_axis.set_vel_params(100000, 1000000)
        Z_axis.start_polling(50)
Example #26
#!/usr/bin/env python
#
# // SPDX-License-Identifier: BSD-3-CLAUSE
#
# (C) Copyright 2018, Xilinx, Inc.
#
from turbojpeg import TurboJPEG
import cv2
import os
dir_path = os.path.dirname(os.path.realpath(__file__))

lib_jpeg_turbo = TurboJPEG(dir_path + "/lib/libturbojpeg.so")


def imread(f):
    img = None
    try:
        with open(f, 'rb') as in_file:
            img = lib_jpeg_turbo.decode(in_file.read())
        # img = cv2.imread(f)
    except Exception as e:
        print(e)
        print("Falling back to OpenCV JPEG decode ...")
        img = cv2.imread(f)

    return img
Example #27
import os
from typing import Callable, Optional

import cv2
import numpy as np
import torch
from turbojpeg import TJPF_GRAY, TurboJPEG

# local_libturbo_path (the path to libturbojpeg) is assumed to be defined
# elsewhere in the project, as in the earlier examples


class SegmentationImageFolder:
    def __init__(
            self,
            root: str,
            image_folder: str,
            mask_folder: str,
            transforms: Optional[Callable] = None,
    ):
        self.image_folder = os.path.join(root, image_folder)
        self.mask_folder = os.path.join(root, mask_folder)
        self.imgs = list(sorted(os.listdir(self.image_folder)))
        self.masks = list(sorted(os.listdir(self.mask_folder)))
        self.transforms = transforms
        self.jpeg = TurboJPEG(lib_path=local_libturbo_path)

    def __getitem__(self, idx: int):
        img_path = os.path.join(self.image_folder, self.imgs[idx])
        mask_path = os.path.join(self.mask_folder, self.masks[idx])

        # img = Image.open(img_path).convert("RGB")
        # note that we haven't converted the mask to RGB,
        # because each color corresponds to a different instance
        # with 0 being background
        if img_path.endswith(".jpg") or img_path.endswith(".jpeg"):
            with open(img_path, 'rb') as fd:
                img = self.jpeg.decode(fd.read())
        else:
            img = cv2.imread(img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        if mask_path.endswith(".jpg") or mask_path.endswith(".jpeg"):
            with open(mask_path, 'rb') as fd:
                mask = self.jpeg.decode(fd.read(), pixel_format=TJPF_GRAY)
        else:
            mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
        mask = np.array(mask)

        # instances are encoded as different colors
        obj_ids = np.unique(mask)
        # first id is the background, so remove it
        obj_ids = obj_ids[1:]

        # split the color-encoded mask into a set
        # of binary masks
        masks = mask == obj_ids[:, None, None]

        # get bounding box coordinates for each mask
        num_objs = len(obj_ids)
        boxes = []
        area = []
        for i in range(num_objs):
            pos = np.where(masks[i])
            xmin = np.min(pos[1])
            xmax = np.max(pos[1])
            ymin = np.min(pos[0])
            ymax = np.max(pos[0])
            boxes.append([xmin, ymin, xmax, ymax])
            area.append((ymax - ymin) * (xmax - xmin))

        target = {}
        target["boxes"] = torch.as_tensor(boxes, dtype=torch.float32)
        # there is only one class
        target["labels"] = torch.ones((num_objs,), dtype=torch.int64)
        target["masks"] = torch.as_tensor(masks, dtype=torch.uint8)
        target["image_id"] = torch.tensor([idx])
        target["area"] = torch.as_tensor(area, dtype=torch.float32)
        # suppose all instances are not crowd
        target["iscrowd"] = torch.zeros((num_objs,), dtype=torch.int64)

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target

    def __len__(self):
        return len(self.imgs)
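A minimal sketch of consuming this dataset; the root and folder names are hypothetical, and the lambda collate function keeps the variable-size detection targets as tuples instead of trying to stack them:

from torch.utils.data import DataLoader

dataset = SegmentationImageFolder(root='data/pennfudan',
                                  image_folder='PNGImages',
                                  mask_folder='PedMasks')

loader = DataLoader(dataset, batch_size=2, shuffle=True,
                    collate_fn=lambda batch: tuple(zip(*batch)))

imgs, targets = next(iter(loader))  # tuples of images and target dicts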