Example #1
def _create_consumer(competition, competition_config):
    ''' Starts the consumer for streams of predictions sent by users.'''
    options = (('grpc.so_reuseport', 1),)
    grpc_server = StreamServer('0.0.0.0:50051', options=options)
    try:
        streamer = DataStreamerServicer(SERVER_HOST, competition, competition_config)
        grpc_server.add_server(streamer, competition)
        grpc_server.start_server()
        grpc_server._wait_forever()
    except Exception as e:
        logging.debug("Inside consumer process: {}".format(e))
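
Since _wait_forever() blocks, the consumer is typically launched in its own process. A minimal sketch, assuming competition and competition_config are provided by the surrounding application (not shown here):

import multiprocessing

def start_consumer_process(competition, competition_config):
    # _create_consumer() blocks in _wait_forever(), so run it in a
    # separate daemon process and return the handle to the caller
    p = multiprocessing.Process(target=_create_consumer,
                                args=(competition, competition_config))
    p.daemon = True
    p.start()
    return p
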
class SocialDistancing:
    colors = [(0, 255, 0), (0, 0, 255)]

    nd_color = [(153, 0, 51), (153, 0, 0), (153, 51, 0), (153, 102, 0),
                (153, 153, 0), (102, 153, 0), (51, 153, 0), (0, 153, 0),
                (0, 102, 153), (0, 153, 51), (0, 153, 102), (0, 153, 153),
                (0, 102, 153), (0, 51, 153), (0, 0, 153), (153, 0, 102),
                (102, 0, 153), (153, 0, 153), (102, 0, 153), (0, 0, 153),
                (0, 0, 153), (0, 0, 153), (0, 153, 153), (0, 153, 153),
                (0, 153, 153)]

    connections = [(0, 16), (0, 15), (16, 18), (15, 17), (0, 1), (1, 2),
                   (2, 3), (3, 4), (1, 5), (5, 6), (6, 7), (1, 8), (8, 9),
                   (9, 10), (10, 11), (8, 12), (12, 13), (13, 14), (11, 24),
                   (11, 22), (22, 23), (14, 21), (14, 19), (19, 20)]
    '''
        Initialize Object
    '''
    def __init__(self, args):
        # Ratio params
        horizontal_ratio = float(args[0].horizontal_ratio)
        vertical_ratio = float(args[0].vertical_ratio)

        # Check video
        if args[0].video != "enabled" and args[0].video != "disabled":
            print("Error: set correct video mode, enabled or disabled")
            sys.exit(-1)

        # Check image
        if args[0].image != "enabled" and args[0].image != "disabled":
            print("Error: set correct image mode, enabled or disabled")
            sys.exit(-1)

        # Convert args to booleans
        self.use_video = args[0].video == "enabled"

        self.use_image = args[0].image == "enabled"

        self.use_preview = args[0].preview == "enabled"

        # Video and image mode cannot be used at the same time
        if self.use_video and self.use_image:
            print(
                "Error: unable to use video and image mode at the same time!")
            sys.exit(-1)

        # At least one of video or image mode must be enabled
        if not self.use_video and not self.use_image:
            print("Error: enable either video or image mode!")
            sys.exit(-1)

        self.streaming = args[0].streaming == "enabled"

        if self.use_video:
            # Open video capture
            self.cap = cv2.VideoCapture(args[0].stream_in)

            if not self.cap.isOpened():
                print("Error: Opening video stream or file {0}".format(
                    args[0].stream_in))
                sys.exit(-1)

            # Get input size
            width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            if not self.streaming:
                # Open video output (if output is not an image)
                self.out = cv2.VideoWriter(args[0].stream_out,
                                           cv2.VideoWriter_fourcc(*'XVID'),
                                           int(self.cap.get(cv2.CAP_PROP_FPS)),
                                           (width, height))

                # VideoWriter() never returns None; check isOpened() instead
                if not self.out.isOpened():
                    print("Error: Unable to open output video file {0}".format(
                        args[0].stream_out))
                    sys.exit(-1)

            # Get image size
            im_size = (width, height)

        if self.use_image:
            self.image = cv2.imread(args[0].image_in)
            if self.image is None:
                print("Error: Unable to open input image file {0}".format(
                    args[0].image_in))
                sys.exit(-1)

            self.image_out = args[0].image_out

            # Get image size
            im_size = (self.image.shape[1], self.image.shape[0])

        # Compute homography
        self.homography_matrix = self.compute_homography(
            horizontal_ratio, vertical_ratio, im_size)

        self.background_masked = False
        # Open the background image, if necessary
        if args[0].masked == "enabled":
            # Set masked flag
            self.background_masked = True

            # Load static background
            self.background_image = cv2.imread(args[0].background_in)

            # Exit if a background is required but could not be loaded
            if self.background_image is None:
                print("Error: Unable to load background image (flag enabled)")
                sys.exit(-1)

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()

        # Openpose params

        # Model path
        params["model_folder"] = args[0].openpose_folder

        # Face disabled
        params["face"] = False

        # Hand disabled
        params["hand"] = False

        # Net Resolution
        params["net_resolution"] = args[0].net_size

        # Gpu number
        params["num_gpu"] = 1  # Set GPU number

        # Gpu Id
        # Set GPU start id (not considering previous)
        params["num_gpu_start"] = 0

        # Starting OpenPose
        self.opWrapper = op.WrapperPython()
        self.opWrapper.configure(params)
        self.opWrapper.start()

        # Datum container used to pass images to OpenPose
        self.datum = op.Datum()

        # Json data vector sent to clients
        self.dt_vector = {}

        # Client list
        self.stream_list = []

        if self.streaming:
            # Initialize video server
            self.video_server = StreamServer(int(args[0].video_port),
                                             self.stream_list, "image/jpeg")
            self.video_server.activate()

            # Initialize json server
            self.js_server = ResponseServer(int(args[0].js_port),
                                            "application/json")
            self.js_server.activate()

        # turbo jpeg initialization
        self.jpeg = TurboJPEG()

        # Height calibration value
        self.calibrate = float(args[0].calibration)

        # Actually unused
        self.ellipse_angle = 0

        # Body confidence threshold
        self.body_th = float(args[0].body_threshold)

        # Show confidence
        self.show_confidence = args[0].show_confidence == "enabled"

    '''
        Draw skeleton
    '''

    def draw_skeleton(self, frame, keypoints, colour):
        # Note: per-connection colors come from nd_color; 'colour' is unused
        for keypoint_id1, keypoint_id2 in self.connections:
            x1, y1 = keypoints[keypoint_id1]
            x2, y2 = keypoints[keypoint_id2]

            # Skip connections with undetected joints (reported as 0)
            if 0 in (x1, y1, x2, y2):
                continue

            pt1 = int(round(x1)), int(round(y1))
            pt2 = int(round(x2)), int(round(y2))

            cv2.circle(frame,
                       center=pt1,
                       radius=4,
                       color=self.nd_color[keypoint_id2],
                       thickness=-1)
            cv2.line(frame,
                     pt1=pt1,
                     pt2=pt2,
                     color=self.nd_color[keypoint_id2],
                     thickness=2)

    '''
        Compute skeleton bounding box
    '''

    def compute_simple_bounding_box(self, skeleton):
        x = skeleton[::2]
        x = np.where(x == 0.0, np.nan, x)
        left, right = int(round(np.nanmin(x))), int(round(np.nanmax(x)))
        y = skeleton[1::2]
        y = np.where(y == 0.0, np.nan, y)
        top, bottom = int(round(np.nanmin(y))), int(round(np.nanmax(y)))
        return left, right, top, bottom
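
    # Worked example (hypothetical values): for a flattened skeleton
    # np.array([10., 20., 0., 0., 30., 5.]) the zero joint (0., 0.) becomes
    # NaN and is ignored, giving left=10, right=30, top=5, bottom=20.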

    '''
        Compute homography
    '''

    def compute_homography(self, H_ratio, V_ratio, im_size):
        rationed_height = im_size[1] * V_ratio
        rationed_width = im_size[0] * H_ratio
        # Source quad: the four corners of the full image
        src = np.array([[0, 0], [0, im_size[1]], [im_size[0], im_size[1]],
                        [im_size[0], 0]])
        # Destination quad: a trapezoid whose top edge is narrowed and pushed
        # down, approximating the ground-plane perspective
        dst = np.array([[0 + rationed_width / 2, 0 + rationed_height],
                        [0, im_size[1]], [im_size[0], im_size[1]],
                        [im_size[0] - rationed_width / 2, 0 + rationed_height]],
                       np.int32)
        h, status = cv2.findHomography(src, dst)
        return h
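
    # Worked example (hypothetical values): with im_size=(640, 480) and both
    # ratios 0.5, the image corners map to the trapezoid (160, 240), (0, 480),
    # (640, 480), (480, 240): the top edge narrows by half and drops to
    # mid-height, so points higher in the frame count as farther away.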

    '''
        Compute overlap
    '''

    def compute_overlap(self, rect_1, rect_2):
        x_overlap = max(0,
                        min(rect_1[1], rect_2[1]) - max(rect_1[0], rect_2[0]))
        y_overlap = max(0,
                        min(rect_1[3], rect_2[3]) - max(rect_1[2], rect_2[2]))
        overlap_area = x_overlap * y_overlap
        return overlap_area > 0
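
    # Worked example (hypothetical values): rectangles are [left, right, top,
    # bottom], so rect_1=[0, 10, 0, 10] and rect_2=[5, 15, 5, 15] overlap
    # (x_overlap=5, y_overlap=5 -> True), while rect_2=[20, 30, 0, 10]
    # gives x_overlap=0 -> False.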

    '''
        Trace results
    '''

    def trace(self, image, skeletal_coordinates, draw_ellipse_requirements,
              is_skeletal_overlapped):
        bodys = []

        # Trace ellipses and bodies on the target image; enumerate keeps the
        # index aligned with the per-body buffers even when a body is skipped
        for i, skeletal_coordinate in enumerate(skeletal_coordinates[0]):
            if float(skeletal_coordinates[1][i]) < self.body_th:
                continue

            # Trace ellipse
            cv2.ellipse(image, (int(draw_ellipse_requirements[i][0]),
                                int(draw_ellipse_requirements[i][1])),
                        (int(draw_ellipse_requirements[i][2]),
                         int(draw_ellipse_requirements[i][3])), 0, 0, 360,
                        self.colors[int(is_skeletal_overlapped[i])], 3)

            # Trace skeleton
            skeletal_coordinate = np.array(skeletal_coordinate)
            self.draw_skeleton(image, skeletal_coordinate.reshape(-1, 2),
                               (255, 0, 0))

            if int(skeletal_coordinate[2]) != 0 and int(
                    skeletal_coordinate[3]) != 0 and self.show_confidence:
                cv2.putText(
                    image, "{0:.2f}".format(skeletal_coordinates[1][i]),
                    (int(skeletal_coordinate[2]), int(skeletal_coordinate[3])),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

            # Append json body data: joint coordinates, ground ellipse and
            # overlap flag
            bodys.append([[round(x) for x in skeletal_coordinate],
                          draw_ellipse_requirements[i],
                          int(is_skeletal_overlapped[i])])

        self.dt_vector["bodys"] = bodys

    '''
        Evaluate skeleton height
    '''

    def evaluate_height(self, skeletal_coordinate):
        # Calculate skeleton height
        calculated_height = 0

        # Left side body joints from top to down
        #joint_set = [0, 1, 8, 12, 13, 14]

        # Left leg (hip, knee, ankle)
        joint_set = [12, 13, 14]

        # Check if the left leg is complete
        left_leg = True
        for k in joint_set:
            x = int(skeletal_coordinate[k * 2])
            y = int(skeletal_coordinate[k * 2 + 1])
            if x == 0 or y == 0:
                # Left leg incomplete, fall back to the right leg
                left_leg = False
                break

        if not left_leg:
            joint_set = [9, 10, 11]
            # Check if the right leg is complete
            for k in joint_set:
                x = int(skeletal_coordinate[k * 2])
                y = int(skeletal_coordinate[k * 2 + 1])
                if x == 0 or y == 0:
                    # Neither leg is complete: unable to evaluate the ellipse
                    return 0

        # Sum the segment lengths along the chosen leg
        for p1, p2 in zip(joint_set[:-1], joint_set[1:]):
            if skeletal_coordinate[p1 * 2] and skeletal_coordinate[p2 * 2] \
                    and skeletal_coordinate[p1 * 2 + 1] \
                    and skeletal_coordinate[p2 * 2 + 1]:
                calculated_height += math.sqrt(
                    (skeletal_coordinate[p1 * 2] -
                     skeletal_coordinate[p2 * 2]) ** 2 +
                    (skeletal_coordinate[p1 * 2 + 1] -
                     skeletal_coordinate[p2 * 2 + 1]) ** 2)

        return calculated_height * self.calibrate
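
    # Worked example (hypothetical values): left-leg joints 12, 13, 14 at
    # (100, 200), (100, 260), (100, 320) give segments of 60 + 60 = 120 px,
    # so with self.calibrate = 1.0 the method returns 120.0.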

    '''
        Evaluate overlapping
    '''

    def evaluate_overlapping(self, ellipse_boxes, is_skeletal_overlapped,
                             ellipse_pool):
        # Check overlaps between people's ellipse masks to flag risky pairs
        for ind1, ind2 in itertools.combinations(range(len(ellipse_pool)), 2):

            is_overlap = cv2.bitwise_and(ellipse_pool[ind1],
                                         ellipse_pool[ind2])

            if is_overlap.any() and (not is_skeletal_overlapped[ind1]
                                     or not is_skeletal_overlapped[ind2]):
                is_skeletal_overlapped[ind1] = 1
                is_skeletal_overlapped[ind2] = 1

    '''
        Create Joint Array
    '''

    def create_joint_array(self, skeletal_coordinates):
        # Get joints sequence
        bodys_sequence = []
        bodys_probability = []
        for body in skeletal_coordinates:
            body_sequence = []
            body_probability = 0.0
            # Put each joint's x and y into a flat vector
            for joint in body:
                body_sequence.append(joint[0])
                body_sequence.append(joint[1])

                # Sum joint probabilities to average into a body probability
                body_probability += joint[2]

            body_probability = body_probability / len(body)

            # Add body sequence to list
            bodys_sequence.append(body_sequence)
            bodys_probability.append(body_probability)

        # Return coordinate sequences and per-body probabilities
        return [bodys_sequence, bodys_probability]
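
    # Worked example (hypothetical values): an OpenPose body
    # [[10, 20, 0.9], [30, 40, 0.7]] becomes the flat sequence
    # [10, 20, 30, 40] with body probability (0.9 + 0.7) / 2 = 0.8.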

    '''
        Evaluate the ground ellipse for each body
    '''

    def evaluate_ellipses(self, skeletal_coordinates,
                          draw_ellipse_requirements, ellipse_boxes,
                          ellipse_pool):
        for skeletal_coordinate in skeletal_coordinates:
            # Evaluate skeleton bounding box
            left, right, top, bottom = self.compute_simple_bounding_box(
                np.array(skeletal_coordinate))

            bb_center = np.array([(left + right) / 2, (top + bottom) / 2],
                                 np.int32)

            calculated_height = self.evaluate_height(skeletal_coordinate)

            # Compute how the height of the circle varies in perspective
            pts = np.array([[bb_center[0], top], [bb_center[0], bottom]],
                           np.float32)

            pts1 = pts.reshape(-1, 1, 2).astype(np.float32)  # (n, 1, 2)

            dst1 = cv2.perspectiveTransform(pts1, self.homography_matrix)

            # Minor (vertical) axis of the ellipse in perspective
            width = int(dst1[1, 0][1] - dst1[0, 0][1])

            # Bounding box surrounding the ellipse, used to check whether two
            # ellipses overlap
            ellipse_bbx = [
                bb_center[0] - calculated_height,
                bb_center[0] + calculated_height, bottom - width,
                bottom + width
            ]

            # Add bounding box to ellipse list
            ellipse_boxes.append(ellipse_bbx)

            ellipse = [
                int(bb_center[0]),
                int(bottom),
                int(calculated_height),
                int(width)
            ]

            mask_copy = self.mask.copy()

            ellipse_pool.append(
                cv2.ellipse(mask_copy, (bb_center[0], bottom),
                            (int(calculated_height), width), 0, 0, 360,
                            (255, 255, 255), -1))

            draw_ellipse_requirements.append(ellipse)
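
    # The ellipse is centered at the feet (bb_center[0], bottom); its
    # horizontal semi-axis is the calibrated body height and its vertical
    # semi-axis comes from the homography, so bodies higher in the frame
    # (farther away) get flatter ellipses.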

    '''
        Analyze image and evaluate distances
    '''

    def distances_evaluate(self, image, background):
        ellipse_boxes = []

        draw_ellipse_requirements = []

        ellipse_pool = []

        # Assign input image to openpose
        self.datum.cvInputData = image

        # Run OpenPose on the frame
        self.opWrapper.emplaceAndPop([self.datum])

        # Get openpose keypoint coordinates as a nested list
        skeletal_coordinates = self.datum.poseKeypoints.tolist()

        # Trace on background
        if self.background_masked:
            image = background

        self.dt_vector['ts'] = int(round(time.time() * 1000))
        self.dt_vector['bodys'] = []

        if isinstance(skeletal_coordinates, list):
            # Remove probability from joints and get a joint position list
            skeletal_coordinates = self.create_joint_array(
                skeletal_coordinates)

            # Initialize overlapped buffer
            is_skeletal_overlapped = np.zeros(
                np.shape(skeletal_coordinates[0])[0])

            # Evaluate ellipses for each body detected by openpose
            self.evaluate_ellipses(skeletal_coordinates[0],
                                   draw_ellipse_requirements, ellipse_boxes,
                                   ellipse_pool)

            # Evaluate overlapping
            self.evaluate_overlapping(ellipse_boxes, is_skeletal_overlapped,
                                      ellipse_pool)

            # Trace results over output image
            self.trace(image, skeletal_coordinates, draw_ellipse_requirements,
                       is_skeletal_overlapped)

        if self.streaming:
            # Send video to client queues
            self.send_image(self.stream_list, image, int(self.dt_vector['ts']))

            # Make json vector available to rest requests
            self.js_server.put(bytes(json.dumps(self.dt_vector), "UTF-8"))

        return image

    '''
        Send image over queue list and then over http mjpeg stream
    '''

    def send_image(self, queue_list, image, ts):

        encoded_image = self.jpeg.encode(image, quality=80)
        # Put image into queue for each server thread
        for q in queue_list:
            try:
                block = (ts, encoded_image)
                q.put(block, True, 0.02)
            except queue.Full:
                pass

    '''
        Analyze video
    '''

    def analyze_video(self):
        while self.cap.isOpened():
            # Capture from image/video
            ret, image = self.cap.read()

            # End of stream: exit the process
            if image is None or not ret:
                os._exit(0)

            self.mask = np.zeros(image.shape, dtype=np.uint8)

            # Choose canvas: static background or live frame
            if self.background_masked:
                background = self.background_image.copy()
            else:
                background = image

            image = self.distances_evaluate(image, background)

            # Write image
            if not self.streaming:
                self.out.write(image)

            # Show image and wait some time
            if self.use_preview:
                cv2.imshow('Social Distance', image)
                cv2.waitKey(1)

    '''
        Analyze image
    '''

    def analyze_image(self):

        # Choose canvas: static background or live frame
        if self.background_masked:
            background = self.background_image.copy()
        else:
            background = self.image

        self.mask = np.zeros(self.image.shape, dtype=np.uint8)

        self.image = self.distances_evaluate(self.image, background)

        # Write image
        cv2.imwrite(self.image_out, self.image)

        # Show image and wait some time
        if self.use_preview:
            cv2.imshow('Social Distance', self.image)
            cv2.waitKey(1000)

    '''
        Analyze image/video
    '''

    def analyze(self):
        if self.use_image:
            self.analyze_image()

        if self.use_video:
            self.analyze_video()
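
A minimal driver sketch for the class above. The flag names mirror those read by __init__; the default values are hypothetical examples, and indexing args[0] matches the (namespace, remainder) tuple returned by argparse's parse_known_args():

import argparse

def main():
    parser = argparse.ArgumentParser()
    # Defaults below are hypothetical examples, not values from the source
    parser.add_argument("--video", default="disabled")
    parser.add_argument("--image", default="enabled")
    parser.add_argument("--image_in", default="input.jpg")
    parser.add_argument("--image_out", default="output.jpg")
    parser.add_argument("--stream_in", default="")
    parser.add_argument("--stream_out", default="")
    parser.add_argument("--preview", default="disabled")
    parser.add_argument("--streaming", default="disabled")
    parser.add_argument("--masked", default="disabled")
    parser.add_argument("--background_in", default="")
    parser.add_argument("--horizontal_ratio", default="0.7")
    parser.add_argument("--vertical_ratio", default="0.7")
    parser.add_argument("--calibration", default="1.0")
    parser.add_argument("--body_threshold", default="0.3")
    parser.add_argument("--show_confidence", default="disabled")
    parser.add_argument("--openpose_folder", default="models/")
    parser.add_argument("--net_size", default="512x384")
    parser.add_argument("--video_port", default="5002")
    parser.add_argument("--js_port", default="5005")
    args = parser.parse_known_args()

    SocialDistancing(args).analyze()

if __name__ == "__main__":
    main()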
Example #4
class ProcessSource:
    '''
        Initialize
    '''
    def __init__(self, args):

        # Social Distancing arguments
        arguments = {}

        # Arguments
        arguments["horizontal_ratio"] = args[0].horizontal_ratio
        arguments["vertical_ratio"] = args[0].vertical_ratio
        arguments["calibration"] = args[0].calibration
        arguments["body_threshold"] = args[0].body_threshold
        arguments["show_confidence"] = args[0].show_confidence
        arguments["show_sketch"] = args[0].show_sketch

        # Initialize social distancing
        self.social_distancing = SocialDistancing(arguments)

        # Initialize Openpose
        self.initialize_openpose(args)

        # Initialize file opening/writing and streaming
        self.initialize_others(args)

    '''
        Initialize openpose
    '''

    def initialize_openpose(self, args):
        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()

        # Openpose params

        # Model path
        params["model_folder"] = args[0].openpose_folder

        # Face disabled
        params["face"] = False

        # Hand disabled
        params["hand"] = False

        # Net Resolution
        params["net_resolution"] = args[0].net_size

        # Gpu number
        params["num_gpu"] = 1  # Set GPU number

        # Gpu Id
        # Set GPU start id (not considering previous)
        params["num_gpu_start"] = 0

        # Starting OpenPose
        self.opWrapper = op.WrapperPython()
        self.opWrapper.configure(params)
        self.opWrapper.start()

        self.datum = op.Datum()

    '''
        Initialize acquisition methods (video, mjpeg, preprocessed json,
        jetson camera, etc.), sockets, and output files
    '''

    def initialize_others(self, args):
        # Convert args to booleans
        self.use_video = args[0].video == "enabled"

        # Process image
        self.use_image = args[0].image == "enabled"

        # Use preview
        self.use_preview = args[0].preview == "enabled"

        # Jetson internal camera enabled
        self.jetson_video = args[0].jetson_video == "enabled"

        # Mjpeg video reader
        self.use_mjpeg = args[0].use_mjpeg == "enabled"

        # Enable streaming option
        self.streaming = args[0].streaming == "enabled"

        # Use json as input
        self.use_js = args[0].use_js == "enabled"

        # Json input file
        self.js_in = args[0].js_in

        # Input mask (loaded below only in video/image mode)
        self.mask_in = None

        if self.use_video:
            # Open video capture
            if not self.jetson_video:
                # Use standard cv2 capture library
                self.cap = cv2.VideoCapture(args[0].stream_in)
            else:
                # Connect the cv2 capture to a gstreamer pipeline
                # (Jetson onboard camera)
                print(gstreamer_pipeline(flip_method=0))
                self.cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0),
                                            cv2.CAP_GSTREAMER)

            if not self.cap.isOpened():
                print("Error: Opening video stream or file {0}".format(
                    args[0].stream_in),
                      flush=True)
                sys.exit(-1)

            # Get input size
            width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            self.mask_in = cv2.imread(args[0].mask_in)

            if not self.streaming:
                # Open video output (if output is not an image)
                self.out = cv2.VideoWriter(
                    args[0].stream_out,
                    cv2.VideoWriter_fourcc(*args[0].encoding_codec), 25,
                    (width, height))

                if not self.out.isOpened():
                    print("Error: Unable to open output video file {0}".format(
                        args[0].stream_out),
                          flush=True)
                    sys.exit(-1)

            # Get image size
            im_size = (width, height)

        if self.use_image:
            self.mask_in = cv2.imread(args[0].mask_in)

            self.image = cv2.imread(args[0].image_in)

            if self.image is None:
                print("Error: Unable to open input image file {0}".format(
                    args[0].image_in),
                      flush=True)
                sys.exit(-1)

            self.image_out = args[0].image_out

            # Get image size
            im_size = (self.image.shape[1], self.image.shape[0])

        if self.use_mjpeg:
            # Create mjpeg reader
            self.mjpeg_reader = MjpegReader(args[0].stream_in)

            # Read first image to get image size
            image = self.mjpeg_reader.get_image()

            if not self.mjpeg_reader.is_opened():
                print("Error: Unable to open input mjpeg file {0}".format(
                    args[0].stream_in),
                      flush=True)
                sys.exit(-1)

            # Get input size
            width = int(image.shape[1])
            height = int(image.shape[0])

            if not self.streaming:
                # Open video output (if output is not an image)
                self.out = cv2.VideoWriter(
                    args[0].stream_out,
                    cv2.VideoWriter_fourcc(*args[0].encoding_codec),
                    int(args[0].dummy_fps), (width, height))

                if not self.out.isOpened():
                    print("Error: Unable to open output video file {0}".format(
                        args[0].stream_out),
                          flush=True)
                    sys.exit(-1)

            print("Mjpeg multipart file:{0}x{1}".format(width, height))

            # Get image size
            im_size = (width, height)

        if not self.use_js:
            # Compute homography
            self.social_distancing.compute_homography(im_size)

        self.background_masked = False
        # Open the background image, if necessary
        if args[0].masked == "enabled":
            # Set masked flag
            self.background_masked = True

            # Load static background
            self.background_image = cv2.imread(args[0].background_in)

            # Exit if a background is required but could not be loaded
            if self.background_image is None:
                print("Error: Unable to load background image (flag enabled)",
                      flush=True)
                sys.exit(-1)

        if self.use_js:
            im_size = (self.background_image.shape[1],
                       self.background_image.shape[0])

            self.out = cv2.VideoWriter(
                args[0].stream_out,
                cv2.VideoWriter_fourcc(*args[0].encoding_codec),
                int(args[0].dummy_fps), im_size)

            self.social_distancing.compute_homography(im_size)

        # Json data vector sent to clients
        self.dt_vector = {}

        # Client list
        self.stream_list = []
        self.js_list = []

        if self.streaming:
            # Initialize video server
            self.video_server = StreamServer(int(args[0].video_port),
                                             self.stream_list, "image/jpeg")
            self.video_server.activate()

            # Initialize stream server
            self.stream_server = StreamServer(int(args[0].stream_port),
                                              self.js_list, "application/json")
            self.stream_server.activate()

            # Initialize json server
            self.js_server = ResponseServer(int(args[0].js_port),
                                            "application/json")
            self.js_server.activate()

        # turbo jpeg initialization
        self.jpeg = TurboJPEG()

        # Json recorder
        self.js_recording = False
        if args[0].js_out != "":
            self.js_recording = True
            self.js_out = open(args[0].js_out, "w")

        # Mjpeg recorder
        self.mjpeg_recorder = False
        if args[0].mjpeg_out != "":
            self.mjpeg_recorder = True
            self.mjpeg_out = open(args[0].mjpeg_out, "wb")

        # Json version
        self.dt_vector["vs"] = 1

        # Fps evaluation init
        self.millis = 0
        self.frames = 0

    '''
        Process the source, save to image/video/json file, and distribute
        over the network
    '''

    def process_source(self, source, background):
        start = round(time.time() * 1000)

        # Apply the optional input mask, if one was loaded
        if self.mask_in is not None:
            source = cv2.bitwise_and(source, self.mask_in)

        # Check if pre-processed json is used
        if not self.use_js:
            # Assign input image to openpose
            self.datum.cvInputData = source

            # Use Openpose to extract poses
            self.opWrapper.emplaceAndPop([self.datum])

            # Get openpose coordinates (rounding values)
            skeletals = np.around(
                np.array(self.datum.poseKeypoints).tolist(), 2).tolist()
        else:
            # Copy json data
            skeletals = source

        # Trace on background
        if self.background_masked:
            source = background

        if not isinstance(skeletals, list):
            return background

        # Evaluate distances, draw body and ellipses and get json bodies and ellipses list
        image, bodies, ellipses = self.social_distancing.distances_calculate(
            source, skeletals, [1 for k in range(len(skeletals))])

        # Save data to json vector
        self.dt_vector["bodies"] = bodies
        self.dt_vector["ellipses"] = ellipses

        if self.streaming:
            # Send video to client queues
            self.send_image(self.stream_list, image, int(self.dt_vector['ts']))

            # Make json vector available to rest requests
            self.js_server.put(bytes(json.dumps(self.dt_vector), "UTF-8"))

            # Send json vector to streaming clients
            self.send_js(self.js_list,
                         bytes(json.dumps(self.dt_vector), "UTF-8"),
                         int(self.dt_vector['ts']))

        # Write json data
        if self.js_recording:
            self.js_out.write(json.dumps(self.dt_vector) + "\n")

        stop = round(time.time() * 1000)

        if self.millis > 1000:
            print("Analyzing at {0} Fps".format(self.frames),
                  end="\r",
                  flush=True)
            self.millis = 0
            self.frames = 0

        self.millis += stop - start
        self.frames += 1

        return image

    '''
        Send image over queue list and then over http mjpeg stream
    '''

    def send_image(self, queue_list, image, ts):

        encoded_image = self.jpeg.encode(image, quality=80)
        # Put image into queue for each server thread
        for q in queue_list:
            try:
                block = (ts, encoded_image)
                q.put(block, True, 0.02)
            except queue.Full:
                pass

    '''
        Send json over queue list and then over http multipart stream
    '''

    def send_js(self, queue_list, js, ts):

        # Put json into queue for each server thread
        for q in queue_list:
            try:
                block = (ts, js)
                q.put(block, True, 0.02)
            except queue.Full:
                pass

    '''
        Analyze video
    '''

    def analyze_video(self):
        counter = 0
        while self.cap.isOpened():
            # Get a global image ts
            self.dt_vector['ts'] = int(round(time.time() * 1000))

            # Capture from image/video
            ret, image = self.cap.read()

            # End of stream: exit the process
            if image is None or not ret:
                os._exit(0)

            # Record image
            if self.mjpeg_recorder:
                encoded_image = self.jpeg.encode(image, quality=80)

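                # Frame the jpeg like a multipart/x-mixed-replace part:
                # boundary line, custom X-TimeStamp header, content type and
                # length, then the raw jpeg bytes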
                header = "--myboundary\r\n" \
                    "X-TimeStamp: " + str(self.dt_vector['ts']) + "\r\n" \
                    "Content-Type: image/jpeg\r\n" \
                    "Content-Length: " + \
                    str(len(encoded_image)) + "\r\n\r\n"

                self.mjpeg_out.write(bytes(header, "UTF-8"))
                self.mjpeg_out.write(encoded_image)

            # Choose canvas: static background or live frame
            if self.background_masked:
                background = self.background_image.copy()
            else:
                background = image

            image = self.process_source(image, background)

            # Write image
            if not self.streaming:
                self.out.write(image)

            # Show image and wait some time
            if self.use_preview:
                cv2.imshow('Social Distance', image)
                cv2.waitKey(1)

            #print(counter, end="\r", flush=True)
            counter += 1

    '''
        Analyze image
    '''

    def analyze_image(self):

        # Choose canvas: static background or live frame
        if self.background_masked:
            background = self.background_image.copy()
        else:
            background = self.image

        self.image = self.process_source(self.image, background)

        # Write image
        cv2.imwrite(self.image_out, self.image)

        # Show image and wait some time
        if self.use_preview:
            cv2.imshow('Social Distance', self.image)
            cv2.waitKey(1000)

    '''
        Analyze json data
    '''

    def analyze_js(self):
        # Read json lines file
        with open(self.js_in, "r") as js_file:
            lines = js_file.read().split("\n")

        # While there are lines
        for line in lines[:-1]:
            js_line = json.loads(line)

            # Create a fresh copy of the background
            background = self.background_image.copy()

            # Version 1 json uses 'bodies'; legacy files use 'bodys'
            if 'vs' in js_line.keys():
                self.image = self.process_source(js_line['bodies'], background)
            else:
                self.image = self.process_source(js_line['bodys'], background)

            # Write image
            if not self.streaming:
                self.out.write(self.image)

            # Show image and wait some time
            if self.use_preview:
                cv2.imshow('Social Distance', self.image)
                cv2.waitKey(1)

    '''
        Analyze mjpeg (timestamped jpeg sequence)
    '''

    def analyze_mjpeg(self):
        counter = 0

        old_timestamp = self.mjpeg_reader.get_ts()
        while True:
            # Capture from image/video
            image = self.mjpeg_reader.get_image()

            # Get a global image ts
            self.dt_vector['ts'] = self.mjpeg_reader.get_ts()

            # End of stream: exit the process
            if image is None:
                os._exit(0)

            # Record image
            if self.mjpeg_recorder:
                encoded_image = self.jpeg.encode(image, quality=80)

                header = "--myboundary\r\n" \
                    "X-TimeStamp: " + str(self.dt_vector['ts']) + "\r\n" \
                    "Content-Type: image/jpeg\r\n" \
                    "Content-Length: " + \
                    str(len(encoded_image)) + "\r\n\r\n"

                self.mjpeg_out.write(bytes(header, "UTF-8"))
                self.mjpeg_out.write(encoded_image)

            # Choose canvas: static background or live frame
            if self.background_masked:
                background = self.background_image.copy()
            else:
                background = image

            image = self.process_source(image, background)

            # Write image
            if not self.streaming:
                self.out.write(image)

            # Show image and wait some time
            if self.use_preview:
                cv2.imshow('Social Distance', image)
                cv2.waitKey(1)

            # Wait timestamp difference
            time.sleep((self.mjpeg_reader.get_ts() - old_timestamp) / 1000)

            # print(counter, end = "\n", flush=True)

            # Store old timestamp
            old_timestamp = self.mjpeg_reader.get_ts()

            counter += 1

    '''
        Analyze image/video/json/mjpeg
    '''

    def analyze(self):
        if self.use_image:
            self.analyze_image()

        if self.use_video:
            self.analyze_video()

        if self.use_js:
            self.analyze_js()

        if self.use_mjpeg:
            self.analyze_mjpeg()
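
A minimal driver sketch for ProcessSource, under the same assumptions as the SocialDistancing example above (hypothetical defaults; args[0] is the namespace returned by argparse's parse_known_args()):

import argparse

def build_parser():
    parser = argparse.ArgumentParser()
    # Every flag read by ProcessSource; the defaults are hypothetical examples
    for flag, default in [
            ("video", "disabled"), ("image", "enabled"),
            ("preview", "disabled"), ("jetson_video", "disabled"),
            ("use_mjpeg", "disabled"), ("streaming", "disabled"),
            ("use_js", "disabled"), ("js_in", ""),
            ("stream_in", ""), ("stream_out", "out.avi"),
            ("mask_in", ""), ("encoding_codec", "XVID"),
            ("dummy_fps", "25"), ("image_in", "input.jpg"),
            ("image_out", "output.jpg"), ("masked", "disabled"),
            ("background_in", ""), ("js_out", ""), ("mjpeg_out", ""),
            ("video_port", "5002"), ("stream_port", "5003"),
            ("js_port", "5005"), ("horizontal_ratio", "0.7"),
            ("vertical_ratio", "0.7"), ("calibration", "1.0"),
            ("body_threshold", "0.3"), ("show_confidence", "disabled"),
            ("show_sketch", "enabled"), ("openpose_folder", "models/"),
            ("net_size", "512x384")]:
        parser.add_argument("--" + flag, default=default)
    return parser

if __name__ == "__main__":
    args = build_parser().parse_known_args()
    ProcessSource(args).analyze()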