def handle_connection(connection, config, thermal_config_file):
    headers, extra_b = handle_headers(connection)
    thermal_config = ThermalConfig.load_from_file(thermal_config_file,
                                                  headers.model)
    logging.info("parsed camera headers %s running with config %s", headers,
                 thermal_config)

    process_queue = multiprocessing.Queue()

    processor = get_processor(process_queue, config, thermal_config, headers)
    processor.start()

    edge = config.tracking.edge_pixels
    crop_rectangle = tools.Rectangle(edge, edge, headers.res_x - 2 * edge,
                                     headers.res_y - 2 * edge)
    raw_frame = lepton3.Lepton3(headers)
    read = 0
    try:
        while True:
            if extra_b is not None:
                data = extra_b + connection.recv(
                    headers.frame_size - len(extra_b), socket.MSG_WAITALL)
                extra_b = None
            else:
                data = connection.recv(headers.frame_size, socket.MSG_WAITALL)

            if not data:
                logging.info("disconnected from camera")
                process_queue.put(STOP_SIGNAL)
                break
            try:
                message = data[:5].decode("utf-8")
                if message == "clear":
                    logging.info("processing error from camera")
                    process_queue.put(STOP_SIGNAL)
                    break
            except UnicodeDecodeError:
                # binary frame data, not a control message
                pass
            read += 1
            frame = raw_frame.parse(data)
            frame.received_at = time.time()
            cropped_frame = crop_rectangle.subimage(frame.pix)
            t_max = np.amax(cropped_frame)
            t_min = np.amin(cropped_frame)
            # a t_min of 0 seems to happen when the Pi is working hard
            if t_min == 0:
                logging.warning(
                    "received frame has suspect values, skipping: max %s min %s cpu%% %s memory%% %s",
                    t_max, t_min, psutil.cpu_percent(), psutil.virtual_memory()[2])
                process_queue.put(SKIP_SIGNAL)
            elif read < 100:
                # assumed warm-up: skip the first frames while the camera output settles
                process_queue.put(SKIP_SIGNAL)
            else:
                process_queue.put(frame)
    finally:
        # give the processor a moment to close down properly
        time.sleep(5)
        processor.terminate()
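
For context, a minimal sketch of how handle_connection could be driven by a Unix-domain socket server. The socket path, the run_socket_server name and the way the config objects are obtained are placeholders, not the project's actual setup:

import logging
import socket

SOCKET_PATH = "/tmp/thermal-frames"  # placeholder path, not the project's real socket


def run_socket_server(config, thermal_config_file):
    # accept camera connections one at a time and hand each one to handle_connection
    with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as server:
        server.bind(SOCKET_PATH)
        server.listen(1)
        while True:
            connection, _ = server.accept()
            try:
                handle_connection(connection, config, thermal_config_file)
            except Exception:
                logging.exception("error handling camera connection")
            finally:
                connection.close()
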
    def calculate_frame_crop(self):
        # frames are always square, but the bounding rect may not be, so to see how much was clipped
        # we create a square bounding rect and check it against the frame size.
        self.frame_crop = []
        for rect in self.regions:
            rx, ry = rect.mid_x, rect.mid_y
            size = max(rect.width, rect.height)
            adjusted_rect = tools.Rectangle(rx - size / 2, ry - size / 2,
                                            size, size)
            self.frame_crop.append(
                get_cropped_fraction(adjusted_rect, self.res_x, self.res_y))
Example 3
    def __init__(self, config, thermal_config, classifier):
        self.frame_num = 0
        self.clip = None
        self.tracking = False
        self.enable_per_track_information = False
        self.rolling_track_classify = {}
        self.skip_classifying = 0
        self.classified_consec = 0
        self.config = config
        self.classifier = classifier
        self.num_labels = len(classifier.labels)
        self._res_x = self.config.res_x
        self._res_y = self.config.res_y
        self.predictions = Predictions(classifier.labels)
        self.preview_frames = (thermal_config.recorder.preview_secs *
                               thermal_config.recorder.frame_rate)
        edge = self.config.tracking.edge_pixels
        self.crop_rectangle = tools.Rectangle(edge, edge,
                                              self.res_x - 2 * edge,
                                              self.res_y - 2 * edge)

        try:
            self.fp_index = self.classifier.labels.index("false-positive")
        except ValueError:
            self.fp_index = None

        self.track_extractor = ClipTrackExtractor(
            self.config.tracking,
            self.config.use_opt_flow,
            self.config.classify.cache_to_disk,
            keep_frames=False,
            calc_stats=False,
        )
        self.motion_config = thermal_config.motion
        self.min_frames = (thermal_config.recorder.min_secs *
                           thermal_config.recorder.frame_rate)
        self.max_frames = (thermal_config.recorder.max_secs *
                           thermal_config.recorder.frame_rate)
        self.motion_detector = MotionDetector(
            self.res_x,
            self.res_y,
            thermal_config,
            self.config.tracking.dynamic_thresh,
            CPTVRecorder(thermal_config),
        )
        self.startup_classifier()

        self._output_dir = thermal_config.recorder.output_dir
        self.meta_dir = os.path.join(thermal_config.recorder.output_dir,
                                     "metadata")
        if not os.path.exists(self.meta_dir):
            os.makedirs(self.meta_dir)
Example 4
    def calculate_frame_crop(self):
        # frames are always square, but the bounding rect may not be, so to see how much was clipped
        # we create a square bounding rect and check it against the frame size.
        self.frame_crop = []
        for rect in self.track_bounds:
            rect = tools.Rectangle.from_ltrb(*rect)
            rx, ry = rect.mid_x, rect.mid_y
            size = max(rect.width, rect.height)
            adjusted_rect = tools.Rectangle(rx - size / 2, ry - size / 2,
                                            size, size)
            self.frame_crop.append(
                get_cropped_fraction(adjusted_rect, CPTV_FILE_WIDTH,
                                     CPTV_FILE_HEIGHT))
Example 5
    @staticmethod
    def from_meta(clip_id, clip_meta, track_meta):
        """Creates a track header from the given metadata."""

        # slightly hacky way to get the camera name from clip_id; in the future the camera will be included in the metadata.
        camera = os.path.splitext(os.path.basename(clip_id))[0].split('-')[-1]

        # get the reference levels from clip_meta and load them into the track.
        track_start_frame = track_meta['start_frame']
        # duration is in seconds; 9 fps is the assumed CPTV frame rate
        track_end_frame = track_meta['start_frame'] + int(round(track_meta['duration'] * 9))
        thermal_reference_level = np.float32(clip_meta['frame_temp_median'][track_start_frame:track_end_frame])

        # calculate the frame velocities
        bounds_history = track_meta['bounds_history']
        frame_center = [((left + right)/2, (top + bottom)/2) for left, top, right, bottom in bounds_history]
        frame_velocity = []
        prev = None
        for x, y in frame_center:
            if prev is None:
                frame_velocity.append((0.0,0.0))
            else:
                frame_velocity.append((x-prev[0], y-prev[1]))
            prev = (x, y)

        result = TrackHeader(
            clip_id=clip_id, track_number=track_meta['id'], label=track_meta['tag'],
            start_time=parser.parse(track_meta['start_time']),
            duration=float(track_meta['duration']),
            camera=camera,
            score=float(track_meta['score'])
        )
        result.thermal_reference_level = thermal_reference_level
        result.frame_velocity = frame_velocity
        result.track_bounds = np.asarray(bounds_history)

        # frames are always square, but the bounding rect may not be, so to see how much was clipped
        # we create a square bounding rect and check it against the frame size.
        result.frame_crop = []
        for rect in result.track_bounds:
            rect = tools.Rectangle.from_ltrb(*rect)
            rx, ry = rect.mid_x, rect.mid_y
            size = max(rect.width, rect.height)
            adjusted_rect = tools.Rectangle(rx-size/2, ry-size/2, size, size)
            result.frame_crop.append(get_cropped_fraction(adjusted_rect, CPTV_FILE_WIDTH, CPTV_FILE_HEIGHT))

        return result
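
To make the expected metadata shape concrete, here is a hedged sketch of clip_meta / track_meta dictionaries that from_meta could consume. The keys come from the code above; every value, the label and the clip filename are purely illustrative:

# hedged sketch: minimal metadata a call to from_meta could consume (all values illustrative)
example_clip_meta = {
    # per-frame median temperature for the whole clip
    "frame_temp_median": [2900.0] * 200,
}
example_track_meta = {
    "id": 1,
    "tag": "possum",                       # illustrative label
    "start_time": "2019-01-01T12:00:00",   # parsed by dateutil's parser above
    "start_frame": 10,
    "duration": 4.5,                       # seconds, converted to frames at 9 fps above
    "score": 1.0,
    # one (left, top, right, bottom) rect per frame of the track
    "bounds_history": [(20, 20, 50, 60)] * 40,
}

# header = TrackHeader.from_meta("20190101-12-cam01.cptv", example_clip_meta, example_track_meta)
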
Example 6
def preprocess_segment(
    frames,
    reference_level=None,
    frame_velocity=None,
    augment=False,
    encode_frame_offsets_in_flow=False,
    default_inset=2,
    filter_to_delta=True,
    keep_aspect=False,
    frame_size=48,
):
    """
    Preprocesses the raw track data, scaling it to correct size, and adjusting to standard levels
    :param frames: a list of np array of shape [C, H, W]
    :param reference_level: thermal reference level for each frame in data
    :param frame_velocity: velocity (x,y) for each frame.
    :param augment: if true applies a slightly random crop / scale
    :param default_inset: the default number of pixels to inset when no augmentation is applied.
    :param filter_to_delta: If true change filterted channel to be the delta of thermal frames.
    """

    if reference_level is not None:
        # -------------------------------------------
        # check the reference level matches the data
        assert len(frames) == len(
            reference_level
        ), "Reference level shape and data shape do not match."

    # -------------------------------------------
    # first we scale to the standard size

    # adjusting the corners makes the algorithm robust to tracking differences.
    top_offset = random.randint(0, 5) if augment else default_inset
    bottom_offset = random.randint(0, 5) if augment else default_inset
    left_offset = random.randint(0, 5) if augment else default_inset
    right_offset = random.randint(0, 5) if augment else default_inset

    scaled_frames = []

    for i, frame in enumerate(frames):
        channels = frame.shape[0]
        frame_height, frame_width = frame[0].shape
        if frame_height < MIN_SIZE or frame_width < MIN_SIZE:
            continue

        frame_bounds = tools.Rectangle(0, 0, frame_width, frame_height)

        # set up a cropping frame
        crop_region = tools.Rectangle.from_ltrb(
            left_offset,
            top_offset,
            frame_width - right_offset,
            frame_height - bottom_offset,
        )

        # if the frame is too small we make it a little larger
        while crop_region.width < MIN_SIZE:
            crop_region.left -= 1
            crop_region.right += 1
            crop_region.crop(frame_bounds)
        while crop_region.height < MIN_SIZE:
            crop_region.top -= 1
            crop_region.bottom += 1
            crop_region.crop(frame_bounds)

        cropped_frame = frame[
            :,
            crop_region.top : crop_region.bottom,
            crop_region.left : crop_region.right,
        ]

        scaled_frame = [
            resize_frame(cropped_frame[channel], channel, frame_size, keep_aspect)
            for channel in range(channels)
        ]
        if reference_level is not None:
            scaled_frame[0] -= np.float32(reference_level[i])
        scaled_frames.append(scaled_frame)

    # convert back into [F,C,H,W] array.
    data = np.float32(scaled_frames)
    if len(data) == 0:
        return None
    # map optical flow back down to the right scale,
    # we pre-multiplied by 256 to fit into a 16 bit int
    data[:, 2 : 3 + 1, :, :] *= 1.0 / 256.0

    # write frame motion into center of frame
    if encode_frame_offsets_in_flow:
        F, C, H, W = data.shape
        for x in range(-2, 2 + 1):
            for y in range(-2, 2 + 1):
                data[:, 2 : 3 + 1, H // 2 + y, W // 2 + x] = frame_velocity[:, :]

    # set filtered track to delta frames
    if filter_to_delta:
        reference = np.clip(data[:, 0], 20, 999)
        data[0, 1] = 0
        data[1:, 1] = reference[1:] - reference[:-1]

    # -------------------------------------------
    # finally apply any additional augmentation

    if augment:
        if random.random() <= 0.75:
            # we will adjust contrast and levels, but only within these bounds.
            # that is a bright input may have brightness reduced, but not increased.
            LEVEL_OFFSET = 4

            # apply level and contrast shift
            level_adjust = random.normalvariate(0, LEVEL_OFFSET)
            contrast_adjust = tools.random_log(0.9, (1 / 0.9))

            data[:, 0] *= contrast_adjust
            data[:, 0] += level_adjust
            data[:, 1] *= contrast_adjust

        if random.random() <= 0.50:
            # when we flip the frame remember to flip the horizontal velocity as well
            data = np.flip(data, axis=3)
            data[:, 2] = -data[:, 2]
    np.clip(data[:, 0, :, :], a_min=0, a_max=None, out=data[:, 0, :, :])

    return data
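
A rough usage sketch for this preprocess_segment. It assumes the module-level MIN_SIZE constant and resize_frame helper referenced above are available, and it invents a 5-channel frame layout purely for illustration:

import numpy as np

# ten fake frames: 5 channels (thermal, filtered, flow_x, flow_y, mask), 40x40 px - purely illustrative
fake_frames = [np.float32(np.random.rand(5, 40, 40) * 100) for _ in range(10)]
fake_reference = [50.0] * 10

segment = preprocess_segment(
    fake_frames,
    reference_level=fake_reference,
    augment=True,
    frame_size=48,
)
if segment is not None:
    print(segment.shape)  # (frames, channels, height, width) after preprocessing
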
Example 7
def get_cropped_fraction(region: tools.Rectangle, width, height):
    """ Returns the fraction regions mass outside the rect ((0,0), (width, height)"""
    bounds = tools.Rectangle(0, 0, width - 1, height - 1)
    return 1 - (bounds.overlap_area(region) / region.area)
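
A quick worked example of the formula, using plain arithmetic instead of tools.Rectangle:

# worked example (illustrative): a 10x10 region whose right half hangs outside the frame
region_area = 10 * 10
overlap_area = 5 * 10            # only the left five columns fall inside the bounds
cropped_fraction = 1 - overlap_area / region_area
print(cropped_fraction)          # 0.5 -> half of the region's mass was cropped away
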
def preprocess_segment(
    frames,
    frame_size,
    reference_level=None,
    frame_velocity=None,
    augment=False,
    default_inset=0,
    keep_edge=False,
):
    """
    Preprocesses the raw track data, scaling it to correct size, and adjusting to standard levels
    :param frames: a list of Frames
    :param reference_level: thermal reference level for each frame in data
    :param frame_velocity: velocity (x,y) for each frame.
    :param augment: if true applies a slightly random crop / scale
    :param default_inset: the default number of pixels to inset when no augmentation is applied.
    """
    if reference_level is not None:
        # -------------------------------------------
        # next adjust temperature and flow levels
        # get reference level for thermal channel
        assert len(frames) == len(
            reference_level), "Reference level shape and data shape do not match."

    # EDGE, res_x and res_y come from module scope (not shown in this snippet)
    crop_rectangle = tools.Rectangle(EDGE, EDGE, res_x - 2 * EDGE,
                                     res_y - 2 * EDGE)

    # -------------------------------------------
    # first we scale to the standard size
    data = []
    flip = False
    chance = random.random()
    if augment:
        contrast_adjust = None
        level_adjust = None
        if chance <= 0.75:
            # we will adjust contrast and levels, but only within these bounds.
            # that is a bright input may have brightness reduced, but not increased.
            LEVEL_OFFSET = 4

            # apply level and contrast shift
            level_adjust = float(random.normalvariate(0, LEVEL_OFFSET))
            contrast_adjust = float(tools.random_log(0.9, (1 / 0.9)))
        if chance <= 0.50:
            flip = True
    for i, frame in enumerate(frames):
        frame.float_arrays()
        frame_height, frame_width = frame.thermal.shape
        # adjusting the corners makes the algorithm robust to tracking differences.
        # the maximum random offset is clipped to 1-2 px; arguably it should be a percentage of the frame size
        max_height_offset = int(np.clip(frame_height * 0.1, 1, 2))
        max_width_offset = int(np.clip(frame_width * 0.1, 1, 2))
        top_offset = (int(random.random() *
                          max_height_offset) if augment else default_inset)
        bottom_offset = (int(random.random() *
                             max_height_offset) if augment else default_inset)
        left_offset = (int(random.random() *
                           max_width_offset) if augment else default_inset)
        right_offset = (int(random.random() *
                            max_width_offset) if augment else default_inset)
        if frame_height < MIN_SIZE or frame_width < MIN_SIZE:
            continue

        frame_bounds = tools.Rectangle(0, 0, frame_width, frame_height)
        # rotate then crop
        if augment and chance <= 0.75:
            degrees = int(chance * 40) - 20
            frame.rotate(degrees)

        # set up a cropping frame
        crop_region = tools.Rectangle.from_ltrb(
            left_offset,
            top_offset,
            frame_width - right_offset,
            frame_height - bottom_offset,
        )

        # if the frame is too small we make it a little larger
        while crop_region.width < MIN_SIZE:
            crop_region.left -= 1
            crop_region.right += 1
            crop_region.crop(frame_bounds)
        while crop_region.height < MIN_SIZE:
            crop_region.top -= 1
            crop_region.bottom += 1
            crop_region.crop(frame_bounds)
        frame.crop_by_region(crop_region, out=frame)
        # if frame.mask is not None:
        #     assert np.all(np.mod(frame.mask, 1) == 0), "Mask isn't integer"

        try:
            frame.resize_with_aspect((frame_size, frame_size),
                                     crop_rectangle,
                                     keep_edge=keep_edge)
        except Exception as e:
            logging.error("Error resizing frame %s exception %s", frame, e)
            continue
        if reference_level is not None:
            frame.thermal -= reference_level[i]
            np.clip(frame.thermal, a_min=0, a_max=None, out=frame.thermal)

        frame.normalize()

        if augment:
            if level_adjust is not None:
                frame.brightness_adjust(level_adjust)
            if contrast_adjust is not None:
                frame.contrast_adjust(contrast_adjust)
            if flip:
                frame.flip()
        data.append(frame)

    return data, flip
Example 9
    @staticmethod
    def apply(
        frames,
        reference_level,
        frame_velocity=None,
        augment=False,
        encode_frame_offsets_in_flow=False,
        default_inset=2,
    ):
        """
        Preprocesses the raw track data, scaling it to the correct size and adjusting to standard levels.
        :param frames: a list of np arrays of shape [C, H, W]
        :param reference_level: thermal reference level for each frame in data
        :param frame_velocity: velocity (x, y) for each frame.
        :param augment: if true, applies a slightly random crop / scale
        :param default_inset: the default number of pixels to inset when no augmentation is applied.
        """

        # -------------------------------------------
        # first we scale to the standard size

        # adjusting the corners makes the algorithm robust to tracking differences.
        top_offset = random.randint(0, 5) if augment else default_inset
        bottom_offset = random.randint(0, 5) if augment else default_inset
        left_offset = random.randint(0, 5) if augment else default_inset
        right_offset = random.randint(0, 5) if augment else default_inset

        scaled_frames = []

        for frame in frames:

            channels, frame_height, frame_width = frame.shape

            frame_bounds = tools.Rectangle(0, 0, frame_width, frame_height)

            # set up a cropping frame
            crop_region = tools.Rectangle.from_ltrb(left_offset, top_offset, frame_width - right_offset, frame_height - bottom_offset)

            # if the frame is too small we make it a little larger
            while crop_region.width < 4:
                crop_region.left -= 1
                crop_region.right += 1
                crop_region.crop(frame_bounds)
            while crop_region.height < 4:
                crop_region.top -= 1
                crop_region.bottom += 1
                crop_region.crop(frame_bounds)

            cropped_frame = frame[:,
                            crop_region.top: crop_region.bottom,
                            crop_region.left: crop_region.right]

            scaled_frame = [cv2.resize(np.float32(cropped_frame[channel]), dsize=(Preprocessor.FRAME_SIZE, Preprocessor.FRAME_SIZE),
                                       interpolation=cv2.INTER_LINEAR if channel != TrackChannels.mask else cv2.INTER_NEAREST)
                            for channel in range(channels)]
            scaled_frame = np.float32(scaled_frame)

            scaled_frames.append(scaled_frame)

        # convert back into [F,C,H,W] array.
        data = np.float32(scaled_frames)

        # the segment will be processed in float32 so we may as well convert it here.
        # also optical flow is stored as a scaled integer, but we want it in float32 format.
        data = np.asarray(data, dtype=np.float32)

        # -------------------------------------------
        # next adjust temperature and flow levels

        # get reference level for thermal channel
        assert len(data) == len(reference_level), "Reference level shape and data shape do not match."

        # reference thermal levels to the reference level
        data[:, 0, :, :] -= np.float32(reference_level)[:, np.newaxis, np.newaxis]

        # map optical flow back down to the right scale,
        # we pre-multiplied by 256 to fit into a 16 bit int
        data[:, 2:3 + 1, :, :] *= (1.0/256.0)

        # write frame motion into center of frame
        if encode_frame_offsets_in_flow:
            F, C, H, W = data.shape
            for x in range(-2,2+1):
                for y in range(-2,2+1):
                    data[:, 2:3 + 1, H//2+y, W//2+x] = frame_velocity[:, :]

        # set filtered track to delta frames
        reference = np.clip(data[:, 0], 20, 999)
        data[0, 1] = 0
        data[1:, 1] = reference[1:] - reference[:-1]

        # -------------------------------------------
        # finally apply any additional augmentation

        if augment:
            if (random.random() <= 0.75):
                # we will adjust contrast and levels, but only within these bounds.
                # that is a bright input may have brightness reduced, but not increased.
                LEVEL_OFFSET = 4

                # apply level and contrast shift
                level_adjust = random.normalvariate(0, LEVEL_OFFSET)
                contrast_adjust = tools.random_log(0.9, (1/0.9))

                data[:, 0] *= contrast_adjust
                data[:, 0] += level_adjust

            if random.random() <= 0.50:
                # when we flip the frame remember to flip the horizontal velocity as well
                data = np.flip(data, axis=3)
                data[:, 2] = -data[:, 2]

        return data