Example #1
    def _push_sample(self, sample):
        # Calculate whether we need to draw any annotations on the output video.
        now = sample.time
        annotations = []
        with self.annotations_lock:
            # Remove expired annotations
            self.text_annotations = [
                x for x in self.text_annotations if now < x.end_time
            ]
            current_texts = [x for x in self.text_annotations if x.time <= now]
            for annotation in list(self.annotations):
                if annotation.time == now:
                    annotations.append(annotation)
                if now >= annotation.time:
                    self.annotations.remove(annotation)

        sample = gst_sample_make_writable(sample)
        img = array_from_sample(sample, readwrite=True)
        # Text:
        _draw_text(img,
                   datetime.datetime.now().strftime("%H:%M:%S.%f")[:-4],
                   (10, 30), (255, 255, 255))
        for i, x in enumerate(reversed(current_texts)):
            origin = (10, (i + 2) * 30)
            age = float(now - x.time) / 3
            color = (int(255 * max([1 - age, 0.5])), ) * 3
            _draw_text(img, x.text, origin, color)

        # Regions:
        for annotation in annotations:
            annotation.draw(img)

        self.appsrc.props.caps = sample.get_caps()
        self.appsrc.emit("push-buffer", sample.get_buffer())
        self._sample_count += 1
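
The _draw_text helper is not shown in these examples. Below is a minimal sketch of what such a helper might look like, assuming the frame is a writable BGR numpy array and OpenCV (cv2) is available; it is an illustration only, not the stb-tester implementation:

import cv2

def _draw_text(img, text, origin, color):
    # Draw a dark outline first so the text stays readable over light video.
    cv2.putText(img, text, origin, cv2.FONT_HERSHEY_DUPLEX, 1.0,
                (0, 0, 0), 3, cv2.LINE_AA)
    # Then draw the text itself in the requested BGR colour.
    cv2.putText(img, text, origin, cv2.FONT_HERSHEY_DUPLEX, 1.0,
                color, 1, cv2.LINE_AA)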
Example #2
    def _push_sample(self, sample):
        # Calculate whether we need to draw any annotations on the output video.
        now = sample.time
        annotations = []
        with self.annotations_lock:
            # Remove expired annotations
            self.text_annotations = [x for x in self.text_annotations
                                     if now < x.end_time]
            current_texts = [x for x in self.text_annotations if x.time <= now]
            for annotation in list(self.annotations):
                if annotation.time == now:
                    annotations.append(annotation)
                if now >= annotation.time:
                    self.annotations.remove(annotation)

        sample = gst_sample_make_writable(sample)
        img = array_from_sample(sample, readwrite=True)
        # Text:
        _draw_text(
            img, datetime.datetime.now().strftime("%H:%M:%S.%f")[:-4],
            (10, 30), (255, 255, 255))
        for i, x in enumerate(reversed(current_texts)):
            origin = (10, (i + 2) * 30)
            age = float(now - x.time) / 3
            color = (int(255 * max([1 - age, 0.5])),) * 3
            _draw_text(img, x.text, origin, color)

        # Regions:
        for annotation in annotations:
            annotation.draw(img)

        self.appsrc.props.caps = sample.get_caps()
        self.appsrc.emit("push-buffer", sample.get_buffer())
        self._sample_count += 1
Example #3
def svg_to_array(svg):
    from _stbt.gst_utils import array_from_sample
    pipeline = Gst.parse_launch(
        'appsrc name="src" caps="image/svg" ! rsvgdec ! '
        'videoconvert ! appsink caps="video/x-raw,format=BGR" name="sink"')
    src = pipeline.get_by_name('src')
    sink = pipeline.get_by_name('sink')
    pipeline.set_state(Gst.State.PLAYING)
    buf = Gst.Buffer.new_wrapped(svg)
    src.emit('push-buffer', buf)
    sample = sink.emit('pull-sample')
    src.emit("end-of-stream")
    pipeline.set_state(Gst.State.NULL)
    pipeline.get_state(0)
    return array_from_sample(sample)
Example #4
    def _push_sample(self, sample):
        # Calculate whether we need to draw any annotations on the output video.
        now = sample.time
        annotations = []
        with self.annotations_lock:
            # Remove expired annotations
            self.text_annotations = [x for x in self.text_annotations
                                     if now < x.end_time]
            current_texts = [x for x in self.text_annotations if x.time <= now]
            for annotation in list(self.annotations):
                if annotation.time == now:
                    annotations.append(annotation)
                if now >= annotation.time:
                    self.annotations.remove(annotation)

        sample = gst_sample_make_writable(sample)
        img = array_from_sample(sample, readwrite=True)
        # Text:
        _draw_text(
            img,
            datetime.datetime.fromtimestamp(now).strftime("%H:%M:%S.%f")[:-4],
            (10, 30), (255, 255, 255))
        for i, x in enumerate(reversed(current_texts)):
            origin = (10, (i + 2) * 30)
            age = float(now - x.time) / 3
            color = (native(int(255 * max([1 - age, 0.5]))).__int__(),) * 3
            _draw_text(img, x.text, origin, color)

        # Regions:
        for annotation in annotations:
            _draw_annotation(img, annotation)

        APPSRC_LIMIT_BYTES = 100 * 1024 * 1024  # 100MB
        if self.appsrc.props.current_level_bytes > APPSRC_LIMIT_BYTES:
            # appsrc is backed-up, perhaps something's gone wrong.  We don't
            # want to use up all RAM, so let's drop the buffer on the floor.
            if not self._appsrc_was_full:
                warn("sink pipeline appsrc is full, dropping buffers from now "
                     "on")
                self._appsrc_was_full = True
            return
        elif self._appsrc_was_full:
            debug("sink pipeline appsrc no longer full, pushing buffers again")
            self._appsrc_was_full = False

        self.appsrc.props.caps = sample.get_caps()
        self.appsrc.emit("push-buffer", sample.get_buffer())
        self._sample_count += 1
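
The current_level_bytes check above reads how many bytes are queued inside the sink pipeline's appsrc. As a small illustrative sketch (the element name and limits are assumptions, not taken from the example), the related appsrc properties can be inspected and tuned like this:

appsrc = sink_pipeline.get_by_name("appsrc")   # assumed element name
print(appsrc.props.current_level_bytes)        # bytes currently queued in appsrc
appsrc.props.max_bytes = 100 * 1024 * 1024     # emit "enough-data" beyond this
appsrc.props.block = False                     # don't block push-buffer when full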
Example #5
def svg_to_array(svg):
    from _stbt.gst_utils import array_from_sample

    pipeline = Gst.parse_launch(
        'appsrc name="src" caps="image/svg" ! rsvgdec ! '
        'videoconvert ! appsink caps="video/x-raw,format=BGR" name="sink"'
    )
    src = pipeline.get_by_name("src")
    sink = pipeline.get_by_name("sink")
    pipeline.set_state(Gst.State.PLAYING)
    buf = Gst.Buffer.new_wrapped(svg)
    src.emit("push-buffer", buf)
    sample = sink.emit("pull-sample")
    src.emit("end-of-stream")
    pipeline.set_state(Gst.State.NULL)
    pipeline.get_state(0)
    return array_from_sample(sample)
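
A hedged usage sketch for svg_to_array; the SVG markup and the Gst initialisation below are illustrative assumptions, not part of the original example:

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init(None)

# Decode a small red square (bytes, as Gst.Buffer.new_wrapped expects).
svg = (b'<svg xmlns="http://www.w3.org/2000/svg" width="64" height="64">'
       b'<rect width="64" height="64" fill="red"/></svg>')
frame = svg_to_array(svg)
print(frame.shape)  # expected: (64, 64, 3) BGR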
Example #6
    def on_new_sample(self, appsink):
        sample = appsink.emit("pull-sample")

        running_time = sample.get_segment().to_running_time(
            Gst.Format.TIME, sample.get_buffer().pts)
        sample.time = float(appsink.base_time + running_time) / 1e9

        if (sample.time > self.init_time + 31536000 or
                sample.time < self.init_time - 31536000):  # 1 year
            warn("Received frame with suspicious timestamp: %f. Check your "
                 "source-pipeline configuration." % sample.time)

        frame = array_from_sample(sample)
        frame.flags.writeable = False

        # See also: logging.draw_on
        frame._draw_sink = weakref.ref(self._sink_pipeline)  # pylint: disable=protected-access
        self.tell_user_thread(frame)
        self._sink_pipeline.on_sample(sample)
        return Gst.FlowReturn.OK
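
on_new_sample above is an appsink "new-sample" callback. A minimal sketch of how such a callback could be wired up; the element name and the object that owns the method are assumptions, not from the example:

appsink = source_pipeline.get_by_name("appsink")      # assumed element name
appsink.props.emit_signals = True                     # required for "new-sample"
appsink.connect("new-sample", frames.on_new_sample)   # `frames` owns the callback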
Example #7
def chessboard_calibration(dut, timeout=10):
    from _stbt.gst_utils import array_from_sample

    undistorted_appsink = \
        dut._display.source_pipeline.get_by_name('undistorted_appsink')

    sys.stderr.write("Searching for chessboard\n")
    endtime = time.time() + timeout
    while time.time() < endtime:
        sample = undistorted_appsink.emit('pull-sample')
        try:
            input_image = array_from_sample(sample)
            params = chessboard.calculate_calibration_params(input_image)
            break
        except chessboard.NoChessboardError:
            if time.time() > endtime:
                raise

    geometriccorrection = dut._display.source_pipeline.get_by_name(
        'geometric_correction')

    geometriccorrection_params = {
        'camera-matrix': ('{fx}    0 {cx}'
                          '   0 {fy} {cy}'
                          '   0    0    1').format(**params),
        'distortion-coefficients': '{k1} {k2} {p1} {p2} {k3}'.format(**params),
        'inv-homography-matrix': (
            '{ihm11} {ihm21} {ihm31} '
            '{ihm12} {ihm22} {ihm32} '
            '{ihm13} {ihm23} {ihm33}').format(**params),
    }
    for key, value in geometriccorrection_params.items():
        geometriccorrection.set_property(key, value)

    print_error_map(
        sys.stderr,
        *chessboard.find_corrected_corners(params, input_image))

    set_config(
        'global', 'geometriccorrection_params',
        ' '.join('%s="%s"' % v for v in geometriccorrection_params.items()))
Example #8
def chessboard_calibration(timeout=10):
    from _stbt.gst_utils import array_from_sample

    undistorted_appsink = \
        stbt._dut._display.source_pipeline.get_by_name('undistorted_appsink')

    sys.stderr.write("Searching for chessboard\n")
    endtime = time.time() + timeout
    while time.time() < endtime:
        sample = undistorted_appsink.emit('pull-sample')
        try:
            input_image = array_from_sample(sample)
            params = chessboard.calculate_calibration_params(input_image)
            break
        except chessboard.NoChessboardError:
            if time.time() > endtime:
                raise

    geometriccorrection = stbt._dut._display.source_pipeline.get_by_name(
        'geometric_correction')

    geometriccorrection_params = {
        'camera-matrix': ('{fx}    0 {cx}'
                          '   0 {fy} {cy}'
                          '   0    0    1').format(**params),
        'distortion-coefficients': '{k1} {k2} {p1} {p2} {k3}'.format(**params),
        'inv-homography-matrix': (
            '{ihm11} {ihm21} {ihm31} '
            '{ihm12} {ihm22} {ihm32} '
            '{ihm13} {ihm23} {ihm33}').format(**params),
    }
    for key, value in geometriccorrection_params.items():
        geometriccorrection.set_property(key, value)

    print_error_map(
        sys.stderr,
        *chessboard.find_corrected_corners(params, input_image))

    set_config(
        'global', 'geometriccorrection_params',
        ' '.join('%s="%s"' % v for v in geometriccorrection_params.items()))
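
chessboard.calculate_calibration_params is not shown in these examples. As a hedged sketch only, the intrinsic and distortion values used above ({fx}, {cx}, {k1}, ...) can be computed with OpenCV's chessboard calibration; the board size and returned key names are assumptions, and the inverse-homography terms ({ihm11} ... {ihm33}) are omitted:

import cv2
import numpy as np

def calculate_calibration_params(bgr_image, board_size=(9, 6)):
    # Hypothetical sketch, not the stb-tester implementation.
    gray = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    found, corners = cv2.findChessboardCorners(gray, board_size)
    if not found:
        raise ValueError("No chessboard found")

    # Ideal 3D corner positions on a flat board (z = 0).
    objp = np.zeros((board_size[0] * board_size[1], 3), np.float32)
    objp[:, :2] = np.mgrid[0:board_size[0], 0:board_size[1]].T.reshape(-1, 2)

    _, camera_matrix, dist_coeffs, _, _ = cv2.calibrateCamera(
        [objp], [corners], gray.shape[::-1], None, None)
    k1, k2, p1, p2, k3 = dist_coeffs.ravel()[:5]
    return {
        "fx": camera_matrix[0, 0], "fy": camera_matrix[1, 1],
        "cx": camera_matrix[0, 2], "cy": camera_matrix[1, 2],
        "k1": k1, "k2": k2, "p1": p1, "p2": p2, "k3": k3,
    }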
Example #9
    def on_new_sample(self, appsink):
        sample = appsink.emit("pull-sample")

        running_time = sample.get_segment().to_running_time(
            Gst.Format.TIME, sample.get_buffer().pts)
        sample.time = (
            float(appsink.base_time + running_time) / 1e9)

        if (sample.time > self.init_time + 31536000 or
                sample.time < self.init_time - 31536000):  # 1 year
            warn("Received frame with suspicious timestamp: %f. Check your "
                 "source-pipeline configuration." % sample.time)

        frame = array_from_sample(sample)
        frame.flags.writeable = False

        # See also: logging.draw_on
        frame._draw_sink = weakref.ref(self._sink_pipeline)  # pylint: disable=protected-access
        self.tell_user_thread(frame)
        self._sink_pipeline.on_sample(sample)
        return Gst.FlowReturn.OK