Example no. 1
def test_gst_buffer_channels():

    # raw bit test: is the 4th bit of 8 set? (the helper form is checked below)
    assert bool(8 & (1 << (4 - 1)))
    assert utils.is_kbit_set(2, 2)
    assert utils.is_kbit_set(8, 4)
    assert utils.is_kbit_set(8 | 1, 1)

    assert utils.get_num_channels(GstVideo.VideoFormat.RGB) == 3
    assert utils.get_num_channels(GstVideo.VideoFormat.RGBA) == 4
    assert utils.get_num_channels(GstVideo.VideoFormat.GRAY8) == 1
    assert utils.get_num_channels(GstVideo.VideoFormat.I420) == -1  # planar format
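
The helpers exercised by this test are not shown on this page; a minimal sketch of what they can look like (an assumption, not the project's verbatim code):

# Hypothetical sketch of the `utils` helpers used above.
BITS_PER_BYTE = 8

def is_kbit_set(value: int, k: int) -> bool:
    """True if the k-th bit of `value` (1-indexed from the LSB) is set."""
    return bool(value & (1 << (k - 1)))

def get_num_channels(video_format) -> int:
    """Channel count for packed formats; -1 for planar ones such as I420."""
    channels = {
        GstVideo.VideoFormat.RGBA: 4,
        GstVideo.VideoFormat.RGB: 3,
        GstVideo.VideoFormat.GRAY8: 1,
    }
    return channels.get(video_format, -1)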
Example no. 2
def extract_buffer(sample: Gst.Sample) -> np.ndarray:
    """Extracts Gst.Buffer from Gst.Sample and converts to np.ndarray"""

    buffer = sample.get_buffer()  # Gst.Buffer

    print("timestamp: ", Gst.TIME_ARGS(buffer.pts), "offset: ", buffer.offset)

    caps_format = sample.get_caps().get_structure(0)  # Gst.Structure

    # GstVideo.VideoFormat
    video_format = GstVideo.VideoFormat.from_string(
        caps_format.get_value('format'))

    w, h = caps_format.get_value('width'), caps_format.get_value('height')
    c = utils.get_num_channels(video_format)

    buffer_size = buffer.get_size()

    format_info = GstVideo.VideoFormat.get_info(
        video_format)  # GstVideo.VideoFormatInfo
    bytes_per_component = format_info.bits // utils.BITS_PER_BYTE

    # Count elements, not bytes: use an (H, W, C) shape only when it matches
    # the element count exactly, otherwise fall back to a flat 1-D array
    # (e.g. for planar formats such as I420).
    num_elements = buffer_size // bytes_per_component
    shape = (h, w, c) if (h * w * c == num_elements) else num_elements

    array = np.ndarray(shape=shape,
                       buffer=buffer.extract_dup(0, buffer_size),
                       dtype=utils.get_np_dtype(video_format))

    return np.squeeze(array)  # remove single dimension if exists
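
For context, a function like this is usually driven by an appsink. A minimal wiring sketch (the pipeline string and element name are illustrative, not from the source):

# Hypothetical usage: pull each sample from an appsink and convert it
# with extract_buffer() from the example above.
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

def on_new_sample(sink) -> Gst.FlowReturn:
    sample = sink.emit("pull-sample")  # Gst.Sample
    frame = extract_buffer(sample)     # np.ndarray
    print(frame.shape, frame.dtype)
    return Gst.FlowReturn.OK

Gst.init(None)
pipeline = Gst.parse_launch(
    "videotestsrc num-buffers=10 ! video/x-raw,format=RGB ! "
    "appsink name=sink emit-signals=true sync=false")
pipeline.get_by_name("sink").connect("new-sample", on_new_sample)
pipeline.set_state(Gst.State.PLAYING)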
Example no. 3
    def gst_to_opencv(self, sample):
        # on the first sample, ask the pipeline to run with zero latency
        if self.flag:
            self.flag = False
            self.pipeline.send_event(Gst.Event.new_latency(0))

        self.log_fps()

        buffer = sample.get_buffer()
        caps_format = sample.get_caps().get_structure(0)

        # the map/unmap round-trip only verifies the buffer is readable;
        # the pixel data itself is copied below via extract_dup()
        (result, mapinfo) = buffer.map(Gst.MapFlags.READ)
        assert result
        buffer.unmap(mapinfo)

        frmt_str = caps_format.get_value('format')
        video_format = GstVideo.VideoFormat.from_string(frmt_str)
        w, h = caps_format.get_value('width'), caps_format.get_value('height')
        c = utils.get_num_channels(video_format)

        buffer_size = buffer.get_size()
        shape = (h, w, c) if (h * w * c == buffer_size) else buffer_size
        arr = np.ndarray(shape=shape, buffer=buffer.extract_dup(0, buffer_size),
                         dtype=utils.get_np_dtype(video_format))
        return arr
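
Since the buffer is already mapped above, the extra copy made by extract_dup() could be avoided by reading the mapped memory directly; a sketch reusing the names from the example above (the view is only valid while the buffer stays mapped, hence the explicit copy):

# Alternative sketch: build the array from the mapped memory.
(result, mapinfo) = buffer.map(Gst.MapFlags.READ)
assert result
try:
    arr = np.frombuffer(mapinfo.data, dtype=utils.get_np_dtype(video_format))
    arr = arr.reshape(shape).copy()  # copy before the memory is unmapped
finally:
    buffer.unmap(mapinfo)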
Example no. 4
def gst_buffer_with_caps_for_tensor(buffer: Gst.Buffer,
                                    caps: Gst.Caps) -> np.ndarray:
    """ Converts Gst.Buffer with Gst.Caps (stores buffer info) to np.ndarray """

    structure = caps.get_structure(0)  # Gst.Structure

    width, height = structure.get_value("width"), structure.get_value("height")

    # GstVideo.VideoFormat
    video_format = utils.gst_video_format_from_string(
        structure.get_value('format'))

    channels = utils.get_num_channels(video_format)

    dtype = utils.get_np_dtype(video_format)  # np.dtype

    format_info = GstVideo.VideoFormat.get_info(
        video_format)  # GstVideo.VideoFormatInfo

    return gst_buffer_for_tensor(buffer,
                                 width=width,
                                 height=height,
                                 channels=channels,
                                 dtype=dtype,
                                 bpp=format_info.bits)
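
The gst_buffer_for_tensor helper that does the actual conversion is not shown in this example; a plausible sketch, assuming it mirrors the extraction logic from Example no. 2:

# Hypothetical sketch of the helper called above.
def gst_buffer_for_tensor(buffer: Gst.Buffer, *, width: int, height: int,
                          channels: int, dtype: np.dtype, bpp: int) -> np.ndarray:
    buffer_size = buffer.get_size()
    num_elements = buffer_size // (bpp // utils.BITS_PER_BYTE)
    shape = ((height, width, channels)
             if height * width * channels == num_elements else num_elements)
    return np.ndarray(shape=shape,
                      buffer=buffer.extract_dup(0, buffer_size),
                      dtype=dtype)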
Example no. 5
def test_gst_buffer_to_ndarray():

    caps = Gst.Caps.from_string(
        "video/x-raw,format={},width={},height={}".format(
            FORMAT, WIDTH, HEIGHT))

    video_format = utils.gst_video_format_from_string(FORMAT)
    channels = utils.get_num_channels(video_format)
    dtype = utils.get_np_dtype(video_format)

    npndarray = np.random.randint(low=0,
                                  high=255,
                                  size=(HEIGHT, WIDTH, channels),
                                  dtype=dtype)
    gst_buffer = utils.ndarray_to_gst_buffer(npndarray)

    res_npndarray = utils.gst_buffer_with_caps_to_ndarray(gst_buffer, caps)

    assert (npndarray == res_npndarray).all()
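
The round-trip above relies on utils.ndarray_to_gst_buffer; such a helper can be as small as the following sketch (an assumption, not the project's verbatim code):

# Plausible sketch of the serialization helper used in the test.
def ndarray_to_gst_buffer(array: np.ndarray) -> Gst.Buffer:
    """Wrap a contiguous numpy array's bytes in a new Gst.Buffer."""
    return Gst.Buffer.new_wrapped(array.tobytes())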
Example no. 6
 def __init__(self,
              properties_str: str,
              flow_id: str,
              sink_handler: Callable,
              command: str,
              height_default: int = 480,
              width_default: int = 640,
              framerate_default: Fraction = Fraction(30),
              video_format_default: str = 'RGB'):
     super().__init__(
         properties_str=properties_str,
         tag_groups=[
             'com.adlinktech.vision.inference/2.000/DetectionBoxTagGroup',
             'com.adlinktech.vision.capture/2.000/VideoFrameTagGroup'
         ],
         thing_cls=['com.adlinktech.vision/ObjectDetector'])
     self.__flow_id = flow_id
     self.__sink_handler = sink_handler
     self.__frame_data_class = class_from_thing_input(
         self.dr, self.thing, 'VideoFrameData')
     self.__frame_subject = Subject()
     self.__listener = FrameListener(self.__frame_subject,
                                     self.__frame_data_class)
     args_caps = parse_caps(command)
     self.command = command
     self.width = int(args_caps.get('width', width_default))
     self.height = int(args_caps.get('height', height_default))
     fps = Fraction(args_caps.get('framerate', framerate_default))
     self.video_format = args_caps.get('format', video_format_default)
     # the utils helpers expect a GstVideo.VideoFormat rather than the raw string
     video_format = GstVideo.VideoFormat.from_string(self.video_format)
     self.channels = utils.get_num_channels(video_format)
     self.dtype = utils.get_np_dtype(video_format)
     self.fps_str = fraction_to_str(fps)
     self.caps = f'video/x-raw,format={self.video_format},width={self.width},height={self.height},framerate={self.fps_str}'
     self.duration = 10 ** 9 / (fps.numerator / fps.denominator)  # frame duration (ns)
     self.pipeline = None
     self.app_src = None
     self.app_sink = None
     self.terminated = False
     self.pts = self._pts()
     self.__frame_subject.map(lambda s: self.__emit(s[0], s[1]))
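
This constructor leans on parse_caps and fraction_to_str helpers that are not shown here; minimal sketches for a simple caps string (assumptions, not the project's verbatim code):

# Hypothetical sketches of the helpers used by the constructor.
from fractions import Fraction

def parse_caps(caps_str: str) -> dict:
    """Extract key=value fields (width, height, format, ...) from a caps
    string such as 'video/x-raw,format=RGB,width=640,height=480'."""
    fields = {}
    for token in caps_str.split(','):
        if '=' in token:
            key, value = token.split('=', maxsplit=1)
            fields[key.strip()] = value.strip()
    return fields

def fraction_to_str(fraction: Fraction) -> str:
    """Render a Fraction in the 'num/den' form GStreamer caps expect."""
    return f'{fraction.numerator}/{fraction.denominator}'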
Example no. 7
                help="Num buffers to pass")

args = vars(ap.parse_args())

command = args["pipeline"]

args_caps = parse_caps(command)
NUM_BUFFERS = int(args["num_buffers"])

WIDTH = int(args_caps.get("width", WIDTH))
HEIGHT = int(args_caps.get("height", HEIGHT))
FPS = Fraction(args_caps.get("framerate", FPS))

VIDEO_FORMAT = args_caps.get("format", VIDEO_FORMAT)  # keep the parsed format for the caps string below
GST_VIDEO_FORMAT = GstVideo.VideoFormat.from_string(VIDEO_FORMAT)
CHANNELS = utils.get_num_channels(GST_VIDEO_FORMAT)
DTYPE = utils.get_np_dtype(GST_VIDEO_FORMAT)

CAPS = f"video/x-raw,format={VIDEO_FORMAT},width={WIDTH},height={HEIGHT},framerate={fraction_to_str(FPS)}"

with GstContext():  # create GstContext (hides MainLoop)

    pipeline = GstPipeline(command)

    def on_pipeline_init(self):
        """Setup AppSrc element"""
        appsrc = self.get_by_cls(GstApp.AppSrc)[0]  # get AppSrc

        # instructs appsrc that we will be dealing with timed buffer
        appsrc.set_property("format", Gst.Format.TIME)