def add_data(self, data, metadata=None):
    """Queue a compressed audio packet for playback.

    The data is dropped if the pipeline has no source element or has
    been stopped. Only the "duration" metadata is applied to the new
    buffer: setting a timestamp causes problems with the queue and
    overruns (see the commented-out code below).
    """
    if not self.src:
        log("add_data(..) dropped, no source")
        return
    if self.state=="stopped":
        log("add_data(..) dropped, pipeline is stopped")
        return
    log("add_data(%s bytes, %s) queue_state=%s", len(data), metadata, self.queue_state)
    buf = gst.new_buffer(data)
    if metadata:
        #having a timestamp causes problems with the queue and overruns:
        #ts = metadata.get("timestamp")
        #if ts is not None:
        #    buf.timestamp = normv(ts)
        d = metadata.get("duration")
        if d is not None:
            d = normv(d)
            if d>0:
                #fix: "d" has already been normalized above,
                #calling normv() a second time was redundant:
                buf.duration = d
    if self.push_buffer(buf):
        self.buffer_count += 1
        self.byte_count += len(data)
        clt = self.queue.get_property("current-level-time")//MS_TO_NS
        log("pushed %5i bytes, new buffer level: %3ims, queue state=%s", len(data), clt, self.queue_state)
        #record buffer level history so the min/max levels can adapt:
        self.levels.append((time.time(), clt))
        if self.queue_state=="pushing":
            self.set_min_level()
            self.set_max_level()
    self.emit_info()
def emit_buffer(self, sample):
    """Extract the data from a gst sample and forward it over the wire.

    Buffers whose pts falls outside the [min_timestamp, max_timestamp]
    window set by the cutter are skipped.
    Buffers with no pts and no duration may be bundled (up to 10)
    into pending_metadata instead of being emitted immediately.
    Returns 0 when the buffer was skipped or bundled,
    otherwise the return value of _emit_buffer().
    """
    buf = sample.get_buffer()
    pts = normv(buf.pts)
    #cutter window check: drop buffers outside the allowed pts range
    if self.min_timestamp > 0 and pts < self.min_timestamp:
        gstlog("cutter: skipping buffer with pts=%s (min-timestamp=%s)", pts, self.min_timestamp)
        return 0
    elif self.max_timestamp > 0 and pts > self.max_timestamp:
        gstlog("cutter: skipping buffer with pts=%s (max-timestamp=%s)", pts, self.max_timestamp)
        return 0
    size = buf.get_size()
    data = buf.extract_dup(0, size)
    duration = normv(buf.duration)
    metadata = {
        "timestamp": pts,
        "duration": duration,
    }
    if self.timestamp:
        #the "timestamp" element tracks the clock offset ("delta")
        delta = self.timestamp.get_property("delta")
        ts = (pts + delta) // 1000000  #ns to ms
        now = monotonic_time()
        latency = int(1000 * now) - ts
        #log.info("emit_buffer: delta=%i, pts=%i, ts=%s, time=%s, latency=%ims", delta, pts, ts, now, (latency//1000000))
        ts_info = {
            "ts": ts,
            "latency": latency,
        }
        metadata.update(ts_info)
        #also expose the timing data via the pipeline info:
        self.info.update(ts_info)
    #buffers without timing information can be bundled together:
    if pts == -1 and duration == -1 and BUNDLE_METADATA and len(
            self.pending_metadata) < 10:
        self.pending_metadata.append(data)
        return 0
    return self._emit_buffer(data, metadata)
def emit_buffer1(self, sample):
    """Pull the raw bytes out of a gst sample and forward them over the wire."""
    gst_buffer = sample.get_buffer()
    nbytes = gst_buffer.get_size()
    payload = gst_buffer.extract_dup(0, nbytes)
    meta = {
        "timestamp" : normv(gst_buffer.pts),
        "duration" : normv(gst_buffer.duration),
    }
    self.do_emit_buffer(payload, meta)
def emit_buffer1(self, sample):
    """Forward the contents of a gst sample over the wire,
    together with its normalized timing metadata."""
    gbuf = sample.get_buffer()
    data = gbuf.extract_dup(0, gbuf.get_size())
    self.do_emit_buffer(data, {
        "timestamp": normv(gbuf.pts),
        "duration": normv(gbuf.duration),
    })
def emit_buffer0(self, buf, metadata=None):
    """ convert pygst structure into something more generic for the wire """
    #fix: the default value used to be a shared mutable dict ("metadata={}");
    #the argument is unused here, so "None" is a safe backward-compatible default
    #none of the metadata is really needed at present, but it may be in the future:
    #metadata = {"caps" : buf.get_caps().to_string(),
    #            "size" : buf.size,
    #            "timestamp" : buf.timestamp,
    #            "duration" : buf.duration,
    #            "offset" : buf.offset,
    #            "offset_end": buf.offset_end}
    self.do_emit_buffer(buf.data, {
        #"caps" : buf.get_caps().to_string(),
        "timestamp" : normv(buf.timestamp),
        "duration" : normv(buf.duration)
        })
def emit_buffer1(self, sample):
    """Extract the bytes from a gst sample, using the memory-mapping
    workaround when the gi bindings lack "extract_dup",
    then hand them to emit_buffer with the timing metadata."""
    gbuf = sample.get_buffer()
    blen = gbuf.get_size()
    dup = getattr(gbuf, "extract_dup", None)
    if dup is None:
        #crappy gi bindings detected, using workaround:
        from xpra.sound.gst_hacks import map_gst_buffer
        with map_gst_buffer(gbuf) as mapped:
            raw = bytes(mapped[:])
    else:
        raw = dup(0, blen)
    return self.emit_buffer(raw, {
        "timestamp" : normv(gbuf.pts),
        "duration" : normv(gbuf.duration),
    })
def emit_buffer0(self, buf, metadata=None):
    """ convert pygst structure into something more generic for the wire """
    #fix: replaced the mutable default argument "metadata={}"
    #(a single dict shared between all calls) with "None";
    #the parameter is not used by this implementation anyway
    #none of the metadata is really needed at present, but it may be in the future:
    #metadata = {"caps" : buf.get_caps().to_string(),
    #            "size" : buf.size,
    #            "timestamp" : buf.timestamp,
    #            "duration" : buf.duration,
    #            "offset" : buf.offset,
    #            "offset_end": buf.offset_end}
    self.do_emit_buffer(
        buf.data, {
            #"caps" : buf.get_caps().to_string(),
            "timestamp": normv(buf.timestamp),
            "duration": normv(buf.duration)
        })
def emit_buffer0(self, buf):
    """ convert pygst structure into something more generic for the wire """
    log("emit buffer: %s bytes, timestamp=%s", len(buf.data), buf.timestamp//MS_TO_NS)
    meta = {
        "timestamp" : normv(buf.timestamp),
        "duration" : normv(buf.duration)
    }
    #only send the caps when they change:
    caps_dict = self.caps_to_dict(buf.get_caps())
    if not self.caps or self.caps!=caps_dict:
        self.caps = caps_dict
        meta["caps"] = self.caps
    return self.emit_buffer(buf.data, meta)
def emit_buffer0(self, buf):
    """ convert pygst structure into something more generic for the wire """
    log("emit buffer: %s bytes, timestamp=%s", len(buf.data), buf.timestamp//MS_TO_NS)
    new_caps = self.caps_to_dict(buf.get_caps())
    meta = {
        "timestamp" : normv(buf.timestamp),
        "duration" : normv(buf.duration)
    }
    #attach the caps only when they have changed,
    #and record them in the pipeline info:
    if not self.caps or self.caps!=new_caps:
        self.caps = new_caps
        self.info["caps"] = self.caps
        meta["caps"] = self.caps
    return self.emit_buffer(buf.data, meta)
def do_add_data(self, data, metadata=None):
    """Wrap the data in a new gst buffer and push it into the pipeline.

    Only the "duration" metadata is applied: setting a timestamp
    causes problems with the queue and overruns.
    Returns True if the buffer was pushed successfully.
    """
    log("do_add_data(%s bytes, %s) queue_state=%s", len(data), metadata, self.queue_state)
    buf = gst.new_buffer(data)
    if metadata:
        #having a timestamp causes problems with the queue and overruns:
        #ts = metadata.get("timestamp")
        #if ts is not None:
        #    buf.timestamp = normv(ts)
        #    log.info("timestamp=%s", ts)
        d = metadata.get("duration")
        if d is not None:
            d = normv(d)
            if d>0:
                #fix: "d" is already normalized, the second normv() call was redundant:
                buf.duration = d
    if self.push_buffer(buf):
        self.inc_buffer_count()
        self.inc_byte_count(len(data))
        return True
    return False
def do_add_data(self, data, metadata=None):
    """Create a gst buffer from the data and push it into the pipeline.

    Only a positive "duration" from the metadata is applied to the
    buffer (timestamps cause problems with the queue and overruns).
    Returns True on success, False if the push failed.
    """
    log("add_data(%s bytes, %s) queue_state=%s", len(data), metadata, self.queue_state)
    buf = gst.new_buffer(data)
    if metadata:
        #having a timestamp causes problems with the queue and overruns:
        #ts = metadata.get("timestamp")
        #if ts is not None:
        #    buf.timestamp = normv(ts)
        #    log.info("timestamp=%s", ts)
        d = metadata.get("duration")
        if d is not None:
            d = normv(d)
            if d > 0:
                #fix: "d" is already the normalized value,
                #calling normv() twice was redundant:
                buf.duration = d
    if self.push_buffer(buf):
        self.inc_buffer_count()
        self.inc_byte_count(len(data))
        return True
    return False
def emit_buffer(self, sample):
    """Extract the sample data (with a workaround for gi bindings
    lacking "extract_dup") and either bundle it with the pending
    metadata or emit it over the wire."""
    gbuf = sample.get_buffer()
    size = gbuf.get_size()
    dup_fn = getattr(gbuf, "extract_dup", None)
    if dup_fn:
        data = dup_fn(0, size)
    else:
        #crappy gi bindings detected, using workaround:
        from xpra.sound.gst_hacks import map_gst_buffer
        with map_gst_buffer(gbuf) as view:
            data = bytes(view[:])
    pts = normv(gbuf.pts)
    duration = normv(gbuf.duration)
    #buffers without any timing information can be bundled (up to 10):
    no_timing = pts==-1 and duration==-1
    if no_timing and BUNDLE_METADATA and len(self.pending_metadata)<10:
        self.pending_metadata.append(data)
        return 0
    return self._emit_buffer(data, {
        "timestamp" : pts,
        "duration" : duration,
    })
def emit_buffer1(self, sample):
    """Pull the bytes out of a gst sample (falling back to the
    buffer-mapping hack when "extract_dup" is missing) and
    pass them on to emit_buffer, bundling untimed buffers."""
    gst_buffer = sample.get_buffer()
    nbytes = gst_buffer.get_size()
    extractor = getattr(gst_buffer, "extract_dup", None)
    if extractor is None:
        #crappy gi bindings detected, using workaround:
        from xpra.sound.gst_hacks import map_gst_buffer
        with map_gst_buffer(gst_buffer) as mapped:
            payload = bytes(mapped[:])
    else:
        payload = extractor(0, nbytes)
    pts = normv(gst_buffer.pts)
    duration = normv(gst_buffer.duration)
    if BUNDLE_METADATA and pts==-1 and duration==-1 and len(self.pending_metadata)<10:
        self.pending_metadata.append(payload)
        return 0
    return self.emit_buffer(payload, {
        "timestamp" : pts,
        "duration" : duration,
    })
def add_data(self, data, metadata=None):
    """Queue an audio packet for playback, honouring the overrun state.

    Drops the data when there is no source element, when the pipeline
    is stopped, or when the queue is still overrun (more than 50% full).
    """
    #debug("adding %s bytes to %s, metadata: %s, level=%s", len(data), self.src, metadata, int(self.queue.get_property("current-level-time")/MS_TO_NS))
    if not self.src:
        log("add_data(..) dropped, no source")
        return
    if self.state == "stopped":
        log("add_data(..) dropped, pipeline is stopped")
        return
    log("add_data(%s bytes, %s) queue_state=%s, src=%s", len(data), metadata, self.queue_state, self.src)
    if self.queue_state == "overrun":
        level = self.queue.get_property("current-level-time")
        qpct = int(min(QUEUE_TIME, level) * 100.0 / QUEUE_TIME)
        if qpct >= 50:
            log("dropping new data because of overrun: %s%%", qpct)
            return
        #the queue has drained enough, resume pushing:
        self.queue_state = "running"
    buf = gst.new_buffer(data)
    if metadata:
        ts = metadata.get("timestamp")
        d = metadata.get("duration")
        if ts is not None:
            buf.timestamp = normv(ts)
        if d is not None:
            buf.duration = normv(d)
        #for seeing how the elapsed time evolves
        #(cannot be used for much else as client and server may have different times!)
        #t = metadata.get("time")
        #if t:
        #    log("elapsed=%s (..)", int(time.time()*1000)-t)
        #if we have caps, use them:
        #caps = metadata.get("caps")
        #if caps:
        #    buf.set_caps(gst.caps_from_string(caps))
    if self.push_buffer(buf):
        self.buffer_count += 1
        self.byte_count += len(data)
        level_ms = int(self.queue.get_property("current-level-time") / MS_TO_NS)
        log("pushed %s bytes, new buffer level: %sms", len(data), level_ms)
    self.emit_info()
def add_data(self, data, metadata=None):
    """Push an audio packet into the playback queue.

    The packet is dropped when the source is missing, when the
    pipeline is stopped, or while recovering from an overrun with
    the queue still more than half full.
    """
    #debug("adding %s bytes to %s, metadata: %s, level=%s", len(data), self.src, metadata, int(self.queue.get_property("current-level-time")/MS_TO_NS))
    if not self.src:
        log("add_data(..) dropped, no source")
        return
    if self.state=="stopped":
        log("add_data(..) dropped, pipeline is stopped")
        return
    log("add_data(%s bytes, %s) queue_state=%s, src=%s", len(data), metadata, self.queue_state, self.src)
    if self.queue_state == "overrun":
        current = self.queue.get_property("current-level-time")
        pct = int(min(QUEUE_TIME, current)*100.0/QUEUE_TIME)
        if pct>=50:
            log("dropping new data because of overrun: %s%%", pct)
            return
        self.queue_state = "running"    #queue level is low enough again
    buf = gst.new_buffer(data)
    if metadata:
        for key, attr in (("timestamp", "timestamp"), ("duration", "duration")):
            value = metadata.get(key)
            if value is not None:
                setattr(buf, attr, normv(value))
        #for seeing how the elapsed time evolves
        #(cannot be used for much else as client and server may have different times!)
        #t = metadata.get("time")
        #if t:
        #    log("elapsed=%s (..)", int(time.time()*1000)-t)
        #if we have caps, use them:
        #caps = metadata.get("caps")
        #if caps:
        #    buf.set_caps(gst.caps_from_string(caps))
    if self.push_buffer(buf):
        self.buffer_count += 1
        self.byte_count += len(data)
        queued_ms = int(self.queue.get_property("current-level-time")/MS_TO_NS)
        log("pushed %s bytes, new buffer level: %sms", len(data), queued_ms)
    self.emit_info()