def add_data(self, data, metadata=None):
    """Queue a chunk of compressed sound data onto the pipeline.

    The chunk is dropped (with a log message) when there is no source
    element or the pipeline is stopped.  On a successful push, the
    buffer/byte counters are updated, the queue level is sampled into
    ``self.levels`` and - while in the "pushing" state - the queue
    min/max thresholds are re-tuned.  Always re-emits the info signal.

    :param data: raw compressed audio bytes to push
    :param metadata: optional dict; only the "duration" key is used
    """
    if not self.src:
        log("add_data(..) dropped, no source")
        return
    if self.state=="stopped":
        log("add_data(..) dropped, pipeline is stopped")
        return
    log("add_data(%s bytes, %s) queue_state=%s", len(data), metadata, self.queue_state)
    buf = gst.new_buffer(data)
    if metadata:
        #having a timestamp causes problems with the queue and overruns:
        #ts = metadata.get("timestamp")
        #if ts is not None:
        #    buf.timestamp = normv(ts)
        d = metadata.get("duration")
        if d is not None:
            d = normv(d)
            if d>0:
                #d has already been normalized above,
                #no need to call normv() a second time:
                buf.duration = d
    if self.push_buffer(buf):
        self.buffer_count += 1
        self.byte_count += len(data)
        clt = self.queue.get_property("current-level-time")//MS_TO_NS
        log("pushed %5i bytes, new buffer level: %3ims, queue state=%s", len(data), clt, self.queue_state)
        self.levels.append((time.time(), clt))
        if self.queue_state=="pushing":
            self.set_min_level()
            self.set_max_level()
    self.emit_info()
def add_data(self, data, metadata=None):
    """Push a chunk of compressed sound data to the source element.

    Silently dropped when there is no source element.  On a successful
    push, updates the buffer/byte counters and logs the new queue level.

    :param data: raw compressed audio bytes to push
    :param metadata: accepted for interface compatibility but unused here
        (timestamps/durations caused problems with the queue - the code
        that applied them was permanently disabled and has been removed)
    """
    #debug("sound sink: adding %s bytes to %s, metadata: %s, level=%s", len(data), self.src, metadata, int(self.queue.get_property("current-level-time")/MS_TO_NS))
    if not self.src:
        return
    buf = gst.new_buffer(data)
    log("add_data(..) queue_state=%s", self.queue_state)
    if self.push_buffer(buf):
        self.buffer_count += 1
        self.byte_count += len(data)
        ltime = int(self.queue.get_property("current-level-time")/MS_TO_NS)
        log("sound sink: pushed %s bytes, new buffer level: %sms", len(data), ltime)
def do_add_data(self, data, metadata=None):
    """Wrap ``data`` in a new gstreamer buffer and push it to the source.

    :param data: raw compressed audio bytes
    :param metadata: optional dict; only the "duration" key is applied
        to the buffer (timestamps cause queue overrun problems and are
        deliberately not set - see the disabled code below)
    :return: True if the buffer was pushed (counters updated), False otherwise
    """
    log("do_add_data(%s bytes, %s) queue_state=%s", len(data), metadata, self.queue_state)
    buf = gst.new_buffer(data)
    if metadata:
        #having a timestamp causes problems with the queue and overruns:
        #ts = metadata.get("timestamp")
        #if ts is not None:
        #    buf.timestamp = normv(ts)
        #    log.info("timestamp=%s", ts)
        d = metadata.get("duration")
        if d is not None:
            d = normv(d)
            if d>0:
                #d has already been normalized above,
                #no need to call normv() a second time:
                buf.duration = d
    if self.push_buffer(buf):
        self.inc_buffer_count()
        self.inc_byte_count(len(data))
        return True
    return False
def do_add_data(self, data, metadata=None):
    """Create a gstreamer buffer from ``data`` and hand it to the source.

    Only the "duration" metadata value is applied to the buffer;
    timestamps interact badly with the queue (overruns), so that code
    path stays disabled.  Returns True when the push succeeded and the
    buffer/byte counters were updated, False otherwise.
    """
    log("add_data(%s bytes, %s) queue_state=%s", len(data), metadata, self.queue_state)
    buf = gst.new_buffer(data)
    if metadata:
        #having a timestamp causes problems with the queue and overruns:
        #ts = metadata.get("timestamp")
        #if ts is not None:
        #    buf.timestamp = normv(ts)
        #    log.info("timestamp=%s", ts)
        duration = metadata.get("duration")
        if duration is not None:
            duration = normv(duration)
            if duration > 0:
                buf.duration = normv(duration)
    if not self.push_buffer(buf):
        return False
    self.inc_buffer_count()
    self.inc_byte_count(len(data))
    return True
def add_data(self, data, metadata=None):
    """Push a chunk of compressed sound data into the pipeline.

    The chunk is dropped when there is no source element, when the
    pipeline is stopped, or while the queue is still recovering from an
    overrun (above 50% full).  Timestamp and duration metadata, when
    present, are normalized and applied to the new buffer.  Always
    re-emits the info signal at the end.
    """
    if not self.src:
        log("add_data(..) dropped, no source")
        return
    if self.state == "stopped":
        log("add_data(..) dropped, pipeline is stopped")
        return
    log("add_data(%s bytes, %s) queue_state=%s, src=%s", len(data), metadata, self.queue_state, self.src)
    if self.queue_state == "overrun":
        level = self.queue.get_property("current-level-time")
        pct = int(min(QUEUE_TIME, level) * 100.0 / QUEUE_TIME)
        if pct >= 50:
            log("dropping new data because of overrun: %s%%", pct)
            return
        #the queue has drained below half full, resume normal operation:
        self.queue_state = "running"
    buffer = gst.new_buffer(data)
    if metadata:
        timestamp = metadata.get("timestamp")
        duration = metadata.get("duration")
        if timestamp is not None:
            buffer.timestamp = normv(timestamp)
        if duration is not None:
            buffer.duration = normv(duration)
    if self.push_buffer(buffer):
        self.buffer_count += 1
        self.byte_count += len(data)
        ltime = int(self.queue.get_property("current-level-time") / MS_TO_NS)
        log("pushed %s bytes, new buffer level: %sms", len(data), ltime)
    self.emit_info()
def add_data(self, data, metadata=None):
    """Feed one chunk of compressed audio data to the source element.

    Drops the chunk if there is no source, if the pipeline is stopped,
    or if the queue is more than half full while in the "overrun"
    state (once it drains below 50%%, the state goes back to "running").
    Normalized timestamp/duration metadata are applied to the buffer
    when supplied.  The info signal is re-emitted unconditionally.
    """
    if not self.src:
        log("add_data(..) dropped, no source")
        return
    if self.state=="stopped":
        log("add_data(..) dropped, pipeline is stopped")
        return
    log("add_data(%s bytes, %s) queue_state=%s, src=%s", len(data), metadata, self.queue_state, self.src)
    if self.queue_state == "overrun":
        cur = self.queue.get_property("current-level-time")
        fill_pct = int(min(QUEUE_TIME, cur)*100.0/QUEUE_TIME)
        if fill_pct<50:
            self.queue_state = "running"
        else:
            log("dropping new data because of overrun: %s%%", fill_pct)
            return
    buf = gst.new_buffer(data)
    ts = (metadata or {}).get("timestamp")
    if ts is not None:
        buf.timestamp = normv(ts)
    d = (metadata or {}).get("duration")
    if d is not None:
        buf.duration = normv(d)
    pushed = self.push_buffer(buf)
    if pushed:
        self.buffer_count += 1
        self.byte_count += len(data)
        ltime = int(self.queue.get_property("current-level-time")/MS_TO_NS)
        log("pushed %s bytes, new buffer level: %sms", len(data), ltime)
    self.emit_info()