def music_delivery(self, session, frames, frame_size, num_frames, sample_type, sample_rate, channels):
    """Callback used by pyspotify"""
    # pylint: disable = R0913
    # Too many arguments (8/5)
    assert sample_type == 0, 'Expects 16-bit signed integer samples'
    # Describe the raw PCM format so downstream elements can negotiate.
    caps_string = (
        'audio/x-raw-int, endianness=(int)1234, channels=(int)%(channels)d, '
        'width=(int)16, depth=(int)16, signed=(boolean)true, '
        'rate=(int)%(sample_rate)d'
    ) % {
        'sample_rate': sample_rate,
        'channels': channels,
    }
    out_buffer = gst.Buffer(bytes(frames))
    out_buffer.set_caps(gst.caps_from_string(caps_string))
    # Report how many frames were consumed; 0 tells pyspotify to retry later.
    if self.audio.emit_data(out_buffer).get():
        return num_frames
    return 0
def do_create(self, offset, length):
    """Grab one Kinect depth frame and return it wrapped in a gst.Buffer."""
    depth, timestamp = freenect.sync_get_depth()
    raw = numpy.getbuffer(depth)
    self.buf = gst.Buffer(raw)
    self.buf.timestamp = 0
    # Effectively infinite duration: 2**63 - 1, the largest signed 64-bit value.
    self.buf.duration = (1 << 63) - 1
    return gst.FLOW_OK, self.buf
def chainfunc(self, pad, buffer):
    """Accumulate incoming data until a complete length-prefixed header is
    seen, then swap it for a fake header and switch to pass-through mode.

    Returns gst.FLOW_OK in every case; data is pushed on self.srcpad.
    """
    if self.proxy:
        # Header already rewritten: forward everything downstream untouched.
        self.srcpad.push(buffer)
        return gst.FLOW_OK
    self.buffer = self.buffer + buffer.data
    if not self.buffer_size:
        try:
            # ">L4s": big-endian 32-bit header length followed by a 4-byte type tag.
            self.buffer_size, a_type = struct.unpack(
                ">L4s", self.buffer[:8])
        except struct.error:
            # Fewer than 8 bytes buffered so far -- wait for more data.
            # (Was a bare `except:`, which also swallowed unrelated errors.)
            return gst.FLOW_OK
    if len(self.buffer) < self.buffer_size:
        # we need to buffer more
        return gst.FLOW_OK
    # Drop the original header and prepend our fabricated one.
    payload = self.buffer[self.buffer_size:]
    fake_header = self.get_fake_header()
    n_buf = gst.Buffer(fake_header + payload)
    self.proxy = True
    self.srcpad.push(n_buf)
    return gst.FLOW_OK
def chainfunc(self, pad, buffer): try: print 'Got resize buffer' # Simplest: just propagate the data # self.srcpad.push(buffer) # Import into PIL and downsize it # Raw jpeg to pr0n PIL wrapper object print 'resize chain', len(buffer.data), len(buffer.data) / 3264.0 #open('temp.jpg', 'w').write(buffer.data) #io = StringIO.StringIO(buffer.data) io = StringIO.StringIO(str(buffer)) try: image = Image.open(io) except: print 'failed to create image' return gst.FLOW_OK # Use a fast filter since this is realtime image = get_scaled(image, 0.5, Image.NEAREST) output = StringIO.StringIO() image.save(output, 'jpeg') self.srcpad.push(gst.Buffer(output.getvalue())) except: traceback.print_exc() os._exit(1) return gst.FLOW_OK
def on_audneeddata(self, element, length):
    """appsrc 'need-data' callback: generate one chunk of timecode audio
    and push it into the pipeline.

    :param element: the appsrc element requesting data
    :param length: number of bytes requested (informational; the generator
        decides the actual chunk size)
    """
    # build_audio_data() yields 16-bit samples; pack them as a short array.
    # (An unreachable `if False:` debugging branch and a large amount of
    # commented-out scaffolding were removed here -- behavior is unchanged.)
    tcbuf = array.array('h', self.tcgen.build_audio_data())
    tcstr = tcbuf.tostring()
    buffer = gst.Buffer(tcstr)
    result = element.emit('push-buffer', buffer)
def startVid(self):
    """Build the video widget and the GStreamer playback bin, then wire
    the pipeline bus and the play/back buttons.

    Creates a Tk Frame to host video output, wraps a timeoverlay +
    ximagesink pair in a gst.Bin used as the player's video-sink, and
    points the player at the project's video file.
    """
    # Required before using GStreamer from a threaded (Tk) application.
    gobject.threads_init()
    video = Frame(self, background='black')
    video.grid(row=0, column=0, columnspan=8, rowspan=4, padx=2, sticky=E+W+S+N)
    # X window id of the frame; passed to sync-message handling so the
    # video sink can render into this widget.
    window_id = video.winfo_id()
    self.buf = gst.Buffer()
    self.bin = gst.Bin("my-bin")
    timeoverlay = gst.element_factory_make("timeoverlay", "overlay")
    self.bin.add(timeoverlay)
    # Expose the overlay's sink pad as the bin's own sink via a ghost pad.
    pad = timeoverlay.get_pad("video_sink")
    ghostpad = gst.GhostPad("sink", pad)
    self.bin.add_pad(ghostpad)
    videosink = gst.element_factory_make("ximagesink")
    self.bin.add(videosink)
    gst.element_link_many(timeoverlay, videosink)
    self.player.set_property('video-sink', self.bin)
    self.player.set_property('uri', 'file://%s' % (os.path.abspath(self.project.videoPath)))
    bus = self.player.get_bus()
    bus.add_signal_watch()
    # sync-message emission is needed to set the window handle from the
    # streaming thread at the right moment.
    bus.enable_sync_message_emission()
    bus.connect("message", self.on_message, window_id)
    bus.connect('sync-message::element', self.on_sync_message, window_id)
    self.play.configure(command=lambda: self.play_video())
    self.back.configure(command=self.play_back)
def do_create(self, offset, size):
    """Serve a fixed-size slice of the preloaded data at `offset`.

    The requested `size` is ignored; chunks are always 64 KiB.
    Returns gst.FLOW_UNEXPECTED past the end of the data.
    """
    chunk = 4096 * 2 ** 4
    if not self._data or offset >= self._data_len:
        return gst.FLOW_UNEXPECTED, None
    blob = self._data[offset:offset + chunk]
    return gst.FLOW_OK, gst.Buffer(blob)
def _finish_fragment(self, timestamp, index):
    """Close the fragment currently being accumulated: prepend the stream
    headers, join all buffered data into one timestamped GstBuffer, wrap
    it in a Fragment, and emit 'new-fragment'.

    :param timestamp: end timestamp of the fragment (also start of the next)
    :param index: sequence number assigned to the finished fragment

    On a timestamp discontinuity the pending data is discarded and no
    fragment is produced.
    """
    # Write streamheaders at the beginning of each fragment
    s = self.sinkpad.get_negotiated_caps()[0]
    frag = []
    if s.has_field('streamheader'):
        frag = list(s['streamheader'])
    frag.extend(self._fragment)
    # Check for discontinuities: no events seen yet, or time went backwards
    # (or stood still) -- drop the fragment rather than emit bad timing.
    if self._last_event_ts == gst.CLOCK_TIME_NONE or\
            timestamp <= self._last_event_ts:
        self._reset_fragment(timestamp)
        self._last_fragment = None
        return
    # Create the GstBuffer: concatenate headers + accumulated payloads.
    data = ''.join([b.data for b in frag])
    buf = gst.Buffer(data)
    buf.timestamp = self._last_event_ts
    buf.duration = timestamp - buf.timestamp
    if self._in_caps:
        # Mark the buffer as carrying in-caps (header) data.
        buf.flag_set(gst.BUFFER_FLAG_IN_CAPS)
    # Create the GstFragment and emit the new-fragment signal
    self._last_fragment = Fragment(index, buf)
    self.emit('new-fragment')
    self._reset_fragment(timestamp)
def do_create(self, offset, length):
    """Grab one Kinect RGB frame and return it wrapped in a gst.Buffer."""
    rgb, timestamp = freenect.sync_get_video()
    raw = numpy.getbuffer(rgb.view(numpy.uint8))
    self.buf = gst.Buffer(raw)
    self.buf.timestamp = 0
    # 2**63 - 1: largest signed 64-bit value, i.e. "lasts forever".
    self.buf.duration = (1 << 63) - 1
    return gst.FLOW_OK, self.buf
def srcpad_get(self, pad):
    """Read the next block from the backing file.

    Returns a gst.Buffer with the data, or an EOS event once the file
    is exhausted (after marking the element EOS).
    """
    chunk = self.fd.read(self.blocksize)
    if not chunk:
        self.set_eos()
        return gst.Event(gst.EVENT_EOS)
    return gst.Buffer(chunk)
def filter_buffer(self, buffer_in, **kwargs):
    """Render an SVG overlay for the current video frame.

    Writes the incoming PNG frame to a temp file, renders the SVG template
    (with frame metadata in `kwargs`) to PNG via the external `rsvg` tool,
    and returns the result as a new buffer carrying the input buffer's
    caps/timestamp/duration.
    """
    # TODO: lock?
    kwargs['environ'] = dict(os.environ)
    kwargs['input_frame_filename'] = self.tempfile_png_in.name
    kwargs.update(
        self.reader.get_data_for_time(kwargs['video_stream_position']))
    self.tempfile_png_in.seek(0)
    self.tempfile_png_in.truncate()
    self.tempfile_png_in.write(buffer_in)
    self.tempfile_png_in.flush()
    self.tempfile_svg.seek(0)
    self.tempfile_svg.truncate()
    self.tempfile_svg.write(self.svg_template.generate(**kwargs).render())
    self.tempfile_svg.flush()
    # subprocess argv entries must be strings; video_width/video_height are
    # numeric frame dimensions, so convert explicitly (the original passed
    # them raw, which raises TypeError for int values).
    subprocess.check_call([
        'rsvg',
        '-w', str(kwargs['video_width']),
        '-h', str(kwargs['video_height']),
        self.tempfile_svg.name,
        self.tempfile_png_out.name,
    ])
    self.tempfile_png_out.seek(0)
    buffer_out = gst.Buffer(self.tempfile_png_out.read())
    # Propagate the original buffer's metadata so timing is preserved.
    buffer_out.caps = buffer_in.caps
    buffer_out.timestamp = buffer_in.timestamp
    buffer_out.duration = buffer_in.duration
    return buffer_out
def push_data(data):
    """Synthesize one chunk of "psychedelic" audio and push it to the appsrc.

    Returns True while the appsrc accepts buffers, False on flow error.
    """
    num_samples = CHUNK_SIZE / 2  # Because each sample is 16 bits
    # Advance the slow oscillator that modulates the tone frequency.
    data.c += data.d
    data.d -= data.c / 1000.0
    freq = 1100.0 + 1000.0 * data.d
    samples = array('H')
    for _ in xrange(num_samples):
        data.a += data.b
        data.b -= data.a / freq
        samples.append((int(500 * data.a)) % 65535)
    data.num_samples += num_samples
    buffer = gst.Buffer(samples.tostring())
    # Timestamp/duration derived from the running sample count.
    buffer.timestamp = gst.util_uint64_scale(data.num_samples, gst.SECOND, SAMPLE_RATE)
    buffer.duration = gst.util_uint64_scale(CHUNK_SIZE, gst.SECOND, SAMPLE_RATE)
    # Push the buffer into the appsrc
    ret = data.app_source.emit("push-buffer", buffer)
    return ret == gst.FLOW_OK
def write(self, block):
    """Accept one block of media data: start playback on the first write,
    then feed the block into the pipeline.
    """
    if not self.playing:
        # Lazy start: the pipeline only goes PLAYING once data arrives.
        self.playing = True
        self.log.debug('pure vlc output is starting')
        self.player.set_state(gst.STATE_PLAYING)
    # self.proto.sendData(block)
    # NOTE(review): 'push-buffer' is an appsrc signal, but the attribute is
    # named `appsink` -- presumably it actually holds an appsrc element.
    # Verify against where self.appsink is created.
    self.appsink.emit('push-buffer', gst.Buffer(block))
def do_create(self, offset, size):
    """Pull the next buffer from the receiver queue and hand it to GStreamer.

    Blocks up to 5 seconds for data. While a seek is in progress, prefetched
    buffers are discarded until a post-seek buffer (status == 1) arrives; a
    new-segment event is then pushed and the buffer flagged DISCONT.

    Returns (gst.FLOW_OK, buffer) normally, (gst.FLOW_OK, empty buffer) on a
    5-second timeout, or (gst.FLOW_ERROR, None) on unexpected failure.
    """
    #debug("Offset: %d, Size: %d" % (offset, size))
    try:
        while True:
            status, buffer = self._receiver.queue.get(True, 5)
            if self.seek_in_progress is not None:
                if status != 1:  # change this
                    debug("Skipping prefetched junk ...")
                    self._receiver.queue.task_done()
                    continue
                debug("Pushing seek'd buffer")
                event = gst.event_new_new_segment(
                    False, 1.0, gst.FORMAT_TIME,
                    self.seek_in_progress, -1, self.seek_in_progress)
                r = self.get_static_pad("src").push_event(event)
                debug("New segment: %s" % r)
                self.seek_in_progress = None
                # First buffer after a seek is discontinuous with the previous data.
                buffer.flag_set(gst.BUFFER_FLAG_DISCONT)
            self._receiver.queue.task_done()
            return gst.FLOW_OK, buffer
    except Queue.Empty:
        debug("No data available")
        return gst.FLOW_OK, gst.Buffer()
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; any other failure is logged and reported upstream.
        traceback.print_exc()
        return gst.FLOW_ERROR, None
def do_create(self, offset, length):
    """Serve the cached still image as a capped, timestamped gst.Buffer."""
    self.debug("Pushing buffer")
    out = gst.Buffer(self.imgBuf)
    # Caps come from the configured caps string; duration is in seconds.
    out.set_caps(gst.caps_from_string(self.capsStr))
    out.timestamp = 0
    out.duration = self.duration * gst.SECOND
    return gst.FLOW_OK, out
def do_create(self, offset, size): self.emit("packet_received") assert self.audio data = self.audio.read() buf = gst.Buffer(data) buf.set_caps(self.caps) print "do_create", len(buf) return gst.FLOW_OK, buf
def pushData(self, data, fragment_duration, level, caps_data):
    """Push one fragment of media data into the pipeline's appsrc.

    :param data: raw fragment bytes
    :param fragment_duration: fragment length in seconds
    :param level: quality level, used only for logging
    :param caps_data: unused here
    """
    fragment = gst.Buffer(data)
    # gst durations are in nanoseconds; the argument arrives in seconds.
    fragment.duration = long(fragment_duration * 1e9)
    debug(DEBUG, '%s pushData: pushed %s of data (duration= %.2fs) for level %s',
          self, format_bytes(len(data)), fragment_duration, level)
    self.pipeline.get_by_name('src').emit('push-buffer', fragment)
    del fragment
def do_create(self, offset, size):
    """Read the next block from the backing file, seeking when the
    requested offset does not match our current position (random access).

    Returns (gst.FLOW_OK, buffer) or (gst.FLOW_UNEXPECTED, None) at EOF.
    """
    if offset != self.curoffset:
        self.fd.seek(offset, 0)
    data = self.fd.read(self.blocksize)
    if not data:
        return gst.FLOW_UNEXPECTED, None
    self.curoffset += len(data)
    return gst.FLOW_OK, gst.Buffer(data)
def set_uri(self, filepath):
    """Load the whole file at `filepath` and push its contents into the appsrc."""
    import gst
    logging.debug("Pushing %r to appsrc" % filepath)
    # FIXME: BIG hack to reduce the initial starting time...
    queue0 = self.decodebin.get_by_name("multiqueue0")
    if queue0:
        queue0.set_property("max-size-bytes", 100000)
    # Open in binary mode and close deterministically: the original opened
    # in text mode and leaked the file handle.
    with open(filepath, 'rb') as f:
        self.appsrc.emit('push-buffer', gst.Buffer(f.read()))
def chainfunc(self, pad, buffer):
    """Scale the incoming complex FFT spectrum by the transmission curve
    and push the result downstream with the original timestamp."""
    gst.log("Passing buffer with ts %d" % (buffer.timestamp))
    spectrum = numpy.frombuffer(buffer, numpy.complex128)
    scaled = spectrum * self.transmission
    out = gst.Buffer(scaled)
    out.set_caps(self.srcpad.get_caps())
    out.timestamp = buffer.timestamp
    return self.srcpad.push(out)
def buf_of_img(img, bufmodel=None):
    """Wrap raw image bytes in a gst.Buffer.

    When `bufmodel` is given, its caps, duration, timestamp and offsets
    are copied onto the new buffer.
    """
    out = gst.Buffer(img)
    if bufmodel is None:
        return out
    out.caps = bufmodel.caps
    out.duration = bufmodel.duration
    out.timestamp = bufmodel.timestamp
    out.offset = bufmodel.offset
    out.offset_end = bufmodel.offset_end
    return out
def on_audio_message(self, audio):
    # Callback for ROS audio messages -- emits the audio data to the
    # gstreamer pipeline through the appsrc.
    rospy.logdebug('Received audio packet of length {}'.format(
        len(audio.data)))
    if not self._app_source:
        return
    payload = str(bytearray(audio.data))
    self._app_source.emit('push-buffer', gst.Buffer(payload))
def feed_data(playbin, buffer_size): print 'feeding data' f = Progress.file() # buffer = gst.Buffer('1'*4096) buffer = gst.Buffer(f.read(4096)) playbin.emit('push-buffer', buffer) Progress.set_progress(Progress.get_progress() + buffer_size)
def do_create(self, offset, size): print " L"+unicode(self.buffer.qsize())+"", try: buff = self.buffer.get(False) return gst.FLOW_OK, gst.Buffer(str(buff)) except: return gst.FLOW_UNEXPECTED, None
def __init__(self, caps):
    """Prepare a fixed-length buffer of silence (16-bit mono, 8 kHz)
    with matching caps attached."""
    self.__gobject_init__()
    self.caps = caps
    # One zero sample per step of the arange: a fixed count of silence.
    sample_count = len(numpy.arange(0.0, 12*math.pi, 0.06))
    data = struct.pack('<' + 'h'*sample_count, *([0] * sample_count))
    self.buf = gst.Buffer(data)
    silence_caps = gst.caps_from_string(
        'audio/x-raw-int, rate=8000, endianness=1234, channels=1, '
        'width=16, depth=16, signed=true')
    self.buf.set_caps(silence_caps)
def _pushPacket(self, data, timestamp, EOS, streamId):
    """Pack one block of samples into a byte string (unless it is already
    a string type) and push it through the source element."""
    if self.breakBlock:
        return
    if self.datatype not in ('b', 'B'):
        # dataChar and dataOctet already provide data as string; all
        # other formats require packing
        data = struct.pack('%d%s' % (len(data), self.datatype), *data)
    self.source.emit('push_buffer', gst.Buffer(data))
def handle(self, uris):
    """Do something useful with the URIs.

    :param uris: list of URIs
    :type uris: :type:`list`
    :returns: boolean indicating if EOS should be consumed
    """
    # TODO: handle unicode uris which we can get out of elementtree
    payload = '\n'.join(uris)
    self.srcpad.push(gst.Buffer(payload))
    return False
def chainfunc(self, pad, buffer):
    """Inverse-FFT the incoming complex spectrum back to the time domain
    and push the samples downstream with the original timestamp."""
    gst.log("Passing buffer with ts %d" % (buffer.timestamp))
    spectrum = numpy.frombuffer(buffer, numpy.complex128)
    # A real FFT of n samples yields n/2 + 1 bins; invert that relation.
    n_samples = 2 * len(spectrum) - 2
    wave = numpy.fft.irfft(spectrum, n_samples)
    out = gst.Buffer(wave)
    out.set_caps(self.srcpad.get_caps())
    out.timestamp = buffer.timestamp
    return self.srcpad.push(out)
def do_create(self, offset, size): hdr = self.of.read(hdr_len) size, timestamp, duration, flags = struct.unpack(hdr_fmt, hdr) buffer = gst.Buffer(self.of.read(size)) buffer.timestamp = timestamp buffer.duration = duration #buffer.flags = flags print "buffer timestamp %d %d %d" % (buffer.timestamp, buffer.duration, buffer.flags) return gst.FLOW_OK, buffer
def add_snapshot(self, pixbuf, time_ms):
    """Push one pixbuf frame into the pipeline at the given time.

    :param pixbuf: GdkPixbuf holding the frame; its dimensions are cached
        on the element for caps negotiation
    :param time_ms: presentation time in milliseconds
    :raises RuntimeError: if the downstream pad refuses the buffer
    """
    self.width = pixbuf.get_width()
    self.height = pixbuf.get_height()
    #print "Pushing %dx%d snapshot to source" % (self.width, self.height)
    buf = gst.Buffer(pixbuf.get_pixels())
    buf.timestamp = int(round(time_ms * gst.MSECOND))
    # Don't forget to set the right caps on the buffer
    self.set_caps_on(buf)
    src = self.get_static_pad("src")
    status = src.push(buf)
    if status != gst.FLOW_OK:
        # Parenthesized raise: the original `raise RuntimeError, "..."` comma
        # form is Python-2-only syntax; this form works on both 2 and 3.
        raise RuntimeError("Error while pushing buffer : " + str(status))