def test_create_stream(self):
    """POSTing a new stream name creates it locally and redirects."""
    # Stub the origin-server API so no real outbound HTTP call is made.
    mockito.when(StreamsAPI).create(
            source_uuid=mockito.any(), name=mockito.any(),
            slug=mockito.any(), description=mockito.any()).thenReturn(None)
    payload = {'name': faker.lorem.sentence()}
    # Sanity check: no stream with this name exists yet.
    eq_(Stream.get_by(name=payload['name']), None)
    request = HTTPRequest(self.get_url('/streams'), 'POST',
            body=json.dumps(payload), follow_redirects=False)
    self.http_client.fetch(request, self.stop)
    response = self.wait()
    # The handler answers with a redirect rather than a body.
    eq_(response.code, 302)
    # The stream should now exist in the database.
    ok_(Stream.get_by(name=payload['name']))
def _load_ticket(self, stream_slug, destination_uuid):
    """Fetch the ticket for a stream, optionally scoped to a destination.

    With a destination_uuid, look up that node's ticket for the stream;
    without one, return this node's own ticket.
    """
    stream = Stream.get_by(slug=stream_slug)
    if destination_uuid:
        destination = Node.get_by(uuid=destination_uuid)
        return Ticket.query.filter_by(
                stream=stream, destination=destination).first()
    return Ticket.get_by(stream=stream, destination=Node.me())
def test_delete(self):
    """DELETEing a ticket removes it but leaves its stream intact."""
    ticket = TicketFactory(destination=Node.me())
    self.http_client.fetch(HTTPRequest(
        self.get_url(ticket.absolute_url()), 'DELETE'), self.stop)
    response = self.wait()
    eq_(response.code, 200)
    # The ticket itself is gone...
    eq_(Ticket.get_by(id=ticket.id), None)
    # ...but the stream it pointed at survives.
    ok_(Stream.get_by(slug=ticket.stream.slug))
def _handle_stream(self, stream): """Update the enabled flag on all tunnels that are for this stream.""" # reload to get a new database session stream = Stream.get_by(slug=stream.slug) try: port = self.create_tunnel(stream.slug, "127.0.0.1", settings.RTMP_PORT, self.stream_tunnels) except Exception, e: log.warning("Couldn't create a tunnel for %s: %s", stream, e)
def test_get_streams(self):
    """GET /streams reports every stream known to the database."""
    Node.me()
    for _ in range(3):
        StreamFactory()
    response = self.fetch('/streams')
    eq_(response.code, 200)
    result = json.loads(response.body)
    ok_('streams' in result)
    # Every stream the API lists should resolve to a database row.
    for stream in result['streams']:
        ok_(Stream.get_by(name=stream['name']))
def test_delete(self):
    """Deleting a ticket over HTTP succeeds and keeps its stream."""
    destination = Node.me()
    ticket = TicketFactory(destination=destination)
    request = HTTPRequest(self.get_url(ticket.absolute_url()), 'DELETE')
    self.http_client.fetch(request, self.stop)
    response = self.wait()
    eq_(response.code, 200)
    eq_(Ticket.get_by(id=ticket.id), None)
    ok_(Stream.get_by(slug=ticket.stream.slug))
def test_get_tickets(self):
    """GET /tickets returns all three tickets with resolvable references."""
    response = self.fetch("/tickets")
    eq_(response.code, 200)
    result = json.loads(response.body)
    ok_("tickets" in result)
    eq_(len(result["tickets"]), 3)
    for ticket in result["tickets"]:
        # Each serialized reference must map back to a database row.
        source = Node.get_by(uuid=ticket["source"])
        destination = Node.get_by(uuid=ticket["destination"])
        stream = Stream.get_by(slug=ticket["stream"])
        ok_(Ticket.get_by(stream=stream, source=source,
                destination=destination))
def put(self, stream_slug):
    # Toggle the streaming flag on an existing stream. Silently does
    # nothing when no stream matches the slug (no 404 is raised).
    stream = Stream.get_by(slug=stream_slug)
    if stream:
        # this accepts the streaming flag as a query parameter to get around
        # the same-origin policy, as we can't get PUT data from the browser.
        if 'streaming' in self.request.arguments:
            # NOTE(review): Tornado's request.arguments values are lists of
            # strings, so this assigns a (always truthy) list rather than a
            # boolean -- presumably should be ...['streaming'][0] or
            # self.get_argument('streaming'); confirm intended behavior.
            stream.streaming = self.request.arguments['streaming']
        else:
            stream.streaming = self.get_json_argument('streaming')
        if stream.streaming:
            log.info("Resumed streaming %s", stream)
        else:
            log.info("Paused streaming %s", stream)
def put(self, stream_slug):
    """Toggle the streaming flag for the stream matching ``stream_slug``.

    A missing stream is ignored silently (no error response).
    """
    stream = Stream.get_by(slug=stream_slug)
    if stream:
        # The flag may arrive as a query parameter -- a workaround for the
        # same-origin policy, since the browser can't send PUT data --
        # otherwise fall back to the JSON body.
        arguments = self.request.arguments
        if "streaming" in arguments:
            stream.streaming = arguments["streaming"]
        else:
            stream.streaming = self.get_json_argument("streaming")
        if stream.streaming:
            log.info("Resumed streaming %s", stream)
        else:
            log.info("Paused streaming %s", stream)
def post(self): """Register a new available stream.""" # TODO kind of messy way to handle two different data types, but for # some reason Torando is loading the name and description as lists # instead of strings if they are form encded if not hasattr(self.request, 'arguments') or not self.request.arguments: self.load_json() else: self.request.arguments['name'] = self.request.arguments['name'][0] self.request.arguments['description'] = ( self.request.arguments.get('description', [''])[0]) self.request.arguments.setdefault('source', Node.me().uuid) if Stream.get_by(name=self.request.arguments['name']): self.redirect("%s/upload" % settings.ASTRAL_WEBSERVER) return stream = Stream.from_dict(self.request.arguments) try: StreamsAPI(settings.ASTRAL_WEBSERVER).create( source_uuid=stream.source.uuid, name=stream.name, slug=stream.slug, description=stream.description) except RequestError, e: log.warning("Unable to register stream with origin webserver: %s", e)
def post(self, stream_slug):
    """Return whether or not this node can forward the stream requested to
    the requesting node, and start doing so if it can."""
    stream = Stream.get_by(slug=stream_slug)
    if not stream:
        # Unknown locally -- fall back to asking the origin webserver.
        try:
            log.debug("Don't know of stream with slug %s, asking the "
                    "origin", stream_slug)
            # NOTE(review): stream_data is never used in this excerpt;
            # presumably the function continues with
            # Stream.from_dict(stream_data) -- confirm against full source.
            stream_data = StreamsAPI(settings.ASTRAL_WEBSERVER).find(
                stream_slug)
        except RequestError, e:
            # Origin unreachable: log and fall through with stream unset.
            log.warning("Can't connect to server: %s", e)
        except ResourceNotFound:
            # NOTE(review): this debug message has no %s placeholder for
            # stream_slug, so the slug is dropped by the logger.
            log.debug("Origin didn't know of a stream with slug", stream_slug)
            raise HTTPError(404)
def post(self, stream_slug):
    """Return whether or not this node can forward the stream requested to
    the requesting node, and start doing so if it can."""
    stream = Stream.get_by(slug=stream_slug)
    if not stream:
        # Not known locally; query the origin webserver for it.
        try:
            log.debug(
                "Don't know of stream with slug %s, asking the "
                "origin", stream_slug)
            # NOTE(review): stream_data appears unused here -- this chunk
            # is likely truncated before the Stream.from_dict(stream_data)
            # step; verify against the full file.
            stream_data = StreamsAPI(
                settings.ASTRAL_WEBSERVER).find(stream_slug)
        except RequestError, e:
            # Origin unreachable: warn and continue with stream still None.
            log.warning("Can't connect to server: %s", e)
        except ResourceNotFound:
            # NOTE(review): missing %s placeholder -- stream_slug is passed
            # to log.debug but never rendered into the message.
            log.debug("Origin didn't know of a stream with slug",
                    stream_slug)
            raise HTTPError(404)
def get(self, stream_slug):
    """Return metadata for the stream, or 404 when the slug is unknown."""
    stream = Stream.get_by(slug=stream_slug)
    if stream:
        self.write({'stream': stream.to_dict()})
    else:
        raise web.HTTPError(404)
def get(self, stream_slug):
    """Return metadata for the stream."""
    stream = Stream.get_by(slug=stream_slug)
    # Unknown slug -> 404 rather than an empty body.
    if not stream:
        raise web.HTTPError(404)
    self.write({"stream": stream.to_dict()})
def update_stream_tunnel_flags(self):
    # Walk every open stream tunnel and reload its stream from the
    # database.
    # NOTE(review): the body is a stub -- the `if stream:` branch does
    # nothing, and `tunnel` is unused. Presumably this was meant to sync
    # an enabled/streaming flag onto the tunnel; confirm intent before
    # building on it.
    for slug, tunnel in self.stream_tunnels.items():
        stream = Stream.get_by(slug=slug)
        if stream:
            pass
def close_expired_stream_tunnels(self):
    """Tear down tunnels whose backing stream no longer exists.

    Iterates a snapshot of the tunnel slugs so that destroy_tunnel can
    remove entries from self.stream_tunnels while we loop. (The original
    relied on Python 2's .items() returning a copy, and unpacked a tunnel
    value it never used.)
    """
    for slug in list(self.stream_tunnels):
        if not Stream.get_by(slug=slug):
            self.destroy_tunnel(slug, self.stream_tunnels)
"-- telling web server to kill it", node, self.node()) NodesAPI(base_url).unregister(self.node().absolute_url()) else: node = Node.from_dict(node) log.info("Stored %s from %s", node, base_url) def load_streams(self, base_url=None): base_url = base_url or settings.ASTRAL_WEBSERVER try: streams = StreamsAPI(base_url).list() except RequestError, e: log.warning("Can't connect to server: %s", e) else: log.debug("Streams returned from the server: %s", streams) for stream in streams: stream = Stream.from_dict(stream) if stream: log.info("Stored %s from %s", stream, base_url) self.prime_stream_tunnels() def prime_stream_tunnels(self): for stream in Stream.query.filter(Stream.source == Node.me()): stream.queue_tunnel_status_flip() def register_with_origin(self): try: NodesAPI(settings.ASTRAL_WEBSERVER).register(self.node().to_dict()) except RequestError, e: log.warning( "Can't connect to server to register as a " "supernode: %s", e)
stream = Stream.get_by(slug=stream_slug) if not stream: try: log.debug( "Don't know of stream with slug %s, asking the " "origin", stream_slug) stream_data = StreamsAPI( settings.ASTRAL_WEBSERVER).find(stream_slug) except RequestError, e: log.warning("Can't connect to server: %s", e) except ResourceNotFound: log.debug("Origin didn't know of a stream with slug", stream_slug) raise HTTPError(404) else: stream = Stream.from_dict(stream_data) if not stream: log.debug("Couldnt find stream with slug %s anywhere", stream_slug) raise HTTPError(404) destination_uuid = self.get_json_argument('destination_uuid', '') if destination_uuid: destination = Node.get_by(uuid=destination_uuid) # TODO since we only have the IP, we have to assume the port is 8000 # to be able to request back to it for more details. hmm. # TODO another problem is that the tornado server is (and i should # have realized this sooner...) single-threaded, and based on the # event model. So the requsting node is blocked waiting for us to # responsed, then we go and query it. well, that's deadlock! a # workaroud since we're only dealing with single supernode # situations is just to query the supernode, since they MUST know # about that other node.
node, self.node()) NodesAPI(base_url).unregister(self.node().absolute_url()) else: node = Node.from_dict(node) log.info("Stored %s from %s", node, base_url) def load_streams(self, base_url=None): base_url = base_url or settings.ASTRAL_WEBSERVER try: streams = StreamsAPI(base_url).list() except RequestError, e: log.warning("Can't connect to server: %s", e) else: log.debug("Streams returned from the server: %s", streams) for stream in streams: stream = Stream.from_dict(stream) if stream: log.info("Stored %s from %s", stream, base_url) self.prime_stream_tunnels() def prime_stream_tunnels(self): for stream in Stream.query.filter(Stream.source == Node.me()): stream.queue_tunnel_status_flip() def register_with_origin(self): try: NodesAPI(settings.ASTRAL_WEBSERVER).register( self.node().to_dict()) except RequestError, e: log.warning("Can't connect to server to register as a " "supernode: %s", e)
the requesting node, and start doing so if it can.""" stream = Stream.get_by(slug=stream_slug) if not stream: try: log.debug("Don't know of stream with slug %s, asking the " "origin", stream_slug) stream_data = StreamsAPI(settings.ASTRAL_WEBSERVER).find( stream_slug) except RequestError, e: log.warning("Can't connect to server: %s", e) except ResourceNotFound: log.debug("Origin didn't know of a stream with slug", stream_slug) raise HTTPError(404) else: stream = Stream.from_dict(stream_data) if not stream: log.debug("Couldnt find stream with slug %s anywhere", stream_slug) raise HTTPError(404) destination_uuid = self.get_json_argument('destination_uuid', '') if destination_uuid: destination = Node.get_by(uuid=destination_uuid) # TODO since we only have the IP, we have to assume the port is 8000 # to be able to request back to it for more details. hmm. # TODO another problem is that the tornado server is (and i should # have realized this sooner...) single-threaded, and based on the # event model. So the requsting node is blocked waiting for us to # responsed, then we go and query it. well, that's deadlock! a # workaroud since we're only dealing with single supernode # situations is just to query the supernode, since they MUST know # about that other node.