Example #1
	def __init__(self, name, processes=1, maxQueueSize=100, args=None):
		try:
			self.pool = []
			self.resultQueue = DeQueue(maxQueueSize)
			self.dataQueue = DeQueue(maxQueueSize)
			self.debugQueue = Queue()
			self.name = name
			self.variables = {}

			print ("processes for {} = {}".format(name, processes))

			for i in range(processes):
				if args:
					p = Process(target=self.process, args=args)
				else:
					p = Process(target=self.process)
				self.pool.append(p)
				p.start()

			asyncio.get_event_loop().create_task(self.poll())

		except:
			exc_type, exc_value, exc_traceback = sys.exc_info()
			traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
			traceback.print_exception(exc_type, exc_value, exc_traceback,
            	          limit=2, file=sys.stdout)
Example #2
    def _send_servo_commands(self):
        servo_started_up = False

        while True:
            if ((not servo_started_up)
                and self.mech_driver
                and self.mech_driver.servo):
                servo_started_up = True
                # Reset all servos on startup, as they may have had a pending
                # error due to a power glitch or something similar.
                self.logger.info('Rebooting servos on startup')
                yield From(self.mech_driver.servo.reboot())

            if not self.servo_send_now.is_set():
                # Make sure we wake up periodically
                asyncio.get_event_loop().call_later(
                    SERVO_SEND_INTERVAL, self.servo_send_now.set)
            yield From(self.servo_send_now.wait())
            self.servo_send_now.clear()

            new_pkt = self.net_packet
            old_pkt = self.servo_packet
            if new_pkt is not None:
                yield From(self._send_servo_commands_once(
                        new_pkt, old_pkt))
            self.servo_packet = new_pkt

            # send any status updates
            self.status_send_now.set()
Example #3
	def __init__(self, cameras=[], port=9004, users_file="users.json", recognition_db="recognition.db"):

		Server.__init__(self, port=port, usessl=False)

		self.recognition_db = recognition_db

		self.last_user_uuid = ""
		self.last_len_persons_detected = -1
		self.last_len_users = -1

		self.camera_clients = []
		self.recognizer = Recognizer(users_file)

		self.cameras = cameras
		self.start()

		self.method_handlers = {}
		self.method_handlers["list_users"] = self.list_users
		self.method_handlers["select_camera"] = self.select_camera
		self.method_handlers["list_users_with_level"] = self.list_users_with_level
		self.method_handlers["add_association"] = self.add_association

		self.users_recognized = []

		asyncio.get_event_loop().create_task(self.poll())
Example #4
	def __init__(self, pollRate = MINS(10), port=9001, sslcert = "server.crt", sslkey= "server.key", privateKeyFile = 'dhserver.key', clientsFile = "clients.json"):
		Server.__init__(self, True, port, sslcert, sslkey, privateKeyFile = privateKeyFile, clientsFile = clientsFile)
		self.pollRate = pollRate
		self.port = port

		self.numclients = len(self.clients)
		
		asyncio.get_event_loop().create_task(self.poll())
Example #5
 def test_async_flush_fail(self):
     """Test async_flush in the case that the last heap sent failed.
     This is arranged by filling up the queue slots first.
     """
     for i in range(5):
         trollius.async(self.stream.async_send_heap(self.heap))
     # The above only queues up the async sends on the event loop. The rest of the
     # test needs to be run from inside the event loop
     trollius.get_event_loop().run_until_complete(self._test_async_flush())
Example #6
 def test_send_error(self):
     """An error in sending must be reported through the future."""
     # Create a stream with a packet size that is bigger than the likely
     # MTU. It should cause an error.
     stream = UdpStream(
         spead2.ThreadPool(), "localhost", 8888,
         spead2.send.StreamConfig(max_packet_size=100000), buffer_size=0)
     future = stream.async_send_heap(self.heap)
     trollius.get_event_loop().run_until_complete(self._test_send_error(future))
Example #7
 def session_made(self, conn):
     """
     called when we established a session to the router
     we can look up names/b32 but can't send messages to others yet
     :param conn: our i2cp connection that we are using
     """
     self.conn = conn
     self._log.info('session made we are {}'.format(conn.dest))
     asyncio.get_event_loop().call_later(self._delay, self._send)
Example #8
def main():
    logging.basicConfig(level=logging.WARN, stream=sys.stdout)

    parser = optparse.OptionParser(description=__doc__)

    MechDriver.add_options(parser)

    options, args = parser.parse_args()

    task = Task(start(options))
    asyncio.get_event_loop().run_until_complete(task)
Example #9
	def onJoin(self, details):
		print("Session Joined.")
		#Setting variables
		self.lastServoValue = 417 #Assumes it starts in the middle
		self.servoMin = 315  # Min pulse length out of 4096
		self.servoMax = 520  # Max pulse length out of 4096
		self.servoMiddle = 417 # middle servo value
		self.servoChannel = 3        
		print "What is happening????"
		self.motorMiddle = 1500
		self.motorChannel = 2
		self.subscribe(self.joyMonitor, 'aero.near.joystream')
		print "joystream ok"
		#subscribe to methods to prevent register conflicts
		self.subscribe(self.honkCommand, 'aero.near.honkHorn')
		print "honk ok"
		self.subscribe(self.emergencyStop, 'aero.near.emergStop')
		print "emergstop ok"
		self.subscribe(self.manualOverride, 'aero.near.override')
		print "About to make the loop"
		self.gps_data = {'latitude': 0,'longitude': 0,'heading': 0,'speed': 0}
		
		self.loop = asyncio.get_event_loop()
#		self.loop.stop()
#		future = asyncio.Future()
#		print "the future exists"
#		asyncio.async(self.gpsUpdate())
#		self.loop.run_until_complete(future)
#		self.loop = asyncio.new_event_loop()
# 		tasks = [
# 			asyncio.async(self.honk()),
# #			asyncio.async(self.lidarRead())]
# #			asyncio.async(self.gpsUpdate())]
# #			asyncio.async(self.internet_on())]
# 		print tasks
# 		swag.system('cls' if swag.name == 'nt' else 'clear')
# 		try:
		self.loop = asyncio.get_event_loop()
		tasks = [
			asyncio.async(self.netDisconnect())]
		print tasks
		try:
			done, pending = yield self.loop.run_until_complete(asyncio.wait(tasks))
		except Exception as e:
			print e
		print tasks
		#print "running"
		self.loop.close()
# 			done, pending = yield self.loop.run_until_complete(asyncio.wait(tasks))
# 		except Exception as e:
			# print e
		# print tasks
		print "running"
Example #10
    def _connect_dual_stack(self, primary_address, secondary_address):
        '''Connect using happy eyeballs.'''
        self._primary_connection = self._connection_factory(primary_address)
        self._secondary_connection = self._connection_factory(secondary_address)

        @trollius.coroutine
        def connect_primary():
            yield From(self._primary_connection.connect())
            raise Return(self._primary_connection)

        @trollius.coroutine
        def connect_secondary():
            yield From(self._secondary_connection.connect())
            raise Return(self._secondary_connection)

        primary_fut = connect_primary()
        secondary_fut = connect_secondary()

        failed = False

        for fut in trollius.as_completed((primary_fut, secondary_fut)):
            if not self._active_connection:
                try:
                    self._active_connection = yield From(fut)
                except NetworkError:
                    if not failed:
                        _logger.debug('Original dual stack exception', exc_info=True)
                        failed = True
                    else:
                        raise
                else:
                    _logger.debug('Got first of dual stack.')

            else:
                @trollius.coroutine
                def cleanup():
                    try:
                        conn = yield From(fut)
                    except NetworkError:
                        pass
                    else:
                        conn.close()
                    _logger.debug('Closed abandoned connection.')

                trollius.get_event_loop().create_task(cleanup())

        if self._active_connection.address == secondary_address:
            preferred_addr = secondary_address
        else:
            preferred_addr = primary_address

        self._happy_eyeballs_table.set_preferred(preferred_addr, primary_address, secondary_address)
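
The snippet above races the two connect coroutines with trollius.as_completed, keeps the first one that succeeds, and closes the late arrival in a background cleanup task. A minimal, self-contained sketch of that race pattern, using hypothetical stand-in coroutines rather than wpull's connection classes:

import trollius
from trollius import From, Return

@trollius.coroutine
def _attempt(name, delay):
    # Stand-in for a connect() call that takes `delay` seconds.
    yield From(trollius.sleep(delay))
    raise Return(name)

@trollius.coroutine
def race():
    winner = None
    for fut in trollius.as_completed((_attempt('ipv6', 0.3), _attempt('ipv4', 0.1))):
        result = yield From(fut)
        if winner is None:
            winner = result  # first attempt to finish wins
        # a real implementation would close the late connection here
    raise Return(winner)

print(trollius.get_event_loop().run_until_complete(race()))  # -> 'ipv4'
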
Example #11
    def _start_tls(self):
        '''Start SSL protocol on the socket.'''
        socket_ = self._writer.get_extra_info('socket')

        try:
            trollius.get_event_loop().remove_reader(socket_.fileno())
        except ValueError as error:
            raise trollius.ConnectionAbortedError() from error

        self._writer.write(b'HTTP/1.1 200 Connection established\r\n\r\n')
        yield From(self._writer.drain())

        try:
            trollius.get_event_loop().remove_writer(socket_.fileno())
        except ValueError as error:
            raise trollius.ConnectionAbortedError() from error

        ssl_socket = ssl.wrap_socket(
            socket_, server_side=True,
            certfile=self._cert_filename,
            keyfile=self._key_filename,
            do_handshake_on_connect=False
        )

        # FIXME: this isn't how to START TLS
        for dummy in range(1200):
            try:
                ssl_socket.do_handshake()
                break
            except ssl.SSLError as error:
                if error.errno in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
                    _logger.debug('Do handshake %s', error)
                    yield From(trollius.sleep(0.05))
                else:
                    raise
        else:
            _logger.error(_('Unable to handshake.'))
            ssl_socket.close()
            self._reject_request('Could not start TLS')
            raise trollius.ConnectionAbortedError('Could not start TLS')

        loop = trollius.get_event_loop()
        reader = trollius.StreamReader(loop=loop)
        protocol = trollius.StreamReaderProtocol(reader, loop=loop)
        transport, dummy = yield From(loop.create_connection(
            lambda: protocol, sock=ssl_socket))
        writer = trollius.StreamWriter(transport, protocol, reader, loop)

        self._reader = reader
        self._writer = writer
Example #12
def cancel_after_first_completed(tasks, interrupted_cb, loop=None):
    @asyncio.coroutine
    def _cancel_after_first_completed(tasks):
        while tasks:
            done, pending = yield asyncio.From(asyncio.wait(tasks,
                                return_when=asyncio.FIRST_COMPLETED,
                                loop=loop))
            for task in done:
                info(task)
                assert task in tasks
                tasks.remove(task)
            for task in pending:
                task.cancel()

    assert tasks
    if not loop:
        loop = asyncio.get_event_loop()

    main_task = asyncio.Task(_cancel_after_first_completed(tasks[:]),
                             loop=loop)
    while True:
        try:
            loop.run_until_complete(main_task)
            break
        except (KeyboardInterrupt, SystemExit):
            interrupted_cb()

    for task in tasks:
        assert task.done()
        if task.done() and not task.cancelled():
            exc = task.exception()
            if exc is not None:
                raise exc
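
A usage sketch for cancel_after_first_completed (assuming trollius is imported as asyncio, which the asyncio.From call above implies; the task bodies and the callback are placeholders):

import trollius as asyncio

@asyncio.coroutine
def serve_forever():
    while True:
        yield asyncio.From(asyncio.sleep(1))

@asyncio.coroutine
def one_shot_job():
    yield asyncio.From(asyncio.sleep(5))

def on_interrupt():
    print('interrupted, waiting for tasks to wind down')

loop = asyncio.get_event_loop()
tasks = [asyncio.Task(serve_forever(), loop=loop),
         asyncio.Task(one_shot_job(), loop=loop)]

# Once one_shot_job finishes (or Ctrl-C fires on_interrupt), the remaining
# task is cancelled and any stored exception is re-raised.
cancel_after_first_completed(tasks, on_interrupt, loop=loop)
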
Example #13
 def setUp(self):
     self.loop = trollius.get_event_loop()
     self.graph = GraphDatabase("ws://localhost:8182/",
                                username="******",
                                password="******",
                                loop=self.loop,
                                future_class=Future)
Example #14
def _main():
    module_interpreters = _find_module_interpreters()

    enabled_modules = []
    for mod_type, mod_name, mod_socket_path, mod_runas in \
            _find_all_enabled_modules():
        module_interpreter = module_interpreters.get(mod_name)
        if module_interpreter is None:
            raise RuntimeError('Unable to find right interpreter for module '
                               '"%s"' % mod_name)
        enabled_modules.append(
                (mod_type, mod_name, module_interpreter, mod_socket_path,
                 mod_runas))

    _log.info('%u enabled module instances found', len(enabled_modules))

    loop = trollius.get_event_loop()

    for sig_num in signal.SIGINT, signal.SIGTERM:
        loop.add_signal_handler(sig_num, lambda: _stop(sig_num, loop))

    list(map(lambda module: _spawn_process(loop, *module), enabled_modules))

    try:
        loop.run_forever()
    finally:
        loop.close()
Example #15
 def __init__(self, *args, **kwargs):
     if 'loop' in kwargs:
         self._loop = kwargs.pop('loop')
     else:
         self._loop = trollius.get_event_loop()
     super(UdpStream, self).__init__(*args, **kwargs)
     self._active = 0
Example #16
 def __init__(self, *args, **kwargs):
     self._loop = kwargs.pop('loop', None)
     super(Wrapped, self).__init__(*args, **kwargs)
     if self._loop is None:
         self._loop = trollius.get_event_loop()
     self._active = 0
     self._last_queued_future = None
Example #17
def connect_read_pipe(file):
    loop = asyncio.get_event_loop()
    stream_reader = asyncio.StreamReader(loop=loop)
    def factory():
        return asyncio.StreamReaderProtocol(stream_reader)
    transport, _ = yield From(loop.connect_read_pipe(factory, file))
    raise Return(stream_reader, transport)
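
A brief usage sketch for connect_read_pipe, written in the same trollius style the helper itself uses (the asyncio, From, and Return names are assumed to be the same imports as above; reading one line from stdin is an illustrative choice):

import sys

@asyncio.coroutine
def read_one_line():
    reader, transport = yield From(connect_read_pipe(sys.stdin))
    line = yield From(reader.readline())
    transport.close()
    raise Return(line)

first_line = asyncio.get_event_loop().run_until_complete(read_one_line())
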
Example #18
def create_client(client_klass,
                  host=None,
                  port=None,
                  loop=None,
                  timeouts=None,
                  client_type=None):
    """
    create a Trollius thrift client and return a context manager for it
    This is a coroutine
    :param client_klass: thrift Client class
    :param host: hostname/ip, None = loopback
    :param port: port number
    :param loop: Trollius event loop
    :returns: a Context manager which provides the thrift client
    """
    if not loop:
        loop = asyncio.get_event_loop()
    transport, protocol = yield From(
        loop.create_connection(
            ThriftClientProtocolFactory(
                client_klass,
                loop=loop,
                timeouts=timeouts,
                client_type=client_type,
            ),
            host=host,
            port=port,
        )
    )
    raise Return(protocol_manager(protocol))
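
Per the docstring, create_client is a coroutine that yields a context manager providing the connected client; a hedged sketch of the call pattern (MyServiceClient and its ping method are hypothetical):

@asyncio.coroutine
def call_ping():
    manager = yield From(create_client(MyServiceClient, port=9090))
    with manager as client:
        result = yield From(client.ping())  # hypothetical thrift RPC
    raise Return(result)
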
Example #19
 def __init__(self, log, max_pool=10, max_tasks=5):
     self.log = log
     self.max_pool = max_pool  # Overall limit.
     self.max_tasks = max_tasks  # Per-key limit.
     self.loop = asyncio.get_event_loop()
     self.connections = {}  # {(host, port, ssl): [Connection, ...], ...}
     self.queue = []  # [Connection, ...]
Example #20
def analyze_body(sdf, address=("127.0.0.1", 11346)):
    """
    Single body analyzer. Opens a new connection, analyzes the
    body, and returns the result. If you already have a manager
    running doing other things, create an instance of `BodyAnalyzer`
    instead.

    :param sdf: SDF object consisting of BodyPart
                instances.
    :type sdf: SDF
    :param address: Tuple of the hostname and port where the analyzer resides. Note
                    that the default is one up from the default Gazebo port,
                    since it is meant to be used with the `run-analyzer.sh` tool.
    :type address: (str, int)
    :return:
    :rtype: (bool, (float, float, float))
    """
    response_obj = [None]

    @trollius.coroutine
    def internal_analyze():
        analyzer = yield From(BodyAnalyzer.create(address))
        response_obj[0] = yield From(analyzer.analyze_sdf(sdf))

    loop = trollius.get_event_loop()
    loop.run_until_complete(internal_analyze())
    return response_obj[0]
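
Per the :rtype: line above, the call returns a (bool, (float, float, float)) pair; a minimal hedged usage sketch (the SDF object is assumed to be built elsewhere, and the names given to the returned values are only illustrative):

ok, measurements = analyze_body(sdf)   # blocks until the analyzer replies
if ok:
    x, y, z = measurements
    print("analysis ok: %.3f %.3f %.3f" % (x, y, z))
else:
    print("analyzer reported a problem with the body")
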
Example #21
def main():
    global SERVER

    try:
        loop = trollius.get_event_loop()
        coro = trollius.start_server(
            handle_client,
            host=None,
            port=pagure.APP.config['EVENTSOURCE_PORT'],
            loop=loop)
        SERVER = loop.run_until_complete(coro)
        log.info('Serving server at {}'.format(SERVER.sockets[0].getsockname()))
        if pagure.APP.config.get('EV_STATS_PORT'):
            stats_coro = trollius.start_server(
                stats,
                host=None,
                port=pagure.APP.config.get('EV_STATS_PORT'),
                loop=loop)
            stats_server = loop.run_until_complete(stats_coro)
            log.info('Serving stats  at {}'.format(
                stats_server.sockets[0].getsockname()))
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    except trollius.ConnectionResetError:
        pass

    # Close the server
    SERVER.close()
    if pagure.APP.config.get('EV_STATS_PORT'):
        stats_server.close()
    log.info("End Connection")
    loop.run_until_complete(SERVER.wait_closed())
    loop.close()
    log.info("End")
Example #22
    def produce_record(self, stream, key, data):
        record = Record(key, data, stream)
        stream = self.runner.streams[stream]

        loop = trollius.get_event_loop()
        for subscriber in stream.subscribers:
            loop.call_soon(subscriber.process_record, self, record)
Example #23
def find_master():
    loop = asyncio.get_event_loop()
    ServerState.logger.info('Attempting to locate master CoreMQ server for replication...')
    servers = ServerState.cluster_nodes[:]
    removals = []
    port = str(ServerState.listen_address[1])
    for s in servers:
        sp = s
        if ':' not in s:
            sp += ':6747'

        if ServerState.name.split('.', 1)[0] == sp.split('.', 1)[0] and port == sp.split(':')[-1]:
            removals.append(s)

    for r in removals:
        servers.remove(r)

    if not servers:
        ServerState.logger.info('This server is the only one listed in cluster_nodes. Assuming role of master MQ')
        return

    factory = CoreMqClientFactory(ReplicationClientProtocol, servers, loop=loop)
    yield asyncio.From(factory.connect())
    factory.lost_connection_callback = promote_to_master

    if not factory.connection:
        ServerState.logger.warn('No other CoreMQ servers found. Assuming role of master MQ')
    else:
        conn = factory.connection[1]
        conn.connected_future.add_done_callback(
            lambda _: conn.begin_replication('%s:%s' % (ServerState.name, ServerState.listen_address[1]))
        )
        ServerState.master = conn
Example #24
 def loop(self):
     loop = asyncio.get_event_loop()
     dispatcher = td.AsyncioCallbackDispatcher(loop)
     core = td.TelldusCore(callback_dispatcher=dispatcher)
     core.register_sensor_event(self.sensor_event)
     print("Starting loop")
     loop.run_forever()
Example #25
 def handler(self, message):
     loop = asyncio.get_event_loop()
     block = message[0:4]
     after = message[5:]
     if block == "NICK":
         self.nick = after
         print("{} is identified as {}".format(self.peer, after))
         self.prefix = "~{}!{}@{}".format(self.nick, self.ident, self.addr)
         return
     if block == "USER":
         if self.connected == False:
             self.connected = True
             self.real = after.split(':')[1]
             data = after.split(':')[0].split()
             self.ident = data[0]
             self.host  = data[1]
             self.serv  = data[2]
             print("First Task")
             task = asyncio.Task(self.first_connection())
             self.tasks[task] = (self.peer, loop.time())
             task.add_done_callback(self.task_done)
         return
     if block == "PONG":
         print("Client {} PONG".format(self.peer))
         return
     if block != "PONG":
         print("Recieved '{}' from {} identified as {} - {}".format(repr(message), self.peer, self.nick, timer()))
         self.transport.write("{}\r\n".format(message).encode())
         print("Sent '{}' to {}".format(repr(message), self.peer))
Example #26
    def __init__(self, config_file):
        super(Tourbillon, self).__init__()

        self._aio_run_event = asyncio.Event()
        self._thr_run_event = threading.Event()
        self._loop = asyncio.get_event_loop()
        self._tasks = []
        self._pluginconfig = {}

        with open(config_file, 'r') as f:
            self._config = json.load(f)

        formatter = logging.Formatter(fmt=self._config['log_format'])
        handler = logging.handlers.WatchedFileHandler(
            self._config['log_file'])
        handler.setFormatter(formatter)
        handler.setLevel(getattr(logging, self._config['log_level']))
        logging.getLogger().addHandler(handler)
        logging.getLogger().setLevel(
            getattr(logging, self._config['log_level']))

        logger.info('Use config file: %s', config_file)

        self._load_plugins_config(os.path.abspath(
                                  os.path.dirname(config_file)))

        self._influxdb = InfluxDBClient(**self._config['database'])
        self._databases = [i['name']
                           for i in self._influxdb.get_list_database()]
        print(self._databases)
Example #27
    def test_queue_overload(self):

        http = HTTPConnectionPool('httpbin.org', maxsize=3, block=True, timeout=3)

        testLoop = asyncio.get_event_loop()
        testLoop.set_debug(True)
        global test_queue_overload_count
        test_queue_overload_count = 0

        @asyncio.coroutine
        def get_page():
            global test_queue_overload_count
            try:
                resp = yield From(http.request('GET', '/delay/1', pool_timeout=3))
                pg = yield From(resp.data)
                self.assertTrue(b'Connection' in pg, pg)
            except EmptyPoolError:
                pass
            except Exception as e:
                raise
            else:
                test_queue_overload_count += 1

        pageGetters = [get_page(), get_page(), get_page(), get_page(), get_page()]
        testLoop.run_until_complete(asyncio.wait(pageGetters, return_when=asyncio.ALL_COMPLETED))
        self.assertGreater(test_queue_overload_count, 4, 'not all page_getters ran')
Example #28
 def __init__(self, handler, lookup=None, session_options={}, keyfile='i2cp.key', i2cp_host='127.0.0.1', i2cp_port=7654, evloop=None):
     self._i2cp_host, self._i2cp_port = i2cp_host, i2cp_port
     self._sid = None
     self._lookup = lookup
     self._done = False
     self.dest = None
     self._connected = False
     self._created = False
     self.handler = handler or I2CPHandler()
     self.keyfile = keyfile
     self.opts = dict(session_options)
     self.opts['i2cp.fastReceive'] = 'true'
     self.send_dgram = self.send_dsa_dgram
     self._host_lookups = dict()
     # create encryption key for LS per session
     self._enckey = crypto.ElGamalGenerate()
     self._msg_handlers = {
         messages.message_type.SessionStatus : self._msg_handle_session_status,
         messages.message_type.RequestLS : self._msg_handle_request_ls,
         messages.message_type.SetDate : self._msg_handle_set_date,
         messages.message_type.Disconnect : self._msg_handle_disconnect,
         messages.message_type.RequestVarLS : self._msg_handle_request_var_ls,
         messages.message_type.HostLookupReply : self._msg_handle_host_lookup_reply,
         messages.message_type.MessagePayload : self._msg_handle_message_payload,
     }
     self._dest_cache = dict()
     if evloop is None:
         self._loop = asyncio.get_event_loop()
     else:
         self._loop = evloop
     self.generate_dest(self.keyfile)
Example #29
    def get_async(self, urls, callback):
        def get(url, headers):
            resp = requests.get(url, headers=headers)
            if resp:
                si = self.process_response(resp)
                callback(si, resp.url)

        @asyncio.coroutine
        def run(urls, concurrency, loop):
            urls = list(urls)
            pending = [loop.run_in_executor(None, get, url, self.headers) for url in urls[:concurrency]]
            rest = urls[concurrency:]
            while pending:
                done, pending = yield asyncio.From(asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED))
                while rest and len(pending) < concurrency:
                    pending.add(loop.run_in_executor(None, get, rest.pop(), self.headers))
                for future in done:
                    try:
                        si = future.result()
                        # callback(si, si.original_url)
                    except Exception:
                        logger.info("failed on url", exc_info=True)

        loop = asyncio.get_event_loop()
        # TODO: make magic concurrency number a parameter
        loop.run_until_complete(run(urls, concurrency=32, loop=loop))
Example #30
def ThriftAsyncServerFactory(
    processor, interface=None, port=0, loop=None, nthreads=None, sock=None,
    backlog=100
):
    if loop is None:
        loop = asyncio.get_event_loop()

    if not isinstance(processor, TProcessor):
        try:
            processor = processor._processor_type(processor, loop=loop)
        except AttributeError:
            raise TypeError(
                "Unsupported processor type: {}".format(type(processor)),
            )

    if nthreads:
        from concurrent.futures import ThreadPoolExecutor
        loop.set_default_executor(
            ThreadPoolExecutor(max_workers=nthreads),
        )
    event_handler = TServerEventHandler()
    pfactory = ThriftServerProtocolFactory(processor, event_handler, loop)
    server = yield From(loop.create_server(
        pfactory,
        interface,
        port,
        sock=sock,
        backlog=backlog,
    ))

    if server.sockets:
        for socket in server.sockets:
            event_handler.preServe(socket.getsockname())

    raise Return(server)
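
ThriftAsyncServerFactory is itself a coroutine (it ends with raise Return(server)), so it must be driven by the event loop; a hedged sketch of starting a server with it, where handler stands for a thrift handler instance whose generated module provides the _processor_type checked above:

loop = asyncio.get_event_loop()
server = loop.run_until_complete(
    ThriftAsyncServerFactory(handler, port=9090, loop=loop))
try:
    loop.run_forever()
finally:
    server.close()
    loop.close()
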
Example #31
    def __init__(self,
                 pollRate=MINS(10),
                 port=9001,
                 sslcert="server.crt",
                 sslkey="server.key",
                 privateKeyFile='dhserver.key',
                 clientsFile="clients.json"):
        Server.__init__(self,
                        True,
                        port,
                        sslcert,
                        sslkey,
                        privateKeyFile=privateKeyFile,
                        clientsFile=clientsFile)
        self.pollRate = pollRate
        self.port = port

        self.numclients = len(self.clients)

        asyncio.get_event_loop().create_task(self.poll())
Example #32
def main():
    loop = asyncio.get_event_loop()

    # creates a server and starts listening to TCP connections
    server = MyServer()
    server.start(loop)

    @asyncio.coroutine
    def client():
        reader, writer = yield From(asyncio.streams.open_connection(
            '127.0.0.1', 12345, loop=loop))

        def send(msg):
            print("> " + msg)
            writer.write((msg + '\n').encode("utf-8"))

        def recv():
            msgback = (yield From(reader.readline()))
            msgback = msgback.decode("utf-8").rstrip()
            print("< " + msgback)
            raise Return(msgback)

        # send a line
        send("add 1 2")
        msg = yield From(recv())

        Ns = list(range(100, 100000, 10000))
        times = []

        for N in Ns:
            t0 = time.time()
            send("repeat {0} hello world ".format(N))
            msg = yield From(recv())
            assert msg == 'begin'
            while True:
                msg = (yield From(reader.readline()))
                msg = msg.decode("utf-8").rstrip()
                if msg == 'end':
                    break
            t1 = time.time()
            dt = t1 - t0
            print("Time taken: {0:.3f} seconds ({1:.6f} per repetition)"
                  .format(dt, dt/N))
            times.append(dt)

        writer.close()
        yield From(asyncio.sleep(0.5))

    # creates a client and connects to our server
    try:
        loop.run_until_complete(client())
        server.stop(loop)
    finally:
        loop.close()
Example #33
    def __init__(self, builder, human_format_speed=True):
        super().__init__()
        self._builder = builder
        self._human_format_speed = human_format_speed
        self._event_loop = trollius.get_event_loop()
        self._exit_code = 0
        self._statistics = None
        self.stop_observer = wpull.observer.Observer()
        self._server_tasks = []
        self._servers = []

        self.register_hook('exit_status', 'finishing_statistics')
Example #34
    def __init__(self, protocol, ip='127.0.0.1', port=9000):
        ip_str = str(ip)
        port_int = int(port)
        wsip = u"ws://" + ip_str + u":" + str(port_int)
        factory = WebSocketServerFactory(wsip)
        factory.protocol = protocol

        self.loop = trollius.get_event_loop()
        #self.loop.set_debug(False)
        self.coro = self.loop.create_server(factory, ip_str, port_int)
        self.server = self.loop.run_until_complete(self.coro)
        print("WebSockets configured on %s" % wsip)
Example #35
def runner():
    args = prepare()
    loop = asyncio.get_event_loop()
    # wrap asyncio to suppress stacktraces
    if args.debug:
        loop.run_until_complete(main(args))
    else:
        try:
            loop.run_until_complete(main(args))
        except Exception as e:
            print(e.message)
    loop.close()
Example #36
 def datagram_received(self, data, addr):
     header = data[0]
     payload = data[1:]
     if header == _JOYSTICK_AXIS_HEADER:
         axis, value = _JOYSTICK_AXIS_DATA.unpack(payload)
         print("Received axis {0} data: {1}".format(axis, value))
         self.transport.sendto(_PING_BACK)
     else:
         print('Unknown packet data: "{0}"'.format(data))
         self.transport.close()
         loop = asyncio.get_event_loop()
         loop.close()
Example #37
def async_test(f):

    testLoop = asyncio.get_event_loop()
    testLoop.set_debug(True)

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        coro = asyncio.coroutine(f)
        future = coro(*args, **kwargs)
        testLoop.run_until_complete(future)

    return wrapper
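
A usage sketch for the async_test decorator above: it wraps a generator-style test method with asyncio.coroutine and runs it to completion on the module-level testLoop (the test class and its body are illustrative, and the trollius-as-asyncio imports are assumed to match the surrounding snippets):

import unittest

class DelayTest(unittest.TestCase):

    @async_test
    def test_short_sleep(self):
        # The body can use trollius-style yields; async_test drives it.
        yield From(asyncio.sleep(0.01))
        self.assertTrue(True)
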
Example #38
def main():
    logging.basicConfig(level=logging.WARN, stream=sys.stdout)

    asyncio.set_event_loop_policy(asyncio_qt.QtEventLoopPolicy())

    app = QtGui.QApplication(sys.argv)
    app.setApplicationName('mjscore_manager')

    parser = optparse.OptionParser()
    parser.add_option('-s', '--serial', help='serial port to use')

    options, args = parser.parse_args()
    assert len(args) == 0

    manager = ManagerMainWindow()

    if options.serial:
        manager.open_serial(options.serial)

    manager.show()
    asyncio.get_event_loop().run_forever()
Example #39
    def start(self):
        self._loop = asyncio.get_event_loop()
        self._face = ThreadsafeFace(self._loop, "")
        self._face.setCommandSigningInfo(self._keyChain, self._certificateName)
        self._face.registerPrefix(self.prefix, self.onPlayingCommand,
                                  self.onRegisterFailed)

        try:
            self._loop.run_forever()
        except KeyboardInterrupt:
            sys.exit()
        finally:
            self.stop()
Example #40
    def start(self):
        self.loop = asyncio.get_event_loop()
        self.face = ThreadsafeFace(self.loop, "")
        self.face.setCommandSigningInfo(self.keychain, self.certificateName)
        self.songName = raw_input(
            "Song Name(each song separated by a comma): ")
        self.device = raw_input("Music Player: ")
        self.issueSongCommand()

        try:
            self.loop.run_forever()
        finally:
            self.stop()
Example #41
def main():
    asyncio.set_event_loop_policy(asyncio_qt.QtEventLoopPolicy())

    app = QtGui.QApplication(sys.argv)
    dialog = QtGui.QDialog()
    button = QtGui.QPushButton("text", dialog)
    button.clicked.connect(start_other)
    dialog.show()

    loop = asyncio.get_event_loop()
    asyncio.Task(simple_coroutine())

    loop.run_forever()
Example #42
def test_decorate_asyncio_coroutine():
    buf = StringIO()

    @asyncio.coroutine
    @debug.log(print_to=buf, module=False, stacktrace=2, result_repr=repr)
    def coro():
        yield asyncio.From(asyncio.sleep(0.01))
        raise StopIteration("result")

    loop = asyncio.get_event_loop()
    loop.run_until_complete(coro())
    output = buf.getvalue()
    assert 'coro => %r' % 'result' in output
Example #43
    def get_stocks_by_range(self, index):

        loop = asyncio.get_event_loop()
        # response = yield From(loop.run_in_executor(None,self.get_url_data_R,
        # (self.sina_stock_api + self.stock_list[index])))
        response = yield From(
            loop.run_in_executor(
                None, requests.get,
                (self.sina_stock_api + self.stock_list[index])))
        # response = yield (requests.get(self.sina_stock_api + self.stock_list[index]))
        # log.debug("url:%s"%(self.sina_stock_api + self.stock_list[index]))
        # log.debug("res_encoding:%s" % response.encoding[:10])
        self.stock_data.append(response.text)
Example #44
def main():
    config = load_configuration()
    ServerState.logger = get_logger(config, 'CoreWS')
    address = config.get('CoreWS', 'address', '0.0.0.0')
    port = int(config.get('CoreWS', 'port', '9000'))
    mq_servers = comma_string_to_list(
        config.get('CoreMQ', 'cluster_nodes', '').split(','))

    ServerState.logger.info('CoreWS Starting up...')
    ws_factory = WebSocketServerFactory('ws://%s:%s/ws' % (address, port))
    ws_factory.protocol = WsProtocol

    loop = asyncio.get_event_loop()
    server_coro = loop.create_server(ws_factory, address, port)
    server = loop.run_until_complete(server_coro)
    ServerState.logger.info('WebSocket Server running')

    mq_factory = CoreMqClientFactory(WsMqClient, mq_servers, loop=loop)

    @asyncio.coroutine
    def connect(*args):
        ServerState.mq_connection = None
        while True:
            yield asyncio.From(mq_factory.connect())
            if not mq_factory.connection:
                ServerState.logger.warn(
                    'No CoreMQ servers found. Retrying in 3 seconds...')
                yield asyncio.From(asyncio.sleep(3))
            else:
                conn = mq_factory.connection[1]
                conn.connected_future.add_done_callback(
                    lambda _: conn.begin_replication('%s:%s' % (
                        socket.gethostname(), port)))
                ServerState.mq_connection = conn
                break

    mq_factory.lost_connection_callback = connect
    loop.run_until_complete(connect())

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        ServerState.logger.info('Shutting down WebSocket Server...')
        server.close()
        ServerState.logger.info('Shutting down MQ Client...')
        mq_factory.close()
        loop.close()
        ServerState.logger.info('CoreWS is now shut down')
Example #45
def get_nginx_stats(agent):
    yield From(agent.run_event.wait())
    logger.debug('starting get_nginx_stats')
    config = agent.config['nginx']
    logger.debug('get_nginx_stats config retrieved')
    db_config = config['database']
    yield From(agent.async_create_database(**db_config))
    logger.debug('getting event loop')
    loop = asyncio.get_event_loop()
    while agent.run_event.is_set():
        logger.debug('in while loop')
        try:
            yield From(asyncio.sleep(config['frequency']))
            url = config['url']

            res = yield From(loop.run_in_executor(None, requests.get, url))

            if res.status_code == 200:
                text = res.text

                logger.debug(text)

                status = text.strip().split('\n')
                conn = status[0].strip().split(': ')[-1]
                accepts, handled, num_req = status[2].strip().split(' ')
                reading, writing, waiting = re.split(r'[:\s]\s*',
                                                     status[-1].strip())[1::2]
                data = [{
                    'measurement': 'nginx_stats',
                    'tags': {
                        'hostname': config['host']
                    },
                    'fields': {
                        'connections': int(conn),
                        'total_accepts': int(accepts),
                        'total_handled': int(handled),
                        'total_requests': int(num_req),
                        'reading': int(reading),
                        'writing': int(writing),
                        'waiting': int(waiting)
                    }
                }]
                logger.debug('nginx data: {}'.format(data))
                yield From(agent.async_push(data, db_config['name']))
            else:
                logger.warning('cannot get nginx stats: status={}'.format(
                    res.status_code))
        except:
            logger.exception('cannot get nginx stats')
    logger.info('get_nginx_status terminated')
Example #46
File: worker.py Project: gridl/dist
def work(work_q, send_q, data, metadata_addr, address, loop=None):
    """ Work coroutine

    Input Channels:
        work_q: Main mailbox, get work requests from control
        metadata_addr: This directly communicates with the MDStore via ZMQ
        data: A dictionary of local data.  This manages some state.

    Output Channels:
        send_q: Send acknowledgements of finished (or failed) tasks to the requester
        data:  A (possibly modified) dictionary of local data.
    """
    print("Worker boots up")
    loop = loop or asyncio.get_event_loop()
    while True:
        addr, msg = yield From(work_q.get())
        if msg == 'close':
            break

        key, func, args, kwargs, needed = \
                get(['key', 'function', 'args', 'kwargs', 'needed'], msg, None)

        try:
            d = yield From(get_data(loop, needed, data, metadata_addr))
        except KeyError as e:
            out = {'op': 'computation-failed', 'key': msg['key'], 'error': e}
        else:
            args2 = keys_to_data(args or (), d)
            kwargs2 = keys_to_data(kwargs or {}, d)

            # result = yield From(delay(loop, func, *args2, **kwargs2))
            result = func(*args2, **kwargs2)

            data[key] = result

            # Register ourselves with the metadata store
            req = {
                'op': 'register',
                'keys': [key],
                'address': address,
                'reply': True
            }
            response = yield From(dealer_send_recv(loop, metadata_addr, req))
            assert response == b'OK'

            out = {'op': 'computation-finished', 'key': msg['key']}

        send_q.put_nowait((addr, out))

    raise Return("Work done")
Example #47
    def __init__(self, config_file):
        super(Tourbillon, self).__init__()

        self._aio_run_event = asyncio.Event()
        self._thr_run_event = threading.Event()
        self._loop = asyncio.get_event_loop()
        self._tasks = []
        self._pluginconfig = {}
        self.agent_version = '1.1'

        with open(config_file, 'r') as f:
            self._config = json.load(f)

        formatter = logging.Formatter(fmt=self._config['log_format'])
        handler = logging.handlers.WatchedFileHandler(self._config['log_file'])
        handler.setFormatter(formatter)
        handler.setLevel(getattr(logging, self._config['log_level']))
        logging.getLogger().addHandler(handler)
        logging.getLogger().setLevel(
            getattr(logging, self._config['log_level']))

        logger.info('Use config file: %s', config_file)

        self._load_plugins_config(os.path.abspath(
            os.path.dirname(config_file)))

        self.api_url = 'https://sf-c01.sentinel.la:5556'
        self.nova_api_version = 2
        self.openstack_status = {
            'STOPPED': 0,
            'ACTIVE': 1,
            'SHUTOFF': 2,
            'BUILDING': 3,
            'DELETED': 4,
            'ERROR': 5,
            'SOFT_DELETED': 6,
            'PAUSED': 7,
            'SUSPEND': 8,
            'SHELVED': 9,
            'RESCUED': 10,
            'RESIZED': 11,
            'SHELVED_OFFLOADED': 12
        }

        self.processes = []
        for key, value in self._config['openstack_services'].iteritems():
            if value:
                self.processes.append(value['process'])

        print self.api_url
Example #48
    def update_trigger(self, service):
        """
            update the date when the trigger occurs
        """
        my_count = yield From(q2.get())
        if my_count > 0:

            logger.info(
                "user: {} - provider: {} - consumer: {} - {} = {} new data".
                format(service.user, service.provider.name.name,
                       service.consumer.name.name, service.description,
                       my_count))

            now = arrow.utcnow().to(
                settings.TIME_ZONE).format('YYYY-MM-DD HH:mm:ss')
            TriggerService.objects.filter(id=service.id).update(
                date_triggered=now)
        else:
            logger.info(
                "user: {} - provider: {} - consumer: {} - {} nothing new".
                format(service.user, service.provider.name.name,
                       service.consumer.name.name, service.description))
        asyncio.get_event_loop().stop()
Example #49
    def run(self):
        self.running = True
        self.keypress_handlers = {
            'g': self.gui_handler,
            'h': self.help_handler,
            'q': self.quit_handler,
            'v': self.verbosity_handler,
        }

        self.loop = asyncio.get_event_loop()
        asyncio.ensure_future(self.keypress_dispatch())
        asyncio.ensure_future(self.arrive_device())
        self.loop.run_forever()
        asyncio.executor.get_default_executor().shutdown(False)
Example #50
 def __init__(self,
              port=None,
              loop=None,
              timeout=None,
              write_timeout=None,
              inter_byte_timeout=None,
              **kwargs):
     if (timeout is not None or write_timeout is not None
             or inter_byte_timeout is not None):
         raise NotImplementedError("Use asyncio timeout features")
     self.ser = serial.serial_for_url(port, **kwargs)
     if loop is None:
         loop = asyncio.get_event_loop()
     self._loop = loop
Example #51
    def __init__(self, loop=None):
        super(CoreMqServerProtocol, self).__init__()

        self.LOOP = loop or asyncio.get_event_loop()
        self.uuid = str(uuid.uuid4())
        self.transport = None
        self.peer = None
        self.local_ip = None
        self.reader = asyncio.StreamReader()
        self.subscriptions = []
        self.options = dict()
        self.is_replicant = False
        self.hostname = None
        ServerState.connections[self.uuid] = self
Example #52
 def datagram_received(self, data, addr):
     header = data[0]
     payload = data[1:]
     if header == _POSITION_HEADER:
         longitude, latitude = _POSITION_STRUCT.unpack(payload)
         print("Received position: {0} {1}".format(longitude, latitude))
         print("Sending end connection packet")
         self.transport.sendto(_END_HEADER)
         self.transport.close()
     else:
         print('Unknown packet data: "{0}"'.format(data))
         self.transport.close()
         loop = asyncio.get_event_loop()
         loop.close()
Example #53
def main():
    player = Player()
    player.play()
    loop = asyncio.get_event_loop()

    try:
        tasks = [
            # asyncio.async(stop_player(player, seconds=3)),
            asyncio.async(player.init()),
            asyncio.async(run_sensor(player))
        ]
        loop.run_until_complete(asyncio.wait(tasks))
    finally:
        player.stop()
        loop.close()
Example #54
def main():
    def handler(loop, context):
        exc = context['exception']
        if isinstance(exc, DisconnectError) \
                or isinstance(exc, ConnectionResetError):
            print("Got disconnect / connection reset - shutting down.")
            sys.exit(0)
        raise context['exception']

    try:
        loop = trollius.get_event_loop()
        loop.set_exception_handler(handler)
        loop.run_until_complete(run())
    except KeyboardInterrupt:
        print("Got CtrlC, shutting down.")
Example #55
    def __init__(self, retry=False, loop=None):
        ReconnectAsyncio.__init__(self, retry=retry)

        if not loop:
            loop = asyncio.get_event_loop()

        self.retry = retry
        self.loop = loop
        self.handle = None
        self.debug = False
        self.binaryHandler = None
        self.textHandler = None
        self.openHandler = None
        self.closeHandler = None
        self.client = None
Example #56
def main(opts):
    vui_helpers.asyncio_misc_init()

    vui_helpers.logging_init(verbose=True)

    logsaver = vui_helpers.MemoryLoggingHandler(install=True)

    if opts.check:
        logging.info('Check passed')
        return
    # cleanup hack
    proc = subprocess.Popen(
        "killall -v gst-launch-1.0",
        shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT);
    outmsg, _ = proc.communicate()
    for line in outmsg.split('\n'):
        line = line.strip()
        if line != '': logging.debug('Cleanup says: ' + line)
    logging.debug('Cleanup complete, result %r' % proc.returncode)

    cif = ControlInterface(opts, logsaver=logsaver)
    logging.debug('Running')

    asyncio.get_event_loop().run_forever()
Example #57
 def datagram_received(self, data, addr):
     header = data[0]
     if header == _BEGIN_HEADER:
         print("Received begin connection packet from {}".format(str(addr)))
         print("Sending position")
         self.transport.sendto(
             _POSITION_HEADER + _POSITION_STRUCT.pack(-0.1, 1.2), addr)
     elif header == _END_HEADER:
         print("Received end connection packet from {}".format(str(addr)))
     else:
         print('Unknown packet data from {0}: "{1}"'.format(
             str(addr), data))
         self.transport.close()
         loop = asyncio.get_event_loop()
         loop.close()
Example #58
 def datagram_received(self, data, addr):
     header = data[0]
     if header == _BEGIN_HEADER:
         print("Received begin connection packet from {}".format(str(addr)))
         print("Adding client to list")
         self.clients.append(
             addr
         )  # TODO: Remove clients when they disconnect or haven't pinged in a while
     elif header == _PING_BACK:
         print("Received ping back packet from {}".format(str(addr)))
     else:
         print('Unknown packet data from {0}: "{1}"'.format(
             str(addr), data))
         self.transport.close()
         loop = asyncio.get_event_loop()
         loop.close()
Example #59
def server(ctx, host, port, timeout):
    """Typod server"""

    corrector_index = ctx.obj['corrector_index']
    corrector_cls = ctx.obj['corrector']
    inst = corrector_cls(corrector_index)
    server = TypedServer(host=host, port=port, timeout=timeout, corrector=inst)

    loop = asyncio.get_event_loop()
    logger.info('Run server on {}:{}, using {} corrector'.format(
        host, port, corrector_cls.typo_name))
    server.start(loop)
    try:
        loop.run_forever()
    finally:
        loop.close()
Example #60
    def start(self):
        print "reg start"
        self.loop = asyncio.get_event_loop()
        self.face = ThreadsafeFace(self.loop, self.address)
        self.face.setCommandSigningInfo(self.keychain, self.certificateName)
        self.face.registerPrefix(self.prefix, self.onInterest,
                                 self.onRegisterFailed)
        self._isStopped = False
        self.face.stopWhen(lambda: self._isStopped)
        try:
            self.loop.run_forever()
        except KeyboardInterrupt:
            sys.exit()
        finally:
            self.stop()