Example #1
 def frame_parser(self, reader, writer):
     # This takes care of the framing.
     last_request_id = 0
     while True:
         # Read the frame header, parse it, read the data.
         # NOTE: The readline() and readexactly() calls will hang
         # if the client doesn't send enough data but doesn't
         # disconnect either.  We add a timeout to each.  (But the
         # timeout should really be implemented by StreamReader.)
         framing_b = yield From(asyncio.wait_for(
             reader.readline(),
             timeout=args.timeout, loop=self.loop))
         if random.random()*100 < args.fail_percent:
             logging.warn('Inserting random failure')
             yield From(asyncio.sleep(args.fail_sleep*random.random(),
                                      loop=self.loop))
             writer.write(b'error random failure\r\n')
             break
         logging.debug('framing_b = %r', framing_b)
         if not framing_b:
             break  # Clean close.
         try:
             frame_keyword, request_id_b, byte_count_b = framing_b.split()
         except ValueError:
             writer.write(b'error unparseable frame\r\n')
             break
         if frame_keyword != b'request':
             writer.write(b'error frame does not start with request\r\n')
             break
         try:
             request_id, byte_count = int(request_id_b), int(byte_count_b)
         except ValueError:
             writer.write(b'error unparsable frame parameters\r\n')
             break
         if request_id != last_request_id + 1 or byte_count < 2:
             writer.write(b'error invalid frame parameters\r\n')
             break
         last_request_id = request_id
         request_b = yield From(asyncio.wait_for(
             reader.readexactly(byte_count),
             timeout=args.timeout, loop=self.loop))
         try:
             request = json.loads(request_b.decode('utf8'))
         except ValueError:
             writer.write(b'error unparsable json\r\n')
             break
         response = self.handle_request(request)  # Not a coroutine.
         if response is None:
             writer.write(b'error unhandlable request\r\n')
             break
         response_b = json.dumps(response).encode('utf8') + b'\r\n'
         byte_count = len(response_b)
         framing_s = 'response {0} {1}\r\n'.format(request_id, byte_count)
         writer.write(framing_s.encode('ascii'))
         yield From(asyncio.sleep(args.resp_sleep*random.random(),
                                  loop=self.loop))
         writer.write(response_b)
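
Example #1 is written in the legacy trollius style (an @asyncio.coroutine generator using yield From(...)). On Python 3.5+ the same per-read timeout idea is usually expressed with native coroutines; a minimal sketch, assuming only a StreamReader and a timeout value (the names below are illustrative, not taken from the example above):

import asyncio

async def read_line_with_timeout(reader, timeout=5.0):
    # Bound a single readline() so a client that stalls without disconnecting
    # cannot hang the server; None tells the caller "too slow".
    try:
        return await asyncio.wait_for(reader.readline(), timeout)
    except asyncio.TimeoutError:
        return None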
Example #2
def packets_from_tshark_sync(capture, packet_count=None, existing_process=None, timeout=20):
        """
        Returns a generator of packets.
        This is the sync version of packets_from_tshark. It waits for the completion of each coroutine and
        reimplements reading packets in a sync way, yielding each packet as it arrives.

        :param packet_count: If given, stops after this amount of packets is captured.
        """
        # NOTE: This has code duplication with the async version, think about how to solve this
        tshark_process = existing_process or capture.eventloop.run_until_complete(capture._get_tshark_process())
        psml_structure, data = capture.eventloop.run_until_complete(capture._get_psml_struct(tshark_process.stdout))
        packets_captured = 0
        data = b''
        try:
            while True:
                try:
                    packet, data = capture.eventloop.run_until_complete(asyncio.wait_for(capture._get_packet_from_stream(tshark_process.stdout, data, psml_structure=psml_structure),timeout))
                except EOFError:
                    capture.log.debug('EOF reached (sync)')
                    break
                if packet:
                    packets_captured += 1
                    yield packet
                if packet_count and packets_captured >= packet_count:
                    break
        finally:
            capture._cleanup_subprocess(tshark_process)
Example #3
    def process(self, client):
        query = (yield
                 From(asyncio.wait_for(client.reader.readline(),
                                       timeout=0.01)))
        if not query:
            return
        query = query.strip()
        client_ip = self.client_ip(client)
        cmd = query.split(' ', 1)
        if cmd[0] == 'RELOAD':
            self.corrector.reload()
            result = 'DONE'
        elif cmd[0] == 'QUERY' and len(cmd) > 1:
            data = cmd[1]
            typo = unicode(data, "utf-8")

            corrected, is_success = self.corrector.suggestion(typo)
            result = corrected.encode('utf-8')
        else:
            result = 'ERROR'

        client.writer.write('{}\n'.format(result))
        yield From(client.writer.drain())
        logger.info("{client}:request:{request}:{result}".format(
            client=client_ip, request=query, result=result))
Example #4
    def test_stdin_stdout(self):
        args = PROGRAM_CAT

        @asyncio.coroutine
        def run(data):
            proc = yield From(asyncio.create_subprocess_exec(
                                     *args,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     loop=self.loop))

            # feed data
            proc.stdin.write(data)
            yield From(proc.stdin.drain())
            proc.stdin.close()

            # get output and exitcode
            data = yield From(proc.stdout.read())
            exitcode = yield From(proc.wait())
            raise Return(exitcode, data)

        task = run(b'some data')
        task = asyncio.wait_for(task, 60.0, loop=self.loop)
        exitcode, stdout = self.loop.run_until_complete(task)
        self.assertEqual(exitcode, 0)
        self.assertEqual(stdout, b'some data')
Example #5
def list_controllers(self, timeout):
    @asyncio.coroutine
    def on_response_received(packet, future):
        self.logger.debug("List of controllers received with status={}".format(
            packet.status))
        self.remove_callback(packet.packet_uuid)

        if packet.status_code == StatusCode.Success:
            controllers = packet.get(name='controllers',
                                     format_function=lambda raw_ctrls: [
                                         Controller.from_flatbuffers(raw_ctrl)
                                         for raw_ctrl in raw_ctrls
                                     ])

            future.set_result(controllers)
        else:
            future.set_exception(
                BaBLEException(packet, "Failed to list controllers"))

    future = asyncio.Future()
    request_packet = Packet.build(GetControllersList)

    self.register_callback(request_packet.packet_uuid,
                           callback=on_response_received,
                           params={'future': future})

    self.send_packet(request_packet)

    self.logger.debug("Waiting for list of controllers...")
    try:
        controllers = yield From(asyncio.wait_for(future, timeout=timeout))
        raise asyncio.Return(controllers)
    except asyncio.TimeoutError:
        self.remove_callback(request_packet.packet_uuid)
        raise RuntimeError("List controllers timed out")
Example #6
File: shell.py Project: sshyran/trollius
def test_call(*args, **kw):
    timeout = kw.pop('timeout', None)
    try:
        proc = yield From(asyncio.create_subprocess_exec(*args))
        exitcode = yield From(asyncio.wait_for(proc.wait(), timeout))
        print("%s: exit code %s" % (' '.join(args), exitcode))
    except asyncio.TimeoutError:
        print("timeout! (%.1f sec)" % timeout)
Example #7
def main(r):
    task = asyncio.ensure_future(find_cubes(r))
    try:
        yield From(asyncio.wait_for(task, SILO_TIME))
    except asyncio.TimeoutError:
        pass

    yield From(clean_up(r))
Example #8
def main(r):
    task = asyncio.ensure_future(find_cubes(r))
    try:
        yield From(asyncio.wait_for(task, SILO_TIME))
    except asyncio.TimeoutError:
        pass

    yield From(clean_up(r))
Example #9
    def run(self):
        scrape_snapshot_path = self._get_temp_path('phantom', suffix='.html')
        action_log_path = self._get_temp_path('phantom-action', suffix='.txt')
        event_log_path = self._get_temp_path('phantom-event', suffix='.txt')
        snapshot_paths = [scrape_snapshot_path]
        snapshot_paths.extend(self._get_snapshot_paths())
        url = self._url_item.url_record.url

        driver_params = PhantomJSDriverParams(
            url=url,
            snapshot_paths=snapshot_paths,
            wait_time=self._params.wait_time,
            num_scrolls=self._params.num_scrolls,
            smart_scroll=self._params.smart_scroll,
            snapshot=self._params.snapshot,
            viewport_size=self._params.viewport_size,
            paper_size=self._params.paper_size,
            event_log_filename=event_log_path,
            action_log_filename=action_log_path,
            custom_headers=self._params.custom_headers,
            page_settings=self._params.page_settings,
        )

        driver = self._phantomjs_driver_factory(params=driver_params)

        _logger.info(__(
            _('PhantomJS fetching ‘{url}’.'),
            url=url
        ))

        with contextlib.closing(driver):
            yield From(driver.start())

            # FIXME: we don't account that things might be scrolling and
            # downloading so it might not be a good idea to timeout like
            # this
            if self._params.load_time:
                yield From(trollius.wait_for(
                    driver.process.wait(), self._params.load_time
                ))
            else:
                yield From(driver.process.wait())

            if driver.process.returncode != 0:
                raise PhantomJSCrashed(
                    'PhantomJS exited with code {}'
                    .format(driver.process.returncode)
                )

        if self._warc_recorder:
            self._add_warc_action_log(action_log_path, url)
            for path in snapshot_paths:
                self._add_warc_snapshot(path, url)

        _logger.info(__(
            _('PhantomJS fetched ‘{url}’.'),
            url=url
        ))
Example #10
 def go():
     c1 = yield From(pool.acquire())
     c2 = yield From(pool.acquire())
     c3 = pool.acquire()
     self.assertIsInstance(c3, Future)
     with self.assertRaises(trollius.TimeoutError):
         yield From(trollius.wait_for(c3, 0.1))
     c1.conn.close()
     c2.conn.close()
Example #11
 def go():
     c1 = yield From(pool.acquire())
     c2 = yield From(pool.acquire())
     c3 = pool.acquire()
     self.assertIsInstance(c3, Future)
     with self.assertRaises(trollius.TimeoutError):
         yield From(trollius.wait_for(c3, 0.1))
     c1.conn.close()
     c2.conn.close()
Example #12
    def test_host_max_limit(self):
        pool = ConnectionPool(max_host_count=2)

        yield From(pool.acquire('localhost', self.get_http_port()))
        yield From(pool.acquire('localhost', self.get_http_port()))

        with self.assertRaises(trollius.TimeoutError):
            yield From(
                trollius.wait_for(
                    pool.acquire('localhost', self.get_http_port()), 0.1))
Example #13
 def _get_parsed_packet_from_tshark(self, callback):
     yield From(self._current_tshark.stdin.drain())
     try:
         yield From(asyncio.wait_for(self.packets_from_tshark(callback, close_tshark=False),
                                    DEFAULT_TIMEOUT))
     except asyncio.TimeoutError:
         self.close()
         raise asyncio.TimeoutError("Timed out while waiting for tshark to parse packet. "
                                    "Try rerunning with cap.set_debug() to see tshark errors. "
                                    "Closing tshark..")
Example #14
 def _get_parsed_packet_from_tshark(self, callback):
     yield From(self._current_tshark.stdin.drain())
     try:
         yield From(asyncio.wait_for(self.packets_from_tshark(callback, close_tshark=False),
                                    DEFAULT_TIMEOUT))
     except asyncio.TimeoutError:
         self.close()
         raise asyncio.TimeoutError("Timed out while waiting for tshark to parse packet. "
                                    "Try rerunning with cap.set_debug() to see tshark errors. "
                                    "Closing tshark..")
Example #15
def read(self, controller_id, connection_handle, attribute_handle, on_read,
         timeout):

    if isinstance(on_read, (tuple, list)):
        on_read_cb = on_read[0]
        on_read_params = on_read[1:]
    else:
        on_read_cb = on_read
        on_read_params = []

    @asyncio.coroutine
    def on_response_received(packet, future):
        self.logger.debug("Read response received with status={}".format(
            packet.status))
        self.remove_callback(packet.packet_uuid)

        if packet.status_code == StatusCode.Success:
            data = packet.get_dict([
                'controller_id', 'connection_handle', 'attribute_handle',
                ('value', bytes)
            ])

            on_read_cb(True, data, None, *on_read_params)

            future.set_result(data)
        else:
            error = BaBLEException(packet,
                                   "Failed to read value",
                                   connection_handle=connection_handle,
                                   attribute_handle=attribute_handle)
            on_read_cb(False, None, error, *on_read_params)

            future.set_exception(error)

    future = asyncio.Future()
    request_packet = Packet.build(ReadCentral,
                                  controller_id=controller_id,
                                  connection_handle=connection_handle,
                                  attribute_handle=attribute_handle)

    self.register_callback(request_packet.packet_uuid,
                           callback=on_response_received,
                           params={'future': future})

    self.send_packet(request_packet)

    self.logger.debug("Reading...")
    try:
        result = yield From(asyncio.wait_for(future, timeout=timeout))
        raise asyncio.Return(result)
    except asyncio.TimeoutError:
        self.remove_callback(request_packet.packet_uuid)
        on_read_cb(False, None, "Read timed out", *on_read_params)
        raise RuntimeError("Read timed out")
Example #16
File: core27.py Project: Aiden880/IRCPyd
 def handle_ident(self, message, host, port):
     try:
         client_reader, client_writer = yield From(asyncio.open_connection(host, port))
         request = "{}\r\n".format(message)
         client_writer.write(request.encode())
         data = yield From(asyncio.wait_for(client_reader.readline(), timeout=6.0))
         data = data.decode().rstrip()
         print("Ident Lookup got '{}'".format(data))
     except Exception:
         raise Return(None)
     # Note: trollius.Return derives from StopIteration, so raising it inside the
     # try block above would be swallowed by the except clause; raise it afterwards.
     raise Return(data)
Example #17
def probe_characteristics(self, controller_id, connection_handle, start_handle,
                          end_handle, on_characteristics_probed, timeout):

    if isinstance(on_characteristics_probed, (tuple, list)):
        on_characteristics_probed_cb = on_characteristics_probed[0]
        on_characteristics_probed_params = on_characteristics_probed[1:]
    else:
        on_characteristics_probed_cb = on_characteristics_probed
        on_characteristics_probed_params = []

    @asyncio.coroutine
    def on_response_received(packet, future):
        self.logger.debug(
            "Probe characteristics response received with status={}".format(
                packet.status))
        self.remove_callback(packet.packet_uuid)

        if packet.status_code == StatusCode.Success:
            characteristics = packet.get_dict([
                'controller_id', 'connection_handle',
                ('characteristics', lambda chars:
                 [Characteristic.from_flatbuffers(char) for char in chars])
            ])
            on_characteristics_probed_cb(True, characteristics, None,
                                         *on_characteristics_probed_params)
            future.set_result(characteristics)
        else:
            error = BaBLEException(packet,
                                   "Failed to probe characteristics",
                                   connection_handle=connection_handle)
            on_characteristics_probed_cb(False, None, error,
                                         *on_characteristics_probed_params)
            future.set_exception(error)

    future = asyncio.Future()
    request_packet = Packet.build(ProbeCharacteristics,
                                  controller_id=controller_id,
                                  connection_handle=connection_handle,
                                  start_handle=start_handle,
                                  end_handle=end_handle)

    self.register_callback(request_packet.packet_uuid,
                           callback=on_response_received,
                           params={'future': future})

    self.send_packet(request_packet)

    self.logger.debug("Waiting for characteristics...")
    try:
        characteristics = yield From(asyncio.wait_for(future, timeout=timeout))
        raise asyncio.Return(characteristics)
    except asyncio.TimeoutError:
        self.remove_callback(request_packet.packet_uuid)
        raise RuntimeError("Probe characteristics timed out")
Example #18
    def test_host_max_limit(self):
        pool = ConnectionPool(max_host_count=2)

        yield From(pool.acquire('localhost', self.get_http_port()))
        yield From(pool.acquire('localhost', self.get_http_port()))

        with self.assertRaises(trollius.TimeoutError):
            yield From(trollius.wait_for(
                pool.acquire('localhost', self.get_http_port()),
                0.1
            ))
Example #19
    def _raw_recv_packet(self):
        @asyncio.coroutine
        def read_frame_header():
            # Read until we get a frame header.
            maybe_header = ''
            while maybe_header != '\xff\xff':
                maybe_header += yield From(
                    self.serial.read(max(1, min(2, 2 - len(maybe_header)))))
                if len(maybe_header) > 2:
                    maybe_header = maybe_header[-2:]

        yield From(
            asyncio.wait_for(read_frame_header(),
                             max(0.1, 6 * 10 / self.baud_rate + 0.01)))

        header = yield From(
            asyncio.wait_for(self.serial.read(5),
                             max(0.1, 5 * 10 / self.baud_rate + 0.005)))
        size, servo, cmd, cksum1, cksum2 = [ord(x) for x in header]

        data = yield From(
            asyncio.wait_for(
                self.serial.read(size - 7),
                max(0.1, (size - 7) * 10 / self.baud_rate + 0.005)))

        expected_cksum1 = self._cksum1(size, servo, cmd, data)
        expected_cksum2 = expected_cksum1 ^ 0xff

        result = Packet()
        result.servo = servo
        result.cmd = cmd
        result.data = data
        result.cksum_good = (cksum1 == (expected_cksum1 & 0xfe)
                             and cksum2 == (expected_cksum2 & 0xfe))

        if not result.cksum_good:
            raise ChecksumError(result)

        raise Return(result)
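
The timeouts in Examples #19 and #20 are derived from the serial line rate: one byte on the wire takes roughly 10 bit times (start bit, 8 data bits, stop bit), so reading n bytes at baud_rate bits per second takes about n * 10 / baud_rate seconds; the code then adds a small margin and keeps a 0.1 s floor. That rule as a standalone helper (the function name is illustrative):

def serial_read_timeout(num_bytes, baud_rate, margin=0.005, floor=0.1):
    # ~10 bit times per byte; the floor keeps slow schedulers from causing
    # spurious timeouts on fast links.
    return max(floor, num_bytes * 10.0 / baud_rate + margin)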
Example #20
    def _raw_recv_packet(self):
        
        @asyncio.coroutine
        def read_frame_header():
            # Read until we get a frame header.
            maybe_header = ''
            while maybe_header != '\xff\xff':
                maybe_header += yield From(self.serial.read(
                        max(1, min(2, 2 - len(maybe_header)))))
                if len(maybe_header) > 2:
                    maybe_header = maybe_header[-2:]

        yield From(asyncio.wait_for(
                read_frame_header(),
                max(0.1, 6 * 10 / self.baud_rate + 0.01)))

        header = yield From(asyncio.wait_for(
                self.serial.read(5), max(0.1, 5*10 / self.baud_rate + 0.005)))
        size, servo, cmd, cksum1, cksum2 = [ord(x) for x in header]

        data = yield From(asyncio.wait_for(
                self.serial.read(size - 7),
                max(0.1, (size - 7) * 10 / self.baud_rate + 0.005)))

        expected_cksum1 = self._cksum1(size, servo, cmd, data)
        expected_cksum2 = expected_cksum1 ^ 0xff

        result = Packet()
        result.servo = servo
        result.cmd = cmd
        result.data = data
        result.cksum_good = (cksum1 == (expected_cksum1 & 0xfe) and
                             cksum2 == (expected_cksum2 & 0xfe))

        if not result.cksum_good:
            raise ChecksumError(result)

        raise Return(result)
Example #21
    def read_content(self,
                     file=None,
                     raw=False,
                     rewind=True,
                     duration_timeout=None):
        '''Read the response content into file.

        Args:
            file: A file object or asyncio stream.
            raw (bool): Whether chunked transfer encoding should be included.
            rewind (bool): Seek the given file back to its original offset after
                reading is finished.
            duration_timeout (int): Maximum time in seconds of which the
                entire file must be read.

        Be sure to call :meth:`fetch` first.

        Coroutine.
        '''
        if rewind and file and hasattr(file, 'seek'):
            original_offset = file.tell()
        else:
            original_offset = None

        if not hasattr(file, 'drain'):
            self._response.body = file

            if not isinstance(file, Body):
                self._response.body = Body(file)

        read_future = self._stream.read_body(self._request,
                                             self._response,
                                             file=file,
                                             raw=raw)

        try:
            yield From(trollius.wait_for(read_future,
                                         timeout=duration_timeout))
        except trollius.TimeoutError as error:
            raise DurationTimeout(
                'Did not finish reading after {} seconds.'.format(
                    duration_timeout)) from error

        self._session_complete = True

        if original_offset is not None:
            file.seek(original_offset)

        if self._recorder_session:
            self._recorder_session.response(self._response)
Example #22
 def go():
     c1 = yield From(pool.acquire())
     c2 = yield From(pool.acquire())
     c3 = pool.acquire()
     self.assertIsInstance(c3, Future)
     with self.assertRaises(trollius.TimeoutError):
         shielded_fut = trollius.shield(c3)
         yield From(trollius.wait_for(shielded_fut, 0.1))
     yield From(pool.release(c2))
     c3 = yield From(c3)
     self.assertEqual(c2, c3)
     c1.conn.close()
     c2.conn.close()
     c3.conn.close()
Example #23
 def go():
     c1 = yield From(pool.acquire())
     c2 = yield From(pool.acquire())
     c3 = pool.acquire()
     self.assertIsInstance(c3, Future)
     with self.assertRaises(trollius.TimeoutError):
         shielded_fut = trollius.shield(c3)
         yield From(trollius.wait_for(shielded_fut, 0.1))
     yield From(pool.release(c2))
     c3 = yield From(c3)
     self.assertEqual(c2, c3)
     c1.conn.close()
     c2.conn.close()
     c3.conn.close()
Example #24
    def run(self):
        scrape_snapshot_path = self._get_temp_path('phantom', suffix='.html')
        action_log_path = self._get_temp_path('phantom-action', suffix='.txt')
        event_log_path = self._get_temp_path('phantom-event', suffix='.txt')
        snapshot_paths = [scrape_snapshot_path]
        snapshot_paths.extend(self._get_snapshot_paths())
        url = self._url_item.url_record.url

        driver_params = PhantomJSDriverParams(
            url=url,
            snapshot_paths=snapshot_paths,
            wait_time=self._params.wait_time,
            num_scrolls=self._params.num_scrolls,
            smart_scroll=self._params.smart_scroll,
            snapshot=self._params.snapshot,
            viewport_size=self._params.viewport_size,
            paper_size=self._params.paper_size,
            event_log_filename=event_log_path,
            action_log_filename=action_log_path,
            custom_headers=self._params.custom_headers,
            page_settings=self._params.page_settings,
        )

        driver = self._phantomjs_driver_factory(params=driver_params)

        _logger.info(__(_('PhantomJS fetching ‘{url}’.'), url=url))

        with contextlib.closing(driver):
            yield From(driver.start())

            # FIXME: we don't account that things might be scrolling and
            # downloading so it might not be a good idea to timeout like
            # this
            if self._params.load_time:
                yield From(
                    trollius.wait_for(driver.process.wait(),
                                      self._params.load_time))
            else:
                yield From(driver.process.wait())

            if driver.process.returncode != 0:
                raise PhantomJSCrashed('PhantomJS exited with code {}'.format(
                    driver.process.returncode))

        if self._warc_recorder:
            self._add_warc_action_log(action_log_path, url)
            for path in snapshot_paths:
                self._add_warc_snapshot(path, url)

        _logger.info(__(_('PhantomJS fetched ‘{url}’.'), url=url))
Example #25
    def read_content(self, file=None, rewind=True, duration_timeout=None):
        '''Read the response content into file.

        Args:
            file: A file object or asyncio stream.
            rewind: Seek the given file back to its original offset after
                reading is finished.
            duration_timeout (int): Maximum time in seconds of which the
                entire file must be read.

        Returns:
            .ftp.request.Response: A Response populated with the final
            data connection reply.

        Be sure to call :meth:`fetch` first.

        Coroutine.
        '''
        if rewind and file and hasattr(file, 'seek'):
            original_offset = file.tell()
        else:
            original_offset = None

        if not hasattr(file, 'drain'):
            self._response.body = file

            if not isinstance(file, Body):
                self._response.body = Body(file)

        read_future = self._commander.read_stream(file, self._data_stream)

        try:
            reply = yield From(
                trollius.wait_for(read_future, timeout=duration_timeout)
            )
        except trollius.TimeoutError as error:
            raise DurationTimeout(
                'Did not finish reading after {} seconds.'
                .format(duration_timeout)
            ) from error

        self._response.reply = reply

        if original_offset is not None:
            file.seek(original_offset)

        if self._recorder_session:
            self._recorder_session.response(self._response)

        raise Return(self._response)
Example #26
    def enumerate(self):
        """Enumerate the list of servos on the bus.  Note, this will
        take approximately 5s to complete.

        :returns: a list of integer servo IDs
        """
        result = []
        for servo in range(0xfe):
            try:
                yield From(asyncio.wait_for(self.status(servo), 0.02))
                result.append(servo)
            except asyncio.TimeoutError:
                pass

        raise Return(result)
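
Example #26 sweeps every possible servo ID with a very short per-probe timeout and simply skips the IDs that never answer. The same pattern in native asyncio, with probe() standing in for any per-target coroutine (a sketch, not the library's code):

import asyncio

async def sweep(probe, targets, per_target_timeout=0.02):
    # Silence from a target just means it is absent, so the timeout is
    # swallowed instead of being treated as an error.
    found = []
    for target in targets:
        try:
            await asyncio.wait_for(probe(target), per_target_timeout)
        except asyncio.TimeoutError:
            continue
        found.append(target)
    return found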
Example #27
    def enumerate(self):
        """Enumerate the list of servos on the bus.  Note, this will
        take approximately 5s to complete.

        :returns: a list of integer servo IDs
        """
        result = []
        for servo in range(0xfe):
            try:
                yield From(asyncio.wait_for(self.status(servo), 0.02))
                result.append(servo)
            except asyncio.TimeoutError:
                pass

        raise Return(result)
Example #28
    def read_content(self, file=None, rewind=True, duration_timeout=None):
        '''Read the response content into file.

        Args:
            file: A file object or asyncio stream.
            rewind: Seek the given file back to its original offset after
                reading is finished.
            duration_timeout (int): Maximum time in seconds of which the
                entire file must be read.

        Returns:
            .ftp.request.Response: A Response populated with the final
            data connection reply.

        Be sure to call :meth:`fetch` first.

        Coroutine.
        '''
        if rewind and file and hasattr(file, 'seek'):
            original_offset = file.tell()
        else:
            original_offset = None

        if not hasattr(file, 'drain'):
            self._response.body = file

            if not isinstance(file, Body):
                self._response.body = Body(file)

        read_future = self._commander.read_stream(file, self._data_stream)

        try:
            reply = yield From(
                trollius.wait_for(read_future, timeout=duration_timeout))
        except trollius.TimeoutError as error:
            raise DurationTimeout(
                'Did not finish reading after {} seconds.'.format(
                    duration_timeout)) from error

        self._response.reply = reply

        if original_offset is not None:
            file.seek(original_offset)

        if self._recorder_session:
            self._recorder_session.response(self._response)

        raise Return(self._response)
Example #29
File: dns.py Project: flatron18116/wpull
    def _resolve_from_network(self, host, port):
        '''Resolve the address using network.

        Returns:
            list: A list of tuples.
        '''
        _logger.debug('Resolving {0} {1} {2}.'.format(host, port,
                                                      self._family))

        try:
            future = self._getaddrinfo_implementation(host, port)
            results = yield From(trollius.wait_for(future, self._timeout))
        except trollius.TimeoutError as error:
            raise NetworkError('DNS resolve timed out.') from error
        else:
            raise Return(results)
Example #30
    def test_communicate(self):
        args = PROGRAM_CAT

        @asyncio.coroutine
        def run(data):
            proc = yield From(
                asyncio.create_subprocess_exec(*args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, loop=self.loop)
            )
            stdout, stderr = yield From(proc.communicate(data))
            raise Return(proc.returncode, stdout)

        task = run(b"some data")
        task = asyncio.wait_for(task, 60.0, loop=self.loop)
        exitcode, stdout = self.loop.run_until_complete(task)
        self.assertEqual(exitcode, 0)
        self.assertEqual(stdout, b"some data")
Example #31
 def _cleanup_subprocess(self, process):
     """
     Kill the given process and properly closes any pipes connected to it.
     """
     if process.returncode is None:
         try:
             process.kill()
             yield asyncio.wait_for(process.wait(), 1)
         except TimeoutError:
             self._log.debug('Waiting for process to close failed, may have zombie process.')
         except ProcessLookupError:
             pass
         except OSError:
             if os.name != 'nt':
                 raise
     elif process.returncode > 0:
         raise TSharkCrashException('TShark seems to have crashed (retcode: %d). Try rerunning in debug mode [ capture_obj.set_debug() ] or try updating tshark.' % process.returncode)
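
Example #31 kills a still-running tshark process and then gives wait() one second to reap it before giving up. A sketch of the same grace-period idea for an asyncio subprocess; the terminate-then-kill escalation is an added assumption, not pyshark's code:

import asyncio

async def stop_process(proc, grace=1.0):
    # Ask the child to exit, wait briefly, then escalate if it ignores us.
    if proc.returncode is None:
        proc.terminate()
        try:
            await asyncio.wait_for(proc.wait(), grace)
        except asyncio.TimeoutError:
            proc.kill()
            await proc.wait()
    return proc.returncode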
Example #32
File: dns.py Project: Willianvdv/wpull
    def _resolve_from_network(self, host, port):
        '''Resolve the address using network.

        Returns:
            list: A list of tuples.
        '''
        _logger.debug(
            'Resolving {0} {1} {2}.'.format(host, port, self._family)
        )

        try:
            future = self._getaddrinfo_implementation(host, port)
            results = yield From(trollius.wait_for(future, self._timeout))
        except trollius.TimeoutError as error:
            raise NetworkError('DNS resolve timed out.') from error
        else:
            raise Return(results)
Example #33
 def _cleanup_subprocess(self, process):
     """
     Kill the given process and properly closes any pipes connected to it.
     """
     if process.returncode is None:
         try:
             process.kill()
             yield asyncio.wait_for(process.wait(), 1)
         except TimeoutError:
             self._log.debug('Waiting for process to close failed, may have zombie process.')
         except ProcessLookupError:
             pass
         except OSError:
             if os.name != 'nt':
                 raise
     elif process.returncode > 0:
         raise TSharkCrashException('TShark seems to have crashed (retcode: %d). Try rerunning in debug mode [ capture_obj.set_debug() ] or try updating tshark.' % process.returncode)
Example #34
File: capture.py Project: wfr/pyshark
    def apply_on_packets(self, callback, timeout=None):
        """
        Runs through all packets and calls the given callback (a function) with each one as it is read.
        If the capture is infinite (i.e. a live capture), it will run forever, otherwise it will complete after all
        packets have been read.

        Example usage:
        def print_callback(pkt):
            print pkt
        capture.apply_on_packets(print_callback)

        If a timeout is given, raises a Timeout error if not complete before the timeout (in seconds)
        """
        coro = self.packets_from_tshark(callback)
        if timeout is not None:
            coro = asyncio.wait_for(coro, timeout)
        return self.eventloop.run_until_complete(coro)
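
apply_on_packets() above only wraps the whole coroutine in asyncio.wait_for when a timeout is given, then drives it to completion on the event loop. The same wrapper in isolation, as a small helper for modern asyncio (run_with_timeout is a made-up name):

import asyncio

def run_with_timeout(coro, timeout=None):
    # Optionally bound the coroutine's total runtime, then run it to completion.
    if timeout is not None:
        coro = asyncio.wait_for(coro, timeout)
    return asyncio.run(coro)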
Example #35
def test_asyncserial_timeout_error():
    '''
    Verify serial device AsyncSerial instance is still tied up after closing.

    In Windows, it turns out that the serial port is tied up by an AsyncSerial
    instance until the corresponding event loop is closed.  This test tests
    that this is true.
    '''
    ports = serial.tools.list_ports.comports()
    if not ports:
        raise RuntimeError('No comports available.')

    kwargs = {'port': ports[0].device}

    @asyncio.coroutine
    def _open_asyncserial():
        with asyncserial.AsyncSerial(**kwargs) as async_device:
            yield asyncio.From(asyncio.sleep(5))

        raise asyncio.Return(None)

    def _open_serial(retries=1):
        for i in range(retries):
            try:
                with serial.Serial(**kwargs):
                    pass
                break
            except serial.SerialException as exception:
                pass
        else:
            raise exception

    _open_serial()

    try:
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(asyncio.wait_for(_open_asyncserial(),
                                                 timeout=2))
    except asyncio.TimeoutError:
        pass

    try:
        _open_serial()
    except serial.SerialException:
        raises(serial.SerialException)(_open_serial)()
Example #36
    def apply_on_packets(self, callback, timeout=None):
        """
        Runs through all packets and calls the given callback (a function) with each one as it is read.
        If the capture is infinite (i.e. a live capture), it will run forever, otherwise it will complete after all
        packets have been read.

        Example usage:
        def print_callback(pkt):
            print pkt
        capture.apply_on_packets(print_callback)

        If a timeout is given, raises a Timeout error if not complete before the timeout (in seconds)
        """
        coro = self.packets_from_tshark(callback)
        if timeout is not None:
            coro = asyncio.wait_for(coro, timeout)
        return self.eventloop.run_until_complete(coro)
Example #37
def test_asyncserial_timeout_workaround():
    '''
    Test closing event loop to free up device AsyncSerial instance.
    '''
    ports = serial.tools.list_ports.comports()
    if not ports:
        raise RuntimeError('No comports available.')

    kwargs = {'port': ports[0].device}

    @asyncio.coroutine
    def _open_asyncserial():
        with asyncserial.AsyncSerial(**kwargs) as async_device:
            yield asyncio.From(asyncio.sleep(5))

        raise asyncio.Return(None)

    def _open_serial(retries=1):
        for i in range(retries):
            try:
                with serial.Serial(**kwargs):
                    pass
                break
            except serial.SerialException as exception:
                pass
        else:
            raise exception

    _open_serial()

    try:
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(asyncio.wait_for(_open_asyncserial(),
                                                 timeout=2))
    except asyncio.TimeoutError:
        pass
    finally:
        # Close event loop.
        loop.close()

    try:
        _open_serial()
    except serial.SerialException:
        _open_serial()
Example #38
def set_gatt_table(self, controller_id, services, characteristics, on_set,
                   timeout):

    if isinstance(on_set, (tuple, list)):
        on_set_cb = on_set[0]
        on_set_params = on_set[1:]
    else:
        on_set_cb = on_set
        on_set_params = []

    @asyncio.coroutine
    def on_response_received(packet, future):
        self.logger.debug("GATT table set with status={}".format(
            packet.status))
        self.remove_callback(packet.packet_uuid)

        if packet.status_code == StatusCode.Success:
            data = packet.get_dict(['controller_id'])

            on_set_cb(True, data, None, *on_set_params)
            future.set_result(data)
        else:
            error = BaBLEException(packet, "Failed to set GATT table")
            on_set_cb(False, None, error, *on_set_params)
            future.set_exception(error)

    future = asyncio.Future()
    request_packet = Packet.build(SetGATTTable,
                                  controller_id=controller_id,
                                  services=services,
                                  characteristics=characteristics)

    self.register_callback(request_packet.packet_uuid,
                           callback=on_response_received,
                           params={'future': future})

    self.send_packet(request_packet)

    self.logger.debug("Waiting for setting GATT table response...")
    try:
        result = yield From(asyncio.wait_for(future, timeout=timeout))
        raise asyncio.Return(result)
    except asyncio.TimeoutError:
        self.remove_callback(request_packet.packet_uuid)
        raise RuntimeError("Set GATT table timed out")
Example #39
def stop_scan(self, controller_id, on_scan_stopped, timeout):

    if isinstance(on_scan_stopped, (tuple, list)):
        on_scan_stopped_cb = on_scan_stopped[0]
        on_scan_stopped_params = on_scan_stopped[1:]
    else:
        on_scan_stopped_cb = on_scan_stopped
        on_scan_stopped_params = []

    @asyncio.coroutine
    def on_response_received(packet, future):
        self.logger.debug("Stop scan response received with status={}".format(
            packet.status))
        self.remove_callback(packet.packet_uuid)

        if packet.status_code == StatusCode.Success:
            self.remove_callback(
                PacketUuid(payload_type=Payload.DeviceFound,
                           controller_id=controller_id))
            on_scan_stopped_cb(True, packet.get_dict(['controller_id']), None,
                               *on_scan_stopped_params)
            future.set_result(True)
        else:
            error = BaBLEException(packet, "Failed to stop scan")
            on_scan_stopped_cb(False, None, error, *on_scan_stopped_params)
            future.set_exception(error)

    future = asyncio.Future()
    request_packet = Packet.build(StopScan, controller_id=controller_id)

    self.register_callback(request_packet.packet_uuid,
                           callback=on_response_received,
                           params={'future': future})

    self.send_packet(request_packet)

    self.logger.debug("Waiting for scan to stop...")
    try:
        result = yield From(asyncio.wait_for(future, timeout=timeout))
        raise asyncio.Return(result)
    except asyncio.TimeoutError:
        self.remove_callback(request_packet.packet_uuid)
        on_scan_stopped_cb(False, None, "Stop scan timed out",
                           *on_scan_stopped_params)
        raise RuntimeError("Stop scan timed out")
Example #40
def cancel_connection(self, controller_id, on_connection_cancelled, timeout):

    if isinstance(on_connection_cancelled, (tuple, list)):
        on_connection_cancelled_cb = on_connection_cancelled[0]
        on_connection_cancelled_params = on_connection_cancelled[1:]
    else:
        on_connection_cancelled_cb = on_connection_cancelled
        on_connection_cancelled_params = []

    @asyncio.coroutine
    def on_response_received(packet, future):
        self.logger.debug(
            "Cancel connection response received with status={}".format(
                packet.status))
        self.remove_callback(packet.packet_uuid)

        if packet.status_code == StatusCode.Success:
            on_connection_cancelled_cb(True, None, None,
                                       *on_connection_cancelled_params)
            future.set_result(True)
        else:
            error = BaBLEException(packet, "Failed to cancel connection")
            on_connection_cancelled_cb(False, None, error,
                                       *on_connection_cancelled_params)
            future.set_exception(error)

    future = asyncio.Future()
    request_packet = Packet.build(CancelConnection,
                                  controller_id=controller_id)

    self.register_callback(request_packet.packet_uuid,
                           callback=on_response_received,
                           params={'future': future})

    self.send_packet(request_packet)

    self.logger.debug("Waiting for connection to cancel...")
    try:
        result = yield From(asyncio.wait_for(future, timeout=timeout))
        raise asyncio.Return(result)
    except asyncio.TimeoutError:
        self.remove_callback(request_packet.packet_uuid)
        on_connection_cancelled_cb(False, None, "Cancel connection timed out",
                                   *on_connection_cancelled_params)
        raise RuntimeError("Cancel connection timed out")
Example #41
    def test_communicate(self):
        args = PROGRAM_CAT

        @asyncio.coroutine
        def run(data):
            proc = yield From(asyncio.create_subprocess_exec(
                                          *args,
                                          stdin=subprocess.PIPE,
                                          stdout=subprocess.PIPE,
                                          loop=self.loop))
            stdout, stderr = yield From(proc.communicate(data))
            raise Return(proc.returncode, stdout)

        task = run(b'some data')
        task = asyncio.wait_for(task, 60.0, loop=self.loop)
        exitcode, stdout = self.loop.run_until_complete(task)
        self.assertEqual(exitcode, 0)
        self.assertEqual(stdout, b'some data')
Example #42
    def read_content(self, file=None, raw=False, rewind=True,
                     duration_timeout=None):
        '''Read the response content into file.

        Args:
            file: A file object or asyncio stream.
            raw (bool): Whether chunked transfer encoding should be included.
            rewind (bool): Seek the given file back to its original offset after
                reading is finished.
            duration_timeout (int): Maximum time in seconds of which the
                entire file must be read.

        Be sure to call :meth:`fetch` first.

        Coroutine.
        '''
        if rewind and file and hasattr(file, 'seek'):
            original_offset = file.tell()
        else:
            original_offset = None

        if not hasattr(file, 'drain'):
            self._response.body = file

            if not isinstance(file, Body):
                self._response.body = Body(file)

        read_future = self._stream.read_body(self._request, self._response, file=file, raw=raw)

        try:
            yield From(trollius.wait_for(read_future, timeout=duration_timeout))
        except trollius.TimeoutError as error:
            raise DurationTimeout(
                'Did not finish reading after {} seconds.'
                .format(duration_timeout)
            ) from error

        self._session_complete = True

        if original_offset is not None:
            file.seek(original_offset)

        if self._recorder_session:
            self._recorder_session.response(self._response)
Example #43
def notify(self, controller_id, connection_handle, attribute_handle, value,
           timeout):

    # TODO: use characteristic instead of attribute_handle

    @asyncio.coroutine
    def on_ack_received(packet, future):
        self.logger.debug(
            "EmitNotification ack received with status={}".format(
                packet.status))
        self.remove_callback(packet.packet_uuid)

        if packet.status_code == StatusCode.Success:
            future.set_result(True)
        else:
            error = BaBLEException(packet,
                                   "Failed to send EmitNotification packet",
                                   connection_handle=connection_handle,
                                   attribute_handle=attribute_handle)
            future.set_exception(error)

    future = asyncio.Future()
    request_packet = Packet.build(EmitNotification,
                                  controller_id=controller_id,
                                  connection_handle=connection_handle,
                                  attribute_handle=attribute_handle,
                                  value=bytes(value))

    self.register_callback(request_packet.packet_uuid,
                           callback=on_ack_received,
                           params={'future': future})

    self.send_packet(request_packet)

    try:
        result = yield From(asyncio.wait_for(future, timeout=timeout))
        self.logger.debug("Notification sent")
        raise asyncio.Return(result)
    except asyncio.TimeoutError:
        self.remove_callback(request_packet.packet_uuid)
        raise RuntimeError("Notification timed out")
    def _get_conn(self, timeout=None):
        """
        Get a connection. Will return a pooled connection if one is available.

        If no connections are available and :prop:`.block` is ``False``, then a
        fresh connection is returned.

        :param timeout:
            Seconds to wait before giving up and raising
            :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
            :prop:`.block` is ``True``.
        """
        conn = None
        try:
            timeout = Timeout.from_float(timeout)
            try:
                conn = yield From(asyncio.wait_for(self.pool.get(), timeout.connect_timeout))
                pass

            except AttributeError:  # self.pool is None
                raise ClosedPoolError(self, "Pool is closed.")

            except asyncio.TimeoutError:
                raise QueueEmpty

        except QueueEmpty:
            if self.block:
                raise EmptyPoolError(self, "Pool reached maximum size and no more " "connections are allowed.")
            pass  # Oh well, we'll create a new connection then

        # If this is a persistent connection, check if it got disconnected
        if conn and is_connection_dropped(conn):
            log.info("Resetting dropped connection: %s" % self.host)
            conn.close()
            if getattr(conn, "auto_open", 1) == 0:
                # This is a proxied connection that has been mutated by
                # httplib._tunnel() and cannot be reused (since it would
                # attempt to bypass the proxy)
                conn = None

        raise Return(conn or self._new_conn())
Example #45
def write_without_response(self, controller_id, connection_handle,
                           attribute_handle, value, timeout):
    @asyncio.coroutine
    def on_ack_received(packet, future):
        self.logger.debug(
            "WriteWithoutResponse ack received with status={}".format(
                packet.status))
        self.remove_callback(packet.packet_uuid)

        if packet.status_code == StatusCode.Success:
            future.set_result(True)
        else:
            error = BaBLEException(
                packet,
                "Failed to send WriteWithoutResponse packet",
                connection_handle=connection_handle,
                attribute_handle=attribute_handle)
            future.set_exception(error)

    future = asyncio.Future()
    request_packet = Packet.build(WriteWithoutResponseCentral,
                                  controller_id=controller_id,
                                  connection_handle=connection_handle,
                                  attribute_handle=attribute_handle,
                                  value=bytes(value))

    self.register_callback(request_packet.packet_uuid,
                           callback=on_ack_received,
                           params={'future': future})

    self.send_packet(request_packet)

    try:
        result = yield From(asyncio.wait_for(future, timeout=timeout))
        self.logger.debug("Write without response command sent")
        raise asyncio.Return(result)
    except asyncio.TimeoutError:
        self.remove_callback(request_packet.packet_uuid)
        raise RuntimeError("WriteWithoutResponse timed out")
Example #46
def packets_from_tshark_sync(capture,
                             packet_count=None,
                             existing_process=None,
                             timeout=20):
    """
        Returns a generator of packets.
        This is the sync version of packets_from_tshark. It waits for the completion of each coroutine and
        reimplements reading packets in a sync way, yielding each packet as it arrives.

        :param packet_count: If given, stops after this amount of packets is captured.
        """
    # NOTE: This has code duplication with the async version, think about how to solve this
    tshark_process = existing_process or capture.eventloop.run_until_complete(
        capture._get_tshark_process())
    psml_structure, data = capture.eventloop.run_until_complete(
        capture._get_psml_struct(tshark_process.stdout))
    packets_captured = 0
    data = b''
    try:
        while True:
            try:
                packet, data = capture.eventloop.run_until_complete(
                    asyncio.wait_for(
                        capture._get_packet_from_stream(
                            tshark_process.stdout,
                            data,
                            psml_structure=psml_structure), timeout))
            except EOFError:
                capture.log.debug('EOF reached (sync)')
                break
            if packet:
                packets_captured += 1
                yield packet
            if packet_count and packets_captured >= packet_count:
                break
    finally:
        capture._cleanup_subprocess(tshark_process)
Example #47
def show_chip(signals, title='DMF chip'):
    '''
    Display raw webcam view and corresponding perspective-corrected chip view.

    Press ``q`` key to close window.

    Parameters
    ----------
    signals : blinker.Namespace
        DMF chip webcam monitor signals (see
        :func:`dropbot_chip_qc.video.chip_video_process()`).
    title : str, optional
        Window title.

    See also
    --------
    dropbot_chip_qc.video.chip_video_process()
    '''
    print('Press "q" to quit')

    loop = asyncio.get_event_loop()
    frame_ready = asyncio.Event()

    def on_frame_ready(sender, **message):
        frame_ready.record = message
        loop.call_soon_threadsafe(frame_ready.set)

    signals.signal('frame-ready').connect(on_frame_ready)

    while True:
        try:
            yield asyncio.wait_for(frame_ready.wait(), .01)
            cv2.imshow(title, frame_ready.record['frame'])
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        except asyncio.TimeoutError:
            continue
Example #48
    def process(self, client):
        query = (yield From(asyncio.wait_for(client.reader.readline(),
                                             timeout=0.01)))
        if not query:
            return
        query = query.strip()
        client_ip = self.client_ip(client)
        cmd = query.split(' ', 1)
        if cmd[0] == 'RELOAD':
            self.corrector.reload()
            result = 'DONE'
        elif cmd[0] == 'QUERY' and len(cmd) > 1:
            data = cmd[1]
            typo = unicode(data, "utf-8")

            corrected, is_success = self.corrector.suggestion(typo)
            result = corrected.encode('utf-8')
        else:
            result = 'ERROR'

        client.writer.write('{}\n'.format(result))
        yield From(client.writer.drain())
        logger.info("{client}:request:{request}:{result}"
                    .format(client=client_ip, request=query, result=result))
Example #49
File: async.py Project: Willianvdv/wpull
 def wrapper(self):
     return self.event_loop.run_until_complete(
         trollius.wait_for(f(self), timeout=timeout,
                           loop=self.event_loop)
     )
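
Example #49 is the body of a test helper that bounds each coroutine-based test with wait_for before running it on the event loop. A comparable decorator for modern asyncio might look like this (async_test is a hypothetical name, not wpull's API):

import asyncio
import functools

def async_test(timeout=30):
    # Run a coroutine test method to completion, failing if it exceeds `timeout`.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            return asyncio.run(
                asyncio.wait_for(func(self, *args, **kwargs), timeout))
        return wrapper
    return decorator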
Example #50
 def queue_get():
     result = (yield From(asyncio.wait_for(q.get(), 0.051, loop=loop)))
     raise Return(result)
Example #51
    def run_network_operation(self, task, wait_timeout=None,
                              close_timeout=None,
                              name='Network operation'):
        '''Run the task and raise appropriate exceptions.

        Coroutine.
        '''
        if wait_timeout is not None and close_timeout is not None:
            raise Exception(
                'Cannot use wait_timeout and close_timeout at the same time')

        try:
            if close_timeout is not None:
                with self._close_timer.with_timeout():
                    data = yield From(task)

                if self._close_timer.is_timeout():
                    raise NetworkTimedOut(
                        '{name} timed out.'.format(name=name))
                else:
                    raise Return(data)
            elif wait_timeout is not None:
                data = yield From(trollius.wait_for(task, wait_timeout))
                raise Return(data)
            else:
                raise Return((yield From(task)))

        except trollius.TimeoutError as error:
            self.close()
            raise NetworkTimedOut(
                '{name} timed out.'.format(name=name)) from error
        except (tornado.netutil.SSLCertificateError, SSLVerificationError) \
                as error:
            self.close()
            raise SSLVerificationError(
                '{name} certificate error: {error}'
                .format(name=name, error=error)) from error
        except (socket.error, ssl.SSLError, OSError, IOError) as error:
            self.close()
            if isinstance(error, NetworkError):
                raise

            if error.errno == errno.ECONNREFUSED:
                raise ConnectionRefused(
                    error.errno, os.strerror(error.errno)) from error

            # XXX: This quality case brought to you by OpenSSL and Python.
            # Example: _ssl.SSLError: [Errno 1] error:14094418:SSL
            #          routines:SSL3_READ_BYTES:tlsv1 alert unknown ca
            error_string = str(error).lower()
            if 'certificate' in error_string or 'unknown ca' in error_string:
                raise SSLVerificationError(
                    '{name} certificate error: {error}'
                    .format(name=name, error=error)) from error

            else:
                if error.errno:
                    raise NetworkError(
                        error.errno, os.strerror(error.errno)) from error
                else:
                    raise NetworkError(
                        '{name} network error: {error}'
                        .format(name=name, error=error)) from error
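
run_network_operation() above translates trollius.TimeoutError (and a pile of socket/SSL errors) into wpull's own exception hierarchy. The timeout half of that, reduced to its core, with NetworkTimedOut standing in for any domain-specific exception:

import asyncio

class NetworkTimedOut(Exception):
    """Raised when a bounded network operation exceeds its time budget."""

async def bounded(task, wait_timeout, name='Network operation'):
    # Convert the generic asyncio timeout into a domain-specific error.
    try:
        return await asyncio.wait_for(task, wait_timeout)
    except asyncio.TimeoutError as error:
        raise NetworkTimedOut('{name} timed out.'.format(name=name)) from error

Note that the original also closes the underlying connection before re-raising: wait_for cancels the task on timeout, but that does not by itself tear down the socket.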
Example #52
 def w(g):
     return asyncio.wait_for(g, args.timeout, loop=loop)
Example #53
def handle_client(client_reader, client_writer):
    data = None
    while True:
        # give client a chance to respond, timeout after 10 seconds
        line = yield trollius.From(trollius.wait_for(
            client_reader.readline(),
            timeout=10.0))
        if not line.decode().strip():
            break
        line = line.decode().rstrip()
        if data is None:
            data = line

    if data is None:
        log.warning("Expected ticket uid, received None")
        return

    data = data.decode().rstrip().split()
    log.info("Received %s", data)
    if not data:
        log.warning("No URL provided: %s" % data)
        return

    if '/' not in data[1]:
        log.warning("Invalid URL provided: %s" % data[1])
        return

    url = urlparse.urlsplit(data[1])

    try:
        obj = get_obj_from_path(url.path)
    except PagureEvException as err:
        log.warning(err.message)
        return

    origin = pagure.APP.config.get('APP_URL')
    if origin.endswith('/'):
        origin = origin[:-1]

    client_writer.write((
        "HTTP/1.0 200 OK\n"
        "Content-Type: text/event-stream\n"
        "Cache: nocache\n"
        "Connection: keep-alive\n"
        "Access-Control-Allow-Origin: %s\n\n" % origin
    ).encode())

    try:
        connection = yield trollius.From(trollius_redis.Connection.create(
            host=pagure.APP.config['REDIS_HOST'],
            port=pagure.APP.config['REDIS_PORT'],
            db=pagure.APP.config['REDIS_DB']))

        # Create subscriber.
        subscriber = yield trollius.From(connection.start_subscribe())

        # Subscribe to channel.
        yield trollius.From(subscriber.subscribe([obj.uid]))

        # Inside a while loop, wait for incoming events.
        while True:
            reply = yield trollius.From(subscriber.next_published())
            #print(u'Received: ', repr(reply.value), u'on channel', reply.channel)
            log.info(reply)
            log.info("Sending %s", reply.value)
            client_writer.write(('data: %s\n\n' % reply.value).encode())
            yield trollius.From(client_writer.drain())

    except trollius.ConnectionResetError:
        pass
    finally:
        # Whatever happens, close the connection.
        connection.close()
        client_writer.close()
Example #54
 def queue_get():
     result = (yield From(asyncio.wait_for(q.get(), 0.051, loop=loop)))
     raise Return(result)