Example #1
class DataMonitor(object):
    _STOP_REQUEST = object()

    def __init__(self, client, path, callback, args, kwargs):
        self.client = client
        self.path = path
        self.callback = callback
        self.args = args
        self.kwargs = kwargs
        self.started = AsyncResult()
        self.queue = Queue()
        self._delay = 1.343
        self.max_delay = 180

    def _monitor(self):
        """Run the monitoring loop."""
        def watcher(event):
            self.queue.put(event)

        while True:
            try:
                data, stat = self.client.get(self.path, watcher)
            except zookeeper.NoNodeException:
                if not self.started.ready():
                    self.started.set(None)
                gevent.sleep(1)
                continue
            except (zookeeper.ConnectionLossException,
                    zookeeper.SessionExpiredException,
                    zookeeper.InvalidStateException) as err:
                if not self.started.ready():
                    self.started.set_exception(err)
                    break
                logging.error("got %r while monitoring %s", str(err),
                              self.path)
                gevent.sleep(self._delay)
                self._delay += self._delay * random.random()
                self._delay = min(self._delay, self.max_delay)
                continue
            except Exception as err:
                if not self.started.ready():
                    self.started.set_exception(err)
                    break
                raise

            self.callback(data, *self.args, **self.kwargs)

            if not self.started.ready():
                self.started.set(None)

            event = self.queue.get()
            if event is self._STOP_REQUEST:
                break
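The loop above only defines how data changes are read and dispatched; starting and stopping the monitor is not shown. Below is a minimal sketch of one way to drive it, assuming the DataMonitor class above (with its gevent/AsyncResult imports) is in scope; the FakeClient and on_data callback are illustrative stand-ins, not part of the original code.

import gevent

class FakeClient(object):
    # stand-in for a ZooKeeper-style client: get() returns (data, stat) and never raises
    def get(self, path, watcher):
        return 'payload', {'version': 0}

def on_data(data):
    print('new data: %r' % (data,))

monitor = DataMonitor(FakeClient(), '/some/node', on_data, (), {})
worker = gevent.spawn(monitor._monitor)
monitor.started.get(timeout=5)                # resolves once the first read has succeeded
monitor.queue.put(DataMonitor._STOP_REQUEST)  # ask the loop to exit
worker.join()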
Example #3
    def send_raw_with_result(self, data, receiver_address):
        """ Sends data to receiver_address and returns an AsyncResult that will
        be set once the message is acknowledged.

        Always returns same AsyncResult instance for equal input.
        """
        host_port = self.get_host_port(receiver_address)
        echohash = sha3(data + receiver_address)

        if echohash not in self.senthashes_to_states:
            async_result = AsyncResult()
            self.senthashes_to_states[echohash] = SentMessageState(
                async_result,
                receiver_address,
            )
        else:
            async_result = self.senthashes_to_states[echohash].async_result

        if not async_result.ready():
            self.transport.send(
                self.raiden,
                host_port,
                data,
            )

        return async_result
Example #4
    def test__control_flow_expired_call(self):
        svc = self._make_service()
        p = IonProcessThread(name=sentinel.name, listeners=[], service=svc)
        p.start()
        p.get_ready_event().wait(timeout=5)
        self.addCleanup(p.stop)

        def make_call(call, ctx, val):
            ar = p._routing_call(call, ctx, val)
            return ar.get(timeout=10)

        ctx = { 'reply-by' : 0 }        # no need for real time, as it compares by CURRENT >= this value
        futurear = AsyncResult()
        with patch('pyon.ion.process.greenlet') as gcm:
            waitar = AsyncResult()
            gcm.getcurrent().kill.side_effect = lambda *a, **k: waitar.set()

            ar = p._routing_call(futurear.set, ctx, sentinel.val)

            waitar.get(timeout=10)

            # futurear is not set
            self.assertFalse(futurear.ready())

            # neither is the ar we got back from routing_call
            self.assertFalse(ar.ready())

            # we should've been killed, though
            self.assertEquals(gcm.getcurrent().kill.call_count, 1)
            self.assertIsInstance(gcm.getcurrent().kill.call_args[1]['exception'], IonTimeout)

        # put a new call through (to show unblocked)
        futurear2 = AsyncResult()
        ar2 = p._routing_call(futurear2.set, MagicMock(), sentinel.val2)
        ar2.get(timeout=2)
Example #5
    def test_zero_max_size(self):
        q = queue.Channel()

        def sender(evt, q):
            q.put('hi')
            evt.set('done')

        def receiver(evt, q):
            x = q.get()
            evt.set(x)

        e1 = AsyncResult()
        e2 = AsyncResult()

        p1 = gevent.spawn(sender, e1, q)
        gevent.sleep(0.001)
        self.assert_(not e1.ready())
        p2 = gevent.spawn(receiver, e2, q)
        self.assertEquals(e2.get(), 'hi')
        self.assertEquals(e1.get(), 'done')
        timeout = gevent.Timeout.start_new(0)
        try:
            gevent.joinall([p1, p2])
        finally:
            timeout.cancel()
Example #6
    def test__interrupt_control_thread(self):
        svc = self._make_service()
        p = IonProcessThread(name=sentinel.name, listeners=[], service=svc)
        p.start()
        p.get_ready_event().wait(timeout=5)
        self.addCleanup(p.stop)

        # put a call in that will never finish
        waitar = AsyncResult()      # test specific, wait for this to indicate we're being processed/hung
        callar = AsyncResult()      # test specific, an ar that is just waited on by the spin call
        def spin(inar, outar):
            outar.set(True)
            inar.wait()

        ar = p._routing_call(spin, MagicMock(), callar, waitar)

        # wait until we get notice we're being processed
        waitar.get(timeout=2)

        # interrupt it
        p._interrupt_control_thread()

        # the ar we got back from routing_call will not be set, it never finished the call
        self.assertFalse(ar.ready())

        # to prove we're unblocked, run another call through the control thread
        ar2 = p._routing_call(callar.set, MagicMock(), sentinel.val)
        ar2.get(timeout=2)
        self.assertTrue(callar.ready())
        self.assertEquals(callar.get(), sentinel.val)
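Stripped of the test scaffolding, the waitar/callar pair above is a plain two-way handshake between greenlets built from two AsyncResults; roughly (all names here are illustrative):

import gevent
from gevent.event import AsyncResult

ready = AsyncResult()    # worker -> main: "I have started and am now blocked"
release = AsyncResult()  # main -> worker: "you may finish now"

def worker():
    ready.set(True)
    return release.wait()

g = gevent.spawn(worker)
ready.get(timeout=2)     # wait until the worker is inside its blocking call
release.set('go')        # unblock it
g.join()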
Example #7
    def test_zero_max_size(self):
        q = queue.Queue(0)

        def sender(evt, q):
            q.put('hi')
            evt.set('done')

        def receiver(evt, q):
            x = q.get()
            evt.set(x)

        e1 = AsyncResult()
        e2 = AsyncResult()

        p1 = gevent.spawn(sender, e1, q)
        gevent.sleep(0.001)
        self.assert_(not e1.ready())
        p2 = gevent.spawn(receiver, e2, q)
        self.assertEquals(e2.get(), 'hi')
        self.assertEquals(e1.get(), 'done')
        timeout = gevent.Timeout.start_new(0)
        try:
            gevent.joinall([p1, p2])
        finally:
            timeout.cancel()
Example #8
    def test__interrupt_control_thread(self):
        svc = self._make_service()
        p = IonProcessThread(name=sentinel.name, listeners=[], service=svc)
        p.start()
        p.get_ready_event().wait(timeout=5)
        self.addCleanup(p.stop)

        # put a call in that will never finish
        waitar = AsyncResult(
        )  # test specific, wait for this to indicate we're being processed/hung
        callar = AsyncResult(
        )  # test specific, an ar that is just waited on by the spin call

        def spin(inar, outar):
            outar.set(True)
            inar.wait()

        ar = p._routing_call(spin, MagicMock(), callar, waitar)

        # wait until we get notice we're being processed
        waitar.get(timeout=2)

        # interrupt it
        p._interrupt_control_thread()

        # the ar we got back from routing_call will not be set, it never finished the call
        self.assertFalse(ar.ready())

        # to prove we're unblocked, run another call through the control thread
        ar2 = p._routing_call(callar.set, MagicMock(), sentinel.val)
        ar2.get(timeout=2)
        self.assertTrue(callar.ready())
        self.assertEquals(callar.get(), sentinel.val)
Example #9
    def send_raw_with_result(self, data, receiver_address):
        """ Sends data to receiver_address and returns an AsyncResult that will
        be set once the message is acknowledged.

        Always returns same AsyncResult instance for equal input.
        """
        host_port = self.get_host_port(receiver_address)
        echohash = sha3(data + receiver_address)

        if echohash not in self.senthashes_to_states:
            async_result = AsyncResult()
            self.senthashes_to_states[echohash] = SentMessageState(
                async_result,
                receiver_address,
            )
        else:
            async_result = self.senthashes_to_states[echohash].async_result

        if not async_result.ready():
            self.transport.send(
                self.raiden,
                host_port,
                data,
            )

        return async_result
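Per the docstring, the returned AsyncResult is set once the message is acknowledged, so a caller normally blocks on it with a timeout. A small sketch of that calling side; the transport object, timeout value, and what to do on timeout are assumptions, not part of the original code.

import gevent

def send_and_wait_for_ack(transport, data, receiver_address, timeout=10.0):
    async_result = transport.send_raw_with_result(data, receiver_address)
    try:
        # set by the protocol layer once the acknowledgement arrives
        return async_result.get(timeout=timeout)
    except gevent.Timeout:
        # no ack within the timeout; the caller decides whether to resend
        return None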
Example #10
class G(object):

    def __init__(self, pool_size=300, timeout=1):
        self.pool_size = pool_size
        self.timeout = timeout

        logging.basicConfig(format='%(message)s')
        self.logger = logging.getLogger(name='g')
        self.logger.setLevel(logging.INFO)

        self.pool = Pool(self.pool_size)
        self.async_result = AsyncResult()

    def url(self, ip, params=None):
        if params:
            suffix = '/search?q=%s' % urllib.quote_plus(' '.join(params))
        else:
            suffix = ''
        return 'http://%s%s' % (ip, suffix)

    def worker(self, ip):
        if not self.async_result.ready():
            try:
                resp = requests.head(self.url(ip), timeout=self.timeout)
            except requests.exceptions.RequestException:
                pass  # ignore timeouts, connection errors
            else:
                if resp.status_code == requests.codes.ok:
                    if not self.async_result.ready():
                        self.async_result.set(ip)

    def run(self):
        # read and parse ip json file
        gdir = os.path.dirname(__file__)
        path = os.path.join(gdir, 'ips.json')
        ips = json.load(open(path))
        # start a thread for pool
        gevent.spawn(self.pool.map, self.worker, ips)
        # block main thread for the 1st available IP
        try:
            ip = self.async_result.get(timeout=5)
        except gevent.timeout.Timeout:
            self.logger.info('Timeout (5s).')
        else:
            self.pool.kill()
            self.logger.info(ip)
            webbrowser.open(self.url(ip, params=sys.argv[1:]))
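The class above is a "first result wins" race: many probes run in a pool and the first one to find a responsive IP sets a single shared AsyncResult, which unblocks run(). The same pattern in isolation, with the candidate list and the probe body as placeholders:

import gevent
from gevent.pool import Pool
from gevent.event import AsyncResult

winner = AsyncResult()

def probe(candidate):
    gevent.sleep(0.01 * candidate)   # stand-in for a network request
    if not winner.ready():           # only the first finisher publishes a result
        winner.set(candidate)

pool = Pool(10)
gevent.spawn(pool.map, probe, range(5))
print(winner.get(timeout=1))         # prints the first candidate that called set()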
Example #11
class DataMonitor(object):
    _STOP_REQUEST = object()

    def __init__(self, client, path, callback, args, kwargs):
        self.client = client
        self.path = path
        self.callback = callback
        self.args = args
        self.kwargs = kwargs
        self.started = AsyncResult()
        self.queue = Queue()

    def _monitor(self):
        """Run the monitoring loop."""
        def watcher(event):
            self.queue.put(event)

        while True:
            try:
                data, stat = self.client.get(self.path, watcher)
            except zookeeper.NoNodeException:
                if not self.started.ready():
                    self.started.set(None)
                gevent.sleep(1)
                continue
            except Exception as err:
                if not self.started.ready():
                    self.started.set_exception(err)
                    break
                raise

            self.callback(data, *self.args, **self.kwargs)

            if not self.started.ready():
                self.started.set(None)

            event = self.queue.get()
            if event is self._STOP_REQUEST:
                break
Example #12
class Future:
    def __init__(self):
        self.result = AsyncResult()

    def set(self, value):
        self.result.set(value)

    def get(self):
        return self.result.get()

    def on_ready(self, func):
        while not self.result.ready():
            gevent.sleep(0)
        func(self.result.get())
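A short usage sketch for this wrapper: one greenlet fulfils the Future while another blocks on it (the producer function and the value 42 are made up for illustration):

import gevent

future = Future()

def producer():
    gevent.sleep(0.1)
    future.set(42)

gevent.spawn(producer)
print(future.get())   # blocks until producer() calls set(); prints 42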
Example #13
def test_termination_message_is_ignored_when_sender_is_not_watched(defer):
    node = DummyNode()
    defer(node.stop)

    received = AsyncResult()

    class Watcher(Actor):
        def receive(self, msg):
            received.set(msg)

    dummy = node.spawn(Actor)
    w = node.spawn(Watcher)
    w << ('terminated', dummy)
    sleep(.01)
    ok_(not received.ready())
Example #14
def test_termination_message_is_ignored_when_sender_is_not_watched(defer):
    node = DummyNode()
    defer(node.stop)

    received = AsyncResult()

    class Watcher(Actor):
        def receive(self, msg):
            received.set(msg)

    dummy = node.spawn(Actor)
    w = node.spawn(Watcher)
    w << ('terminated', dummy)
    sleep(.01)
    ok_(not received.ready())
Example #15
    def test_zero_max_size(self):
        q = queue.Channel()

        def sender(evt, q):
            q.put("hi")
            evt.set("done")

        def receiver(evt, q):
            x = q.get()
            evt.set(x)

        e1 = AsyncResult()
        e2 = AsyncResult()

        p1 = gevent.spawn(sender, e1, q)
        gevent.sleep(0.001)
        self.assertTrue(not e1.ready())
        p2 = gevent.spawn(receiver, e2, q)
        self.assertEqual(e2.get(), "hi")
        self.assertEqual(e1.get(), "done")
        with gevent.Timeout(0):
            gevent.joinall([p1, p2])
Example #16
    def test_zero_max_size(self):
        q = queue.Channel()

        def sender(evt, q):
            q.put('hi')
            evt.set('done')

        def receiver(evt, q):
            x = q.get()
            evt.set(x)

        e1 = AsyncResult()
        e2 = AsyncResult()

        p1 = gevent.spawn(sender, e1, q)
        gevent.sleep(0.001)
        self.assertTrue(not e1.ready())
        p2 = gevent.spawn(receiver, e2, q)
        self.assertEqual(e2.get(), 'hi')
        self.assertEqual(e1.get(), 'done')
        with gevent.Timeout(0):
            gevent.joinall([p1, p2])
Example #17
    def test__control_flow_expired_call(self):
        svc = self._make_service()
        p = IonProcessThread(name=sentinel.name, listeners=[], service=svc)
        p.start()
        p.get_ready_event().wait(timeout=5)
        self.addCleanup(p.stop)

        def make_call(call, ctx, val):
            ar = p._routing_call(call, ctx, val)
            return ar.get(timeout=10)

        ctx = {
            'reply-by': 0
        }  # no need for real time, as it compares by CURRENT >= this value
        futurear = AsyncResult()
        with patch('pyon.ion.process.greenlet') as gcm:
            waitar = AsyncResult()
            gcm.getcurrent().kill.side_effect = lambda *a, **k: waitar.set()

            ar = p._routing_call(futurear.set, ctx, sentinel.val)

            waitar.get(timeout=10)

            # futurear is not set
            self.assertFalse(futurear.ready())

            # neither is the ar we got back from routing_call
            self.assertFalse(ar.ready())

            # we should've been killed, though
            self.assertEquals(gcm.getcurrent().kill.call_count, 1)
            self.assertIsInstance(
                gcm.getcurrent().kill.call_args[1]['exception'], IonTimeout)

        # put a new call through (to show unblocked)
        futurear2 = AsyncResult()
        ar2 = p._routing_call(futurear2.set, MagicMock(), sentinel.val2)
        ar2.get(timeout=2)
Example #18
class MsgBase(MsgSender,MsgReceiver):
	"""A message which expects a reply."""
	timeout = None
	blocking = False # True if the message needs a reply before sending more

	_timer = None
	_last_channel = None
	_send_err = None
	_recv_err = None
	
	def __init__(self,*a,**k):
		super(MsgBase,self).__init__(*a,**k)
		self.result = AsyncResult()

	def abort(self):
		if not self.result.ready():
			self.result.set(RuntimeError("aborted"))
		super(MsgBase,self).abort()

	def list(self):
		s = super(MsgBase,self)
		if hasattr(s,'list'): yield s

		if self.timeout:
			yield("timeout",self.timeout)
		if self.result.ready():
			try:
				yield("result",self.result.get())
			except Exception as ex:
				yield("error",repr(ex))
		else:
			yield("status","pending")

	def send(self,channel):
		"""write myself to the channel. Return None|SEND_AGAIN|RECV_AGAIN."""
		self._set_timeout()
		self._last_channel = channel

		if self._send_err is None:
			self._send_err = MSG_ERROR("You need to override %s.send"%self.__class__.__name__)
		return self._send_err
	
	def recv(self,data):
		"""A message has been received. Return NOT_MINE|MINE|RECV_AGAIN|SEND_AGAIN."""
		self._clear_timeout()

		if self._recv_err is None:
			self._recv_err = MSG_ERROR("You need to override %s.recv"%self.__class__.__name__)
		return self._recv_err
	
	def retry(self):
		"""Check whether to retry this message"""
		self._clear_timeout()
		return SEND_AGAIN

	def done(self):
		"""Processing is finished."""
		self._clear_timeout()
		if self.result is not None and not self.result.successful():
			raise RuntimeError("Did not trigger the result in %s.dataReceived()"%(self.__class__.__name__,))

	def do_timeout(self):
		if self._last_channel is not None:
			self._last_channel.close()
			self._last_channel = None
		
	def _set_timeout(self):
		if self.timeout is not None:
			self._timer = callLater(True,self.timeout,self._timeout)

	def _clear_timeout(self):
		if self._timer is not None:
			self._timer.cancel()
			self._timer = None
	
	def _timeout(self):
		self._timer = None
		self.do_timeout()
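Note that abort() stores the RuntimeError with set() rather than set_exception(), so a waiter gets the exception object back as a value instead of having it raised, and successful() stays True, which is what done() checks. A minimal sketch of the difference between the two calls:

from gevent.event import AsyncResult

a = AsyncResult()
a.set(RuntimeError("aborted"))      # stored as a plain value, as in abort() above
print(a.get())                      # returns the RuntimeError instance, does not raise
print(a.successful())               # True: a value was stored

b = AsyncResult()
b.set_exception(RuntimeError("aborted"))
print(b.successful())               # False: an exception was stored
b.get()                             # raises RuntimeError("aborted")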
Example #19
class Popen(SubprocessProtocol):

    def __init__(self, args, bufsize=-1,
                 stdin=None, stdout=None, stderr=None,
                 shell=False, universal_newlines=False, **kwargs):
        """Create new Popen instance."""
        assert not universal_newlines, "universal_newlines must be False"
        hub = get_hub()
        self.pid = None
        self.returncode = None
        self.universal_newlines = universal_newlines
        self.result = AsyncResult()
        self.stdin = None
        self.stdout = None
        self.stderr = None
        self._transport = None

        if shell:
            hub.wait_async(hub.loop.subprocess_shell(
                lambda: _DelegateProtocol(self), *args,
                stdin=stdin, stdout=stdout, stderr=stderr,
                bufsize=bufsize, **kwargs))
        else:
            hub.wait_async(hub.loop.subprocess_exec(
                lambda: _DelegateProtocol(self), *args,
                stdin=stdin, stdout=stdout, stderr=stderr,
                bufsize=bufsize, **kwargs))

    def __repr__(self):
        return '<%s at 0x%x pid=%r returncode=%r>' % (self.__class__.__name__, id(self), self.pid, self.returncode)

    def communicate(self, input=None, timeout=None):
        """Interact with process: Send data to stdin.  Read data from
        stdout and stderr, until end-of-file is reached.  Wait for
        process to terminate.  The optional input argument should be a
        string to be sent to the child process, or None, if no data
        should be sent to the child.

        communicate() returns a tuple (stdout, stderr)."""
        #TODO: timeout
        greenlets = []
        if self.stdin:
            greenlets.append(spawn(write_and_close, self.stdin, input))

        if self.stdout:
            stdout = spawn(self.stdout.read)
            greenlets.append(stdout)
        else:
            stdout = None

        if self.stderr:
            stderr = spawn(self.stderr.read)
            greenlets.append(stderr)
        else:
            stderr = None

        joinall(greenlets)

        if self.stdout:
            self.stdout.close()
        if self.stderr:
            self.stderr.close()

        self.wait()
        return (None if stdout is None else stdout.value or '',
                None if stderr is None else stderr.value or '')

    def poll(self):
        return self._internal_poll()

    def rawlink(self, callback):
        self.result.rawlink(linkproxy(callback, self))
    # XXX unlink

    def send_signal(self, sig):
        self._transport.send_signal(sig)

    def terminate(self):
        self._transport.terminate()

    def kill(self):
        #noinspection PyProtectedMember
        if self._transport._proc is not None:
            self._transport.kill()

    if mswindows:
        #
        # Windows methods
        #
        def _internal_poll(self):
            """Check if child process has terminated.  Returns returncode
            attribute.
            """
            if self.returncode is None:
                if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
                    self.returncode = GetExitCodeProcess(self._handle)
                    self.result.set(self.returncode)
            return self.returncode

        def rawlink(self, callback):
            if not self.result.ready() and not self._waiting:
                self._waiting = True
                Greenlet.spawn(self._wait)
            self.result.rawlink(linkproxy(callback, self))
            # XXX unlink

        def _blocking_wait(self):
            WaitForSingleObject(self._handle, INFINITE)
            self.returncode = GetExitCodeProcess(self._handle)
            return self.returncode

        def _wait(self):
            self.threadpool.spawn(self._blocking_wait).rawlink(self.result)

        def wait(self, timeout=None):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            if self.returncode is None:
                if not self._waiting:
                    self._waiting = True
                    self._wait()
            return self.result.wait(timeout=timeout)

    else:
        #
        # POSIX methods
        #
        def _internal_poll(self):
            """Check if child process has terminated.  Returns returncode
            attribute.
            """
            if self.returncode is None:
                if get_hub() is not getcurrent():
                    sig_pending = getattr(self._loop, 'sig_pending', True)
                    if sig_pending:
                        sleep(0.00001)
            return self.returncode

        def wait(self, timeout=None):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            return self.result.wait(timeout=timeout)
Example #20
class OutTimer(Collected):
	"""Timer for timed outputs"""
	storage = OutTimers.storage
	q = None
	_timer = None

	def __init__(self,parent,timer,nextval):
		global tseq
		tseq += 1
		self.name = parent.name+(str(tseq),)
		super(OutTimer,self).__init__()
		self.parent = parent
		self.end = timer
		self.val = nextval
		self.q = AsyncResult()
		self._start()

	def info(self):
		return "%s:%s" % (self.name, self.val)

	def list(self):
		n = now()
		for r in super(OutTimer,self).list():
			yield r
		yield ("output",self.parent.name)
		yield ("start", self.started)
		yield ("end", self.end)
		yield ("next value",self.val)

	def _start(self):
		if self._timer:
			self._timer.cancel()
		self.started = now()
		self._timer = callLater(False,self.end,self._timeout)
	
	def _timeout(self):
		self._timer = None
		try:
			self.parent.write(self.val)
		except Exception as ex:
			fix_exception(ex)
			self.q.set(ex)
		else:
			self.q.set(None)
	
	def done(self):
		"""called externally via _tmwrite() when the external timer writes"""
		if self._timer:
			self._timer.cancel()
			self._timer = None
		if not self.q.ready():
			self.q.set(None)

	def cancel(self):
		self.delete()

	def delete(self, ctx=None):
		if self._timer is not None:
			self._timer.cancel()
			self._timer = None
		self.q.set(DelayCancelled(self))

		super(OutTimer,self).delete(ctx)
Example #21
class ChildrenMonitor(object):
    """Simple monitor that monitors the children of a node and their
    content.
    """
    _STOP_REQUEST = object()

    def __init__(self, client, path, into, factory, args, listener):
        self.client = client
        self.path = path
        self.into = into if into is not None else {}
        self.factory = factory if factory is not None else str
        self.args = args
        self.listener = listener or MonitorListener()
        self.started = AsyncResult()
        self.queue = Queue()
        self.stats = {}
        self._delay = 1.343
        self.max_delay = 180

    def _monitor(self):
        """Run the monitoring loop."""
        def watcher(event):
            self.queue.put(event)

        while True:
            try:
                children = self.client.get_children(self.path, watcher)
            except zookeeper.NoNodeException:
                if not self.started.ready():
                    self.started.set(None)
                gevent.sleep(1)
                continue
            except (zookeeper.ConnectionLossException,
                    zookeeper.SessionExpiredException,
                    zookeeper.InvalidStateException) as err:
                if not self.started.ready():
                    self.started.set_exception(err)
                    break
                logging.error("got %r while monitoring %s", str(err),
                              self.path)
                gevent.sleep(self._delay)
                self._delay += self._delay * random.random()
                self._delay = min(self._delay, self.max_delay)
                continue
            except Exception as err:
                if not self.started.ready():
                    self.started.set_exception(err)
                    break
                raise

            for child in children:
                if child not in self.stats:
                    try:
                        data, stat = self.client.get(os.path.join(self.path,
                                                                  child))
                    except zookeeper.NoNodeException:
                        print "race condition while getting", os.path.join(
                            self.path, child)
                    else:
                        self.into[child] = self.factory(data, *self.args)
                        self.listener.created(child, self.into[child])
                        self.stats[child] = stat
                else:
                    try:
                        data, stat = self.client.get(os.path.join(self.path,
                                                                  child))
                    except zookeeper.NoNodeException:
                        print "race condition while getting", os.path.join(
                            self.path, child)
                        # should we remove it here?
                    else:
                        if stat['version'] != self.stats[child]['version']:
                            self.into[child] = self.factory(data, *self.args)
                            self.listener.modified(child, self.into[child])
                        self.stats[child] = stat
            for child in self.into.keys():
                if child not in children:
                    del self.into[child]
                    del self.stats[child]
                    self.listener.deleted(child)

            if not self.started.ready():
                self.started.set(None)

            self.listener.commit()

            event = self.queue.get()
            if event is self._STOP_REQUEST:
                break
Example #22
class Popen(object):
    def __init__(self,
                 args,
                 bufsize=0,
                 executable=None,
                 stdin=None,
                 stdout=None,
                 stderr=None,
                 preexec_fn=None,
                 close_fds=False,
                 shell=False,
                 cwd=None,
                 env=None,
                 universal_newlines=False,
                 startupinfo=None,
                 creationflags=0,
                 threadpool=None):
        """Create new Popen instance."""
        if not isinstance(bufsize, (int, long)):
            raise TypeError("bufsize must be an integer")
        hub = get_hub()

        if mswindows:
            if preexec_fn is not None:
                raise ValueError("preexec_fn is not supported on Windows "
                                 "platforms")
            if close_fds and (stdin is not None or stdout is not None
                              or stderr is not None):
                raise ValueError(
                    "close_fds is not supported on Windows "
                    "platforms if you redirect stdin/stdout/stderr")
            if threadpool is None:
                threadpool = hub.threadpool
            self.threadpool = threadpool
            self._waiting = False
        else:
            # POSIX
            if startupinfo is not None:
                raise ValueError("startupinfo is only supported on Windows "
                                 "platforms")
            if creationflags != 0:
                raise ValueError("creationflags is only supported on Windows "
                                 "platforms")
            assert threadpool is None
            self._loop = hub.loop

        self.stdin = None
        self.stdout = None
        self.stderr = None
        self.pid = None
        self.returncode = None
        self.universal_newlines = universal_newlines
        self.result = AsyncResult()

        # Input and output objects. The general principle is like
        # this:
        #
        # Parent                   Child
        # ------                   -----
        # p2cwrite   ---stdin--->  p2cread
        # c2pread    <--stdout---  c2pwrite
        # errread    <--stderr---  errwrite
        #
        # On POSIX, the child objects are file descriptors.  On
        # Windows, these are Windows file handles.  The parent objects
        # are file descriptors on both platforms.  The parent objects
        # are None when not using PIPEs. The child objects are None
        # when not redirecting.

        (p2cread, p2cwrite, c2pread, c2pwrite, errread,
         errwrite) = self._get_handles(stdin, stdout, stderr)

        self._execute_child(args, executable, preexec_fn, close_fds, cwd, env,
                            universal_newlines, startupinfo, creationflags,
                            shell, p2cread, p2cwrite, c2pread, c2pwrite,
                            errread, errwrite)

        if mswindows:
            if p2cwrite is not None:
                p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
            if c2pread is not None:
                c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
            if errread is not None:
                errread = msvcrt.open_osfhandle(errread.Detach(), 0)

        if p2cwrite is not None:
            self.stdin = FileObject(p2cwrite, 'wb')
        if c2pread is not None:
            if universal_newlines:
                self.stdout = FileObject(c2pread, 'rU')
            else:
                self.stdout = FileObject(c2pread, 'rb')
        if errread is not None:
            if universal_newlines:
                self.stderr = FileObject(errread, 'rU')
            else:
                self.stderr = FileObject(errread, 'rb')

    def __repr__(self):
        return '<%s at 0x%x pid=%r returncode=%r>' % (
            self.__class__.__name__, id(self), self.pid, self.returncode)

    def _on_child(self, watcher):
        watcher.stop()
        status = watcher.rstatus
        if os.WIFSIGNALED(status):
            self.returncode = -os.WTERMSIG(status)
        else:
            self.returncode = os.WEXITSTATUS(status)
        self.result.set(self.returncode)

    def communicate(self, input=None):
        """Interact with process: Send data to stdin.  Read data from
        stdout and stderr, until end-of-file is reached.  Wait for
        process to terminate.  The optional input argument should be a
        string to be sent to the child process, or None, if no data
        should be sent to the child.

        communicate() returns a tuple (stdout, stderr)."""
        greenlets = []
        if self.stdin:
            greenlets.append(spawn(write_and_close, self.stdin, input))

        if self.stdout:
            stdout = spawn(self.stdout.read)
            greenlets.append(stdout)
        else:
            stdout = None

        if self.stderr:
            stderr = spawn(self.stderr.read)
            greenlets.append(stderr)
        else:
            stderr = None

        joinall(greenlets)

        if self.stdout:
            self.stdout.close()
        if self.stderr:
            self.stderr.close()

        self.wait()
        return (None if stdout is None else stdout.value or '',
                None if stderr is None else stderr.value or '')

    def poll(self):
        return self._internal_poll()

    def rawlink(self, callback):
        self.result.rawlink(linkproxy(callback, self))

    # XXX unlink

    if mswindows:
        #
        # Windows methods
        #
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            if stdin is None and stdout is None and stderr is None:
                return (None, None, None, None, None, None)

            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None

            if stdin is None:
                p2cread = GetStdHandle(STD_INPUT_HANDLE)
                if p2cread is None:
                    p2cread, _ = CreatePipe(None, 0)
            elif stdin == PIPE:
                p2cread, p2cwrite = CreatePipe(None, 0)
            elif isinstance(stdin, int):
                p2cread = msvcrt.get_osfhandle(stdin)
            else:
                # Assuming file-like object
                p2cread = msvcrt.get_osfhandle(stdin.fileno())
            p2cread = self._make_inheritable(p2cread)

            if stdout is None:
                c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
                if c2pwrite is None:
                    _, c2pwrite = CreatePipe(None, 0)
            elif stdout == PIPE:
                c2pread, c2pwrite = CreatePipe(None, 0)
            elif isinstance(stdout, int):
                c2pwrite = msvcrt.get_osfhandle(stdout)
            else:
                # Assuming file-like object
                c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
            c2pwrite = self._make_inheritable(c2pwrite)

            if stderr is None:
                errwrite = GetStdHandle(STD_ERROR_HANDLE)
                if errwrite is None:
                    _, errwrite = CreatePipe(None, 0)
            elif stderr == PIPE:
                errread, errwrite = CreatePipe(None, 0)
            elif stderr == STDOUT:
                errwrite = c2pwrite
            elif isinstance(stderr, int):
                errwrite = msvcrt.get_osfhandle(stderr)
            else:
                # Assuming file-like object
                errwrite = msvcrt.get_osfhandle(stderr.fileno())
            errwrite = self._make_inheritable(errwrite)

            return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite)

        def _make_inheritable(self, handle):
            """Return a duplicate of handle, which is inheritable"""
            return DuplicateHandle(GetCurrentProcess(), handle,
                                   GetCurrentProcess(), 0, 1,
                                   DUPLICATE_SAME_ACCESS)

        def _find_w9xpopen(self):
            """Find and return absolut path to w9xpopen.exe"""
            w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
                                    "w9xpopen.exe")
            if not os.path.exists(w9xpopen):
                # Eeek - file-not-found - possibly an embedding
                # situation - see if we can locate it in sys.exec_prefix
                w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
                                        "w9xpopen.exe")
                if not os.path.exists(w9xpopen):
                    raise RuntimeError("Cannot locate w9xpopen.exe, which is "
                                       "needed for Popen to work with your "
                                       "shell or platform.")
            return w9xpopen

        def _execute_child(self, args, executable, preexec_fn, close_fds, cwd,
                           env, universal_newlines, startupinfo, creationflags,
                           shell, p2cread, p2cwrite, c2pread, c2pwrite,
                           errread, errwrite):
            """Execute program (MS Windows version)"""

            if not isinstance(args, types.StringTypes):
                args = list2cmdline(args)

            # Process startup details
            if startupinfo is None:
                startupinfo = STARTUPINFO()
            if None not in (p2cread, c2pwrite, errwrite):
                startupinfo.dwFlags |= STARTF_USESTDHANDLES
                startupinfo.hStdInput = p2cread
                startupinfo.hStdOutput = c2pwrite
                startupinfo.hStdError = errwrite

            if shell:
                startupinfo.dwFlags |= STARTF_USESHOWWINDOW
                startupinfo.wShowWindow = SW_HIDE
                comspec = os.environ.get("COMSPEC", "cmd.exe")
                args = '{} /c "{}"'.format(comspec, args)
                if GetVersion() >= 0x80000000 or os.path.basename(
                        comspec).lower() == "command.com":
                    # Win9x, or using command.com on NT. We need to
                    # use the w9xpopen intermediate program. For more
                    # information, see KB Q150956
                    # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
                    w9xpopen = self._find_w9xpopen()
                    args = '"%s" %s' % (w9xpopen, args)
                    # Not passing CREATE_NEW_CONSOLE has been known to
                    # cause random failures on win9x.  Specifically a
                    # dialog: "Your program accessed mem currently in
                    # use at xxx" and a hopeful warning about the
                    # stability of your system.  Cost is Ctrl+C wont
                    # kill children.
                    creationflags |= CREATE_NEW_CONSOLE

            # Start the process
            try:
                hp, ht, pid, tid = CreateProcess(
                    executable,
                    args,
                    # no special security
                    None,
                    None,
                    int(not close_fds),
                    creationflags,
                    env,
                    cwd,
                    startupinfo)
            except pywintypes.error as e:
                # Translate pywintypes.error to WindowsError, which is
                # a subclass of OSError.  FIXME: We should really
                # translate errno using _sys_errlist (or similar), but
                # how can this be done from Python?
                raise WindowsError(*e.args)
            finally:
                # Child is launched. Close the parent's copy of those pipe
                # handles that only the child should have open.  You need
                # to make sure that no handles to the write end of the
                # output pipe are maintained in this process or else the
                # pipe will not close when the child process exits and the
                # ReadFile will hang.
                if p2cread is not None:
                    p2cread.Close()
                if c2pwrite is not None:
                    c2pwrite.Close()
                if errwrite is not None:
                    errwrite.Close()

            # Retain the process handle, but close the thread handle
            self._handle = hp
            self.pid = pid
            ht.Close()

        def _internal_poll(self):
            """Check if child process has terminated.  Returns returncode
            attribute.
            """
            if self.returncode is None:
                if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
                    self.returncode = GetExitCodeProcess(self._handle)
                    self.result.set(self.returncode)
            return self.returncode

        def rawlink(self, callback):
            if not self.result.ready() and not self._waiting:
                self._waiting = True
                Greenlet.spawn(self._wait)
            self.result.rawlink(linkproxy(callback, self))
            # XXX unlink

        def _blocking_wait(self):
            WaitForSingleObject(self._handle, INFINITE)
            self.returncode = GetExitCodeProcess(self._handle)
            return self.returncode

        def _wait(self):
            self.threadpool.spawn(self._blocking_wait).rawlink(self.result)

        def wait(self, timeout=None):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            if self.returncode is None:
                if not self._waiting:
                    self._waiting = True
                    self._wait()
            return self.result.wait(timeout=timeout)

        def send_signal(self, sig):
            """Send a signal to the process
            """
            if sig == signal.SIGTERM:
                self.terminate()
            elif sig == signal.CTRL_C_EVENT:
                os.kill(self.pid, signal.CTRL_C_EVENT)
            elif sig == signal.CTRL_BREAK_EVENT:
                os.kill(self.pid, signal.CTRL_BREAK_EVENT)
            else:
                raise ValueError("Unsupported signal: {}".format(sig))

        def terminate(self):
            """Terminates the process
            """
            TerminateProcess(self._handle, 1)

        kill = terminate

    else:
        #
        # POSIX methods
        #
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None

            if stdin is None:
                pass
            elif stdin == PIPE:
                p2cread, p2cwrite = self.pipe_cloexec()
            elif isinstance(stdin, int):
                p2cread = stdin
            else:
                # Assuming file-like object
                p2cread = stdin.fileno()

            if stdout is None:
                pass
            elif stdout == PIPE:
                c2pread, c2pwrite = self.pipe_cloexec()
            elif isinstance(stdout, int):
                c2pwrite = stdout
            else:
                # Assuming file-like object
                c2pwrite = stdout.fileno()

            if stderr is None:
                pass
            elif stderr == PIPE:
                errread, errwrite = self.pipe_cloexec()
            elif stderr == STDOUT:
                errwrite = c2pwrite
            elif isinstance(stderr, int):
                errwrite = stderr
            else:
                # Assuming file-like object
                errwrite = stderr.fileno()

            return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite)

        def _set_cloexec_flag(self, fd, cloexec=True):
            try:
                cloexec_flag = fcntl.FD_CLOEXEC
            except AttributeError:
                cloexec_flag = 1

            old = fcntl.fcntl(fd, fcntl.F_GETFD)
            if cloexec:
                fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
            else:
                fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)

        def _remove_nonblock_flag(self, fd):
            flags = fcntl.fcntl(fd, fcntl.F_GETFL) & (~os.O_NONBLOCK)
            fcntl.fcntl(fd, fcntl.F_SETFL, flags)

        def pipe_cloexec(self):
            """Create a pipe with FDs set CLOEXEC."""
            # Pipes' FDs are set CLOEXEC by default because we don't want them
            # to be inherited by other subprocesses: the CLOEXEC flag is removed
            # from the child's FDs by _dup2(), between fork() and exec().
            # This is not atomic: we would need the pipe2() syscall for that.
            r, w = os.pipe()
            self._set_cloexec_flag(r)
            self._set_cloexec_flag(w)
            return r, w

        def _close_fds(self, but):
            if hasattr(os, 'closerange'):
                os.closerange(3, but)
                os.closerange(but + 1, MAXFD)
            else:
                for i in xrange(3, MAXFD):
                    if i == but:
                        continue
                    try:
                        os.close(i)
                    except:
                        pass

        def _execute_child(self, args, executable, preexec_fn, close_fds, cwd,
                           env, universal_newlines, startupinfo, creationflags,
                           shell, p2cread, p2cwrite, c2pread, c2pwrite,
                           errread, errwrite):
            """Execute program (POSIX version)"""

            if isinstance(args, types.StringTypes):
                args = [args]
            else:
                args = list(args)

            if shell:
                args = ["/bin/sh", "-c"] + args
                if executable:
                    args[0] = executable

            if executable is None:
                executable = args[0]

            self._loop.install_sigchld()

            # For transferring possible exec failure from child to parent
            # The first char specifies the exception type: 0 means
            # OSError, 1 means some other error.
            errpipe_read, errpipe_write = self.pipe_cloexec()
            try:
                try:
                    gc_was_enabled = gc.isenabled()
                    # Disable gc to avoid bug where gc -> file_dealloc ->
                    # write to stderr -> hang.  http://bugs.python.org/issue1336
                    gc.disable()
                    try:
                        self.pid = fork()
                    except:
                        if gc_was_enabled:
                            gc.enable()
                        raise
                    if self.pid == 0:
                        # Child
                        try:
                            # Close parent's pipe ends
                            if p2cwrite is not None:
                                os.close(p2cwrite)
                            if c2pread is not None:
                                os.close(c2pread)
                            if errread is not None:
                                os.close(errread)
                            os.close(errpipe_read)

                            # When duping fds, if there arises a situation
                            # where one of the fds is either 0, 1 or 2, it
                            # is possible that it is overwritten (#12607).
                            if c2pwrite == 0:
                                c2pwrite = os.dup(c2pwrite)
                            if errwrite == 0 or errwrite == 1:
                                errwrite = os.dup(errwrite)

                            # Dup fds for child
                            def _dup2(a, b):
                                # dup2() removes the CLOEXEC flag but
                                # we must do it ourselves if dup2()
                                # would be a no-op (issue #10806).
                                if a == b:
                                    self._set_cloexec_flag(a, False)
                                elif a is not None:
                                    os.dup2(a, b)
                                self._remove_nonblock_flag(b)

                            _dup2(p2cread, 0)
                            _dup2(c2pwrite, 1)
                            _dup2(errwrite, 2)

                            # Close pipe fds.  Make sure we don't close the
                            # same fd more than once, or standard fds.
                            closed = set([None])
                            for fd in [p2cread, c2pwrite, errwrite]:
                                if fd not in closed and fd > 2:
                                    os.close(fd)
                                    closed.add(fd)

                            # Close all other fds, if asked for
                            if close_fds:
                                self._close_fds(but=errpipe_write)

                            if cwd is not None:
                                os.chdir(cwd)

                            if preexec_fn:
                                preexec_fn()

                            if env is None:
                                os.execvp(executable, args)
                            else:
                                os.execvpe(executable, args, env)

                        except:
                            exc_type, exc_value, tb = sys.exc_info()
                            # Save the traceback and attach it to the exception object
                            exc_lines = traceback.format_exception(
                                exc_type, exc_value, tb)
                            exc_value.child_traceback = ''.join(exc_lines)
                            os.write(errpipe_write, pickle.dumps(exc_value))

                        finally:
                            # Make sure that the process exits no matter what.
                            # The return code does not matter much as it won't be
                            # reported to the application
                            os._exit(1)

                    # Parent
                    self._watcher = self._loop.child(self.pid)
                    self._watcher.start(self._on_child, self._watcher)

                    if gc_was_enabled:
                        gc.enable()
                finally:
                    # be sure the FD is closed no matter what
                    os.close(errpipe_write)

                if p2cread is not None and p2cwrite is not None:
                    os.close(p2cread)
                if c2pwrite is not None and c2pread is not None:
                    os.close(c2pwrite)
                if errwrite is not None and errread is not None:
                    os.close(errwrite)

                # Wait for exec to fail or succeed; possibly raising exception
                errpipe_read = FileObject(errpipe_read, 'rb')
                data = errpipe_read.read()
            finally:
                if hasattr(errpipe_read, 'close'):
                    errpipe_read.close()
                else:
                    os.close(errpipe_read)

            if data != "":
                self.wait()
                child_exception = pickle.loads(data)
                for fd in (p2cwrite, c2pread, errread):
                    if fd is not None:
                        os.close(fd)
                raise child_exception

        def _handle_exitstatus(self, sts):
            if os.WIFSIGNALED(sts):
                self.returncode = -os.WTERMSIG(sts)
            elif os.WIFEXITED(sts):
                self.returncode = os.WEXITSTATUS(sts)
            else:
                # Should never happen
                raise RuntimeError("Unknown child exit status!")

        def _internal_poll(self):
            """Check if child process has terminated.  Returns returncode
            attribute.
            """
            if self.returncode is None:
                if get_hub() is not getcurrent():
                    sig_pending = getattr(self._loop, 'sig_pending', True)
                    if sig_pending:
                        sleep(0.00001)
            return self.returncode

        def wait(self, timeout=None):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            return self.result.wait(timeout=timeout)

        def send_signal(self, sig):
            """Send a signal to the process
            """
            os.kill(self.pid, sig)

        def terminate(self):
            """Terminate the process with SIGTERM
            """
            self.send_signal(signal.SIGTERM)

        def kill(self):
            """Kill the process with SIGKILL
            """
            self.send_signal(signal.SIGKILL)
Example #23
class ProxyBot(Bot):

    def __init__(self, name, client, game, bot):
        self.name = name
        self.client = client
        self.bot = bot
        if bot:
            self.TIMEOUT = 60.0
        else:
            self.TIMEOUT = None

        self.expecting = None
        self._vote = None
        self._select = None
        self._sabotage = None
        self._join = None
        self._part = None
        self.game = game 

    def __call__(self, game, index, spy):
        """This function pretends to be a Builder, but in fact just
        configures this object in place as it's easier to setup and maintain."""
        Player.__init__(self, self.name, index)
        self.state = game
        self.spy = spy

        self._join = Event()

        self.channel = '%s-player-%i' % (self.game, index)
        self.client.send_message(message.Join(self.channel))
        self.client.send_message(message.Join(self.game))

        # Use elegant /INVITE command for humans that have better clients.
        # FIXME: Invitation fails when there's already someone else in the channel.
        self.client.send_message(message.Command([self.name, self.channel], 'INVITE'))
        return self

    def bakeTeam(self, team):
        return ', '.join([str(p) for p in team])

    def makeTeam(self, msg):
        for s in '\t,.!;?': msg = msg.replace(s, ' ')
        names = [n for n in msg.split(' ') if n]
        players = []
        for n in names:
            players.append(self.makePlayer(n))
        return players

    def makePlayer(self, name):
        for p in self.state.players:
            if str(p.index) in name:
                return p
            if name in p.name:
                return p
        assert False, "Can't find player for input name '%s'." % (name)

    def makeAnnouncement(self, msg):
        return {self.makePlayer(m.group(1)): float(m.group(2).rstrip('.')) for m in RE_MAPPING.finditer(msg)}

    def send(self, msg):
        self.client.msg(self.channel, msg)

    def onGameRevealed(self, players, spies):
        roles = {True: "Spy", False: "Resistance"}
        s = ""
        if self.spy:
            s = "; SPIES " + self.bakeTeam(spies)

        w = self._join.wait() # timeout=self.TIMEOUT
        assert w is not None, "Problem with bot %r joining." % self
        self._join = None
        self.send('REVEAL %s; ROLE %s; PLAYERS %s%s.' % (self.game, roles[self.spy], self.bakeTeam(players), s))

    def onMissionAttempt(self, mission, tries, leader):
        self.send('MISSION %i.%i; LEADER %s.' % (mission, tries, Player.__repr__(leader)))

    def select(self, players, count):
        self._select = AsyncResult()
        self.state.count = count
        self.expecting = self.process_SELECTED

        self.send('SELECT %i!' % (count))
        if not self.bot:
            self.send('/me '  + self.expecting.__doc__)
        selection = self._select.get(timeout=self.TIMEOUT)
        self._select = None
        return selection

    def process_SELECTED(self, msg):
        """Type a list of players to select for the team, e.g. `select 1, 2.`"""

        if 'select' in msg[1].lower():
            msg = ' '.join(msg[2:])
        else:
            msg = ' '.join(msg[1:])
        team = self.makeTeam(msg)

        if len(team) != self.state.count:
            self.send('SELECT %i?' % (self.state.count))
        else:
            assert self._select is not None
            self._select.set(team)

    def onTeamSelected(self, leader, team):
        self._vote = AsyncResult()
        self.expecting = self.process_VOTED

        self.state.team = team[:]
        self.send("VOTE %s?" % (self.bakeTeam(team)))
        if not self.bot:
            self.send('/me '  + self.expecting.__doc__)

    def vote(self, team):
        v = self._vote.get(timeout=self.TIMEOUT)
        self._vote = None
        return v   

    def process_VOTED(self, msg):
        """Enter your vote, for example as `YES` or `NO`."""

        result = parseYesOrNo(' '.join(msg[1:]))
        if result is not None:
            assert self._vote is not None
            self._vote.set(result)

    def onVoteComplete(self, votes):
        self.send("VOTES %s." % (', '.join([showYesOrNo(v) for v in votes])))
        
        v = [b for b in votes if b]
        if self in self.state.team and len(v) > 2:
            self._sabotage = AsyncResult()
            self.expecting = self.process_SABOTAGED
            self.send("SABOTAGE?")
            if not self.bot:
                self.send('/me '  + self.expecting.__doc__)
        else:
            self._sabotage = None

    def sabotage(self):
        assert self._sabotage is not None
        s = self._sabotage.get(timeout=self.TIMEOUT)
        self._sabotage = None
        return s 

    def process_SABOTAGED(self, msg):
        """Decide whether to sabotage, for typing in `YES` or `NO`."""

        result = parseYesOrNo(' '.join(msg[1:]))
        if result is not None:
            assert self._sabotage is not None
            if result and not self.spy:
                self.send("Can't sabotage mission: you are resistance!")
                result = False
            self._sabotage.set(result)

    def onMissionComplete(self, sabotaged):
        # Force synchronization in case sabotage() is not called due to the bot
        # being resistance.  This helps hide human identity by having the same
        # input delay in Spy or Resistance cases.
        if self._sabotage and not self._sabotage.ready():
            s = self._sabotage.get(timeout=self.TIMEOUT)
            assert not s, "Expecting sabotage() to be False if it was handled automatically."

        self.send("SABOTAGES %i." % (sabotaged))
        self.expecting = None

        self.do_announce()

    def onMissionFailed(self, leader, team):
        self.do_announce()

    def do_announce(self):
        self._announce = AsyncResult()
        self.expecting = self.process_ANNOUNCED

        self.send('ANNOUNCE!')
        if not self.bot:
            self.send('/me '  + self.expecting.__doc__)

    def announce(self):        
        timeout = self.TIMEOUT if self.bot else 10.0
        try:
            ann = self._announce.get(timeout=timeout)
        except Timeout as t:
            if self.bot:
                raise t
            ann = {}
        self._announce = None
        return ann

    def process_ANNOUNCED(self, msg):
        """Input a list of players and their spy probabilities, e.g. 3: 0.0, 4: 1.0."""

        if 'announce' in msg[1].lower():
            msg = ' '.join(msg[2:])
        else:
            msg = ' '.join(msg[1:])

        ann = self.makeAnnouncement(msg)
        self._announce.set(ann)

    def onAnnouncement(self, source, announcement):
        self.send("ANNOUNCES %s: %r" % (source, announcement))

    def onGameComplete(self, win, spies):
        if not self.spy:
            self.send("RESULT %s; SPIES %s." % ("Win" if win else "Loss", self.bakeTeam(spies)))
        else:
            self.send("RESULT %s." % ("Loss" if win else "Win",))

        self.client.send_message(message.Command(self.game, 'PART'))

        # Bots wait for the host to leave the channel for synchronization
        # purposes, but for humans we can display the results anyway.
        self._part = Event()
        if self.bot:
            self._part.wait(timeout=self.TIMEOUT)

        self.client.send_message(message.Command(self.channel, 'PART'))
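ProxyBot's turn methods (select, vote, sabotage, announce) all follow the same pattern: create an AsyncResult, ask the question over IRC, then block on get(timeout=self.TIMEOUT) until the matching process_* handler sets the value. A minimal sketch of that handshake, stripped of the IRC plumbing:

import gevent
from gevent.event import AsyncResult

pending = AsyncResult()

def waiter():
    # stands in for vote()/sabotage(): blocks only this greenlet
    try:
        print("answer:", pending.get(timeout=5.0))
    except gevent.Timeout:
        print("no answer in time")

def responder():
    # stands in for process_VOTED() parsing a YES/NO reply
    gevent.sleep(0.1)
    pending.set(True)

gevent.joinall([gevent.spawn(waiter), gevent.spawn(responder)])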
Beispiel #24
0
class IRCClientManager(HasLogger, GClient):
	"""An abstraction around an irc connection that provides automatic reconnects
	while preserving channel membership and re-enqueuing any messages we know were never sent
	(so messages may still be lost, but it is less likely).
	Takes a generic callback with args (irc_client_manager, msg) for all incoming PrivMsgs.
	"""

	def __init__(self, host, nick, callback, channels=None, logger=None, **irc_kwargs):
		irc_kwargs.update(hostname=host, nick=nick)
		self.recv_callback = callback
		self.irc_kwargs = irc_kwargs

		self.channels = set() if channels is None else channels
		self.channel_pending = defaultdict(lambda: 0) # {channel: num of pending messages on queue}
		self._client = AsyncResult()
		self._recv_queue = Queue()

		super(IRCClientManager, self).__init__(logger=logger)

	@property
	def all_open_channels(self):
		"""self.channels is desired channels, self.channel_pending tracks any with unsent messages,
		actual open channels is a union of the two."""
		return self.channels | set(self.channel_pending.keys())

	def _start(self):
		self._client_loop_worker = self.group.spawn(self._client_loop)

	def _client_loop(self):
		try:
			backoff = Backoff(start=0.1, limit=10, rate=5)
			while True:
				self.logger.info("Starting new irc connection")
				client = girc.Client(**self.irc_kwargs)
				self.logger.debug("Joining channels: {}".format(self.all_open_channels))
				for channel in self.all_open_channels:
					client.channel(channel).join()
				client.handler(self._client_recv, command=girc.message.Privmsg)
				self._client.set(client)
				try:
					client.start()
					self.logger.debug("Started new irc connection")
					backoff.reset()
					client.wait_for_stop()
				except Exception as ex:
					self.logger.warning("irc connection died, retrying in {}".format(backoff.peek()), exc_info=True)
					# clear _client if no-one else has
					if self._client.ready():
						assert self._client.get() is client
						self._client = AsyncResult()
					gevent.sleep(backoff.get())
				else:
					self.logger.info("irc connection exited gracefully, stopping")
					self.stop() # graceful exit
					return
		except Exception as ex:
			self.stop(ex)

	def _client_recv(self, client, msg):
		self._recv_queue.put(msg)

	@property
	def client(self):
		while True:
			waiter = self._client
			client = waiter.get()
			if not client._stopping:
				return client
			# clear _client if no-one else has
			if self._client is waiter:
				self._client = AsyncResult()

	def send(self, channel, text):
		self.channel_pending[channel] += 1
		self.logger.debug("Enqueuing message for channel {} ({} now pending): {!r}".format(
			channel, self.channel_pending[channel], text
		))
		super(IRCClientManager, self).send((channel, text))

	def update_channels(self, new_channels):
		old_channels = self.all_open_channels
		self.channels = new_channels
		if not self._client.ready() or self._client.get()._stopping:
			self.logger.debug("Ignoring channel resync, client not running")
			return # no active connection, we're done
		client = self._client.get()
		self.logger.debug("Updating channels: {} to {}".format(old_channels, self.all_open_channels))
		for channel in old_channels - self.all_open_channels:
			client.channel(channel).part()
		for channel in new_channels - self.all_open_channels:
			client.channel(channel).join()

	def _send(self, msg):
		if msg == 'stop':
			self.logger.debug("Calling graceful stop due to stop message on queue")
			self.stop() # does not return
			assert False
		channel, text = msg
		try:
			girc.message.Privmsg(self.client, channel, text).send(block=True)
		except Exception:
			# We can't tell whether this message was actually sent, so discard it
			# rather than re-enqueue it; everything still on the queue is definitely unsent.
			pass
		finally:
			self.channel_pending[channel] -= 1
			self.logger.debug("Sent message for channel {} ({} now pending): {!r}".format(
				channel, self.channel_pending[channel], text
			))
			if self.channel_pending[channel] <= 0:
				del self.channel_pending[channel]
				if self._client.ready() and channel not in self.all_open_channels:
					self._client.get().channel(channel).part()

	def _receive(self):
		for msg in self._recv_queue:
			if msg.target not in self.channels:
				self.logger.debug("Ignoring message {}, not a channel we care about".format(msg))
				# ignore PMs and messages from channels we're only holding open while we finish sending
				continue
			try:
				self.recv_callback(self, msg)
			except Exception:
				self.logger.exception("Failed to handle message {}".format(msg))

	def wait_and_stop(self):
		"""Graceful stop. Waits to send all remaining messages."""
		self.logger.debug("Setting stop message for flushing send queue then stop")
		super(IRCClientManager, self).send('stop')
		self.wait_for_stop()

	def _stop(self, ex):
		if self._client.ready():
			self._client.get().quit()
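The manager uses self._client as a slot for the currently live connection: callers block in the client property until _client_loop publishes a connection with set(), and the slot is swapped for a fresh AsyncResult whenever the connection dies. A minimal sketch of that slot pattern (Backoff and girc are project-specific helpers and are not shown):

import gevent
from gevent.event import AsyncResult

current = AsyncResult()

def consumer():
    client = current.get()          # blocks until a connection is published
    print("sending via", client)

def connector():
    gevent.sleep(0.2)               # pretend we are connecting/reconnecting
    current.set("connection-1")     # wakes every greenlet waiting on the slot

gevent.joinall([gevent.spawn(consumer), gevent.spawn(connector)])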
Beispiel #25
0
class MessageChannel(Channel):
    """A channel that adds useful semantics for publishing and consuming messages.

    Semantics that are added:

    * Support for registering consumers to receive basic.deliver events.
      Consumers also receive errors and are automatically deregistered when
      basic_cancel is received.

    * Support for the RabbitMQ extension confirm_select, which makes
      basic_publish block.

    * Can check for messages returned with basic_return

    """
    def __init__(self, connection, id):
        super(MessageChannel, self).__init__(connection, id)
        self.consumer_id = 1
        self.consumers = {}
        self.returned = AsyncResult()
        self.listeners.set_handler('basic.deliver', self.on_deliver)
        self.listeners.set_handler('basic.return', self.on_basic_return)
        self.listeners.set_handler('basic.cancel-ok', self.on_cancel_ok)
        self.listeners.set_handler('basic.cancel', self.on_cancel_ok)

    def on_deliver(self, message):
        """Called when a message is received.

        Dispatches the message to the registered consumer.
        """
        self.consumers[message.consumer_tag](message)

    def on_basic_return(self, msg):
        """When we receive a basic.return message, store it.

        The value can later be checked using .check_returned().

        """
        self.returned.set(msg)

    def check_returned(self):
        """Raise an error if a message has been returned.

        This also clears the returned frame, with the intention that each
        basic.return message may cause at most one MessageReturned error.

        """
        if self._method and self._method.name == 'basic.return':
            self.must_now_block()
            returned = self.returned.get()
        else:
            try:
                returned = self.returned.get_nowait()
            except gevent.Timeout:
                return

        self.clear_returned()
        if returned:
            raise exceptions.return_exception_from_frame(returned)

    def clear_returned(self):
        """Discard any returned message."""
        if self.returned.ready():
            # we can only replace returned if it is ready - otherwise anything
            # that was blocked waiting would wait forever.
            self.returned = AsyncResult()

    def on_error(self, exc):
        """Override on_error, to pass error to all consumers."""
        for consumer in self.consumers.values():
            self.queue.put((consumer, (exc, )))
        super(MessageChannel, self).on_error(exc)

    def on_cancel_ok(self, frame):
        """The server has cancelled a consumer.

        We can remove its consumer tag from the registered consumers."""
        del (self.consumers[frame.consumer_tag])

    def basic_consume(self,
                      queue='',
                      no_local=False,
                      no_ack=False,
                      exclusive=False,
                      arguments={},
                      callback=None):
        """Begin consuming messages from a queue.

        Consumers last as long as the channel they were declared on, or until
        the client cancels them.

        :param queue: Specifies the name of the queue to consume from.
        :param no_local: Do not deliver own messages. If this flag is set the
            server will not send messages to the connection that published
            them.
        :param no_ack: Don't require acknowledgements. If this flag is set the
            server does not expect acknowledgements for messages. That is, when
            a message is delivered to the client the server assumes the
            delivery will succeed and immediately dequeues it. This
            functionality may increase performance but at the cost of
            reliability. Messages can get lost if a client dies before they
            are delivered to the application.
        :param exclusive: Request exclusive consumer access, meaning only this
            consumer can access the queue.
        :param arguments: A set of arguments for the consume. The syntax and
            semantics of these arguments depends on the server implementation.
        :param callback: A callback to be called for each message received.

        """
        tag = 'ct-%d' % self.consumer_id
        self.consumer_id += 1

        kwargs = dict(queue=queue,
                      no_local=no_local,
                      no_ack=no_ack,
                      exclusive=exclusive,
                      arguments=arguments,
                      consumer_tag=tag)

        if callback is not None:
            self.consumers[tag] = callback
            return super(MessageChannel, self).basic_consume(**kwargs)
        else:
            queue = MessageQueue(self, tag)
            self.consumers[tag] = queue.put
            super(MessageChannel, self).basic_consume(**kwargs)
            return queue

    def basic_get(self, *args, **kwargs):
        """Wrap basic_get to return None if the response is basic.get-empty.

        This will be easier for users to check than testing whether a response
        is get-empty.
        """
        r = super(MessageChannel, self).basic_get(*args, **kwargs)
        return r if isinstance(r, Message) else None

    def confirm_select(self, nowait=False):
        """Turn on RabbitMQ's publisher acknowledgements.

        See http://www.rabbitmq.com/confirms.html

        There are two things that need to be done:

        * Swap basic_publish to a version that blocks waiting for the
          corresponding ack.

        * Support nowait (because this method blocks or not depending on that
          argument)

        """
        self.basic_publish = self.basic_publish_with_confirm

        if nowait:
            super(MessageChannel, self).confirm_select(nowait=nowait)
        else:
            # Send frame directly, as no callback will be received
            self._send(spec.FrameConfirmSelect(1))

    def basic_publish_with_confirm(self,
                                   exchange='',
                                   routing_key='',
                                   mandatory=False,
                                   immediate=False,
                                   headers={},
                                   body=''):
        """Version of basic publish that blocks waiting for confirm."""
        method = super(MessageChannel, self).basic_publish
        self.clear_returned()
        ret = self._call_sync(method, ('basic.ack', 'basic.nack'), exchange,
                              routing_key, mandatory, immediate, headers, body)
        if ret.name == 'basic.nack':
            raise exceptions.PublishFailed(ret)
        if mandatory or immediate:
            self.check_returned()
        return ret
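When no basic.return is expected, check_returned() peeks at self.returned without blocking: AsyncResult.get_nowait() raises gevent.Timeout when nothing has been stored yet, so an empty slot simply means no message has been returned. A minimal sketch of that non-blocking peek, outside any AMQP machinery:

import gevent
from gevent.event import AsyncResult

returned = AsyncResult()

def peek():
    # mirrors the non-blocking branch of check_returned()
    try:
        return returned.get_nowait()
    except gevent.Timeout:
        return None                 # nothing returned by the broker yet

print(peek())                              # None
returned.set({'reply_text': 'NO_ROUTE'})   # stand-in for a basic.return frame
print(peek())                              # the stored frame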
Beispiel #26
0
class InputTable(scheme.Table):
    _table_name = 'input'
    _table_collection = input_tables

    id = scheme.Column('api')
    type = scheme.Column('api')
    parent = scheme.Column(
        'api',
        lambda self, value: value and [value._table_name, value.id] or None)
    timeout = scheme.Column(
        'api',
        lambda self, timeout: timeout and int(timeout.eta * 1000) or None)
    elements = scheme.Column('api')
    result = scheme.Column('api')
    close_aborts = scheme.Column('api')

    ignore_api = False

    def __init__(self,
                 type,
                 parent,
                 timeout,
                 elements,
                 close_aborts,
                 ignore_api=False):
        self.type = type
        self.parent = parent
        self.timeout = None
        self.elements = [isinstance(e, list) and e or [e] for e in elements]
        self.close_aborts = close_aborts
        self.ignore_api = ignore_api

        if parent:
            parent.input = self

        self._result = AsyncResult()
        self.reset_timeout(timeout)

    def set_result(self, value):
        if self._result.ready():
            #raise RuntimeError('result of input already set')
            return
        with scheme.transaction:
            self.result = value
            self.reset_timeout(None)
        self._result.set(value)
        event.fire("input:result", self)

    def set_error(self, value):
        if self._result.ready():
            #raise RuntimeError('result of input already set')
            return
        with scheme.transaction:
            self.result = str(value)
            self.reset_timeout(None)
        self._result.set_exception(value)
        event.fire("input:error", self)

    def reset_timeout(self, timeout):
        with scheme.transaction:
            if self.timeout:
                self.timeout.kill()
            if timeout:
                self.timeout = gevent.spawn_later(timeout, self._timed_out)
                self.timeout.eta = time.time() + timeout
            elif self.timeout:
                self.timeout = None

    def _timed_out(self):
        with scheme.transaction:
            self.timeout = None
            self.set_error(InputTimeout())
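reset_timeout() arms a gevent.spawn_later timer whose callback resolves the same AsyncResult with an error, so anything blocked on the input either gets a value or the timeout exception. A minimal sketch of that timer pattern, with a plain RuntimeError standing in for InputTimeout:

import gevent
from gevent.event import AsyncResult

result = AsyncResult()

def timed_out():
    if not result.ready():
        result.set_exception(RuntimeError("input timed out"))

timer = gevent.spawn_later(2.0, timed_out)
# a real answer would cancel the timer first: timer.kill(); result.set(value)

try:
    print(result.get())             # blocks until set() or set_exception()
except RuntimeError as err:
    print("error:", err)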
Beispiel #27
0
class ChildrenMonitor(object):
    """Simple monitor that monitors the children of a node and their
    content.
    """
    _STOP_REQUEST = object()

    def __init__(self, client, path, into, factory, args, listener):
        self.client = client
        self.path = path
        self.into = into if into is not None else {}
        self.factory = factory if factory is not None else str
        self.args = args
        self.listener = listener or MonitorListener()
        self.started = AsyncResult()
        self.queue = Queue()
        self.stats = {}
        self._delay = 1.343
        self.max_delay = 180

    def _monitor(self):
        """Run the monitoring loop."""
        def watcher(event):
            self.queue.put(event)

        while True:
            try:
                children = self.client.get_children(self.path, watcher)
            except zookeeper.NoNodeException:
                if not self.started.ready():
                    self.started.set(None)
                gevent.sleep(1)
                continue
            except (zookeeper.ConnectionLossException,
                    zookeeper.SessionExpiredException,
                    zookeeper.InvalidStateException) as err:
                if not self.started.ready():
                    self.started.set_exception(err)
                    break
                logging.error("got %r while monitoring %s", str(err),
                              self.path)
                gevent.sleep(self._delay)
                self._delay += self._delay * random.random()
                self._delay = min(self._delay, self.max_delay)
                continue
            except Exception as err:
                if not self.started.ready():
                    self.started.set_exception(err)
                    break
                raise

            for child in children:
                if child not in self.stats:
                    try:
                        data, stat = self.client.get(
                            os.path.join(self.path, child))
                    except zookeeper.NoNodeException:
                        print "race condition while getting", os.path.join(
                            self.path, child)
                    else:
                        self.into[child] = self.factory(data, *self.args)
                        self.listener.created(child, self.into[child])
                        self.stats[child] = stat
                else:
                    try:
                        data, stat = self.client.get(
                            os.path.join(self.path, child))
                    except zookeeper.NoNodeException:
                        print "race condition while getting", os.path.join(
                            self.path, child)
                        # should we remove it here?
                    else:
                        if stat['version'] != self.stats[child]['version']:
                            self.into[child] = self.factory(data, *self.args)
                            self.listener.modified(child, self.into[child])
                        self.stats[child] = stat
            for child in self.into.keys():
                if child not in children:
                    del self.into[child]
                    del self.stats[child]
                    self.listener.deleted(child)

            if not self.started.ready():
                self.started.set(None)

            self.listener.commit()

            event = self.queue.get()
            if event is self._STOP_REQUEST:
                break
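The monitor loop blocks on queue.get() between ZooKeeper reads: a watch event triggers another pass, while the private _STOP_REQUEST sentinel ends the loop. A minimal sketch of that stop-sentinel pattern:

import gevent
from gevent.queue import Queue

STOP = object()
events = Queue()

def monitor():
    while True:
        event = events.get()        # blocks until a watch event or stop request
        if event is STOP:
            print("monitor stopped")
            return
        print("re-reading children after", event)

g = gevent.spawn(monitor)
events.put("children changed")
events.put(STOP)
g.join()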
Beispiel #28
0
class ProxyBot(Bot):
    def __init__(self, name, client, game, bot):
        self.name = name
        self.client = client
        self.bot = bot
        if bot:
            self.TIMEOUT = 1.0
        else:
            self.TIMEOUT = None

        self.expecting = None
        self._vote = None
        self._select = None
        self._sabotage = None
        self._join = None
        self._part = None
        self.game = game

    def __call__(self, game, index, spy):
        """This function pretends to be a Builder, but in fact just
        configures this object in place as it's easier to setup and maintain."""
        Player.__init__(self, self.name, index)
        self.state = game
        self.spy = spy

        self.channel = '%s-player-%i' % (self.game, index)
        self.client.send_message(message.Join(self.channel))
        self.client.send_message(message.Join(self.game))

        self._join = Event()
        # Use elegant /INVITE command for humans that have better clients.
        self.client.send_message(
            message.Command([self.name, self.channel], 'INVITE'))
        return self

    def bakeTeam(self, team):
        return ', '.join([str(p) for p in team])

    def makeTeam(self, msg):
        for s in '\t,.!;?':
            msg = msg.replace(s, ' ')
        names = [n for n in msg.split(' ') if n]
        players = []
        for n in names:
            players.append(self.makePlayer(n))
        return players

    def makePlayer(self, name):
        for p in self.state.players:
            if str(p.index) in name:
                return p
            if name in p.name:
                return p
        assert False, "Can't find player for input name '%s'." % (name)

    def send(self, msg):
        self.client.msg(self.channel, msg)

    def onGameRevealed(self, players, spies):
        roles = {True: "Spy", False: "Resistance"}
        s = ""
        if self.spy:
            s = "; SPIES " + self.bakeTeam(spies)

        self._join.wait()
        self._join = None
        self.send('REVEAL %s; ROLE %s; PLAYERS %s%s.' %
                  (self.game, roles[self.spy], self.bakeTeam(players), s))

    def onMissionAttempt(self, mission, tries, leader):
        self.send('MISSION %i.%i; LEADER %s.' %
                  (mission, tries, Player.__repr__(leader)))

    def select(self, players, count):
        self.send('SELECT %i!' % (count))
        self._select = AsyncResult()
        self.state.count = count
        self.expecting = self.process_SELECTED
        return self._select.get(timeout=self.TIMEOUT)

    def process_SELECTED(self, msg):
        if 'select' in msg[1].lower():
            msg = ' '.join(msg[2:])
        else:
            msg = ' '.join(msg[1:])
        team = self.makeTeam(msg)
        if len(team) != self.state.count:
            self.send('SELECT %i?' % (self.state.count))
        else:
            self._select.set(team)

    def onTeamSelected(self, leader, team):
        self.state.team = team[:]
        self.send("VOTE %s?" % (self.bakeTeam(team)))
        self._vote = AsyncResult()
        self.expecting = self.process_VOTED

    def vote(self, team):
        return self._vote.get(timeout=self.TIMEOUT)

    def process_VOTED(self, msg):
        result = parseYesOrNo(' '.join(msg[1:]))
        if result is not None:
            self._vote.set(result)

    def onVoteComplete(self, votes):
        self.send("VOTES %s." % (', '.join([showYesOrNo(v) for v in votes])))

        v = [b for b in votes if b]
        if self in self.state.team and len(v) > 2:
            self.send("SABOTAGE?")
            self._sabotage = AsyncResult()
            self.expecting = self.process_SABOTAGED
        else:
            self._sabotage = None

    def sabotage(self):
        assert self._sabotage is not None
        return self._sabotage.get(timeout=self.TIMEOUT)

    def process_SABOTAGED(self, msg):
        result = parseYesOrNo(' '.join(msg[1:]))
        if result is not None:
            self._sabotage.set(result)

    def onMissionComplete(self, sabotaged):
        # Force synchronization in case sabotage() is not called due to the bot
        # being resistance.  This helps hide human identity by having the same
        # input delay in Spy or Resistance cases.
        if self._sabotage and not self._sabotage.ready():
            s = self._sabotage.get(timeout=self.TIMEOUT)
            assert not s, "Expecting sabotage() to be False if it was handled automatically."

        self.send("SABOTAGES %i." % (sabotaged))
        self.expecting = None

    def onGameComplete(self, win, spies):
        self.send("RESULT %s; SPIES %s." %
                  (showYesOrNo(win), self.bakeTeam(spies)))

        self.client.send_message(message.Command(self.game, 'PART'))
        self._part = Event()
        self._part.wait()
        self.client.send_message(message.Command(self.channel, 'PART'))
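Besides AsyncResult, this variant leans on gevent.event.Event for one-shot synchronization: onGameRevealed blocks on self._join.wait() until the IRC join is confirmed, and onGameComplete waits on self._part before leaving the channel. A minimal sketch of that one-shot gate:

import gevent
from gevent.event import Event

joined = Event()

def game():
    joined.wait()                   # blocks until the join is confirmed
    print("REVEAL can be sent now")

def irc_join_handler():
    gevent.sleep(0.1)               # pretend the server acknowledged the JOIN
    joined.set()

gevent.joinall([gevent.spawn(game), gevent.spawn(irc_join_handler)])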
Beispiel #29
0
class Popen(object):
    def __init__(self,
                 args,
                 bufsize=None,
                 executable=None,
                 stdin=None,
                 stdout=None,
                 stderr=None,
                 preexec_fn=None,
                 close_fds=_PLATFORM_DEFAULT_CLOSE_FDS,
                 shell=False,
                 cwd=None,
                 env=None,
                 universal_newlines=False,
                 startupinfo=None,
                 creationflags=0,
                 threadpool=None,
                 **kwargs):
        """Create new Popen instance."""

        if not PY3 and kwargs:
            raise TypeError("Got unexpected keyword arguments", kwargs)
        pass_fds = kwargs.pop('pass_fds', ())
        start_new_session = kwargs.pop('start_new_session', False)
        restore_signals = kwargs.pop('restore_signals', True)

        hub = get_hub()

        if bufsize is None:
            # bufsize has different defaults on Py3 and Py2
            if PY3:
                bufsize = -1
            else:
                bufsize = 0
        if not isinstance(bufsize, integer_types):
            raise TypeError("bufsize must be an integer")

        if mswindows:
            if preexec_fn is not None:
                raise ValueError("preexec_fn is not supported on Windows "
                                 "platforms")
            any_stdio_set = (stdin is not None or stdout is not None
                             or stderr is not None)
            if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
                if any_stdio_set:
                    close_fds = False
                else:
                    close_fds = True
            elif close_fds and any_stdio_set:
                raise ValueError(
                    "close_fds is not supported on Windows "
                    "platforms if you redirect stdin/stdout/stderr")
            if threadpool is None:
                threadpool = hub.threadpool
            self.threadpool = threadpool
            self._waiting = False
        else:
            # POSIX
            if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
                # close_fds has different defaults on Py3/Py2
                if PY3:
                    close_fds = True
                else:
                    close_fds = False

            if pass_fds and not close_fds:
                import warnings
                warnings.warn("pass_fds overriding close_fds.", RuntimeWarning)
                close_fds = True
            if startupinfo is not None:
                raise ValueError("startupinfo is only supported on Windows "
                                 "platforms")
            if creationflags != 0:
                raise ValueError("creationflags is only supported on Windows "
                                 "platforms")
            assert threadpool is None
            self._loop = hub.loop

        if PY3:
            self.args = args
        self.stdin = None
        self.stdout = None
        self.stderr = None
        self.pid = None
        self.returncode = None
        self.universal_newlines = universal_newlines
        self.result = AsyncResult()

        # Input and output objects. The general principle is like
        # this:
        #
        # Parent                   Child
        # ------                   -----
        # p2cwrite   ---stdin--->  p2cread
        # c2pread    <--stdout---  c2pwrite
        # errread    <--stderr---  errwrite
        #
        # On POSIX, the child objects are file descriptors.  On
        # Windows, these are Windows file handles.  The parent objects
        # are file descriptors on both platforms.  The parent objects
        # are None when not using PIPEs. The child objects are None
        # when not redirecting.

        (p2cread, p2cwrite, c2pread, c2pwrite, errread,
         errwrite) = self._get_handles(stdin, stdout, stderr)

        # We wrap OS handles *before* launching the child, otherwise a
        # quickly terminating child could make our fds unwrappable
        # (see #8458).
        if mswindows:
            if p2cwrite is not None:
                p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
            if c2pread is not None:
                c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
            if errread is not None:
                errread = msvcrt.open_osfhandle(errread.Detach(), 0)

        if p2cwrite is not None:
            if PY3 and universal_newlines:
                # Under Python 3, if we left on the 'b' we'd get different results
                # depending on whether we used FileObjectPosix or FileObjectThread
                self.stdin = FileObject(p2cwrite, 'wb', bufsize)
                self.stdin._translate = True
                self.stdin.io = io.TextIOWrapper(self.stdin.io,
                                                 write_through=True,
                                                 line_buffering=(bufsize == 1))
            else:
                self.stdin = FileObject(p2cwrite, 'wb', bufsize)
        if c2pread is not None:
            if universal_newlines:
                if PY3:
                    # FileObjectThread doesn't support the 'U' qualifier
                    # with a bufsize of 0
                    self.stdout = FileObject(c2pread, 'rb', bufsize)
                    # NOTE: Universal Newlines are broken on Windows/Py3, at least
                    # in some cases. This is true in the stdlib subprocess module
                    # as well; the following line would fix the test cases in
                    # test__subprocess.py that depend on python_universal_newlines,
                    # but would be inconsistent with the stdlib:
                    #msvcrt.setmode(self.stdout.fileno(), os.O_TEXT)
                    self.stdout.io = io.TextIOWrapper(self.stdout.io)
                    self.stdout.io.mode = 'r'
                    self.stdout._translate = True
                else:
                    self.stdout = FileObject(c2pread, 'rU', bufsize)
            else:
                self.stdout = FileObject(c2pread, 'rb', bufsize)
        if errread is not None:
            if universal_newlines:
                if PY3:
                    self.stderr = FileObject(errread, 'rb', bufsize)
                    self.stderr.io = io.TextIOWrapper(self.stderr.io)
                    self.stderr._translate = True
                else:
                    self.stderr = FileObject(errread, 'rU', bufsize)
            else:
                self.stderr = FileObject(errread, 'rb', bufsize)

        self._closed_child_pipe_fds = False
        try:
            self._execute_child(args, executable, preexec_fn, close_fds,
                                pass_fds, cwd, env, universal_newlines,
                                startupinfo, creationflags, shell, p2cread,
                                p2cwrite, c2pread, c2pwrite, errread, errwrite,
                                restore_signals, start_new_session)
        except:
            # Cleanup if the child failed starting.
            # (gevent: New in python3, but reported as gevent bug in #347.
            # Note that under Py2, any error raised below will replace the
            # original error so we have to use reraise)
            if not PY3:
                exc_info = sys.exc_info()
            for f in filter(None, (self.stdin, self.stdout, self.stderr)):
                try:
                    f.close()
                except (OSError, IOError):
                    pass  # Ignore EBADF or other errors.

            if not self._closed_child_pipe_fds:
                to_close = []
                if stdin == PIPE:
                    to_close.append(p2cread)
                if stdout == PIPE:
                    to_close.append(c2pwrite)
                if stderr == PIPE:
                    to_close.append(errwrite)
                if hasattr(self, '_devnull'):
                    to_close.append(self._devnull)
                for fd in to_close:
                    try:
                        os.close(fd)
                    except (OSError, IOError):
                        pass
            if not PY3:
                try:
                    reraise(*exc_info)
                finally:
                    del exc_info
            raise

    def __repr__(self):
        return '<%s at 0x%x pid=%r returncode=%r>' % (
            self.__class__.__name__, id(self), self.pid, self.returncode)

    def _on_child(self, watcher):
        watcher.stop()
        status = watcher.rstatus
        if os.WIFSIGNALED(status):
            self.returncode = -os.WTERMSIG(status)
        else:
            self.returncode = os.WEXITSTATUS(status)
        self.result.set(self.returncode)

    def _get_devnull(self):
        if not hasattr(self, '_devnull'):
            self._devnull = os.open(os.devnull, os.O_RDWR)
        return self._devnull

    _stdout_buffer = None
    _stderr_buffer = None

    def communicate(self, input=None, timeout=None):
        """Interact with process: Send data to stdin.  Read data from
        stdout and stderr, until end-of-file is reached.  Wait for
        process to terminate.  The optional input argument should be a
        string to be sent to the child process, or None, if no data
        should be sent to the child.

        communicate() returns a tuple (stdout, stderr).

        :keyword timeout: Under Python 2, this is a gevent extension; if
           given and it expires, we will raise :class:`gevent.timeout.Timeout`.
           Under Python 3, this raises the standard :exc:`TimeoutExpired` exception.
        """
        greenlets = []
        if self.stdin:
            greenlets.append(spawn(write_and_close, self.stdin, input))

        # If the timeout parameter is used, and the caller calls back after
        # getting a TimeoutExpired exception, we can wind up with multiple
        # greenlets trying to run and read from and close stdout/stderr.
        # That's bad because it can lead to 'RuntimeError: reentrant call in io.BufferedReader'.
        # We can't just kill the previous greenlets when a timeout happens,
        # though, because we risk losing the output collected by that greenlet
        # (and Python 3, where timeout is an official parameter, explicitly says
        # that no output should be lost in the event of a timeout.) Instead, we're
        # watching for the exception and ignoring it. It's not elegant,
        # but it works.
        if self.stdout:

            def _read_out():
                try:
                    data = self.stdout.read()
                except RuntimeError:
                    return
                if self._stdout_buffer is not None:
                    self._stdout_buffer += data
                else:
                    self._stdout_buffer = data

            stdout = spawn(_read_out)
            greenlets.append(stdout)
        else:
            stdout = None

        if self.stderr:

            def _read_err():
                try:
                    data = self.stderr.read()
                except RuntimeError:
                    return
                if self._stderr_buffer is not None:
                    self._stderr_buffer += data
                else:
                    self._stderr_buffer = data

            stderr = spawn(_read_err)
            greenlets.append(stderr)
        else:
            stderr = None

        # If we were given stdin=stdout=stderr=None, we have no way to
        # communicate with the child, and thus no greenlets to wait
        # on. This is a nonsense case, but it comes up in the test
        # case for Python 3.5 (test_subprocess.py
        # RunFuncTestCase.test_timeout). Instead, we go directly to
        # self.wait
        if not greenlets and timeout is not None:
            result = self.wait(timeout=timeout)
            # Python 3 would have already raised, but Python 2 would not
            # so we need to do that manually
            if result is None:
                from gevent.timeout import Timeout
                raise Timeout(timeout)

        done = joinall(greenlets, timeout=timeout)
        if timeout is not None and len(done) != len(greenlets):
            if PY3:
                raise TimeoutExpired(self.args, timeout)
            from gevent.timeout import Timeout
            raise Timeout(timeout)

        if self.stdout:
            try:
                self.stdout.close()
            except RuntimeError:
                pass
        if self.stderr:
            try:
                self.stderr.close()
            except RuntimeError:
                pass
        self.wait()
        stdout_value = self._stdout_buffer
        self._stdout_buffer = None
        stderr_value = self._stderr_buffer
        self._stderr_buffer = None
        # XXX: Under python 3 in universal newlines mode we should be
        # returning str, not bytes
        return (None if stdout is None else stdout_value or b'',
                None if stderr is None else stderr_value or b'')

    def poll(self):
        return self._internal_poll()

    if PY3:

        def __enter__(self):
            return self

        def __exit__(self, type, value, traceback):
            if self.stdout:
                self.stdout.close()
            if self.stderr:
                self.stderr.close()
            try:  # Flushing a BufferedWriter may raise an error
                if self.stdin:
                    self.stdin.close()
            finally:
                # Wait for the process to terminate, to avoid zombies.
                # JAM: gevent: If the process never terminates, this
                # blocks forever.
                self.wait()

    if mswindows:
        #
        # Windows methods
        #
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            if stdin is None and stdout is None and stderr is None:
                return (None, None, None, None, None, None)

            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None

            try:
                DEVNULL
            except NameError:
                _devnull = object()
            else:
                _devnull = DEVNULL

            if stdin is None:
                p2cread = GetStdHandle(STD_INPUT_HANDLE)
                if p2cread is None:
                    p2cread, _ = CreatePipe(None, 0)
                    if PY3:
                        p2cread = Handle(p2cread)
                        _winapi.CloseHandle(_)
            elif stdin == PIPE:
                p2cread, p2cwrite = CreatePipe(None, 0)
                if PY3:
                    p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite)
            elif stdin == _devnull:
                p2cread = msvcrt.get_osfhandle(self._get_devnull())
            elif isinstance(stdin, int):
                p2cread = msvcrt.get_osfhandle(stdin)
            else:
                # Assuming file-like object
                p2cread = msvcrt.get_osfhandle(stdin.fileno())
            p2cread = self._make_inheritable(p2cread)

            if stdout is None:
                c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
                if c2pwrite is None:
                    _, c2pwrite = CreatePipe(None, 0)
                    if PY3:
                        c2pwrite = Handle(c2pwrite)
                        _winapi.CloseHandle(_)
            elif stdout == PIPE:
                c2pread, c2pwrite = CreatePipe(None, 0)
                if PY3:
                    c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite)
            elif stdout == _devnull:
                c2pwrite = msvcrt.get_osfhandle(self._get_devnull())
            elif isinstance(stdout, int):
                c2pwrite = msvcrt.get_osfhandle(stdout)
            else:
                # Assuming file-like object
                c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
            c2pwrite = self._make_inheritable(c2pwrite)

            if stderr is None:
                errwrite = GetStdHandle(STD_ERROR_HANDLE)
                if errwrite is None:
                    _, errwrite = CreatePipe(None, 0)
                    if PY3:
                        errwrite = Handle(errwrite)
                        _winapi.CloseHandle(_)
            elif stderr == PIPE:
                errread, errwrite = CreatePipe(None, 0)
                if PY3:
                    errread, errwrite = Handle(errread), Handle(errwrite)
            elif stderr == STDOUT:
                errwrite = c2pwrite
            elif stderr == _devnull:
                errwrite = msvcrt.get_osfhandle(self._get_devnull())
            elif isinstance(stderr, int):
                errwrite = msvcrt.get_osfhandle(stderr)
            else:
                # Assuming file-like object
                errwrite = msvcrt.get_osfhandle(stderr.fileno())
            errwrite = self._make_inheritable(errwrite)

            return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite)

        def _make_inheritable(self, handle):
            """Return a duplicate of handle, which is inheritable"""
            return DuplicateHandle(GetCurrentProcess(), handle,
                                   GetCurrentProcess(), 0, 1,
                                   DUPLICATE_SAME_ACCESS)

        def _find_w9xpopen(self):
            """Find and return absolut path to w9xpopen.exe"""
            w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
                                    "w9xpopen.exe")
            if not os.path.exists(w9xpopen):
                # Eeek - file-not-found - possibly an embedding
                # situation - see if we can locate it in sys.exec_prefix
                w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
                                        "w9xpopen.exe")
                if not os.path.exists(w9xpopen):
                    raise RuntimeError("Cannot locate w9xpopen.exe, which is "
                                       "needed for Popen to work with your "
                                       "shell or platform.")
            return w9xpopen

        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           pass_fds, cwd, env, universal_newlines, startupinfo,
                           creationflags, shell, p2cread, p2cwrite, c2pread,
                           c2pwrite, errread, errwrite, unused_restore_signals,
                           unused_start_new_session):
            """Execute program (MS Windows version)"""

            assert not pass_fds, "pass_fds not supported on Windows."

            if not isinstance(args, string_types):
                args = list2cmdline(args)

            # Process startup details
            if startupinfo is None:
                startupinfo = STARTUPINFO()
            if None not in (p2cread, c2pwrite, errwrite):
                startupinfo.dwFlags |= STARTF_USESTDHANDLES
                startupinfo.hStdInput = p2cread
                startupinfo.hStdOutput = c2pwrite
                startupinfo.hStdError = errwrite

            if shell:
                startupinfo.dwFlags |= STARTF_USESHOWWINDOW
                startupinfo.wShowWindow = SW_HIDE
                comspec = os.environ.get("COMSPEC", "cmd.exe")
                args = '{} /c "{}"'.format(comspec, args)
                if GetVersion() >= 0x80000000 or os.path.basename(
                        comspec).lower() == "command.com":
                    # Win9x, or using command.com on NT. We need to
                    # use the w9xpopen intermediate program. For more
                    # information, see KB Q150956
                    # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
                    w9xpopen = self._find_w9xpopen()
                    args = '"%s" %s' % (w9xpopen, args)
                    # Not passing CREATE_NEW_CONSOLE has been known to
                    # cause random failures on win9x.  Specifically a
                    # dialog: "Your program accessed mem currently in
                    # use at xxx" and a hopeful warning about the
                    # stability of your system.  Cost is Ctrl+C won't
                    # kill children.
                    creationflags |= CREATE_NEW_CONSOLE

            # Start the process
            try:
                hp, ht, pid, tid = CreateProcess(
                    executable,
                    args,
                    # no special security
                    None,
                    None,
                    int(not close_fds),
                    creationflags,
                    env,
                    cwd,
                    startupinfo)
            except IOError as e:  # From 2.6 on, pywintypes.error was defined as IOError
                # Translate pywintypes.error to WindowsError, which is
                # a subclass of OSError.  FIXME: We should really
                # translate errno using _sys_errlist (or similar), but
                # how can this be done from Python?
                if PY3:
                    raise  # don't remap here
                raise WindowsError(*e.args)
            finally:
                # Child is launched. Close the parent's copy of those pipe
                # handles that only the child should have open.  You need
                # to make sure that no handles to the write end of the
                # output pipe are maintained in this process or else the
                # pipe will not close when the child process exits and the
                # ReadFile will hang.
                def _close(x):
                    if x is not None and x != -1:
                        if hasattr(x, 'Close'):
                            x.Close()
                        else:
                            _winapi.CloseHandle(x)

                _close(p2cread)
                _close(c2pwrite)
                _close(errwrite)
                if hasattr(self, '_devnull'):
                    os.close(self._devnull)

            # Retain the process handle, but close the thread handle
            self._child_created = True
            self._handle = Handle(hp) if not hasattr(hp, 'Close') else hp
            self.pid = pid
            _winapi.CloseHandle(ht) if not hasattr(ht, 'Close') else ht.Close()

        def _internal_poll(self):
            """Check if child process has terminated.  Returns returncode
            attribute.
            """
            if self.returncode is None:
                if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
                    self.returncode = GetExitCodeProcess(self._handle)
                    self.result.set(self.returncode)
            return self.returncode

        def rawlink(self, callback):
            if not self.result.ready() and not self._waiting:
                self._waiting = True
                Greenlet.spawn(self._wait)
            self.result.rawlink(linkproxy(callback, self))
            # XXX unlink

        def _blocking_wait(self):
            WaitForSingleObject(self._handle, INFINITE)
            self.returncode = GetExitCodeProcess(self._handle)
            return self.returncode

        def _wait(self):
            self.threadpool.spawn(self._blocking_wait).rawlink(self.result)

        def wait(self, timeout=None):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            if self.returncode is None:
                if not self._waiting:
                    self._waiting = True
                    self._wait()
            result = self.result.wait(timeout=timeout)
            if PY3 and timeout is not None and not self.result.ready():
                raise TimeoutExpired(self.args, timeout)
            return result

        def send_signal(self, sig):
            """Send a signal to the process
            """
            if sig == signal.SIGTERM:
                self.terminate()
            elif sig == signal.CTRL_C_EVENT:
                os.kill(self.pid, signal.CTRL_C_EVENT)
            elif sig == signal.CTRL_BREAK_EVENT:
                os.kill(self.pid, signal.CTRL_BREAK_EVENT)
            else:
                raise ValueError("Unsupported signal: {}".format(sig))

        def terminate(self):
            """Terminates the process
            """
            TerminateProcess(self._handle, 1)

        kill = terminate

    else:
        #
        # POSIX methods
        #

        def rawlink(self, callback):
            self.result.rawlink(linkproxy(callback, self))

        # XXX unlink

        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None

            try:
                DEVNULL
            except NameError:
                _devnull = object()
            else:
                _devnull = DEVNULL

            if stdin is None:
                pass
            elif stdin == PIPE:
                p2cread, p2cwrite = self.pipe_cloexec()
            elif stdin == _devnull:
                p2cread = self._get_devnull()
            elif isinstance(stdin, int):
                p2cread = stdin
            else:
                # Assuming file-like object
                p2cread = stdin.fileno()

            if stdout is None:
                pass
            elif stdout == PIPE:
                c2pread, c2pwrite = self.pipe_cloexec()
            elif stdout == _devnull:
                c2pwrite = self._get_devnull()
            elif isinstance(stdout, int):
                c2pwrite = stdout
            else:
                # Assuming file-like object
                c2pwrite = stdout.fileno()

            if stderr is None:
                pass
            elif stderr == PIPE:
                errread, errwrite = self.pipe_cloexec()
            elif stderr == STDOUT:
                errwrite = c2pwrite
            elif stderr == _devnull:
                errwrite = self._get_devnull()
            elif isinstance(stderr, int):
                errwrite = stderr
            else:
                # Assuming file-like object
                errwrite = stderr.fileno()

            return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite)

        def _set_cloexec_flag(self, fd, cloexec=True):
            try:
                cloexec_flag = fcntl.FD_CLOEXEC
            except AttributeError:
                cloexec_flag = 1

            old = fcntl.fcntl(fd, fcntl.F_GETFD)
            if cloexec:
                fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
            else:
                fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)

        def _remove_nonblock_flag(self, fd):
            flags = fcntl.fcntl(fd, fcntl.F_GETFL) & (~os.O_NONBLOCK)
            fcntl.fcntl(fd, fcntl.F_SETFL, flags)

        def pipe_cloexec(self):
            """Create a pipe with FDs set CLOEXEC."""
            # Pipes' FDs are set CLOEXEC by default because we don't want them
            # to be inherited by other subprocesses: the CLOEXEC flag is removed
            # from the child's FDs by _dup2(), between fork() and exec().
            # This is not atomic: we would need the pipe2() syscall for that.
            r, w = os.pipe()
            self._set_cloexec_flag(r)
            self._set_cloexec_flag(w)
            return r, w

        def _close_fds(self, keep):
            # `keep` is a set of fds, so we
            # use os.closerange from 3 to min(keep)
            # and then from max(keep + 1) to MAXFD and
            # loop through filling in the gaps.
            # Under new python versions, we need to explicitly set
            # passed fds to be inheritable or they will go away on exec
            if hasattr(os, 'set_inheritable'):
                set_inheritable = os.set_inheritable
            else:
                set_inheritable = lambda i, v: True
            if hasattr(os, 'closerange'):
                keep = sorted(keep)
                min_keep = min(keep)
                max_keep = max(keep)
                os.closerange(3, min_keep)
                os.closerange(max_keep + 1, MAXFD)
                for i in xrange(min_keep, max_keep):
                    if i in keep:
                        set_inheritable(i, True)
                        continue

                    try:
                        os.close(i)
                    except:
                        pass
            else:
                for i in xrange(3, MAXFD):
                    if i in keep:
                        set_inheritable(i, True)
                        continue
                    try:
                        os.close(i)
                    except:
                        pass

        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           pass_fds, cwd, env, universal_newlines, startupinfo,
                           creationflags, shell, p2cread, p2cwrite, c2pread,
                           c2pwrite, errread, errwrite, restore_signals,
                           start_new_session):
            """Execute program (POSIX version)"""

            if PY3 and isinstance(args, (str, bytes)):
                args = [args]
            elif not PY3 and isinstance(args, string_types):
                args = [args]
            else:
                args = list(args)

            if shell:
                args = ["/bin/sh", "-c"] + args
                if executable:
                    args[0] = executable

            if executable is None:
                executable = args[0]

            self._loop.install_sigchld()

            # For transferring a possible exec failure from child to parent:
            # the child pickles the exception and writes it to errpipe_write;
            # an empty read on errpipe_read means exec() succeeded.
            errpipe_read, errpipe_write = self.pipe_cloexec()
            # errpipe_write must not be in the standard io 0, 1, or 2 fd range.
            low_fds_to_close = []
            while errpipe_write < 3:
                low_fds_to_close.append(errpipe_write)
                errpipe_write = os.dup(errpipe_write)
            for low_fd in low_fds_to_close:
                os.close(low_fd)
            try:
                try:
                    gc_was_enabled = gc.isenabled()
                    # Disable gc to avoid bug where gc -> file_dealloc ->
                    # write to stderr -> hang.  http://bugs.python.org/issue1336
                    gc.disable()
                    try:
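                        # gevent's fork_and_watch() forks and, in the parent,
                        # registers a child watcher on the loop so that
                        # self._on_child runs once the event loop reaps this pid.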
                        self.pid = fork_and_watch(self._on_child, self._loop,
                                                  True, fork)
                    except:
                        if gc_was_enabled:
                            gc.enable()
                        raise
                    if self.pid == 0:
                        # Child
                        try:
                            # Close parent's pipe ends
                            if p2cwrite is not None:
                                os.close(p2cwrite)
                            if c2pread is not None:
                                os.close(c2pread)
                            if errread is not None:
                                os.close(errread)
                            os.close(errpipe_read)

                            # When duping fds, if there arises a situation
                            # where one of the fds is either 0, 1 or 2, it
                            # is possible that it is overwritten (#12607).
                            if c2pwrite == 0:
                                c2pwrite = os.dup(c2pwrite)
                            if errwrite == 0 or errwrite == 1:
                                errwrite = os.dup(errwrite)

                            # Dup fds for child
                            def _dup2(a, b):
                                # dup2() removes the CLOEXEC flag but
                                # we must do it ourselves if dup2()
                                # would be a no-op (issue #10806).
                                if a == b:
                                    self._set_cloexec_flag(a, False)
                                elif a is not None:
                                    os.dup2(a, b)
                                self._remove_nonblock_flag(b)

                            _dup2(p2cread, 0)
                            _dup2(c2pwrite, 1)
                            _dup2(errwrite, 2)

                            # Close pipe fds.  Make sure we don't close the
                            # same fd more than once, or standard fds.
                            closed = set([None])
                            for fd in [p2cread, c2pwrite, errwrite]:
                                if fd not in closed and fd > 2:
                                    os.close(fd)
                                    closed.add(fd)

                            if cwd is not None:
                                os.chdir(cwd)

                            if preexec_fn:
                                preexec_fn()

                            # Close all other fds, if asked for. This must be done
                            # after preexec_fn runs.
                            if close_fds:
                                fds_to_keep = set(pass_fds)
                                fds_to_keep.add(errpipe_write)
                                self._close_fds(fds_to_keep)
                            elif hasattr(os, 'get_inheritable'):
                                # close_fds was false, and we're on
                                # Python 3.4 or newer, so "all file
                                # descriptors except standard streams
                                # are closed, and inheritable handles
                                # are only inherited if the close_fds
                                # parameter is False."
                                for i in xrange(3, MAXFD):
                                    try:
                                        if i == errpipe_write or os.get_inheritable(
                                                i):
                                            continue
                                        os.close(i)
                                    except:
                                        pass

                            if restore_signals:
                                # restore the documented signals back to sig_dfl;
                                # not all will be defined on every platform
                                for sig in 'SIGPIPE', 'SIGXFZ', 'SIGXFSZ':
                                    sig = getattr(signal, sig, None)
                                    if sig is not None:
                                        signal.signal(sig, signal.SIG_DFL)

                            if start_new_session:
                                os.setsid()

                            if env is None:
                                os.execvp(executable, args)
                            else:
                                os.execvpe(executable, args, env)

                        except:
                            exc_type, exc_value, tb = sys.exc_info()
                            # Save the traceback and attach it to the exception object
                            exc_lines = traceback.format_exception(
                                exc_type, exc_value, tb)
                            exc_value.child_traceback = ''.join(exc_lines)
                            os.write(errpipe_write, pickle.dumps(exc_value))

                        finally:
                            # Make sure that the process exits no matter what.
                            # The return code does not matter much as it won't be
                            # reported to the application
                            os._exit(1)

                    # Parent
                    self._child_created = True
                    if gc_was_enabled:
                        gc.enable()
                finally:
                    # be sure the FD is closed no matter what
                    os.close(errpipe_write)

                # self._devnull is not always defined.
                devnull_fd = getattr(self, '_devnull', None)
                if p2cread is not None and p2cwrite is not None and p2cread != devnull_fd:
                    os.close(p2cread)
                if c2pwrite is not None and c2pread is not None and c2pwrite != devnull_fd:
                    os.close(c2pwrite)
                if errwrite is not None and errread is not None and errwrite != devnull_fd:
                    os.close(errwrite)
                if devnull_fd is not None:
                    os.close(devnull_fd)
                # Prevent a double close of these fds from __init__ on error.
                self._closed_child_pipe_fds = True

                # Wait for exec to fail or succeed; possibly raising exception
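                # Wrapping the raw fd in gevent's FileObject makes the read
                # below cooperative: only this greenlet blocks until the child
                # either exec()s (the pipe closes and the read returns b"") or
                # writes back a pickled exception.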
                errpipe_read = FileObject(errpipe_read, 'rb')
                data = errpipe_read.read()
            finally:
                if hasattr(errpipe_read, 'close'):
                    errpipe_read.close()
                else:
                    os.close(errpipe_read)

            if data != b"":
                self.wait()
                child_exception = pickle.loads(data)
                for fd in (p2cwrite, c2pread, errread):
                    if fd is not None:
                        os.close(fd)
                raise child_exception

        def _handle_exitstatus(self, sts):
            if os.WIFSIGNALED(sts):
                self.returncode = -os.WTERMSIG(sts)
            elif os.WIFEXITED(sts):
                self.returncode = os.WEXITSTATUS(sts)
            else:
                # Should never happen
                raise RuntimeError("Unknown child exit status!")

        def _internal_poll(self):
            """Check if child process has terminated.  Returns returncode
            attribute.
            """
            if self.returncode is None:
                if get_hub() is not getcurrent():
                    sig_pending = getattr(self._loop, 'sig_pending', True)
                    if sig_pending:
                        sleep(0.00001)
            return self.returncode

        def wait(self, timeout=None):
            """Wait for child process to terminate.  Returns returncode
            attribute.

            :keyword timeout: The floating point number of seconds to wait.
                Under Python 2, this is a gevent extension, and we simply return if it
                expires. Under Python 3,
                if this time elapses without finishing the process, :exc:`TimeoutExpired`
                is raised."""
            result = self.result.wait(timeout=timeout)
            if PY3 and timeout is not None and not self.result.ready():
                raise TimeoutExpired(self.args, timeout)
            return result

        def send_signal(self, sig):
            """Send a signal to the process
            """
            os.kill(self.pid, sig)

        def terminate(self):
            """Terminate the process with SIGTERM
            """
            self.send_signal(signal.SIGTERM)

        def kill(self):
            """Kill the process with SIGKILL
            """
            self.send_signal(signal.SIGKILL)
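
# --- Usage sketch (added; not part of the example above). It assumes the class
# --- above is the cooperative Popen exported as gevent.subprocess.Popen, so the
# --- calls below block only the calling greenlet, not the whole process.
if __name__ == '__main__':
    from gevent.subprocess import Popen, PIPE

    proc = Popen(['echo', 'hello'], stdout=PIPE)
    out, _ = proc.communicate()   # cooperatively waits for the child to exit
    print(out)                    # b'hello\n' (bytes; universal_newlines is off)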
Example #30
0
class Popen(object):

    def __init__(self, args, bufsize=0, executable=None,
                 stdin=None, stdout=None, stderr=None,
                 preexec_fn=None, close_fds=False, shell=False,
                 cwd=None, env=None, universal_newlines=False,
                 startupinfo=None, creationflags=0, threadpool=None):
        """Create new Popen instance."""
        if not isinstance(bufsize, integer_types):
            raise TypeError("bufsize must be an integer")
        hub = get_hub()

        if mswindows:
            if preexec_fn is not None:
                raise ValueError("preexec_fn is not supported on Windows "
                                 "platforms")
            if close_fds and (stdin is not None or stdout is not None or
                              stderr is not None):
                raise ValueError("close_fds is not supported on Windows "
                                 "platforms if you redirect stdin/stdout/stderr")
            if threadpool is None:
                threadpool = hub.threadpool
            self.threadpool = threadpool
            self._waiting = False
        else:
            # POSIX
            if startupinfo is not None:
                raise ValueError("startupinfo is only supported on Windows "
                                 "platforms")
            if creationflags != 0:
                raise ValueError("creationflags is only supported on Windows "
                                 "platforms")
            assert threadpool is None
            self._loop = hub.loop

        self.stdin = None
        self.stdout = None
        self.stderr = None
        self.pid = None
        self.returncode = None
        self.universal_newlines = universal_newlines
        self.result = AsyncResult()

        # Input and output objects. The general principle is like
        # this:
        #
        # Parent                   Child
        # ------                   -----
        # p2cwrite   ---stdin--->  p2cread
        # c2pread    <--stdout---  c2pwrite
        # errread    <--stderr---  errwrite
        #
        # On POSIX, the child objects are file descriptors.  On
        # Windows, these are Windows file handles.  The parent objects
        # are file descriptors on both platforms.  The parent objects
        # are None when not using PIPEs. The child objects are None
        # when not redirecting.

        (p2cread, p2cwrite,
         c2pread, c2pwrite,
         errread, errwrite) = self._get_handles(stdin, stdout, stderr)

        self._execute_child(args, executable, preexec_fn, close_fds,
                            cwd, env, universal_newlines,
                            startupinfo, creationflags, shell,
                            p2cread, p2cwrite,
                            c2pread, c2pwrite,
                            errread, errwrite)

        if mswindows:
            if p2cwrite is not None:
                p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
            if c2pread is not None:
                c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
            if errread is not None:
                errread = msvcrt.open_osfhandle(errread.Detach(), 0)

        if p2cwrite is not None:
            self.stdin = FileObject(p2cwrite, 'wb')
        if c2pread is not None:
            if universal_newlines:
                self.stdout = FileObject(c2pread, 'rU')
            else:
                self.stdout = FileObject(c2pread, 'rb')
        if errread is not None:
            if universal_newlines:
                self.stderr = FileObject(errread, 'rU')
            else:
                self.stderr = FileObject(errread, 'rb')

    def __repr__(self):
        return '<%s at 0x%x pid=%r returncode=%r>' % (self.__class__.__name__, id(self), self.pid, self.returncode)

    def _on_child(self, watcher):
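        # Child-watcher callback: invoked by the event loop once the child has
        # been reaped. It decodes the raw wait status and wakes anything
        # blocked on self.result (wait(), rawlink(), communicate()).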
        watcher.stop()
        status = watcher.rstatus
        if os.WIFSIGNALED(status):
            self.returncode = -os.WTERMSIG(status)
        else:
            self.returncode = os.WEXITSTATUS(status)
        self.result.set(self.returncode)

    def communicate(self, input=None):
        """Interact with process: Send data to stdin.  Read data from
        stdout and stderr, until end-of-file is reached.  Wait for
        process to terminate.  The optional input argument should be a
        string to be sent to the child process, or None, if no data
        should be sent to the child.

        communicate() returns a tuple (stdout, stderr)."""
        greenlets = []
        if self.stdin:
            greenlets.append(spawn(write_and_close, self.stdin, input))
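        # write_and_close() (a module-level helper not shown here) feeds
        # `input` to the child's stdin in its own greenlet and then closes the
        # pipe so the child sees EOF.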

        if self.stdout:
            stdout = spawn(self.stdout.read)
            greenlets.append(stdout)
        else:
            stdout = None

        if self.stderr:
            stderr = spawn(self.stderr.read)
            greenlets.append(stderr)
        else:
            stderr = None

        joinall(greenlets)

        if self.stdout:
            self.stdout.close()
        if self.stderr:
            self.stderr.close()

        self.wait()
        return (None if stdout is None else stdout.value or '',
                None if stderr is None else stderr.value or '')

    def poll(self):
        return self._internal_poll()

    if mswindows:
        #
        # Windows methods
        #
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            if stdin is None and stdout is None and stderr is None:
                return (None, None, None, None, None, None)

            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None

            if stdin is None:
                p2cread = GetStdHandle(STD_INPUT_HANDLE)
                if p2cread is None:
                    p2cread, _ = CreatePipe(None, 0)
            elif stdin == PIPE:
                p2cread, p2cwrite = CreatePipe(None, 0)
            elif isinstance(stdin, int):
                p2cread = msvcrt.get_osfhandle(stdin)
            else:
                # Assuming file-like object
                p2cread = msvcrt.get_osfhandle(stdin.fileno())
            p2cread = self._make_inheritable(p2cread)

            if stdout is None:
                c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
                if c2pwrite is None:
                    _, c2pwrite = CreatePipe(None, 0)
            elif stdout == PIPE:
                c2pread, c2pwrite = CreatePipe(None, 0)
            elif isinstance(stdout, int):
                c2pwrite = msvcrt.get_osfhandle(stdout)
            else:
                # Assuming file-like object
                c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
            c2pwrite = self._make_inheritable(c2pwrite)

            if stderr is None:
                errwrite = GetStdHandle(STD_ERROR_HANDLE)
                if errwrite is None:
                    _, errwrite = CreatePipe(None, 0)
            elif stderr == PIPE:
                errread, errwrite = CreatePipe(None, 0)
            elif stderr == STDOUT:
                errwrite = c2pwrite
            elif isinstance(stderr, int):
                errwrite = msvcrt.get_osfhandle(stderr)
            else:
                # Assuming file-like object
                errwrite = msvcrt.get_osfhandle(stderr.fileno())
            errwrite = self._make_inheritable(errwrite)

            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite)

        def _make_inheritable(self, handle):
            """Return a duplicate of handle, which is inheritable"""
            return DuplicateHandle(GetCurrentProcess(),
                                   handle, GetCurrentProcess(), 0, 1,
                                   DUPLICATE_SAME_ACCESS)

        def _find_w9xpopen(self):
            """Find and return absolut path to w9xpopen.exe"""
            w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
                                    "w9xpopen.exe")
            if not os.path.exists(w9xpopen):
                # Eeek - file-not-found - possibly an embedding
                # situation - see if we can locate it in sys.exec_prefix
                w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
                                        "w9xpopen.exe")
                if not os.path.exists(w9xpopen):
                    raise RuntimeError("Cannot locate w9xpopen.exe, which is "
                                       "needed for Popen to work with your "
                                       "shell or platform.")
            return w9xpopen

        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           cwd, env, universal_newlines,
                           startupinfo, creationflags, shell,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite):
            """Execute program (MS Windows version)"""

            if not isinstance(args, string_types):
                args = list2cmdline(args)

            # Process startup details
            if startupinfo is None:
                startupinfo = STARTUPINFO()
            if None not in (p2cread, c2pwrite, errwrite):
                startupinfo.dwFlags |= STARTF_USESTDHANDLES
                startupinfo.hStdInput = p2cread
                startupinfo.hStdOutput = c2pwrite
                startupinfo.hStdError = errwrite

            if shell:
                startupinfo.dwFlags |= STARTF_USESHOWWINDOW
                startupinfo.wShowWindow = SW_HIDE
                comspec = os.environ.get("COMSPEC", "cmd.exe")
                args = '{} /c "{}"'.format(comspec, args)
                if GetVersion() >= 0x80000000 or os.path.basename(comspec).lower() == "command.com":
                    # Win9x, or using command.com on NT. We need to
                    # use the w9xpopen intermediate program. For more
                    # information, see KB Q150956
                    # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
                    w9xpopen = self._find_w9xpopen()
                    args = '"%s" %s' % (w9xpopen, args)
                    # Not passing CREATE_NEW_CONSOLE has been known to
                    # cause random failures on win9x.  Specifically a
                    # dialog: "Your program accessed mem currently in
                    # use at xxx" and a hopeful warning about the
                    # stability of your system.  Cost is Ctrl+C won't
                    # kill children.
                    creationflags |= CREATE_NEW_CONSOLE

            # Start the process
            try:
                hp, ht, pid, tid = CreateProcess(executable, args,
                                                 # no special security
                                                 None, None,
                                                 int(not close_fds),
                                                 creationflags,
                                                 env,
                                                 cwd,
                                                 startupinfo)
            except pywintypes.error as e:
                # Translate pywintypes.error to WindowsError, which is
                # a subclass of OSError.  FIXME: We should really
                # translate errno using _sys_errlist (or similar), but
                # how can this be done from Python?
                raise WindowsError(*e.args)
            finally:
                # Child is launched. Close the parent's copy of those pipe
                # handles that only the child should have open.  You need
                # to make sure that no handles to the write end of the
                # output pipe are maintained in this process or else the
                # pipe will not close when the child process exits and the
                # ReadFile will hang.
                if p2cread is not None:
                    p2cread.Close()
                if c2pwrite is not None:
                    c2pwrite.Close()
                if errwrite is not None:
                    errwrite.Close()

            # Retain the process handle, but close the thread handle
            self._handle = hp
            self.pid = pid
            ht.Close()

        def _internal_poll(self):
            """Check if child process has terminated.  Returns returncode
            attribute.
            """
            if self.returncode is None:
                if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
                    self.returncode = GetExitCodeProcess(self._handle)
                    self.result.set(self.returncode)
            return self.returncode

        def rawlink(self, callback):
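            # There is no SIGCHLD on Windows, so the first rawlink() lazily
            # starts a waiter that blocks on the process handle in the
            # threadpool and sets self.result when the child exits.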
            if not self.result.ready() and not self._waiting:
                self._waiting = True
                Greenlet.spawn(self._wait)
            self.result.rawlink(linkproxy(callback, self))
            # XXX unlink

        def _blocking_wait(self):
            WaitForSingleObject(self._handle, INFINITE)
            self.returncode = GetExitCodeProcess(self._handle)
            return self.returncode

        def _wait(self):
            self.threadpool.spawn(self._blocking_wait).rawlink(self.result)

        def wait(self, timeout=None):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            if self.returncode is None:
                if not self._waiting:
                    self._waiting = True
                    self._wait()
            return self.result.wait(timeout=timeout)

        def send_signal(self, sig):
            """Send a signal to the process
            """
            if sig == signal.SIGTERM:
                self.terminate()
            elif sig == signal.CTRL_C_EVENT:
                os.kill(self.pid, signal.CTRL_C_EVENT)
            elif sig == signal.CTRL_BREAK_EVENT:
                os.kill(self.pid, signal.CTRL_BREAK_EVENT)
            else:
                raise ValueError("Unsupported signal: {}".format(sig))

        def terminate(self):
            """Terminates the process
            """
            TerminateProcess(self._handle, 1)

        kill = terminate

    else:
        #
        # POSIX methods
        #

        def rawlink(self, callback):
            self.result.rawlink(linkproxy(callback, self))
        # XXX unlink

        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None

            if stdin is None:
                pass
            elif stdin == PIPE:
                p2cread, p2cwrite = self.pipe_cloexec()
            elif isinstance(stdin, int):
                p2cread = stdin
            else:
                # Assuming file-like object
                p2cread = stdin.fileno()

            if stdout is None:
                pass
            elif stdout == PIPE:
                c2pread, c2pwrite = self.pipe_cloexec()
            elif isinstance(stdout, int):
                c2pwrite = stdout
            else:
                # Assuming file-like object
                c2pwrite = stdout.fileno()

            if stderr is None:
                pass
            elif stderr == PIPE:
                errread, errwrite = self.pipe_cloexec()
            elif stderr == STDOUT:
                errwrite = c2pwrite
            elif isinstance(stderr, int):
                errwrite = stderr
            else:
                # Assuming file-like object
                errwrite = stderr.fileno()

            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite)

        def _set_cloexec_flag(self, fd, cloexec=True):
            try:
                cloexec_flag = fcntl.FD_CLOEXEC
            except AttributeError:
                cloexec_flag = 1

            old = fcntl.fcntl(fd, fcntl.F_GETFD)
            if cloexec:
                fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
            else:
                fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)

        def _remove_nonblock_flag(self, fd):
            flags = fcntl.fcntl(fd, fcntl.F_GETFL) & (~os.O_NONBLOCK)
            fcntl.fcntl(fd, fcntl.F_SETFL, flags)

        def pipe_cloexec(self):
            """Create a pipe with FDs set CLOEXEC."""
            # Pipes' FDs are set CLOEXEC by default because we don't want them
            # to be inherited by other subprocesses: the CLOEXEC flag is removed
            # from the child's FDs by _dup2(), between fork() and exec().
            # This is not atomic: we would need the pipe2() syscall for that.
            r, w = os.pipe()
            self._set_cloexec_flag(r)
            self._set_cloexec_flag(w)
            return r, w

        def _close_fds(self, but):
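            # Close every fd from 3 up to MAXFD except `but` (the error
            # pipe's write end), using os.closerange() where available.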
            if hasattr(os, 'closerange'):
                os.closerange(3, but)
                os.closerange(but + 1, MAXFD)
            else:
                for i in xrange(3, MAXFD):
                    if i == but:
                        continue
                    try:
                        os.close(i)
                    except:
                        pass

        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           cwd, env, universal_newlines,
                           startupinfo, creationflags, shell,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite):
            """Execute program (POSIX version)"""

            if isinstance(args, string_types):
                args = [args]
            else:
                args = list(args)

            if shell:
                args = ["/bin/sh", "-c"] + args
                if executable:
                    args[0] = executable

            if executable is None:
                executable = args[0]

            self._loop.install_sigchld()

            # For transferring a possible exec failure from child to parent:
            # the child pickles the exception and writes it to errpipe_write;
            # an empty read on errpipe_read means exec() succeeded.
            errpipe_read, errpipe_write = self.pipe_cloexec()
            try:
                try:
                    gc_was_enabled = gc.isenabled()
                    # Disable gc to avoid bug where gc -> file_dealloc ->
                    # write to stderr -> hang.  http://bugs.python.org/issue1336
                    gc.disable()
                    try:
                        self.pid = fork()
                    except:
                        if gc_was_enabled:
                            gc.enable()
                        raise
                    if self.pid == 0:
                        # Child
                        try:
                            # Close parent's pipe ends
                            if p2cwrite is not None:
                                os.close(p2cwrite)
                            if c2pread is not None:
                                os.close(c2pread)
                            if errread is not None:
                                os.close(errread)
                            os.close(errpipe_read)

                            # When duping fds, if there arises a situation
                            # where one of the fds is either 0, 1 or 2, it
                            # is possible that it is overwritten (#12607).
                            if c2pwrite == 0:
                                c2pwrite = os.dup(c2pwrite)
                            if errwrite == 0 or errwrite == 1:
                                errwrite = os.dup(errwrite)

                            # Dup fds for child
                            def _dup2(a, b):
                                # dup2() removes the CLOEXEC flag but
                                # we must do it ourselves if dup2()
                                # would be a no-op (issue #10806).
                                if a == b:
                                    self._set_cloexec_flag(a, False)
                                elif a is not None:
                                    os.dup2(a, b)
                                self._remove_nonblock_flag(b)
                            _dup2(p2cread, 0)
                            _dup2(c2pwrite, 1)
                            _dup2(errwrite, 2)

                            # Close pipe fds.  Make sure we don't close the
                            # same fd more than once, or standard fds.
                            closed = set([None])
                            for fd in [p2cread, c2pwrite, errwrite]:
                                if fd not in closed and fd > 2:
                                    os.close(fd)
                                    closed.add(fd)

                            # Close all other fds, if asked for
                            if close_fds:
                                self._close_fds(but=errpipe_write)

                            if cwd is not None:
                                os.chdir(cwd)

                            if preexec_fn:
                                preexec_fn()

                            if env is None:
                                os.execvp(executable, args)
                            else:
                                os.execvpe(executable, args, env)

                        except:
                            exc_type, exc_value, tb = sys.exc_info()
                            # Save the traceback and attach it to the exception object
                            exc_lines = traceback.format_exception(exc_type,
                                                                   exc_value,
                                                                   tb)
                            exc_value.child_traceback = ''.join(exc_lines)
                            os.write(errpipe_write, pickle.dumps(exc_value))

                        finally:
                            # Make sure that the process exits no matter what.
                            # The return code does not matter much as it won't be
                            # reported to the application
                            os._exit(1)

                    # Parent
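                    # Register a child watcher with the event loop so the
                    # child's exit status is collected asynchronously and
                    # handed to self._on_child.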
                    self._watcher = self._loop.child(self.pid)
                    self._watcher.start(self._on_child, self._watcher)

                    if gc_was_enabled:
                        gc.enable()
                finally:
                    # be sure the FD is closed no matter what
                    os.close(errpipe_write)

                if p2cread is not None and p2cwrite is not None:
                    os.close(p2cread)
                if c2pwrite is not None and c2pread is not None:
                    os.close(c2pwrite)
                if errwrite is not None and errread is not None:
                    os.close(errwrite)

                # Wait for exec to fail or succeed; possibly raising exception
                errpipe_read = FileObject(errpipe_read, 'rb')
                data = errpipe_read.read()
            finally:
                if hasattr(errpipe_read, 'close'):
                    errpipe_read.close()
                else:
                    os.close(errpipe_read)

            if data != b"":
                self.wait()
                child_exception = pickle.loads(data)
                for fd in (p2cwrite, c2pread, errread):
                    if fd is not None:
                        os.close(fd)
                raise child_exception

        def _handle_exitstatus(self, sts):
            if os.WIFSIGNALED(sts):
                self.returncode = -os.WTERMSIG(sts)
            elif os.WIFEXITED(sts):
                self.returncode = os.WEXITSTATUS(sts)
            else:
                # Should never happen
                raise RuntimeError("Unknown child exit status!")

        def _internal_poll(self):
            """Check if child process has terminated.  Returns returncode
            attribute.
            """
            if self.returncode is None:
                if get_hub() is not getcurrent():
                    sig_pending = getattr(self._loop, 'sig_pending', True)
                    if sig_pending:
                        sleep(0.00001)
            return self.returncode

        def wait(self, timeout=None):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            return self.result.wait(timeout=timeout)

        def send_signal(self, sig):
            """Send a signal to the process
            """
            os.kill(self.pid, sig)

        def terminate(self):
            """Terminate the process with SIGTERM
            """
            self.send_signal(signal.SIGTERM)

        def kill(self):
            """Kill the process with SIGKILL
            """
            self.send_signal(signal.SIGKILL)
Example #31
0
class Popen(object):

    def __init__(self, args, bufsize=None, executable=None,
                 stdin=None, stdout=None, stderr=None,
                 preexec_fn=None, close_fds=_PLATFORM_DEFAULT_CLOSE_FDS, shell=False,
                 cwd=None, env=None, universal_newlines=False,
                 startupinfo=None, creationflags=0, threadpool=None,
                 **kwargs):
        """Create new Popen instance."""

        if not PY3 and kwargs:
            raise TypeError("Got unexpected keyword arguments", kwargs)
        pass_fds = kwargs.pop('pass_fds', ())
        start_new_session = kwargs.pop('start_new_session', False)
        restore_signals = kwargs.pop('restore_signals', True)

        hub = get_hub()

        if bufsize is None:
            # bufsize has different defaults on Py3 and Py2
            if PY3:
                bufsize = -1
            else:
                bufsize = 0
        if not isinstance(bufsize, integer_types):
            raise TypeError("bufsize must be an integer")

        if mswindows:
            if preexec_fn is not None:
                raise ValueError("preexec_fn is not supported on Windows "
                                 "platforms")
            any_stdio_set = (stdin is not None or stdout is not None or
                             stderr is not None)
            if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
                if any_stdio_set:
                    close_fds = False
                else:
                    close_fds = True
            elif close_fds and any_stdio_set:
                raise ValueError("close_fds is not supported on Windows "
                                 "platforms if you redirect stdin/stdout/stderr")
            if threadpool is None:
                threadpool = hub.threadpool
            self.threadpool = threadpool
            self._waiting = False
        else:
            # POSIX
            if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
                # close_fds has different defaults on Py3/Py2
                if PY3:
                    close_fds = True
                else:
                    close_fds = False

            if pass_fds and not close_fds:
                import warnings
                warnings.warn("pass_fds overriding close_fds.", RuntimeWarning)
                close_fds = True
            if startupinfo is not None:
                raise ValueError("startupinfo is only supported on Windows "
                                 "platforms")
            if creationflags != 0:
                raise ValueError("creationflags is only supported on Windows "
                                 "platforms")
            assert threadpool is None
            self._loop = hub.loop

        if PY3:
            self.args = args
        self.stdin = None
        self.stdout = None
        self.stderr = None
        self.pid = None
        self.returncode = None
        self.universal_newlines = universal_newlines
        self.result = AsyncResult()

        # Input and output objects. The general principle is like
        # this:
        #
        # Parent                   Child
        # ------                   -----
        # p2cwrite   ---stdin--->  p2cread
        # c2pread    <--stdout---  c2pwrite
        # errread    <--stderr---  errwrite
        #
        # On POSIX, the child objects are file descriptors.  On
        # Windows, these are Windows file handles.  The parent objects
        # are file descriptors on both platforms.  The parent objects
        # are None when not using PIPEs. The child objects are None
        # when not redirecting.

        (p2cread, p2cwrite,
         c2pread, c2pwrite,
         errread, errwrite) = self._get_handles(stdin, stdout, stderr)

        # We wrap OS handles *before* launching the child, otherwise a
        # quickly terminating child could make our fds unwrappable
        # (see #8458).
        if mswindows:
            if p2cwrite is not None:
                p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
            if c2pread is not None:
                c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
            if errread is not None:
                errread = msvcrt.open_osfhandle(errread.Detach(), 0)

        if p2cwrite is not None:
            if PY3 and universal_newlines:
                # Under Python 3, if we left on the 'b' we'd get different results
                # depending on whether we used FileObjectPosix or FileObjectThread
                self.stdin = FileObject(p2cwrite, 'wb', bufsize)
                self.stdin._translate = True
                self.stdin.io = io.TextIOWrapper(self.stdin.io, write_through=True,
                                                 line_buffering=(bufsize == 1))
            else:
                self.stdin = FileObject(p2cwrite, 'wb', bufsize)
        if c2pread is not None:
            if universal_newlines:
                if PY3:
                    # FileObjectThread doesn't support the 'U' qualifier
                    # with a bufsize of 0
                    self.stdout = FileObject(c2pread, 'rb', bufsize)
                    # NOTE: Universal Newlines are broken on Windows/Py3, at least
                    # in some cases. This is true in the stdlib subprocess module
                    # as well; the following line would fix the test cases in
                    # test__subprocess.py that depend on python_universal_newlines,
                    # but would be inconsistent with the stdlib:
                    #msvcrt.setmode(self.stdout.fileno(), os.O_TEXT)
                    self.stdout.io = io.TextIOWrapper(self.stdout.io)
                    self.stdout.io.mode = 'r'
                    self.stdout._translate = True
                else:
                    self.stdout = FileObject(c2pread, 'rU', bufsize)
            else:
                self.stdout = FileObject(c2pread, 'rb', bufsize)
        if errread is not None:
            if universal_newlines:
                if PY3:
                    self.stderr = FileObject(errread, 'rb', bufsize)
                    self.stderr.io = io.TextIOWrapper(self.stderr.io)
                    self.stderr._translate = True
                else:
                    self.stderr = FileObject(errread, 'rU', bufsize)
            else:
                self.stderr = FileObject(errread, 'rb', bufsize)

        self._closed_child_pipe_fds = False
        try:
            self._execute_child(args, executable, preexec_fn, close_fds,
                                pass_fds, cwd, env, universal_newlines,
                                startupinfo, creationflags, shell,
                                p2cread, p2cwrite,
                                c2pread, c2pwrite,
                                errread, errwrite,
                                restore_signals, start_new_session)
        except:
            # Cleanup if the child failed starting.
            # (gevent: New in python3, but reported as gevent bug in #347.
            # Note that under Py2, any error raised below will replace the
            # original error so we have to use reraise)
            if not PY3:
                exc_info = sys.exc_info()
            for f in filter(None, (self.stdin, self.stdout, self.stderr)):
                try:
                    f.close()
                except (OSError, IOError):
                    pass  # Ignore EBADF or other errors.

            if not self._closed_child_pipe_fds:
                to_close = []
                if stdin == PIPE:
                    to_close.append(p2cread)
                if stdout == PIPE:
                    to_close.append(c2pwrite)
                if stderr == PIPE:
                    to_close.append(errwrite)
                if hasattr(self, '_devnull'):
                    to_close.append(self._devnull)
                for fd in to_close:
                    try:
                        os.close(fd)
                    except (OSError, IOError):
                        pass
            if not PY3:
                try:
                    reraise(*exc_info)
                finally:
                    del exc_info
            raise

    def __repr__(self):
        return '<%s at 0x%x pid=%r returncode=%r>' % (self.__class__.__name__, id(self), self.pid, self.returncode)

    def _on_child(self, watcher):
        watcher.stop()
        status = watcher.rstatus
        if os.WIFSIGNALED(status):
            self.returncode = -os.WTERMSIG(status)
        else:
            self.returncode = os.WEXITSTATUS(status)
        self.result.set(self.returncode)

    def _get_devnull(self):
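        # Lazily open os.devnull once and reuse the fd for every stream
        # redirected to DEVNULL; it is closed again after the child has been
        # launched (see _execute_child).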
        if not hasattr(self, '_devnull'):
            self._devnull = os.open(os.devnull, os.O_RDWR)
        return self._devnull

    _stdout_buffer = None
    _stderr_buffer = None

    def communicate(self, input=None, timeout=None):
        """Interact with process: Send data to stdin.  Read data from
        stdout and stderr, until end-of-file is reached.  Wait for
        process to terminate.  The optional input argument should be a
        string to be sent to the child process, or None, if no data
        should be sent to the child.

        communicate() returns a tuple (stdout, stderr).

        :keyword timeout: Under Python 2, this is a gevent extension; if
           given and it expires, we will raise :class:`gevent.timeout.Timeout`.
           Under Python 3, this raises the standard :exc:`TimeoutExpired` exception.
        """
        greenlets = []
        if self.stdin:
            greenlets.append(spawn(write_and_close, self.stdin, input))

        # If the timeout parameter is used, and the caller calls back after
        # getting a TimeoutExpired exception, we can wind up with multiple
        # greenlets trying to run and read from and close stdout/stderr.
        # That's bad because it can lead to 'RuntimeError: reentrant call in io.BufferedReader'.
        # We can't just kill the previous greenlets when a timeout happens,
        # though, because we risk losing the output collected by that greenlet
        # (and Python 3, where timeout is an official parameter, explicitly says
        # that no output should be lost in the event of a timeout.) Instead, we're
        # watching for the exception and ignoring it. It's not elegant,
        # but it works
        if self.stdout:
            def _read_out():
                try:
                    data = self.stdout.read()
                except RuntimeError:
                    return
                if self._stdout_buffer is not None:
                    self._stdout_buffer += data
                else:
                    self._stdout_buffer = data
            stdout = spawn(_read_out)
            greenlets.append(stdout)
        else:
            stdout = None

        if self.stderr:
            def _read_err():
                try:
                    data = self.stderr.read()
                except RuntimeError:
                    return
                if self._stderr_buffer is not None:
                    self._stderr_buffer += data
                else:
                    self._stderr_buffer = data
            stderr = spawn(_read_err)
            greenlets.append(stderr)
        else:
            stderr = None

        # If we were given stdin=stdout=stderr=None, we have no way to
        # communicate with the child, and thus no greenlets to wait
        # on. This is a nonsense case, but it comes up in the test
        # case for Python 3.5 (test_subprocess.py
        # RunFuncTestCase.test_timeout). Instead, we go directly to
        # self.wait
        if not greenlets and timeout is not None:
            result = self.wait(timeout=timeout)
            # Python 3 would have already raised, but Python 2 would not
            # so we need to do that manually
            if result is None:
                from gevent.timeout import Timeout
                raise Timeout(timeout)

        done = joinall(greenlets, timeout=timeout)
        if timeout is not None and len(done) != len(greenlets):
            if PY3:
                raise TimeoutExpired(self.args, timeout)
            from gevent.timeout import Timeout
            raise Timeout(timeout)

        if self.stdout:
            try:
                self.stdout.close()
            except RuntimeError:
                pass
        if self.stderr:
            try:
                self.stderr.close()
            except RuntimeError:
                pass
        self.wait()
        stdout_value = self._stdout_buffer
        self._stdout_buffer = None
        stderr_value = self._stderr_buffer
        self._stderr_buffer = None
        # XXX: Under python 3 in universal newlines mode we should be
        # returning str, not bytes
        return (None if stdout is None else stdout_value or b'',
                None if stderr is None else stderr_value or b'')

    def poll(self):
        return self._internal_poll()

    if PY3:
        def __enter__(self):
            return self

        def __exit__(self, type, value, traceback):
            if self.stdout:
                self.stdout.close()
            if self.stderr:
                self.stderr.close()
            try:  # Flushing a BufferedWriter may raise an error
                if self.stdin:
                    self.stdin.close()
            finally:
                # Wait for the process to terminate, to avoid zombies.
                # JAM: gevent: If the process never terminates, this
                # blocks forever.
                self.wait()

    if mswindows:
        #
        # Windows methods
        #
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            if stdin is None and stdout is None and stderr is None:
                return (None, None, None, None, None, None)

            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None

            try:
                DEVNULL
            except NameError:
                _devnull = object()
            else:
                _devnull = DEVNULL
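            # On Pythons without subprocess.DEVNULL, the fresh object() above
            # can never compare equal to a real argument, so the `== _devnull`
            # branches below are simply never taken there.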

            if stdin is None:
                p2cread = GetStdHandle(STD_INPUT_HANDLE)
                if p2cread is None:
                    p2cread, _ = CreatePipe(None, 0)
                    if PY3:
                        p2cread = Handle(p2cread)
                        _winapi.CloseHandle(_)
            elif stdin == PIPE:
                p2cread, p2cwrite = CreatePipe(None, 0)
                if PY3:
                    p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite)
            elif stdin == _devnull:
                p2cread = msvcrt.get_osfhandle(self._get_devnull())
            elif isinstance(stdin, int):
                p2cread = msvcrt.get_osfhandle(stdin)
            else:
                # Assuming file-like object
                p2cread = msvcrt.get_osfhandle(stdin.fileno())
            p2cread = self._make_inheritable(p2cread)

            if stdout is None:
                c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
                if c2pwrite is None:
                    _, c2pwrite = CreatePipe(None, 0)
                    if PY3:
                        c2pwrite = Handle(c2pwrite)
                        _winapi.CloseHandle(_)
            elif stdout == PIPE:
                c2pread, c2pwrite = CreatePipe(None, 0)
                if PY3:
                    c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite)
            elif stdout == _devnull:
                c2pwrite = msvcrt.get_osfhandle(self._get_devnull())
            elif isinstance(stdout, int):
                c2pwrite = msvcrt.get_osfhandle(stdout)
            else:
                # Assuming file-like object
                c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
            c2pwrite = self._make_inheritable(c2pwrite)

            if stderr is None:
                errwrite = GetStdHandle(STD_ERROR_HANDLE)
                if errwrite is None:
                    _, errwrite = CreatePipe(None, 0)
                    if PY3:
                        errwrite = Handle(errwrite)
                        _winapi.CloseHandle(_)
            elif stderr == PIPE:
                errread, errwrite = CreatePipe(None, 0)
                if PY3:
                    errread, errwrite = Handle(errread), Handle(errwrite)
            elif stderr == STDOUT:
                errwrite = c2pwrite
            elif stderr == _devnull:
                errwrite = msvcrt.get_osfhandle(self._get_devnull())
            elif isinstance(stderr, int):
                errwrite = msvcrt.get_osfhandle(stderr)
            else:
                # Assuming file-like object
                errwrite = msvcrt.get_osfhandle(stderr.fileno())
            errwrite = self._make_inheritable(errwrite)

            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite)

        def _make_inheritable(self, handle):
            """Return a duplicate of handle, which is inheritable"""
            return DuplicateHandle(GetCurrentProcess(),
                                   handle, GetCurrentProcess(), 0, 1,
                                   DUPLICATE_SAME_ACCESS)

        def _find_w9xpopen(self):
            """Find and return absolut path to w9xpopen.exe"""
            w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
                                    "w9xpopen.exe")
            if not os.path.exists(w9xpopen):
                # Eeek - file-not-found - possibly an embedding
                # situation - see if we can locate it in sys.exec_prefix
                w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
                                        "w9xpopen.exe")
                if not os.path.exists(w9xpopen):
                    raise RuntimeError("Cannot locate w9xpopen.exe, which is "
                                       "needed for Popen to work with your "
                                       "shell or platform.")
            return w9xpopen

        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           pass_fds, cwd, env, universal_newlines,
                           startupinfo, creationflags, shell,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite,
                           unused_restore_signals, unused_start_new_session):
            """Execute program (MS Windows version)"""

            assert not pass_fds, "pass_fds not supported on Windows."

            if not isinstance(args, string_types):
                args = list2cmdline(args)

            # Process startup details
            if startupinfo is None:
                startupinfo = STARTUPINFO()
            if None not in (p2cread, c2pwrite, errwrite):
                startupinfo.dwFlags |= STARTF_USESTDHANDLES
                startupinfo.hStdInput = p2cread
                startupinfo.hStdOutput = c2pwrite
                startupinfo.hStdError = errwrite

            if shell:
                startupinfo.dwFlags |= STARTF_USESHOWWINDOW
                startupinfo.wShowWindow = SW_HIDE
                comspec = os.environ.get("COMSPEC", "cmd.exe")
                args = '{} /c "{}"'.format(comspec, args)
                if GetVersion() >= 0x80000000 or os.path.basename(comspec).lower() == "command.com":
                    # Win9x, or using command.com on NT. We need to
                    # use the w9xpopen intermediate program. For more
                    # information, see KB Q150956
                    # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
                    w9xpopen = self._find_w9xpopen()
                    args = '"%s" %s' % (w9xpopen, args)
                    # Not passing CREATE_NEW_CONSOLE has been known to
                    # cause random failures on win9x.  Specifically a
                    # dialog: "Your program accessed mem currently in
                    # use at xxx" and a hopeful warning about the
                    # stability of your system.  Cost is Ctrl+C won't
                    # kill children.
                    creationflags |= CREATE_NEW_CONSOLE

            # Start the process
            try:
                hp, ht, pid, tid = CreateProcess(executable, args,
                                                 # no special security
                                                 None, None,
                                                 int(not close_fds),
                                                 creationflags,
                                                 env,
                                                 cwd,
                                                 startupinfo)
            except IOError as e: # From 2.6 on, pywintypes.error was defined as IOError
                # Translate pywintypes.error to WindowsError, which is
                # a subclass of OSError.  FIXME: We should really
                # translate errno using _sys_errlist (or similar), but
                # how can this be done from Python?
                if PY3:
                    raise # don't remap here
                raise WindowsError(*e.args)
            finally:
                # Child is launched. Close the parent's copy of those pipe
                # handles that only the child should have open.  You need
                # to make sure that no handles to the write end of the
                # output pipe are maintained in this process or else the
                # pipe will not close when the child process exits and the
                # ReadFile will hang.
                def _close(x):
                    if x is not None and x != -1:
                        if hasattr(x, 'Close'):
                            x.Close()
                        else:
                            _winapi.CloseHandle(x)

                _close(p2cread)
                _close(c2pwrite)
                _close(errwrite)
                if hasattr(self, '_devnull'):
                    os.close(self._devnull)

            # Retain the process handle, but close the thread handle
            self._child_created = True
            self._handle = Handle(hp) if not hasattr(hp, 'Close') else hp
            self.pid = pid
            _winapi.CloseHandle(ht) if not hasattr(ht, 'Close') else ht.Close()

        def _internal_poll(self):
            """Check if child process has terminated.  Returns returncode
            attribute.
            """
            if self.returncode is None:
                if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
                    self.returncode = GetExitCodeProcess(self._handle)
                    self.result.set(self.returncode)
            return self.returncode

        def rawlink(self, callback):
            if not self.result.ready() and not self._waiting:
                self._waiting = True
                Greenlet.spawn(self._wait)
            self.result.rawlink(linkproxy(callback, self))
            # XXX unlink

        def _blocking_wait(self):
            WaitForSingleObject(self._handle, INFINITE)
            self.returncode = GetExitCodeProcess(self._handle)
            return self.returncode

        def _wait(self):
            self.threadpool.spawn(self._blocking_wait).rawlink(self.result)

        def wait(self, timeout=None):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            if self.returncode is None:
                if not self._waiting:
                    self._waiting = True
                    self._wait()
            result = self.result.wait(timeout=timeout)
            if PY3 and timeout is not None and not self.result.ready():
                raise TimeoutExpired(self.args, timeout)
            return result

        def send_signal(self, sig):
            """Send a signal to the process
            """
            if sig == signal.SIGTERM:
                self.terminate()
            elif sig == signal.CTRL_C_EVENT:
                os.kill(self.pid, signal.CTRL_C_EVENT)
            elif sig == signal.CTRL_BREAK_EVENT:
                os.kill(self.pid, signal.CTRL_BREAK_EVENT)
            else:
                raise ValueError("Unsupported signal: {}".format(sig))

        def terminate(self):
            """Terminates the process
            """
            TerminateProcess(self._handle, 1)

        kill = terminate

    else:
        #
        # POSIX methods
        #

        def rawlink(self, callback):
            self.result.rawlink(linkproxy(callback, self))
        # XXX unlink

        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None

            try:
                DEVNULL
            except NameError:
                _devnull = object()
            else:
                _devnull = DEVNULL

            if stdin is None:
                pass
            elif stdin == PIPE:
                p2cread, p2cwrite = self.pipe_cloexec()
            elif stdin == _devnull:
                p2cread = self._get_devnull()
            elif isinstance(stdin, int):
                p2cread = stdin
            else:
                # Assuming file-like object
                p2cread = stdin.fileno()

            if stdout is None:
                pass
            elif stdout == PIPE:
                c2pread, c2pwrite = self.pipe_cloexec()
            elif stdout == _devnull:
                c2pwrite = self._get_devnull()
            elif isinstance(stdout, int):
                c2pwrite = stdout
            else:
                # Assuming file-like object
                c2pwrite = stdout.fileno()

            if stderr is None:
                pass
            elif stderr == PIPE:
                errread, errwrite = self.pipe_cloexec()
            elif stderr == STDOUT:
                errwrite = c2pwrite
            elif stderr == _devnull:
                errwrite = self._get_devnull()
            elif isinstance(stderr, int):
                errwrite = stderr
            else:
                # Assuming file-like object
                errwrite = stderr.fileno()

            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite)

        def _set_cloexec_flag(self, fd, cloexec=True):
            try:
                cloexec_flag = fcntl.FD_CLOEXEC
            except AttributeError:
                cloexec_flag = 1

            old = fcntl.fcntl(fd, fcntl.F_GETFD)
            if cloexec:
                fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
            else:
                fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)

        def _remove_nonblock_flag(self, fd):
            flags = fcntl.fcntl(fd, fcntl.F_GETFL) & (~os.O_NONBLOCK)
            fcntl.fcntl(fd, fcntl.F_SETFL, flags)

        def pipe_cloexec(self):
            """Create a pipe with FDs set CLOEXEC."""
            # Pipes' FDs are set CLOEXEC by default because we don't want them
            # to be inherited by other subprocesses: the CLOEXEC flag is removed
            # from the child's FDs by _dup2(), between fork() and exec().
            # This is not atomic: we would need the pipe2() syscall for that.
            r, w = os.pipe()
            self._set_cloexec_flag(r)
            self._set_cloexec_flag(w)
            return r, w

        def _close_fds(self, keep):
            # `keep` is a set of fds, so we
            # use os.closerange from 3 to min(keep)
            # and then from max(keep + 1) to MAXFD and
            # loop through filling in the gaps.
            # Under new python versions, we need to explicitly set
            # passed fds to be inheritable or they will go away on exec
            if hasattr(os, 'set_inheritable'):
                set_inheritable = os.set_inheritable
            else:
                set_inheritable = lambda i, v: True
            if hasattr(os, 'closerange'):
                keep = sorted(keep)
                min_keep = min(keep)
                max_keep = max(keep)
                os.closerange(3, min_keep)
                os.closerange(max_keep + 1, MAXFD)
                for i in xrange(min_keep, max_keep):
                    if i in keep:
                        set_inheritable(i, True)
                        continue

                    try:
                        os.close(i)
                    except:
                        pass
            else:
                for i in xrange(3, MAXFD):
                    if i in keep:
                        set_inheritable(i, True)
                        continue
                    try:
                        os.close(i)
                    except:
                        pass

        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           pass_fds, cwd, env, universal_newlines,
                           startupinfo, creationflags, shell,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite,
                           restore_signals, start_new_session):
            """Execute program (POSIX version)"""

            if PY3 and isinstance(args, (str, bytes)):
                args = [args]
            elif not PY3 and isinstance(args, string_types):
                args = [args]
            else:
                args = list(args)

            if shell:
                args = ["/bin/sh", "-c"] + args
                if executable:
                    args[0] = executable

            if executable is None:
                executable = args[0]

            self._loop.install_sigchld()

            # For transferring possible exec failure from child to parent
            # The first char specifies the exception type: 0 means
            # OSError, 1 means some other error.
            errpipe_read, errpipe_write = self.pipe_cloexec()
            # errpipe_write must not be in the standard io 0, 1, or 2 fd range.
            low_fds_to_close = []
            while errpipe_write < 3:
                low_fds_to_close.append(errpipe_write)
                errpipe_write = os.dup(errpipe_write)
            for low_fd in low_fds_to_close:
                os.close(low_fd)
            try:
                try:
                    gc_was_enabled = gc.isenabled()
                    # Disable gc to avoid bug where gc -> file_dealloc ->
                    # write to stderr -> hang.  http://bugs.python.org/issue1336
                    gc.disable()
                    try:
                        self.pid = fork_and_watch(self._on_child, self._loop, True, fork)
                    except:
                        if gc_was_enabled:
                            gc.enable()
                        raise
                    if self.pid == 0:
                        # Child
                        try:
                            # Close parent's pipe ends
                            if p2cwrite is not None:
                                os.close(p2cwrite)
                            if c2pread is not None:
                                os.close(c2pread)
                            if errread is not None:
                                os.close(errread)
                            os.close(errpipe_read)

                            # When duping fds, if there arises a situation
                            # where one of the fds is either 0, 1 or 2, it
                            # is possible that it is overwritten (#12607).
                            if c2pwrite == 0:
                                c2pwrite = os.dup(c2pwrite)
                            if errwrite == 0 or errwrite == 1:
                                errwrite = os.dup(errwrite)

                            # Dup fds for child
                            def _dup2(a, b):
                                # dup2() removes the CLOEXEC flag but
                                # we must do it ourselves if dup2()
                                # would be a no-op (issue #10806).
                                if a == b:
                                    self._set_cloexec_flag(a, False)
                                elif a is not None:
                                    os.dup2(a, b)
                                self._remove_nonblock_flag(b)
                            _dup2(p2cread, 0)
                            _dup2(c2pwrite, 1)
                            _dup2(errwrite, 2)

                            # Close pipe fds.  Make sure we don't close the
                            # same fd more than once, or standard fds.
                            closed = set([None])
                            for fd in [p2cread, c2pwrite, errwrite]:
                                if fd not in closed and fd > 2:
                                    os.close(fd)
                                    closed.add(fd)

                            if cwd is not None:
                                os.chdir(cwd)

                            if preexec_fn:
                                preexec_fn()

                            # Close all other fds, if asked for. This must be done
                            # after preexec_fn runs.
                            if close_fds:
                                fds_to_keep = set(pass_fds)
                                fds_to_keep.add(errpipe_write)
                                self._close_fds(fds_to_keep)
                            elif hasattr(os, 'get_inheritable'):
                                # close_fds was false, and we're on
                                # Python 3.4 or newer, so "all file
                                # descriptors except standard streams
                                # are closed, and inheritable handles
                                # are only inherited if the close_fds
                                # parameter is False."
                                for i in xrange(3, MAXFD):
                                    try:
                                        if i == errpipe_write or os.get_inheritable(i):
                                            continue
                                        os.close(i)
                                    except:
                                        pass

                            if restore_signals:
                                # restore the documented signals back to sig_dfl;
                                # not all will be defined on every platform
                                for sig in 'SIGPIPE', 'SIGXFZ', 'SIGXFSZ':
                                    sig = getattr(signal, sig, None)
                                    if sig is not None:
                                        signal.signal(sig, signal.SIG_DFL)

                            if start_new_session:
                                os.setsid()

                            if env is None:
                                os.execvp(executable, args)
                            else:
                                os.execvpe(executable, args, env)

                        except:
                            exc_type, exc_value, tb = sys.exc_info()
                            # Save the traceback and attach it to the exception object
                            exc_lines = traceback.format_exception(exc_type,
                                                                   exc_value,
                                                                   tb)
                            exc_value.child_traceback = ''.join(exc_lines)
                            os.write(errpipe_write, pickle.dumps(exc_value))

                        finally:
                            # Make sure that the process exits no matter what.
                            # The return code does not matter much as it won't be
                            # reported to the application
                            os._exit(1)

                    # Parent
                    self._child_created = True
                    if gc_was_enabled:
                        gc.enable()
                finally:
                    # be sure the FD is closed no matter what
                    os.close(errpipe_write)

                # self._devnull is not always defined.
                devnull_fd = getattr(self, '_devnull', None)
                if p2cread is not None and p2cwrite is not None and p2cread != devnull_fd:
                    os.close(p2cread)
                if c2pwrite is not None and c2pread is not None and c2pwrite != devnull_fd:
                    os.close(c2pwrite)
                if errwrite is not None and errread is not None and errwrite != devnull_fd:
                    os.close(errwrite)
                if devnull_fd is not None:
                    os.close(devnull_fd)
                # Prevent a double close of these fds from __init__ on error.
                self._closed_child_pipe_fds = True

                # Wait for exec to fail or succeed; possibly raising exception
                errpipe_read = FileObject(errpipe_read, 'rb')
                data = errpipe_read.read()
            finally:
                if hasattr(errpipe_read, 'close'):
                    errpipe_read.close()
                else:
                    os.close(errpipe_read)

            if data != b"":
                self.wait()
                child_exception = pickle.loads(data)
                for fd in (p2cwrite, c2pread, errread):
                    if fd is not None:
                        os.close(fd)
                raise child_exception

        def _handle_exitstatus(self, sts):
            if os.WIFSIGNALED(sts):
                self.returncode = -os.WTERMSIG(sts)
            elif os.WIFEXITED(sts):
                self.returncode = os.WEXITSTATUS(sts)
            else:
                # Should never happen
                raise RuntimeError("Unknown child exit status!")

        def _internal_poll(self):
            """Check if child process has terminated.  Returns returncode
            attribute.
            """
            if self.returncode is None:
                if get_hub() is not getcurrent():
                    sig_pending = getattr(self._loop, 'sig_pending', True)
                    if sig_pending:
                        sleep(0.00001)
            return self.returncode

        def wait(self, timeout=None):
            """Wait for child process to terminate.  Returns returncode
            attribute.

            :keyword timeout: The floating point number of seconds to wait.
                Under Python 2, this is a gevent extension, and we simply return if it
                expires. Under Python 3,
                if this time elapses without finishing the process, :exc:`TimeoutExpired`
                is raised."""
            result = self.result.wait(timeout=timeout)
            if PY3 and timeout is not None and not self.result.ready():
                raise TimeoutExpired(self.args, timeout)
            return result

        def send_signal(self, sig):
            """Send a signal to the process
            """
            os.kill(self.pid, sig)

        def terminate(self):
            """Terminate the process with SIGTERM
            """
            self.send_signal(signal.SIGTERM)

        def kill(self):
            """Kill the process with SIGKILL
            """
            self.send_signal(signal.SIGKILL)
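
The POSIX and Windows branches above reimplement the stdlib subprocess.Popen methods so that wait(), poll() and pipe I/O cooperate with the gevent hub instead of blocking the whole process. A minimal usage sketch, assuming this class is the one gevent ships as gevent.subprocess.Popen and that PIPE and TimeoutExpired are re-exported next to it:

# Hedged sketch: cooperative subprocess usage under gevent.
import gevent
from gevent.subprocess import Popen, PIPE, TimeoutExpired

def run_one(i):
    # Reading stdout only blocks this greenlet; other greenlets keep running.
    p = Popen(["echo", str(i)], stdout=PIPE)
    out = p.stdout.read()
    p.wait()
    return p.returncode, out

jobs = [gevent.spawn(run_one, i) for i in range(3)]
gevent.joinall(jobs)

# wait(timeout=...) follows the docstring above: on Python 3 an expired
# timeout raises TimeoutExpired, on Python 2 it simply returns.
slow = Popen(["sleep", "5"])
try:
    slow.wait(timeout=0.1)
except TimeoutExpired:
    slow.kill()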
Beispiel #32
0
class ChildrenMonitor(object):
    """Simple monitor that monitors the children of a node and their
    content.
    """
    _STOP_REQUEST = object()

    def __init__(self, client, path, into, factory, args, listener):
        self.client = client
        self.path = path
        self.into = into if into is not None else {}
        self.factory = factory if factory is not None else str
        self.args = args
        self.listener = listener or MonitorListener()
        self.started = AsyncResult()
        self.queue = Queue()
        self.stats = {}

    def _monitor(self):
        """Run the monitoring loop."""
        def watcher(event):
            self.queue.put(event)

        while True:
            try:
                children = self.client.get_children(self.path, watcher)
            except zookeeper.NoNodeException:
                if not self.started.ready():
                    self.started.set(None)
                gevent.sleep(1)
                continue
            except Exception, err:
                if not self.started.ready():
                    self.started.set_exception(err)
                    break
                raise

            for child in children:
                if child not in self.stats:
                    data, stat = self.client.get(os.path.join(self.path, child))
                    self.into[child] = self.factory(data, *self.args)
                    self.listener.created(child, self.into[child])
                    self.stats[child] = stat
                else:
                    data, stat = self.client.get(os.path.join(self.path, child))
                    if stat['version'] != self.stats[child]['version']:
                        self.into[child] = self.factory(data, *self.args)
                        self.listener.modified(child, self.into[child])
                    self.stats[child] = stat
            for child in self.into.keys():
                if child not in children:
                    del self.into[child]
                    del self.stats[child]
                    self.listener.deleted(child)

            if not self.started.ready():
                self.started.set(None)

            self.listener.commit()

            event = self.queue.get()
            if event is self._STOP_REQUEST:
                break
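
ChildrenMonitor above keeps `into` in sync with the children of a znode and notifies the listener on create/modify/delete, committing after each pass. A hedged usage sketch; the zookeeper `client` object comes from the surrounding project, and the MonitorListener subclass below is illustrative:

# Hedged sketch: driving ChildrenMonitor. `client` is assumed to be the
# zookeeper client object the rest of this module constructs.
import gevent

class PrintingListener(MonitorListener):
    def created(self, name, value):
        print("created %s: %r" % (name, value))
    def modified(self, name, value):
        print("modified %s: %r" % (name, value))
    def deleted(self, name):
        print("deleted %s" % (name,))
    def commit(self):
        pass

services = {}
monitor = ChildrenMonitor(client, "/services", into=services,
                          factory=str, args=(), listener=PrintingListener())
gevent.spawn(monitor._monitor)
monitor.started.get()      # blocks until the first snapshot of /services is loaded
# ... later, stop the loop by queueing the stop sentinel:
monitor.queue.put(monitor._STOP_REQUEST)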
class TransactionDummy(ATransaction):
    ping_timeout = 5  # sec
    result_timeout = 20  # sec

    def __init__(self,
                 callback_url: str,
                 local_timeout: float,
                 ping_timeout=None,
                 result_timeout=None):
        super().__init__(ObjectId())
        self.callback_url = callback_url
        self.local_timeout = local_timeout
        self.ping_timeout = ping_timeout if ping_timeout is not None else TransactionDummy.ping_timeout  # type: float
        self.result_timeout = result_timeout if result_timeout is not None else TransactionDummy.result_timeout  # type: float

        self.key = sha256(
            bytes(str(self.id) +
                  str(int(time.time() * 10**6) ^ randint(0, 2**20)),
                  encoding="utf-8")).hexdigest()

        debug_SSE.event({
            "event": "init",
            "t": datetime.now(),
            "data": {
                "callback_url": self.callback_url,
                "local_timeout": self.local_timeout * 1000,
                "result_timeout": self.result_timeout * 1000,
                "ping_timeout": self.ping_timeout * 1000,
                "key": self.key,
                "_id": self.id
            }
        })  # DEBUG init

        self._ping = Event()
        self.result = AsyncResult()
        self.ping_timeout_thread_obj = None  # type: Greenlet
        self.result_thread_obj = None  # type: Greenlet

    @g_async
    def _spawn(self):
        self.ping_timeout_thread_obj = self.ping_timeout_thread()  # THREAD:1, loop

    # wait((self.ready_commit, self.fail), timeout=self.local_timeout)  # BLOCK, timeout
    # wait((self.commit, self.fail))  # BLOCK

    @g_async
    def ping_timeout_thread(self):
        while not (self.done.ready() or self.fail.ready()):
            debug_SSE.event({
                "event": "wait_ping",
                "t": datetime.now(),
                "data": None
            })  # DEBUG wait_ping
            w = wait((self._ping, self.done, self.fail),
                     count=1,
                     timeout=self.ping_timeout * 2)  # BLOCK, timeout
            if not len(w):
                debug_SSE.event({
                    "event": "fail",
                    "t": datetime.now(),
                    "data": "ping timeout"
                })  # DEBUG ping timeout
                self.fail.set()  # EMIT(fail)
                break

            if self._ping.ready():
                debug_SSE.event({
                    "event": "ping",
                    "t": datetime.now(),
                    "data": None
                })  # DEBUG ping
                self._ping.clear()  # EMIT(-ping)
                sleep()

    def do_work(self, resource):
        self.result_thread_obj = self.result_thread(resource)  # THREAD:1

    @g_async
    def result_thread(self, resource):
        sleep(self.result_timeout)  # BLOCK, sleep
        if not (self.ready_commit.ready() or self.fail.ready()):
            self.result.set(resource)  # EMIT(result)
            self.ready_commit.set()  # EMIT(ready_commit)
            debug_SSE.event({
                "event": "ready_commit",
                "t": datetime.now(),
                "data": None
            })  # DEBUG ready_commit
            data = {"key": self.key, "response": {"data": self.result.get()}}
            rp = requests.put(self.callback_url,
                              headers={"Connection": "close"},
                              json=data,
                              timeout=5)

    # else:
    # 	raise Exception("error during work")

    def ping(self) -> bool:
        if not (self.fail.ready() or self.done.ready()):
            self._ping.set()  # EMIT(ping)
            return True
        return False

    @g_async
    def do_commit(self):
        if not self.fail.ready():
            if self.ready_commit.ready() and self.result.ready():
                self.commit.set()  # EMIT(commit)
                debug_SSE.event({
                    "event": "commit",
                    "t": datetime.now(),
                    "data": None
                })  # DEBUG commit
            else:
                raise Exception("Error during commit")

            sleep(randint(self.ping_timeout - 2, self.ping_timeout + 2))

            data = {"key": self.key, "done": True}
            rp = requests.put(self.callback_url,
                              headers={"Connection": "close"},
                              json=data)
            debug_SSE.event({
                "event": "done",
                "t": datetime.now(),
                "data": None
            })  # DEBUG done

    @g_async
    def do_rollback(self):
        self.fail.set()  # EMIT(fail)
        debug_SSE.event({
            "event": "rollback",
            "t": datetime.now(),
            "data": None
        })  # DEBUG rollback
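
TransactionDummy above models one participant in a callback-driven transaction: the coordinator has to keep calling ping() faster than ping_timeout * 2, result_thread produces the result and sets ready_commit, and do_commit()/do_rollback() finish the exchange by PUTing to callback_url. A heavily hedged driver sketch; ATransaction, g_async and the ready_commit/done/fail events come from the surrounding project and are only assumed here:

# Hedged sketch of the call order a coordinator-side test might use.
import gevent

tx = TransactionDummy(callback_url="http://coordinator.example/cb",
                      local_timeout=30.0)
tx._spawn()                  # assumed to start the ping-timeout watchdog greenlet
tx.do_work({"rows": 42})     # result becomes available after result_timeout

def keep_alive(tx):
    # ping often enough that the wait() in ping_timeout_thread never comes back empty
    while tx.ping():
        gevent.sleep(tx.ping_timeout / 2.0)

gevent.spawn(keep_alive, tx)
tx.ready_commit.wait()       # assumed Event supplied by the ATransaction base
tx.do_commit()               # or tx.do_rollback() if the coordinator aborts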
Beispiel #34
0
class GClient(object):
	"""
	A generic gevent-based network client, that implements common send and receive functionality.
	Useful members:
		group: A gevent.pool.Group() tied to the lifetime of the client. When stopping, all greenlets
		       in the group will be killed.
		started: True if the client has been started
		stopped: True if the client has been stopped
		running: True if the client has been started but not stopped
	"""

	def __init__(self, logger=None):
		self.group = Group()
		self.started = False
		self._send_queue = Queue()
		self._stopping = False
		self._stopped = AsyncResult()
		if not hasattr(self, 'logger'): # let subclasses override if they want
			if not logger:
				logger = logging.getLogger('gclient').getChild(type(self).__name__)
			self.logger = logger

	def start(self):
		"""Start the client, performing some connection step and beginning processing."""
		if self.started:
			raise Exception("Already started")
		self.started = True
		self.logger.debug("{} starting".format(self))
		self._start()
		self._send_loop_worker = self.group.spawn(self._send_loop)
		self._recv_loop_worker = self.group.spawn(self._recv_loop)
		self.logger.info("{} started".format(self))

	def _start(self):
		"""Override this with code that creates and initializes a connection"""

	def stop(self, ex=None):
		"""Stop the client, optionally referencing some exception.
		This will kill all greenlets in group and do any specific stop handling.
		Anyone waiting on the client stopping will have the exception raised, if any.
		"""
		if self._stopping:
			self.wait_for_stop()
			return
		if not self.started:
			self.started = True
		self._stopping = True

		if ex:
			self.logger.info("{} stopping with error".format(self), exc_info=True)
		else:
			self.logger.info("{} stopping".format(self))

		# since the greenlet calling stop() might be in self.group, we make a new greenlet to do the work
		@gevent.spawn
		def stop_worker():
			self.group.kill(block=True)
			while not self._send_queue.empty():
				msg, event = self._send_queue.get(block=False)
				event.set()
			self._stop(ex)
			if ex:
				self._stopped.set_exception(ex)
			else:
				self._stopped.set(None)
			self.logger.debug("{} fully stopped".format(self))

		stop_worker.get()

	def _stop(self, ex=None):
		"""Optionally override this with specific cleanup code for stopping the client,
		such as closing the connection."""
		pass

	def wait_for_stop(self):
		"""Block until the client has stopped, re-raising the exception it was stopped with, if any."""
		self._stopped.get()

	@property
	def stopped(self):
		return self._stopped.ready()

	@property
	def running(self):
		return self.started and not self.stopped

	def send(self, msg, block=False):
		"""Enqueue some kind of message to be sent. If block=True, block until actually sent.
		If block=False, returns a gevent.event.Event() that will be set when actually sent,
		or the client is stopped.
		Note that messages are sent in order, so using either of these shouldn't often be needed.
		"""
		if self._stopping:
			raise Exception("Can't send to stopped client")
		event = Event()
		self._send_queue.put((msg, event))
		if block:
			event.wait()
		else:
			return event

	def _send_loop(self):
		try:
			for msg, event in self._send_queue:
				self._send(msg)
				event.set()
		except Exception as ex:
			self.stop(ex)

	def _send(self, msg):
		"""Override this with specific code for sending a message. It may raise to indicate a failure
		that will stop the client."""

	def _recv_loop(self):
		try:
			self._receive()
		except Exception as ex:
			self.stop(ex)
		else:
			self.stop()

	def _receive(self):
		"""Override this with code that receives data. It may return to indicate a graceful close,
Beispiel #35
0
class OutTimer(Collected):
    """Timer for timed outputs"""
    storage = OutTimers.storage
    q = None
    _timer = None

    def __init__(self, parent, timer, nextval):
        global tseq
        tseq += 1
        self.name = parent.name + (str(tseq), )
        super(OutTimer, self).__init__()
        self.parent = parent
        self.end = timer
        self.val = nextval
        self.q = AsyncResult()
        self._start()

    def info(self):
        return "%s:%s" % (self.name, self.val)

    def list(self):
        n = now()
        yield super(OutTimer, self)
        yield ("output", self.parent.name)
        yield ("start", self.started)
        yield ("end", self.end)
        yield ("next value", self.val)

    def _start(self):
        if self._timer:
            self._timer.cancel()
        self.started = now()
        self._timer = callLater(False, self.end, self._timeout)

    def _timeout(self):
        self._timer = None
        try:
            self.parent.write(self.val)
        except Exception as ex:
            fix_exception(ex)
            self.q.set(ex)
        else:
            self.q.set(None)

    def done(self):
        """called externally via _tmwrite() when the external timer writes"""
        if self._timer:
            self._timer.cancel()
            self._timer = None
        if not self.q.ready():
            self.q.set(None)

    def cancel(self):
        self.delete()

    def delete(self, ctx=None):
        if self._timer is not None:
            self._timer.cancel()
            self._timer = None
        self.q.set(DelayCancelled(self))

        super(OutTimer, self).delete(ctx)
Beispiel #36
0
class Operation(BaseOperation):
    def __init__(self, monitor, callable, *args):
        super(Operation, self).__init__(monitor)

        self.id = (callable, args)

        self.callable = callable
        self.args = args
        self.node = getattr(callable, "__self__", None)
        self.method = getattr(callable, "__name__", None)

        self.result = AsyncResult()

        self.primary_parent = self.monitor.get_current()
        self.primary_parent.add_dependency(self)

        self.greenlet = Yaylet(callable, *args)
        self.greenlet.operation = self
        self.greenlet.link(self._operation_finish)

    def start(self):
        self.greenlet.start()

    def ready(self):
        return self.result.ready()

    def get(self):
        return self.result.get()

    def _operation_finish(self, source):
        # WARNING: This method will be called in its own greenlet.
        # Using self.monitor.execute from here will cause work to be owned by Root

        # Cycle breaking
        source.operation = None

        # Setup the AsyncResult so *new* calls will return immediately
        # But let's not notify the existing blocked greenlets until we
        # have run the paradox detector
        if source.successful():
            self.result.value = source.value
            self.result._exception = None
        else:
            self.result.value = None
            self.result._exception = source.exception

        # Purge any operations that were cached during a peek operation
        checks = []

        for p in self.peeks:
            for c, op in p.walk_children():
                if op.method.startswith("as_"):
                    checks.append(op)
                op.purge_one()

            if p.method.startswith("as_"):
                checks.append(p)
            p.purge_one()

        getcurrent().operation = self

        for op in checks:
            try:
                current_val = op.get()
                new_val = self.monitor.wait(getattr(op.node, op.method))
            except Exception as e:
                self.result.set_exception(e)
                return

            if new_val != current_val:
                self.result.set_exception(
                    errors.ParadoxError(
                        "Inconsistent configuration detected - changed from %r to %r" % (current_val, new_val),
                        anchor=op.node.anchor,
                    )
                )
                getcurrent().operation = None
                return

        getcurrent().operation = None

        # Now notify all the other greenlets waiting for us that it is safe to continue
        if source.successful():
            self.result.set(source.value)
        else:
            self.result.set_exception(source.exception)

    def __repr__(self):
        return "%s<%s>.%s(%r)" % (self.node.__class__.__name__, id(self), self.method, self.args)
Beispiel #37
0
class ProxyBot(Bot):

    def __init__(self, name, client, game, bot):
        self.name = name
        self.client = client
        self.bot = bot
        if bot:
            self.TIMEOUT = 1.0
        else:
            self.TIMEOUT = None

        self.expecting = None
        self._vote = None
        self._select = None
        self._sabotage = None
        self._join = None
        self._part = None
        self.game = game 

    def __call__(self, game, index, spy):
        """This function pretends to be a Builder, but in fact just
        configures this object in place, as it's easier to set up and maintain."""
        Player.__init__(self, self.name, index)
        self.state = game
        self.spy = spy

        self.channel = '%s-player-%i' % (self.game, index)
        self.client.send_message(message.Join(self.channel))
        self.client.send_message(message.Join(self.game))

        self._join = Event() 
        # Use elegant /INVITE command for humans that have better clients.
        self.client.send_message(message.Command([self.name, self.channel], 'INVITE'))
        return self

    def bakeTeam(self, team):
        return ', '.join([str(p) for p in team])

    def makeTeam(self, msg):
        for s in '\t,.!;?': msg = msg.replace(s, ' ')
        names = [n for n in msg.split(' ') if n]
        players = []
        for n in names:
            players.append(self.makePlayer(n))
        return players

    def makePlayer(self, name):
        for p in self.state.players:
            if str(p.index) in name:
                return p
            if name in p.name:
                return p
        assert False, "Can't find player for input name '%s'." % (name)

    def send(self, msg):
        self.client.msg(self.channel, msg)

    def onGameRevealed(self, players, spies):
        roles = {True: "Spy", False: "Resistance"}
        s = ""
        if self.spy:
            s = "; SPIES " + self.bakeTeam(spies)

        self._join.wait()
        self._join = None
        self.send('REVEAL %s; ROLE %s; PLAYERS %s%s.' % (self.game, roles[self.spy], self.bakeTeam(players), s))

    def onMissionAttempt(self, mission, tries, leader):
        self.send('MISSION %i.%i; LEADER %s.' % (mission, tries, Player.__repr__(leader)))

    def select(self, players, count):
        self.send('SELECT %i!' % (count))
        self._select = AsyncResult()
        self.state.count = count
        self.expecting = self.process_SELECTED
        return self._select.get(timeout=self.TIMEOUT)

    def process_SELECTED(self, msg):
        if 'select' in msg[1].lower():
            msg = ' '.join(msg[2:])
        else:
            msg = ' '.join(msg[1:])
        team = self.makeTeam(msg)
        if len(team) != self.state.count:
            self.send('SELECT %i?' % (self.state.count))
        else:
            self._select.set(team)
            self._select = None

    def onTeamSelected(self, leader, team):
        self.state.team = team[:]
        self.send("VOTE %s?" % (self.bakeTeam(team)))
        self._vote = AsyncResult()
        self.expecting = self.process_VOTED

    def vote(self, team):
        v = self._vote.get(timeout=self.TIMEOUT)
        self._vote = None
        return v   

    def process_VOTED(self, msg):
        result = parseYesOrNo(' '.join(msg[1:]))
        if result is not None:
            self._vote.set(result)

    def onVoteComplete(self, votes):
        self.send("VOTES %s." % (', '.join([showYesOrNo(v) for v in votes])))
        
        v = [b for b in votes if b]
        if self in self.state.team and len(v) > 2:
            self.send("SABOTAGE?")
            self._sabotage = AsyncResult()
            self.expecting = self.process_SABOTAGED
        else:
            self._sabotage = None

    def sabotage(self):
        assert self._sabotage is not None
        s = self._sabotage.get(timeout=self.TIMEOUT)
        self._sabotage = None
        return s 

    def process_SABOTAGED(self, msg):
        result = parseYesOrNo(' '.join(msg[1:]))
        if result is not None:
            self._sabotage.set(result)

    def onMissionComplete(self, sabotaged):
        # Force synchronization in case sabotage() is not called due to the bot
        # being resistance.  This helps hide human identity by having the same
        # input delay in Spy or Resistance cases.
        if self._sabotage and not self._sabotage.ready():
            s = self._sabotage.get(timeout=self.TIMEOUT)
            assert not s, "Expecting sabotage() to be False if it was handled automatically."

        self.send("SABOTAGES %i." % (sabotaged))
        self.expecting = None

    def onGameComplete(self, win, spies):
        self.send("RESULT %s; SPIES %s." % (showYesOrNo(win), self.bakeTeam(spies)))

        self.client.send_message(message.Command(self.game, 'PART'))
        self._part = Event() 
        self._part.wait()
        self.client.send_message(message.Command(self.channel, 'PART'))
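
Every interactive call in ProxyBot above (select, vote, sabotage) uses the same idiom: send a prompt, stash a fresh AsyncResult, and block on get(timeout=self.TIMEOUT) until the message handler fills it in from another greenlet. The idiom in isolation, a hedged sketch with made-up names:

# Hedged sketch of the prompt/response idiom used above, pure gevent.
import gevent
from gevent.event import AsyncResult

pending = {}

def ask(question, timeout=None):
    ar = AsyncResult()
    pending[question] = ar
    print("PROMPT: %s" % question)        # stand-in for self.send(...)
    try:
        return ar.get(timeout=timeout)    # raises gevent.Timeout if unanswered
    finally:
        pending.pop(question, None)

def on_message(question, answer):
    ar = pending.get(question)
    if ar is not None and not ar.ready():
        ar.set(answer)                    # unblocks the greenlet waiting in ask()

asker = gevent.spawn(ask, "SELECT 3!", 5.0)
gevent.spawn_later(0.1, on_message, "SELECT 3!", ["p1", "p2", "p3"])
print(asker.get())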
Beispiel #38
0
class _Socket(_original_Socket):
    """Green version of :class:`zmq.core.socket.Socket`

    The following methods are overridden:

        * send
        * recv

    To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving
    is deferred to the hub if a ``zmq.EAGAIN`` (retry) error is raised.
    
    The `__state_changed` method is triggered when the zmq.FD for the socket is
    marked as readable and triggers the necessary read and write events (which
    are waited for in the recv and send methods).

    Some double underscore prefixes are used to minimize pollution of
    :class:`zmq.core.socket.Socket`'s namespace.
    """
    def __init__(self, context, socket_type):
        self.__in_send_multipart = False
        self.__in_recv_multipart = False
        self.__setup_events()

    def __del__(self):
        self.close()

    def close(self, linger=None):
        super(_Socket, self).close(linger)
        self.__cleanup_events()

    def __cleanup_events(self):
        # close the _state_event event, keeps the number of active file descriptors down
        if getattr(self, '_state_event', None):
            _stop(self._state_event)
            self._state_event = None
        # if the socket has entered a close state resume any waiting greenlets
        self.__writable.set()
        self.__readable.set()

    def __setup_events(self):
        self.__readable = AsyncResult()
        self.__writable = AsyncResult()
        self.__readable.set()
        self.__writable.set()

        try:
            self._state_event = get_hub().loop.io(self.getsockopt(zmq.FD),
                                                  1)  # read state watcher
            self._state_event.start(self.__state_changed)
        except AttributeError:
            # for gevent<1.0 compatibility
            from gevent.core import read_event
            self._state_event = read_event(self.getsockopt(zmq.FD),
                                           self.__state_changed,
                                           persist=True)

    def __state_changed(self, event=None, _evtype=None):
        if self.closed:
            self.__cleanup_events()
            return
        try:
            # avoid triggering __state_changed from inside __state_changed
            events = super(_Socket, self).getsockopt(zmq.EVENTS)
        except zmq.ZMQError as exc:
            self.__writable.set_exception(exc)
            self.__readable.set_exception(exc)
        else:
            if events & zmq.POLLOUT:
                self.__writable.set()
            if events & zmq.POLLIN:
                self.__readable.set()

    def _wait_write(self):
        assert self.__writable.ready(), "Only one greenlet can be waiting on this event"
        self.__writable = AsyncResult()
        # timeout is because libzmq cannot be trusted to properly signal a new send event:
        # this is effectively a maximum poll interval of 1s
        tic = time.time()
        timeout = gevent.Timeout(seconds=1)
        try:
            timeout.start()
            self.__writable.get(block=True)
        except gevent.Timeout as t:
            if t is not timeout:
                raise
            toc = time.time()
            # gevent bug: get can raise timeout even on clean return
            # don't display zmq bug warning for gevent bug (this is getting ridiculous)
            if toc - tic > 0.9 and self.getsockopt(zmq.EVENTS) & zmq.POLLOUT:
                print("BUG: gevent missed a libzmq send event on %i!" %
                      self.FD,
                      file=sys.stderr)
        finally:
            timeout.cancel()
            self.__writable.set()

    def _wait_read(self):
        assert self.__readable.ready(), "Only one greenlet can be waiting on this event"
        self.__readable = AsyncResult()
        # timeout is because libzmq cannot always be trusted to play nice with libevent.
        # I can only confirm that this actually happens for send, but lets be symmetrical
        # with our dirty hacks.
        # this is effectively a maximum poll interval of 1s
        tic = time.time()
        timeout = gevent.Timeout(seconds=1)
        try:
            timeout.start()
            self.__readable.get(block=True)
        except gevent.Timeout as t:
            if t is not timeout:
                raise
            toc = time.time()
            # gevent bug: get can raise timeout even on clean return
            # don't display zmq bug warning for gevent bug (this is getting ridiculous)
            if toc - tic > 0.9 and self.getsockopt(zmq.EVENTS) & zmq.POLLIN:
                print("BUG: gevent missed a libzmq recv event on %i!" %
                      self.FD,
                      file=sys.stderr)
        finally:
            timeout.cancel()
            self.__readable.set()

    def send(self, data, flags=0, copy=True, track=False):
        """send, which will only block current greenlet
        
        state_changed always fires exactly once (success or fail) at the
        end of this method.
        """

        # if we're given the NOBLOCK flag act as normal and let the EAGAIN get raised
        if flags & zmq.NOBLOCK:
            try:
                msg = super(_Socket, self).send(data, flags, copy, track)
            finally:
                if not self.__in_send_multipart:
                    self.__state_changed()
            return msg
        # ensure the zmq.NOBLOCK flag is part of flags
        flags |= zmq.NOBLOCK
        while True:  # Attempt to complete this operation indefinitely, blocking the current greenlet
            try:
                # attempt the actual call
                msg = super(_Socket, self).send(data, flags, copy, track)
            except zmq.ZMQError as e:
                # if the raised ZMQError is not EAGAIN, reraise
                if e.errno != zmq.EAGAIN:
                    if not self.__in_send_multipart:
                        self.__state_changed()
                    raise
            else:
                if not self.__in_send_multipart:
                    self.__state_changed()
                return msg
            # defer to the event loop until we're notified the socket is writable
            self._wait_write()

    def recv(self, flags=0, copy=True, track=False):
        """recv, which will only block current greenlet
        
        state_changed always fires exactly once (success or fail) at the
        end of this method.
        """
        if flags & zmq.NOBLOCK:
            try:
                msg = super(_Socket, self).recv(flags, copy, track)
            finally:
                if not self.__in_recv_multipart:
                    self.__state_changed()
            return msg

        flags |= zmq.NOBLOCK
        while True:
            try:
                msg = super(_Socket, self).recv(flags, copy, track)
            except zmq.ZMQError as e:
                if e.errno != zmq.EAGAIN:
                    if not self.__in_recv_multipart:
                        self.__state_changed()
                    raise
            else:
                if not self.__in_recv_multipart:
                    self.__state_changed()
                return msg
            self._wait_read()

    def send_multipart(self, *args, **kwargs):
        """wrap send_multipart to prevent state_changed on each partial send"""
        self.__in_send_multipart = True
        try:
            msg = super(_Socket, self).send_multipart(*args, **kwargs)
        finally:
            self.__in_send_multipart = False
            self.__state_changed()
        return msg

    def recv_multipart(self, *args, **kwargs):
        """wrap recv_multipart to prevent state_changed on each partial recv"""
        self.__in_recv_multipart = True
        try:
            msg = super(_Socket, self).recv_multipart(*args, **kwargs)
        finally:
            self.__in_recv_multipart = False
            self.__state_changed()
        return msg

    def getsockopt(self, opt):
        """trigger state_changed on getsockopt(EVENTS)"""
        optval = super(_Socket, self).getsockopt(opt)
        if opt == zmq.EVENTS:
            self.__state_changed()
        return optval
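
The green socket above is normally reached through pyzmq's zmq.green wrapper rather than instantiated directly; the green Context produces these sockets, and send/recv then yield to the gevent hub on EAGAIN exactly as implemented above. A hedged usage sketch with a REQ/REP pair in one process (the inproc endpoint name is made up):

# Hedged sketch: a REQ/REP pair over the green socket via zmq.green.
import gevent
import zmq.green as zmq

ctx = zmq.Context()

def server():
    rep = ctx.socket(zmq.REP)
    rep.bind("inproc://demo")     # spawned first, so it binds before the client connects
    msg = rep.recv()              # blocks only this greenlet
    rep.send(b"pong: " + msg)

def client():
    req = ctx.socket(zmq.REQ)
    req.connect("inproc://demo")
    req.send(b"ping")
    return req.recv()

srv = gevent.spawn(server)
cli = gevent.spawn(client)
print(cli.get())
srv.join()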
Beispiel #39
0
def socket_send(context, dest_id, message, expect_reply=False):
    if dest_id is None:
        logger.error("socket_send dest ID %s is not valid" %
                     ID_TO_STRING(dest_id))
        return None

    if context is not None and context.direction == ONLY_FROM_TCP_SERVER and context.id == dest_id:
        # logger.debug("socket_send reuses socket since context ID is the same as dest_id %s" % str(dest_id))
        next_hop_id = dest_id
        address = context.address
        sock = context.socket
        nonce = context.nonce

    else:
        next_hop = find_nexthop_for_id(dest_id)
        # logger.debug("socket_send next_hop is %s" % str(next_hop))
        if next_hop is None:
            logger.error(
                "socket_send next hop for dest ID %s 0x%X cannot be found" %
                (ID_TO_STRING(dest_id), dest_id))
            return None

        next_hop_id = next_hop.id
        address = next_hop.tcp_address

        sock = ConnectionManager.init().get_peer_by_id(next_hop_id)
        nonce = os.urandom(MPTN_TCP_NONCE_SIZE)

    if sock is None:
        # logger.debug("socket_send no socket found for ID %s"%ID_TO_STRING(next_hop_id))
        sock = reconnect(address)
        if sock is None:
            # logger.error("socket_send cannot re-setup socket for next_hop_id=%s addr=%s msg is\n%s" % (ID_TO_STRING(next_hop_id), str(address), formatted_print(split_packet_to_list(message))))
            return
        try:
            sock.send(self_id_net_endian_string)
        except Exception as e:
            logger.error(
                "socket_send self_id_net_endian_string error=%s. addr=%s, self_id_net_endian_string=%s, nonce=%s, message is\n%s\nerror=%s\n%s"
                % (str(address), ID_TO_STRING(self_id_net_endian_string),
                   str(map(ord, nonce)),
                   str(formatted_print(split_packet_to_list(message))), str(e),
                   traceback.format_exc()))
            return
        gevent.spawn(socket_recv, sock, address, next_hop_id)
        ConnectionManager.init().add_peer(
            address, Peer(socket=sock, id=next_hop_id, address=address))
        gevent.sleep(0)

    # logger.debug("socket_send message %s to ID %s" % (str(message), ID_TO_STRING(next_hop_id)))
    size = 0
    try:
        sock.send(nonce)
        size = struct.pack("!L", socket.htonl(len(message)))
        sock.send(size)
        sock.sendall(message)
    except Exception as e:
        logger.error(
            "socket_send nonce addr=%s, self_id_net_endian_string=%s, nonce=%s, message is\n%s\nerror=%s\n%s"
            % (str(address), ID_TO_STRING(self_id_net_endian_string),
               str(map(ord, nonce)),
               str(formatted_print(split_packet_to_list(message))), str(e),
               traceback.format_exc()))
        ConnectionManager.init().remove_peer(address)
        return None

    if not expect_reply: return None

    callback = AsyncResult()
    ConnectionManager.init().add_nonce(nonce, NonceCallback(dest_id, callback))

    if context is None:
        while not callback.ready():
            gevent.sleep(0)

    return callback.get()
Beispiel #40
0
class _Socket(_original_Socket):
    """Green version of :class:`zmq.core.socket.Socket`

    The following methods are overridden:

        * send
        * recv

    To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving
    is deferred to the hub if a ``zmq.EAGAIN`` (retry) error is raised.
    
    The `__state_changed` method is triggered when the zmq.FD for the socket is
    marked as readable and triggers the necessary read and write events (which
    are waited for in the recv and send methods).

    Some double underscore prefixes are used to minimize pollution of
    :class:`zmq.core.socket.Socket`'s namespace.
    """
    __in_send_multipart = False
    __in_recv_multipart = False
    __writable = None
    __readable = None
    _state_event = None
    _gevent_bug_timeout = 11.6 # timeout for not trusting gevent
    _debug_gevent = False # turn on if you think gevent is missing events
    _poller_class = _Poller
    
    def __init__(self, context, socket_type):
        _original_Socket.__init__(self, context, socket_type)
        self.__in_send_multipart = False
        self.__in_recv_multipart = False
        self.__setup_events()
        

    def __del__(self):
        self.close()

    def close(self, linger=None):
        super(_Socket, self).close(linger)
        self.__cleanup_events()

    def __cleanup_events(self):
        # close the _state_event event, keeps the number of active file descriptors down
        if getattr(self, '_state_event', None):
            _stop(self._state_event)
            self._state_event = None
        # if the socket has entered a close state resume any waiting greenlets
        self.__writable.set()
        self.__readable.set()

    def __setup_events(self):
        self.__readable = AsyncResult()
        self.__writable = AsyncResult()
        self.__readable.set()
        self.__writable.set()
        
        try:
            self._state_event = get_hub().loop.io(self.getsockopt(zmq.FD), 1) # read state watcher
            self._state_event.start(self.__state_changed)
        except AttributeError:
            # for gevent<1.0 compatibility
            from gevent.core import read_event
            self._state_event = read_event(self.getsockopt(zmq.FD), self.__state_changed, persist=True)

    def __state_changed(self, event=None, _evtype=None):
        if self.closed:
            self.__cleanup_events()
            return
        try:
            # avoid triggering __state_changed from inside __state_changed
            events = super(_Socket, self).getsockopt(zmq.EVENTS)
        except zmq.ZMQError as exc:
            self.__writable.set_exception(exc)
            self.__readable.set_exception(exc)
        else:
            if events & zmq.POLLOUT:
                self.__writable.set()
            if events & zmq.POLLIN:
                self.__readable.set()

    def _wait_write(self):
        assert self.__writable.ready(), "Only one greenlet can be waiting on this event"
        self.__writable = AsyncResult()
        # timeout is because libzmq cannot be trusted to properly signal a new send event:
        # this is effectively a maximum poll interval of 1s
        tic = time.time()
        dt = self._gevent_bug_timeout
        if dt:
            timeout = gevent.Timeout(seconds=dt)
        else:
            timeout = None
        try:
            if timeout:
                timeout.start()
            self.__writable.get(block=True)
        except gevent.Timeout as t:
            if t is not timeout:
                raise
            toc = time.time()
            # gevent bug: get can raise timeout even on clean return
            # don't display zmq bug warning for gevent bug (this is getting ridiculous)
            if self._debug_gevent and timeout and toc-tic > dt and \
                    self.getsockopt(zmq.EVENTS) & zmq.POLLOUT:
                print("BUG: gevent may have missed a libzmq send event on %i!" % self.FD, file=sys.stderr)
        finally:
            if timeout:
                timeout.cancel()
            self.__writable.set()

    def _wait_read(self):
        assert self.__readable.ready(), "Only one greenlet can be waiting on this event"
        self.__readable = AsyncResult()
        # timeout is because libzmq cannot always be trusted to play nice with libevent.
        # I can only confirm that this actually happens for send, but lets be symmetrical
        # with our dirty hacks.
        # this is effectively a maximum poll interval of 1s
        tic = time.time()
        dt = self._gevent_bug_timeout
        if dt:
            timeout = gevent.Timeout(seconds=dt)
        else:
            timeout = None
        try:
            if timeout:
                timeout.start()
            self.__readable.get(block=True)
        except gevent.Timeout as t:
            if t is not timeout:
                raise
            toc = time.time()
            # gevent bug: get can raise timeout even on clean return
            # don't display zmq bug warning for gevent bug (this is getting ridiculous)
            if self._debug_gevent and timeout and toc-tic > dt and \
                    self.getsockopt(zmq.EVENTS) & zmq.POLLIN:
                print("BUG: gevent may have missed a libzmq recv event on %i!" % self.FD, file=sys.stderr)
        finally:
            if timeout:
                timeout.cancel()
            self.__readable.set()

    def send(self, data, flags=0, copy=True, track=False):
        """send, which will only block current greenlet
        
        state_changed always fires exactly once (success or fail) at the
        end of this method.
        """
        
        # if we're given the NOBLOCK flag act as normal and let the EAGAIN get raised
        if flags & zmq.NOBLOCK:
            try:
                msg = super(_Socket, self).send(data, flags, copy, track)
            finally:
                if not self.__in_send_multipart:
                    self.__state_changed()
            return msg
        # ensure the zmq.NOBLOCK flag is part of flags
        flags |= zmq.NOBLOCK
        while True: # Attempt to complete this operation indefinitely, blocking the current greenlet
            try:
                # attempt the actual call
                msg = super(_Socket, self).send(data, flags, copy, track)
            except zmq.ZMQError as e:
                # if the raised ZMQError is not EAGAIN, reraise
                if e.errno != zmq.EAGAIN:
                    if not self.__in_send_multipart:
                        self.__state_changed()
                    raise
            else:
                if not self.__in_send_multipart:
                    self.__state_changed()
                return msg
            # defer to the event loop until we're notified the socket is writable
            self._wait_write()

    def recv(self, flags=0, copy=True, track=False):
        """recv, which will only block current greenlet
        
        state_changed always fires exactly once (success or fail) at the
        end of this method.
        """
        if flags & zmq.NOBLOCK:
            try:
                msg = super(_Socket, self).recv(flags, copy, track)
            finally:
                if not self.__in_recv_multipart:
                    self.__state_changed()
            return msg
        
        flags |= zmq.NOBLOCK
        while True:
            try:
                msg = super(_Socket, self).recv(flags, copy, track)
            except zmq.ZMQError as e:
                if e.errno != zmq.EAGAIN:
                    if not self.__in_recv_multipart:
                        self.__state_changed()
                    raise
            else:
                if not self.__in_recv_multipart:
                    self.__state_changed()
                return msg
            self._wait_read()
    
    def send_multipart(self, *args, **kwargs):
        """wrap send_multipart to prevent state_changed on each partial send"""
        self.__in_send_multipart = True
        try:
            msg = super(_Socket, self).send_multipart(*args, **kwargs)
        finally:
            self.__in_send_multipart = False
            self.__state_changed()
        return msg
    
    def recv_multipart(self, *args, **kwargs):
        """wrap recv_multipart to prevent state_changed on each partial recv"""
        self.__in_recv_multipart = True
        try:
            msg = super(_Socket, self).recv_multipart(*args, **kwargs)
        finally:
            self.__in_recv_multipart = False
            self.__state_changed()
        return msg
    
    def get(self, opt):
        """trigger state_changed on getsockopt(EVENTS)"""
        optval = super(_Socket, self).get(opt)
        if opt == zmq.EVENTS:
            self.__state_changed()
        return optval
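
Taken together, the methods above make send and recv cooperative: each call retries the non-blocking libzmq operation and parks only the calling greenlet on an AsyncResult until the socket is reported readable or writable again. A minimal usage sketch follows, assuming the wrapper is exposed the way pyzmq's zmq.green module exposes it; the endpoint and payloads are illustrative, not taken from the original source.

import gevent
import zmq.green as zmq   # green wrapper; assumed available as in pyzmq

ctx = zmq.Context()

def server():
    sock = ctx.socket(zmq.REP)
    sock.bind("tcp://127.0.0.1:5555")   # hypothetical endpoint
    request = sock.recv()               # parks only this greenlet (_wait_read)
    sock.send(b"pong")                  # parks only this greenlet (_wait_write)

def client():
    sock = ctx.socket(zmq.REQ)
    sock.connect("tcp://127.0.0.1:5555")
    sock.send(b"ping")
    print(sock.recv())                  # other greenlets keep running meanwhile

gevent.joinall([gevent.spawn(server), gevent.spawn(client)])
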
Example #41
0
class InputTable(scheme.Table):
    _table_name = 'input'
    _table_collection = input_tables

    id = scheme.Column('api')
    type = scheme.Column('api')
    parent = scheme.Column('api', lambda self, value: value and [value._table_name, value.id] or None)
    timeout = scheme.Column('api', lambda self, timeout: timeout and int(timeout.eta*1000) or None)
    elements = scheme.Column('api')
    result = scheme.Column('api')
    close_aborts = scheme.Column('api')

    ignore_api = False

    def __init__(self, type, parent, timeout, elements, close_aborts, ignore_api=False):
        self.type = type
        self.parent = parent
        self.timeout = None
        self.elements = [isinstance(e, list) and e or [e] for e in elements]
        self.close_aborts = close_aborts
        self.ignore_api = ignore_api

        if parent:
            parent.input = self
        
        self._result = AsyncResult()
        self.reset_timeout(timeout)

    def set_result(self, value):
        if self._result.ready():
            #raise RuntimeError('result of input already set')
            return
        with scheme.transaction:
            self.result = value
            self.reset_timeout(None)
        self._result.set(value)
        event.fire("input:result", self)

    def set_error(self, value):
        if self._result.ready():
            #raise RuntimeError('result of input already set')
            return
        with scheme.transaction:
            self.result = str(value)
            self.reset_timeout(None)
        self._result.set_exception(value)
        event.fire("input:error", self)

    def reset_timeout(self, timeout):
        with scheme.transaction:
            if self.timeout:
                self.timeout.kill()
            if timeout:
                self.timeout = gevent.spawn_later(timeout, self._timed_out)
                self.timeout.eta = time.time() + timeout
            elif self.timeout:
                self.timeout = None

    def _timed_out(self):
        with scheme.transaction:
            self.timeout = None
            self.set_error(InputTimeout())
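
The InputTable above funnels every outcome through a single AsyncResult: set_result and set_error resolve it at most once, and the gevent.spawn_later greenlet converts an expired timeout into set_error(InputTimeout()). Below is a stripped-down sketch of that result-or-timeout pattern with the scheme/event framework removed; the Prompt class and its wait method are illustrative stand-ins, not part of the original module.

import gevent
from gevent.event import AsyncResult


class Prompt(object):
    """Illustrative stand-in for the result-or-timeout pattern above."""

    def __init__(self, timeout):
        self._result = AsyncResult()
        # resolve with an error if nobody answers within `timeout` seconds
        self._timer = gevent.spawn_later(timeout, self._timed_out)

    def set_result(self, value):
        if self._result.ready():        # only the first resolution counts
            return
        self._timer.kill()
        self._result.set(value)

    def _timed_out(self):
        if not self._result.ready():
            self._result.set_exception(RuntimeError("input timed out"))

    def wait(self):
        # blocks the calling greenlet until set_result() or the timeout fires
        return self._result.get()


prompt = Prompt(timeout=5)
gevent.spawn_later(1, prompt.set_result, "yes")
print(prompt.wait())                    # prints "yes" after about one second
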