Example #1
def Worker(sock, addr):
    write_channel = stackless.channel()
    write_channel.preference = 1  # Prefer the sender.
    now_ts = time.time()
    timestamps = [now_ts, now_ts]  # read_ts, write_ts
    writer_tasklet = stackless.tasklet(Writer)(timestamps, write_channel, sock)
    sleeper_tasklet = stackless.tasklet(Sleeper)(timestamps, write_channel, 3)
    # TODO(pts): Flush earlier.
    write_channel.send('Hello, please type something.\r\n')
    try:
        while True:
            msg = sock.recv(256)
            if not msg:
                break
            timestamps[0] = max(timestamps[0], time.time())  # Register read.
            # TODO(pts): Flush earlier.
            write_channel.send('You typed %r.\r\n' % msg)
    finally:
        logging.info('connection closed from %r' % (addr, ))
        if writer_tasklet.alive:
            write_channel.send(None)  # Will kill writer_tasklet eventually.
        timestamps[0] = None  # Will kill sleeper_tasklet eventually.
        while writer_tasklet.alive or sleeper_tasklet.alive:
            stackless.schedule(None)
        sock.close()
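The Writer and Sleeper tasklets started by Worker are not part of this excerpt. Below is a minimal sketch of what they might look like, assuming Writer drains write_channel to the client socket (and exits on the None sentinel sent in the finally: block above) and Sleeper emits a heartbeat line once nothing has been read for the given number of seconds; the heartbeat text is made up.

import time

from syncless import coio


def Writer(timestamps, write_channel, sock):
    """Send each message arriving on write_channel to the client socket."""
    while True:
        msg = write_channel.receive()
        if msg is None:  # Worker sends None to shut the writer down.
            break
        sock.sendall(msg)
        timestamps[1] = max(timestamps[1], time.time())  # Register write.


def Sleeper(timestamps, write_channel, idle_timeout):
    """Send a heartbeat when nothing was read for idle_timeout seconds."""
    while timestamps[0] is not None:  # Worker sets this to None on shutdown.
        idle = time.time() - timestamps[0]
        if idle >= idle_timeout:
            write_channel.send('Are you still there?\r\n')
            idle = 0
        coio.sleep(idle_timeout - idle)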
Example #2
def Worker(sock, addr):
  write_channel = stackless.channel()
  write_channel.preference = 1  # Prefer the sender.
  now_ts = time.time()
  timestamps = [now_ts, now_ts]  # read_ts, write_ts
  writer_tasklet = stackless.tasklet(Writer)(timestamps, write_channel, sock)
  sleeper_tasklet = stackless.tasklet(Sleeper)(timestamps, write_channel, 3)
  # TODO(pts): Flush earlier.
  write_channel.send('Hello, please type something.\r\n')
  try:
    while True:
      msg = sock.recv(256)
      if not msg:
        break
      timestamps[0] = max(timestamps[0], time.time())  # Register read.
      # TODO(pts): Flush earlier.
      write_channel.send('You typed %r.\r\n' % msg)
  finally:
    logging.info('connection closed from %r' % (addr,))
    if writer_tasklet.alive:
      write_channel.send(None)  # Will kill writer_tasklet eventually.
    timestamps[0] = None  # Will kill sleeper_tasklet eventually.
    while writer_tasklet.alive or sleeper_tasklet.alive:
      stackless.schedule(None)
    sock.close()
Example #3
def main():
    # Without patch.patch_socket() or patch.patch_pymysql() the
    # counter below would jump from 0 to 1000 in one big step. With this
    # patch, MySQL socket communication is done with Syncless, so the counter
    # increases in little steps.
    patch.patch_pymysql()
    # patch_socket() works instead of patch_pymysql(), but it affects more
    # Python modules.
    #patch.patch_socket()
    patch.patch_stderr()

    mysql_config = dict(MYSQL_CONFIG)
    if 'password' in mysql_config:
        mysql_config['passwd'] = mysql_config.pop('password')
    if 'database' in mysql_config:
        mysql_config['db'] = mysql_config.pop('database')
    if mysql_config.get('unix_socket'):
        mysql_config['host'] = '127.0.0.1'
    #mysql_config['charset'] = 'utf8'
    db_conn = mysql_dbapi.connect(**mysql_config)
    assert mysql_dbapi.paramstyle == 'format'
    cursor = db_conn.cursor()
    cursor.execute('SET NAMES %s COLLATE %s', ('utf8', 'utf8_general_ci'))
    cursor = db_conn.cursor()
    # In SQLite, this would be:
    # for row in cursor.execute('SELECT LENGTH(?)', ('\xC3\xA1',)): print row
    cursor.execute('SELECT CHAR_LENGTH(%s)', ('\xC3\xA1', ))
    #for row in cursor:  # Fetch results.
    #  print >>sys.stderr, row
    assert list(cursor) == [(1, )]

    if len(sys.argv) > 1:
        num_iterations = int(sys.argv[1])
    else:
        num_iterations = 1000

    progress_channel = stackless.channel()
    progress_channel.preference = 1  # Prefer the sender.
    stackless.tasklet(Worker)(db_conn, num_iterations, progress_channel)
    done_count = 0
    receive_count = 0
    while True:
        sys.stderr.write('\r%s of %s ' % (done_count, num_iterations))
        if done_count >= num_iterations:
            break
        done_count = progress_channel.receive()
        receive_count += 1
    # receive_count might be smaller than done_count (e.g. 993 < 1000) here
    # (but sometimes it's equal), because sometimes the main tasklet was slow
    # to receive, so Worker did multiple iterations per one
    # progress_channel.receive().
    sys.stderr.write('done, receive_count=%d\n' % receive_count)
    # Needed for exit because we might have done DNS lookups with coio (evdns).
    stackless.main.insert()
    sys.exit(0)
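The Worker tasklet that drives the counter is defined elsewhere in this demo. A rough sketch, assuming it runs num_iterations small queries (the query is copied from the code above) and reports progress on progress_channel only when the main tasklet is already blocked in receive(), which is why receive_count can end up smaller than done_count:

def Worker(db_conn, num_iterations, progress_channel):
    last_sent = 0
    for i in xrange(1, num_iterations + 1):
        cursor = db_conn.cursor()
        cursor.execute('SELECT CHAR_LENGTH(%s)', ('\xC3\xA1',))
        assert list(cursor) == [(1,)]
        cursor.close()
        if progress_channel.balance < 0:
            # A negative balance means the main tasklet is waiting in
            # receive(), so this send does not block the Worker.
            progress_channel.send(i)
            last_sent = i
    if last_sent < num_iterations:
        progress_channel.send(num_iterations)  # Deliver the final count.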
Example #4
def main():
    # Without patch.patch_socket() or patch.patch_pymysql() the
    # counter below would jump from 0 to 1000 in one big step. With this
    # patch, MySQL socket communication is done with Syncless, so the counter
    # increases in little steps.
    patch.patch_pymysql()
    # patch_socket() works instead of patch_pymysql(), but it affects more
    # Python modules.
    # patch.patch_socket()
    patch.patch_stderr()

    mysql_config = dict(MYSQL_CONFIG)
    if "password" in mysql_config:
        mysql_config["passwd"] = mysql_config.pop("password")
    if "database" in mysql_config:
        mysql_config["db"] = mysql_config.pop("database")
    if mysql_config.get("unix_socket"):
        mysql_config["host"] = "127.0.0.1"
    # mysql_config['charset'] = 'utf8'
    db_conn = mysql_dbapi.connect(**mysql_config)
    assert mysql_dbapi.paramstyle == "format"
    cursor = db_conn.cursor()
    cursor.execute("SET NAMES %s COLLATE %s", ("utf8", "utf8_general_ci"))
    cursor = db_conn.cursor()
    # In SQLite, this would be:
    # for row in cursor.execute('SELECT LENGTH(?)', ('\xC3\xA1',)): print row
    cursor.execute("SELECT CHAR_LENGTH(%s)", ("\xC3\xA1",))
    # for row in cursor:  # Fetch results.
    #  print >>sys.stderr, row
    assert list(cursor) == [(1,)]

    if len(sys.argv) > 1:
        num_iterations = int(sys.argv[1])
    else:
        num_iterations = 1000

    progress_channel = stackless.channel()
    progress_channel.preference = 1  # Prefer the sender.
    stackless.tasklet(Worker)(db_conn, num_iterations, progress_channel)
    done_count = 0
    receive_count = 0
    while True:
        sys.stderr.write("\r%s of %s " % (done_count, num_iterations))
        if done_count >= num_iterations:
            break
        done_count = progress_channel.receive()
        receive_count += 1
    # receive_count might be smaller than done_count (e.g. 993 < 1000) here
    # (but sometimes it's equal), because sometimes the main tasklet was slow
    # to receive, so Worker did multiple iterations per one
    # progress_channel.receive().
    sys.stderr.write("done, receive_count=%d\n" % receive_count)
    # Needed for exit because we might have done DNS lookups with coio (evdns).
    stackless.main.insert()
    sys.exit(0)
Example #5
def main():
    # Without patch.patch_socket() or patch.patch_mysql_connector() the
    # counter below would jump from 0 to 1000 in one big step. With this
    # patch, MySQL socket communication is done with Syncless, so the counter
    # increases in little steps.
    patch.patch_mysql_connector()
    # patch_socket() works instead of patch_mysql_connector(), but it affects more
    # Python modules.
    #patch.patch_socket()
    patch.patch_stderr()

    db_conn = mysql_dbapi.connect(**MYSQL_CONFIG)
    assert mysql_dbapi.paramstyle == 'pyformat'
    assert db_conn.charset_name == 'utf8'
    assert db_conn.collation_name == 'utf8_general_ci'

    #query = 'SELECT CONNECTION_ID()'
    #query = 'SELECT LENGTH("\xC3\xA1")'  # :2
    #query = 'SELECT CHAR_LENGTH("\xC3\xA1")'  #: 1
    #query = 'SELECT UPPER("\xC3\xA1")'  #: '\xC3\x81'
    # Would raise e.g. mysql.connector.errors.ProgrammingError on SQL error.
    cursor = db_conn.cursor()
    # In SQLite, this would be:
    # for row in cursor.execute('SELECT LENGTH(?)', ('\xC3\xA1',)): print row
    cursor.execute('SELECT CHAR_LENGTH(%s)', ('\xC3\xA1', ))
    #for row in cursor:  # Fetch results.
    #  print >>sys.stderr, row
    assert list(cursor) == [(1, )]

    if len(sys.argv) > 1:
        num_iterations = int(sys.argv[1])
    else:
        num_iterations = 1000

    progress_channel = stackless.channel()
    progress_channel.preference = 1  # Prefer the sender.
    stackless.tasklet(Worker)(db_conn, num_iterations, progress_channel)
    done_count = 0
    receive_count = 0
    while True:
        sys.stderr.write('\r%s of %s ' % (done_count, num_iterations))
        if done_count >= num_iterations:
            break
        done_count = progress_channel.receive()
        receive_count += 1
    # receive_count might be smaller than done_count (e.g. 993 < 1000) here
    # (but sometimes it's equal), because sometimes the main tasklet was slow
    # to receive, so Worker did multiple iterations per one
    # progress_channel.receive().
    sys.stderr.write('done, receive_count=%d\n' % receive_count)
    # Needed for exit because we might have done DNS lookups with coio (evdns).
    stackless.main.insert()
    sys.exit(0)
Example #6
def main():
  # Without patch.patch_socket() or patch.patch_mysql_connector() the
  # counter below would jump from 0 to 1000 in one big step. With this
  # patch, MySQL socket communication is done with Syncless, so the counter
  # increases in little steps.
  patch.patch_mysql_connector()
  # patch_socket() works instead of patch_mysql_connector(), but it affects more
  # Python modules.
  #patch.patch_socket()
  patch.patch_stderr()

  db_conn = mysql_dbapi.connect(**MYSQL_CONFIG)
  assert mysql_dbapi.paramstyle == 'pyformat'
  assert db_conn.charset_name == 'utf8'
  assert db_conn.collation_name == 'utf8_general_ci'

  #query = 'SELECT CONNECTION_ID()'
  #query = 'SELECT LENGTH("\xC3\xA1")'  # :2
  #query = 'SELECT CHAR_LENGTH("\xC3\xA1")'  #: 1
  #query = 'SELECT UPPER("\xC3\xA1")'  #: '\xC3\x81'
  # Would raise e.g. mysql.connector.errors.ProgrammingError on SQL error.
  cursor = db_conn.cursor()
  # In SQLite, this would be:
  # for row in cursor.execute('SELECT LENGTH(?)', ('\xC3\xA1',)): print row
  cursor.execute('SELECT CHAR_LENGTH(%s)', ('\xC3\xA1',))
  #for row in cursor:  # Fetch results.
  #  print >>sys.stderr, row
  assert list(cursor) == [(1,)]

  if len(sys.argv) > 1:
    num_iterations = int(sys.argv[1])
  else:
    num_iterations = 1000

  progress_channel = stackless.channel()
  progress_channel.preference = 1  # Prefer the sender.
  stackless.tasklet(Worker)(db_conn, num_iterations, progress_channel)
  done_count = 0
  receive_count = 0
  while True:
    sys.stderr.write('\r%s of %s ' % (done_count, num_iterations))
    if done_count >= num_iterations:
      break
    done_count = progress_channel.receive()
    receive_count += 1
  # receive_count might be smaller than done_count (e.g. 993 < 1000) here
  # (but sometimes it's equal), because sometimes the main tasklet was slow
  # to receive, so Worker did multiple iterations per one
  # progress_channel.receive().
  sys.stderr.write('done, receive_count=%d\n' % receive_count)
  # Needed for exit because we might have done DNS lookups with coio (evdns).
  stackless.main.insert()
  sys.exit(0)
Example #7
 def change(self, timeout):
     """Change the timeout (restarting from 0)."""
     if self.sleeper_tasklet:
         self.sleeper_tasklet.remove().kill()
         self.sleeper_tasklet = None
     self.timeout = timeout
     if timeout is not None:
         # TODO(pts): speed: Do this without creating a new tasklet. Would this
         # improve speed?
         self.sleeper_tasklet = stackless.tasklet(self.Sleeper)()
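The Sleeper method referenced by change() (and by __enter__ in Example #21) is not shown. A hypothetical sketch of it as a method of the same class, assuming the timeout is enforced by raising an exception in the tasklet recorded by __enter__ (coio is assumed to be imported from syncless as in the other examples, and the real code almost certainly uses its own exception type instead of RuntimeError):

 def Sleeper(self):
     """Wait self.timeout seconds, then interrupt the protected tasklet."""
     coio.sleep(self.timeout)
     self.sleeper_tasklet = None  # Finished on its own; nothing to kill.
     if self.busy_tasklet and self.busy_tasklet.alive:
         # Inject an exception into the tasklet recorded by __enter__().
         self.busy_tasklet.raise_exception(RuntimeError, 'timed out')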
Example #8
 def change(self, timeout):
     """Change the timeout (restarting from 0)."""
     if self.sleeper_tasklet:
         self.sleeper_tasklet.remove().kill()
         self.sleeper_tasklet = None
     self.timeout = timeout
     if timeout is not None:
         # TODO(pts): speed: Do this without creating a new tasklet. Would this
         # improve speed?
         self.sleeper_tasklet = stackless.tasklet(self.Sleeper)()
Example #9
def main():
  patch.patch_socket()
  patch.patch_ssl()
  result_channel = stackless.channel()
  result_channel.preference = 1  # Prefer the sender.
  stackless.tasklet(FetchWorker)('https://www.facebook.com/', result_channel)
  progress_reporter_tasklet = stackless.tasklet(ProgressReporter)(0.02)
  # This blocks the current tasklet, while FetchWorker and ProgressReporter are
  # running.
  data = result_channel.receive()
  progress_reporter_tasklet.kill()
  sys.stderr.write("\n")
  match = re.search(r'(?is)<title>(.*?)</title>', data)
  if match:
    data = match.group(1).strip()
  print 'Downloaded:', data
  # Needed for exit because we did DNS lookups with coio (evdns).
  # TODO(pts): Remove stackless.main.insert() once the segfault bug is fixed.
  stackless.main.insert()
  sys.exit(0)
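FetchWorker and ProgressReporter come from the same demo but are not shown here. A minimal sketch, assuming FetchWorker downloads the page with urllib2 (which uses the patched socket and ssl modules) and sends the body to result_channel, while ProgressReporter writes a dot to stderr at a fixed interval (the same helper appears verbatim in the thread-pool demo further below):

import sys
import urllib2

from syncless import coio


def FetchWorker(url, result_channel):
  """Download url and send the whole response body to result_channel."""
  f = urllib2.urlopen(url)
  try:
    result_channel.send(f.read())
  finally:
    f.close()


def ProgressReporter(delta_sec):
  """Write a progress dot to stderr every delta_sec seconds."""
  while True:
    sys.stderr.write('.')
    coio.sleep(delta_sec)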
Example #10
def main():
    patch.patch_socket()
    patch.patch_ssl()
    result_channel = stackless.channel()
    result_channel.preference = 1  # Prefer the sender.
    stackless.tasklet(FetchWorker)('https://www.facebook.com/', result_channel)
    progress_reporter_tasklet = stackless.tasklet(ProgressReporter)(0.02)
    # This blocks the current tasklet, while FetchWorker and ProgressReporter are
    # running.
    data = result_channel.receive()
    progress_reporter_tasklet.kill()
    sys.stderr.write("\n")
    match = re.search(r'(?is)<title>(.*?)</title>', data)
    if match:
        data = match.group(1).strip()
    print 'Downloaded:', data
    # Needed for exit because we did DNS lookups with coio (evdns).
    # TODO(pts): Remove stackless.main.insert() once the segfault bug is fixed.
    stackless.main.insert()
    sys.exit(0)
Example #11
    def testFairSchedulingBlockedOnFile(self):
        events = []

        def Worker(name, count):
            while count > 0:
                events.append(name)
                count -= 1
                if count > 0:
                    stackless.schedule()

        nbf = coio.nbfile(*os.pipe())

        try:

            def SenderWorker(name, count):
                while count > 0:
                    events.append(name)
                    count -= 1
                    if count > 0:
                        stackless.schedule()
                events.append('R')
                nbf.write('S')
                nbf.flush()
                events.append('T')

            def ReceiverWorker(name):
                events.append(name)
                nbf.read_at_most(1)
                events.append(name.lower())

            stackless.tasklet(SenderWorker)('A', 3)
            stackless.tasklet(Worker)('B', 6)
            stackless.tasklet(ReceiverWorker)('W')
            stackless.tasklet(Worker)('C', 9)
            for i in xrange(32):
                stackless.schedule()

            self.assertEqual(
                'ABWC'  # First iteration, in tasklet creation order.
                'ABC'  # W is blocked on reading now.
                'ARTBC'  # A sends 'S' to wake up W.
                'wBC'  # W woken up, inserted to the beginning of the chain.
                'BC'
                'BC'
                'C'  # B's counter has expired.
                'C'
                'C',
                ''.join(events))
            nbf.close()
        finally:
            nbf.close()
Example #12
def Foo():
  thread_pool_obj = coio.thread_pool(3)
  stackless.tasklet(Sleeper)(thread_pool_obj, 9999)
  stackless.tasklet(Sleeper)(thread_pool_obj, 9999)
  stackless.tasklet(Sleeper)(thread_pool_obj, 2)
  stackless.schedule()
  f = lambda a, b: time.sleep(0.2) or a / b
  #f = lambda a, b: a / b
  #f = lambda a, b: sys.exit(42)
  print 'X0'
  if False:
    for i in xrange(1, 11):
      print i
      assert 42 == worker(f, 84 * i, 2 * i)
  print 'X1'
  # This first call is slow (takes about 2 seconds), because we have to wait for
  # a Sleeper to return.
  print thread_pool_obj(f, -42, -1)
  print 'X2'
  print thread_pool_obj(f, -42, -1)
  print 'X3'
  print thread_pool_obj(f, -42, -1)
  #print 'T'
  #time.sleep(10)
  print 'X4'
  try:
    thread_pool_obj(f, 7, 0)
    e = None
  except ZeroDivisionError, e:
    pass
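The Sleeper helper started by Foo() is not included in the excerpt. A sketch under the assumption that it simply parks a blocking time.sleep call on one of the pool's threads, which is why the first thread_pool_obj(f, -42, -1) call above has to wait about 2 seconds for a free thread; the worker() helper behind the disabled if False: block is not sketched here.

import time


def Sleeper(thread_pool_obj, duration):
  """Occupy one pool thread with a blocking time.sleep call."""
  thread_pool_obj(time.sleep, duration)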
Example #13
def Foo():
    thread_pool_obj = coio.thread_pool(3)
    stackless.tasklet(Sleeper)(thread_pool_obj, 9999)
    stackless.tasklet(Sleeper)(thread_pool_obj, 9999)
    stackless.tasklet(Sleeper)(thread_pool_obj, 2)
    stackless.schedule()
    f = lambda a, b: time.sleep(0.2) or a / b
    #f = lambda a, b: a / b
    #f = lambda a, b: sys.exit(42)
    print 'X0'
    if False:
        for i in xrange(1, 11):
            print i
            assert 42 == worker(f, 84 * i, 2 * i)
    print 'X1'
    # This first call is slow (takes about 2 seconds), because we have to wait for
    # a Sleeper to return.
    print thread_pool_obj(f, -42, -1)
    print 'X2'
    print thread_pool_obj(f, -42, -1)
    print 'X3'
    print thread_pool_obj(f, -42, -1)
    #print 'T'
    #time.sleep(10)
    print 'X4'
    try:
        thread_pool_obj(f, 7, 0)
        e = None
    except ZeroDivisionError, e:
        pass
Example #14
  def testFairSchedulingBlockedOnFile(self):
    events = []

    def Worker(name, count):
      while count > 0:
        events.append(name)
        count -= 1
        if count > 0:
          stackless.schedule()

    nbf = coio.nbfile(*os.pipe())

    try:
      def SenderWorker(name, count):
        while count > 0:
          events.append(name)
          count -= 1
          if count > 0:
            stackless.schedule()
        events.append('R')
        nbf.write('S')
        nbf.flush()
        events.append('T')

      def ReceiverWorker(name):
        events.append(name)
        nbf.read_at_most(1)
        events.append(name.lower())

      stackless.tasklet(SenderWorker)('A', 3)
      stackless.tasklet(Worker)('B', 6)
      stackless.tasklet(ReceiverWorker)('W')
      stackless.tasklet(Worker)('C', 9)
      for i in xrange(32):
        stackless.schedule()

      self.assertEqual(
          'ABWC'  # First iteration, in tasklet creation order.
          'ABC'  # W is blocked on reading now.
          'ARTBC'  # A sends 'S' to wake up W.
          'wBC'  # W woken up, inserted to the beginning of the chain.
          'BC'
          'BC'
          'C'  # B's counter has expired.
          'C'
          'C',
          ''.join(events))
      nbf.close()
    finally:
      nbf.close()
Example #15
  def testBlockingQueueReverse(self):
    events = []
    q = util.Queue()

    def Worker(prefix):
      while True:
        item = q.pop()
        events.append((prefix, item))
        if not item:
          return

    stackless.tasklet(Worker)(1)
    stackless.tasklet(Worker)(2)
    self.assertEqual(0, q.pending_receiver_count)
    stackless.schedule()
    self.assertEqual(2, q.pending_receiver_count)
    self.assertEqual([], events)
    q.append('foo')
    self.assertEqual([], events)
    self.assertEqual(1, len(q))
    stackless.schedule()
    self.assertEqual(0, len(q))
    self.assertEqual([(1, 'foo')], events)
    q.append('bar')
    self.assertEqual(1, len(q))
    self.assertEqual([(1, 'foo')], events)
    stackless.schedule()
    self.assertEqual(0, len(q))
    self.assertEqual([(1, 'foo'), (2, 'bar')], events)
    self.assertEqual(2, q.pending_receiver_count)
    q.append(0)
    q.append(None)
    self.assertEqual(2, len(q))
    self.assertEqual(0, q.pending_receiver_count)
    stackless.schedule()
    # Only this is different from testBlockingQueue.
    self.assertEqual([(1, 'foo'), (2, 'bar'), (1, None), (2, 0)], events)
    self.assertEqual(0, len(q))
    self.assertEqual(0, q.pending_receiver_count)
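util.Queue itself is not shown here. The sketch below is a hypothetical minimal blocking queue built on a stackless channel, just to illustrate the interface the test exercises (pop() blocks when empty, append() wakes a waiting popper, pending_receiver_count counts blocked poppers); it does not reproduce the exact wake-up order that makes this the "reverse" variant of testBlockingQueue.

import collections

from syncless.best_stackless import stackless


class SimpleQueue(object):
  """Hypothetical stand-in for util.Queue; details differ from the real one."""

  def __init__(self):
    self.items = collections.deque()
    self.channel = stackless.channel()
    self.channel.preference = 1  # Prefer the sender (the appender).

  def __len__(self):
    return len(self.items)

  @property
  def pending_receiver_count(self):
    # A negative channel balance means that many tasklets block in receive().
    return max(0, -self.channel.balance)

  def append(self, item):
    self.items.append(item)
    if self.channel.balance < 0:  # Somebody is already blocked in pop().
      self.channel.send(None)  # Wake one popper; it picks up the item below.

  def pop(self):
    while not self.items:
      self.channel.receive()  # Block until append() signals new data.
    return self.items.popleft()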
Example #16
    def testBlockingQueueReverse(self):
        events = []
        q = util.Queue()

        def Worker(prefix):
            while True:
                item = q.pop()
                events.append((prefix, item))
                if not item:
                    return

        stackless.tasklet(Worker)(1)
        stackless.tasklet(Worker)(2)
        self.assertEqual(0, q.pending_receiver_count)
        stackless.schedule()
        self.assertEqual(2, q.pending_receiver_count)
        self.assertEqual([], events)
        q.append('foo')
        self.assertEqual([], events)
        self.assertEqual(1, len(q))
        stackless.schedule()
        self.assertEqual(0, len(q))
        self.assertEqual([(1, 'foo')], events)
        q.append('bar')
        self.assertEqual(1, len(q))
        self.assertEqual([(1, 'foo')], events)
        stackless.schedule()
        self.assertEqual(0, len(q))
        self.assertEqual([(1, 'foo'), (2, 'bar')], events)
        self.assertEqual(2, q.pending_receiver_count)
        q.append(0)
        q.append(None)
        self.assertEqual(2, len(q))
        self.assertEqual(0, q.pending_receiver_count)
        stackless.schedule()
        # Only this is different from testBlockingQueue.
        self.assertEqual([(1, 'foo'), (2, 'bar'), (1, None), (2, 0)], events)
        self.assertEqual(0, len(q))
        self.assertEqual(0, q.pending_receiver_count)
Example #17
  def testFairSchedulingWithoutFile(self):
    events = []
    def Worker(name, count):
      while count > 0:
        events.append(name)
        stackless.schedule()
        count -= 1

    stackless.tasklet(Worker)('A', 5)
    stackless.tasklet(Worker)('B', 9)
    stackless.tasklet(Worker)('C', 7)
    for i in xrange(10):
      stackless.schedule()

    self.assertEqual('ABCABCABCABCABCBCBCBB', ''.join(events))
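The expected string can be derived by hand: the scheduler round-robins over the runnable worker tasklets (the main tasklet appends nothing), and a worker drops out of the ring once its count is exhausted. A small pure-Python simulation of that reasoning:

def RoundRobin(workers):
  """Simulate cooperative round-robin scheduling of (name, count) workers."""
  events = []
  ring = list(workers)
  while ring:
    next_ring = []
    for name, remaining in ring:
      events.append(name)
      if remaining > 1:
        next_ring.append((name, remaining - 1))
    ring = next_ring
  return ''.join(events)


assert RoundRobin([('A', 5), ('B', 9), ('C', 7)]) == 'ABCABCABCABCABCBCBCBB'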
Example #18
    def testFairSchedulingWithoutFile(self):
        events = []

        def Worker(name, count):
            while count > 0:
                events.append(name)
                stackless.schedule()
                count -= 1

        stackless.tasklet(Worker)('A', 5)
        stackless.tasklet(Worker)('B', 9)
        stackless.tasklet(Worker)('C', 7)
        for i in xrange(10):
            stackless.schedule()

        self.assertEqual('ABCABCABCABCABCBCBCBB', ''.join(events))
Example #19
  def testFairSchedulingWithFile(self):
    events = []
    def Worker(name, count):
      while count > 0:
        events.append(name)
        stackless.schedule()
        count -= 1

    nbf = coio.nbfile(*os.pipe())
    try:
      stackless.tasklet(Worker)('A', 5)
      stackless.tasklet(Worker)('B', 9)
      stackless.tasklet(Worker)('C', 7)
      for i in xrange(10):
        stackless.schedule()

      self.assertEqual('ABCABCABCABCABCBCBCBB', ''.join(events))
      #self.assertEqual([nbf], coio.CurrentMainLoop().nbfs)
    finally:
      nbf.close()
Example #20
    def testFairSchedulingWithFile(self):
        events = []

        def Worker(name, count):
            while count > 0:
                events.append(name)
                stackless.schedule()
                count -= 1

        nbf = coio.nbfile(*os.pipe())
        try:
            stackless.tasklet(Worker)('A', 5)
            stackless.tasklet(Worker)('B', 9)
            stackless.tasklet(Worker)('C', 7)
            for i in xrange(10):
                stackless.schedule()

            self.assertEqual('ABCABCABCABCABCBCBCBB', ''.join(events))
            #self.assertEqual([nbf], coio.CurrentMainLoop().nbfs)
        finally:
            nbf.close()
Example #21
 def __enter__(self):
     self.busy_tasklet = stackless.current
     if self.timeout is not None:
         self.sleeper_tasklet = stackless.tasklet(self.Sleeper)()
     return self
Example #22
    while True:
      msg = sock.recv(256)
      if not msg:
        break
      timestamps[0] = max(timestamps[0], time.time())  # Register read.
      # TODO(pts): Flush earlier.
      write_channel.send('You typed %r.\r\n' % msg)
  finally:
    logging.info('connection closed from %r' % (addr,))
    if writer_tasklet.alive:
      write_channel.send(None)  # Will kill writer_tasklet eventually.
    timestamps[0] = None  # Will kill sleeper_tasklet eventually.
    while writer_tasklet.alive or sleeper_tasklet.alive:
      stackless.schedule(None)
    sock.close()

if __name__ == '__main__':
  logging.root.setLevel(logging.INFO)
  logging.info('echo server with heartbeat initializing')
  server_socket = coio.new_realsocket(socket.AF_INET, socket.SOCK_STREAM)
  server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
  server_socket.bind(('127.0.0.1', 5454))
  server_socket.listen(100)
  logging.info('connect with:  telnet %s %s' % server_socket.getsockname()[:2])
  while True:
    client_socket, addr = server_socket.accept()
    logging.info('connection from %r, runcount=%d' %
                 (addr, stackless.getruncount()))
    stackless.tasklet(Worker)(client_socket, addr)
    client_socket = addr = None  # Free memory early.
Example #23
import os
import sys

from twisted.internet import reactor
from twisted.internet import task
from twisted.python import log
from twisted.web import resource
from twisted.web import server

from syncless.best_stackless import stackless
from syncless import coio

STDOUT_FILENO = 1
STDERR_FILENO = 2


def ShowTwistedProgress():
    os.write(STDOUT_FILENO, 'T')  # Twisted captures sys.stdout and sys.stderr.


def ProgressWorker(sleep_amount):
    while True:
        os.write(STDOUT_FILENO, 'W')
        coio.sleep(sleep_amount)


class Simple(resource.Resource):
    isLeaf = True

    def render_GET(self, request):
        return 'Hello, <b>World</b>!'


log.startLogging(sys.stdout)
site = server.Site(Simple())
reactor.listenTCP(8080, site)
task.LoopingCall(ShowTwistedProgress).start(0.1)
stackless.tasklet(ProgressWorker)(0.1)
reactor.run()
Example #24
    progress_channel = stackless.channel()
    progress_channel.preference = 1  # Prefer the sender.
    stackless.tasklet(Worker)(db_conn, num_iterations, progress_channel)
    done_count = 0
    receive_count = 0
    while True:
        sys.stderr.write('\r%s of %s ' % (done_count, num_iterations))
        if done_count >= num_iterations:
            break
        done_count = progress_channel.receive()
        receive_count += 1
    # receive_count might be smaller than done_count (e.g. 993 < 1000) here
    # (but sometimes it's equal), because sometimes the main tasklet was slow
    # to receive, so Worker did multiple iterations per one
    # progress_channel.receive().
    sys.stderr.write('done, receive_count=%d\n' % receive_count)
    # Needed for exit because we might have done DNS lookups with coio (evdns).
    stackless.main.insert()
    sys.exit(0)


if __name__ == '__main__':
    # We need this before we create the first stackless.tasklet if
    # syncless.greenstackless is used.
    __import__('syncless.coio')
    # Moving all work to another tasklet because stackless.main is not allowed
    # to be blocked on a channel.receive() (StopIteration would be raised).
    stackless.tasklet(main)()
    stackless.schedule_remove(None)
Example #25
seconds, because a thread pool of size 3 will be used by 4 threads doing
a sleep of 2 seconds each, so the last sleep can only be started after the
first thread has finished.
"""

__author__ = '[email protected] (Peter Szabo)'

import sys
import time

from syncless.best_stackless import stackless
from syncless import coio


def ProgressReporter(delta_sec):
    while True:
        sys.stderr.write('.')
        coio.sleep(delta_sec)


if __name__ == '__main__':
    stackless.tasklet(ProgressReporter)(0.05)
    thread_pool_obj = coio.thread_pool(4 - bool(len(sys.argv) > 1))
    stackless.tasklet(thread_pool_obj)(time.sleep, 2)
    stackless.tasklet(thread_pool_obj)(time.sleep, 2)
    stackless.tasklet(thread_pool_obj)(time.sleep, 2)
    sys.stderr.write('S')
    stackless.schedule()
    thread_pool_obj(time.sleep, 2)
    sys.stderr.write('D\n')
Example #26
def main():
    # Without patch.geventmysql() (run by importing fast_mysql), not only
    # would the counter below jump from 0 to 1000 in one big step, but the
    # client might not work at all, because vanilla geventmysql expects
    # gevent, while we use Syncless here. With this patch, MySQL socket
    # communication is done with Syncless, so the counter increases in
    # little steps.

    patch.patch_stderr()

    # Preprocess the connection information.
    mysql_config = dict(MYSQL_CONFIG)
    if mysql_config.get('unix_socket'):
        mysql_config['host'] = mysql_config.pop('unix_socket')
        mysql_config['port'] = None
        assert mysql_config['host'].startswith('/')
    if 'database' in mysql_config:
        mysql_config['db'] = mysql_config.pop('database')
    old_use_unicode = bool(mysql_config.pop('use_unicode', False))
    mysql_config['use_unicode'] = True  # Required for mysql_config['charset'].
    mysql_config.setdefault('charset', 'utf-8')
    db_conn = geventmysql.connect(**mysql_config)
    db_conn.client.set_use_unicode(old_use_unicode)
    assert geventmysql.paramstyle == 'format'

    # These are not supported by geventmysql.
    #assert db_conn.charset_name == 'utf8'
    #assert db_conn.collation_name == 'utf8_general_ci'

    #query = 'SELECT CONNECTION_ID()'
    #query = 'SELECT LENGTH("\xC3\xA1")'  # :2
    #query = 'SELECT CHAR_LENGTH("\xC3\xA1")'  #: 1
    #query = 'SELECT UPPER("\xC3\xA1")'  #: '\xC3\x81'
    # Would raise e.g. mysql.connector.errors.ProgrammingError on SQL error.
    cursor = db_conn.cursor()
    # In SQLite, this would be:
    # for row in cursor.execute('SELECT LENGTH(?)', ('\xC3\xA1',)): print row
    cursor.execute('SELECT CHAR_LENGTH(%s)', ('\xC3\xA1', ))

    # Since geventmysql cursors are not iterable, we have to use
    # cursor.fetchall() instead of list(cursor) here.
    assert cursor.fetchall() == [(1, )]
    cursor.close()  # geventmysql requires this.

    if len(sys.argv) > 1:
        num_iterations = int(sys.argv[1])
    else:
        num_iterations = 1000

    progress_channel = stackless.channel()
    progress_channel.preference = 1  # Prefer the sender.
    stackless.tasklet(Worker)(db_conn, num_iterations, progress_channel)
    done_count = 0
    receive_count = 0
    while True:
        sys.stderr.write('\r%s of %s ' % (done_count, num_iterations))
        if done_count >= num_iterations:
            break
        done_count = progress_channel.receive()
        receive_count += 1
    # receive_count might be smaller than done_count (e.g. 993 < 1000) here
    # (but sometimes it's equal), because sometimes the main tasklet was slow
    # to receive, so Worker did multiple iterations per one
    # progress_channel.receive().
    sys.stderr.write('done, receive_count=%d\n' % receive_count)
    # Needed for exit because we might have done DNS lookups with coio (evdns).
    stackless.main.insert()
    sys.exit(0)
Example #27
  progress_channel = stackless.channel()
  progress_channel.preference = 1  # Prefer the sender.
  stackless.tasklet(Worker)(db_conn, num_iterations, progress_channel)
  done_count = 0
  receive_count = 0
  while True:
    sys.stderr.write('\r%s of %s ' % (done_count, num_iterations))
    if done_count >= num_iterations:
      break
    done_count = progress_channel.receive()
    receive_count += 1
  # receive_count might be smaller than done_count (e.g. 993 < 1000) here
  # (but sometimes it's equal), because sometimes the main tasklet was slow
  # to receive, so Worker did multiple iterations per one
  # progress_channel.receive().
  sys.stderr.write('done, receive_count=%d\n' % receive_count)
  # Needed for exit because we might have done DNS lookups with coio (evdns).
  stackless.main.insert()
  sys.exit(0)


if __name__ == '__main__':
  # We need this before we create the first stackless.tasklet if
  # syncless.greenstackless is used.
  __import__('syncless.coio')
  # Moving all work to another tasklet because stackless.main is not allowed
  # to be blocked on a channel.receive() (StopIteration would be raised).
  stackless.tasklet(main)()
  stackless.schedule_remove(None)
Example #28
 def __enter__(self):
     self.busy_tasklet = stackless.current
     if self.timeout is not None:
         self.sleeper_tasklet = stackless.tasklet(self.Sleeper)()
     return self
Example #29
            if not msg:
                break
            timestamps[0] = max(timestamps[0], time.time())  # Register read.
            # TODO(pts): Flush earlier.
            write_channel.send('You typed %r.\r\n' % msg)
    finally:
        logging.info('connection closed from %r' % (addr, ))
        if writer_tasklet.alive:
            write_channel.send(None)  # Will kill writer_tasklet eventually.
        timestamps[0] = None  # Will kill sleeper_tasklet eventually.
        while writer_tasklet.alive or sleeper_tasklet.alive:
            stackless.schedule(None)
        sock.close()


if __name__ == '__main__':
    logging.root.setLevel(logging.INFO)
    logging.info('echo server with heartbeat initializing')
    server_socket = coio.new_realsocket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind(('127.0.0.1', 5454))
    server_socket.listen(100)
    logging.info('connect with:  telnet %s %s' %
                 server_socket.getsockname()[:2])
    while True:
        client_socket, addr = server_socket.accept()
        logging.info('connection from %r, runcount=%d' %
                     (addr, stackless.getruncount()))
        stackless.tasklet(Worker)(client_socket, addr)
        client_socket = addr = None  # Free memory early.
Example #30
import os
import sys

from twisted.internet import reactor
from twisted.internet import task
from twisted.python import log
from twisted.web import resource
from twisted.web import server

from syncless.best_stackless import stackless
from syncless import coio

STDOUT_FILENO = 1
STDERR_FILENO = 2

def ShowTwistedProgress():
  os.write(STDOUT_FILENO, 'T')  # Twisted captures sys.stdout and sys.stderr.

def ProgressWorker(sleep_amount):
  while True:
    os.write(STDOUT_FILENO, 'W')
    coio.sleep(sleep_amount)

class Simple(resource.Resource):
  isLeaf = True
  def render_GET(self, request):
    return 'Hello, <b>World</b>!'

log.startLogging(sys.stdout)
site = server.Site(Simple())
reactor.listenTCP(8080, site)
task.LoopingCall(ShowTwistedProgress).start(0.1)
stackless.tasklet(ProgressWorker)(0.1)
reactor.run()
def main():
  # Without patch.geventmysql() (run by importing fast_mysql), not only
  # would the counter below jump from 0 to 1000 in one big step, but the
  # client might not work at all, because vanilla geventmysql expects
  # gevent, while we use Syncless here. With this patch, MySQL socket
  # communication is done with Syncless, so the counter increases in
  # little steps.

  patch.patch_stderr()

  # Preprocess the connection information.
  mysql_config = dict(MYSQL_CONFIG)
  if mysql_config.get('unix_socket'):
    mysql_config['host'] = mysql_config.pop('unix_socket')
    mysql_config['port'] = None
    assert mysql_config['host'].startswith('/')
  if 'database' in mysql_config:
    mysql_config['db'] = mysql_config.pop('database')
  old_use_unicode = bool(mysql_config.pop('use_unicode', False))
  mysql_config['use_unicode'] = True  # Required for mysql_config['charset'].
  mysql_config.setdefault('charset', 'utf-8')
  db_conn = geventmysql.connect(**mysql_config)
  db_conn.client.set_use_unicode(old_use_unicode)
  assert geventmysql.paramstyle == 'format'

  # These are not supported by geventmysql.
  #assert db_conn.charset_name == 'utf8'
  #assert db_conn.collation_name == 'utf8_general_ci'

  #query = 'SELECT CONNECTION_ID()'
  #query = 'SELECT LENGTH("\xC3\xA1")'  # :2
  #query = 'SELECT CHAR_LENGTH("\xC3\xA1")'  #: 1
  #query = 'SELECT UPPER("\xC3\xA1")'  #: '\xC3\x81'
  # Would raise e.g. mysql.connector.errors.ProgrammingError on SQL error.
  cursor = db_conn.cursor()
  # In SQLite, this would be:
  # for row in cursor.execute('SELECT LENGTH(?)', ('\xC3\xA1',)): print row
  cursor.execute('SELECT CHAR_LENGTH(%s)', ('\xC3\xA1',))

  # Since geventmysql cursors are not iterable, we have to use
  # cursor.fetchall() instead of list(cursor) here.
  assert cursor.fetchall() == [(1,)]
  cursor.close()  # geventmysql requires this.

  if len(sys.argv) > 1:
    num_iterations = int(sys.argv[1])
  else:
    num_iterations = 1000

  progress_channel = stackless.channel()
  progress_channel.preference = 1  # Prefer the sender.
  stackless.tasklet(Worker)(db_conn, num_iterations, progress_channel)
  done_count = 0
  receive_count = 0
  while True:
    sys.stderr.write('\r%s of %s ' % (done_count, num_iterations))
    if done_count >= num_iterations:
      break
    done_count = progress_channel.receive()
    receive_count += 1
  # receive_count might be smaller than done_count (e.g. 993 < 1000) here
  # (but sometimes it's equal), because sometimes the main tasklet was slow
  # to receive, so Worker did multiple iterations per one
  # progress_channel.receive().
  sys.stderr.write('done, receive_count=%d\n' % receive_count)
  # Needed for exit because we might have done DNS lookups with coio (evdns).
  stackless.main.insert()
  sys.exit(0)
seconds, because a thread pool of size 3 will be used by 4 threads doing
a sleep of 2 seconds each, so the last sleep can only be started after the
first thread has finished.
"""

__author__ = "[email protected] (Peter Szabo)"

import sys
import time

from syncless.best_stackless import stackless
from syncless import coio


def ProgressReporter(delta_sec):
    while True:
        sys.stderr.write(".")
        coio.sleep(delta_sec)


if __name__ == "__main__":
    stackless.tasklet(ProgressReporter)(0.05)
    thread_pool_obj = coio.thread_pool(4 - bool(len(sys.argv) > 1))
    stackless.tasklet(thread_pool_obj)(time.sleep, 2)
    stackless.tasklet(thread_pool_obj)(time.sleep, 2)
    stackless.tasklet(thread_pool_obj)(time.sleep, 2)
    sys.stderr.write("S")
    stackless.schedule()
    thread_pool_obj(time.sleep, 2)
    sys.stderr.write("D\n")