Beispiel #1
0
 def testInterProcessSingleService(self):
     """Benchmark synchronous set/get round-trips against one server subprocess."""
     print
     print 'testInterProcessSingleService'
     N = 1000
     # The server runs in a separate process, listening on port 16300.
     self.registerSubprocess(spawn_server(16300))
     listener = ('localhost', 16300)
     store_client = Client(listener, StoreProtocol())
     store_client.connect()
     self.clients.append(store_client)
     # Poll until the connection is established.
     while not store_client.is_connected():
         coio.sleep(0.01)
     values = [('foo%d' % i, i, 'bar%d' % i) for i in xrange(N)]
     received_values = []
     start = time.time()
     # One synchronous set followed by one synchronous get per key.
     for key, timestamp, value in values:
         store_client.set(key, timestamp, value)
         received_values.append(store_client.get(key))
     elapsed = time.time() - start
     # Every get must echo back exactly the timestamp/value that was set.
     for (key, timestamp,
          value), (received_timestamp,
                   received_value) in zip(values, received_values):
         assert timestamp == received_timestamp
         assert value == received_value
     print 'Elapsed: %.2fs' % elapsed
     print '%.2f invocations / s' % (2 * N / elapsed)
def main():
    """Feed the cluster: spawn M feeder tasklets and report the request counter."""
    parser = argparse.ArgumentParser(description="Tako test feeder.")
    parser.add_argument('address')
    parser.add_argument('port', type=int)
    parser.add_argument('-l', '--limit', type=int, default=0)
    parser.add_argument('-d', '--delay', type=float, default=1)
    parser.add_argument('-dbg', '--debug', action='store_true')
    parser.add_argument('-u', '--update', help='Configuration update interval (seconds) default = 300', type=int, default=300)
    args = parser.parse_args()

    level = logging.DEBUG if args.debug else logging.INFO
    debug.configure_logging('native_client_feeder', level)

    listener = (args.address, args.port)
    client = Client('native_client_feeder', [listener], configuration_update_interval=args.update)
    client.connect()
    # Block until at least one node of the cluster is connected.
    while not client.is_connected():
        logging.debug('connected nodes: %d (%d)', client.connected_node_count(), client.total_node_count())
        coio.sleep(0.1)

    print 'feeding cluster coordinated by %s' % repr(listener)
    M = 10000

    # Spawn M concurrent feeder tasklets; feed() maintains the global counter.
    for i in xrange(M):
        coio.stackless.tasklet(feed)(client)

    global counter
    # Report progress once per second forever (counter is defined at module level).
    while True:
        coio.sleep(1)
        logging.info('counter = %d (%d reqs)', counter, counter * 3)
    def testInProcessSingleService_Sync(self):
        print
        print "testInProcessSingleService_Sync"
        N = 1000
        listener = ("localhost", 16000)
        server = Server(listener, [StoreService()])
        server.serve()
        self.servers.append(server)
        store_client = Client(listener, StoreProtocol())
        store_client.connect()
        self.clients.append(store_client)
        while not store_client.is_connected():
            coio.sleep(0.01)
        coio.stackless.schedule()

        values = [("foo%d" % i, i, "bar%d" % i) for i in xrange(N)]
        received_values = []

        start = time.time()
        for key, timestamp, value in values:
            retreived_timestamp = store_client.set(key, timestamp, value)
            assert retreived_timestamp == timestamp
            received_values.append(store_client.get(key))
        elapsed = time.time() - start

        for (key, timestamp, value), (received_timestamp, received_value) in zip(values, received_values):
            assert timestamp == received_timestamp
            assert value == received_value

        print "Elapsed: %.2fs" % elapsed
        print "%.2f invocations / s" % (2 * N / elapsed)
Beispiel #4
0
 def Sleeper(self):
     """Sleep self.timeout seconds, then inject self.exc into the busy tasklet."""
     coio.sleep(self.timeout)
     target = self.busy_tasklet
     if not target:
         return
     exc = self.exc
     if isinstance(exc, BaseException):
         # An exception *instance*: re-raise as its class with the same args.
         target.raise_exception(type(exc), *exc.args)
     else:
         # An exception class: raise it without arguments.
         target.raise_exception(exc)
Beispiel #5
0
    def serve(self):
        """Start the node: wait for configuration, then expose internal and public services."""
        self.__configuration_controller.start()
        # Configuration arrives asynchronously from the coordinator; wait for it.
        while not self.__configuration:
            if __debug__: logging.debug('Waiting for configuration.')
            coio.sleep(1)

        # Node-to-node API (internal traffic).
        internal_service = service.Service(
            InternalNodeServiceProtocol(),
            get=self.__internal_get,
            set=self.__internal_set,
            stat=self.__internal_stat,
        )

        # Client-facing API.
        public_service = service.Service(
            PublicNodeServiceProtocol(),
            get=self.__public_get,
            set=self.__public_set,
            stat=self.__public_stat,
        )

        # Background repair runs in its own tasklet.
        self.__repair_task = coio.stackless.tasklet(self.__repair_task_loop)()

        logging.info('Internal API: %s:%s' % (self.node.address, self.node.port))
        # Both services share a single listening port.
        self.__server = service.Server(listener=('', self.node.port),
                                                services=(internal_service, public_service))
        self.__server.serve()
Beispiel #6
0
    def testInProcessSingleService_Sync(self):
        print
        print 'testInProcessSingleService_Sync'
        N = 1000
        listener = ('localhost', 16000)
        server = Server(listener, [StoreService()])
        server.serve()
        self.servers.append(server)
        store_client = Client(listener, StoreProtocol())
        store_client.connect()
        self.clients.append(store_client)
        while not store_client.is_connected():
            coio.sleep(0.01)
        coio.stackless.schedule()

        values = [('foo%d' % i, i, 'bar%d' % i) for i in xrange(N)]
        received_values = []

        start = time.time()
        for key, timestamp, value in values:
            retreived_timestamp = store_client.set(key, timestamp, value)
            assert retreived_timestamp == timestamp
            received_values.append(store_client.get(key))
        elapsed = time.time() - start

        for (key, timestamp,
             value), (received_timestamp,
                      received_value) in zip(values, received_values):
            assert timestamp == received_timestamp
            assert value == received_value

        print 'Elapsed: %.2fs' % elapsed
        print '%.2f invocations / s' % (2 * N / elapsed)
Beispiel #7
0
 def Sleeper(self):
     """Sleep self.timeout seconds, then raise self.exc in the busy tasklet."""
     coio.sleep(self.timeout)
     if self.busy_tasklet:
         if isinstance(self.exc, BaseException):
             # An exception *instance*: re-raise as its class with the same args.
             self.busy_tasklet.raise_exception(type(self.exc),
                                               *self.exc.args)
         else:
             # An exception class: raise it without arguments.
             self.busy_tasklet.raise_exception(self.exc)
Beispiel #8
0
 def testMainSleep(self):
     """A plain sleep must leave the event loop without registered events."""
     # The loop is idle before the sleep...
     self.assertEqual(LOOPRET, coio.nonblocking_loop_for_tests())
     coio.sleep(SMALL_SLEEP_SEC)
     # ...and idle again once the sleep has completed.
     self.assertEqual(LOOPRET, coio.nonblocking_loop_for_tests())
Beispiel #9
0
 def testNegativeSleep(self):
     """A negative sleep must return immediately and register no events."""
     # The loop is idle before the sleep...
     self.assertEqual(LOOPRET, coio.nonblocking_loop_for_tests())
     coio.sleep(-42)
     # ...and idle after the no-op sleep.
     self.assertEqual(LOOPRET, coio.nonblocking_loop_for_tests())
Beispiel #10
0
 def set(self, key, timestamp, value):
     """Write key via the first responsive replica; retry with sleeps, else raise."""
     if __debug__: logging.debug('key = %s, timestamp = %s', key, timestamp)
     for i in xrange(self.max_retries + 1):
         # Clients currently connected for the nodes that own this key.
         node_clients = self.__connected_clients_for_key(key)
         for node_client in node_clients:
             new_timestamp = node_client.set(key, timestamp, value)
             # A non-None timestamp means the node accepted the write.
             if new_timestamp is not None:
                 return new_timestamp
         coio.sleep(self.retry_interval)
     raise NotAvailableException(key)
Beispiel #11
0
 def testPerf(self):
     """Benchmark the HTTP server with ApacheBench run from a child process."""
     s = HttpServer(listener=('', 4711), handlers=(('/', {'GET':self.GET}),))
     t = coio.stackless.tasklet(s.serve)()
     coio.stackless.schedule()
     def ab():
         os.system('ab -k -n 10000 -c 10 http://127.0.0.1:4711/')
     # Run ab in a separate process so the server tasklet keeps the CPU here.
     p = processing.Process(target=ab)
     p.start()
     # NOTE(review): isAlive() presumably matches the old `processing` API;
     # modern multiprocessing spells it is_alive() -- confirm before migrating.
     while p.isAlive():
         coio.sleep(1)
     t.kill()
Beispiel #12
0
 def get(self, key):
     """Read key from the first replica that has it; retry with sleeps, else raise."""
     if __debug__: logging.debug('key = %s', key)
     for i in xrange(self.max_retries + 1):
         # Clients currently connected for the nodes that own this key.
         node_clients = self.__connected_clients_for_key(key)
         for node_client in node_clients:
             result = node_client.get(key)
             if __debug__: logging.debug('result = %s', result)
             if result is not None:
                 timestamp, value = result
                 # A falsy timestamp (e.g. 0) is normalized to None.
                 return timestamp or None, value
         coio.sleep(self.retry_interval)
     raise NotAvailableException(key)
Beispiel #13
0
    def testPerformance(self):
        """Throughput benchmark: stream N messages through an echo server in batches."""
        token = id(self)
        message_length = 1024
        N = 10000
        batch_size = 100
        collector = Collector(batch_size)
        l = 0
        port = 6001
        host = ('localhost', port)
        p = launch_echoserver(port)
        bytecount = 0
        try:
            sent_messages = deque()
            coio.sleep(1)
            messenger = Messenger(host)
            messenger.connect()
            # One long digit string; each message is a sliding window into it.
            message_buffer = ''.join('%d' % (i % 10)
                                     for i in xrange(N + message_length * 2))
            i = 0
            start_time = timer()
            for i in xrange(N):
                # Large messages use a zero-copy buffer; small ones a plain slice.
                if message_length > 4096:
                    message = buffer(message_buffer, i, message_length)
                else:
                    message = message_buffer[i:i + message_length]
                bytecount += len(message)
                messenger.send(message, token, collector)
                sent_messages.append((message, token))
                l += 1
                # Validate echoed replies one batch at a time.
                if l % batch_size == 0:
                    replies = collector.collect()
                    for i in xrange(len(replies)):
                        rm, rt = replies[i]
                        sm, st = sent_messages.popleft()
                        if type(sm) is buffer:
                            rm = buffer(rm)
                        if rm != sm:
                            print 'i: ', i
                            assert False
                    collector = Collector(batch_size)

            end_time = timer()
            elapsed_time = end_time - start_time
            print 'Transmitted %d messages with a size of %d bytes' % (
                N, message_length)
            print 'Transmission time (with validation): %fs' % elapsed_time
            print '%.2f requests+replies/s, %.2f MB/s' % (
                float(N * 2) / elapsed_time,
                (float(bytecount * 2) / 2**20) / elapsed_time)
            messenger.close()
        finally:
            os.kill(p.pid, signal.SIGKILL)
Beispiel #14
0
 def __repair_task_loop(self):
     while True:
         start_time = time.time()
         try:
             self.__repair_store()
         except Exception, e:
             logging.exception(e)
             pass
         elapsed = time.time() - start_time
         spare_seconds = self.__background_repair_interval_seconds - elapsed
         if spare_seconds > 0:
             logging.info('Repair task sleeping %s', timedelta(seconds=spare_seconds))
             coio.sleep(spare_seconds)
Beispiel #15
0
 def __flush(self):
     """Auto-commit loop: periodically commit and cycle the DB handle to cap memory."""
     # Random initial offset spreads commit times across instances.
     coio.sleep(random.random() * self.auto_commit_interval)
     while True:
         if self.operation_counter > 0:
             if __debug__: logging.debug('Committing %d operations', self.operation_counter)
             self.commit()
             self.operation_counter = 0
             # Close and reopen to free memory allocated by TC
             # Otherwise memory usage balloons until we run out of memory and get killed
             self.db.close()
             self.db.open(self.filepath, tc.BDBOWRITER | tc.BDBOCREAT)
             self.begin()
         coio.sleep(self.auto_commit_interval)
Beispiel #16
0
 def __fetch_configurations(self):
     """Poll all coordinators periodically and adopt the newest configuration seen."""
     while True:
         logging.debug('coordinators: %s', self.coordinators)
         if self.coordinators:
             configurations = []
             for coordinator in self.coordinators:
                 configurations.append(self.__fetch_configuration(coordinator))
             configurations.sort()
             # Adopt the first configuration strictly newer than the current one.
             for new_configuration, source_coordinator in configurations:
                 if new_configuration and (not self.configuration or new_configuration.timestamp > self.configuration.timestamp):
                     self.__set_configuration(new_configuration)
                     self.__notify()
                     break
         coio.sleep(self.interval)
 def testResilience(self):
     """Messenger must survive an echo-server kill and reconnect transparently."""
     try:
         token = id(self)
         q = Queue()
         def callback(value, token):
             q.append((value, token))
         port = 6000
         host = ('localhost', port)
         p = launch_echoserver(port)
         coio.sleep(1)
         messenger = Messenger(host, reconnect_max_interval=0.1)
         messenger.connect()
         messenger.send('1', token, callback)
         assert q.popleft() == ('1', token)
         # Kill the server: sends during the outage must report value None.
         os.kill(p.pid, signal.SIGKILL)
         messenger.send('2', token, callback)
         coio.sleep(1)
         messenger.send('3', token, callback)
         assert q.popleft() == (None, token)
         assert q.popleft() == (None, token)
         # Restart the server: the messenger reconnects and delivery resumes.
         p = launch_echoserver(port)
         coio.sleep(1)
         messenger.send('4', token, callback)
         assert q.popleft() == ('4', token)
         messenger.close()
         coio.sleep(1)
     finally:
         os.kill(p.pid, signal.SIGKILL)
Beispiel #18
0
    def testResilience(self):
        """Messenger must survive an echo-server kill and reconnect transparently."""
        try:
            token = id(self)
            q = Queue()

            def callback(value, token):
                q.append((value, token))

            port = 6000
            host = ('localhost', port)
            p = launch_echoserver(port)
            coio.sleep(1)
            messenger = Messenger(host, reconnect_max_interval=0.1)
            messenger.connect()
            messenger.send('1', token, callback)
            assert q.popleft() == ('1', token)
            # Kill the server: sends during the outage must report value None.
            os.kill(p.pid, signal.SIGKILL)
            messenger.send('2', token, callback)
            coio.sleep(1)
            messenger.send('3', token, callback)
            assert q.popleft() == (None, token)
            assert q.popleft() == (None, token)
            # Restart the server: the messenger reconnects and delivery resumes.
            p = launch_echoserver(port)
            coio.sleep(1)
            messenger.send('4', token, callback)
            assert q.popleft() == ('4', token)
            messenger.close()
            coio.sleep(1)
        finally:
            os.kill(p.pid, signal.SIGKILL)
    def testPerformance(self):
        """Throughput benchmark: stream N messages through an echo server in batches."""
        token = id(self)
        message_length = 1024
        N = 10000
        batch_size = 100
        collector = Collector(batch_size)
        l = 0
        port = 6001
        host = ('localhost', port)
        p = launch_echoserver(port)
        bytecount = 0
        try:
            sent_messages = deque()
            coio.sleep(1)
            messenger = Messenger(host)
            messenger.connect()
            # One long digit string; each message is a sliding window into it.
            message_buffer = ''.join('%d' % (i % 10) for i in xrange(N+message_length*2))
            i = 0
            start_time = timer()
            for i in xrange(N):
                # Large messages use a zero-copy buffer; small ones a plain slice.
                if message_length > 4096:
                    message = buffer(message_buffer, i, message_length)
                else:
                    message = message_buffer[i:i+message_length]
                bytecount += len(message)
                messenger.send(message, token, collector)
                sent_messages.append((message, token))
                l += 1
                # Validate echoed replies one batch at a time.
                if l % batch_size == 0:
                    replies = collector.collect()
                    for i in xrange(len(replies)):
                        rm, rt = replies[i]
                        sm, st = sent_messages.popleft()
                        if type(sm) is buffer:
                            rm = buffer(rm)
                        if rm != sm:
                            print 'i: ', i
                            assert False
                    collector = Collector(batch_size)

            end_time = timer()
            elapsed_time = end_time - start_time
            print 'Transmitted %d messages with a size of %d bytes' % (N, message_length)
            print 'Transmission time (with validation): %fs' % elapsed_time
            print '%.2f requests+replies/s, %.2f MB/s' % (float(N*2) / elapsed_time, (float(bytecount*2) / 2**20) / elapsed_time)
            messenger.close()
        finally:
            os.kill(p.pid, signal.SIGKILL)
Beispiel #20
0
    def testInterProcessMultiService_Async(self):
        """Benchmark asynchronous multicast set/get against M server subprocesses."""
        print
        print 'testInterProcessMultiService_Async'
        M = 10
        N = 1000
        ports = range(16600, 16600 + M)
        for port in ports:
            self.registerSubprocess(spawn_server(port))
        listeners = [('localhost', port) for port in ports]
        clients = [Client(listener, StoreProtocol()) for listener in listeners]
        self.clients.extend(clients)
        for client in clients:
            client.connect()
            while not client.is_connected():
                coio.sleep(0.1)

        store_client = MulticastClient(StoreProtocol())

        keys = ['foo%d' % i for i in xrange(N)]
        timestamps = [i for i in xrange(N)]
        values = ['bar%d' % i for i in xrange(N)]

        start = time.time()

        # Fire all sets asynchronously to every server, then wait for replies.
        collector = store_client.set_collector(clients, N)
        for key, timestamp, value in zip(keys, timestamps, values):
            store_client.set_async(collector, key, timestamp, value)
        collector.collect()

        # Same for gets; collect() returns replies grouped per server token.
        collector = store_client.get_collector(clients, N)
        for key in keys:
            store_client.get_async(collector, key)
        received_value_lists = collector.collect()

        elapsed = time.time() - start

        # Each server must echo back every timestamp/value pair.
        for token, received_values in received_value_lists.iteritems():
            for timestamp, value, (received_timestamp, received_value) in zip(
                    timestamps, values, received_values):
                if str(received_value) != str(value):
                    print received_value, value
                assert str(received_value) == str(value)
                assert received_timestamp == timestamp

        invocation_count = 2 * len(ports) * N
        print 'Elapsed: %.2fs' % elapsed
        print 'Invocations: %d' % invocation_count
        print '%.2f invocations / s' % (invocation_count / elapsed)
Beispiel #21
0
def Sleeper(timestamps, write_channel, interval):
    """Send 'heart-beat' on write_channel whenever `interval` seconds pass with no activity.

    timestamps holds per-entry last-activity times and is updated in place
    after each heart-beat; the loop stops when timestamps[0] becomes None.
    """
    if timestamps[0] is not None:
        sleep_amount = min(timestamps) + interval - time.time()
        while True:
            if sleep_amount > 0:
                coio.sleep(sleep_amount)
                # A None in slot 0 signals shutdown.
                if timestamps[0] is None:
                    break
                # Activity may have happened while sleeping; re-check the deadline.
                sleep_amount = min(timestamps) + interval - time.time()
                if sleep_amount > 0:
                    continue
            now_ts = time.time()
            write_channel.send('heart-beat\r\n')
            # Record the heart-beat as activity on every entry.
            for i in xrange(len(timestamps)):
                timestamps[i] = max(timestamps[i], now_ts)
            sleep_amount = min(timestamps) + interval - time.time()
Beispiel #22
0
def Sleeper(timestamps, write_channel, interval):
  """Send 'heart-beat' on write_channel whenever `interval` seconds pass with no activity.

  timestamps holds per-entry last-activity times, updated in place after each
  heart-beat; the loop stops when timestamps[0] becomes None.
  """
  if timestamps[0] is not None:
    sleep_amount = min(timestamps) + interval - time.time()
    while True:
      if sleep_amount > 0:
        coio.sleep(sleep_amount)
        # A None in slot 0 signals shutdown.
        if timestamps[0] is None:
          break
        # Activity may have happened while sleeping; re-check the deadline.
        sleep_amount = min(timestamps) + interval - time.time()
        if sleep_amount > 0:
          continue
      now_ts = time.time()
      write_channel.send('heart-beat\r\n')
      # Record the heart-beat as activity on every entry.
      for i in xrange(len(timestamps)):
        timestamps[i] = max(timestamps[i], now_ts)
      sleep_amount = min(timestamps) + interval - time.time()
    def testInterProcessMultiService_Async(self):
        """Benchmark asynchronous multicast set/get against M server subprocesses."""
        print
        print "testInterProcessMultiService_Async"
        M = 10
        N = 1000
        ports = range(16600, 16600 + M)
        for port in ports:
            self.registerSubprocess(spawn_server(port))
        listeners = [("localhost", port) for port in ports]
        clients = [Client(listener, StoreProtocol()) for listener in listeners]
        self.clients.extend(clients)
        for client in clients:
            client.connect()
            while not client.is_connected():
                coio.sleep(0.1)

        store_client = MulticastClient(StoreProtocol())

        keys = ["foo%d" % i for i in xrange(N)]
        timestamps = [i for i in xrange(N)]
        values = ["bar%d" % i for i in xrange(N)]

        start = time.time()

        # Fire all sets asynchronously to every server, then wait for replies.
        collector = store_client.set_collector(clients, N)
        for key, timestamp, value in zip(keys, timestamps, values):
            store_client.set_async(collector, key, timestamp, value)
        collector.collect()

        # Same for gets; collect() returns replies grouped per server token.
        collector = store_client.get_collector(clients, N)
        for key in keys:
            store_client.get_async(collector, key)
        received_value_lists = collector.collect()

        elapsed = time.time() - start

        # Each server must echo back every timestamp/value pair.
        for token, received_values in received_value_lists.iteritems():
            for timestamp, value, (received_timestamp, received_value) in zip(timestamps, values, received_values):
                if str(received_value) != str(value):
                    print received_value, value
                assert str(received_value) == str(value)
                assert received_timestamp == timestamp

        invocation_count = 2 * len(ports) * N
        print "Elapsed: %.2fs" % elapsed
        print "Invocations: %d" % invocation_count
        print "%.2f invocations / s" % (invocation_count / elapsed)
    def testInProcessSingleService_Async(self):
        """Benchmark asynchronous pipelined set/get against an in-process server."""
        print
        print "testInProcessSingleService_Async"
        N = 1000
        listener = ("localhost", 16100)
        server = Server(listener, [StoreService()])
        server.serve()
        self.servers.append(server)
        store_client = Client(listener, StoreProtocol())
        store_client.connect()
        self.clients.append(store_client)
        while not store_client.is_connected():
            coio.sleep(0.01)

        data = [("foo%d" % i, long(i), "bar%d" % i) for i in xrange(N)]

        start = time.time()

        # Pipeline all sets, then wait for every reply at once.
        collector = store_client.set_collector(N)
        for key, timestamp, value in data:
            store_client.set_async(collector, key, timestamp, value)
        replies = collector.collect()
        assert len(replies) == len(data)

        collector = store_client.get_collector(N)
        for key, timestamp, value in data:
            store_client.get_async(collector, key)
        replies = collector.collect()

        elapsed = time.time() - start

        # The test assumes replies come back in request order.
        assert len(replies) == len(data)
        for ((fetched_timestamp, fetched_value), client), (key, timestamp, value) in zip(replies, data):
            if fetched_value != value:
                print "%s (%d %s) != %s (%d %s)" % (
                    repr(fetched_value),
                    len(fetched_value),
                    type(fetched_value),
                    repr(value),
                    len(value),
                    type(value),
                )
            assert fetched_value == value
            assert fetched_timestamp == timestamp

        print "Elapsed: %.2fs" % elapsed
        print "%.2f invocations / s" % (2 * N / elapsed)
 def testClient(self):
     """CoordinatorClient must fetch the configuration served by CoordinatorServer."""
     cfg_filepath = 'test/local_cluster.yaml'
     cfg = configuration.try_load_file(paths.path(cfg_filepath))
     coordinator_server = CoordinatorServer(cfg.master_coordinator_id, paths.path(cfg_filepath))
     coordinator_server_task = coio.stackless.tasklet(coordinator_server.serve)()
     coio.stackless.schedule()
     # self.callback records the received configuration into these attributes.
     self.new_configuration = None
     self.new_timestamp = None
     client = CoordinatorClient(coordinators=[cfg.master_coordinator], callbacks=[self.callback])
     client.start()
     # Give the client up to ~10 seconds (1000 * 0.01s) to call back.
     for i in xrange(0, 1000):
         coio.sleep(0.01)
         if self.new_configuration or self.new_timestamp:
             break
     assert cfg.representation() == self.new_configuration.representation()
     print 'Fetched configuration: ', self.new_configuration
     print 'Timestamp: ', self.new_timestamp
     coordinator_server_task.kill()
Beispiel #26
0
    def testInProcessMultiService_Sync(self):
        """Benchmark synchronous multicast set/get against two in-process servers."""
        print
        print 'testInProcessMultiService_Sync'
        N = 1000
        listener1 = ('localhost', 16200)
        listener2 = ('localhost', 16201)
        server1 = Server(listener1, [StoreService()])
        server2 = Server(listener2, [StoreService()])
        self.servers.append(server1)
        self.servers.append(server2)
        server1.serve()
        server2.serve()
        listeners = [listener1, listener2]
        clients = [Client(listener, StoreProtocol()) for listener in listeners]
        for client in clients:
            client.connect()
            while not client.is_connected():
                coio.sleep(0.01)
        self.clients.extend(clients)
        store_client = MulticastClient(StoreProtocol())
        coio.stackless.schedule()

        values = [('foo%d' % i, i, 'bar%d' % i) for i in xrange(N)]
        received_values_list = []

        start = time.time()

        # Each set/get is multicast synchronously to both servers.
        for key, timestamp, value in values:
            store_client.set(clients, key, timestamp, value)
            received_values_list.append(store_client.get(clients, key))

        elapsed = time.time() - start

        # Every server's reply must match what was set.
        for received_values, (key, timestamp,
                              value) in zip(received_values_list, values):
            for token, (received_timestamp, received_value) in received_values:
                if str(received_value) != str(value):
                    print received_value, value
                assert str(received_value) == str(value)
                assert received_timestamp == timestamp

        print 'Elapsed: %.2fs' % elapsed
        print '%.2f invocations / s' % (len(listeners) * 2 * N / elapsed)
Beispiel #27
0
def run_in_tasklet_with_timeout(function,
                                timeout,
                                default_value=None,
                                args=(),
                                kwargs={}):
    """Run function in separate tasklet, kill when timeout elapsed.
  
  Create a new tasklet, run function(*args, **kwargs) in it, and once done,
  return its return value or raise the exception it has raised. If function
  is not done within `timeout' seconds, send TaskletExit to its tasklet
  (switching to it so it can handle it, then proceeding with scheduling the
  caller tasklet), and return default_value.

  This method is safe when exceptions are raised (or forced) in any of its
  two tasklets. For example, when TaskletExit is raised in any of the
  tasklets, it's immediately propagated to the other tasklet.
  """
    # On success results == [value]; on failure results == [exc_type, exc, tb].
    results = []

    def Worker(sleeper_tasklet, function, args, kwargs):
        try:
            results.append(function(*args, **kwargs))
        except:
            # We do this for TaskletExit as well.
            results.extend(sys.exc_info())
        if sleeper_tasklet.alive:
            sleeper_tasklet.insert()  # Interrupt coio.sleep().

    worker_tasklet = coio.stackless.tasklet(Worker)(stackless.current,
                                                    function, args, kwargs)
    try:
        coio.sleep(timeout)
    finally:
        if worker_tasklet.alive:
            worker_tasklet.remove()
            # This raises TaskletExit in Worker, so it might further extend results
            # as a side effect. We don't care about that.
            worker_tasklet.kill()
            # NOTE(review): returning inside finally swallows any exception
            # raised during coio.sleep() -- confirm this is intended.
            return default_value
        else:
            if len(results) > 1:  # Propagate exception.
                raise results[0], results[1], results[2]
            return results[0]
Beispiel #28
0
    def testInterProcessSingleService_Async(self):
        """Stress asynchronous set/get with large payloads against a server subprocess."""
        print
        print 'testInterProcessSingleService_Async'
        N = 100000
        self.registerSubprocess(spawn_server(16400))
        listener = ('localhost', 16400)
        store_client = Client(listener, StoreProtocol())
        store_client.connect()
        self.clients.append(store_client)
        while not store_client.is_connected():
            coio.sleep(0.01)

        # Inflate keys and values 100x to exercise larger payloads.
        data = [('foo%d' % i, i, 'bar%d' % i) for i in xrange(N)]
        data = [(key * 100, timestamp, value * 100)
                for key, timestamp, value in data]

        start = time.time()

        # Pipeline all sets, then wait for every reply at once.
        collector = store_client.set_collector(N)
        for key, timestamp, value in data:
            store_client.set_async(collector, key, timestamp, value)
        replies = collector.collect()
        assert len(replies) == len(data)

        collector = store_client.get_collector(N)
        for key, timestamp, value in data:
            store_client.get_async(collector, key)
        replies = collector.collect()
        assert len(replies) == len(data)

        elapsed = time.time() - start

        # The test assumes replies come back in request order.
        for ((fetched_timestamp, fetched_value),
             client), (key, timestamp, value) in zip(replies, data):
            if fetched_value != value:
                print '%s (%d %s) != %s (%d %s)' % (
                    repr(fetched_value), len(fetched_value),
                    type(fetched_value), repr(value), len(value), type(value))
            assert fetched_value == value
            assert fetched_timestamp == timestamp

        print 'Elapsed: %.2fs' % elapsed
        print '%.2f invocations / s' % (2 * N / elapsed)
    def testInProcessMultiService_Sync(self):
        """Benchmark synchronous multicast set/get against two in-process servers."""
        print
        print "testInProcessMultiService_Sync"
        N = 1000
        listener1 = ("localhost", 16200)
        listener2 = ("localhost", 16201)
        server1 = Server(listener1, [StoreService()])
        server2 = Server(listener2, [StoreService()])
        self.servers.append(server1)
        self.servers.append(server2)
        server1.serve()
        server2.serve()
        listeners = [listener1, listener2]
        clients = [Client(listener, StoreProtocol()) for listener in listeners]
        for client in clients:
            client.connect()
            while not client.is_connected():
                coio.sleep(0.01)
        self.clients.extend(clients)
        store_client = MulticastClient(StoreProtocol())
        coio.stackless.schedule()

        values = [("foo%d" % i, i, "bar%d" % i) for i in xrange(N)]
        received_values_list = []

        start = time.time()

        # Each set/get is multicast synchronously to both servers.
        for key, timestamp, value in values:
            store_client.set(clients, key, timestamp, value)
            received_values_list.append(store_client.get(clients, key))

        elapsed = time.time() - start

        # Every server's reply must match what was set.
        for received_values, (key, timestamp, value) in zip(received_values_list, values):
            for token, (received_timestamp, received_value) in received_values:
                if str(received_value) != str(value):
                    print received_value, value
                assert str(received_value) == str(value)
                assert received_timestamp == timestamp

        print "Elapsed: %.2fs" % elapsed
        print "%.2f invocations / s" % (len(listeners) * 2 * N / elapsed)
Beispiel #30
0
def main():
    parser = argparse.ArgumentParser(description="Tako test feeder.")
    parser.add_argument('address')
    parser.add_argument('port', type=int)
    parser.add_argument('-l', '--limit', type=int, default=0)
    parser.add_argument('-d', '--delay', type=float, default=1)
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.ERROR)

    listener = (args.address, args.port)
    client = Client(listener, PublicNodeServiceProtocol())
    while not client.is_connected():
        coio.sleep(0.01)

    last_time = time.time()
    print 'feeding %s' % repr(listener)
    i = 0
    N = 1000
    while True:
        if time.time() - last_time > 1:
            last_time = time.time()
            print i
        collector = client.set_collector(N)
        for j in xrange(N):
            key = sha256('%d:%d' % (i, j))
            value = sha256(key) * 16
            client.set_async(collector, key, value)
            i += 1
        collector.collect()
        if not client.is_connected():
            exit(-1)
        if args.delay:
            coio.sleep(args.delay)
        if args.limit > 0 and i >= args.limit:
            break
Beispiel #31
0
def run_in_tasklet_with_timeout(function, timeout, default_value=None, args=(), kwargs={}):
    """Run function in sepearte tasklet, kill when timeout elapsed.
  
  Create a new tasklet, run function(*args, **kwargs) in it, and once done,
  return its return value or raise the exception it has raised. If function
  is not done within `timeout' seconds, send TaskletExit to its tasklet
  (switching to it so it can handle it, then proceeding with scheduling the
  caller tasklet), and return default_value.

  This method is safe when exceptions are raised (or forced) in any of its
  two tasklets. For example, when TaskletExit is raised in any of the
  tasklets, it's immediately propagated to the other tasklet.
  """
    results = []

    def Worker(sleeper_tasklet, function, args, kwargs):
        try:
            results.append(function(*args, **kwargs))
        except:
            # We do this for TaskletExit as well.
            results.extend(sys.exc_info())
        if sleeper_tasklet.alive:
            sleeper_tasklet.insert()  # Interrupt coio.sleep().

    worker_tasklet = coio.stackless.tasklet(Worker)(stackless.current, function, args, kwargs)
    try:
        coio.sleep(timeout)
    finally:
        if worker_tasklet.alive:
            worker_tasklet.remove()
            # This raises TaskletExit in Worker, so it might further extend results
            # as a side effect. We don't care about that.
            worker_tasklet.kill()
            return default_value
        else:
            if len(results) > 1:  # Propagate exception.
                raise results[0], results[1], results[2]
            return results[0]
def feed(client):
    """Endlessly write timestamped key/value pairs and verify them via get/stat.

    Each iteration derives a value from a hash of the global counter, stamps
    it with the current time in microseconds, stores it through `client`,
    then reads it back with get() and stat() and warns on any mismatch.
    Each item is attempted at most 3 times, sleeping 1s between attempts.
    """
    global counter
    while True:
        # NOTE(review): this first key is only used to derive `value`; the
        # line below reassigns `key` from the timestamp, so the stored key
        # is unrelated to the value's hash. Looks like an editing leftover
        # -- confirm intent against the original source.
        key = sha256(str(counter))
        value = sha256(key) * 16
        timestamp = long(time.time() * 1000000.0)
        key = sha256(repr(timestamp))
        counter += 1
        # Up to 3 attempts per item; any attempt that fails a check sleeps
        # 1s and retries. NOTE(review): after 3 failed attempts the item is
        # silently dropped and the loop moves on.
        for i in range(3):
            try:
                new_timestamp = client.set(key, timestamp, value)
                if new_timestamp != timestamp:
                    logging.warning('new_timestamp != timestamp (%s != %s)', new_timestamp, timestamp)
                    logging.warning('Retrying...')
                    coio.sleep(1)
                    continue
                # Read the item back two ways and cross-check the stamps.
                fetched_timestamp, fetched_value = client.get(key)
                stat_timestamp = client.stat(key)
                if fetched_timestamp != timestamp:
                    logging.warning('fetched_timestamp != timestamp (%s != %s)', fetched_timestamp, timestamp)
                    logging.warning('Retrying...')
                    coio.sleep(1)
                    continue
                if fetched_value != value:
                    logging.warning('fetched_value != value (%s != %s)', fetched_value, value)
                    logging.warning('Retrying...')
                    coio.sleep(1)
                    continue
                if stat_timestamp != timestamp:
                    logging.warning('stat_timestamp != timestamp (%s != %s)', stat_timestamp, timestamp)
                    logging.warning('Retrying...')
                    coio.sleep(1)
                    continue
                break  # All checks passed for this item.
            except NotAvailableException, e:
                # Node temporarily unavailable: warn and retry.
                logging.warning(e)
                logging.warning('Retrying...')
                coio.sleep(1)
 def testInterProcessSingleService(self):
     print
     print "testInterProcessSingleService"
     N = 1000
     self.registerSubprocess(spawn_server(16300))
     listener = ("localhost", 16300)
     store_client = Client(listener, StoreProtocol())
     store_client.connect()
     self.clients.append(store_client)
     while not store_client.is_connected():
         coio.sleep(0.01)
     values = [("foo%d" % i, i, "bar%d" % i) for i in xrange(N)]
     received_values = []
     start = time.time()
     for key, timestamp, value in values:
         store_client.set(key, timestamp, value)
         received_values.append(store_client.get(key))
     elapsed = time.time() - start
     for (key, timestamp, value), (received_timestamp, received_value) in zip(values, received_values):
         assert timestamp == received_timestamp
         assert value == received_value
     print "Elapsed: %.2fs" % elapsed
     print "%.2f invocations / s" % (2 * N / elapsed)
Beispiel #34
0
    def testInterProcessMultiService_Sync(self):
        print
        print 'testInterProcessMultiService_Sync'
        N = 1000
        ports = range(16500, 16510)
        for port in ports:
            self.registerSubprocess(spawn_server(port))
        listeners = [('localhost', port) for port in ports]
        clients = [Client(listener, StoreProtocol()) for listener in listeners]
        self.clients.extend(clients)
        for client in clients:
            client.connect()
            while not client.is_connected():
                coio.sleep(0.1)

        store_client = MulticastClient(StoreProtocol())

        values = [('foo%d' % i, i, 'bar%d' % i) for i in xrange(N)]

        received_values_list = []
        start = time.time()
        for key, timestamp, value in values:
            store_client.set(clients, key, timestamp, value)
            received_values = store_client.get(clients, key)
            received_values_list.append(received_values)
        elapsed = time.time() - start

        for received_values, (key, timestamp,
                              value) in zip(received_values_list, values):
            for token, (received_timestamp, received_value) in received_values:
                if str(received_value) != str(value):
                    print received_value, value
                assert str(received_value) == str(value)
                assert received_timestamp == timestamp

        print 'Elapsed: %.2fs' % elapsed
        print '%.2f invocations / s' % (len(ports) * N / elapsed)
    def testInterProcessMultiService_Sync(self):
        print
        print "testInterProcessMultiService_Sync"
        N = 1000
        ports = range(16500, 16510)
        for port in ports:
            self.registerSubprocess(spawn_server(port))
        listeners = [("localhost", port) for port in ports]
        clients = [Client(listener, StoreProtocol()) for listener in listeners]
        self.clients.extend(clients)
        for client in clients:
            client.connect()
            while not client.is_connected():
                coio.sleep(0.1)

        store_client = MulticastClient(StoreProtocol())

        values = [("foo%d" % i, i, "bar%d" % i) for i in xrange(N)]

        received_values_list = []
        start = time.time()
        for key, timestamp, value in values:
            store_client.set(clients, key, timestamp, value)
            received_values = store_client.get(clients, key)
            received_values_list.append(received_values)
        elapsed = time.time() - start

        for received_values, (key, timestamp, value) in zip(received_values_list, values):
            for token, (received_timestamp, received_value) in received_values:
                if str(received_value) != str(value):
                    print received_value, value
                assert str(received_value) == str(value)
                assert received_timestamp == timestamp

        print "Elapsed: %.2fs" % elapsed
        print "%.2f invocations / s" % (len(ports) * N / elapsed)
Beispiel #36
0
def ProgressReporter(delta_sec):
    """Write a '.' progress dot to stderr every delta_sec seconds, forever."""
    emit = sys.stderr.write
    while True:
        emit('.')
        coio.sleep(delta_sec)
Beispiel #37
0
def ProgressWorker(sleep_amount):
    """Write a 'W' to the STDOUT_FILENO descriptor every sleep_amount seconds, forever."""
    write = os.write
    while True:
        write(STDOUT_FILENO, 'W')
        coio.sleep(sleep_amount)
Beispiel #38
0
 def Sleeper():
     # Sleep "forever"; the surrounding test is expected to kill this
     # tasklet, so the log append below should never run.
     coio.sleep(99999999)  # Quite a lot, won't be reached.
     log_items.append("slept")
Beispiel #39
0
 def Sleeper():
     # The test injects an AssertionError into this tasklet mid-sleep;
     # record its message instead of completing the sleep.
     try:
         coio.sleep(30)  # Half a minute, won't be reached.
     except AssertionError, e:
         log_items.append(str(e))
Beispiel #40
0
 def Sleeper():
     # Record entry, nap briefly, record exit, then signal the waiting
     # tasklet via sleep_done_channel.
     log_items.append('sleeping')
     coio.sleep(SMALL_SLEEP_SEC)
     log_items.append('slept')
     sleep_done_channel.send(None)
Beispiel #41
0
def ProgressReporter(delta_sec):
    # Emit a '.' to stderr every delta_sec seconds.
    from syncless import coio
    while True:
        sys.stderr.write('.')
        coio.sleep(delta_sec)
        # NOTE(review): everything from here down reads like a fragment of
        # an async multicast get benchmark pasted into this loop body;
        # store_client, clients, keys, N, start, timestamps, values and
        # ports are not defined in this scope. Confirm against the original
        # source before relying on this function.
        collector = store_client.get_collector(clients, N)
        for key in keys:
            store_client.get_async(collector, key)
        received_value_lists = collector.collect()

        elapsed = time.time() - start

        # Cross-check every node's replies against the expected data.
        for token, received_values in received_value_lists.iteritems():
            for timestamp, value, (received_timestamp, received_value) in zip(timestamps, values, received_values):
                if str(received_value) != str(value):
                    print received_value, value
                assert str(received_value) == str(value)
                assert received_timestamp == timestamp

        invocation_count = 2 * len(ports) * N
        print "Elapsed: %.2fs" % elapsed
        print "Invocations: %d" % invocation_count
        print "%.2f invocations / s" % (invocation_count / elapsed)


if __name__ == "__main__":
    # With a port argument: run a standalone store server on localhost
    # and idle forever. Without arguments: run the unit tests.
    if len(sys.argv) <= 1:
        unittest.main()
    else:
        listener = ("localhost", int(sys.argv[1]))
        Server(listener, [StoreService()]).serve()
        while True:
            coio.sleep(1)
Beispiel #43
0
 def testMainSleep(self):
     # A plain coio.sleep() from the main tasklet must leave no event
     # registered with the loop, neither before nor after.
     self.assertEqual(LOOPRET, coio.nonblocking_loop_for_tests())  # No registered events.
     coio.sleep(SMALL_SLEEP_SEC)
     self.assertEqual(LOOPRET, coio.nonblocking_loop_for_tests())  # No registered events.
Beispiel #44
0
 def Sleeper():
     # Record entry, nap briefly, record exit, then signal the waiting
     # tasklet via sleep_done_channel.
     log_items.append("sleeping")
     coio.sleep(SMALL_SLEEP_SEC)
     log_items.append("slept")
     sleep_done_channel.send(None)
Beispiel #45
0
 def Sleeper():
     # The test injects an AssertionError into this tasklet mid-sleep;
     # record its message instead of completing the sleep.
     try:
         coio.sleep(30)  # Half a minute, won't be reached.
     except AssertionError, e:
         log_items.append(str(e))
Beispiel #46
0
 def Sleeper():
     # Sleep "forever"; the surrounding test is expected to kill this
     # tasklet, so the log append below should never run.
     coio.sleep(99999999)  # Quite a lot, won't be reached.
     log_items.append('slept')
Beispiel #47
0
 def ProgressWorker(sleep_amount):
     # Forever: write a '.' to fd 2 (stderr) and nap sleep_amount seconds.
     while True:
         os.write(2, '.')
         coio.sleep(sleep_amount)
Beispiel #48
0
 def TickerWorker(cls, sleep_amount):
     # Forever: write a '.' to fd 1 (stdout) and nap sleep_amount seconds.
     while True:
         os.write(1, '.')
         coio.sleep(sleep_amount)
Beispiel #49
0
 def testNegativeSleep(self):
     # Sleeping a negative amount must not register any event with the
     # loop, neither before nor after the call.
     self.assertEqual(LOOPRET, coio.nonblocking_loop_for_tests())  # No registered events.
     coio.sleep(-42)
     self.assertEqual(LOOPRET, coio.nonblocking_loop_for_tests())  # No registered events.
Beispiel #50
0
        # NOTE(review): orphaned fragment -- the enclosing method header is
        # missing from this chunk; store_client, keys, collector, start,
        # timestamps, values, ports and N all come from that missing
        # context. Kept byte-identical.
        for key in keys:
            store_client.get_async(collector, key)
        received_value_lists = collector.collect()

        elapsed = time.time() - start

        # Cross-check every node's replies against the expected data.
        for token, received_values in received_value_lists.iteritems():
            for timestamp, value, (received_timestamp, received_value) in zip(
                    timestamps, values, received_values):
                if str(received_value) != str(value):
                    print received_value, value
                assert str(received_value) == str(value)
                assert received_timestamp == timestamp

        invocation_count = 2 * len(ports) * N
        print 'Elapsed: %.2fs' % elapsed
        print 'Invocations: %d' % invocation_count
        print '%.2f invocations / s' % (invocation_count / elapsed)


if __name__ == '__main__':
    # With a port argument: run a standalone store server on localhost and
    # idle forever. Without arguments: run the unit tests.
    if len(sys.argv) > 1:
        port = int(sys.argv[1])
        listener = ('localhost', port)
        server = Server(listener, [StoreService()])
        server.serve()
        while True:
            coio.sleep(1)
    else:
        unittest.main()