Example #1
0
    def test_a_buncha_stuff(self):
        """Stress tpool.Proxy from many concurrent greenthreads.

        Each greenthread pushes 100 calls through a proxied object while
        random sleeps shuffle the scheduling; every call must return the
        exact token it was given.
        """
        assert_ = self.assert_

        class Dummy(object):
            # foo() runs in a native tpool thread; the token must survive
            # the round trip unchanged.
            def foo(self, when, token=None):
                assert_(token is not None)
                time.sleep(random.random() / 200.0)
                return token

        def sender_loop(loopnum):
            proxied = tpool.Proxy(Dummy())
            iterations = 100
            for step in six.moves.range(iterations):
                eventlet.sleep(random.random() / 200.0)
                timestamp = time.time()
                expected = loopnum * iterations + step
                self.assertEqual(expected, proxied.foo(timestamp, token=expected))
                eventlet.sleep(random.random() / 200.0)

        loops = 10
        pile = eventlet.GreenPile(loops)
        for idx in six.moves.range(loops):
            pile.spawn(sender_loop, idx)
        self.assertEqual(len(list(pile)), loops)
        tpool.killall()
Example #2
0
    def test_a_buncha_stuff(self):
        """Stress tpool.Proxy from many concurrent greenthreads.

        Spawns 10 greenthreads, each making 100 proxied calls with random
        sleeps to shuffle scheduling; every call must echo its token back.
        """
        assert_ = self.assert_

        class Dummy(object):
            def foo(self, when, token=None):
                # Runs in a native tpool thread; token must round-trip intact.
                assert_(token is not None)
                time.sleep(random.random() / 200.0)
                return token

        def sender_loop(loopnum):
            obj = tpool.Proxy(Dummy())
            count = 100
            # range() works on both Python 2 and 3; xrange is py2-only.
            for n in range(count):
                eventlet.sleep(random.random() / 200.0)
                now = time.time()
                token = loopnum * count + n
                rv = obj.foo(now, token=token)
                # assertEqual: assertEquals is a deprecated alias.
                self.assertEqual(token, rv)
                eventlet.sleep(random.random() / 200.0)

        # Single constant instead of the magic number 10 repeated 3 times.
        cnt = 10
        pile = eventlet.GreenPile(cnt)
        for i in range(cnt):
            pile.spawn(sender_loop, i)
        results = list(pile)
        self.assertEqual(len(results), cnt)
        tpool.killall()
Example #3
0
    def tearDown(self):
        """Restore alarm state and shut down tpool after each test."""
        self.timer.cancel()
        if self.previous_alarm:
            # Put back the SIGALRM handler and remaining alarm seconds
            # that setUp saved as (handler, seconds).
            signal.signal(signal.SIGALRM, self.previous_alarm[0])
            signal.alarm(self.previous_alarm[1])

        # Kill tpool worker threads, then assert the hub has no leftovers.
        tpool.killall()
        verify_hub_empty()
Example #4
0
    def tearDown(self):
        """Restore alarm state and shut down tpool after each test."""
        self.timer.cancel()
        if self.previous_alarm:
            # Put back the SIGALRM handler and remaining alarm seconds
            # that setUp saved as (handler, seconds).
            signal.signal(signal.SIGALRM, self.previous_alarm[0])
            signal.alarm(self.previous_alarm[1])

        # Kill tpool worker threads, then assert the hub has no leftovers.
        tpool.killall()
        verify_hub_empty()
Example #5
0
def main():
    """Parse command-line options, open the store and serve requests.

    Runs an accept loop until KeyboardInterrupt, then shuts everything
    down in order: listener, checker thread, tpool workers, store.
    Python 2 only (uses print statements).
    """
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-l", "--listen", dest="host", default="0.0.0.0",
            help="the ip interface to bind")
    parser.add_option("-p", "--port", default=7902, type=int,
            help="which port to listen")
#    parser.add_option("-d", "--daemon", action="store_true", 
#            help="run in daemon", default=False)
    parser.add_option("-H", "--home", default="beansdb",
            help="the database path")
    parser.add_option("-c", "--count", default=16, type=int,
            help="number of db file, power of 16")
    parser.add_option("-s", "--start", default=0, type=int,
            help="start index of db file")
    parser.add_option("-e", "--end", default=-1, type=int,
            help="last end of db file, -1 means no limit")
    parser.add_option("-n", "--limit", default=100, type=int, 
            help="diffs limit to do db scan")
    parser.add_option("-t", "--threads", type=int, default=20,
            help="number of IO threads")


    (options, args) = parser.parse_args()

    # log16(count) gives the db tree depth; count is expected to be a
    # power of 16 — TODO confirm upstream validation.
    store = (HStore(options.home, 
                int(math.log(options.count, 16)),
                options.start, options.end))
    #store.check(options.limit, nonblocking=True)
    api.spawn(tpool.execute, store.check, options.limit) # check in thread pool
    api.spawn(tpool.execute, flush, store)

    print "server listening on %s:%s" % (options.host, options.port)
    server = api.tcp_listener((options.host, options.port))
    util.set_reuse_addr(server)

    # Accept loop: one green thread per client connection; Ctrl-C exits.
    while True:
        try:
            new_sock, address = server.accept()
        except KeyboardInterrupt:
            break
        api.spawn(handler, store, new_sock, 
            new_sock.makefile('r'), new_sock.makefile('w'))

    # Module-level flag read by background loops to stop gracefully.
    global quit
    quit = True
    
    print 'close listener ...'
    server.close()
    
    print 'stop checker thread ...'
    store.stop_check()

    print 'stop worker threads ...'
    tpool.killall()

    print 'close store...'
    store.close()
Example #6
0
    def tearDown(self):
        """Restore alarm state, drain tpool, and verify a clean hub."""
        self.timer.cancel()
        if self.previous_alarm:
            # Put back the SIGALRM handler and remaining alarm seconds
            # that setUp saved as (handler, seconds).
            signal.signal(signal.SIGALRM, self.previous_alarm[0])
            signal.alarm(self.previous_alarm[1])

        tpool.killall()
        # Collect garbage and yield once so pending green callbacks can
        # run before asserting that the hub has no leftover listeners.
        gc.collect()
        eventlet.sleep(0)
        verify_hub_empty()
Example #7
0
    def tearDown(self):
        """Restore alarm state, drain tpool, and verify a clean hub."""
        self.timer.cancel()
        if self.previous_alarm:
            # Put back the SIGALRM handler and remaining alarm seconds
            # that setUp saved as (handler, seconds).
            signal.signal(signal.SIGALRM, self.previous_alarm[0])
            signal.alarm(self.previous_alarm[1])

        tpool.killall()
        # Collect garbage and yield once so pending green callbacks can
        # run before asserting that the hub has no leftover listeners.
        gc.collect()
        eventlet.sleep(0)
        verify_hub_empty()
Example #8
0
    def test_killall_remaining_results(self):
        """killall() must not strand a greenthread waiting on tpool work.

        A greenthread submits a slow native-thread job, then the pool is
        killed; gt.wait() must still return instead of hanging.
        """
        started = eventlet.Event()

        def slow_native_call():
            # Simulates slow blocking work inside a tpool native thread.
            time.sleep(.5)

        def submit_work():
            started.send(None)
            tpool.execute(slow_native_call)

        worker = eventlet.spawn(submit_work)
        started.wait()
        tpool.killall()
        worker.wait()
Example #9
0
    def test_killall_remaining_results(self):
        """killall() must not strand a greenthread waiting on tpool work.

        A greenthread submits a slow native-thread job, then the pool is
        killed; gt.wait() must still return instead of hanging.
        """
        started = event.Event()

        def slow_native_call():
            # Simulates slow blocking work inside a tpool native thread.
            time.sleep(.5)

        def submit_work():
            started.send(None)
            tpool.execute(slow_native_call)

        worker = eventlet.spawn(submit_work)
        started.wait()
        tpool.killall()
        worker.wait()
Example #10
0
    def test_long_lived_object(self):
        """Proxied objects must be garbage collected when references drop.

        ``A`` counts live instances in a lock-protected class attribute;
        after exercising both the success and the exception path through
        tpool.Proxy, no instance may remain alive.
        """
        class A():
            lock = thread.allocate_lock()
            sequence = 0  # number of currently live A instances

            def __init__(self):
                with A.lock:
                    A.sequence += 1

            def noop(self):
                return 'A'

            def raise_exception(self):
                raise RuntimeError('not implement yet')

            def __del__(self):
                with A.lock:
                    A.sequence -= 1

        def test_method_noop():
            try:
                a = tpool.Proxy(A())
                a.noop()
            except RuntimeError:
                pass

        def test_method_exception():
            try:
                a = tpool.Proxy(A())
                a.raise_exception()
            except RuntimeError:
                pass

        for i in range(100):
            test_method_noop()
        eventlet.sleep(0)
        gc.collect()
        # assertEqual reports the live count on failure; self.assert_ is a
        # deprecated alias and gives no useful message.
        self.assertEqual(A.sequence, 0)

        for i in range(100):
            test_method_exception()
        # yield to tpool_trampoline(), otherwise e.send(rv) have a reference
        eventlet.sleep(0)
        gc.collect()
        self.assertEqual(A.sequence, 0)
        tpool.killall()
Example #11
0
    def test_benchmark(self):
        """ Benchmark computing the amount of overhead tpool adds to function calls."""
        iterations = 10000
        import timeit
        imports = """
from tests.tpool_test import noop
from eventlet.tpool import execute
        """
        t = timeit.Timer("noop()", imports)
        results = t.repeat(repeat=3, number=iterations)
        best_normal = min(results)

        t = timeit.Timer("execute(noop)", imports)
        results = t.repeat(repeat=3, number=iterations)
        best_tpool = min(results)

        tpool_overhead = (best_tpool-best_normal)/iterations
        print "%s iterations\nTpool overhead is %s seconds per call.  Normal: %s; Tpool: %s" % (
            iterations, tpool_overhead, best_normal, best_tpool)
        tpool.killall()
Example #12
0
 def test_leakage_from_tracebacks(self):
     """Repeated exceptions through tpool must not leak objects.

     Compares object-count growth of a 10-iteration warm-up loop against
     a 100-iteration loop; a leak would make the second grow ~10x.
     """
     tpool.execute(noop)  # get it started
     gc.collect()
     initial_objs = len(gc.get_objects())
     # six.moves.range throughout for py2/py3 consistency (the second
     # loop below already used it).
     for i in six.moves.range(10):
         self.assertRaises(RuntimeError, tpool.execute, raise_exception)
     gc.collect()
     middle_objs = len(gc.get_objects())
     # some objects will inevitably be created by the previous loop
     # now we test to ensure that running the loop an order of
     # magnitude more doesn't generate additional objects
     for i in six.moves.range(100):
         self.assertRaises(RuntimeError, tpool.execute, raise_exception)
     first_created = middle_objs - initial_objs
     gc.collect()
     second_created = len(gc.get_objects()) - middle_objs
     # assertTrue: self.assert_ is a deprecated alias.
     self.assertTrue(second_created - first_created < 10,
                     "first loop: %s, second loop: %s" % (first_created,
                                                          second_created))
     tpool.killall()
Example #13
0
    def test_benchmark(self):
        """ Benchmark computing the amount of overhead tpool adds to function calls."""
        iterations = 10000
        import timeit
        imports = """
from tests.tpool_test import noop
from eventlet.tpool import execute
        """
        t = timeit.Timer("noop()", imports)
        results = t.repeat(repeat=3, number=iterations)
        best_normal = min(results)

        t = timeit.Timer("execute(noop)", imports)
        results = t.repeat(repeat=3, number=iterations)
        best_tpool = min(results)

        tpool_overhead = (best_tpool-best_normal)/iterations
        print("%s iterations\nTpool overhead is %s seconds per call.  Normal: %s; Tpool: %s" % (
            iterations, tpool_overhead, best_normal, best_tpool))
        tpool.killall()
Example #14
0
 def test_leakage_from_tracebacks(self):
     """Repeated exceptions through tpool must not leak objects.

     Compares object-count growth of a 10-iteration warm-up loop against
     a 100-iteration loop; a leak would make the second grow ~10x.
     """
     tpool.execute(noop)  # get it started
     gc.collect()
     initial_objs = len(gc.get_objects())
     # six.moves.range throughout for py2/py3 consistency (the second
     # loop below already used it).
     for i in six.moves.range(10):
         self.assertRaises(RuntimeError, tpool.execute, raise_exception)
     gc.collect()
     middle_objs = len(gc.get_objects())
     # some objects will inevitably be created by the previous loop
     # now we test to ensure that running the loop an order of
     # magnitude more doesn't generate additional objects
     for i in six.moves.range(100):
         self.assertRaises(RuntimeError, tpool.execute, raise_exception)
     first_created = middle_objs - initial_objs
     gc.collect()
     second_created = len(gc.get_objects()) - middle_objs
     # assertTrue: self.assert_ is a deprecated alias.
     self.assertTrue(second_created - first_created < 10,
                     "first loop: %s, second loop: %s" % (first_created,
                                                          second_created))
     tpool.killall()
Example #15
0
File: test.py Project: joshw/cinder
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        # Create default notifier
        self.notifier = fake_notifier.get_fake_notifier()

        # Mock rpc get notifier with fake notifier method that joins all
        # notifications with the default notifier
        self.patch('cinder.rpc.get_notifier',
                   side_effect=self._get_joined_notifier)

        if self.MOCK_WORKER:
            # Mock worker creation for all tests that don't care about it
            clean_path = 'cinder.objects.cleanable.CinderCleanableObject.%s'
            for method in ('create_worker', 'set_worker', 'unset_worker'):
                self.patch(clean_path % method, return_value=None)

        if self.MOCK_TOOZ:
            # Stub out distributed-coordination calls for ordinary tests.
            self.patch('cinder.coordination.Coordinator.start')
            self.patch('cinder.coordination.Coordinator.stop')
            self.patch('cinder.coordination.Coordinator.get_lock')

        # Unit tests do not need to use lazy gettext
        i18n.enable_lazy(False)

        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        # Optionally capture stdout/stderr based on environment flags.
        environ_enabled = (lambda var_name: strutils.bool_from_string(
            os.environ.get(var_name)))
        if environ_enabled('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if environ_enabled('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

        self.useFixture(log_fixture.get_logging_handle_error_fixture())
        self.useFixture(cinder_fixtures.StandardLogging())

        rpc.add_extra_exmods("cinder.tests.unit")
        self.addCleanup(rpc.clear_extra_exmods)
        self.addCleanup(rpc.cleanup)

        # Route oslo.messaging over the in-memory fake transport.
        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_url = 'fake:/'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)

        # Load oslo_messaging_notifications config group so we can set an
        # override to prevent notifications from being ignored due to the
        # short-circuit mechanism.
        oslo_messaging.get_notification_transport(CONF)
        #  We need to use a valid driver for the notifications, so we use test.
        self.override_config('driver', ['test'],
                             group='oslo_messaging_notifications')
        rpc.init(CONF)

        # NOTE(geguileo): This is required because _determine_obj_version_cap
        # and _determine_rpc_version_cap functions in cinder.rpc.RPCAPI cache
        # versions in LAST_RPC_VERSIONS and LAST_OBJ_VERSIONS so we may have
        # weird interactions between tests if we don't clear them before each
        # test.
        rpc.LAST_OBJ_VERSIONS = {}
        rpc.LAST_RPC_VERSIONS = {}

        # Init AuthProtocol to register some base options first, such as
        # auth_url.
        auth_token.AuthProtocol('fake_app', {
            'auth_type': 'password',
            'auth_url': 'fake_url'
        })

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False, 'database')

        # The Database fixture is cached globally and reused across tests.
        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(sqla_api,
                                 migration,
                                 sql_connection=CONF.database.connection)
        self.useFixture(_DB_CACHE)

        # NOTE(blk-u): WarningsFixture must be after the Database fixture
        # because sqlalchemy-migrate messes with the warnings filters.
        self.useFixture(cinder_fixtures.WarningsFixture())

        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object
        # registry.
        objects_base.CinderObject.indirection_api = None
        self._base_test_obj_backup = copy.copy(
            objects_base.CinderObjectRegistry._registry._obj_classes)
        self.addCleanup(self._restore_obj_registry)

        self.addCleanup(CONF.reset)
        self.addCleanup(self._common_cleanup)
        self.injected = []
        self._services = []

        fake_notifier.mock_notifier(self)

        # This will be cleaned up by the NestedTempfile fixture
        lock_path = self.useFixture(fixtures.TempDir()).path
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(lock_path=lock_path, group='oslo_concurrency')
        lockutils.set_defaults(lock_path)
        self.override_config('policy_file',
                             os.path.join(
                                 os.path.abspath(
                                     os.path.join(
                                         os.path.dirname(__file__),
                                         '..',
                                     )), self.POLICY_PATH),
                             group='oslo_policy')
        self.override_config(
            'resource_query_filters_file',
            os.path.join(
                os.path.abspath(os.path.join(
                    os.path.dirname(__file__),
                    '..',
                )), self.RESOURCE_FILTER_PATH))
        self._disable_osprofiler()

        # NOTE(geguileo): This is required because common get_by_id method in
        # cinder.db.sqlalchemy.api caches get methods and if we use a mocked
        # get method in one test it would carry on to the next test.  So we
        # clear out the cache.
        sqla_api._GET_METHODS = {}

        self.override_config('backend_url',
                             'file://' + lock_path,
                             group='coordination')
        coordination.COORDINATOR.start()
        self.addCleanup(coordination.COORDINATOR.stop)

        if six.PY3:
            # TODO(smcginnis) Python 3 deprecates assertRaisesRegexp to
            # assertRaisesRegex, but Python 2 does not have the new name. This
            # can be removed once we stop supporting py2 or the new name is
            # added.
            self.assertRaisesRegexp = self.assertRaisesRegex

        # Ensure we have the default tpool size value and we don't carry
        # threads from other test runs.
        tpool.killall()
        tpool._nthreads = 20

        # NOTE(mikal): make sure we don't load a privsep helper accidentally
        self.useFixture(cinder_fixtures.PrivsepNoHelperFixture())
Example #16
0
 def tearDown(self):
     """Kill tpool worker threads so they don't leak into other tests."""
     super(TpoolConnectionPool, self).tearDown()
     # Imported locally so the module loads even when eventlet is optional.
     from eventlet import tpool
     tpool.killall()
Example #17
0
 def tearDown(self):
     """Kill tpool worker threads so they don't leak into other tests."""
     super(TpoolConnectionPool, self).tearDown()
     # Imported locally so the module loads even when eventlet is optional.
     from eventlet import tpool
     tpool.killall()
Example #18
0
 def tearDown(self):
     """Stop tpool worker threads before base-class cleanup runs."""
     tpool.killall()
     super(TestTpool, self).tearDown()
Example #19
0
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        # Create default notifier
        self.notifier = fake_notifier.get_fake_notifier()

        # Mock rpc get notifier with fake notifier method that joins all
        # notifications with the default notifier
        self.patch('cinder.rpc.get_notifier',
                   side_effect=self._get_joined_notifier)

        if self.MOCK_WORKER:
            # Mock worker creation for all tests that don't care about it
            clean_path = 'cinder.objects.cleanable.CinderCleanableObject.%s'
            for method in ('create_worker', 'set_worker', 'unset_worker'):
                self.patch(clean_path % method, return_value=None)

        if self.MOCK_TOOZ:
            # Stub out distributed-coordination calls for ordinary tests.
            self.patch('cinder.coordination.Coordinator.start')
            self.patch('cinder.coordination.Coordinator.stop')
            self.patch('cinder.coordination.Coordinator.get_lock')

        # Unit tests do not need to use lazy gettext
        i18n.enable_lazy(False)

        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        # Optionally capture stdout/stderr based on environment flags.
        environ_enabled = (lambda var_name:
                           strutils.bool_from_string(os.environ.get(var_name)))
        if environ_enabled('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if environ_enabled('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

        self.useFixture(log_fixture.get_logging_handle_error_fixture())
        self.useFixture(cinder_fixtures.StandardLogging())

        rpc.add_extra_exmods("cinder.tests.unit")
        self.addCleanup(rpc.clear_extra_exmods)
        self.addCleanup(rpc.cleanup)

        # Route oslo.messaging over the in-process fake driver.
        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)

        # Load oslo_messaging_notifications config group so we can set an
        # override to prevent notifications from being ignored due to the
        # short-circuit mechanism.
        oslo_messaging.get_notification_transport(CONF)
        #  We need to use a valid driver for the notifications, so we use test.
        self.override_config('driver', ['test'],
                             group='oslo_messaging_notifications')
        rpc.init(CONF)

        # NOTE(geguileo): This is required because _determine_obj_version_cap
        # and _determine_rpc_version_cap functions in cinder.rpc.RPCAPI cache
        # versions in LAST_RPC_VERSIONS and LAST_OBJ_VERSIONS so we may have
        # weird interactions between tests if we don't clear them before each
        # test.
        rpc.LAST_OBJ_VERSIONS = {}
        rpc.LAST_RPC_VERSIONS = {}

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False, 'database')

        # The Database fixture is cached globally and reused across tests.
        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(sqla_api, migration,
                                 sql_connection=CONF.database.connection)
        self.useFixture(_DB_CACHE)

        # NOTE(blk-u): WarningsFixture must be after the Database fixture
        # because sqlalchemy-migrate messes with the warnings filters.
        self.useFixture(cinder_fixtures.WarningsFixture())

        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object
        # registry.
        objects_base.CinderObject.indirection_api = None
        self._base_test_obj_backup = copy.copy(
            objects_base.CinderObjectRegistry._registry._obj_classes)
        self.addCleanup(self._restore_obj_registry)

        self.addCleanup(CONF.reset)
        self.addCleanup(self._common_cleanup)
        self.injected = []
        self._services = []

        fake_notifier.mock_notifier(self)

        # This will be cleaned up by the NestedTempfile fixture
        lock_path = self.useFixture(fixtures.TempDir()).path
        self.fixture = self.useFixture(
            config_fixture.Config(lockutils.CONF))
        self.fixture.config(lock_path=lock_path,
                            group='oslo_concurrency')
        lockutils.set_defaults(lock_path)
        self.override_config('policy_file',
                             os.path.join(
                                 os.path.abspath(
                                     os.path.join(
                                         os.path.dirname(__file__),
                                         '..',
                                     )
                                 ),
                                 self.POLICY_PATH),
                             group='oslo_policy')
        self.override_config('resource_query_filters_file',
                             os.path.join(
                                 os.path.abspath(
                                     os.path.join(
                                         os.path.dirname(__file__),
                                         '..',
                                     )
                                 ),
                                 self.RESOURCE_FILTER_PATH))
        self._disable_osprofiler()

        # NOTE(geguileo): This is required because common get_by_id method in
        # cinder.db.sqlalchemy.api caches get methods and if we use a mocked
        # get method in one test it would carry on to the next test.  So we
        # clear out the cache.
        sqla_api._GET_METHODS = {}

        self.override_config('backend_url', 'file://' + lock_path,
                             group='coordination')
        coordination.COORDINATOR.start()
        self.addCleanup(coordination.COORDINATOR.stop)

        if six.PY3:
            # TODO(smcginnis) Python 3 deprecates assertRaisesRegexp to
            # assertRaisesRegex, but Python 2 does not have the new name. This
            # can be removed once we stop supporting py2 or the new name is
            # added.
            self.assertRaisesRegexp = self.assertRaisesRegex

        # Ensure we have the default tpool size value and we don't carry
        # threads from other test runs.
        tpool.killall()
        tpool._nthreads = 20
Example #20
0
 def test_killall(self):
     """killall() followed by setup() should leave a usable pool."""
     tpool.killall()
     tpool.setup()
Example #21
0
 def tearDown(self):
     """Stop tpool worker threads before base-class cleanup runs."""
     tpool.killall()
     super(TestTpool, self).tearDown()
Example #22
0
def main():
    """Parse command-line options, open the store and serve requests.

    Runs an accept loop until KeyboardInterrupt, then shuts everything
    down in order: listener, checker thread, tpool workers, store.
    Python 2 only (uses print statements).
    """
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-l",
                      "--listen",
                      dest="host",
                      default="0.0.0.0",
                      help="the ip interface to bind")
    parser.add_option("-p",
                      "--port",
                      default=7902,
                      type=int,
                      help="which port to listen")
    #    parser.add_option("-d", "--daemon", action="store_true",
    #            help="run in daemon", default=False)
    parser.add_option("-H",
                      "--home",
                      default="beansdb",
                      help="the database path")
    parser.add_option("-c",
                      "--count",
                      default=16,
                      type=int,
                      help="number of db file, power of 16")
    parser.add_option("-s",
                      "--start",
                      default=0,
                      type=int,
                      help="start index of db file")
    parser.add_option("-e",
                      "--end",
                      default=-1,
                      type=int,
                      help="last end of db file, -1 means no limit")
    parser.add_option("-n",
                      "--limit",
                      default=100,
                      type=int,
                      help="diffs limit to do db scan")
    parser.add_option("-t",
                      "--threads",
                      type=int,
                      default=20,
                      help="number of IO threads")

    (options, args) = parser.parse_args()

    # log16(count) gives the db tree depth; count is expected to be a
    # power of 16 — TODO confirm upstream validation.
    store = (HStore(options.home, int(math.log(options.count, 16)),
                    options.start, options.end))
    #store.check(options.limit, nonblocking=True)
    api.spawn(tpool.execute, store.check,
              options.limit)  # check in thread pool
    api.spawn(tpool.execute, flush, store)

    print "server listening on %s:%s" % (options.host, options.port)
    server = api.tcp_listener((options.host, options.port))
    util.set_reuse_addr(server)

    # Accept loop: one green thread per client connection; Ctrl-C exits.
    while True:
        try:
            new_sock, address = server.accept()
        except KeyboardInterrupt:
            break
        api.spawn(handler, store, new_sock, new_sock.makefile('r'),
                  new_sock.makefile('w'))

    # Module-level flag read by background loops to stop gracefully.
    global quit
    quit = True

    print 'close listener ...'
    server.close()

    print 'stop checker thread ...'
    store.stop_check()

    print 'stop worker threads ...'
    tpool.killall()

    print 'close store...'
    store.close()
Example #23
0
 def test_killall(self):
     """killall() followed by setup() should leave a usable pool."""
     tpool.killall()
     tpool.setup()