Example #1
    def test_exclude_from_tilde_expansion(self):
        basedir = "cli/Backup/exclude_from_tilde_expansion"
        fileutil.make_dirs(basedir)
        nodeurl_path = os.path.join(basedir, 'node.url')
        fileutil.write(nodeurl_path, 'http://example.net:2357/')

        # ensure that tilde expansion is performed on exclude-from argument
        exclude_file = u'~/.tahoe/excludes.dummy'

        ns = Namespace()
        ns.called = False
        original_open = open
        def call_file(name, *args, **kwargs):
            if name.endswith("excludes.dummy"):
                ns.called = True
                self.failUnlessEqual(name, abspath_expanduser_unicode(exclude_file))
                return StringIO()
            else:
                return original_open(name, *args, **kwargs)

        if PY2:
            from allmydata.scripts import cli as module_to_patch
        else:
            import builtins as module_to_patch
        patcher = MonkeyPatcher((module_to_patch, 'open', call_file))
        patcher.runWithPatches(parse_options, basedir, "backup", ['--exclude-from-utf-8', unicode_to_argv(exclude_file), 'from', 'to'])
        self.failUnless(ns.called)
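
MonkeyPatcher.runWithPatches, used above, applies every registered patch,
calls the given function, and restores the original attributes even if the
function raises. A minimal sketch of that behaviour (Config and read_timeout
are illustrative names, not part of the example above):

from twisted.python.monkey import MonkeyPatcher

class Config(object):
    timeout = 30

def read_timeout():
    return Config.timeout

patcher = MonkeyPatcher((Config, 'timeout', 1))
assert patcher.runWithPatches(read_timeout) == 1  # patched during the call
assert Config.timeout == 30  # original value restored afterwards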
Example #2
    def test_delete_works_on_valid_credentials(self):

        from hashlib import sha512

        ctuple = namedtuple('named_user', 'email key')
        user_tuple = ctuple('*****@*****.**', sha512('key').hexdigest())

        def delete(fles, user):
            self.assertEqual(user.email, '*****@*****.**')
            self.assertEqual(user, user_tuple)

        monkey_patcher = MonkeyPatcher((user.User, 'delete', delete))
        monkey_patcher.patch()

        url = '/delete/key'
        request = self.generate_request_and_session(
            url, auth=lambda: True, uuid='73s7b33f'
        )
        request.session.user = user_tuple
        request.session.expire = lambda: True

        result = yield self.account.render(request)

        self.assertEqual(result.code, http.OK)
        self.assertEqual(result.subject['success'], True)
Example #3
    def __init__(self, model):
        if '__pypy__' in sys.modules:
            # try to use psycopg2ct if we are on PyPy
            try:
                from psycopg2ct import compat
                compat.register()

                # monkey patch so that Storm does not crash on register type
                import psycopg2
                psycopg2._psycopg = object

                class _psycopg:
                    UNICODEARRAY = psycopg2.extensions.UNICODEARRAY

                from twisted.python.monkey import MonkeyPatcher
                monkey_patcher = MonkeyPatcher(
                    (psycopg2, '_psycopg', _psycopg))
                monkey_patcher.patch()

            except ImportError:
                raise RuntimeError(
                    'You are trying to use PostgreSQL with PyPy. The regular '
                    'psycopg2 module does not work with PyPy; install '
                    'psycopg2ct to use psycopg2 with PyPy.'
                )

        self.model = model
Example #4
 def setUp(self):
     self.monkey_patcher = MonkeyPatcher()
     self.monkey_patcher.addPatch(krpc_sender, "reactor", HollowReactor())
     self.monkey_patcher.patch()
     self.k_iter = KRPC_Iterator()
     self.k_iter.transport = HollowTransport()
     self.target_id = 5
Example #5
 def test_put_verifyProperRemoval(self):
     # Replace the time function of the datastore module
     # so that we can artificially speed up time
     monkey_patcher = MonkeyPatcher()
     c = clock()
     c.set(0)
     monkey_patcher.addPatch(datastore, "time", c)
     # Replace the peer_timeout to 5 seconds
     monkey_patcher.addPatch(constants, "peer_timeout", 5)
     monkey_patcher.patch()
     # Insert a node and verify it is within the datastore
     m = self.datastore(self.reactor)
     infohash = 5
     expected_peer = ("127.0.0.1", 5151)
     m.put(infohash, expected_peer)
     peers = m.get(infohash)
     # Iterate over a 1 element list
     for peer in peers:
         self.assertEqual(expected_peer, peer)
     self.assertEquals(1, len(peers))
     # Change the time and verify that the cleaning function
     # actually removes the peer
     c.set(5)
     # TODO hackish, shouldn't reach into the object
     m._cleanup(infohash, peer)
     peers = m.get(infohash)
     self.assertEqual(0, len(peers))
     monkey_patcher.restore()
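
The clock patched in above comes from the project's test utilities; the
property the test relies on is that it is callable like time.time but only
advances when told to. A minimal stand-in with the same set()/call interface
(an assumption about that helper, not its actual implementation):

class SettableClock(object):
    """Callable replacement for time.time whose value is set manually."""
    def __init__(self, now=0):
        self._now = now

    def set(self, now):
        self._now = now

    def __call__(self):
        return self._now

Patching the datastore module's time function with such an object lets the
test jump straight to the moment a peer should have expired.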
Example #6
    def test_config_drive(self):
        """
        The instance ID is retrieved from the config drive in preference to the
        metadata server.
        """
        patch = MonkeyPatcher()
        # A compute_instance_id found on config drive
        drive_compute_instance_id = unicode(uuid4())
        # A compute_instance_id found from the metadata service
        server_compute_instance_id = unicode(uuid4())

        # Set up a fake config drive and point the API to its label
        configdrive_label = filesystem_label_for_test(self)
        device = formatted_loopback_device_for_test(
            self,
            label=configdrive_label,
        )
        with temporary_mount(device.device) as mountpoint:
            metadata_file = mountpoint.descendant(METADATA_RELATIVE_PATH)
            metadata_file.parent().makedirs()
            metadata_file.setContent(
                json.dumps({"uuid": drive_compute_instance_id}))
        patch.addPatch(
            self.api,
            '_config_drive_label',
            configdrive_label,
        )
        # Set up a fake metadata service and point the API to its endpoint
        listening = webserver_for_test(
            self,
            url_path="/" + "/".join(METADATA_RELATIVE_PATH),
            response_content=json.dumps({"uuid": server_compute_instance_id}),
        )

        def set_metadata_service_endpoint(port):
            address = port.getHost()
            endpoint = (address.host, address.port)
            patch.addPatch(
                self.api,
                '_metadata_service_endpoint',
                endpoint,
            )
            return port

        listening.addCallback(set_metadata_service_endpoint)

        # Run compute_instance_id in a separate thread,
        # with the API patched to check the fake metadata sources.
        def start_compute_instance_id(port):
            patch.patch()
            return deferToThread(self.api.compute_instance_id)

        connecting = listening.addCallback(start_compute_instance_id)

        def check(result):
            self.assertEqual(drive_compute_instance_id, result)

        checking = connecting.addCallback(check)
        return checking
Example #7
 def test_constructWithPatches(self):
     """
     Constructing a L{MonkeyPatcher} with patches should add all of the
     given patches to the patch list.
     """
     patcher = MonkeyPatcher((self.testObject, "foo", "haha"), (self.testObject, "bar", "hehe"))
     patcher.patch()
     self.assertEqual("haha", self.testObject.foo)
     self.assertEqual("hehe", self.testObject.bar)
     self.assertEqual(self.originalObject.baz, self.testObject.baz)
Example #8
 def test_put_verifyProperRemoval(self):
     # Replace the time function of the datastore module
     # so that we can artificially speed up time
     monkey_patcher = MonkeyPatcher()
     c = clock()
     c.set(0)
     monkey_patcher.addPatch(datastore, "time", c)
     # Replace the peer_timeout to 5 seconds
     monkey_patcher.addPatch(constants, "peer_timeout", 5)
     monkey_patcher.patch()
     # Insert a node and verify it is within the datastore
     m = self.datastore(self.reactor)
     infohash = 5
     expected_peer = ("127.0.0.1", 5151)
     m.put(infohash, expected_peer)
     peers = m.get(infohash)
     # Iterate over a 1 element list
     for peer in peers:
         self.assertEqual(expected_peer, peer)
     self.assertEquals(1, len(peers))
     # Change the time and verify that the cleaning function
     # actually removes the peer
     c.set(5)
     # TODO hackish, shouldn't reach into the object
     m._cleanup(infohash, peer)
     peers = m.get(infohash)
     self.assertEqual(0, len(peers))
     monkey_patcher.restore()
Example #9
 def test_constructWithPatches(self):
     """
     Constructing a L{MonkeyPatcher} with patches should add all of the
     given patches to the patch list.
     """
     patcher = MonkeyPatcher((self.testObject, "foo", "haha"),
                             (self.testObject, "bar", "hehe"))
     patcher.patch()
     self.assertEqual("haha", self.testObject.foo)
     self.assertEqual("hehe", self.testObject.bar)
     self.assertEqual(self.originalObject.baz, self.testObject.baz)
Example #10
 def test_constructWithPatches(self):
     """
     Constructing a L{MonkeyPatcher} with patches should add all of the
     given patches to the patch list.
     """
     patcher = MonkeyPatcher((self.testObject, 'foo', 'haha'),
                             (self.testObject, 'bar', 'hehe'))
     patcher.patch()
     self.assertEqual('haha', self.testObject.foo)
     self.assertEqual('hehe', self.testObject.bar)
     self.assertEqual(self.originalObject.baz, self.testObject.baz)
Example #11
    def test_metadata_service(self):
        """
        The instance ID is retrieved from the metadata service if it can't be
        found on the config drive.
        """
        patch = MonkeyPatcher()
        # A compute_instance_id found from the metadata service
        server_compute_instance_id = unicode(uuid4())

        # Point the API to a config drive label that won't be found.
        configdrive_label = filesystem_label_for_test(self)
        patch.addPatch(
            self.api,
            '_config_drive_label',
            configdrive_label,
        )
        # Set up a fake metadata service and point the API to its endpoint
        listening = webserver_for_test(
            self,
            url_path="/" + "/".join(METADATA_RELATIVE_PATH),
            response_content=json.dumps(
                {"uuid": server_compute_instance_id}
            ),
        )

        def set_metadata_service_endpoint(port):
            address = port.getHost()
            endpoint = (address.host, address.port)
            patch.addPatch(
                self.api,
                '_metadata_service_endpoint',
                endpoint,
            )
            return port

        listening.addCallback(set_metadata_service_endpoint)

        # Run compute_instance_id in a separate thread,
        # with the API patched to check the fake metadata sources.
        def start_compute_instance_id(port):
            patch.patch()
            return deferToThread(
                self.api.compute_instance_id
            )
        connecting = listening.addCallback(start_compute_instance_id)

        def check(result):
            self.assertEqual(server_compute_instance_id, result)
        checking = connecting.addCallback(check)
        return checking
Example #12
    def _monkey_patch(self):
        """
        Monkeypatch some parts of the twisted library that are waiting
        for bugfix inclusion in the trunk
        """

        if not self.monkey_patched:
            # add new method
            setattr(http.Request, 'getClientProxyIP', getClientProxyIP)

            # patch getClientIP
            monkey_patcher = MonkeyPatcher(
                (http.Request, 'getClientIP', getClientIPPatch))
            monkey_patcher.patch()
            self.monkey_patched = True
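
The monkey_patched flag matters because MonkeyPatcher records the current
attribute value as the original when patch() is called; patching twice with
fresh patchers would make a later restore() reinstate the patched version.
A sketch of the same apply-at-most-once guard in isolation (getClientIPPatch
is a hypothetical stand-in for the project's replacement method):

from twisted.python.monkey import MonkeyPatcher
from twisted.web import http

def getClientIPPatch(self):
    # hypothetical replacement honouring a proxy header
    return self.getHeader('x-forwarded-for')

_patched = False

def apply_get_client_ip_patch():
    """Apply the getClientIP patch at most once per process."""
    global _patched
    if _patched:
        return
    MonkeyPatcher((http.Request, 'getClientIP', getClientIPPatch)).patch()
    _patched = True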
Example #13
    def test_metadata_service(self):
        """
        The instance ID is retrieved from the metadata service if it can't be
        found on the config drive.
        """
        patch = MonkeyPatcher()
        # A compute_instance_id found from the metadata service
        server_compute_instance_id = unicode(uuid4())

        # Point the API to a config drive label that won't be found.
        configdrive_label = filesystem_label_for_test(self)
        patch.addPatch(
            self.api,
            '_config_drive_label',
            configdrive_label,
        )
        # Set up a fake metadata service and point the API to its endpoint
        listening = webserver_for_test(
            self,
            url_path="/" + "/".join(METADATA_RELATIVE_PATH),
            response_content=json.dumps({"uuid": server_compute_instance_id}),
        )

        def set_metadata_service_endpoint(port):
            address = port.getHost()
            endpoint = (address.host, address.port)
            patch.addPatch(
                self.api,
                '_metadata_service_endpoint',
                endpoint,
            )
            return port

        listening.addCallback(set_metadata_service_endpoint)

        # Run compute_instance_id in a separate thread,
        # with the API patched to check the fake metadata sources.
        def start_compute_instance_id(port):
            patch.patch()
            return deferToThread(self.api.compute_instance_id)

        connecting = listening.addCallback(start_compute_instance_id)

        def check(result):
            self.assertEqual(server_compute_instance_id, result)

        checking = connecting.addCallback(check)
        return checking
Example #14
    def __init__(self, model):
        if '__pypy__' in sys.modules:
            # try to use psycopg2ct if we are on PyPy
            try:
                from psycopg2ct import compat
                compat.register()

                # monkey patch so that Storm does not crash on register type
                import psycopg2
                psycopg2._psycopg = object

                class _psycopg:
                    UNICODEARRAY = psycopg2.extensions.UNICODEARRAY

                from twisted.python.monkey import MonkeyPatcher
                monkey_patcher = MonkeyPatcher(
                    (psycopg2, '_psycopg', _psycopg))
                monkey_patcher.patch()

            except ImportError:
                raise RuntimeError(
                    'You are trying to use PostgreSQL with PyPy. The regular '
                    'psycopg2 module does not work with PyPy; install '
                    'psycopg2ct to use psycopg2 with PyPy.'
                )

        self.model = model

        self._columns_mapping = {
            properties.Bool: 'bool',
            properties.UUID: 'uuid',
            properties.RawStr: 'bytea',
            properties.Pickle: 'bytea',
            properties.JSON: 'json',
            properties.DateTime: 'timestamp',
            properties.Date: 'date',
            properties.Time: 'time',
            properties.TimeDelta: 'interval',
            properties.Enum: 'integer',
            properties.Decimal: 'decimal'
        }

        self.parse = singledispatch(self.parse)
        self.parse.register(properties.Int, self._parse_int)
        self.parse.register(properties.Unicode, self._parse_unicode)
        self.parse.register(properties.Float, self._parse_float)
        self.parse.register(properties.List, self._parse_list)
        self.parse.register(NativeEnum, self._parse_enum)
Example #15
    def __init__(self, model):
        if '__pypy__' in sys.modules:
            # try to use psycopg2ct if we are on PyPy
            try:
                from psycopg2ct import compat
                compat.register()

                # monkey patch so that Storm does not crash on register type
                import psycopg2
                psycopg2._psycopg = object

                class _psycopg:
                    UNICODEARRAY = psycopg2.extensions.UNICODEARRAY

                from twisted.python.monkey import MonkeyPatcher
                monkey_patcher = MonkeyPatcher(
                    (psycopg2, '_psycopg', _psycopg))
                monkey_patcher.patch()

            except ImportError:
                raise RuntimeError(
                    'You are trying to use PostgreSQL with PyPy. The regular '
                    'psycopg2 module does not work with PyPy; install '
                    'psycopg2ct to use psycopg2 with PyPy.'
                )

        self.model = model

        self._columns_mapping = {
            properties.Bool: 'bool',
            properties.UUID: 'uuid',
            properties.RawStr: 'bytea',
            properties.Pickle: 'bytea',
            properties.JSON: 'json',
            properties.DateTime: 'timestamp',
            properties.Date: 'date',
            properties.Time: 'time',
            properties.TimeDelta: 'interval',
            properties.Enum: 'integer',
            properties.Decimal: 'decimal'
        }

        self.parse = singledispatch(self.parse)
        self.parse.register(properties.Int, self._parse_int)
        self.parse.register(properties.Unicode, self._parse_unicode)
        self.parse.register(properties.Float, self._parse_float)
        self.parse.register(properties.List, self._parse_list)
        self.parse.register(NativeEnum, self._parse_enum)
Example #16
 def setUp(self):
     self.monkey_patcher = MonkeyPatcher()
     self.monkey_patcher.addPatch(krpc_sender, "reactor", HollowReactor())
     self.monkey_patcher.patch()
     self.k_iter = KRPC_Iterator()
     self.k_iter.transport = HollowTransport()
     self.target_id = 5
Example #17
    def _monkey_patch(self):
        """
        Monkeypatch some parts of the twisted library that are waiting
        for bugfix inclusion in the trunk
        """

        if not self.monkey_patched:
            # add new method
            setattr(http.Request, 'getClientProxyIP', getClientProxyIP)

            # patch getClientIP
            monkey_patcher = MonkeyPatcher(
                (http.Request, 'getClientIP', getClientIPPatch)
            )
            monkey_patcher.patch()
            self.monkey_patched = True
Example #18
 def test_error_logging(self, logger):
     """
     Failures while applying a diff emit a log message containing the full
     diff.
     """
     o1 = DiffTestObjInvariant(
         a=1,
         b=2,
     )
     patcher = MonkeyPatcher()
     patcher.addPatch(
         DiffTestObjInvariant,
         '_perform_invariant_check',
         False
     )
     patcher.patch()
     try:
         o2 = o1.set('b', 1)
     finally:
         patcher.restore()
     diff = create_diff(o1, o2)
     self.assertRaises(
         InvariantException,
         diff.apply,
         o1,
     )
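
The try/finally pairing above is what runWithPatches does internally, so when
the patched code is a single call the two forms are interchangeable. A sketch
of the equivalent call, reusing the test's own names:

patcher = MonkeyPatcher()
patcher.addPatch(DiffTestObjInvariant, '_perform_invariant_check', False)
o2 = patcher.runWithPatches(o1.set, 'b', 1)  # patch, call, restore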
Example #19
    def test_exclude_from_tilde_expansion(self):
        basedir = "cli/Backup/exclude_from_tilde_expansion"
        fileutil.make_dirs(basedir)
        nodeurl_path = os.path.join(basedir, 'node.url')
        fileutil.write(nodeurl_path, 'http://example.net:2357/')

        # ensure that tilde expansion is performed on exclude-from argument
        exclude_file = u'~/.tahoe/excludes.dummy'

        ns = Namespace()
        ns.called = False
        def call_file(name, *args):
            ns.called = True
            self.failUnlessEqual(name, abspath_expanduser_unicode(exclude_file))
            return StringIO()

        patcher = MonkeyPatcher((__builtin__, 'file', call_file))
        patcher.runWithPatches(parse_options, basedir, "backup", ['--exclude-from', unicode_to_argv(exclude_file), 'from', 'to'])
        self.failUnless(ns.called)
Example #21
    def test_register_works_when_provided_information_is_valid(self):

        def create(fles):

            self.assertEqual(fles.name, 'Someone')
            self.assertEqual(fles.email, '*****@*****.**')

        monkey_patcher = MonkeyPatcher((user.User, 'create', create))
        monkey_patcher.patch()
        request = self.generate_request(['/register'], '''{
            "name": "Someone",
            "email": "*****@*****.**"
        }''')

        result = yield self.account.render(request)

        self.assertEqual(result.code, http.OK)
        self.assertEqual(type(result.subject), dict)
        self.assertEqual(result.headers['content-type'], 'application/json')
        self.assertTrue(result.subject['success'])
Example #22
    def test_report_import_error(self):
        marker = "wheeeyo"
        real_import_func = __import__
        def raiseIE_from_this_particular_func(name, *args):
            if name == "foolscap":
                raise ImportError(marker + " foolscap can't be imported")
            else:
                return real_import_func(name, *args)

        # Let's run as little code as possible with __import__ patched.
        patcher = MonkeyPatcher((__builtin__, '__import__', raiseIE_from_this_particular_func))
        vers_and_locs, errors = patcher.runWithPatches(allmydata.get_package_versions_and_locations)

        foolscap_stuffs = [stuff for (pkg, stuff) in vers_and_locs if pkg == 'foolscap']
        self.failUnlessEqual(len(foolscap_stuffs), 1)
        comment = str(foolscap_stuffs[0][2])
        self.failUnlessIn(marker, comment)
        self.failUnlessIn('raiseIE_from_this_particular_func', comment)

        self.failUnless([e for e in errors if "dependency \'foolscap\' could not be imported" in e])
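
__builtin__ exists only on Python 2; on Python 3 the same namespace is the
builtins module. A version-agnostic way to target __import__, mirroring the
PY2 check in Example #1 (fake_import is an illustrative stand-in for the
test's raiseIE_from_this_particular_func):

import sys
from twisted.python.monkey import MonkeyPatcher

if sys.version_info[0] == 2:
    import __builtin__ as builtins_module
else:
    import builtins as builtins_module

def fake_import(name, *args, **kwargs):
    raise ImportError(name + " blocked for the test")

patcher = MonkeyPatcher((builtins_module, '__import__', fake_import))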
Example #23
    def test_report_import_error(self):
        marker = "wheeeyo"
        real_import_func = __import__

        def raiseIE_from_this_particular_func(name, *args):
            if name == "foolscap":
                raise ImportError(marker + " foolscap can't be imported")
            else:
                return real_import_func(name, *args)

        # Let's run as little code as possible with __import__ patched.
        patcher = MonkeyPatcher((__builtin__, "__import__", raiseIE_from_this_particular_func))
        vers_and_locs, errors = patcher.runWithPatches(allmydata.get_package_versions_and_locations)

        foolscap_stuffs = [stuff for (pkg, stuff) in vers_and_locs if pkg == "foolscap"]
        self.failUnlessEqual(len(foolscap_stuffs), 1)
        comment = str(foolscap_stuffs[0][2])
        self.failUnlessIn(marker, comment)
        self.failUnlessIn("raiseIE_from_this_particular_func", comment)

        self.failUnless([e for e in errors if "dependency 'foolscap' could not be imported" in e])
Example #24
    def run(self, result):
        """
        Run the test case in the context of a distinct Eliot action.

        The action will finish after the test is done.  It will note the name of
        the test being run.

        All messages emitted by the test will be validated.  They will still be
        delivered to the global logger.
        """
        # The idea here is to decorate the test method itself so that all of
        # the extra logic happens at the point where test/application logic is
        # expected to be.  This `run` method is more like test infrastructure
        # and things do not go well when we add too much extra behavior here.
        # For example, exceptions raised here often just kill the whole
        # runner.
        patcher = MonkeyPatcher()

        # So, grab the test method.
        name = self.case._testMethodName
        original = getattr(self.case, name)
        decorated = with_logging(ensure_text(self.case.id()), original)
        patcher.addPatch(self.case, name, decorated)
        try:
            # Patch it in
            patcher.patch()
            # Then use the rest of the machinery to run it.
            return self._run_tests_with_factory(
                self.case,
                self.handlers,
                self.last_resort,
            ).run(result)
        finally:
            # Clean up the patching for idempotency or something.
            patcher.restore()
Example #25
    def test_list_removed_containers(self):
        """
        ``DockerClient.list`` does not list containers which are removed,
        during its operation, from another thread.
        """
        patcher = MonkeyPatcher()

        namespace = namespace_for_test(self)
        flocker_docker_client = DockerClient(namespace=namespace)

        name1 = random_name(self)
        adding_unit1 = flocker_docker_client.add(name1, ANY_IMAGE)
        self.addCleanup(flocker_docker_client.remove, name1)

        name2 = random_name(self)
        adding_unit2 = flocker_docker_client.add(name2, ANY_IMAGE)
        self.addCleanup(flocker_docker_client.remove, name2)

        docker_client = flocker_docker_client._client
        docker_client_containers = docker_client.containers

        def simulate_missing_containers(*args, **kwargs):
            """
            Remove a container before returning the original list.
            """
            containers = docker_client_containers(*args, **kwargs)
            container_name1 = flocker_docker_client._to_container_name(name1)
            docker_client.remove_container(
                container=container_name1, force=True)
            return containers

        adding_units = gatherResults([adding_unit1, adding_unit2])

        def get_list(ignored):
            patcher.addPatch(
                docker_client,
                'containers',
                simulate_missing_containers
            )
            patcher.patch()
            return flocker_docker_client.list()

        listing_units = adding_units.addCallback(get_list)

        def check_list(units):
            patcher.restore()
            self.assertEqual(
                [name2], sorted([unit.name for unit in units])
            )
        running_assertions = listing_units.addCallback(check_list)

        return running_assertions
Example #26
    def __init__(self, pool=None, testing=False):
        if pool is not None:
            self.pool = pool

        self.started = False
        self.__testing = testing

        if not self.zstorm_configured:
            provideUtility(global_zstorm, IZStorm)
            zstorm = getUtility(IZStorm)
            zstorm.set_default_uri('mamba', config.Database().uri)

        SQLite.register()
        MySQL.register()
        PostgreSQL.register()

        # MonkeyPatch Storm
        if not self.monkey_patched:
            monkey_patcher = MonkeyPatcher(
                (properties, 'PropertyColumn', PropertyColumnMambaPatch))
            monkey_patcher.patch()
            self.monkey_patched = True
Example #27
    def test_register_works_when_provided_information_is_valid(self):
        def create(fles):

            self.assertEqual(fles.name, "Someone")
            self.assertEqual(fles.email, "*****@*****.**")

        monkey_patcher = MonkeyPatcher((user.User, "create", create))
        monkey_patcher.patch()
        request = self.generate_request(
            ["/register"],
            """{
            "name": "Someone",
            "email": "*****@*****.**"
        }""",
        )

        result = yield self.account.render(request)

        self.assertEqual(result.code, http.OK)
        self.assertEqual(type(result.subject), dict)
        self.assertEqual(result.headers["content-type"], "application/json")
        self.assertTrue(result.subject["success"])
Example #28
    def __init__(self, pool=None, testing=False):
        if pool is not None:
            self.pool = pool

        self.started = False
        self.__testing = testing

        if not self.zstorm_configured:
            provideUtility(global_zstorm, IZStorm)
            zstorm = getUtility(IZStorm)
            zstorm.set_default_uri('mamba', config.Database().uri)

        SQLite.register()
        MySQL.register()
        PostgreSQL.register()

        # MonkeyPatch Storm
        if not self.monkey_patched:
            monkey_patcher = MonkeyPatcher(
                (properties, 'PropertyColumn', PropertyColumnMambaPatch)
            )
            monkey_patcher.patch()
            self.monkey_patched = True
Example #29
    def test_delete_works_on_valid_credentials(self):

        from hashlib import sha512

        ctuple = namedtuple("named_user", "email key")
        user_tuple = ctuple("*****@*****.**", sha512("key").hexdigest())

        def delete(fles, user):
            self.assertEqual(user.email, "*****@*****.**")
            self.assertEqual(user, user_tuple)

        monkey_patcher = MonkeyPatcher((user.User, "delete", delete))
        monkey_patcher.patch()

        url = "/delete/key"
        request = self.generate_request_and_session(url, auth=lambda: True, uid="73s7b33f")
        request.session.user = user_tuple
        request.session.expire = lambda: True

        result = yield self.account.render(request)

        self.assertEqual(result.code, http.OK)
        self.assertEqual(result.subject["success"], True)
Example #30
class TestingBase(object):
    def setUp(self):
        self.clock = Clock()
        self.monkey_patcher = MonkeyPatcher()
        self.monkey_patcher.addPatch(rate_limiter.time, "time", self.clock)
        self.monkey_patcher.patch()

    def tearDown(self):
        self.monkey_patcher.restore()
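
tearDown is not run when setUp itself raises partway through, but cleanups
registered before the failure still are. Registering the restore as a cleanup
is therefore a slightly more defensive variant of the same fixture (a sketch
using the example's names):

def setUp(self):
    self.clock = Clock()
    self.monkey_patcher = MonkeyPatcher()
    self.monkey_patcher.addPatch(rate_limiter.time, "time", self.clock)
    self.monkey_patcher.patch()
    self.addCleanup(self.monkey_patcher.restore)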
Example #31
 def test_put_reannounceResetsTimer(self):
     # Replace the time function of the datastore module
     # so that we can artificially speed up time
     monkey_patcher = MonkeyPatcher()
     c = clock()
     c.set(0)
     monkey_patcher.addPatch(datastore, "time", c)
     # Replace the peer_timeout to 5 seconds
     monkey_patcher.addPatch(constants, "peer_timeout", 5)
     monkey_patcher.patch()
     # Insert a node and verify it is within the datastore
     m = self.datastore(self.reactor)
     infohash = 5
     expected_peer = ("127.0.0.1", 5151)
     m.put(infohash, expected_peer)
     peers = m.get(infohash)
     # Iterate over a 1 element list
     self.assertEquals(1, len(peers))
     for peer in peers:
         self.assertEqual(expected_peer, peer)
     # Change the time and reannounce the peer
     # (make sure the cleanup function doesn't
     #  remove the peer yet)
     c.set(4)
     m.put(infohash, expected_peer)
     peers = m.get(infohash)
     self.assertEqual(1, len(peers))
     m._cleanup(infohash, expected_peer)
     c.set(8)
     m._cleanup(infohash, expected_peer)
     peers = m.get(infohash)
     self.assertEqual(1, len(peers))
     c.set(9)
     m._cleanup(infohash, expected_peer)
     peers = m.get(infohash)
     self.assertEqual(0, len(peers))
     monkey_patcher.restore()
Example #32
class TestingBase(object):
    def setUp(self):
        self.clock = Clock()
        self.monkey_patcher = MonkeyPatcher()
        self.monkey_patcher.addPatch(rate_limiter.time, "time", self.clock)
        self.monkey_patcher.patch()

    def tearDown(self):
        self.monkey_patcher.restore()
Example #33
 def test_unknown_instance_id(self):
     """
     ``UnknownInstanceID`` is raised if all node UUID lookup mechanisms
     fail.
     """
     patch = MonkeyPatcher()
     # Use non-existent config drive label.
     # Mount will fail.
     patch.addPatch(self.api, '_config_drive_label',
                    filesystem_label_for_test(self))
     # Use an unreachable metadata service endpoint address.
     # TCP connections will fail.
     patch.addPatch(self.api, '_metadata_service_endpoint',
                    find_free_port())
     self.addCleanup(patch.restore)
     patch.patch()
     self.assertRaises(UnknownInstanceID, self.api.compute_instance_id)
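
find_free_port above only needs to return an address nothing is listening on.
A plausible sketch of such a helper (an assumption, not the project's actual
implementation): bind to port 0 so the OS picks an unused port, then release
the socket and hand the address back.

import socket

def find_free_port():
    """Return a (host, port) tuple that no server is currently bound to."""
    s = socket.socket()
    s.bind(('127.0.0.1', 0))
    address = s.getsockname()
    s.close()
    return address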
Example #34
    def setUp(self):
        self.clock = Clock()
        self.monkey_patcher = MonkeyPatcher()
        self.monkey_patcher.addPatch(rate_limiter.time, "time", self.clock)
        self.monkey_patcher.patch()

        self.address = ("127.0.0.1", 55)
        self.query = Query()
        self.query.rpctype = "ping"
        self.query._from = 15
        self.query._transaction_id = 99
        self.packet = krpc_coder.encode(self.query)
        # Patch in hardcoded values for the bandwidth
        # limits so that changing the constants will
        # not affect the usefulness of this test case
        # (The global bandwidth is set to 3 standard ping queries)
        # (The per user bandwidth is set to 1 standard ping query)
        self.monkey_patcher.addPatch(rate_limiter.constants,
                                     "global_bandwidth_rate",
                                     3 * len(self.packet))
        self.monkey_patcher.addPatch(rate_limiter.constants,
                                     "host_bandwidth_rate",
                                     1 * len(self.packet))
        self.monkey_patcher.patch()
Example #35
 def test_put_reannounceResetsTimer(self):
     # Replace the time function of the datastore module
     # so that we can artificially speed up time
     monkey_patcher = MonkeyPatcher()
     c = clock()
     c.set(0)
     monkey_patcher.addPatch(datastore, "time", c)
     # Replace the peer_timeout to 5 seconds
     monkey_patcher.addPatch(constants, "peer_timeout", 5)
     monkey_patcher.patch()
     # Insert a node and verify it is within the datastore
     m = self.datastore(self.reactor)
     infohash = 5
     expected_peer = ("127.0.0.1", 5151)
     m.put(infohash, expected_peer)
     peers = m.get(infohash)
     # Iterate over a 1 element list
     self.assertEquals(1, len(peers))
     for peer in peers:
         self.assertEqual(expected_peer, peer)
     # Change the time and reannounce the peer
     # (make sure the cleanup function doesn't
     #  remove the peer yet)
     c.set(4)
     m.put(infohash, expected_peer)
     peers = m.get(infohash)
     self.assertEqual(1, len(peers))
     m._cleanup(infohash, expected_peer)
     c.set(8)
     m._cleanup(infohash, expected_peer)
     peers = m.get(infohash)
     self.assertEqual(1, len(peers))
     c.set(9)
     m._cleanup(infohash, expected_peer)
     peers = m.get(infohash)
     self.assertEqual(0, len(peers))
     monkey_patcher.restore()
Example #36
 def test_error_logging(self, logger):
     """
     Failures while applying a diff emit a log message containing the full
     diff.
     """
     o1 = DiffTestObjInvariant(
         a=1,
         b=2,
     )
     patcher = MonkeyPatcher()
     patcher.addPatch(DiffTestObjInvariant, '_perform_invariant_check',
                      False)
     patcher.patch()
     try:
         o2 = o1.set('b', 1)
     finally:
         patcher.restore()
     diff = create_diff(o1, o2)
     self.assertRaises(
         InvariantException,
         diff.apply,
         o1,
     )
Example #37
    def setUp(self):
        self.clock = Clock()
        self.monkey_patcher = MonkeyPatcher()
        self.monkey_patcher.addPatch(rate_limiter.time, "time", self.clock)
        self.monkey_patcher.patch()

        self.address = ("127.0.0.1", 55)
        self.query = Query()
        self.query.rpctype = "ping"
        self.query._from = 15
        self.query._transaction_id = 99
        self.packet = krpc_coder.encode(self.query)
        # Patch in hardcoded values for the bandwidth
        # limits so that changing the constants will
        # not affect the usefulness of this test case
        # (The global bandwidth is set to 3 standard ping queries)
        # (The per user bandwidth is set to 1 standard ping query)
        self.monkey_patcher.addPatch(rate_limiter.constants,
                "global_bandwidth_rate", 3 * len(self.packet))
        self.monkey_patcher.addPatch(rate_limiter.constants,
                "host_bandwidth_rate", 1 * len(self.packet))
        self.monkey_patcher.patch()
Example #38
 def test_unknown_instance_id(self):
     """
     ``UnknownInstanceID`` is raised if all node UUID lookup mechanisms
     fail.
     """
     patch = MonkeyPatcher()
     # Use non-existent config drive label.
     # Mount will fail.
     patch.addPatch(
         self.api,
         '_config_drive_label',
         filesystem_label_for_test(self)
     )
     # Use an unreachable metadata service endpoint address.
     # TCP connections will fail.
     patch.addPatch(
         self.api,
         '_metadata_service_endpoint',
         find_free_port()
     )
     self.addCleanup(patch.restore)
     patch.patch()
     self.assertRaises(UnknownInstanceID, self.api.compute_instance_id)
Example #39
 def setUp(self):
     self.clock = Clock()
     self.monkey_patcher = MonkeyPatcher()
     self.monkey_patcher.addPatch(rate_limiter.time, "time", self.clock)
     self.monkey_patcher.patch()
Example #40
class MonkeyPatcherTest(unittest.TestCase):
    """
    Tests for L{MonkeyPatcher} monkey-patching class.
    """

    def setUp(self):
        self.testObject = TestObj()
        self.originalObject = TestObj()
        self.monkeyPatcher = MonkeyPatcher()


    def test_empty(self):
        """
        A monkey patcher without patches shouldn't change a thing.
        """
        self.monkeyPatcher.patch()

        # We can't assert that all state is unchanged, but at least we can
        # check our test object.
        self.assertEqual(self.originalObject.foo, self.testObject.foo)
        self.assertEqual(self.originalObject.bar, self.testObject.bar)
        self.assertEqual(self.originalObject.baz, self.testObject.baz)


    def test_constructWithPatches(self):
        """
        Constructing a L{MonkeyPatcher} with patches should add all of the
        given patches to the patch list.
        """
        patcher = MonkeyPatcher((self.testObject, 'foo', 'haha'),
                                (self.testObject, 'bar', 'hehe'))
        patcher.patch()
        self.assertEqual('haha', self.testObject.foo)
        self.assertEqual('hehe', self.testObject.bar)
        self.assertEqual(self.originalObject.baz, self.testObject.baz)


    def test_patchExisting(self):
        """
        Patching an attribute that exists sets it to the value defined in the
        patch.
        """
        self.monkeyPatcher.addPatch(self.testObject, 'foo', 'haha')
        self.monkeyPatcher.patch()
        self.assertEqual(self.testObject.foo, 'haha')


    def test_patchNonExisting(self):
        """
        Patching a non-existing attribute fails with an C{AttributeError}.
        """
        self.monkeyPatcher.addPatch(self.testObject, 'nowhere',
                                    'blow up please')
        self.assertRaises(AttributeError, self.monkeyPatcher.patch)


    def test_patchAlreadyPatched(self):
        """
        Adding a patch for an object and attribute that already have a patch
        overrides the existing patch.
        """
        self.monkeyPatcher.addPatch(self.testObject, 'foo', 'blah')
        self.monkeyPatcher.addPatch(self.testObject, 'foo', 'BLAH')
        self.monkeyPatcher.patch()
        self.assertEqual(self.testObject.foo, 'BLAH')
        self.monkeyPatcher.restore()
        self.assertEqual(self.testObject.foo, self.originalObject.foo)


    def test_restoreTwiceIsANoOp(self):
        """
        Restoring an already-restored monkey patch is a no-op.
        """
        self.monkeyPatcher.addPatch(self.testObject, 'foo', 'blah')
        self.monkeyPatcher.patch()
        self.monkeyPatcher.restore()
        self.assertEqual(self.testObject.foo, self.originalObject.foo)
        self.monkeyPatcher.restore()
        self.assertEqual(self.testObject.foo, self.originalObject.foo)


    def test_runWithPatchesDecoration(self):
        """
        runWithPatches should run the given callable, passing in all arguments
        and keyword arguments, and return the return value of the callable.
        """
        log = []

        def f(a, b, c=None):
            log.append((a, b, c))
            return 'foo'

        result = self.monkeyPatcher.runWithPatches(f, 1, 2, c=10)
        self.assertEqual('foo', result)
        self.assertEqual([(1, 2, 10)], log)


    def test_repeatedRunWithPatches(self):
        """
        We should be able to call the same function with runWithPatches more
        than once. All patches should apply for each call.
        """
        def f():
            return (self.testObject.foo, self.testObject.bar,
                    self.testObject.baz)

        self.monkeyPatcher.addPatch(self.testObject, 'foo', 'haha')
        result = self.monkeyPatcher.runWithPatches(f)
        self.assertEqual(
            ('haha', self.originalObject.bar, self.originalObject.baz), result)
        result = self.monkeyPatcher.runWithPatches(f)
        self.assertEqual(
            ('haha', self.originalObject.bar, self.originalObject.baz),
            result)


    def test_runWithPatchesRestores(self):
        """
        C{runWithPatches} should restore the original values after the function
        has executed.
        """
        self.monkeyPatcher.addPatch(self.testObject, 'foo', 'haha')
        self.assertEqual(self.originalObject.foo, self.testObject.foo)
        self.monkeyPatcher.runWithPatches(lambda: None)
        self.assertEqual(self.originalObject.foo, self.testObject.foo)


    def test_runWithPatchesRestoresOnException(self):
        """
        Test runWithPatches restores the original values even when the function
        raises an exception.
        """
        def _():
            self.assertEqual(self.testObject.foo, 'haha')
            self.assertEqual(self.testObject.bar, 'blahblah')
            raise RuntimeError("Something went wrong!")

        self.monkeyPatcher.addPatch(self.testObject, 'foo', 'haha')
        self.monkeyPatcher.addPatch(self.testObject, 'bar', 'blahblah')

        self.assertRaises(RuntimeError, self.monkeyPatcher.runWithPatches, _)
        self.assertEqual(self.testObject.foo, self.originalObject.foo)
        self.assertEqual(self.testObject.bar, self.originalObject.bar)
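
The suite above assumes a TestObj fixture with plain foo/bar/baz attributes;
a minimal stand-in (sketch):

class TestObj(object):
    def __init__(self):
        self.foo = 'foo value'
        self.bar = 'bar value'
        self.baz = 'baz value'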
Example #41
 def setUp(self):
     self.testObject = TestObj()
     self.originalObject = TestObj()
     self.monkeyPatcher = MonkeyPatcher()
Example #42
 def monkey_patch(self, obj, attribute, value):
     monkey_patch = MonkeyPatcher((obj, attribute, value))
     self._monkey_patches.append(monkey_patch)
     monkey_patch.patch()
     return monkey_patch
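
The helper above accumulates one single-patch MonkeyPatcher per call; the
matching teardown should restore them newest-first so that repeated patches
of the same attribute unwind back to the true original (a sketch assuming
the same _monkey_patches list):

def restore_monkey_patches(self):
    """Undo every patch applied through monkey_patch, newest first."""
    while self._monkey_patches:
        self._monkey_patches.pop().restore()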
Example #43
from twisted.trial import unittest
from twisted.python.monkey import MonkeyPatcher

from mdht import contact
from mdht import constants
from mdht.coding import basic_coder
from mdht.test.utils import Clock

monkey_patcher = MonkeyPatcher()


class NodeTestCase(unittest.TestCase):
    def setUp(self):
        # Reach into the contact module and replace
        # its time function with a custom one
        self.clock = Clock()
        monkey_patcher.addPatch(contact, "time", self.clock)
        monkey_patcher.patch()

    def tearDown(self):
        # Restore the old time module
        monkey_patcher.restore()

    def test_distance(self):
        node_ids1 = [0, 1024, 2**150, 2**159 + 124, 2**34 - 58]
        node_ids2 = [0, 857081, 6**7, 8**9 + 7**3, 4**8 + 9**10 + 18]
        for id1 in node_ids1:
            for id2 in node_ids2:
                n = contact.Node(id1, ("127.0.0.1", 8000))
                self.assertEqual(id1 ^ id2, n.distance(id2))
                n = contact.Node(id2, ("127.0.0.1", 8000))
Example #44
    def _build_and_test_api(self, listening_port):
        """
        Build the CinderBlockDeviceAPI configured to connect to the Mimic
        server at ``listening_port``.
        Patch twisted.web to force the mimic server to drop incoming
        connections.
        And attempt to interact with the disabled API server first and then
        after re-enabling it to show that the API will re-authenticate even
        after an initial failure.
        """
        import twisted.web.http
        patch = MonkeyPatcher()
        patch.addPatch(
            twisted.web.http.HTTPChannel,
            'connectionMade',
            lambda self: self.transport.loseConnection()
        )
        self.addCleanup(patch.restore)
        backend, api_args = backend_and_api_args_from_configuration({
            "backend": "openstack",
            "auth_plugin": "rackspace",
            "region": "ORD",
            "username": "******",
            "api_key": "12345",
            "auth_url": "http://127.0.0.1:{}/identity/v2.0".format(
                listening_port.getHost().port
            ),
        })
        # Cause the Mimic server to close incoming connections
        patch.patch()
        api = get_api(
            backend=backend,
            api_args=api_args,
            reactor=object(),
            cluster_id=make_cluster_id(TestTypes.FUNCTIONAL),
        )
        # List volumes with API patched to close incoming connections.
        try:
            result = api.list_volumes()
        except ConnectFailure:
            # Can't use self.assertRaises here because that would call the
            # function in the main thread.
            pass
        else:
            self.fail(
                'ConnectFailure was not raised. '
                'Got {!r} instead.'.format(
                    result
                )
            )
        finally:
            # Re-enable the Mimic server.
            # The API operations that follow should succeed.
            patch.restore()

        # List volumes with API re-enabled
        result = api.list_volumes()
        self.assertEqual([], result)

        # Close the connection from the client side so that the mimic server
        # can close down without leaving behind lingering persistent HTTP
        # channels which cause dirty reactor errors.
        # XXX: This is gross. Perhaps we need ``IBlockDeviceAPI.close``
        (api
         .cinder_volume_manager
         ._original
         ._client_v2
         ._cinder_volumes
         .api
         .client
         .session
         .session.close())
Example #45
class RateLimiterPatcherTestCase(unittest.TestCase):
    def setUp(self):
        self.clock = Clock()
        self.monkey_patcher = MonkeyPatcher()
        self.monkey_patcher.addPatch(rate_limiter.time, "time", self.clock)
        self.monkey_patcher.patch()

        self.address = ("127.0.0.1", 55)
        self.query = Query()
        self.query.rpctype = "ping"
        self.query._from = 15
        self.query._transaction_id = 99
        self.packet = krpc_coder.encode(self.query)
        # Patch in hardcoded values for the bandwidth
        # limits so that changing the constants will
        # not affect the usefulness of this test case
        # (The global bandwidth is set to 3 standard ping queries)
        # (The per user bandwidth is set to 1 standard ping query)
        self.monkey_patcher.addPatch(rate_limiter.constants,
                                     "global_bandwidth_rate",
                                     3 * len(self.packet))
        self.monkey_patcher.addPatch(rate_limiter.constants,
                                     "host_bandwidth_rate",
                                     1 * len(self.packet))
        self.monkey_patcher.patch()

    def tearDown(self):
        self.monkey_patcher.restore()

    def _patched_sender(self):
        ksender = KRPC_Sender(TreeRoutingTable, 2**50)
        ksender.transport = HollowTransport()
        # Start the protocol to simulate
        # a regular environment
        rate_limited_proto = RateLimiter_Patcher(ksender)
        rate_limited_proto.startProtocol()
        return rate_limited_proto

    def test_inbound_overflowHostAndReset(self):
        """
        Make sure that we cannot overflow our inbound host bandwidth limit

        @see dhtbot.constants.host_bandwidth_rate

        """
        rate_limited_proto = self._patched_sender()
        counter = Counter()
        rate_limited_proto.krpcReceived = counter
        # One packet should be accepted without problems
        rate_limited_proto.datagramReceived(krpc_coder.encode(self.query),
                                            self.address)
        self.assertEquals(1, counter.count)
        counter.reset()
        # The second packet should be dropped
        rate_limited_proto.datagramReceived(krpc_coder.encode(self.query),
                                            self.address)
        self.assertEquals(0, counter.count)
        # Reset the rate limiter and the next packet should
        # be accepted
        self.clock.set(1)
        rate_limited_proto.datagramReceived(krpc_coder.encode(self.query),
                                            self.address)
        self.assertEquals(1, counter.count)

    def test_inbound_overflowGlobalAndReset(self):
        """
        Make sure that we cannot overflow our inbound global bandwidth limit

        @see dhtbot.constants.host_global_rate

        """
        address1 = ("127.0.0.1", 66)
        address2 = ("127.0.0.1", 76)
        address3 = ("127.0.0.1", 86)
        address4 = ("127.0.0.1", 555)
        rate_limited_proto = self._patched_sender()
        counter = Counter()
        rate_limited_proto.krpcReceived = counter
        # The first three packets should be accepted without
        # any problems
        rate_limited_proto.datagramReceived(krpc_coder.encode(self.query),
                                            address1)
        self.assertEquals(1, counter.count)
        rate_limited_proto.datagramReceived(krpc_coder.encode(self.query),
                                            address2)
        self.assertEquals(2, counter.count)
        rate_limited_proto.datagramReceived(krpc_coder.encode(self.query),
                                            address3)
        self.assertEquals(3, counter.count)
        # The fourth packet should be dropped
        rate_limited_proto.datagramReceived(krpc_coder.encode(self.query),
                                            address4)
        self.assertEquals(3, counter.count)
        # Reset the rate limiter and the next packet should be
        # accepted
        self.clock.set(1)
        rate_limited_proto.datagramReceived(krpc_coder.encode(self.query),
                                            self.address)
        self.assertEquals(4, counter.count)

    def test_outbound_overflowHostAndReset(self):
        """
        Make sure that we cannot overflow our outbound host bandwidth limit

        @see dhtbot.constants.host_bandwidth_rate

        """
        rate_limited_proto = self._patched_sender()
        # The first packet should go through without any problems
        rate_limited_proto.sendKRPC(self.query, self.address)
        self.assertTrue(
            rate_limited_proto._original.transport._packet_was_sent())
        # Second packet should not go through
        rate_limited_proto.sendKRPC(self.query, self.address)
        self.assertFalse(
            rate_limited_proto._original.transport._packet_was_sent())
        # Update the clock (resetting the rate limiter)
        self.clock.set(1)
        # This packet should now go through
        rate_limited_proto.sendKRPC(self.query, self.address)
        self.assertTrue(
            rate_limited_proto._original.transport._packet_was_sent())

    def test_outbound_overflowGlobalAndReset(self):
        """
        Make sure that we cannot overflow our outbound global bandwidth limit

        @see dhtbot.constants.global_bandwidth_rate

        """
        rate_limited_proto = self._patched_sender()
        # Reset the hollow transport
        rate_limited_proto._original.transport._reset()
        # The first three packets should go through without any problems
        address1 = ("127.0.0.1", 66)
        address2 = ("127.0.0.1", 76)
        address3 = ("127.0.0.1", 86)
        address4 = ("127.0.0.1", 555)

        # Packet 1, 2, 3
        for i in range(1, 4):
            rate_limited_proto.sendKRPC(self.query,
                                        locals()['address' + str(i)])
            self.assertTrue(
                rate_limited_proto._original.transport._packet_was_sent())

        # The fourth packet should not go through
        rate_limited_proto.sendKRPC(self.query, address4)
        self.assertFalse(
            rate_limited_proto._original.transport._packet_was_sent())
        # Change the time to reset the rate limiter
        self.clock.set(1)
        # This packet should now go through
        rate_limited_proto.sendKRPC(self.query, self.address)
        self.assertTrue(
            rate_limited_proto._original.transport._packet_was_sent())
Example #46
    def test_config_drive(self):
        """
        The instance ID is retrieved from the config drive in preference to the
        metadata server.
        """
        patch = MonkeyPatcher()
        # A compute_instance_id found on config drive
        drive_compute_instance_id = unicode(uuid4())
        # A compute_instance_id found from the metadata service
        server_compute_instance_id = unicode(uuid4())

        # Set up a fake config drive and point the API to its label
        configdrive_label = filesystem_label_for_test(self)
        device = formatted_loopback_device_for_test(
            self,
            label=configdrive_label,
        )
        with temporary_mount(device.device) as mountpoint:
            metadata_file = mountpoint.descendant(
                METADATA_RELATIVE_PATH
            )
            metadata_file.parent().makedirs()
            metadata_file.setContent(
                json.dumps({
                    "uuid": drive_compute_instance_id
                })
            )
        patch.addPatch(
            self.api,
            '_config_drive_label',
            configdrive_label,
        )
        # Set up a fake metadata service and point the API to its endpoint
        listening = webserver_for_test(
            self,
            url_path="/" + "/".join(METADATA_RELATIVE_PATH),
            response_content=json.dumps(
                {"uuid": server_compute_instance_id}
            ),
        )

        def set_metadata_service_endpoint(port):
            address = port.getHost()
            endpoint = (address.host, address.port)
            patch.addPatch(
                self.api,
                '_metadata_service_endpoint',
                endpoint,
            )
            return port

        listening.addCallback(set_metadata_service_endpoint)

        # Run compute_instance_id in a separate thread,
        # with the API patched to check the fake metadata sources.
        def start_compute_instance_id(port):
            patch.patch()
            return deferToThread(
                self.api.compute_instance_id
            )
        connecting = listening.addCallback(start_compute_instance_id)

        def check(result):
            self.assertEqual(drive_compute_instance_id, result)
        checking = connecting.addCallback(check)
        return checking
Example #47
class RateLimiterPatcherTestCase(unittest.TestCase):
    def setUp(self):
        self.clock = Clock()
        self.monkey_patcher = MonkeyPatcher()
        self.monkey_patcher.addPatch(rate_limiter.time, "time", self.clock)
        self.monkey_patcher.patch()

        self.address = ("127.0.0.1", 55)
        self.query = Query()
        self.query.rpctype = "ping"
        self.query._from = 15
        self.query._transaction_id = 99
        self.packet = krpc_coder.encode(self.query)
        # Patch in hardcoded values for the bandwidth
        # limits so that changing the constants will
        # not affect the usefulness of this test case
        # (The global bandwidth is set to 3 standard ping queries)
        # (The per user bandwidth is set to 1 standard ping query)
        self.monkey_patcher.addPatch(rate_limiter.constants,
                "global_bandwidth_rate", 3 * len(self.packet))
        self.monkey_patcher.addPatch(rate_limiter.constants,
                "host_bandwidth_rate", 1 * len(self.packet))
        self.monkey_patcher.patch()

    def tearDown(self):
        self.monkey_patcher.restore()

    def _patched_sender(self):
        ksender = KRPC_Sender(TreeRoutingTable, 2**50)
        ksender.transport = HollowTransport()
        # Start the protocol to simulate
        # a regular environment
        rate_limited_proto = RateLimiter_Patcher(ksender)
        rate_limited_proto.startProtocol()
        return rate_limited_proto

    def test_inbound_overflowHostAndReset(self):
        """
        Make sure that we cannot overflow our inbound host bandwidth limit

        @see dhtbot.constants.host_bandwidth_rate

        """
        rate_limited_proto = self._patched_sender()
        counter = Counter()
        rate_limited_proto.krpcReceived = counter
        # One packet should be accepted without problems
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), self.address)
        self.assertEquals(1, counter.count)
        counter.reset()
        # The second packet should be dropped
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), self.address)
        self.assertEquals(0, counter.count)
        # Reset the rate limiter and the next packet should
        # be accepted
        self.clock.set(1)
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), self.address)
        self.assertEquals(1, counter.count)

    def test_inbound_overflowGlobalAndReset(self):
        """
        Make sure that we cannot overflow our inbound global bandwidth limit

        @see dhtbot.constants.global_bandwidth_rate

        """
        address1 = ("127.0.0.1", 66)
        address2 = ("127.0.0.1", 76)
        address3 = ("127.0.0.1", 86)
        address4 = ("127.0.0.1", 555)
        rate_limited_proto = self._patched_sender()
        counter = Counter()
        rate_limited_proto.krpcReceived = counter
        # The first three packets should be accepted without
        # any problems
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), address1)
        self.assertEquals(1, counter.count)
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), address2)
        self.assertEquals(2, counter.count)
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), address3)
        self.assertEquals(3, counter.count)
        # The fourth packet should be dropped
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), address4)
        self.assertEquals(3, counter.count)
        # Reset the rate limiter and the next packet should be
        # accepted
        self.clock.set(1)
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), self.address)
        self.assertEquals(4, counter.count)

    def test_outbound_overflowHostAndReset(self):
        """
        Make sure that we cannot overflow our outbound host bandwidth limit

        @see dhtbot.constants.host_bandwidth_rate

        """
        rate_limited_proto = self._patched_sender()
        # The first packet should go through without any problems
        rate_limited_proto.sendKRPC(self.query, self.address)
        self.assertTrue(
                rate_limited_proto._original.transport._packet_was_sent())
        # Second packet should not go through
        rate_limited_proto.sendKRPC(self.query, self.address)
        self.assertFalse(
                rate_limited_proto._original.transport._packet_was_sent())
        # Update the clock (resetting the rate limiter)
        self.clock.set(1)
        # This packet should now go through
        rate_limited_proto.sendKRPC(self.query, self.address)
        self.assertTrue(
                rate_limited_proto._original.transport._packet_was_sent())

    def test_outbound_overflowGlobalAndReset(self):
        """
        Make sure that we cannot overflow our outbound global bandwidth limit

        @see dhtbot.constants.global_bandwidth_rate

        """
        rate_limited_proto = self._patched_sender()
        # Reset the hollow transport
        rate_limited_proto._original.transport._reset()
        # The first three packets should go through without any problems
        address1 = ("127.0.0.1", 66)
        address2 = ("127.0.0.1", 76)
        address3 = ("127.0.0.1", 86)
        address4 = ("127.0.0.1", 555)

        # Packets 1, 2, 3
        for address in (address1, address2, address3):
            rate_limited_proto.sendKRPC(self.query, address)
            self.assertTrue(
                    rate_limited_proto._original.transport._packet_was_sent())

        # The fourth packet should not go through
        rate_limited_proto.sendKRPC(self.query, address4)
        self.assertFalse(
                rate_limited_proto._original.transport._packet_was_sent())
        # Change the time to reset the rate limiter
        self.clock.set(1)
        # This packet should now go through
        rate_limited_proto.sendKRPC(self.query, self.address)
        self.assertTrue(
                rate_limited_proto._original.transport._packet_was_sent())
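
The clock patched in over time.time in setUp above is evidently a callable
test double with a set() method (twisted.internet.task.Clock offers
seconds() and advance(), but no set()), so something along these lines is
assumed:

class SettableClock(object):
    """Hypothetical stand-in for the clock used above: it replaces
    time.time, so calling it returns the current fake time, and set()
    jumps that time to an absolute value."""
    def __init__(self):
        self._now = 0

    def set(self, when):
        self._now = when

    def __call__(self):
        return self._now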
Beispiel #48
0
 def _persist_patch(self, obj, attribute, value):
     monkey_patch = MonkeyPatcher((obj, attribute, value))
     self._persist_patches.append(monkey_patch)
     monkey_patch.patch()
     return monkey_patch
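
Presumably _persist_patches is kept so that a teardown hook can undo every
persisted patch at once; a sketch of that counterpart, assuming the list
attribute above (the method name is an illustration, not from the source):

 def _restore_persisted_patches(self):
     # Undo in reverse order so that later patches over the same
     # attribute unwind correctly.
     while self._persist_patches:
         self._persist_patches.pop().restore()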
Beispiel #49
0
class KRPC_Iterator_TestCase(unittest.TestCase):
    # TODO
    #
    # This inheritance and patching pattern is messy, complex,
    # and doesn't make for maintainable code.
    #
    # Refactor it so that KRPC_Sender has a single reactor
    # reference bound in its constructor (at definition time,
    # as a default argument). That way a hollow reactor can
    # simply be passed in instead of being patched in.
    #
    # What about KRPC_Responder and KRPC_Iterator?
    #   - A pass-through argument that floats up through
    #     the constructors
    # TODO
    def setUp(self):
        self.monkey_patcher = MonkeyPatcher()
        self.monkey_patcher.addPatch(krpc_sender, "reactor", HollowReactor())
        self.monkey_patcher.patch()
        self.k_iter = KRPC_Iterator()
        self.k_iter.transport = HollowTransport()
        self.target_id = 5

    def tearDown(self):
        self.monkey_patcher.restore()

    #
    # Find iterate test cases 
    #
    def test_find_iterate_properNumberOfQueriesSent_noNodesInRT(self):
        self._check_k_iter_sendsProperNumberOfQueries_noNodesInRT(
                self.k_iter.find_iterate)

    def test_find_iterate_firesAfterAllQueriesFire(self):
        self._check_k_iter_firesAfterAllQueriesFire(
                self.k_iter.find_iterate)

    def test_find_iterate_usesNodesFromRoutingTable(self):
        self._check_k_iter_usesNodesFromRoutingTable(
                self.k_iter.find_iterate)

    def test_find_iterate_noNodesRaisesIterationError(self):
        self._check_k_iter_raisesIterationErrorOnNoSeedNodes(
                self.k_iter.find_iterate)

    def test_find_iterate_allQueriesTimeoutRaisesIterationError(self):
        self._check_k_iter_failsWhenAllQueriesTimeOut(
                self.k_iter.find_iterate)

    def test_find_iterate_returnsNewNodes(self):
        # deferreds is a (query, deferred) tuple list
        (deferreds, d) = self._iterate_and_returnQueriesAndDeferreds(
                self.k_iter.find_iterate)
        num_queries = len(deferreds)
        # Use any nodes as result nodes (even the nodes themselves)
        result_nodes = test_nodes[:num_queries]
        # Set up dummy node_id's
        node_id = 1
        for (query, deferred), node in zip(deferreds, result_nodes):
            response = query.build_response(nodes=[node])
            response._from = node_id
            node_id += 1
            deferred.callback(response)
        expected_nodes = set(result_nodes)
        d.addErrback(self._fail_errback)
        d.addCallback(self._compare_nodes, expected_nodes)
        # Make sure we don't accidentally slip past an
        # uncalled deferred
        self.assertTrue(d.called)

    #
    # Get iterate test cases
    #
    def test_get_iterate_properNumberOfQueriesSent_noNodesInRT(self):
        self._check_k_iter_sendsProperNumberOfQueries_noNodesInRT(
                self.k_iter.get_iterate)

    def test_get_iterate_firesAfterAllQueriesFire(self):
        self._check_k_iter_firesAfterAllQueriesFire(
                self.k_iter.get_iterate)

    def test_get_iterate_usesNodesFromRoutingTable(self):
        self._check_k_iter_usesNodesFromRoutingTable(
                self.k_iter.get_iterate)

    def test_get_iterate_noNodesRaisesIterationError(self):
        self._check_k_iter_raisesIterationErrorOnNoSeedNodes(
                self.k_iter.get_iterate)

    def test_get_iterate_allQueriesTimeoutRaisesIterationError(self):
        self._check_k_iter_failsWhenAllQueriesTimeOut(
                self.k_iter.get_iterate)

    def test_get_iterate_returnsNewNodesAndPeers(self):
        # deferreds is a (query, deferred) tuple list
        # where each tuple corresponds to one outbound query
        # and deferred result
        #
        # and d is a deferred result of the iter_func
        (deferreds, d) = self._iterate_and_returnQueriesAndDeferreds(
                self.k_iter.get_iterate)
        num_queries = len(deferreds)

        # Use any nodes as result nodes (even the nodes themselves)
        result_nodes = test_nodes[:num_queries]
        result_peers = test_peers[:num_queries]

        # Set up dummy node_id's
        node_id = 1

        # Simulate the event that every outbound
        # query received a result (by making dummy valid
        # responses and feeding them into the deferred)
        for (query, deferred), node, peer in \
            zip(deferreds, result_nodes, result_peers):
            response = query.build_response(nodes=[node], peers=[peer])
            response._from = node_id
            node_id += 1
            deferred.callback(response)

        expected_nodes = result_nodes
        expected_peers = result_peers
        d.addErrback(self._fail_errback)
        d.addCallback(self._compare_peers, expected_peers)
        d.addCallback(self._compare_nodes, expected_nodes)
        # Make sure we don't accidentally slip past an
        # uncalled deferred
        self.assertTrue(d.called)

    # Auxiliary test helpers that generalize
    # the test cases above
    def _check_k_iter_sendsProperNumberOfQueries_noNodesInRT(self, iter_func):
        sendQuery = self.k_iter.sendQuery
        self.k_iter.sendQuery = Counter(sendQuery)
        expected_num_queries = 15
        iter_func(self.target_id, test_nodes[:expected_num_queries])
        self.assertEquals(expected_num_queries, self.k_iter.sendQuery.count)

    def _check_k_iter_firesAfterAllQueriesFire(self, iter_func):
        """
        Ensure one 'iterative' query fires after all its subqueries fire
        """
        sendQuery = self.k_iter.sendQuery
        self.k_iter.sendQuery = DeferredGrabber(sendQuery)
        num_queries = 5
        d = iter_func(self.target_id, test_nodes[:num_queries])
        deferreds = self.k_iter.sendQuery.deferreds
        test_node_id = 1
        # Make sure that `num_queries` queries were sent
        self.assertEquals(num_queries, len(deferreds))
        for (query, deferred) in deferreds:
            # Grab any node as a response node
            nodes = [test_nodes[55]]
            # Make a valid response node to feed
            # into the subdeferreds
            response = query.build_response(nodes=nodes)
            # Any node id works
            response._from = test_node_id
            test_node_id += 1
            if query.rpctype == "get_peers":
                response.token = 555
            deferred.callback(response)
        # After "receiving a response" to every outgoing
        # query, our main deferred should fire
        self.assertTrue(d.called)

    def _check_k_iter_usesNodesFromRoutingTable(self, iter_func):
        get_closest_nodes = self.k_iter.routing_table.get_closest_nodes
        self.k_iter.routing_table.get_closest_nodes = \
            Counter(get_closest_nodes)
        # If we don't supply any testing nodes,
        # the protocol should check its routing table
        d = iter_func(self.target_id)
        d.addErrback(self._silence_iteration_error)
        looked_for_nodes = \
                self.k_iter.routing_table.get_closest_nodes.count > 0
        self.assertTrue(looked_for_nodes)

    def _check_k_iter_raisesIterationErrorOnNoSeedNodes(self, iter_func):
        d = iter_func(self.target_id)
        d.addCallbacks(callback=self._ensure_iteration_error_callback,
                errback=self._ensure_iteration_error_errback)

    def _ensure_iteration_error_errback(self, failure):
        isnt_iteration_error = failure.check(IterationError) is None
        if isnt_iteration_error:
            self.fail("KRPC_Iterator threw an error that wasn't " +
                    "an IterationError")

    def _ensure_iteration_error_callback(self, _ignored_result):
        self.fail("KRPC_Iterator did not throw an IterationError " +
                "and was incorrectly successful instead")

    def _check_k_iter_failsWhenAllQueriesTimeOut(self, iter_func):
        sendQuery = self.k_iter.sendQuery
        self.k_iter.sendQuery = DeferredGrabber(sendQuery)
        num_queries = 5
        d = iter_func(self.target_id, test_nodes[:num_queries])
        deferreds = self.k_iter.sendQuery.deferreds

        # Make sure an IterationError is thrown once we
        # artificially timeout all queries
        d.addCallbacks(callback=self._ensure_iteration_error_callback,
                errback=self._ensure_iteration_error_errback)

        # Timeout all queries
        for (query, deferred) in deferreds:
            deferred.errback(TimeoutError())
        
    def _compare_nodes(self, result_node_list, expected_nodes):
        # Assert that our resulting list of nodes
        # matches what we expected
        for node in result_node_list:
            self.assertTrue(node in expected_nodes)
        self.assertEquals(len(expected_nodes),
                len(result_node_list))

    def _compare_peers(self, result, expected_peers):
        (result_nodes, result_peers) = result
        self.assertEquals(set(expected_peers), set(result_peers))
        # Return the nodes, since the next callback
        # will check the expected nodes
        return result_nodes

    def _fail_errback(self, failure):
        exception = failure.value
        self.fail("KRPC_Iterator failed when it shouldn't have: " 
                + str(exception))

    def _iterate_and_returnQueriesAndDeferreds(self, iter_func):
        # Capture all outbound queries
        # and all deferreds
        sendQuery = self.k_iter.sendQuery
        self.k_iter.sendQuery = DeferredGrabber(sendQuery)
        # Use the first 10 nodes as our seeds
        d = iter_func(self.target_id, test_nodes[:10])
        deferreds = self.k_iter.sendQuery.deferreds
        return (deferreds, d)

    def _silence_iteration_error(self, failure):
        failure.trap(IterationError)
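
Counter is used in two ways across these tests: as a bare callable with a
count attribute and reset() (the rate limiter tests above), and as a wrapper
around an existing function such as sendQuery (here). A minimal test double
consistent with both uses might look like this (an assumption, since the
real helper is not shown in this excerpt):

class Counter(object):
    def __init__(self, func=None):
        self.func = func
        self.count = 0

    def reset(self):
        self.count = 0

    def __call__(self, *args, **kwargs):
        # Count every call, delegating to the wrapped function if any.
        self.count += 1
        if self.func is not None:
            return self.func(*args, **kwargs)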
Beispiel #50
0
class MonkeyPatcherTest(unittest.TestCase):
    """
    Tests for L{MonkeyPatcher} monkey-patching class.
    """

    def setUp(self):
        self.testObject = TestObj()
        self.originalObject = TestObj()
        self.monkeyPatcher = MonkeyPatcher()

    def test_empty(self):
        """
        A monkey patcher without patches shouldn't change a thing.
        """
        self.monkeyPatcher.patch()

        # We can't assert that all state is unchanged, but at least we can
        # check our test object.
        self.assertEqual(self.originalObject.foo, self.testObject.foo)
        self.assertEqual(self.originalObject.bar, self.testObject.bar)
        self.assertEqual(self.originalObject.baz, self.testObject.baz)

    def test_constructWithPatches(self):
        """
        Constructing a L{MonkeyPatcher} with patches should add all of the
        given patches to the patch list.
        """
        patcher = MonkeyPatcher((self.testObject, "foo", "haha"), (self.testObject, "bar", "hehe"))
        patcher.patch()
        self.assertEqual("haha", self.testObject.foo)
        self.assertEqual("hehe", self.testObject.bar)
        self.assertEqual(self.originalObject.baz, self.testObject.baz)

    def test_patchExisting(self):
        """
        Patching an attribute that exists sets it to the value defined in the
        patch.
        """
        self.monkeyPatcher.addPatch(self.testObject, "foo", "haha")
        self.monkeyPatcher.patch()
        self.assertEqual(self.testObject.foo, "haha")

    def test_patchNonExisting(self):
        """
        Patching a non-existing attribute fails with an C{AttributeError}.
        """
        self.monkeyPatcher.addPatch(self.testObject, "nowhere", "blow up please")
        self.assertRaises(AttributeError, self.monkeyPatcher.patch)

    def test_patchAlreadyPatched(self):
        """
        Adding a patch for an object and attribute that already have a patch
        overrides the existing patch.
        """
        self.monkeyPatcher.addPatch(self.testObject, "foo", "blah")
        self.monkeyPatcher.addPatch(self.testObject, "foo", "BLAH")
        self.monkeyPatcher.patch()
        self.assertEqual(self.testObject.foo, "BLAH")
        self.monkeyPatcher.restore()
        self.assertEqual(self.testObject.foo, self.originalObject.foo)

    def test_restoreTwiceIsANoOp(self):
        """
        Restoring an already-restored monkey patch is a no-op.
        """
        self.monkeyPatcher.addPatch(self.testObject, "foo", "blah")
        self.monkeyPatcher.patch()
        self.monkeyPatcher.restore()
        self.assertEqual(self.testObject.foo, self.originalObject.foo)
        self.monkeyPatcher.restore()
        self.assertEqual(self.testObject.foo, self.originalObject.foo)

    def test_runWithPatchesDecoration(self):
        """
        runWithPatches should run the given callable, passing in all arguments
        and keyword arguments, and return the return value of the callable.
        """
        log = []

        def f(a, b, c=None):
            log.append((a, b, c))
            return "foo"

        result = self.monkeyPatcher.runWithPatches(f, 1, 2, c=10)
        self.assertEqual("foo", result)
        self.assertEqual([(1, 2, 10)], log)

    def test_repeatedRunWithPatches(self):
        """
        We should be able to call the same function with runWithPatches more
        than once. All patches should apply for each call.
        """

        def f():
            return (self.testObject.foo, self.testObject.bar, self.testObject.baz)

        self.monkeyPatcher.addPatch(self.testObject, "foo", "haha")
        result = self.monkeyPatcher.runWithPatches(f)
        self.assertEqual(("haha", self.originalObject.bar, self.originalObject.baz), result)
        result = self.monkeyPatcher.runWithPatches(f)
        self.assertEqual(("haha", self.originalObject.bar, self.originalObject.baz), result)

    def test_runWithPatchesRestores(self):
        """
        C{runWithPatches} should restore the original values after the function
        has executed.
        """
        self.monkeyPatcher.addPatch(self.testObject, "foo", "haha")
        self.assertEqual(self.originalObject.foo, self.testObject.foo)
        self.monkeyPatcher.runWithPatches(lambda: None)
        self.assertEqual(self.originalObject.foo, self.testObject.foo)

    def test_runWithPatchesRestoresOnException(self):
        """
        Test runWithPatches restores the original values even when the function
        raises an exception.
        """

        def _():
            self.assertEqual(self.testObject.foo, "haha")
            self.assertEqual(self.testObject.bar, "blahblah")
            raise RuntimeError("Something went wrong!")

        self.monkeyPatcher.addPatch(self.testObject, "foo", "haha")
        self.monkeyPatcher.addPatch(self.testObject, "bar", "blahblah")

        self.assertRaises(RuntimeError, self.monkeyPatcher.runWithPatches, _)
        self.assertEqual(self.testObject.foo, self.originalObject.foo)
        self.assertEqual(self.testObject.bar, self.originalObject.bar)
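
TestObj itself is not shown in this excerpt; any class exposing three plain
attributes satisfies these tests, for example (attribute values are
illustrative):

class TestObj(object):
    def __init__(self):
        # Three ordinary attributes for the patcher to target.
        self.foo = "foo value"
        self.bar = "bar value"
        self.baz = "baz value"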
Beispiel #51
0
def get_inotify_module():
    # Until Twisted #9579 is fixed, the Docker check just screws things up.
    # Disable it.
    monkey = MonkeyPatcher()
    monkey.addPatch(runtime.platform, "isDocker", lambda: False)
    return monkey.runWithPatches(_get_inotify_module)
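
runWithPatches applies the recorded patches, calls the function, and then
restores the originals even if the call raises (see
test_runWithPatchesRestoresOnException above), so the isDocker override
cannot leak. Roughly, for the same runtime.platform object:

monkey = MonkeyPatcher()
monkey.addPatch(runtime.platform, "isDocker", lambda: False)
# The lambda defers the attribute lookup until the patch is active;
# after runWithPatches returns, the original isDocker is back in place.
assert monkey.runWithPatches(lambda: runtime.platform.isDocker()) is False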
Beispiel #53
0
    def build(self, projectName, projectURL, sourceURL, packagePath,
              outputPath):
        """
        Call pydoctor's entry point with options which will generate HTML
        documentation for the specified package's API.

        @type projectName: C{str}
        @param projectName: The name of the package for which to generate
            documentation.

        @type projectURL: C{str}
        @param projectURL: The location (probably an HTTP URL) of the project
            on the web.

        @type sourceURL: C{str}
        @param sourceURL: The location (probably an HTTP URL) of the root of
            the source browser for the project.

        @type packagePath: L{FilePath}
        @param packagePath: The path to the top-level of the package named by
            C{projectName}.

        @type outputPath: L{FilePath}
        @param outputPath: An existing directory to which the generated API
            documentation will be written.
        """
        intersphinxes = []

        for intersphinx in intersphinxURLs:
            intersphinxes.append("--intersphinx")
            intersphinxes.append(intersphinx)

        # Super awful monkeypatch that will selectively use our templates.
        from pydoctor.templatewriter import util
        originalTemplatefile = util.templatefile

        def templatefile(filename):

            if filename in ["summary.html", "index.html", "common.html"]:
                twistedPythonDir = FilePath(__file__).parent()
                templatesDir = twistedPythonDir.child("_pydoctortemplates")
                return templatesDir.child(filename).path
            else:
                return originalTemplatefile(filename)

        monkeyPatch = MonkeyPatcher((util, "templatefile", templatefile))
        monkeyPatch.patch()

        from pydoctor.driver import main

        args = [u"--project-name", projectName,
                u"--project-url", projectURL,
                u"--system-class", u"twisted.python._pydoctor.TwistedSystem",
                u"--project-base-dir", packagePath.parent().path,
                u"--html-viewsource-base", sourceURL,
                u"--add-package", packagePath.path,
                u"--html-output", outputPath.path,
                u"--html-write-function-pages", u"--quiet", u"--make-html",
               ] + intersphinxes
        args = [arg.encode("utf-8") for arg in args]
        main(args)

        monkeyPatch.restore()
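
One caveat in the block above: if main() raises, monkeyPatch.restore() never
runs and the template override leaks into later builds. Wrapping the call
would make the patch exception-safe, e.g.:

        monkeyPatch.patch()
        try:
            main(args)
        finally:
            # Restore util.templatefile even when pydoctor fails.
            monkeyPatch.restore()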
Beispiel #54
0
    def test_no_retry_authentication(self):
        """
        The API object returned by ``cinder_from_configuration`` will retry
        authentication even when initial authentication attempts fail.
        """
        import twisted.web.http
        self.patch(
            twisted.web.http.HTTPChannel,
            'checkPersistence',
            lambda self, request, version: False
        )
        patch = MonkeyPatcher()
        patch.addPatch(
            twisted.web.http.HTTPChannel,
            'connectionMade',
            lambda self: self.transport.loseConnection()
        )
        self.addCleanup(patch.restore)
        mimic_starting = mimic_for_test(test_case=self)

        def build_api(listening_port):
            backend, api_args = backend_and_api_args_from_configuration({
                "backend": "openstack",
                "auth_plugin": "rackspace",
                "region": "ORD",
                "username": "******",
                "api_key": "12345",
                "auth_url": "http://127.0.0.1:{}/identity/v2.0".format(
                    listening_port.getHost().port
                ),
            })
            patch.patch()
            api = get_api(
                backend=backend,
                api_args=api_args,
                reactor=object(),
                cluster_id=make_cluster_id(TestTypes.FUNCTIONAL),
            )
            patch.restore()
            return api

        mimic_started = mimic_starting.addCallback(build_api)

        def list_volumes(api, force_connection_failure=False):
            if force_connection_failure:
                patch.patch()
            try:
                return api.list_volumes()
            finally:
                patch.restore()

        def check_failing_connection(api):
            d = deferToThread(
                lambda api: list_volumes(api, force_connection_failure=True),
                api,
            )
            d = self.assertFailure(d, ConnectFailure)
            # return the api for further testing.
            d = d.addCallback(
                lambda failure_instance: api
            )
            return d
        listing_volumes1 = mimic_started.addCallback(check_failing_connection)

        def check_successful_connection(api):
            d = deferToThread(
                lambda api: list_volumes(api, force_connection_failure=False),
                api,
            )
            d = d.addCallback(
                lambda result: self.assertEqual([], result)
            )
            return d
        finishing = listing_volumes1.addCallback(check_successful_connection)

        return finishing
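
The patch()/try/finally/restore() dance in list_volumes above is a natural
fit for a context manager; a small helper along these lines (the name
patched is illustrative) reads more directly:

from contextlib import contextmanager

@contextmanager
def patched(patcher):
    # Apply a MonkeyPatcher for the duration of a with-block, restoring
    # the originals even if the body raises.
    patcher.patch()
    try:
        yield
    finally:
        patcher.restore()

# Usage: with patched(patch): api.list_volumes()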
Beispiel #56
0
    def _build_and_test_api(self, listening_port):
        """
        Build the CinderBlockDeviceAPI configured to connect to the Mimic
        server at ``listening_port``.
        Patch twisted.web to force the Mimic server to drop incoming
        connections, then interact with the disabled API server first and,
        after re-enabling it, show that the API will re-authenticate even
        after an initial failure.
        """
        import twisted.web.http
        patch = MonkeyPatcher()
        patch.addPatch(twisted.web.http.HTTPChannel, 'connectionMade',
                       lambda self: self.transport.loseConnection())
        self.addCleanup(patch.restore)
        backend, api_args = backend_and_api_args_from_configuration({
            "backend": "openstack",
            "auth_plugin": "rackspace",
            "region": "ORD",
            "username": "******",
            "api_key": "12345",
            "auth_url": "http://127.0.0.1:{}/identity/v2.0".format(
                listening_port.getHost().port),
        })
        # Cause the Mimic server to close incoming connections
        patch.patch()
        api = get_api(
            backend=backend,
            api_args=api_args,
            reactor=object(),
            cluster_id=make_cluster_id(TestTypes.FUNCTIONAL),
        )
        # List volumes with API patched to close incoming connections.
        try:
            result = api.list_volumes()
        except ConnectFailure:
            # Can't use self.assertRaises here because that would call the
            # function in the main thread.
            pass
        else:
            self.fail('ConnectFailure was not raised. '
                      'Got {!r} instead.'.format(result))
        finally:
            # Re-enable the Mimic server.
            # The API operations that follow should succeed.
            patch.restore()

        # List volumes with API re-enabled
        result = api.list_volumes()
        self.assertEqual([], result)

        # Close the connection from the client side so that the mimic server
        # can close down without leaving behind lingering persistent HTTP
        # channels which cause dirty reactor errors.
        # XXX: This is gross. Perhaps we need ``IBlockDeviceAPI.close``
        (api.cinder_volume_manager._original._client_v2._cinder_volumes.api.
         client.session.session.close())
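
As above, registering restore() with addCleanup guarantees the
connection-dropping patch is undone even when an assertion fails mid-test;
the same idiom suits any MonkeyPatcher-based fixture (module and value
names here are placeholders):

    def setUp(self):
        self.patcher = MonkeyPatcher()
        self.patcher.addPatch(some_module, "attribute", fake_value)
        self.patcher.patch()
        # trial runs cleanups even if the test body raises.
        self.addCleanup(self.patcher.restore)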