Example #1
 def test_put_verifyProperRemoval(self):
     # Replace the time function of the datastore module
     # so that we can artificially speed up time
     monkey_patcher = MonkeyPatcher()
     c = clock()
     c.set(0)
     monkey_patcher.addPatch(datastore, "time", c)
     # Set peer_timeout to 5 seconds
     monkey_patcher.addPatch(constants, "peer_timeout", 5)
     monkey_patcher.patch()
     # Insert a node and verify it is within the datastore
     m = self.datastore(self.reactor)
     infohash = 5
     expected_peer = ("127.0.0.1", 5151)
     m.put(infohash, expected_peer)
     peers = m.get(infohash)
     # Iterate over a 1 element list
     for peer in peers:
         self.assertEqual(expected_peer, peer)
     self.assertEqual(1, len(peers))
     # Change the time and verify that the cleaning function
     # actually removes the peer
     c.set(5)
     # TODO: hackish; shouldn't reach into the object
     m._cleanup(infohash, peer)
     peers = m.get(infohash)
     self.assertEqual(0, len(peers))
     monkey_patcher.restore()
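
The pattern above is the whole MonkeyPatcher lifecycle: addPatch() registers (object, attribute, replacement) triples, patch() applies them all at once, and restore() puts the originals back. A minimal, self-contained sketch of that lifecycle, using the stdlib time module and a settable clock comparable to the clock() helper above:

import time

from twisted.python.monkey import MonkeyPatcher

class SettableClock(object):
    """A stand-in for time.time() whose value is set by hand."""
    def __init__(self, now=0):
        self._now = now

    def set(self, now):
        self._now = now

    def __call__(self):
        return self._now

clock = SettableClock()
patcher = MonkeyPatcher()
patcher.addPatch(time, "time", clock)  # registered, not yet applied
patcher.patch()                        # time.time is now our clock
assert time.time() == 0
clock.set(42)                          # artificially "speed up" time
assert time.time() == 42
patcher.restore()                      # the real time.time is back
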
Example #2
 def test_error_logging(self, logger):
     """
     Failures while applying a diff emit a log message containing the full
     diff.
     """
     o1 = DiffTestObjInvariant(
         a=1,
         b=2,
     )
     patcher = MonkeyPatcher()
     patcher.addPatch(
         DiffTestObjInvariant,
         '_perform_invariant_check',
         False
     )
     patcher.patch()
     try:
         o2 = o1.set('b', 1)
     finally:
         patcher.restore()
     diff = create_diff(o1, o2)
     self.assertRaises(
         InvariantException,
         diff.apply,
         o1,
     )
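
The try/finally above guarantees restore() runs even if o1.set raises while the class is patched. MonkeyPatcher bundles that exact shape as runWithPatches(), which applies the patches, calls the function, and restores in a finally block, so the patched portion of this test could equivalently read:

patcher = MonkeyPatcher()
patcher.addPatch(DiffTestObjInvariant, '_perform_invariant_check', False)
# patch(), call the function, restore(), all in one step
o2 = patcher.runWithPatches(o1.set, 'b', 1)
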
Example #3
    def run(self, result):
        """
        Run the test case in the context of a distinct Eliot action.

        The action will finish after the test is done.  It will note the name of
        the test being run.

        All messages emitted by the test will be validated.  They will still be
        delivered to the global logger.
        """
        # The idea here is to decorate the test method itself so that all of
        # the extra logic happens at the point where test/application logic is
        # expected to be.  This `run` method is more like test infrastructure
        # and things do not go well when we add too much extra behavior here.
        # For example, exceptions raised here often just kill the whole
        # runner.
        patcher = MonkeyPatcher()

        # So, grab the test method.
        name = self.case._testMethodName
        original = getattr(self.case, name)
        decorated = with_logging(ensure_text(self.case.id()), original)
        patcher.addPatch(self.case, name, decorated)
        try:
            # Patch it in
            patcher.patch()
            # Then use the rest of the machinery to run it.
            return self._run_tests_with_factory(
                self.case,
                self.handlers,
                self.last_resort,
            ).run(result)
        finally:
            # Clean up the patching so repeated runs start unpatched.
            patcher.restore()
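
The patch-in-try, restore-in-finally shape recurs throughout these examples, so it can be worth expressing once as a context manager. A sketch of that idea (the patched() helper is invented here, not part of Twisted's API):

from contextlib import contextmanager

from twisted.python.monkey import MonkeyPatcher

@contextmanager
def patched(*patches):
    """Apply (obj, name, value) patches for the duration of a with-block."""
    patcher = MonkeyPatcher(*patches)
    patcher.patch()
    try:
        yield patcher
    finally:
        patcher.restore()

# Hypothetical usage with the decorated test method above:
# with patched((self.case, name, decorated)):
#     return self._run_tests_with_factory(...).run(result)
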
Example #4
class TestingBase(object):
    def setUp(self):
        self.clock = Clock()
        self.monkey_patcher = MonkeyPatcher()
        self.monkey_patcher.addPatch(rate_limiter.time, "time", self.clock)
        self.monkey_patcher.patch()

    def tearDown(self):
        self.monkey_patcher.restore()
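
One caveat with this setUp/tearDown pairing: tearDown is skipped when setUp itself fails partway through, which would leave rate_limiter.time patched for the rest of the run. When the mixin is combined with a unittest.TestCase (an assumption here), addCleanup avoids that, since cleanups run even after a failed setUp:

class TestingBase(object):
    def setUp(self):
        self.clock = Clock()
        self.monkey_patcher = MonkeyPatcher()
        self.monkey_patcher.addPatch(rate_limiter.time, "time", self.clock)
        self.monkey_patcher.patch()
        # Runs even if a later setUp step raises; assumes this mixin
        # is used with unittest.TestCase, which provides addCleanup.
        self.addCleanup(self.monkey_patcher.restore)
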
Example #5
 def test_error_logging(self, logger):
     """
     Failures while applying a diff emit a log message containing the full
     diff.
     """
     o1 = DiffTestObjInvariant(
         a=1,
         b=2,
     )
     patcher = MonkeyPatcher()
     patcher.addPatch(DiffTestObjInvariant, '_perform_invariant_check',
                      False)
     patcher.patch()
     try:
         o2 = o1.set('b', 1)
     finally:
         patcher.restore()
     diff = create_diff(o1, o2)
     self.assertRaises(
         InvariantException,
         diff.apply,
         o1,
     )
Example #6
 def test_put_reannounceResetsTimer(self):
     # Replace the time function of the datastore module
     # so that we can artificially speed up time
     monkey_patcher = MonkeyPatcher()
     c = clock()
     c.set(0)
     monkey_patcher.addPatch(datastore, "time", c)
     # Set peer_timeout to 5 seconds
     monkey_patcher.addPatch(constants, "peer_timeout", 5)
     monkey_patcher.patch()
     # Insert a node and verify it is within the datastore
     m = self.datastore(self.reactor)
     infohash = 5
     expected_peer = ("127.0.0.1", 5151)
     m.put(infohash, expected_peer)
     peers = m.get(infohash)
     # Iterate over a 1 element list
     self.assertEqual(1, len(peers))
     for peer in peers:
         self.assertEqual(expected_peer, peer)
     # Change the time and reannounce the peer
     # (make sure the cleanup function doesn't
     #  remove the peer yet)
     c.set(4)
     m.put(infohash, expected_peer)
     peers = m.get(infohash)
     self.assertEqual(1, len(peers))
     m._cleanup(infohash, expected_peer)
     c.set(8)
     m._cleanup(infohash, expected_peer)
     peers = m.get(infohash)
     self.assertEqual(1, len(peers))
     c.set(9)
     m._cleanup(infohash, expected_peer)
     peers = m.get(infohash)
     self.assertEqual(0, len(peers))
     monkey_patcher.restore()
Example #7
    def build(self, projectName, projectURL, sourceURL, packagePath,
              outputPath):
        """
        Call pydoctor's entry point with options which will generate HTML
        documentation for the specified package's API.

        @type projectName: C{str}
        @param projectName: The name of the package for which to generate
            documentation.

        @type projectURL: C{str}
        @param projectURL: The location (probably an HTTP URL) of the project
            on the web.

        @type sourceURL: C{str}
        @param sourceURL: The location (probably an HTTP URL) of the root of
            the source browser for the project.

        @type packagePath: L{FilePath}
        @param packagePath: The path to the top-level of the package named by
            C{projectName}.

        @type outputPath: L{FilePath}
        @param outputPath: An existing directory to which the generated API
            documentation will be written.
        """
        intersphinxes = []

        for intersphinx in intersphinxURLs:
            intersphinxes.append("--intersphinx")
            intersphinxes.append(intersphinx)

        # Super awful monkeypatch that will selectively use our templates.
        from pydoctor.templatewriter import util
        originalTemplatefile = util.templatefile

        def templatefile(filename):
            if filename in ["summary.html", "index.html", "common.html"]:
                twistedPythonDir = FilePath(__file__).parent()
                templatesDir = twistedPythonDir.child("_pydoctortemplates")
                return templatesDir.child(filename).path
            else:
                return originalTemplatefile(filename)

        monkeyPatch = MonkeyPatcher((util, "templatefile", templatefile))
        monkeyPatch.patch()

        from pydoctor.driver import main

        args = [u"--project-name", projectName,
                u"--project-url", projectURL,
                u"--system-class", u"twisted.python._pydoctor.TwistedSystem",
                u"--project-base-dir", packagePath.parent().path,
                u"--html-viewsource-base", sourceURL,
                u"--add-package", packagePath.path,
                u"--html-output", outputPath.path,
                u"--html-write-function-pages", u"--quiet", u"--make-html",
               ] + intersphinxes
        args = [arg.encode("utf-8") for arg in args]
        main(args)

        monkeyPatch.restore()
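
The templatefile wrapper above is a common monkey-patching shape: intercept a handful of inputs and delegate everything else to the saved original. Generalized as a sketch (the override_some name is illustrative):

def override_some(original, overrides):
    """Return a callable that answers from `overrides` when it can
    and falls back to `original` otherwise."""
    def wrapper(key):
        if key in overrides:
            return overrides[key]
        return original(key)
    return wrapper

# Roughly equivalent to the patch above:
# templatefile = override_some(
#     util.templatefile,
#     {name: templatesDir.child(name).path
#      for name in ["summary.html", "index.html", "common.html"]})
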
Example #8
class MonkeyPatcherTest(unittest.TestCase):
    """
    Tests for L{MonkeyPatcher} monkey-patching class.
    """

    def setUp(self):
        self.testObject = TestObj()
        self.originalObject = TestObj()
        self.monkeyPatcher = MonkeyPatcher()


    def test_empty(self):
        """
        A monkey patcher without patches shouldn't change a thing.
        """
        self.monkeyPatcher.patch()

        # We can't assert that all state is unchanged, but at least we can
        # check our test object.
        self.assertEqual(self.originalObject.foo, self.testObject.foo)
        self.assertEqual(self.originalObject.bar, self.testObject.bar)
        self.assertEqual(self.originalObject.baz, self.testObject.baz)


    def test_constructWithPatches(self):
        """
        Constructing a L{MonkeyPatcher} with patches should add all of the
        given patches to the patch list.
        """
        patcher = MonkeyPatcher((self.testObject, 'foo', 'haha'),
                                (self.testObject, 'bar', 'hehe'))
        patcher.patch()
        self.assertEqual('haha', self.testObject.foo)
        self.assertEqual('hehe', self.testObject.bar)
        self.assertEqual(self.originalObject.baz, self.testObject.baz)


    def test_patchExisting(self):
        """
        Patching an attribute that exists sets it to the value defined in the
        patch.
        """
        self.monkeyPatcher.addPatch(self.testObject, 'foo', 'haha')
        self.monkeyPatcher.patch()
        self.assertEqual(self.testObject.foo, 'haha')


    def test_patchNonExisting(self):
        """
        Patching a non-existing attribute fails with an C{AttributeError}.
        """
        self.monkeyPatcher.addPatch(self.testObject, 'nowhere',
                                    'blow up please')
        self.assertRaises(AttributeError, self.monkeyPatcher.patch)


    def test_patchAlreadyPatched(self):
        """
        Adding a patch for an object and attribute that already have a patch
        overrides the existing patch.
        """
        self.monkeyPatcher.addPatch(self.testObject, 'foo', 'blah')
        self.monkeyPatcher.addPatch(self.testObject, 'foo', 'BLAH')
        self.monkeyPatcher.patch()
        self.assertEqual(self.testObject.foo, 'BLAH')
        self.monkeyPatcher.restore()
        self.assertEqual(self.testObject.foo, self.originalObject.foo)


    def test_restoreTwiceIsANoOp(self):
        """
        Restoring an already-restored monkey patch is a no-op.
        """
        self.monkeyPatcher.addPatch(self.testObject, 'foo', 'blah')
        self.monkeyPatcher.patch()
        self.monkeyPatcher.restore()
        self.assertEqual(self.testObject.foo, self.originalObject.foo)
        self.monkeyPatcher.restore()
        self.assertEqual(self.testObject.foo, self.originalObject.foo)


    def test_runWithPatchesDecoration(self):
        """
        runWithPatches should run the given callable, passing in all arguments
        and keyword arguments, and return the return value of the callable.
        """
        log = []

        def f(a, b, c=None):
            log.append((a, b, c))
            return 'foo'

        result = self.monkeyPatcher.runWithPatches(f, 1, 2, c=10)
        self.assertEqual('foo', result)
        self.assertEqual([(1, 2, 10)], log)


    def test_repeatedRunWithPatches(self):
        """
        We should be able to call the same function with runWithPatches more
        than once. All patches should apply for each call.
        """
        def f():
            return (self.testObject.foo, self.testObject.bar,
                    self.testObject.baz)

        self.monkeyPatcher.addPatch(self.testObject, 'foo', 'haha')
        result = self.monkeyPatcher.runWithPatches(f)
        self.assertEqual(
            ('haha', self.originalObject.bar, self.originalObject.baz), result)
        result = self.monkeyPatcher.runWithPatches(f)
        self.assertEqual(
            ('haha', self.originalObject.bar, self.originalObject.baz),
            result)


    def test_runWithPatchesRestores(self):
        """
        C{runWithPatches} should restore the original values after the function
        has executed.
        """
        self.monkeyPatcher.addPatch(self.testObject, 'foo', 'haha')
        self.assertEqual(self.originalObject.foo, self.testObject.foo)
        self.monkeyPatcher.runWithPatches(lambda: None)
        self.assertEqual(self.originalObject.foo, self.testObject.foo)


    def test_runWithPatchesRestoresOnException(self):
        """
        Test runWithPatches restores the original values even when the function
        raises an exception.
        """
        def _():
            self.assertEqual(self.testObject.foo, 'haha')
            self.assertEqual(self.testObject.bar, 'blahblah')
            raise RuntimeError("Something went wrong!")

        self.monkeyPatcher.addPatch(self.testObject, 'foo', 'haha')
        self.monkeyPatcher.addPatch(self.testObject, 'bar', 'blahblah')

        self.assertRaises(RuntimeError, self.monkeyPatcher.runWithPatches, _)
        self.assertEqual(self.testObject.foo, self.originalObject.foo)
        self.assertEqual(self.testObject.bar, self.originalObject.bar)
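
The semantics these tests pin down (an ordered patch list, the last-added patch winning, and restore() being safe to call twice) need only a little state. A minimal reimplementation sketch, for illustration rather than Twisted's actual source:

class MiniMonkeyPatcher(object):
    def __init__(self, *patches):
        self._patches = list(patches)  # (obj, name, value) triples
        self._originals = []

    def addPatch(self, obj, name, value):
        self._patches.append((obj, name, value))

    def patch(self):
        for obj, name, value in self._patches:
            # getattr raises AttributeError for a missing attribute,
            # matching test_patchNonExisting.
            self._originals.append((obj, name, getattr(obj, name)))
            setattr(obj, name, value)

    def restore(self):
        # Undo in reverse order; draining the list makes a second
        # restore() a no-op, matching test_restoreTwiceIsANoOp.
        while self._originals:
            obj, name, original = self._originals.pop()
            setattr(obj, name, original)

    def runWithPatches(self, f, *args, **kwargs):
        self.patch()
        try:
            return f(*args, **kwargs)
        finally:
            self.restore()
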
Example #9
    def _build_and_test_api(self, listening_port):
        """
        Build the CinderBlockDeviceAPI configured to connect to the Mimic
        server at ``listening_port``.
        Patch twisted.web to force the mimic server to drop incoming
        connections.
        Attempt to interact with the disabled API server first, then
        re-enable it to show that the API will re-authenticate even
        after an initial failure.
        """
        import twisted.web.http
        patch = MonkeyPatcher()
        patch.addPatch(
            twisted.web.http.HTTPChannel,
            'connectionMade',
            lambda self: self.transport.loseConnection()
        )
        self.addCleanup(patch.restore)
        backend, api_args = backend_and_api_args_from_configuration({
            "backend": "openstack",
            "auth_plugin": "rackspace",
            "region": "ORD",
            "username": "******",
            "api_key": "12345",
            "auth_url": "http://127.0.0.1:{}/identity/v2.0".format(
                listening_port.getHost().port
            ),
        })
        # Cause the Mimic server to close incoming connections
        patch.patch()
        api = get_api(
            backend=backend,
            api_args=api_args,
            reactor=object(),
            cluster_id=make_cluster_id(TestTypes.FUNCTIONAL),
        )
        # List volumes with API patched to close incoming connections.
        try:
            result = api.list_volumes()
        except ConnectFailure:
            # Can't use self.assertRaises here because that would call the
            # function in the main thread.
            pass
        else:
            self.fail(
                'ConnectFailure was not raised. '
                'Got {!r} instead.'.format(
                    result
                )
            )
        finally:
            # Re-enable the Mimic server.
            # The API operations that follow should succeed.
            patch.restore()

        # List volumes with API re-enabled
        result = api.list_volumes()
        self.assertEqual([], result)

        # Close the connection from the client side so that the mimic server
        # can close down without leaving behind lingering persistent HTTP
        # channels which cause dirty reactor errors.
        # XXX: This is gross. Perhaps we need ``IBlockDeviceAPI.close``
        (api
         .cinder_volume_manager
         ._original
         ._client_v2
         ._cinder_volumes
         .api
         .client
         .session
         .session.close())
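
Note the belt-and-braces cleanup above: restore() runs in the finally block and again via addCleanup. That is safe precisely because restoring an already-restored MonkeyPatcher is a no-op, the property pinned down by test_restoreTwiceIsANoOp in the previous example. The idiom, compressed (the patch target here is hypothetical):

patch = MonkeyPatcher()
patch.addPatch(some_module, "attr", replacement)  # hypothetical target
self.addCleanup(patch.restore)  # safety net; runs even on early failure
patch.patch()
try:
    do_risky_thing()
finally:
    patch.restore()  # the cleanup-time restore() is then a no-op
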
Example #10
class KRPC_Iterator_TestCase(unittest.TestCase):
    def setUp(self):
        self.monkey_patcher = MonkeyPatcher()
        self.monkey_patcher.addPatch(krpc_sender, "reactor", HollowReactor())
        self.monkey_patcher.patch()
        self.k_iter = KRPC_Iterator()
        self.k_iter.transport = HollowTransport()
        self.target_id = 5

    def tearDown(self):
        self.monkey_patcher.restore()

    #
    # Find iterate test cases 
    #
    def test_find_iterate_properNumberOfQueriesSent_noNodesInRT(self):
        self._check_k_iter_sendsProperNumberOfQueries_noNodesInRT(
                self.k_iter.find_iterate)

    def test_find_iterate_firesAfterAllQueriesFire(self):
        self._check_k_iter_firesAfterAllQueriesFire(
                self.k_iter.find_iterate)

    def test_find_iterate_usesNodesFromRoutingTable(self):
        self._check_k_iter_usesNodesFromRoutingTable(
                self.k_iter.find_iterate)

    def test_find_iterate_noNodesRaisesIterationError(self):
        self._check_k_iter_raisesIterationErrorOnNoSeedNodes(
                self.k_iter.find_iterate)

    def test_find_iterate_allQueriesTimeoutRaisesIterationError(self):
        self._check_k_iter_failsWhenAllQueriesTimeOut(
                self.k_iter.find_iterate)

    def test_find_iterate_returnsNewNodes(self):
        # deferreds is a (query, deferred) tuple list
        (deferreds, d) = self._iterate_and_returnQueriesAndDeferreds(
                self.k_iter.find_iterate)
        num_queries = len(deferreds)
        # Use any nodes as result nodes (even the nodes themselves)
        result_nodes = test_nodes[:num_queries]
        # Set up dummy node_id's
        node_id = 1
        for (query, deferred), node in zip(deferreds, result_nodes):
            response = query.build_response(nodes=[node])
            response._from = node_id
            node_id += 1
            deferred.callback(response)
        expected_nodes = set(result_nodes)
        d.addErrback(self._fail_errback)
        d.addCallback(self._compare_nodes, expected_nodes)
        # Make sure we don't accidentally slip past an
        # uncalled deferred
        self.assertTrue(d.called)

    #
    # Get iterate test cases
    #
    def test_get_iterate_properNumberOfQueriesSent_noNodesInRT(self):
        self._check_k_iter_sendsProperNumberOfQueries_noNodesInRT(
                self.k_iter.get_iterate)

    def test_get_iterate_firesAfterAllQueriesFire(self):
        self._check_k_iter_firesAfterAllQueriesFire(
                self.k_iter.get_iterate)

    def test_get_iterate_usesNodesFromRoutingTable(self):
        self._check_k_iter_usesNodesFromRoutingTable(
                self.k_iter.get_iterate)

    def test_get_iterate_noNodesRaisesIterationError(self):
        self._check_k_iter_raisesIterationErrorOnNoSeedNodes(
                self.k_iter.get_iterate)

    def test_get_iterate_allQueriesTimeoutRaisesIterationError(self):
        self._check_k_iter_failsWhenAllQueriesTimeOut(
                self.k_iter.get_iterate)

    def test_get_iterate_returnsNewNodesAndPeers(self):
        # deferreds is a (query, deferred) tuple list
        # where each tuple corresponds to one outbound query
        # and deferred result
        #
        # and d is a deferred result of the iter_func
        (deferreds, d) = self._iterate_and_returnQueriesAndDeferreds(
                self.k_iter.get_iterate)
        num_queries = len(deferreds)

        # Use any nodes as result nodes (even the nodes themselves)
        result_nodes = test_nodes[:num_queries]
        result_peers = test_peers[:num_queries]

        # Set up dummy node_id's
        node_id = 1

        # Simulate the event that every outbound
        # query received a result (by making dummy valid
        # responses and feeding them into the deferred)
        for (query, deferred), node, peer in \
            zip(deferreds, result_nodes, result_peers):
            response = query.build_response(nodes=[node], peers=[peer])
            response._from = node_id
            node_id += 1
            deferred.callback(response)

        expected_nodes = result_nodes
        expected_peers = result_peers
        d.addErrback(self._fail_errback)
        d.addCallback(self._compare_peers, expected_peers)
        d.addCallback(self._compare_nodes, expected_nodes)
        # Make sure we don't accidentally slip past an
        # uncalled deferred
        self.assertTrue(d.called)

    # Auxiliary test functions that are
    # generalizations of the test cases above
    def _check_k_iter_sendsProperNumberOfQueries_noNodesInRT(self, iter_func):
        sendQuery = self.k_iter.sendQuery
        self.k_iter.sendQuery = Counter(sendQuery)
        expected_num_queries = 15
        iter_func(self.target_id, test_nodes[:expected_num_queries])
        self.assertEqual(expected_num_queries, self.k_iter.sendQuery.count)

    def _check_k_iter_firesAfterAllQueriesFire(self, iter_func):
        """
        Ensure one 'iterative' query fires after all its subqueries fire
        """
        sendQuery = self.k_iter.sendQuery
        self.k_iter.sendQuery = DeferredGrabber(sendQuery)
        num_queries = 5
        d = iter_func(self.target_id, test_nodes[:num_queries])
        deferreds = self.k_iter.sendQuery.deferreds
        test_node_id = 1
        # Make sure that `num_queries` queries were sent
        self.assertEqual(num_queries, len(deferreds))
        for (query, deferred) in deferreds:
            # Grab any node as a response node
            nodes = [test_nodes[55]]
            # Make a valid response node to feed
            # into the subdeferreds
            response = query.build_response(nodes=nodes)
            # Any node id works
            response._from = test_node_id
            test_node_id += 1
            if query.rpctype == "get_peers":
                response.token = 555
            deferred.callback(response)
        # After "receiving a response" to every outgoing
        # query, our main deferred should fire
        self.assertTrue(d.called)

    def _check_k_iter_usesNodesFromRoutingTable(self, iter_func):
        get_closest_nodes = self.k_iter.routing_table.get_closest_nodes
        self.k_iter.routing_table.get_closest_nodes = \
            Counter(get_closest_nodes)
        # If we don't supply any testing nodes,
        # the protocol should check its routing table
        d = iter_func(self.target_id)
        d.addErrback(self._silence_iteration_error)
        looked_for_nodes = \
                self.k_iter.routing_table.get_closest_nodes.count > 0
        self.assertTrue(looked_for_nodes)

    def _check_k_iter_raisesIterationErrorOnNoSeedNodes(self, iter_func):
        d = iter_func(self.target_id)
        d.addCallbacks(callback=self._ensure_iteration_error_callback,
                errback=self._ensure_iteration_error_errback)

    def _ensure_iteration_error_errback(self, failure):
        isnt_iteration_error = failure.check(IterationError) is None
        if isnt_iteration_error:
            self.fail("KRPC_Iterator threw an error that wasn't " +
                    "an IterationError")

    def _ensure_iteration_error_callback(self, _ignored_result):
        self.fail("KRPC_Iterator did not throw an IterationError " +
                "and was incorrectly successful instead")

    def _check_k_iter_failsWhenAllQueriesTimeOut(self, iter_func):
        sendQuery = self.k_iter.sendQuery
        self.k_iter.sendQuery = DeferredGrabber(sendQuery)
        num_queries = 5
        d = iter_func(self.target_id, test_nodes[:num_queries])
        deferreds = self.k_iter.sendQuery.deferreds

        # Make sure an IterationError is thrown once we
        # artificially time out all queries
        d.addCallbacks(callback=self._ensure_iteration_error_callback,
                errback=self._ensure_iteration_error_errback)

        # Timeout all queries
        for (query, deferred) in deferreds:
            deferred.errback(TimeoutError())
        
    def _compare_nodes(self, result_node_list, expected_nodes):
        # Assert that our resulting list of nodes
        # matches what we expected
        for node in result_node_list:
            self.assertTrue(node in expected_nodes)
        self.assertEqual(len(expected_nodes),
                len(result_node_list))

    def _compare_peers(self, result, expected_peers):
        (result_nodes, result_peers) = result
        self.assertEqual(set(expected_peers), set(result_peers))
        # Return the nodes, since the next callback
        # will check the expected nodes
        return result_nodes

    def _fail_errback(self, failure):
        exception = failure.value
        self.fail("KRPC_Iterator failed when it shouldn't have: " 
                + str(exception))

    def _iterate_and_returnQueriesAndDeferreds(self, iter_func):
        # Capture all outbound queries
        # and all deferreds
        sendQuery = self.k_iter.sendQuery
        self.k_iter.sendQuery = DeferredGrabber(sendQuery)
        # Use the first 10 nodes as our seeds
        d = iter_func(self.target_id, test_nodes[:10])
        deferreds = self.k_iter.sendQuery.deferreds
        return (deferreds, d)

    def _silence_iteration_error(self, failure):
        failure.trap(IterationError)
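
Counter and DeferredGrabber here are small callable wrappers swapped in over sendQuery so the tests can observe outbound traffic; they come from this project's test utilities. A sketch of what the counting wrapper plausibly looks like:

class Counter(object):
    """Wrap a callable (or nothing) and count invocations."""
    def __init__(self, func=None):
        self.func = func
        self.count = 0

    def __call__(self, *args, **kwargs):
        self.count += 1
        if self.func is not None:
            return self.func(*args, **kwargs)

    def reset(self):
        self.count = 0
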
Example #11
    def _build_and_test_api(self, listening_port):
        """
        Build the CinderBlockDeviceAPI configured to connect to the Mimic
        server at ``listening_port``.
        Patch twisted.web to force the mimic server to drop incoming
        connections.
        Attempt to interact with the disabled API server first, then
        re-enable it to show that the API will re-authenticate even
        after an initial failure.
        """
        import twisted.web.http
        patch = MonkeyPatcher()
        patch.addPatch(twisted.web.http.HTTPChannel, 'connectionMade',
                       lambda self: self.transport.loseConnection())
        self.addCleanup(patch.restore)
        backend, api_args = backend_and_api_args_from_configuration({
            "backend": "openstack",
            "auth_plugin": "rackspace",
            "region": "ORD",
            "username": "******",
            "api_key": "12345",
            "auth_url": "http://127.0.0.1:{}/identity/v2.0".format(
                listening_port.getHost().port),
        })
        # Cause the Mimic server to close incoming connections
        patch.patch()
        api = get_api(
            backend=backend,
            api_args=api_args,
            reactor=object(),
            cluster_id=make_cluster_id(TestTypes.FUNCTIONAL),
        )
        # List volumes with API patched to close incoming connections.
        try:
            result = api.list_volumes()
        except ConnectFailure:
            # Can't use self.assertRaises here because that would call the
            # function in the main thread.
            pass
        else:
            self.fail('ConnectFailure was not raised. '
                      'Got {!r} instead.'.format(result))
        finally:
            # Re-enable the Mimic server.
            # The API operations that follow should succeed.
            patch.restore()

        # List volumes with API re-enabled
        result = api.list_volumes()
        self.assertEqual([], result)

        # Close the connection from the client side so that the mimic server
        # can close down without leaving behind lingering persistent HTTP
        # channels which cause dirty reactor errors.
        # XXX: This is gross. Perhaps we need ``IBlockDeviceAPI.close``
        (api.cinder_volume_manager._original._client_v2._cinder_volumes.api.
         client.session.session.close())
Example #12
class KRPC_Iterator_TestCase(unittest.TestCase):
    # TODO
    # 
    # This inheritance and patching pattern is messy, complex,
    # and doesn't make for maintainable code.
    #
    # Refactor it so that KRPC_Sender has a single reactor
    # reference bound within its constructor (at definition time
    # as a default argument). This way, you can simply pass
    # in a hollow reactor instead of hacking it in.
    #
    # What about KRPC_Responder and KRPC_Iterator?
    #   - A pass through argument that floats up through
    #       the constructors
    # TODO
    def setUp(self):
        self.monkey_patcher = MonkeyPatcher()
        self.monkey_patcher.addPatch(krpc_sender, "reactor", HollowReactor())
        self.monkey_patcher.patch()
        self.k_iter = KRPC_Iterator()
        self.k_iter.transport = HollowTransport()
        self.target_id = 5

    def tearDown(self):
        self.monkey_patcher.restore()

    #
    # Find iterate test cases 
    #
    def test_find_iterate_properNumberOfQueriesSent_noNodesInRT(self):
        self._check_k_iter_sendsProperNumberOfQueries_noNodesInRT(
                self.k_iter.find_iterate)

    def test_find_iterate_firesAfterAllQueriesFire(self):
        self._check_k_iter_firesAfterAllQueriesFire(
                self.k_iter.find_iterate)

    def test_find_iterate_usesNodesFromRoutingTable(self):
        self._check_k_iter_usesNodesFromRoutingTable(
                self.k_iter.find_iterate)

    def test_find_iterate_noNodesRaisesIterationError(self):
        self._check_k_iter_raisesIterationErrorOnNoSeedNodes(
                self.k_iter.find_iterate)

    def test_find_iterate_allQueriesTimeoutRaisesIterationError(self):
        self._check_k_iter_failsWhenAllQueriesTimeOut(
                self.k_iter.find_iterate)

    def test_find_iterate_returnsNewNodes(self):
        # deferreds is a (query, deferred) tuple list
        (deferreds, d) = self._iterate_and_returnQueriesAndDeferreds(
                self.k_iter.find_iterate)
        num_queries = len(deferreds)
        # Use any nodes as result nodes (even the nodes themselves)
        result_nodes = test_nodes[:num_queries]
        # Set up dummy node_id's
        node_id = 1
        for (query, deferred), node in zip(deferreds, result_nodes):
            response = query.build_response(nodes=[node])
            response._from = node_id
            node_id += 1
            deferred.callback(response)
        expected_nodes = set(result_nodes)
        d.addErrback(self._fail_errback)
        d.addCallback(self._compare_nodes, expected_nodes)
        # Make sure we don't accidentally slip past an
        # uncalled deferred
        self.assertTrue(d.called)

    #
    # Get iterate test cases
    #
    def test_get_iterate_properNumberOfQueriesSent_noNodesInRT(self):
        self._check_k_iter_sendsProperNumberOfQueries_noNodesInRT(
                self.k_iter.get_iterate)

    def test_get_iterate_firesAfterAllQueriesFire(self):
        self._check_k_iter_firesAfterAllQueriesFire(
                self.k_iter.get_iterate)

    def test_get_iterate_usesNodesFromRoutingTable(self):
        self._check_k_iter_usesNodesFromRoutingTable(
                self.k_iter.get_iterate)

    def test_get_iterate_noNodesRaisesIterationError(self):
        self._check_k_iter_raisesIterationErrorOnNoSeedNodes(
                self.k_iter.get_iterate)

    def test_get_iterate_allQueriesTimeoutRaisesIterationError(self):
        self._check_k_iter_failsWhenAllQueriesTimeOut(
                self.k_iter.get_iterate)

    def test_get_iterate_returnsNewNodesAndPeers(self):
        # deferreds is a (query, deferred) tuple list
        # where each tuple corresponds to one outbound query
        # and deferred result
        #
        # and d is a deferred result of the iter_func
        (deferreds, d) = self._iterate_and_returnQueriesAndDeferreds(
                self.k_iter.get_iterate)
        num_queries = len(deferreds)

        # Use any nodes as result nodes (even the nodes themselves)
        result_nodes = test_nodes[:num_queries]
        result_peers = test_peers[:num_queries]

        # Set up dummy node_id's
        node_id = 1

        # Simulate the event that every outbound
        # query received a result (by making dummy valid
        # responses and feeding them into the deferred)
        for (query, deferred), node, peer in \
            zip(deferreds, result_nodes, result_peers):
            response = query.build_response(nodes=[node], peers=[peer])
            response._from = node_id
            node_id += 1
            deferred.callback(response)

        expected_nodes = result_nodes
        expected_peers = result_peers
        d.addErrback(self._fail_errback)
        d.addCallback(self._compare_peers, expected_peers)
        d.addCallback(self._compare_nodes, expected_nodes)
        # Make sure we don't accidentally slip past an
        # uncalled deferred
        self.assertTrue(d.called)

    # Auxiliary test functions that are
    # generalizations of the test cases above
    def _check_k_iter_sendsProperNumberOfQueries_noNodesInRT(self, iter_func):
        sendQuery = self.k_iter.sendQuery
        self.k_iter.sendQuery = Counter(sendQuery)
        expected_num_queries = 15
        iter_func(self.target_id, test_nodes[:expected_num_queries])
        self.assertEqual(expected_num_queries, self.k_iter.sendQuery.count)

    def _check_k_iter_firesAfterAllQueriesFire(self, iter_func):
        """
        Ensure one 'iterative' query fires after all its subqueries fire
        """
        sendQuery = self.k_iter.sendQuery
        self.k_iter.sendQuery = DeferredGrabber(sendQuery)
        num_queries = 5
        d = iter_func(self.target_id, test_nodes[:num_queries])
        deferreds = self.k_iter.sendQuery.deferreds
        test_node_id = 1
        # Make sure that `num_queries` queries were sent
        self.assertEqual(num_queries, len(deferreds))
        for (query, deferred) in deferreds:
            # Grab any node as a response node
            nodes = [test_nodes[55]]
            # Make a valid response node to feed
            # into the subdeferreds
            response = query.build_response(nodes=nodes)
            # Any node id works
            response._from = test_node_id
            test_node_id += 1
            if query.rpctype == "get_peers":
                response.token = 555
            deferred.callback(response)
        # After "receiving a response" to every outgoing
        # query, our main deferred should fire
        self.assertTrue(d.called)

    def _check_k_iter_usesNodesFromRoutingTable(self, iter_func):
        get_closest_nodes = self.k_iter.routing_table.get_closest_nodes
        self.k_iter.routing_table.get_closest_nodes = \
            Counter(get_closest_nodes)
        # If we don't supply any testing nodes,
        # the protocol should check its routing table
        d = iter_func(self.target_id)
        d.addErrback(self._silence_iteration_error)
        looked_for_nodes = \
                self.k_iter.routing_table.get_closest_nodes.count > 0
        self.assertTrue(looked_for_nodes)

    def _check_k_iter_raisesIterationErrorOnNoSeedNodes(self, iter_func):
        d = iter_func(self.target_id)
        d.addCallbacks(callback=self._ensure_iteration_error_callback,
                errback=self._ensure_iteration_error_errback)

    def _ensure_iteration_error_errback(self, failure):
        isnt_iteration_error = failure.check(IterationError) is None
        if isnt_iteration_error:
            self.fail("KRPC_Iterator threw an error that wasn't " +
                    "an IterationError")

    def _ensure_iteration_error_callback(self, _ignored_result):
        self.fail("KRPC_Iterator did not throw an IterationError " +
                "and was incorrectly successful instead")

    def _check_k_iter_failsWhenAllQueriesTimeOut(self, iter_func):
        sendQuery = self.k_iter.sendQuery
        self.k_iter.sendQuery = DeferredGrabber(sendQuery)
        num_queries = 5
        d = iter_func(self.target_id, test_nodes[:num_queries])
        deferreds = self.k_iter.sendQuery.deferreds

        # Make sure an IterationError is thrown once we
        # artificially time out all queries
        d.addCallbacks(callback=self._ensure_iteration_error_callback,
                errback=self._ensure_iteration_error_errback)

        # Timeout all queries
        for (query, deferred) in deferreds:
            deferred.errback(TimeoutError())
        
    def _compare_nodes(self, result_node_list, expected_nodes):
        # Assert that our resulting list of nodes
        # matches what we expected
        for node in result_node_list:
            self.assertTrue(node in expected_nodes)
        self.assertEqual(len(expected_nodes),
                len(result_node_list))

    def _compare_peers(self, result, expected_peers):
        (result_nodes, result_peers) = result
        self.assertEqual(set(expected_peers), set(result_peers))
        # Return the nodes, since the next callback
        # will check the expected nodes
        return result_nodes

    def _fail_errback(self, failure):
        exception = failure.value
        self.fail("KRPC_Iterator failed when it shouldn't have: " 
                + str(exception))

    def _iterate_and_returnQueriesAndDeferreds(self, iter_func):
        # Capture all outbound queries
        # and all deferreds
        sendQuery = self.k_iter.sendQuery
        self.k_iter.sendQuery = DeferredGrabber(sendQuery)
        # Use the first 10 nodes as our seeds
        d = iter_func(self.target_id, test_nodes[:10])
        deferreds = self.k_iter.sendQuery.deferreds
        return (deferreds, d)

    def _silence_iteration_error(self, failure):
        failure.trap(IterationError)
Example #13
class RateLimiterPatcherTestCase(unittest.TestCase):
    def setUp(self):
        self.clock = Clock()
        self.monkey_patcher = MonkeyPatcher()
        self.monkey_patcher.addPatch(rate_limiter.time, "time", self.clock)
        self.monkey_patcher.patch()

        self.address = ("127.0.0.1", 55)
        self.query = Query()
        self.query.rpctype = "ping"
        self.query._from = 15
        self.query._transaction_id = 99
        self.packet = krpc_coder.encode(self.query)
        # Patch in hardcoded values for the bandwidth
        # limits so that changing the constants will
        # not affect the usefulness of this test case
        # (The global bandwidth is set to 3 standard ping queries)
        # (The per user bandwidth is set to 1 standard ping query)
        self.monkey_patcher.addPatch(rate_limiter.constants,
                "global_bandwidth_rate", 3 * len(self.packet))
        self.monkey_patcher.addPatch(rate_limiter.constants,
                "host_bandwidth_rate", 1 * len(self.packet))
        self.monkey_patcher.patch()

    def tearDown(self):
        self.monkey_patcher.restore()

    def _patched_sender(self):
        ksender = KRPC_Sender(TreeRoutingTable, 2**50)
        ksender.transport = HollowTransport()
        # Start the protocol to simulate
        # a regular environment
        rate_limited_proto = RateLimiter_Patcher(ksender)
        rate_limited_proto.startProtocol()
        return rate_limited_proto

    def test_inbound_overflowHostAndReset(self):
        """
        Make sure that we cannot overflow our inbound host bandwidth limit

        @see dhtbot.constants.host_bandwidth_rate

        """
        rate_limited_proto = self._patched_sender()
        counter = Counter()
        rate_limited_proto.krpcReceived = counter
        # One packet should be accepted without problems
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), self.address)
        self.assertEqual(1, counter.count)
        counter.reset()
        # The second packet should be dropped
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), self.address)
        self.assertEqual(0, counter.count)
        # Reset the rate limiter and the next packet should
        # be accepted
        self.clock.set(1)
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), self.address)
        self.assertEqual(1, counter.count)

    def test_inbound_overflowGlobalAndReset(self):
        """
        Make sure that we cannot overflow our inbound global bandwidth limit

        @see dhtbot.constants.global_bandwidth_rate

        """
        address1 = ("127.0.0.1", 66)
        address2 = ("127.0.0.1", 76)
        address3 = ("127.0.0.1", 86)
        address4 = ("127.0.0.1", 555)
        rate_limited_proto = self._patched_sender()
        counter = Counter()
        rate_limited_proto.krpcReceived = counter
        # The first three packets should be accepted without
        # any problems
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), address1)
        self.assertEqual(1, counter.count)
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), address2)
        self.assertEqual(2, counter.count)
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), address3)
        self.assertEqual(3, counter.count)
        # The fourth packet should be dropped
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), address4)
        self.assertEqual(3, counter.count)
        # Reset the rate limiter and the next packet should be
        # accepted
        self.clock.set(1)
        rate_limited_proto.datagramReceived(
                krpc_coder.encode(self.query), self.address)
        self.assertEqual(4, counter.count)

    def test_outbound_overflowHostAndReset(self):
        """
        Make sure that we cannot overflow our outbound host bandwidth limit

        @see dhtbot.constants.host_bandwidth_rate

        """
        rate_limited_proto = self._patched_sender()
        # The first packet should go through without any problems
        rate_limited_proto.sendKRPC(self.query, self.address)
        self.assertTrue(
                rate_limited_proto._original.transport._packet_was_sent())
        # Second packet should not go through
        rate_limited_proto.sendKRPC(self.query, self.address)
        self.assertFalse(
                rate_limited_proto._original.transport._packet_was_sent())
        # Update the clock (resetting the rate limiter)
        self.clock.set(1)
        # This packet should now go through
        rate_limited_proto.sendKRPC(self.query, self.address)
        self.assertTrue(
                rate_limited_proto._original.transport._packet_was_sent())

    def test_outbound_overflowGlobalAndReset(self):
        """
        Make sure that we cannot overflow our outbound global bandwidth limit

        @see dhtbot.constants.global_bandwidth_rate

        """
        rate_limited_proto = self._patched_sender()
        # Reset the hollow transport
        rate_limited_proto._original.transport._reset()
        # The first three packets should go through without any problems
        address1 = ("127.0.0.1", 66)
        address2 = ("127.0.0.1", 76)
        address3 = ("127.0.0.1", 86)
        address4 = ("127.0.0.1", 555)

        # Packet 1, 2, 3
        for i in range(1, 4):
            rate_limited_proto.sendKRPC(
                    self.query, locals()['address' + str(i)])
            self.assertTrue(
                    rate_limited_proto._original.transport._packet_was_sent())

        # The fourth packet should not go through
        rate_limited_proto.sendKRPC(self.query, address4)
        self.assertFalse(
                rate_limited_proto._original.transport._packet_was_sent())
        # Change the time to reset the rate limiter
        self.clock.set(1)
        # This packet should now go through
        rate_limited_proto.sendKRPC(self.query, self.address)
        self.assertTrue(
                rate_limited_proto._original.transport._packet_was_sent())
Example #14
class MonkeyPatcherTest(unittest.TestCase):
    """
    Tests for L{MonkeyPatcher} monkey-patching class.
    """

    def setUp(self):
        self.testObject = TestObj()
        self.originalObject = TestObj()
        self.monkeyPatcher = MonkeyPatcher()

    def test_empty(self):
        """
        A monkey patcher without patches shouldn't change a thing.
        """
        self.monkeyPatcher.patch()

        # We can't assert that all state is unchanged, but at least we can
        # check our test object.
        self.assertEqual(self.originalObject.foo, self.testObject.foo)
        self.assertEqual(self.originalObject.bar, self.testObject.bar)
        self.assertEqual(self.originalObject.baz, self.testObject.baz)

    def test_constructWithPatches(self):
        """
        Constructing a L{MonkeyPatcher} with patches should add all of the
        given patches to the patch list.
        """
        patcher = MonkeyPatcher((self.testObject, "foo", "haha"), (self.testObject, "bar", "hehe"))
        patcher.patch()
        self.assertEqual("haha", self.testObject.foo)
        self.assertEqual("hehe", self.testObject.bar)
        self.assertEqual(self.originalObject.baz, self.testObject.baz)

    def test_patchExisting(self):
        """
        Patching an attribute that exists sets it to the value defined in the
        patch.
        """
        self.monkeyPatcher.addPatch(self.testObject, "foo", "haha")
        self.monkeyPatcher.patch()
        self.assertEqual(self.testObject.foo, "haha")

    def test_patchNonExisting(self):
        """
        Patching a non-existing attribute fails with an C{AttributeError}.
        """
        self.monkeyPatcher.addPatch(self.testObject, "nowhere", "blow up please")
        self.assertRaises(AttributeError, self.monkeyPatcher.patch)

    def test_patchAlreadyPatched(self):
        """
        Adding a patch for an object and attribute that already have a patch
        overrides the existing patch.
        """
        self.monkeyPatcher.addPatch(self.testObject, "foo", "blah")
        self.monkeyPatcher.addPatch(self.testObject, "foo", "BLAH")
        self.monkeyPatcher.patch()
        self.assertEqual(self.testObject.foo, "BLAH")
        self.monkeyPatcher.restore()
        self.assertEqual(self.testObject.foo, self.originalObject.foo)

    def test_restoreTwiceIsANoOp(self):
        """
        Restoring an already-restored monkey patch is a no-op.
        """
        self.monkeyPatcher.addPatch(self.testObject, "foo", "blah")
        self.monkeyPatcher.patch()
        self.monkeyPatcher.restore()
        self.assertEqual(self.testObject.foo, self.originalObject.foo)
        self.monkeyPatcher.restore()
        self.assertEqual(self.testObject.foo, self.originalObject.foo)

    def test_runWithPatchesDecoration(self):
        """
        runWithPatches should run the given callable, passing in all arguments
        and keyword arguments, and return the return value of the callable.
        """
        log = []

        def f(a, b, c=None):
            log.append((a, b, c))
            return "foo"

        result = self.monkeyPatcher.runWithPatches(f, 1, 2, c=10)
        self.assertEqual("foo", result)
        self.assertEqual([(1, 2, 10)], log)

    def test_repeatedRunWithPatches(self):
        """
        We should be able to call the same function with runWithPatches more
        than once. All patches should apply for each call.
        """

        def f():
            return (self.testObject.foo, self.testObject.bar, self.testObject.baz)

        self.monkeyPatcher.addPatch(self.testObject, "foo", "haha")
        result = self.monkeyPatcher.runWithPatches(f)
        self.assertEqual(("haha", self.originalObject.bar, self.originalObject.baz), result)
        result = self.monkeyPatcher.runWithPatches(f)
        self.assertEqual(("haha", self.originalObject.bar, self.originalObject.baz), result)

    def test_runWithPatchesRestores(self):
        """
        C{runWithPatches} should restore the original values after the function
        has executed.
        """
        self.monkeyPatcher.addPatch(self.testObject, "foo", "haha")
        self.assertEqual(self.originalObject.foo, self.testObject.foo)
        self.monkeyPatcher.runWithPatches(lambda: None)
        self.assertEqual(self.originalObject.foo, self.testObject.foo)

    def test_runWithPatchesRestoresOnException(self):
        """
        Test runWithPatches restores the original values even when the function
        raises an exception.
        """

        def _():
            self.assertEqual(self.testObject.foo, "haha")
            self.assertEqual(self.testObject.bar, "blahblah")
            raise RuntimeError("Something went wrong!")

        self.monkeyPatcher.addPatch(self.testObject, "foo", "haha")
        self.monkeyPatcher.addPatch(self.testObject, "bar", "blahblah")

        self.assertRaises(RuntimeError, self.monkeyPatcher.runWithPatches, _)
        self.assertEqual(self.testObject.foo, self.originalObject.foo)
        self.assertEqual(self.testObject.bar, self.originalObject.bar)
Example #15
    def build(self, projectName, projectURL, sourceURL, packagePath,
              outputPath):
        """
        Call pydoctor's entry point with options which will generate HTML
        documentation for the specified package's API.

        @type projectName: C{str}
        @param projectName: The name of the package for which to generate
            documentation.

        @type projectURL: C{str}
        @param projectURL: The location (probably an HTTP URL) of the project
            on the web.

        @type sourceURL: C{str}
        @param sourceURL: The location (probably an HTTP URL) of the root of
            the source browser for the project.

        @type packagePath: L{FilePath}
        @param packagePath: The path to the top-level of the package named by
            C{projectName}.

        @type outputPath: L{FilePath}
        @param outputPath: An existing directory to which the generated API
            documentation will be written.
        """
        intersphinxes = []

        for intersphinx in intersphinxURLs:
            intersphinxes.append("--intersphinx")
            intersphinxes.append(intersphinx)

        # Super awful monkeypatch that will selectively use our templates.
        from pydoctor.templatewriter import util
        originalTemplatefile = util.templatefile

        def templatefile(filename):
            if filename in ["summary.html", "index.html", "common.html"]:
                twistedPythonDir = FilePath(__file__).parent()
                templatesDir = twistedPythonDir.child("_pydoctortemplates")
                return templatesDir.child(filename).path
            else:
                return originalTemplatefile(filename)

        monkeyPatch = MonkeyPatcher((util, "templatefile", templatefile))
        monkeyPatch.patch()

        from pydoctor.driver import main

        args = [
            u"--project-name",
            projectName,
            u"--project-url",
            projectURL,
            u"--system-class",
            u"twisted.python._pydoctor.TwistedSystem",
            u"--project-base-dir",
            packagePath.parent().path,
            u"--html-viewsource-base",
            sourceURL,
            u"--add-package",
            packagePath.path,
            u"--html-output",
            outputPath.path,
            u"--html-write-function-pages",
            u"--quiet",
            u"--make-html",
        ] + intersphinxes
        args = [arg.encode("utf-8") for arg in args]
        main(args)

        monkeyPatch.restore()
Example #16
class RateLimiterPatcherTestCase(unittest.TestCase):
    def setUp(self):
        self.clock = Clock()
        self.monkey_patcher = MonkeyPatcher()
        self.monkey_patcher.addPatch(rate_limiter.time, "time", self.clock)
        self.monkey_patcher.patch()

        self.address = ("127.0.0.1", 55)
        self.query = Query()
        self.query.rpctype = "ping"
        self.query._from = 15
        self.query._transaction_id = 99
        self.packet = krpc_coder.encode(self.query)
        # Patch in hardcoded values for the bandwidth
        # limits so that changing the constants will
        # not affect the usefulness of this test case
        # (The global bandwidth is set to 3 standard ping queries)
        # (The per user bandwidth is set to 1 standard ping query)
        self.monkey_patcher.addPatch(rate_limiter.constants,
                                     "global_bandwidth_rate",
                                     3 * len(self.packet))
        self.monkey_patcher.addPatch(rate_limiter.constants,
                                     "host_bandwidth_rate",
                                     1 * len(self.packet))
        self.monkey_patcher.patch()

    def tearDown(self):
        self.monkey_patcher.restore()

    def _patched_sender(self):
        ksender = KRPC_Sender(TreeRoutingTable, 2**50)
        ksender.transport = HollowTransport()
        # Start the protocol to simulate
        # a regular environment
        rate_limited_proto = RateLimiter_Patcher(ksender)
        rate_limited_proto.startProtocol()
        return rate_limited_proto

    def test_inbound_overflowHostAndReset(self):
        """
        Make sure that we cannot overflow our inbound host bandwidth limit

        @see dhtbot.constants.host_bandwidth_rate

        """
        rate_limited_proto = self._patched_sender()
        counter = Counter()
        rate_limited_proto.krpcReceived = counter
        # One packet should be accepted without problems
        rate_limited_proto.datagramReceived(krpc_coder.encode(self.query),
                                            self.address)
        self.assertEqual(1, counter.count)
        counter.reset()
        # The second packet should be dropped
        rate_limited_proto.datagramReceived(krpc_coder.encode(self.query),
                                            self.address)
        self.assertEqual(0, counter.count)
        # Reset the rate limiter and the next packet should
        # be accepted
        self.clock.set(1)
        rate_limited_proto.datagramReceived(krpc_coder.encode(self.query),
                                            self.address)
        self.assertEqual(1, counter.count)

    def test_inbound_overflowGlobalAndReset(self):
        """
        Make sure that we cannot overflow our inbound global bandwidth limit

        @see dhtbot.constants.global_bandwidth_rate

        """
        address1 = ("127.0.0.1", 66)
        address2 = ("127.0.0.1", 76)
        address3 = ("127.0.0.1", 86)
        address4 = ("127.0.0.1", 555)
        rate_limited_proto = self._patched_sender()
        counter = Counter()
        rate_limited_proto.krpcReceived = counter
        # The first three packets should be accepted without
        # any problems
        rate_limited_proto.datagramReceived(krpc_coder.encode(self.query),
                                            address1)
        self.assertEqual(1, counter.count)
        rate_limited_proto.datagramReceived(krpc_coder.encode(self.query),
                                            address2)
        self.assertEqual(2, counter.count)
        rate_limited_proto.datagramReceived(krpc_coder.encode(self.query),
                                            address3)
        self.assertEqual(3, counter.count)
        # The fourth packet should be dropped
        rate_limited_proto.datagramReceived(krpc_coder.encode(self.query),
                                            address4)
        self.assertEqual(3, counter.count)
        # Reset the rate limiter and the next packet should be
        # accepted
        self.clock.set(1)
        rate_limited_proto.datagramReceived(krpc_coder.encode(self.query),
                                            self.address)
        self.assertEqual(4, counter.count)

    def test_outbound_overflowHostAndReset(self):
        """
        Make sure that we cannot overflow our outbound host bandwidth limit

        @see dhtbot.constants.host_bandwidth_rate

        """
        rate_limited_proto = self._patched_sender()
        # The first packet should go through without any problems
        rate_limited_proto.sendKRPC(self.query, self.address)
        self.assertTrue(
            rate_limited_proto._original.transport._packet_was_sent())
        # Second packet should not go through
        rate_limited_proto.sendKRPC(self.query, self.address)
        self.assertFalse(
            rate_limited_proto._original.transport._packet_was_sent())
        # Update the clock (resetting the rate limiter)
        self.clock.set(1)
        # This packet should now go through
        rate_limited_proto.sendKRPC(self.query, self.address)
        self.assertTrue(
            rate_limited_proto._original.transport._packet_was_sent())

    def test_outbound_overflowGlobalAndReset(self):
        """
        Make sure that we cannot overflow our outbound global bandwidth limit

        @see dhtbot.constants.global_bandwidth_rate

        """
        rate_limited_proto = self._patched_sender()
        # Reset the hollow transport
        rate_limited_proto._original.transport._reset()
        # The first three packets should go through without any problems
        address1 = ("127.0.0.1", 66)
        address2 = ("127.0.0.1", 76)
        address3 = ("127.0.0.1", 86)
        address4 = ("127.0.0.1", 555)

        # Packet 1, 2, 3
        for i in range(1, 4):
            rate_limited_proto.sendKRPC(self.query,
                                        locals()['address' + str(i)])
            self.assertTrue(
                rate_limited_proto._original.transport._packet_was_sent())

        # The fourth packet should not go through
        rate_limited_proto.sendKRPC(self.query, address4)
        self.assertFalse(
            rate_limited_proto._original.transport._packet_was_sent())
        # Change the time to reset the rate limiter
        self.clock.set(1)
        # This packet should now go through
        rate_limited_proto.sendKRPC(self.query, self.address)
        self.assertTrue(
            rate_limited_proto._original.transport._packet_was_sent())