Example #1
    def test_tick(self):
        """
        ``/mimic/v1.1/tick`` (handled by :func:`MimicRoot.advance_time`)
        advances the clock associated with the service.
        """
        clock = Clock()

        def do():
            do.done = True

        do.done = False
        clock.callLater(3.5, do)
        core = MimicCore(clock, [])
        root = MimicRoot(core, clock).app.resource()
        self.assertEqual(do.done, False)
        jreq = json_request(
            self, root, "POST", "/mimic/v1.1/tick", body={"amount": 3.6}
        )
        [response, json_content] = self.successResultOf(jreq)
        self.assertEqual(response.code, 200)
        expected = {
            'advanced': 3.6,
            'now': '1970-01-01T00:00:03.600000Z',
        }
        self.assertEqual(json_content, expected)
        self.assertEqual(do.done, True)
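The mechanic this test relies on is that ``Clock.advance`` synchronously runs every delayed call whose scheduled time has been reached. A minimal standalone illustration (not part of the original suite):

from twisted.internet.task import Clock

clock = Clock()
fired = []
clock.callLater(3.5, fired.append, True)
clock.advance(3.4)   # 3.4 < 3.5: nothing runs yet
assert fired == []
clock.advance(0.2)   # total 3.6 >= 3.5: the call fires synchronously
assert fired == [True]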
Example #2
def clockIsBroken():
    """
    Return whether twisted.internet.task.Clock has the bug where
    callLater returns the wrong DelayedCall.
    """
    clock = Clock()
    dc1 = clock.callLater(10, lambda: None)
    dc2 = clock.callLater(1, lambda: None)
    return dc1 is dc2
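A guard like this is typically used to skip timing-sensitive tests on affected Twisted versions. A hedged sketch of such usage (the test class and skip message are illustrative, not from the original):

from twisted.trial import unittest

class DelayedCallOrderTests(unittest.TestCase):
    if clockIsBroken():
        skip = "Clock.callLater returns the wrong DelayedCall here"

    def test_ordering(self):
        pass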
Example #3
    def test_stopService_while_retrying(self):
        s, e, f = self.make_reconnector()
        clock = Clock()
        r = s._delayedRetry = clock.callLater(1.0, lambda: None)
        yield s.stopService()
        self.assertTrue(r.cancelled)
        self.assertIdentical(s._delayedRetry, None)
Example #4
class UDPScraper(DatagramProtocol):

    def __init__(self, ip, port):
        self.ip = ip
        self.port = port
        self.expect_connection_response = True
        # Timeout after 15 seconds if nothing received.
        self.timeout_seconds = 15
        self.clock = Clock()
        self.timeout = self.clock.callLater(self.timeout_seconds, self.on_error)

    def on_error(self):
        """
        This method handles everything that needs to be done when something
        goes wrong during the UDP scraping.
        """
        raise RuntimeError("error")

    def stop(self):
        """
        Stops the UDP scraper and closes the socket.
        :return: A deferred that fires once it has closed the connection.
        """
        if self.timeout.active():
            self.timeout.cancel()
        if self.transport:
            return maybeDeferred(self.transport.stopListening)
        else:
            return defer.succeed(True)

    def startProtocol(self):
        """
        This function is called when the scraper is initialized.
        Initiates the connection with the tracker.
        """
        self.transport.connect(self.ip, self.port)
        self.udpsession.on_start()

    def write_data(self, data):
        """
        This function can be called to send serialized data to the tracker.
        :param data: The serialized data to be sent.
        """
        self.transport.write(data) # no need to pass the ip and port

    def datagramReceived(self, data, (host, port)):
        """
        This function dispatches data received from a UDP tracker.
        If it's the first response, it will dispatch the data to the handle_connection_response
        function of the UDP session.
        All subsequent data will be sent to the _handle_response function of the UDP session.
        :param data: The data received from the UDP tracker.
        """
        # Cancel the timeout
        if self.timeout.active():
            self.timeout.cancel()
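Note that the timeout is scheduled on a private ``Clock`` that nothing in the class ever advances, so it can only fire if calling code advances that clock explicitly. A sketch of exercising the timeout path (the address, port, and try/except are illustrative):

scraper = UDPScraper("127.0.0.1", 4433)
try:
    scraper.clock.advance(scraper.timeout_seconds)  # runs on_error synchronously
except RuntimeError:
    pass  # the timeout path was exercised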
Example #5
    def callLater(self, when, what, *a, **kw):
        """
        Unlike the real reactor, Clock.callLater doesn't raise an exception
        when the time is negative.

        https://twistedmatrix.com/trac/ticket/9166#comment

        Until that's fixed upstream, emulate the behavior here.
        """

        assert when >= 0, \
            "%s is not greater than or equal to 0 seconds" % (when,)
        return Clock.callLater(self, when, what, *a, **kw)
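The method above is written as part of a ``Clock`` subclass; a minimal sketch of the surrounding class (the class name is an assumption):

from twisted.internet.task import Clock

class StrictClock(Clock):
    def callLater(self, when, what, *a, **kw):
        assert when >= 0, \
            "%s is not greater than or equal to 0 seconds" % (when,)
        return Clock.callLater(self, when, what, *a, **kw)

clock = StrictClock()
clock.callLater(1, lambda: None)     # accepted
# clock.callLater(-1, lambda: None)  # would raise AssertionError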
Example #6
    def callLater(self, when, what, *a, **kw):
        # Buildbot often uses callLater(0, ...) to defer execution of certain
        # code to the next iteration of the reactor. This means that often
        # there are pending callbacks registered to the reactor that might
        # block other code from proceeding unless the test reactor has an
        # iteration. To avoid deadlocks in tests we give the real reactor a
        # chance to advance the test reactor whenever we detect that there
        # are callbacks that should run in the next iteration of the test
        # reactor.
        if when <= 0 and not self._pendingCurrentCalls:
            reactor.callLater(0, self._executeCurrentDelayedCalls)

        return Clock.callLater(self, when, what, *a, **kw)
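``_executeCurrentDelayedCalls`` is not shown in this excerpt; a plausible sketch, under the assumption that it simply fires everything already due on the test clock and clears the pending flag:

    def _executeCurrentDelayedCalls(self):
        # Assumed body: run every call scheduled for the current instant.
        self._pendingCurrentCalls = False
        self.advance(0)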
Example #7
class TestEventDispatcher(events.EventDispatcher):
    def __init__(self):
        events.EventDispatcher.__init__(self, lambda a: None)
        self.clock = Clock()
        self.advance = self.clock.advance

    def dispatch_delayed(self, event, delay):
        return self.clock.callLater(delay, self.dispatch, event)

    def dispatch_repeating(self, event, interval, now=False):
        t = task.LoopingCall(self.dispatch, event)
        t.clock = self.clock
        t.start(interval, now)
        return t
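Driving the dispatcher then looks roughly like this (event values are placeholders, and the ``events`` module above is assumed importable):

dispatcher = TestEventDispatcher()
dispatcher.dispatch_delayed("warm-up", delay=5)
timer = dispatcher.dispatch_repeating("tick", interval=10)
dispatcher.advance(5)    # fires the delayed "warm-up" dispatch
dispatcher.advance(10)   # fires the first repeating "tick"
timer.stop()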
Example #8
    def test_runDeferred_non_deferred(self):
        """
        An assertion error is raised when runDeferred is called for
        something which is not a deferred, e.g. a DelayedCall.
        """
        scheduler = Clock()
        delayed_call = scheduler.callLater(0, lambda: None)  # pragma: no cover

        with self.assertRaises(AssertionError) as context:
            self.runDeferred(delayed_call)

        self.assertEqual(
            'This is not a deferred.', context.exception.args[0])
Example #9
    def test_sendFile(self):
        fileToSend = StringIO(fileData)
        clock = Clock()
        cooperator = Cooperator(scheduler=lambda f: clock.callLater(0.1, f))
        d1 = self.client.sendFile(fileToSend, cooperator=cooperator)
        self.pump.pump()
        self.pump.pump()
        self.assertNoResult(d1)
        d2 = deliverContent(self.server.producer)

        clock.advance(1)
        while self.pump.pump():
            clock.advance(1)
        self.successResultOf(d1)
        self.assertEqual(self.successResultOf(d2), fileData)
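Wiring the ``Cooperator``'s scheduler to a deterministic ``Clock``, as above, is a reusable pattern in itself: cooperative work only proceeds when the test advances the clock. A minimal standalone sketch (not from the original suite):

from twisted.internet.task import Clock, Cooperator

clock = Clock()
coop = Cooperator(scheduler=lambda f: clock.callLater(0.1, f))

done = []
coop.cooperate(iter(range(3))).whenDone().addCallback(done.append)

while not done:
    clock.advance(0.1)  # each tick lets the cooperator run its scheduled work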
Example #10
    def test_deliverContent(self):
        fileToSend = StringIO(fileData)
        clock = Clock()
        cooperator = Cooperator(scheduler=lambda f: clock.callLater(0.1, f))
        @ProducerStarter
        def registerWithConsumer(consumer):
            producer = FileBodyProducer(fileToSend, cooperator=cooperator)
            d = producer.startProducing(consumer)
            d.addCallback(lambda ign: consumer.unregisterProducer())
            d.addErrback(log.err, 'error producing file body')
            consumer.registerProducer(producer, True)
        self.client.callRemote(SendProducer, producer=registerWithConsumer)
        self.pump.pump()
        d = deliverContent(self.server.producer)

        while self.pump.pump():
            clock.advance(1)
        self.assertEqual(self.successResultOf(d), fileData)
Example #11
    def test_sendFileDisconnection(self):
        fileToSend = StringIO(fileData)
        clock = Clock()
        cooperator = Cooperator(scheduler=lambda f: clock.callLater(0.1, f))
        d1 = self.client.sendFile(fileToSend, cooperator=cooperator)
        self.assertFailure(d1, FakeDisconnectedError)
        self.pump.pump()
        self.pump.pump()
        self.assertNoResult(d1)
        d2 = deliverContent(self.server.producer)
        self.assertFailure(d2, FakeDisconnectedError)
        self.pump.pump()
        clock.advance(1)
        f = failure.Failure(FakeDisconnectedError())
        self.client.connectionLost(f)
        self.server.connectionLost(f)
        self.assertEqual(len(self.flushLoggedErrors(FakeDisconnectedError)), 2)
        self.successResultOf(d1)
        self.successResultOf(d2)
Example #12
    def test_receivePushProducer(self):
        fileToSend = StringIO(fileData)
        consumer = FileConsumer()
        clock = Clock()
        cooperator = Cooperator(scheduler=lambda f: clock.callLater(0.1, f))
        @ProducerStarter
        def registerWithConsumer(consumer):
            producer = FileBodyProducer(fileToSend, cooperator=cooperator)
            d = producer.startProducing(consumer)
            d.addCallback(lambda ign: consumer.unregisterProducer())
            d.addErrback(log.err, 'error producing file body')
            consumer.registerProducer(producer, True)
        self.server.registerWithConsumer = registerWithConsumer
        d = self.client.callRemote(ReceiveProducer)
        self.pump.flush()
        receivedProducer = self.successResultOf(d)['producer']
        receivedProducer.registerConsumer(consumer)

        clock.advance(1)
        while self.pump.pump():
            clock.advance(1)
        self.assertEqual(consumer.value(), fileData)
        self.assertIdentical(consumer._producer, None)
Example #13
    def callLater(self, delay, *args, **kw):  # noqa
        # 'real' reactors do this, but Clock doesn't assert on
        # this.
        assert delay >= 0
        return Clock.callLater(self, delay, *args, **kw)
Example #14
class InputParsingTests(TestCase):
    """
    Tests for dealing with user input which may contain commands or be a
    message destined for the network.
    """
    def setUp(self):
        self.tcpConnections = []
        self.clock = Clock()

        self.transport = None
        self.terminal = TerminalBuffer()
        self.terminal.makeConnection(self.transport)
        self.protocol = UserInterface()
        self.protocol.reactor = self
        self.protocol.makeConnection(self.terminal)


    def connectTCP(self, host, port, factory, timeout=30, bindAddress=''):
        self.tcpConnections.append((host, port, factory, timeout, bindAddress))


    def callLater(self, n, f, *a, **kw):
        return self.clock.callLater(n, f, *a, **kw)


    def test_commandDispatch(self):
        """
        Verify that a line starting with C{/} and a word is dispatched to a
        function determined by that word.
        """
        dispatched = []
        self.protocol.cmd_DISPATCHTEST = dispatched.append
        self.protocol.parseInputLine('/dispatchtest')
        self.assertEqual(dispatched, ['/dispatchtest'])


    def test_serverCommand(self):
        """
        Verify that C{/server} is interpreted as a command to establish a new
        server connection.  Also some more things (that a connection attempt is
        made, that when it succeeds an IRC login is attempted over it with the
        right nickname).

        This is poorly factored.  IRC testing should be done elsewhere.
        Connection setup testing should be done elsewhere.
        """
        # XXX See #2504 in Twisted tracker
        from twisted.words.im import ircsupport
        self.patch(ircsupport, 'reactor', self)

        self.protocol.cmd_SERVER('/server irc.example.org testuser')
        self.assertEqual(len(self.tcpConnections), 1)
        self.assertEqual(self.tcpConnections[0][:2], ('irc.example.org', 6667))
        factory = self.tcpConnections[0][2]
        protocol = factory.buildProtocol(('irc.example.org', 6667))
        transport = StringTransport()
        protocol.makeConnection(transport)

        while self.clock.calls:
            self.clock.advance(1)

        self.assertEqual(
            transport.value(),
            'NICK testuser\r\n'
            'USER testuser foo bar :Twisted-IM user\r\n')

        output = str(self.terminal).splitlines()
        input = output.pop()
        status = output.pop()
        report = output.pop()
        for L in output:
            self.assertEqual(L, ' ' * 80)
        message = '== Connection to irc.example.org established.'
        self.assertEqual(report, message + ' ' * (80 - len(message)))


    def test_serverCommandFailedConnection(self):
        """
        Like L{test_serverCommand} but for a connection which fails.
        """
        # XXX See #2504 in Twisted tracker
        from twisted.words.im import ircsupport
        self.patch(ircsupport, 'reactor', self)

        self.protocol.cmd_SERVER('/server irc.example.org testuser')
        self.assertEqual(len(self.tcpConnections), 1)
        self.assertEqual(self.tcpConnections[0][:2], ('irc.example.org', 6667))
        factory = self.tcpConnections[0][2]
        factory.clientConnectionFailed(None, TimeoutError("mock"))

        while self.clock.calls:
            self.clock.advance(1)

        output = str(self.terminal).splitlines()
        input = output.pop()
        status = output.pop()
        report = output.pop()
        for L in output:
            self.assertEqual(L, ' ' * 80)
        message = '== irc.example.org failed: User timeout caused connection failure: mock.'
        self.assertEqual(report, message + ' ' * (80 - len(message)))
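The ``while self.clock.calls: self.clock.advance(1)`` idiom used by both tests drains every pending delayed call. As a standalone helper it might look like this (the name is hypothetical; it assumes no call perpetually reschedules itself):

def drain(clock, step=1):
    # Advance a twisted.internet.task.Clock until no delayed calls remain.
    while clock.calls:
        clock.advance(step)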
Example #15
class APITestsMixin(APIAssertionsMixin):
    """
    Helpers for writing tests for the Docker Volume Plugin API.
    """
    NODE_A = uuid4()
    NODE_B = uuid4()

    def initialize(self):
        """
        Create initial objects for the ``VolumePlugin``.
        """
        self.volume_plugin_reactor = Clock()
        self.flocker_client = SimpleCountingProxy(FakeFlockerClient())

    def test_pluginactivate(self):
        """
        ``/Plugin.Activate`` indicates the plugin is a volume driver.
        """
        # Docker 1.8, at least, sends "null" as the body. Our test
        # infrastructure has the opposite bug so just going to send some
        # other garbage as the body (12345) to demonstrate that it's
        # ignored as per the spec which declares no body.
        return self.assertResult(b"POST", b"/Plugin.Activate", 12345, OK,
                                 {u"Implements": [u"VolumeDriver"]})

    def test_remove(self):
        """
        ``/VolumeDriver.Remove`` returns a successful result.
        """
        return self.assertResult(b"POST", b"/VolumeDriver.Remove",
                                 {u"Name": u"vol"}, OK, {u"Err": None})

    def test_unmount(self):
        """
        ``/VolumeDriver.Unmount`` returns a successful result.
        """
        return self.assertResult(b"POST", b"/VolumeDriver.Unmount",
                                 {u"Name": u"vol"}, OK, {u"Err": None})

    def test_create_with_opts(self):
        """
        Calling the ``/VolumeDriver.Create`` API with an ``Opts`` value
        in the request body JSON ignores this parameter and creates
        a volume with the given name.
        """
        name = u"testvolume"
        d = self.assertResult(b"POST", b"/VolumeDriver.Create",
                              {u"Name": name, 'Opts': {'ignored': 'ignored'}},
                              OK, {u"Err": None})
        d.addCallback(
            lambda _: self.flocker_client.list_datasets_configuration())
        d.addCallback(self.assertItemsEqual, [
            Dataset(dataset_id=UUID(dataset_id_from_name(name)),
                    primary=self.NODE_A,
                    maximum_size=DEFAULT_SIZE,
                    metadata={u"name": name})])
        return d

    def create(self, name):
        """
        Call the ``/VolumeDriver.Create`` API to create a volume with the
        given name.

        :param unicode name: The name of the volume to create.

        :return: ``Deferred`` that fires when the volume has been created.
        """
        return self.assertResult(b"POST", b"/VolumeDriver.Create",
                                 {u"Name": name}, OK, {u"Err": None})

    def test_create_creates(self):
        """
        ``/VolumeDriver.Create`` creates a new dataset in the configuration.
        """
        name = u"myvol"
        d = self.create(name)
        d.addCallback(
            lambda _: self.flocker_client.list_datasets_configuration())
        d.addCallback(self.assertItemsEqual, [
            Dataset(dataset_id=UUID(dataset_id_from_name(name)),
                    primary=self.NODE_A,
                    maximum_size=DEFAULT_SIZE,
                    metadata={u"name": name})])
        return d

    def test_create_duplicate_name(self):
        """
        If a dataset with the given name already exists,
        ``/VolumeDriver.Create`` succeeds without creating a new volume.
        """
        name = u"thename"
        # Create a dataset out-of-band with matching name but non-matching
        # dataset ID:
        d = self.flocker_client.create_dataset(
            self.NODE_A, DEFAULT_SIZE, metadata={u"name": name})
        d.addCallback(lambda _: self.create(name))
        d.addCallback(
            lambda _: self.flocker_client.list_datasets_configuration())
        d.addCallback(lambda results: self.assertEqual(len(results), 1))
        return d

    def test_create_duplicate_name_race_condition(self):
        """
        If a dataset with the given name is created while the
        ``/VolumeDriver.Create`` call is in flight, the call does not
        result in an error.
        """
        name = u"thename"

        # Create a dataset out-of-band with matching dataset ID and name
        # which the docker plugin won't be able to see.
        def create_after_list():
            # Clean up the patched version:
            del self.flocker_client.list_datasets_configuration
            # But first time we're called, we create dataset and lie about
            # its existence:
            d = self.flocker_client.create_dataset(
                self.NODE_A, DEFAULT_SIZE,
                metadata={u"name": name},
                dataset_id=UUID(dataset_id_from_name(name)))
            d.addCallback(lambda _: [])
            return d
        self.flocker_client.list_datasets_configuration = create_after_list

        return self.create(name)

    def _flush_volume_plugin_reactor_on_endpoint_render(self):
        """
        This method patches ``self.app`` so that after any endpoint is
        rendered, the reactor used by the volume plugin is advanced repeatedly
        until there are no more ``delayedCalls`` pending on the reactor.
        """
        real_execute_endpoint = self.app.execute_endpoint

        def patched_execute_endpoint(*args, **kwargs):
            val = real_execute_endpoint(*args, **kwargs)
            while self.volume_plugin_reactor.getDelayedCalls():
                pending_calls = self.volume_plugin_reactor.getDelayedCalls()
                next_expiration = min(t.getTime() for t in pending_calls)
                now = self.volume_plugin_reactor.seconds()
                self.volume_plugin_reactor.advance(
                    max(0.0, next_expiration - now))
            return val
        self.patch(self.app, 'execute_endpoint', patched_execute_endpoint)

    def test_mount(self):
        """
        ``/VolumeDriver.Mount`` sets the primary of the dataset with matching
        name to the current node and then waits for the dataset to
        actually arrive.
        """
        name = u"myvol"
        dataset_id = UUID(dataset_id_from_name(name))
        # Create dataset on a different node:
        d = self.flocker_client.create_dataset(
            self.NODE_B, DEFAULT_SIZE, metadata={u"name": name},
            dataset_id=dataset_id)

        self._flush_volume_plugin_reactor_on_endpoint_render()

        # Pretend that it takes 5 seconds for the dataset to get established on
        # Node A.
        self.volume_plugin_reactor.callLater(
            5.0, self.flocker_client.synchronize_state)

        d.addCallback(lambda _:
                      self.assertResult(
                          b"POST", b"/VolumeDriver.Mount",
                          {u"Name": name}, OK,
                          {u"Err": None,
                           u"Mountpoint": u"/flocker/{}".format(dataset_id)}))
        d.addCallback(lambda _: self.flocker_client.list_datasets_state())

        def final_assertions(datasets):
            self.assertEqual([self.NODE_A],
                             [d.primary for d in datasets
                              if d.dataset_id == dataset_id])
            # There should be less than 20 calls to list_datasets_state over
            # the course of 5 seconds.
            self.assertLess(
                self.flocker_client.num_calls('list_datasets_state'), 20)
        d.addCallback(final_assertions)

        return d

    def test_mount_timeout(self):
        """
        ``/VolumeDriver.Mount`` sets the primary of the dataset with matching
        name to the current node and then waits for the dataset to
        actually arrive. If it does not arrive within 120 seconds, then it
        returns an error up to docker.
        """
        name = u"myvol"
        dataset_id = UUID(dataset_id_from_name(name))
        # Create dataset on a different node:
        d = self.flocker_client.create_dataset(
            self.NODE_B, DEFAULT_SIZE, metadata={u"name": name},
            dataset_id=dataset_id)

        self._flush_volume_plugin_reactor_on_endpoint_render()

        # Pretend that it takes 500 seconds for the dataset to get established
        # on Node A. This should be longer than the timeout.
        self.volume_plugin_reactor.callLater(
            500.0, self.flocker_client.synchronize_state)

        d.addCallback(lambda _:
                      self.assertResult(
                          b"POST", b"/VolumeDriver.Mount",
                          {u"Name": name}, OK,
                          {u"Err": u"Timed out waiting for dataset to mount.",
                           u"Mountpoint": u""}))
        return d

    def test_mount_already_exists(self):
        """
        ``/VolumeDriver.Mount`` sets the primary of the dataset with matching
        name to the current node and then waits for the dataset to
        actually arrive when used by the volumes that already exist and
        don't have a special dataset ID.
        """
        name = u"myvol"

        d = self.flocker_client.create_dataset(
            self.NODE_A, DEFAULT_SIZE, metadata={u"name": name})

        def created(dataset):
            self.flocker_client.synchronize_state()
            result = self.assertResult(
                b"POST", b"/VolumeDriver.Mount",
                {u"Name": name}, OK,
                {u"Err": None,
                 u"Mountpoint": u"/flocker/{}".format(
                     dataset.dataset_id)})
            result.addCallback(lambda _:
                               self.flocker_client.list_datasets_state())
            result.addCallback(lambda ds: self.assertEqual(
                [self.NODE_A], [d.primary for d in ds
                                if d.dataset_id == dataset.dataset_id]))
            return result
        d.addCallback(created)
        return d

    def test_unknown_mount(self):
        """
        ``/VolumeDriver.Mount`` returns an error when asked to mount a
        non-existent volume.
        """
        name = u"myvol"
        return self.assertResult(
            b"POST", b"/VolumeDriver.Mount",
            {u"Name": name}, OK,
            {u"Err": u"Could not find volume with given name."})

    def test_path(self):
        """
        ``/VolumeDriver.Path`` returns the mount path of the given volume if
        it is currently known.
        """
        name = u"myvol"
        dataset_id = UUID(dataset_id_from_name(name))

        d = self.create(name)
        # The dataset arrives as state:
        d.addCallback(lambda _: self.flocker_client.synchronize_state())

        d.addCallback(lambda _: self.assertResponseCode(
            b"POST", b"/VolumeDriver.Mount", {u"Name": name}, OK))

        d.addCallback(lambda _:
                      self.assertResult(
                          b"POST", b"/VolumeDriver.Path",
                          {u"Name": name}, OK,
                          {u"Err": None,
                           u"Mountpoint": u"/flocker/{}".format(dataset_id)}))
        return d

    def test_path_existing(self):
        """
        ``/VolumeDriver.Path`` returns the mount path of the given volume if
        it is currently known, including for a dataset that was created
        not by the plugin.
        """
        name = u"myvol"

        d = self.flocker_client.create_dataset(
            self.NODE_A, DEFAULT_SIZE, metadata={u"name": name})

        def created(dataset):
            self.flocker_client.synchronize_state()
            return self.assertResult(
                b"POST", b"/VolumeDriver.Path",
                {u"Name": name}, OK,
                {u"Err": None,
                 u"Mountpoint": u"/flocker/{}".format(dataset.dataset_id)})
        d.addCallback(created)
        return d

    def test_unknown_path(self):
        """
        ``/VolumeDriver.Path`` returns an error when asked for the mount path
        of a non-existent volume.
        """
        name = u"myvol"
        return self.assertResult(
            b"POST", b"/VolumeDriver.Path",
            {u"Name": name}, OK,
            {u"Err": u"Could not find volume with given name."})

    def test_non_local_path(self):
        """
        ``/VolumeDriver.Path`` returns an error when asked for the mount path
        of a volume that is not mounted locally.

        This can happen as a result of ``docker inspect`` on a container
        that has been created but is still waiting for its volume to
        arrive from another node. It seems like Docker may also call this
        after ``/VolumeDriver.Create``, so again while waiting for a
        volume to arrive.
        """
        name = u"myvol"
        dataset_id = UUID(dataset_id_from_name(name))

        # Create dataset on node B:
        d = self.flocker_client.create_dataset(
            self.NODE_B, DEFAULT_SIZE, metadata={u"name": name},
            dataset_id=dataset_id)
        d.addCallback(lambda _: self.flocker_client.synchronize_state())

        # Ask for path on node A:
        d.addCallback(lambda _:
                      self.assertResult(
                          b"POST", b"/VolumeDriver.Path",
                          {u"Name": name}, OK,
                          {u"Err": "Volume not available.",
                           u"Mountpoint": u""}))
        return d

    @capture_logging(lambda self, logger:
                     self.assertEqual(
                         len(logger.flushTracebacks(CustomException)), 1))
    def test_unexpected_error_reporting(self, logger):
        """
        If an unexpected error occurs Docker gets back a useful error message.
        """
        def error():
            raise CustomException("I've made a terrible mistake")
        self.patch(self.flocker_client, "list_datasets_configuration",
                   error)
        return self.assertResult(
            b"POST", b"/VolumeDriver.Path",
            {u"Name": u"whatever"}, OK,
            {u"Err": "CustomException: I've made a terrible mistake"})

    @capture_logging(None)
    def test_bad_request(self, logger):
        """
        If a ``BadRequest`` exception is raised it is converted to appropriate
        JSON.
        """
        def error():
            raise make_bad_request(code=423, Err=u"no good")
        self.patch(self.flocker_client, "list_datasets_configuration",
                   error)
        return self.assertResult(
            b"POST", b"/VolumeDriver.Path",
            {u"Name": u"whatever"}, 423,
            {u"Err": "no good"})
Example #16
class SelfHealTests(SynchronousTestCase):
    """
    Tests for :obj:`SelfHeal`
    """

    def setUp(self):
        self.clock = Clock()
        self.log = mock_log()
        self.patch(sh, "get_groups_to_converge", intent_func("ggtc"))
        self.patch(sh, "check_and_trigger", lambda t, g: t + g)
        self.s = sh.SelfHeal(self.clock, base_dispatcher, "cf", 300.0,
                             self.log)
        self.groups = [
            {"tenantId": "t{}".format(i), "groupId": "g{}".format(i)}
            for i in range(5)]

    def test_setup(self):
        """
        ``self.s.setup()`` will set up convergences to be triggered over the
        specified time range
        """
        self.s.dispatcher = SequenceDispatcher(
            [(("ggtc", "cf"), const(self.groups))])
        d = self.s.setup()
        self.successResultOf(d)
        calls = self.clock.getDelayedCalls()
        self.assertEqual(self.s._calls, calls)
        for i, c in enumerate(calls):
            self.assertEqual(c.getTime(), i * 60)
            self.assertEqual(c.func, sh.perform)
            self.assertEqual(c.args,
                             (self.s.dispatcher, "t{}g{}".format(i, i)))

    def test_setup_err(self):
        """
        ``self.s.setup()`` will log any error and return success
        """
        self.s.dispatcher = SequenceDispatcher(
            [(("ggtc", "cf"), conste(ValueError("h")))])
        d = self.s.setup()
        self.successResultOf(d)
        self.log.err.assert_called_once_with(
            CheckFailure(ValueError), "selfheal-setup-err",
            otter_service="selfheal")

    def test_setup_no_groups(self):
        """
        ``self.s.setup()`` gets groups and does nothing if there are no groups
        """
        self.s.dispatcher = SequenceDispatcher([(("ggtc", "cf"), const([]))])
        d = self.s.setup()
        self.successResultOf(d)
        self.assertEqual(self.s._calls, [])
        self.assertEqual(self.clock.getDelayedCalls(), [])

    def test_setup_still_active(self):
        """
        If there are scheduled calls when perform is called, they are
        cancelled and err is logged. Future calls are scheduled as usual
        """
        # Rewind the clock so the calls below are scheduled in the past;
        # advancing back to zero then fires call2, leaving call1 and call3
        # still pending.
        self.clock.advance(-0.6)
        call1 = self.clock.callLater(1, noop, None)
        call2 = self.clock.callLater(0, noop, None)
        call3 = self.clock.callLater(2, noop, None)
        self.clock.advance(0.6)
        self.s._calls = [call1, call2, call3]
        self.s.dispatcher = SequenceDispatcher(
            [(("ggtc", "cf"), const(self.groups))])
        d = self.s.setup()
        self.successResultOf(d)
        self.log.err.assert_called_once_with(
            matches(IsInstance(RuntimeError)), "selfheal-calls-err", active=2,
            otter_service="selfheal")
        self.assertFalse(call1.active())
        self.assertFalse(call2.active())
Example #17
from twisted.internet import reactor
from twisted.internet.task import Clock

def test():
    print("HI")

clock = Clock()
clock.callLater(5, test)
clock.advance(5)  # runs test() synchronously; the Clock is independent of the reactor
reactor.run()     # not required for the Clock to fire; this call blocks forever
Example #18
class TestSingleSmsSync(VumiTestCase):

    transport_class = SingleSmsSync
    account_in_url = False

    @inlineCallbacks
    def setUp(self):
        self.clock = Clock()
        self.reply_delay = 0.5
        self.auto_advance_clock = True
        self.config = {
            'web_path': "foo",
            'web_port': 0,
            'reply_delay': self.reply_delay,
        }
        self.add_transport_config()
        self.tx_helper = self.add_helper(TransportHelper(self.transport_class))
        self.transport = yield self.tx_helper.get_transport(self.config)
        self.transport.callLater = self._dummy_call_later
        self.transport_url = self.transport.get_transport_url()

    def _dummy_call_later(self, *args, **kw):
        self.clock.callLater(*args, **kw)
        if self.auto_advance_clock:
            self.clock.advance(self.reply_delay)

    def add_transport_config(self):
        self.config["smssync_secret"] = self.smssync_secret = "secretsecret"
        self.config["country_code"] = self.country_code = "+27"
        self.config["account_id"] = self.account_id = "test_account"

    def smssync_inbound(self, content, from_addr='123', to_addr='555',
                        timestamp=None, message_id='1', secret=None):
        """Emulate an inbound message from SMSSync on an Android phone."""
        msginfo = self.default_msginfo()
        if timestamp is None:
            timestamp = datetime.datetime.utcnow()
        if hasattr(timestamp, 'strftime'):
            timestamp = timestamp.strftime("%m-%d-%y %H:%M")
        if secret is None:
            secret = msginfo.smssync_secret
        # Timestamp format: mm-dd-yy-hh:mm, e.g. 11-27-11-07:11
        params = {
            'sent_to': to_addr,
            'from': from_addr,
            'message': content,
            'sent_timestamp': timestamp,
            'message_id': message_id,
            'secret': secret,
        }
        return self.smssync_call(params, method='POST')

    def smssync_poll(self):
        """Emulate a poll from SMSSync for waiting outbound messages."""
        return self.smssync_call({'task': 'send'}, method='GET')

    def smssync_call(self, params, method):
        url = self.mkurl(params)
        d = http_request(url, '', method=method)
        d.addCallback(json.loads)
        return d

    def mkurl(self, params):
        msginfo = self.default_msginfo()
        params = dict((k.encode('utf-8'), v.encode('utf-8'))
                      for k, v in params.items())
        return '%s%s%s?%s' % (
            self.transport_url,
            self.config['web_path'],
            ("/%s/" % msginfo.account_id) if self.account_in_url else '',
            urlencode(params),
        )

    def default_msginfo(self):
        return SmsSyncMsgInfo(self.account_id, self.smssync_secret,
                              self.country_code)

    @inlineCallbacks
    def test_inbound_success(self):
        now = datetime.datetime.utcnow().replace(second=0, microsecond=0)
        response = yield self.smssync_inbound(content=u'hællo', timestamp=now)
        self.assertEqual(response, {"payload": {"success": "true",
                                                "messages": []}})

        [msg] = self.tx_helper.get_dispatched_inbound()
        self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
        self.assertEqual(msg['to_addr'], "555")
        self.assertEqual(msg['from_addr'], "123")
        self.assertEqual(msg['content'], u"hællo")
        self.assertEqual(msg['timestamp'], now)

    @inlineCallbacks
    def test_inbound_millisecond_timestamp(self):
        smssync_ms = '1377125641000'
        now = datetime.datetime.utcfromtimestamp(int(smssync_ms) / 1000)
        response = yield self.smssync_inbound(content=u'hello',
                                              timestamp=smssync_ms)
        self.assertEqual(response, {"payload": {"success": "true",
                                                "messages": []}})
        [msg] = self.tx_helper.get_dispatched_inbound()
        self.assertEqual(msg['timestamp'], now)

    @inlineCallbacks
    def test_inbound_with_reply(self):
        self.auto_advance_clock = False
        now = datetime.datetime.utcnow().replace(second=0, microsecond=0)
        inbound_d = self.smssync_inbound(content=u'hællo', timestamp=now)

        [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
        reply = yield self.tx_helper.make_dispatch_reply(msg, u'ræply')

        self.clock.advance(self.reply_delay)
        response = yield inbound_d
        self.assertEqual(response, {"payload": {"success": "true",
                                                "messages": [{
                                                    "to": reply['to_addr'],
                                                    "message": u"ræply",
                                                }],
                                                }})

    @inlineCallbacks
    def test_normalize_msisdn(self):
        yield self.smssync_inbound(content="hi", from_addr="0555-7171",
                                   to_addr="0555-7272")
        [msg] = self.tx_helper.get_dispatched_inbound()
        self.assertEqual(msg['from_addr'], "+275557171")
        self.assertEqual(msg['to_addr'], "+275557272")

    @inlineCallbacks
    def test_inbound_invalid_secret(self):
        response = yield self.smssync_inbound(content=u'hello', secret='wrong')
        if self.smssync_secret == '':
            # blank secrets should not be checked
            self.assertEqual(response, {"payload": {"success": "true",
                                                    "messages": []}})
        else:
            self.assertEqual(response, {"payload": {"success": "false"}})

    @inlineCallbacks
    def test_inbound_garbage(self):
        response = yield self.smssync_call({}, 'GET')
        self.assertEqual(response, {"payload": {"success": "false"}})

    @inlineCallbacks
    def test_poll_outbound(self):
        outbound_msg = self.tx_helper.make_outbound(u'hællo')
        msginfo = self.default_msginfo()
        self.transport.add_msginfo_metadata(outbound_msg.payload, msginfo)
        yield self.tx_helper.dispatch_outbound(outbound_msg)
        response = yield self.smssync_poll()
        self.assertEqual(response, {
            "payload": {
                "task": "send",
                "secret": self.smssync_secret,
                "messages": [{
                    "to": outbound_msg['to_addr'],
                    "message": outbound_msg['content'],
                },
                ],
            },
        })
        [event] = yield self.tx_helper.get_dispatched_events()
        self.assertEqual(event['event_type'], 'ack')
        self.assertEqual(event['user_message_id'], outbound_msg['message_id'])

    @inlineCallbacks
    def test_reply_round_trip(self):
        # test that calling .reply(...) generates a working reply (this is
        # non-trivial because the transport metadata needs to be correct for
        # this to work).
        yield self.smssync_inbound(content=u'Hi')
        [msg] = self.tx_helper.get_dispatched_inbound()
        yield self.tx_helper.make_dispatch_reply(msg, 'Hi back!')
        response = yield self.smssync_poll()
        self.assertEqual(response["payload"]["messages"], [{
            "to": msg['from_addr'],
            "message": "Hi back!",
        }])
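The ``_dummy_call_later`` pattern generalizes: route a component's ``callLater`` to a ``Clock`` and advance immediately, so delayed replies happen inline and deterministically. A stripped-down sketch (the class name is hypothetical):

from twisted.internet.task import Clock

class AutoFiringClock(object):
    # Wraps a Clock so every scheduled call runs as soon as it is scheduled.
    def __init__(self):
        self._clock = Clock()

    def callLater(self, delay, func, *args, **kw):
        call = self._clock.callLater(delay, func, *args, **kw)
        self._clock.advance(delay)  # fire immediately and deterministically
        return call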
Example #19
class APITestsMixin(APIAssertionsMixin):
    """
    Helpers for writing tests for the Docker Volume Plugin API.
    """
    NODE_A = uuid4()
    NODE_B = uuid4()

    def initialize(self):
        """
        Create initial objects for the ``VolumePlugin``.
        """
        self.volume_plugin_reactor = Clock()
        self.flocker_client = SimpleCountingProxy(FakeFlockerClient())
        # The conditional_create operation used by the plugin relies on
        # the passage of time... so make sure time passes! We still use a
        # fake clock since some tests want to skip ahead.
        self.looping = LoopingCall(
            lambda: self.volume_plugin_reactor.advance(0.001))
        self.looping.start(0.001)
        self.addCleanup(self.looping.stop)

    def test_pluginactivate(self):
        """
        ``/Plugin.Activate`` indicates the plugin is a volume driver.
        """
        # Docker 1.8, at least, sends "null" as the body. Our test
        # infrastructure has the opposite bug so just going to send some
        # other garbage as the body (12345) to demonstrate that it's
        # ignored as per the spec which declares no body.
        return self.assertResult(b"POST", b"/Plugin.Activate", 12345, OK,
                                 {u"Implements": [u"VolumeDriver"]})

    def test_remove(self):
        """
        ``/VolumeDriver.Remove`` returns a successful result.
        """
        return self.assertResult(b"POST", b"/VolumeDriver.Remove",
                                 {u"Name": u"vol"}, OK, {u"Err": None})

    def test_unmount(self):
        """
        ``/VolumeDriver.Unmount`` returns a successful result.
        """
        return self.assertResult(b"POST", b"/VolumeDriver.Unmount",
                                 {u"Name": u"vol"}, OK, {u"Err": None})

    def test_create_with_profile(self):
        """
        Calling the ``/VolumeDriver.Create`` API with an ``Opts`` value
        of ``profile=[gold,silver,bronze]`` in the request body JSON creates
        a volume with the given name and the requested profile.
        """
        profile = sampled_from(["gold", "silver", "bronze"]).example()
        name = random_name(self)
        d = self.assertResult(b"POST", b"/VolumeDriver.Create", {
            u"Name": name,
            'Opts': {
                u"profile": profile
            }
        }, OK, {u"Err": None})
        d.addCallback(
            lambda _: self.flocker_client.list_datasets_configuration())
        d.addCallback(list)
        d.addCallback(
            lambda result: self.assertItemsEqual(result, [
                Dataset(dataset_id=result[0].dataset_id,
                        primary=self.NODE_A,
                        maximum_size=int(DEFAULT_SIZE.to_Byte()),
                        metadata={
                            u"name": name,
                            u"clusterhq:flocker:profile": unicode(profile)
                        })
            ]))
        return d

    def test_create_with_size(self):
        """
        Calling the ``/VolumeDriver.Create`` API with an ``Opts`` value
        of ``size=<somesize>`` in the request body JSON creates a volume
        with the given name and the requested size
        """
        name = random_name(self)
        size = integers(min_value=1, max_value=75).example()
        expression = volume_expression.example()
        size_opt = "".join(str(size)) + expression
        d = self.assertResult(b"POST", b"/VolumeDriver.Create", {
            u"Name": name,
            'Opts': {
                u"size": size_opt
            }
        }, OK, {u"Err": None})

        real_size = int(parse_num(size_opt).to_Byte())
        d.addCallback(
            lambda _: self.flocker_client.list_datasets_configuration())
        d.addCallback(list)
        d.addCallback(
            lambda result: self.assertItemsEqual(result, [
                Dataset(dataset_id=result[0].dataset_id,
                        primary=self.NODE_A,
                        maximum_size=real_size,
                        metadata={
                            u"name": name,
                            u"maximum_size": unicode(real_size)
                        })
            ]))
        return d

    @given(expr=volume_expression, size=integers(min_value=75, max_value=100))
    def test_parsenum_size(self, expr, size):
        """
        Send different forms of size expressions
        to ``parse_num``; we expect G (gigabyte) size results.

        :param str expr: A string representing the size expression
        :param int size: An integer representing the volume size
        """
        expected_size = int(GiB(size).to_Byte())
        return self.assertEqual(expected_size,
                                int(parse_num(str(size) + expr).to_Byte()))

    @given(expr=sampled_from(["KB", "MB", "GB", "TB", ""]),
           size=integers(min_value=1, max_value=100))
    def test_parsenum_all_sizes(self, expr, size):
        """
        Send standard size expressions to ``parse_num`` in
        many sizes; we expect to get correct size results.

        :param str expr: A string representing the size expression
        :param int size: An integer representing the volume size
        """
        if expr is "KB":
            expected_size = int(KiB(size).to_Byte())
        elif expr is "MB":
            expected_size = int(MiB(size).to_Byte())
        elif expr is "GB":
            expected_size = int(GiB(size).to_Byte())
        elif expr is "TB":
            expected_size = int(TiB(size).to_Byte())
        else:
            expected_size = int(Byte(size).to_Byte())
        return self.assertEqual(expected_size,
                                int(parse_num(str(size) + expr).to_Byte()))

    @given(size=sampled_from(
        [u"foo10Gb", u"10bar10", "10foogib", "10Gfoo", "GIB", "bar10foo"]))
    def test_parsenum_bad_size(self, size):
        """
        Send unacceptable size expressions, upon error
        users should expect to receive Flocker's ``DEFAULT_SIZE``

        :param str size: A string representing the bad volume size
        """
        return self.assertEqual(int(DEFAULT_SIZE.to_Byte()),
                                int(parse_num(size).to_Byte()))

    def create(self, name):
        """
        Call the ``/VolumeDriver.Create`` API to create a volume with the
        given name.

        :param unicode name: The name of the volume to create.

        :return: ``Deferred`` that fires when the volume has been created.
        """
        return self.assertResult(b"POST", b"/VolumeDriver.Create",
                                 {u"Name": name}, OK, {u"Err": None})

    def test_create_creates(self):
        """
        ``/VolumeDriver.Create`` creates a new dataset in the configuration.
        """
        name = u"myvol"
        d = self.create(name)
        d.addCallback(
            lambda _: self.flocker_client.list_datasets_configuration())
        d.addCallback(list)
        d.addCallback(
            lambda result: self.assertItemsEqual(result, [
                Dataset(dataset_id=result[0].dataset_id,
                        primary=self.NODE_A,
                        maximum_size=int(DEFAULT_SIZE.to_Byte()),
                        metadata={u"name": name})
            ]))
        return d

    def test_create_duplicate_name(self):
        """
        If a dataset with the given name already exists,
        ``/VolumeDriver.Create`` succeeds without creating a new volume.
        """
        name = u"thename"
        # Create a dataset out-of-band with matching name but non-matching
        # dataset ID:
        d = self.flocker_client.create_dataset(self.NODE_A,
                                               int(DEFAULT_SIZE.to_Byte()),
                                               metadata={u"name": name})
        d.addCallback(lambda _: self.create(name))
        d.addCallback(
            lambda _: self.flocker_client.list_datasets_configuration())
        d.addCallback(lambda results: self.assertEqual(len(list(results)), 1))
        return d

    def test_create_duplicate_name_race_condition(self):
        """
        If a dataset with the given name is created while the
        ``/VolumeDriver.Create`` call is in flight, the call does not
        result in an error.
        """
        name = u"thename"

        # Create a dataset out-of-band with matching dataset ID and name
        # which the docker plugin won't be able to see.
        def create_after_list():
            # Clean up the patched version:
            del self.flocker_client.list_datasets_configuration
            # But first time we're called, we create dataset and lie about
            # its existence:
            d = self.flocker_client.create_dataset(self.NODE_A,
                                                   int(DEFAULT_SIZE.to_Byte()),
                                                   metadata={u"name": name})
            d.addCallback(
                lambda _: DatasetsConfiguration(tag=u"1234", datasets={}))
            return d

        self.flocker_client.list_datasets_configuration = create_after_list

        return self.create(name)

    def _flush_volume_plugin_reactor_on_endpoint_render(self):
        """
        This method patches ``self.app`` so that after any endpoint is
        rendered, the reactor used by the volume plugin is advanced repeatedly
        until there are no more ``delayedCalls`` pending on the reactor.
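        (A standalone sketch of this loop appears after this example.)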
        """
        real_execute_endpoint = self.app.execute_endpoint

        def patched_execute_endpoint(*args, **kwargs):
            val = real_execute_endpoint(*args, **kwargs)
            while self.volume_plugin_reactor.getDelayedCalls():
                pending_calls = self.volume_plugin_reactor.getDelayedCalls()
                next_expiration = min(t.getTime() for t in pending_calls)
                now = self.volume_plugin_reactor.seconds()
                self.volume_plugin_reactor.advance(
                    max(0.0, next_expiration - now))
            return val

        self.patch(self.app, 'execute_endpoint', patched_execute_endpoint)

    def test_mount(self):
        """
        ``/VolumeDriver.Mount`` sets the primary of the dataset with matching
        name to the current node and then waits for the dataset to
        actually arrive.
        """
        name = u"myvol"
        dataset_id = uuid4()

        # Create dataset on a different node:
        d = self.flocker_client.create_dataset(self.NODE_B,
                                               int(DEFAULT_SIZE.to_Byte()),
                                               metadata={u"name": name},
                                               dataset_id=dataset_id)

        self._flush_volume_plugin_reactor_on_endpoint_render()

        # Pretend that it takes 5 seconds for the dataset to get established on
        # Node A.
        self.volume_plugin_reactor.callLater(
            5.0, self.flocker_client.synchronize_state)

        d.addCallback(lambda _: self.assertResult(
            b"POST", b"/VolumeDriver.Mount", {u"Name": name}, OK, {
                u"Err": None,
                u"Mountpoint": u"/flocker/{}".format(dataset_id)
            }))
        d.addCallback(lambda _: self.flocker_client.list_datasets_state())

        def final_assertions(datasets):
            self.assertEqual(
                [self.NODE_A],
                [d.primary for d in datasets if d.dataset_id == dataset_id])
            # There should be less than 20 calls to list_datasets_state over
            # the course of 5 seconds.
            self.assertLess(
                self.flocker_client.num_calls('list_datasets_state'), 20)

        d.addCallback(final_assertions)

        return d

    def test_mount_timeout(self):
        """
        ``/VolumeDriver.Mount`` sets the primary of the dataset with matching
        name to the current node and then waits for the dataset to
        actually arrive. If it does not arrive within 120 seconds, then it
        returns an error up to docker.
        """
        name = u"myvol"
        dataset_id = uuid4()
        # Create dataset on a different node:
        d = self.flocker_client.create_dataset(self.NODE_B,
                                               int(DEFAULT_SIZE.to_Byte()),
                                               metadata={u"name": name},
                                               dataset_id=dataset_id)

        self._flush_volume_plugin_reactor_on_endpoint_render()

        # Pretend that it takes 500 seconds for the dataset to get established
        # on Node A. This should be longer than the timeout.
        self.volume_plugin_reactor.callLater(
            500.0, self.flocker_client.synchronize_state)

        d.addCallback(lambda _: self.assertResult(
            b"POST", b"/VolumeDriver.Mount", {u"Name": name}, OK, {
                u"Err": u"Timed out waiting for dataset to mount.",
                u"Mountpoint": u""
            }))
        return d

    def test_mount_already_exists(self):
        """
        ``/VolumeDriver.Mount`` sets the primary of the dataset with matching
        name to the current node and then waits for the dataset to
        actually arrive, including for volumes that already exist and
        don't have a special dataset ID.
        """
        name = u"myvol"

        d = self.flocker_client.create_dataset(self.NODE_A,
                                               int(DEFAULT_SIZE.to_Byte()),
                                               metadata={u"name": name})

        def created(dataset):
            self.flocker_client.synchronize_state()
            result = self.assertResult(
                b"POST", b"/VolumeDriver.Mount", {u"Name": name}, OK, {
                    u"Err": None,
                    u"Mountpoint": u"/flocker/{}".format(dataset.dataset_id)
                })
            result.addCallback(
                lambda _: self.flocker_client.list_datasets_state())
            result.addCallback(lambda ds: self.assertEqual([self.NODE_A], [
                d.primary for d in ds if d.dataset_id == dataset.dataset_id
            ]))
            return result

        d.addCallback(created)
        return d

    def test_unknown_mount(self):
        """
        ``/VolumeDriver.Mount`` returns an error when asked to mount a
        non-existent volume.
        """
        name = u"myvol"
        return self.assertResult(
            b"POST", b"/VolumeDriver.Mount", {u"Name": name}, OK,
            {u"Err": u"Could not find volume with given name."})

    def test_path(self):
        """
        ``/VolumeDriver.Path`` returns the mount path of the given volume if
        it is currently known.
        """
        name = u"myvol"

        d = self.create(name)
        # The dataset arrives as state:
        d.addCallback(lambda _: self.flocker_client.synchronize_state())

        d.addCallback(lambda _: self.assertResponseCode(
            b"POST", b"/VolumeDriver.Mount", {u"Name": name}, OK))
        d.addCallback(
            lambda _: self.flocker_client.list_datasets_configuration())
        d.addCallback(lambda datasets_config: self.assertResult(
            b"POST", b"/VolumeDriver.Path", {u"Name": name}, OK, {
                u"Err":
                None,
                u"Mountpoint":
                u"/flocker/{}".format(datasets_config.datasets.keys()[0])
            }))
        return d

    def test_path_existing(self):
        """
        ``/VolumeDriver.Path`` returns the mount path of the given volume if
        it is currently known, including for a dataset that was not
        created by the plugin.
        """
        name = u"myvol"

        d = self.flocker_client.create_dataset(self.NODE_A,
                                               int(DEFAULT_SIZE.to_Byte()),
                                               metadata={u"name": name})

        def created(dataset):
            self.flocker_client.synchronize_state()
            return self.assertResult(
                b"POST", b"/VolumeDriver.Path", {u"Name": name}, OK, {
                    u"Err": None,
                    u"Mountpoint": u"/flocker/{}".format(dataset.dataset_id)
                })

        d.addCallback(created)
        return d

    def test_unknown_path(self):
        """
        ``/VolumeDriver.Path`` returns an error when asked for the mount path
        of a non-existent volume.
        """
        name = u"myvol"
        return self.assertResult(
            b"POST", b"/VolumeDriver.Path", {u"Name": name}, OK,
            {u"Err": u"Could not find volume with given name."})

    def test_non_local_path(self):
        """
        ``/VolumeDriver.Path`` returns an error when asked for the mount path
        of a volume that is not mounted locally.

        This can happen as a result of ``docker inspect`` on a container
        that has been created but is still waiting for its volume to
        arrive from another node. It seems like Docker may also call this
        after ``/VolumeDriver.Create``, so again while waiting for a
        volume to arrive.
        """
        name = u"myvol"
        dataset_id = uuid4()

        # Create dataset on node B:
        d = self.flocker_client.create_dataset(self.NODE_B,
                                               int(DEFAULT_SIZE.to_Byte()),
                                               metadata={u"name": name},
                                               dataset_id=dataset_id)
        d.addCallback(lambda _: self.flocker_client.synchronize_state())

        # Ask for path on node A:
        d.addCallback(lambda _: self.assertResult(
            b"POST", b"/VolumeDriver.Path", {u"Name": name}, OK, {
                u"Err": "Volume not available.",
                u"Mountpoint": u""
            }))
        return d

    @capture_logging(lambda self, logger: self.assertEqual(
        len(logger.flushTracebacks(CustomException)), 1))
    def test_unexpected_error_reporting(self, logger):
        """
        If an unexpected error occurs Docker gets back a useful error message.
        """
        def error():
            raise CustomException("I've made a terrible mistake")

        self.patch(self.flocker_client, "list_datasets_configuration", error)
        return self.assertResult(
            b"POST", b"/VolumeDriver.Path", {u"Name": u"whatever"}, OK,
            {u"Err": "CustomException: I've made a terrible mistake"})

    @capture_logging(None)
    def test_bad_request(self, logger):
        """
        If a ``BadRequest`` exception is raised it is converted to appropriate
        JSON.
        """
        def error():
            raise make_bad_request(code=423, Err=u"no good")

        self.patch(self.flocker_client, "list_datasets_configuration", error)
        return self.assertResult(b"POST", b"/VolumeDriver.Path",
                                 {u"Name": u"whatever"}, 423,
                                 {u"Err": "no good"})

    def test_unsupported_method(self):
        """
        If an unsupported method is requested, the 405 Method Not Allowed
        response code is returned.
        """
        return self.assertResponseCode(b"BAD_METHOD", b"/VolumeDriver.Path",
                                       None, NOT_ALLOWED)

    def test_unknown_uri(self):
        """
        If an unknown URI path is requested the 404 Not Found response code is
        returned.
        """
        return self.assertResponseCode(b"BAD_METHOD", b"/xxxnotthere", None,
                                       NOT_FOUND)

    def test_empty_host(self):
        """
        If an empty host header is sent to the Docker plugin it does not blow
        up, instead operating normally. E.g. a ``Plugin.Activate`` call
        returns the ``Implements`` response.
        """
        return self.assertResult(b"POST",
                                 b"/Plugin.Activate",
                                 12345,
                                 OK, {u"Implements": [u"VolumeDriver"]},
                                 additional_headers={b"Host": [""]})
Example #28
0
File: mock.py Project: nerevu/riko
class FakeReactor(MemoryReactor):
    """A fake reactor to be used in tests. This reactor doesn't actually do
    much that's useful yet. It accepts TCP connection setup attempts, but
    they will never succeed.

    Examples:
        >>> import sys
        >>> from twisted.internet.abstract import FileDescriptor
        >>> from twisted.internet.fdesc import readFromFD, setNonBlocking
        >>>
        >>> # reactor = proto_helpers.FakeReactor()
        >>> reactor = FakeReactor()
        >>> f = FileDescriptor(reactor)
        >>> f.fileno = sys.__stdout__.fileno
        >>> fd = f.fileno()
        >>> setNonBlocking(fd)
        >>> readFromFD(fd, print)
    """
    _DELAY = 1

    def __init__(self):
        super(FakeReactor, self).__init__()
        self._clock = Clock()
        reactor.fake = True
        msg = 'Attention! Running fake reactor'
        logger.debug('%s. Some deferreds may not work as intended.' % msg)
        self.running = False

    def resolve(self, *args, **kwargs):
        """Return a L{twisted.internet.defer.Deferred} that will resolve a hostname.
        """
        pass

    def run(self):
        """Fake L{IReactorCore.run}.
        """
        self.running = True

    def stop(self):
        """Fake L{IReactorCore.stop}.
        """
        self.running = False

    def crash(self):
        """Fake L{IReactorCore.crash}.
        """
        self.running = False

    def iterate(self, *args, **kwargs):
        """Fake L{IReactorCore.iterate}.
        """
        pass

    def fireSystemEvent(self, *args, **kwargs):
        """Fake L{IReactorCore.fireSystemEvent}.
        """
        pass

    def addSystemEventTrigger(self, *args, **kwargs):
        """Fake L{IReactorCore.addSystemEventTrigger}.
        """
        pass

    def removeSystemEventTrigger(self, *args, **kwargs):
        """Fake L{IReactorCore.removeSystemEventTrigger}.
        """
        pass

    def callWhenRunning(self, *args, **kwargs):
        """Fake L{IReactorCore.callWhenRunning}.
        """
        pass

    def getDelayedCalls(self):
        """Return all the outstanding delayed calls in the system.
        """
        return self._clock.getDelayedCalls()

    def callLater(self, when, what, *args, **kwargs):
        """Schedule a unit of work to be done later.
        """
        delayed = self._clock.callLater(when, what, *args, **kwargs)
        self.pump()
        return delayed

    def pump(self):
        """Perform scheduled work
        """
        self._clock.advance(self._DELAY)
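A brief usage sketch for the ``FakeReactor`` above, assuming the class (and
the module-level ``reactor`` and ``logger`` it references) is in scope.
Because ``callLater`` immediately pumps the internal ``Clock`` by ``_DELAY``
(one second), a one-second delayed call fires as soon as it is scheduled:

fired = []
fake = FakeReactor()
fake.run()
assert fake.running
fake.callLater(1, fired.append, u"done")  # pump() advances the clock by 1s
assert fired == [u"done"]
assert fake.getDelayedCalls() == []
fake.stop()
assert not fake.running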
Example #29
0
class FakeReactor(MemoryReactor):
    """A fake reactor to be used in tests. This reactor doesn't actually do
    much that's useful yet. It accepts TCP connection setup attempts, but
    they will never succeed.

    Examples:
        >>> import sys
        >>> from twisted.internet.abstract import FileDescriptor
        >>> from twisted.internet.fdesc import readFromFD, setNonBlocking
        >>>
        >>> # reactor = proto_helpers.FakeReactor()
        >>> reactor = FakeReactor()
        >>> f = FileDescriptor(reactor)
        >>> f.fileno = sys.__stdout__.fileno
        >>> fd = f.fileno()
        >>> setNonBlocking(fd)
        >>> readFromFD(fd, print)
    """
    _DELAY = 1

    def __init__(self):
        super(FakeReactor, self).__init__()
        self._clock = Clock()
        reactor.fake = True
        msg = 'Attention! Running fake reactor'
        logger.debug('%s. Some deferreds may not work as intended.' % msg)
        self.running = False

    def resolve(self, *args, **kwargs):
        """Return a L{twisted.internet.defer.Deferred} that will resolve a hostname.
        """
        pass

    def run(self):
        """Fake L{IReactorCore.run}.
        """
        self.running = True

    def stop(self):
        """Fake L{IReactorCore.stop}.
        """
        self.running = False

    def crash(self):
        """Fake L{IReactorCore.crash}.
        """
        self.running = False

    def iterate(self, *args, **kwargs):
        """Fake L{IReactorCore.iterate}.
        """
        pass

    def fireSystemEvent(self, *args, **kwargs):
        """Fake L{IReactorCore.fireSystemEvent}.
        """
        pass

    def addSystemEventTrigger(self, *args, **kwargs):
        """Fake L{IReactorCore.addSystemEventTrigger}.
        """
        pass

    def removeSystemEventTrigger(self, *args, **kwargs):
        """Fake L{IReactorCore.removeSystemEventTrigger}.
        """
        pass

    def callWhenRunning(self, *args, **kwargs):
        """Fake L{IReactorCore.callWhenRunning}.
        """
        pass

    def getDelayedCalls(self):
        """Return all the outstanding delayed calls in the system.
        """
        return self._clock.getDelayedCalls()

    def callLater(self, when, what, *args, **kwargs):
        """Schedule a unit of work to be done later.
        """
        delayed = self._clock.callLater(when, what, *args, **kwargs)
        self.pump()
        return delayed

    def pump(self):
        """Perform scheduled work
        """
        self._clock.advance(self._DELAY)
Example #30
0
class DeferredFilesystemLockTestCase(unittest.TestCase):
    """
    Test the behavior of L{DeferredFilesystemLock}
    """
    def setUp(self):
        self.clock = Clock()
        self.lock = defer.DeferredFilesystemLock(self.mktemp(),
                                                 scheduler=self.clock)


    def test_waitUntilLockedWithNoLock(self):
        """
        Test that the lock can be acquired when no lock is held
        """
        d = self.lock.deferUntilLocked(timeout=1)

        return d


    def test_waitUntilLockedWithTimeoutLocked(self):
        """
        Test that the lock can not be acquired when the lock is held
        for longer than the timeout.
        """
        self.failUnless(self.lock.lock())

        d = self.lock.deferUntilLocked(timeout=5.5)
        self.assertFailure(d, defer.TimeoutError)

        self.clock.pump([1]*10)

        return d


    def test_waitUntilLockedWithTimeoutUnlocked(self):
        """
        Test that a lock can be acquired while a lock is held
        but the lock is unlocked before our timeout.
        """
        def onTimeout(f):
            f.trap(defer.TimeoutError)
            self.fail("Should not have timed out")

        self.failUnless(self.lock.lock())

        self.clock.callLater(1, self.lock.unlock)
        d = self.lock.deferUntilLocked(timeout=10)
        d.addErrback(onTimeout)

        self.clock.pump([1]*10)

        return d


    def test_defaultScheduler(self):
        """
        Test that the default scheduler is set up properly.
        """
        lock = defer.DeferredFilesystemLock(self.mktemp())

        self.assertEquals(lock._scheduler, reactor)


    def test_concurrentUsage(self):
        """
        Test that an appropriate exception is raised when attempting
        to use deferUntilLocked concurrently.
        """
        self.lock.lock()
        self.clock.callLater(1, self.lock.unlock)

        d = self.lock.deferUntilLocked()
        d2 = self.lock.deferUntilLocked()

        self.assertFailure(d2, defer.AlreadyTryingToLockError)

        self.clock.advance(1)

        return d


    def test_multipleUsages(self):
        """
        Test that a DeferredFilesystemLock can be used multiple times
        """
        def lockAcquired(ign):
            self.lock.unlock()
            d = self.lock.deferUntilLocked()
            return d

        self.lock.lock()
        self.clock.callLater(1, self.lock.unlock)

        d = self.lock.deferUntilLocked()
        d.addCallback(lockAcquired)

        self.clock.advance(1)

        return d
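The lock tests above all rely on the same ``Clock``-and-``pump`` pattern:
schedule a delayed call, then walk the fake clock forward in one-second
steps. A minimal sketch of that pattern in isolation, assuming only
``twisted.internet`` (``defer.TimeoutError`` stands in here for the failure
``deferUntilLocked`` produces on timeout):

from twisted.internet import defer
from twisted.internet.task import Clock

clock = Clock()
d = defer.Deferred()
# Schedule a timeout errback five seconds out, then pump the clock past it.
clock.callLater(5, d.errback, defer.TimeoutError("timed out"))
clock.pump([1] * 10)
failures = []
d.addErrback(failures.append)
assert failures[0].check(defer.TimeoutError)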
Example #31
0
def callLater(self, *args, **kw):  # noqa
    laters.append((args, kw))
    Clock.callLater(self, *args, **kw)
Example #32
0
def callLater(self, *args, **kw):  # noqa
    laters.append((args, kw))
    Clock.callLater(self, *args, **kw)
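Examples #31 and #32 are fragments of a ``Clock`` subclass that records every
scheduled call in an enclosing ``laters`` list. A self-contained sketch of
the same idea (the ``RecordingClock`` name is illustrative):

from twisted.internet.task import Clock

laters = []


class RecordingClock(Clock):
    def callLater(self, *args, **kw):  # noqa
        # Record the scheduled call before delegating to the real scheduler.
        laters.append((args, kw))
        return Clock.callLater(self, *args, **kw)


recorded = []
c = RecordingClock()
c.callLater(2, recorded.append, u"later")
assert laters == [((2, recorded.append, u"later"), {})]
c.advance(2)
assert recorded == [u"later"]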
Example #33
0
class KademliaProtocolTest(unittest.TestCase):
    """ Test case for the Protocol class """

    udpPort = 9182

    def setUp(self):
        self._reactor = Clock()
        self.node = Node(node_id=b'1' * 48,
                         udpPort=self.udpPort,
                         externalIP="127.0.0.1",
                         listenUDP=listenUDP,
                         resolve=resolve,
                         clock=self._reactor,
                         callLater=self._reactor.callLater)
        self.remote_node = Node(node_id=b'2' * 48,
                                udpPort=self.udpPort,
                                externalIP="127.0.0.2",
                                listenUDP=listenUDP,
                                resolve=resolve,
                                clock=self._reactor,
                                callLater=self._reactor.callLater)
        self.remote_contact = self.node.contact_manager.make_contact(
            b'2' * 48, '127.0.0.2', 9182, self.node._protocol)
        self.us_from_them = self.remote_node.contact_manager.make_contact(
            b'1' * 48, '127.0.0.1', 9182, self.remote_node._protocol)
        self.node.start_listening()
        self.remote_node.start_listening()

    @defer.inlineCallbacks
    def tearDown(self):
        yield self.node.stop()
        yield self.remote_node.stop()
        del self._reactor

    @defer.inlineCallbacks
    def testReactor(self):
        """ Tests if the reactor can start/stop the protocol correctly """

        d = defer.Deferred()
        self._reactor.callLater(1, d.callback, True)
        self._reactor.advance(1)
        result = yield d
        self.assertTrue(result)

    @defer.inlineCallbacks
    def testRPCTimeout(self):
        """ Tests if a RPC message sent to a dead remote node times out correctly """
        yield self.remote_node.stop()
        self._reactor.pump([1 for _ in range(10)])
        self.node.addContact(self.remote_contact)

        @rpcmethod
        def fake_ping(*args, **kwargs):
            time.sleep(lbrynet.dht.constants.rpcTimeout + 1)
            return 'pong'

        real_ping = self.node.ping
        real_timeout = lbrynet.dht.constants.rpcTimeout
        real_attempts = lbrynet.dht.constants.rpcAttempts
        lbrynet.dht.constants.rpcAttempts = 1
        lbrynet.dht.constants.rpcTimeout = 1

        self.node.ping = fake_ping
        # Make sure the contact was added
        self.assertFalse(
            self.remote_contact not in self.node.contacts,
            'Contact not added to fake node (error in test code)')
        self.node.start_listening()

        # Run the PING RPC (which should raise a timeout error)
        df = self.remote_contact.ping()

        def check_timeout(err):
            self.assertEqual(err.type, TimeoutError)

        df.addErrback(check_timeout)

        def reset_values():
            self.node.ping = real_ping
            lbrynet.dht.constants.rpcTimeout = real_timeout
            lbrynet.dht.constants.rpcAttempts = real_attempts

        # See if the contact was removed due to the timeout
        def check_removed_contact():
            self.assertFalse(
                self.remote_contact in self.node.contacts,
                'Contact was not removed after RPC timeout; check exception types.'
            )

        df.addCallback(lambda _: reset_values())

        # Stop the reactor if a result arrives (timeout or not)
        df.addCallback(lambda _: check_removed_contact())
        self._reactor.pump([1 for _ in range(20)])

    @defer.inlineCallbacks
    def testRPCRequest(self):
        """ Tests if a valid RPC request is executed and responded to correctly """

        yield self.node.addContact(self.remote_contact)

        self.error = None

        def handleError(f):
            self.error = 'An RPC error occurred: %s' % f.getErrorMessage()

        def handleResult(result):
            expectedResult = b'pong'
            if result != expectedResult:
                self.error = 'Result from RPC is incorrect; expected "%s", got "%s"' \
                             % (expectedResult, result)

        # Simulate the RPC
        df = self.remote_contact.ping()
        df.addCallback(handleResult)
        df.addErrback(handleError)

        self._reactor.advance(2)
        yield df

        self.assertFalse(self.error, self.error)
        # The list of sent RPC messages should be empty at this stage
        self.assertEqual(
            len(self.node._protocol._sentMessages), 0,
            'The protocol is still waiting for an RPC result, '
            'but the transaction is already done!')

    def testRPCAccess(self):
        """ Tests invalid RPC requests
        Verifies that an RPC request for an existing but unpublished
        method is denied, and that the associated (remote) exception gets
        raised locally """

        self.assertRaises(AttributeError, getattr, self.remote_contact,
                          "not_a_rpc_function")

    def testRPCRequestArgs(self):
        """ Tests if an RPC requiring arguments is executed correctly """

        self.node.addContact(self.remote_contact)
        self.error = None

        def handleError(f):
            self.error = 'An RPC error occurred: %s' % f.getErrorMessage()

        def handleResult(result):
            expectedResult = b'pong'
            if result != expectedResult:
                self.error = 'Result from RPC is incorrect; expected "%s", got "%s"' % \
                             (expectedResult, result)

        # Publish the "local" node on the network
        self.node.start_listening()
        # Simulate the RPC
        df = self.remote_contact.ping()
        df.addCallback(handleResult)
        df.addErrback(handleError)
        self._reactor.pump([1 for _ in range(10)])
        self.assertFalse(self.error, self.error)
        # The list of sent RPC messages should be empty at this stage
        self.assertEqual(
            len(self.node._protocol._sentMessages), 0,
            'The protocol is still waiting for an RPC result, '
            'but the transaction is already done!')

    @defer.inlineCallbacks
    def testDetectProtocolVersion(self):
        original_findvalue = self.remote_node.findValue
        fake_blob = unhexlify("AB" * 48)

        @rpcmethod
        def findValue(contact, key):
            result = original_findvalue(contact, key)
            result.pop(b'protocolVersion')
            return result

        self.remote_node.findValue = findValue
        d = self.remote_contact.findValue(fake_blob)
        self._reactor.advance(3)
        find_value_response = yield d
        self.assertEqual(self.remote_contact.protocolVersion, 0)
        self.assertTrue('protocolVersion' not in find_value_response)

        self.remote_node.findValue = original_findvalue
        d = self.remote_contact.findValue(fake_blob)
        self._reactor.advance(3)
        find_value_response = yield d
        self.assertEqual(self.remote_contact.protocolVersion, 1)
        self.assertTrue('protocolVersion' not in find_value_response)

        self.remote_node.findValue = findValue
        d = self.remote_contact.findValue(fake_blob)
        self._reactor.advance(3)
        find_value_response = yield d
        self.assertEqual(self.remote_contact.protocolVersion, 0)
        self.assertTrue('protocolVersion' not in find_value_response)

    @defer.inlineCallbacks
    def testStoreToPre_0_20_0_Node(self):
        def _dont_migrate(contact, method, *args):
            return args, {}

        self.remote_node._protocol._migrate_incoming_rpc_args = _dont_migrate

        original_findvalue = self.remote_node.findValue
        original_store = self.remote_node.store

        @rpcmethod
        def findValue(contact, key):
            result = original_findvalue(contact, key)
            if b'protocolVersion' in result:
                result.pop(b'protocolVersion')
            return result

        @rpcmethod
        def store(contact,
                  key,
                  value,
                  originalPublisherID=None,
                  self_store=False,
                  **kwargs):
            self.assertTrue(len(key) == 48)
            self.assertSetEqual(set(value.keys()),
                                {b'token', b'lbryid', b'port'})
            self.assertFalse(self_store)
            self.assertDictEqual(kwargs, {})
            return original_store(  # pylint: disable=too-many-function-args
                contact, key, value[b'token'], value[b'port'],
                originalPublisherID, 0)

        self.remote_node.findValue = findValue
        self.remote_node.store = store

        fake_blob = unhexlify("AB" * 48)

        d = self.remote_contact.findValue(fake_blob)
        self._reactor.advance(3)
        find_value_response = yield d
        self.assertEqual(self.remote_contact.protocolVersion, 0)
        self.assertTrue(b'protocolVersion' not in find_value_response)
        token = find_value_response[b'token']
        d = self.remote_contact.store(fake_blob, token, 3333,
                                      self.node.node_id, 0)
        self._reactor.advance(3)
        response = yield d
        self.assertEqual(response, b'OK')
        self.assertEqual(self.remote_contact.protocolVersion, 0)
        self.assertTrue(self.remote_node._dataStore.hasPeersForBlob(fake_blob))
        self.assertEqual(len(self.remote_node._dataStore.getStoringContacts()),
                         1)

    @defer.inlineCallbacks
    def testStoreFromPre_0_20_0_Node(self):
        def _dont_migrate(contact, method, *args):
            return args

        self.remote_node._protocol._migrate_outgoing_rpc_args = _dont_migrate

        us_from_them = self.remote_node.contact_manager.make_contact(
            b'1' * 48, '127.0.0.1', self.udpPort, self.remote_node._protocol)

        fake_blob = unhexlify("AB" * 48)

        d = us_from_them.findValue(fake_blob)
        self._reactor.advance(3)
        find_value_response = yield d
        self.assertEqual(self.remote_contact.protocolVersion, 0)
        self.assertTrue(b'protocolVersion' not in find_value_response)
        token = find_value_response[b'token']
        us_from_them.update_protocol_version(0)
        d = self.remote_node._protocol.sendRPC(
            us_from_them, b"store", (fake_blob, {
                b'lbryid': self.remote_node.node_id,
                b'token': token,
                b'port': 3333
            }))
        self._reactor.advance(3)
        response = yield d
        self.assertEqual(response, b'OK')
        self.assertEqual(self.remote_contact.protocolVersion, 0)
        self.assertTrue(self.node._dataStore.hasPeersForBlob(fake_blob))
        self.assertEqual(len(self.node._dataStore.getStoringContacts()), 1)
        self.assertIs(self.node._dataStore.getStoringContacts()[0],
                      self.remote_contact)
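The Kademlia tests above share one shape: schedule work against the fake
reactor, ``advance`` or ``pump`` it, then yield the deferred. A minimal
sketch of that shape, assuming only ``twisted.internet``:

from twisted.internet import defer
from twisted.internet.task import Clock

clock = Clock()
d = defer.Deferred()
clock.callLater(1, d.callback, True)
assert not d.called   # nothing runs until the clock is advanced
clock.advance(1)      # fires the scheduled callback
assert d.called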
Example #34
0
class APITestsMixin(APIAssertionsMixin):
    """
    Helpers for writing tests for the Docker Volume Plugin API.
    """

    NODE_A = uuid4()
    NODE_B = uuid4()

    def initialize(self):
        """
        Create initial objects for the ``VolumePlugin``.
        """
        self.volume_plugin_reactor = Clock()
        self.flocker_client = SimpleCountingProxy(FakeFlockerClient())
        # The conditional_create operation used by the plugin relies on
        # the passage of time... so make sure time passes! We still use a
        # fake clock since some tests want to skip ahead.
        self.looping = LoopingCall(lambda: self.volume_plugin_reactor.advance(0.001))
        self.looping.start(0.001)
        self.addCleanup(self.looping.stop)

    def test_pluginactivate(self):
        """
        ``/Plugins.Activate`` indicates the plugin is a volume driver.
        """
        # Docker 1.8, at least, sends "null" as the body. Our test
        # infrastructure has the opposite bug, so we send some other
        # garbage as the body (12345) to demonstrate that it's ignored,
        # as the spec declares no body.
        return self.assertResult(b"POST", b"/Plugin.Activate", 12345, OK, {u"Implements": [u"VolumeDriver"]})

    def test_remove(self):
        """
        ``/VolumeDriver.Remove`` returns a successful result.
        """
        return self.assertResult(b"POST", b"/VolumeDriver.Remove", {u"Name": u"vol"}, OK, {u"Err": u""})

    def test_unmount(self):
        """
        ``/VolumeDriver.Unmount`` returns a successful result.
        """
        unmount_id = "".join(random.choice("0123456789abcdef") for n in xrange(64))
        return self.assertResult(
            b"POST", b"/VolumeDriver.Unmount", {u"Name": u"vol", u"ID": unicode(unmount_id)}, OK, {u"Err": u""}
        )

    def test_unmount_no_id(self):
        """
        ``/VolumeDriver.Unmount`` returns a successful result.

        No ID is sent, for backward compatibility with Docker < 1.12.
        """
        return self.assertResult(b"POST", b"/VolumeDriver.Unmount", {u"Name": u"vol"}, OK, {u"Err": u""})

    def test_create_with_profile(self):
        """
        Calling the ``/VolumeDriver.Create`` API with an ``Opts`` value
        of "profile=[gold|silver|bronze]" in the request body JSON creates a
        volume with the given name and the chosen profile.
        """
        profile = sampled_from(["gold", "silver", "bronze"]).example()
        name = random_name(self)
        d = self.assertResult(
            b"POST", b"/VolumeDriver.Create", {u"Name": name, "Opts": {u"profile": profile}}, OK, {u"Err": u""}
        )
        d.addCallback(lambda _: self.flocker_client.list_datasets_configuration())
        d.addCallback(list)
        d.addCallback(
            lambda result: self.assertItemsEqual(
                result,
                [
                    Dataset(
                        dataset_id=result[0].dataset_id,
                        primary=self.NODE_A,
                        maximum_size=int(DEFAULT_SIZE.to_Byte()),
                        metadata={NAME_FIELD: name, u"clusterhq:flocker:profile": unicode(profile)},
                    )
                ],
            )
        )
        return d

    def test_create_with_size(self):
        """
        Calling the ``/VolumeDriver.Create`` API with an ``Opts`` value
        of "size=<somesize>" in the request body JSON creates a volume
        with the given name and a random size between 1G and 100G.
        """
        name = random_name(self)
        size = integers(min_value=1, max_value=75).example()
        expression = volume_expression.example()
        size_opt = "".join(str(size)) + expression
        d = self.assertResult(
            b"POST", b"/VolumeDriver.Create", {u"Name": name, "Opts": {u"size": size_opt}}, OK, {u"Err": u""}
        )

        real_size = int(parse_num(size_opt).to_Byte())
        d.addCallback(lambda _: self.flocker_client.list_datasets_configuration())
        d.addCallback(list)
        d.addCallback(
            lambda result: self.assertItemsEqual(
                result,
                [
                    Dataset(
                        dataset_id=result[0].dataset_id,
                        primary=self.NODE_A,
                        maximum_size=real_size,
                        metadata={NAME_FIELD: name, u"maximum_size": unicode(real_size)},
                    )
                ],
            )
        )
        return d

    @given(expr=volume_expression, size=integers(min_value=75, max_value=100))
    def test_parsenum_size(self, expr, size):
        """
        Send different forms of size expressions
        to ``parse_num``, we expect G(Gigabyte) size results.

        :param str expr: A string representing the size expression
        :param int size: An integer representing the volume size
        """
        expected_size = int(GiB(size).to_Byte())
        return self.assertEqual(expected_size, int(parse_num(str(size) + expr).to_Byte()))

    @given(expr=sampled_from(["KB", "MB", "GB", "TB", ""]), size=integers(min_value=1, max_value=100))
    def test_parsenum_all_sizes(self, expr, size):
        """
        Send standard size expressions to ``parse_num`` in
        many sizes, we expect to get correct size results.

        :param str expr: A string representing the size expression
        :param int size: An integer representing the volume size
        """
        if expr is "KB":
            expected_size = int(KiB(size).to_Byte())
        elif expr is "MB":
            expected_size = int(MiB(size).to_Byte())
        elif expr is "GB":
            expected_size = int(GiB(size).to_Byte())
        elif expr is "TB":
            expected_size = int(TiB(size).to_Byte())
        else:
            expected_size = int(Byte(size).to_Byte())
        return self.assertEqual(expected_size, int(parse_num(str(size) + expr).to_Byte()))

    @given(size=sampled_from([u"foo10Gb", u"10bar10", "10foogib", "10Gfoo", "GIB", "bar10foo"]))
    def test_parsenum_bad_size(self, size):
        """
        Send unacceptable size expressions, upon error
        users should expect to receive Flocker's ``DEFAULT_SIZE``

        :param str size: A string representing the bad volume size
        """
        return self.assertEqual(int(DEFAULT_SIZE.to_Byte()), int(parse_num(size).to_Byte()))

    def create(self, name):
        """
        Call the ``/VolumeDriver.Create`` API to create a volume with the
        given name.

        :param unicode name: The name of the volume to create.

        :return: ``Deferred`` that fires when the volume has been created.
        """
        return self.assertResult(b"POST", b"/VolumeDriver.Create", {u"Name": name}, OK, {u"Err": u""})

    def test_create_creates(self):
        """
        ``/VolumeDriver.Create`` creates a new dataset in the configuration.
        """
        name = u"myvol"
        d = self.create(name)
        d.addCallback(lambda _: self.flocker_client.list_datasets_configuration())
        d.addCallback(list)
        d.addCallback(
            lambda result: self.assertItemsEqual(
                result,
                [
                    Dataset(
                        dataset_id=result[0].dataset_id,
                        primary=self.NODE_A,
                        maximum_size=int(DEFAULT_SIZE.to_Byte()),
                        metadata={NAME_FIELD: name},
                    )
                ],
            )
        )
        return d

    def test_create_duplicate_name(self):
        """
        If a dataset with the given name already exists,
        ``/VolumeDriver.Create`` succeeds without creating a new volume.
        """
        name = u"thename"
        # Create a dataset out-of-band with matching name but non-matching
        # dataset ID:
        d = self.flocker_client.create_dataset(self.NODE_A, int(DEFAULT_SIZE.to_Byte()), metadata={NAME_FIELD: name})
        d.addCallback(lambda _: self.create(name))
        d.addCallback(lambda _: self.flocker_client.list_datasets_configuration())
        d.addCallback(lambda results: self.assertEqual(len(list(results)), 1))
        return d

    def test_create_duplicate_name_race_condition(self):
        """
        If a dataset with the given name is created while the
        ``/VolumeDriver.Create`` call is in flight, the call does not
        result in an error.
        """
        name = u"thename"

        # Create a dataset out-of-band with matching dataset ID and name
        # which the docker plugin won't be able to see.
        def create_after_list():
            # Clean up the patched version:
            del self.flocker_client.list_datasets_configuration
            # But first time we're called, we create dataset and lie about
            # its existence:
            d = self.flocker_client.create_dataset(
                self.NODE_A, int(DEFAULT_SIZE.to_Byte()), metadata={NAME_FIELD: name}
            )
            d.addCallback(lambda _: DatasetsConfiguration(tag=u"1234", datasets={}))
            return d

        self.flocker_client.list_datasets_configuration = create_after_list

        return self.create(name)

    def _flush_volume_plugin_reactor_on_endpoint_render(self):
        """
        This method patches ``self.app`` so that after any endpoint is
        rendered, the reactor used by the volume plugin is advanced repeatedly
        until there are no more ``delayedCalls`` pending on the reactor.
        """
        real_execute_endpoint = self.app.execute_endpoint

        def patched_execute_endpoint(*args, **kwargs):
            val = real_execute_endpoint(*args, **kwargs)
            while self.volume_plugin_reactor.getDelayedCalls():
                pending_calls = self.volume_plugin_reactor.getDelayedCalls()
                next_expiration = min(t.getTime() for t in pending_calls)
                now = self.volume_plugin_reactor.seconds()
                self.volume_plugin_reactor.advance(max(0.0, next_expiration - now))
            return val

        self.patch(self.app, "execute_endpoint", patched_execute_endpoint)

    def test_mount(self):
        """
        ``/VolumeDriver.Mount`` sets the primary of the dataset with matching
        name to the current node and then waits for the dataset to
        actually arrive.
        """
        name = u"myvol"
        dataset_id = uuid4()
        mount_id = "".join(random.choice("0123456789abcdef") for n in xrange(64))

        # Create dataset on a different node:
        d = self.flocker_client.create_dataset(
            self.NODE_B, int(DEFAULT_SIZE.to_Byte()), metadata={NAME_FIELD: name}, dataset_id=dataset_id
        )

        self._flush_volume_plugin_reactor_on_endpoint_render()

        # Pretend that it takes 5 seconds for the dataset to get established on
        # Node A.
        self.volume_plugin_reactor.callLater(5.0, self.flocker_client.synchronize_state)

        d.addCallback(
            lambda _: self.assertResult(
                b"POST",
                b"/VolumeDriver.Mount",
                {u"Name": name, u"ID": unicode(mount_id)},
                OK,
                {u"Err": u"", u"Mountpoint": u"/flocker/{}".format(dataset_id)},
            )
        )
        d.addCallback(lambda _: self.flocker_client.list_datasets_state())

        def final_assertions(datasets):
            self.assertEqual([self.NODE_A], [d.primary for d in datasets if d.dataset_id == dataset_id])
            # There should be less than 20 calls to list_datasets_state over
            # the course of 5 seconds.
            self.assertLess(self.flocker_client.num_calls("list_datasets_state"), 20)

        d.addCallback(final_assertions)

        return d

    def test_mount_no_id(self):
        """
        ``/VolumeDriver.Mount`` sets the primary of the dataset with matching
        name to the current node and then waits for the dataset to
        actually arrive.

        No ID is sent, for backward compatibility with Docker < 1.12.
        """
        name = u"myvol"
        dataset_id = uuid4()

        # Create dataset on a different node:
        d = self.flocker_client.create_dataset(
            self.NODE_B, int(DEFAULT_SIZE.to_Byte()), metadata={NAME_FIELD: name}, dataset_id=dataset_id
        )

        self._flush_volume_plugin_reactor_on_endpoint_render()

        # Pretend that it takes 5 seconds for the dataset to get established on
        # Node A.
        self.volume_plugin_reactor.callLater(5.0, self.flocker_client.synchronize_state)

        d.addCallback(
            lambda _: self.assertResult(
                b"POST",
                b"/VolumeDriver.Mount",
                {u"Name": name},
                OK,
                {u"Err": u"", u"Mountpoint": u"/flocker/{}".format(dataset_id)},
            )
        )
        d.addCallback(lambda _: self.flocker_client.list_datasets_state())

        def final_assertions(datasets):
            self.assertEqual([self.NODE_A], [d.primary for d in datasets if d.dataset_id == dataset_id])
            # There should be less than 20 calls to list_datasets_state over
            # the course of 5 seconds.
            self.assertLess(self.flocker_client.num_calls("list_datasets_state"), 20)

        d.addCallback(final_assertions)

        return d

    def test_mount_timeout(self):
        """
        ``/VolumeDriver.Mount`` sets the primary of the dataset with matching
        name to the current node and then waits for the dataset to
        actually arrive. If it does not arrive within 120 seconds, then it
        returns an error up to docker.
        """
        name = u"myvol"
        dataset_id = uuid4()
        mount_id = "".join(random.choice("0123456789abcdef") for n in xrange(64))
        # Create dataset on a different node:
        d = self.flocker_client.create_dataset(
            self.NODE_B, int(DEFAULT_SIZE.to_Byte()), metadata={NAME_FIELD: name}, dataset_id=dataset_id
        )

        self._flush_volume_plugin_reactor_on_endpoint_render()

        # Pretend that it takes 500 seconds for the dataset to get established
        # on Node A. This should be longer than the timeout.
        self.volume_plugin_reactor.callLater(500.0, self.flocker_client.synchronize_state)

        d.addCallback(
            lambda _: self.assertResult(
                b"POST",
                b"/VolumeDriver.Mount",
                {u"Name": name, u"ID": unicode(mount_id)},
                OK,
                {u"Err": u"Timed out waiting for dataset to mount.", u"Mountpoint": u""},
            )
        )
        return d

    def test_mount_already_exists(self):
        """
        ``/VolumeDriver.Mount`` sets the primary of the dataset with matching
        name to the current node and then waits for the dataset to
        actually arrive, including for volumes that already exist and
        don't have a special dataset ID.
        """
        name = u"myvol"
        mount_id = "".join(random.choice("0123456789abcdef") for n in xrange(64))

        d = self.flocker_client.create_dataset(self.NODE_A, int(DEFAULT_SIZE.to_Byte()), metadata={NAME_FIELD: name})

        def created(dataset):
            self.flocker_client.synchronize_state()
            result = self.assertResult(
                b"POST",
                b"/VolumeDriver.Mount",
                {u"Name": name, u"ID": unicode(mount_id)},
                OK,
                {u"Err": u"", u"Mountpoint": u"/flocker/{}".format(dataset.dataset_id)},
            )
            result.addCallback(lambda _: self.flocker_client.list_datasets_state())
            result.addCallback(
                lambda ds: self.assertEqual(
                    [self.NODE_A], [d.primary for d in ds if d.dataset_id == dataset.dataset_id]
                )
            )
            return result

        d.addCallback(created)
        return d

    def test_unknown_mount(self):
        """
        ``/VolumeDriver.Mount`` returns an error when asked to mount a
        non-existent volume.
        """
        name = u"myvol"
        mount_id = "".join(random.choice("0123456789abcdef") for n in xrange(64))
        return self.assertResult(
            b"POST",
            b"/VolumeDriver.Mount",
            {u"Name": name, u"ID": unicode(mount_id)},
            OK,
            {u"Err": u"Could not find volume with given name."},
        )

    def test_path(self):
        """
        ``/VolumeDriver.Path`` returns the mount path of the given volume if
        it is currently known.
        """
        name = u"myvol"

        d = self.create(name)
        # The dataset arrives as state:
        d.addCallback(lambda _: self.flocker_client.synchronize_state())

        d.addCallback(lambda _: self.assertResponseCode(b"POST", b"/VolumeDriver.Mount", {u"Name": name}, OK))
        d.addCallback(lambda _: self.flocker_client.list_datasets_configuration())
        d.addCallback(
            lambda datasets_config: self.assertResult(
                b"POST",
                b"/VolumeDriver.Path",
                {u"Name": name},
                OK,
                {u"Err": u"", u"Mountpoint": u"/flocker/{}".format(datasets_config.datasets.keys()[0])},
            )
        )
        return d

    def test_path_existing(self):
        """
        ``/VolumeDriver.Path`` returns the mount path of the given volume if
        it is currently known, including for a dataset that was not
        created by the plugin.
        """
        name = u"myvol"

        d = self.flocker_client.create_dataset(self.NODE_A, int(DEFAULT_SIZE.to_Byte()), metadata={NAME_FIELD: name})

        def created(dataset):
            self.flocker_client.synchronize_state()
            return self.assertResult(
                b"POST",
                b"/VolumeDriver.Path",
                {u"Name": name},
                OK,
                {u"Err": u"", u"Mountpoint": u"/flocker/{}".format(dataset.dataset_id)},
            )

        d.addCallback(created)
        return d

    def test_unknown_path(self):
        """
        ``/VolumeDriver.Path`` returns an error when asked for the mount path
        of a non-existent volume.
        """
        name = u"myvol"
        return self.assertResult(
            b"POST", b"/VolumeDriver.Path", {u"Name": name}, OK, {u"Err": u"Could not find volume with given name."}
        )

    def test_non_local_path(self):
        """
        ``/VolumeDriver.Path`` returns an error when asked for the mount path
        of a volume that is not mounted locally.

        This can happen as a result of ``docker inspect`` on a container
        that has been created but is still waiting for its volume to
        arrive from another node. It seems like Docker may also call this
        after ``/VolumeDriver.Create``, so again while waiting for a
        volume to arrive.
        """
        name = u"myvol"
        dataset_id = uuid4()

        # Create dataset on node B:
        d = self.flocker_client.create_dataset(
            self.NODE_B, int(DEFAULT_SIZE.to_Byte()), metadata={NAME_FIELD: name}, dataset_id=dataset_id
        )
        d.addCallback(lambda _: self.flocker_client.synchronize_state())

        # Ask for path on node A:
        d.addCallback(
            lambda _: self.assertResult(
                b"POST",
                b"/VolumeDriver.Path",
                {u"Name": name},
                OK,
                {u"Err": "Volume not available.", u"Mountpoint": u""},
            )
        )
        return d

    @capture_logging(lambda self, logger: self.assertEqual(len(logger.flushTracebacks(CustomException)), 1))
    def test_unexpected_error_reporting(self, logger):
        """
        If an unexpected error occurs Docker gets back a useful error message.
        """

        def error():
            raise CustomException("I've made a terrible mistake")

        self.patch(self.flocker_client, "list_datasets_configuration", error)
        return self.assertResult(
            b"POST",
            b"/VolumeDriver.Path",
            {u"Name": u"whatever"},
            OK,
            {u"Err": "CustomException: I've made a terrible mistake"},
        )

    @capture_logging(None)
    def test_bad_request(self, logger):
        """
        If a ``BadRequest`` exception is raised it is converted to appropriate
        JSON.
        """

        def error():
            raise make_bad_request(code=423, Err=u"no good")

        self.patch(self.flocker_client, "list_datasets_configuration", error)
        return self.assertResult(b"POST", b"/VolumeDriver.Path", {u"Name": u"whatever"}, 423, {u"Err": "no good"})

    def test_unsupported_method(self):
        """
        If an unsupported method is requested, the 405 Method Not Allowed
        response code is returned.
        """
        return self.assertResponseCode(b"BAD_METHOD", b"/VolumeDriver.Path", None, NOT_ALLOWED)

    def test_unknown_uri(self):
        """
        If an unknown URI path is requested the 404 Not Found response code is
        returned.
        """
        return self.assertResponseCode(b"BAD_METHOD", b"/xxxnotthere", None, NOT_FOUND)

    def test_empty_host(self):
        """
        If an empty host header is sent to the Docker plugin it does not blow
        up, instead operating normally. E.g. a ``Plugin.Activate`` call
        returns the ``Implements`` response.
        """
        return self.assertResult(
            b"POST",
            b"/Plugin.Activate",
            12345,
            OK,
            {u"Implements": [u"VolumeDriver"]},
            additional_headers={b"Host": [""]},
        )

    def test_get(self):
        """
        ``/VolumeDriver.Get`` returns the mount path of the given volume if
        it is currently known.
        """
        name = u"myvol"

        d = self.create(name)
        # The dataset arrives as state:
        d.addCallback(lambda _: self.flocker_client.synchronize_state())

        d.addCallback(lambda _: self.assertResponseCode(b"POST", b"/VolumeDriver.Mount", {u"Name": name}, OK))
        d.addCallback(lambda _: self.flocker_client.list_datasets_configuration())
        d.addCallback(
            lambda datasets_config: self.assertResult(
                b"POST",
                b"/VolumeDriver.Get",
                {u"Name": name},
                OK,
                {
                    u"Err": u"",
                    u"Volume": {
                        u"Name": name,
                        u"Mountpoint": u"/flocker/{}".format(datasets_config.datasets.keys()[0]),
                    },
                },
            )
        )
        return d

    def test_get_existing(self):
        """
        ``/VolumeDriver.Get`` returns the mount path of the given volume if
        it is currently known, including for a dataset that was not
        created by the plugin.
        """
        name = u"myvol"

        d = self.flocker_client.create_dataset(self.NODE_A, int(DEFAULT_SIZE.to_Byte()), metadata={NAME_FIELD: name})

        def created(dataset):
            self.flocker_client.synchronize_state()
            return self.assertResult(
                b"POST",
                b"/VolumeDriver.Get",
                {u"Name": name},
                OK,
                {u"Err": u"", u"Volume": {u"Name": name, u"Mountpoint": u"/flocker/{}".format(dataset.dataset_id)}},
            )

        d.addCallback(created)
        return d

    def test_unknown_get(self):
        """
        ``/VolumeDriver.Get`` returns an error when asked for the mount path
        of a non-existent volume.
        """
        name = u"myvol"
        return self.assertResult(
            b"POST", b"/VolumeDriver.Get", {u"Name": name}, OK, {u"Err": u"Could not find volume with given name."}
        )

    def test_non_local_get(self):
        """
        ``/VolumeDriver.Get`` returns an empty mount point when asked about a
        volume that is not mounted locally.
        """
        name = u"myvol"
        dataset_id = uuid4()

        # Create dataset on node B:
        d = self.flocker_client.create_dataset(
            self.NODE_B, int(DEFAULT_SIZE.to_Byte()), metadata={NAME_FIELD: name}, dataset_id=dataset_id
        )
        d.addCallback(lambda _: self.flocker_client.synchronize_state())

        # Ask for path on node A:
        d.addCallback(
            lambda _: self.assertResult(
                b"POST",
                b"/VolumeDriver.Get",
                {u"Name": name},
                OK,
                {u"Err": u"", u"Volume": {u"Name": name, u"Mountpoint": u""}},
            )
        )
        return d

    def test_list(self):
        """
        ``/VolumeDriver.List`` returns the mount path of each known local
        volume and an empty mount point for non-local volumes.
        """
        name = u"myvol"
        remote_name = u"myvol3"

        d = gatherResults(
            [
                self.flocker_client.create_dataset(
                    self.NODE_A, int(DEFAULT_SIZE.to_Byte()), metadata={NAME_FIELD: name}
                ),
                self.flocker_client.create_dataset(
                    self.NODE_B, int(DEFAULT_SIZE.to_Byte()), metadata={NAME_FIELD: remote_name}
                ),
            ]
        )

        # The datasets arrive as state:
        d.addCallback(lambda _: self.flocker_client.synchronize_state())
        d.addCallback(lambda _: self.flocker_client.list_datasets_configuration())
        d.addCallback(
            lambda datasets_config: self.assertResult(
                b"POST",
                b"/VolumeDriver.List",
                {},
                OK,
                {
                    u"Err": u"",
                    u"Volumes": sorted(
                        [
                            {
                                u"Name": name,
                                u"Mountpoint": u"/flocker/{}".format(
                                    [
                                        key
                                        for (key, value) in datasets_config.datasets.items()
                                        if value.metadata["name"] == name
                                    ][0]
                                ),
                            },
                            {u"Name": remote_name, u"Mountpoint": u""},
                        ]
                    ),
                },
            )
        )
        return d

    def test_list_no_metadata_name(self):
        """
        ``/VolumeDriver.List`` omits volumes that don't have a metadata field
        for their name.
        """
        d = self.flocker_client.create_dataset(self.NODE_A, int(DEFAULT_SIZE.to_Byte()), metadata={})
        d.addCallback(
            lambda _: self.assertResult(b"POST", b"/VolumeDriver.List", {}, OK, {u"Err": u"", u"Volumes": []})
        )
        return d
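
The tests above repeatedly resolve a Docker volume name to a Flocker dataset
by scanning dataset metadata (see ``test_get`` and ``test_list``). A minimal
sketch of that lookup as a standalone helper; the function name is
hypothetical and not part of the plugin API, and it assumes a
``datasets_config`` object like the one returned by
``list_datasets_configuration``:

def dataset_id_for_name(datasets_config, name):
    # Scan the configured datasets for one whose "name" metadata field
    # matches the requested Docker volume name.
    for dataset_id, dataset in datasets_config.datasets.items():
        if dataset.metadata.get(u"name") == name:
            return dataset_id
    return None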
Example #35
0
from twisted.internet import defer, reactor
from twisted.internet.task import Clock
from twisted.trial import unittest


class DeferredFilesystemLockTestCase(unittest.TestCase):
    """
    Test the behavior of L{DeferredFilesystemLock}
    """
    def setUp(self):
        self.clock = Clock()
        self.lock = defer.DeferredFilesystemLock(self.mktemp(),
                                                 scheduler=self.clock)

    def test_waitUntilLockedWithNoLock(self):
        """
        Test that the lock can be acquired when no lock is held
        """
        d = self.lock.deferUntilLocked(timeout=1)

        return d

    def test_waitUntilLockedWithTimeoutLocked(self):
        """
        Test that the lock cannot be acquired when the lock is held
        for longer than the timeout.
        """
        self.assertTrue(self.lock.lock())

        d = self.lock.deferUntilLocked(timeout=5.5)
        self.assertFailure(d, defer.TimeoutError)

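        # Advance simulated time in 1-second steps, well past the 5.5 second
        # timeout, so the TimeoutError fires deterministically.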
        self.clock.pump([1] * 10)

        return d

    def test_waitUntilLockedWithTimeoutUnlocked(self):
        """
        Test that the lock can be acquired when it is initially held
        but is released before our timeout expires.
        """
        def onTimeout(f):
            f.trap(defer.TimeoutError)
            self.fail("Should not have timed out")

        self.assertTrue(self.lock.lock())

        self.clock.callLater(1, self.lock.unlock)
        d = self.lock.deferUntilLocked(timeout=10)
        d.addErrback(onTimeout)

        self.clock.pump([1] * 10)

        return d

    def test_defaultScheduler(self):
        """
        Test that the default scheduler is set up properly.
        """
        lock = defer.DeferredFilesystemLock(self.mktemp())

        self.assertEqual(lock._scheduler, reactor)

    def test_concurrentUsage(self):
        """
        Test that an appropriate exception is raised when attempting
        to use deferUntilLocked concurrently.
        """
        self.lock.lock()
        self.clock.callLater(1, self.lock.unlock)

        d = self.lock.deferUntilLocked()
        d2 = self.lock.deferUntilLocked()

        self.assertFailure(d2, defer.AlreadyTryingToLockError)

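        # Fire the unlock scheduled at t=1 so the first deferUntilLocked
        # can succeed.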
        self.clock.advance(1)

        return d

    def test_multipleUsages(self):
        """
        Test that a DeferredFilesystemLock can be used multiple times.
        """
        def lockAcquired(ign):
            self.lock.unlock()
            d = self.lock.deferUntilLocked()
            return d

        self.lock.lock()
        self.clock.callLater(1, self.lock.unlock)

        d = self.lock.deferUntilLocked()
        d.addCallback(lockAcquired)

        self.clock.advance(1)

        return d
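
Both example classes drive time with ``twisted.internet.task.Clock`` instead
of sleeping against the real reactor. A minimal standalone sketch of the
pattern, independent of either test suite:

from twisted.internet.task import Clock

clock = Clock()
fired = []
clock.callLater(2, fired.append, "tick")  # scheduled against simulated time
assert fired == []                        # nothing runs until time advances
clock.pump([1, 1])                        # advance the clock by 1s, twice
assert fired == ["tick"]                  # the delayed call ran at t=2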