Example #1
 def test_stopping_closes_connections_cleanly(self):
     service = RegionService(sentinel.ipcWorker)
     service.starting = Deferred()
     service.starting.addErrback(
         lambda failure: failure.trap(CancelledError))
     service.factory.protocol = HandshakingRegionServer
     connections = {
         service.factory.buildProtocol(None),
         service.factory.buildProtocol(None),
     }
     for conn in connections:
         # Pretend it's already connected.
         service.connections[conn.ident].add(conn)
     transports = {self.patch(conn, "transport") for conn in connections}
     yield service.stopService()
     self.assertThat(
         transports,
         AllMatch(
             AfterPreprocessing(attrgetter("loseConnection"),
                                MockCalledOnceWith())),
     )
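
A minimal, self-contained sketch of the core matcher used above (not from the original codebase): AllMatch(m) succeeds only when every element of an iterable matches m, and testtools matchers return None on success or a Mismatch describing the failure.

from testtools.matchers import AllMatch, GreaterThan

matcher = AllMatch(GreaterThan(0))
print(matcher.match([1, 2, 3]))   # None: every element matched
print(matcher.match([1, -2, 3]))  # a Mismatch for the failing element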
Example #2
    def test__read(self):
        origin = make_origin()

        data = [
            {
                "filename": make_name_without_spaces()
            },
            {
                "filename": make_name_without_spaces()
            },
        ]
        origin.Files._handler.read.return_value = data

        resources = origin.Files.read()
        self.assertEqual(2, len(resources))
        self.assertThat(resources, IsInstance(origin.Files))
        self.assertThat(resources, AllMatch(IsInstance(origin.File)))
        self.assertThat(
            resources,
            MatchesSetwise(*(MatchesStructure.byEquality(
                filename=entry["filename"]) for entry in data)),
        )
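
A hedged sketch of the setwise assertion above: MatchesSetwise pairs each element with exactly one of the given matchers, in any order, while MatchesStructure.byEquality compares named attributes with Equals. The File namedtuple is a stand-in, not the real origin.File type.

from collections import namedtuple
from testtools.matchers import MatchesSetwise, MatchesStructure

File = namedtuple("File", ["filename"])  # stand-in for origin.File
files = [File("a.txt"), File("b.txt")]
matcher = MatchesSetwise(
    MatchesStructure.byEquality(filename="b.txt"),
    MatchesStructure.byEquality(filename="a.txt"),
)
assert matcher.match(files) is None  # order does not matter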
Example #3
    def create_servers(self, rcs, num, wait_for=None):
        """
        Create some number of servers using just Nova, and wait until they
        are active.  This uses the same default server arguments as
        `create_group`.

        :param TestResources rcs: An instance of
            :class:`otter.integration.lib.resources.TestResources`
        :param int num: The number of servers to create.
        :param wait_for: The state to wait for on those servers; by default,
            waits just for them to be active.

        :return: an iterable of server details JSON of the created servers.
        """
        image_id = yield fetch_ubuntu_image_id(rcs, self.pool)
        as_args = create_scaling_group_dict(image_ref=image_id,
                                            flavor_ref=flavor_ref)
        server_args = as_args['launchConfiguration']['args']
        server_args['server']['name'] = "autogenerated-non-as-test-server"

        if wait_for is None:
            wait_for = ContainsDict({'status': Equals("ACTIVE")})

        server_ids = yield gatherResults(
            [create_server(rcs, self.pool, server_args) for _ in range(num)])

        self.test_case.addCleanup(delete_servers, server_ids, rcs, self.pool)

        servers = yield wait_for_servers(
            rcs,
            self.pool,
            # The list of active servers' ids has the created server ids
            AfterPreprocessing(
                lambda servers: [s for s in servers if s['id'] in server_ids],
                AllMatch(wait_for)))

        returnValue(
            [server for server in servers if server['id'] in server_ids])
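
A sketch of the filter-then-assert idiom passed to wait_for_servers above, with made-up server dicts: AfterPreprocessing transforms the matchee before handing it to an inner matcher.

from testtools.matchers import (
    AfterPreprocessing, AllMatch, ContainsDict, Equals)

servers = [
    {"id": 1, "status": "ACTIVE"},
    {"id": 2, "status": "BUILD"},
    {"id": 3, "status": "ACTIVE"},
]
created_ids = {1, 3}  # hypothetical ids of the servers we created
matcher = AfterPreprocessing(
    lambda all_servers: [s for s in all_servers if s["id"] in created_ids],
    AllMatch(ContainsDict({"status": Equals("ACTIVE")})),
)
assert matcher.match(servers) is None  # only the created servers are checked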
Example #4
    def test_gatherers_missing(self, introducer_config, storage_config):
        """
        If the log and stats gatherers are not given, the storage and
        introducer configurations are written with empty strings for
        these fields.
        """
        config = marshal_tahoe_configuration(
            introducer_pem=introducer_config["node_pem"],
            storage_pem=storage_config["node_pem"],
            storage_privkey=storage_config["node_privkey"],
            introducer_port=introducer_config["port"],
            storageserver_port=storage_config["port"],
            bucket_name=storage_config["bucket_name"],
            key_prefix=storage_config["key_prefix"],
            publichost=storage_config["publichost"],
            privatehost=storage_config["privatehost"],
            introducer_furl=storage_config["introducer_furl"],
            s3_access_key_id=storage_config["s3_access_key_id"],
            s3_secret_key=storage_config["s3_secret_key"],
            log_gatherer_furl=None,
            stats_gatherer_furl=None,
        )
        configure_tahoe({"introducer": config["introducer"]},
                        self.nodes.introducer.path)
        configure_tahoe({"storage": config["storage"]},
                        self.nodes.storage.path)

        config_files = [
            self.nodes.introducer.child(b"tahoe.cfg"),
            self.nodes.storage.child(b"tahoe.cfg"),
        ]
        self.assertThat(
            config_files,
            AllMatch(
                hasConfiguration({
                    ("node", "log_gatherer.furl", ""),
                    ("client", "stats_gatherer.furl", ""),
                })))
Example #5
def leases_current(relevant_storage_indexes, now, min_lease_remaining):
    """
    Return a matcher on a ``DummyStorageServer`` instance which matches
    servers for which the leases on the given storage indexes do not expire
    before ``min_lease_remaining``.
    """
    return AfterPreprocessing(
        # Get share stats for storage indexes we should have
        # visited and maintained.
        lambda storage_server: list(
            stat for (storage_index, stat) in storage_server.buckets.items()
            if storage_index in relevant_storage_indexes),
        AllMatch(
            AfterPreprocessing(
                # Lease expiration for anything visited must be
                # further in the future than min_lease_remaining,
                # either because it had time left or because we
                # renewed it.
                lambda share_stat: datetime.utcfromtimestamp(
                    share_stat.lease_expiration),
                GreaterThan(now + min_lease_remaining),
            ), ),
    )
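
The factory style above keeps assertions declarative and reusable; a reduced sketch with plain numbers standing in for lease-expiration datetimes:

from testtools.matchers import AllMatch, GreaterThan

def all_greater_than(threshold):
    """Match any iterable whose elements all exceed ``threshold``."""
    return AllMatch(GreaterThan(threshold))

assert all_greater_than(10).match([11, 12]) is None
assert all_greater_than(10).match([11, 9]) is not None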
Example #6
    def test__getConfiguration_returns_configuration_object(self):
        service = ntp.RegionNetworkTimeProtocolService(reactor)

        # Configure example time references.
        ntp_servers = {factory.make_name("ntp-server") for _ in range(5)}
        Config.objects.set_config("ntp_servers", " ".join(ntp_servers))

        # Put all addresses in the same space so they're mutually routable.
        space = factory.make_Space()
        # Populate the database with "this" region and an example peer.
        region, _, _ = make_region_with_address(space)
        self.useFixture(MAASIDFixture(region.system_id))
        peer, addr4, addr6 = make_region_with_address(space)

        observed = service._getConfiguration()
        self.assertThat(observed, IsInstance(ntp._Configuration))

        expected_references = Equals(frozenset(ntp_servers))
        expected_peers = AllMatch(ContainedBy({addr4.ip, addr6.ip}))

        self.assertThat(
            observed,
            MatchesStructure(references=expected_references,
                             peers=expected_peers))
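
A sketch of the structural check above, with a namedtuple standing in for ntp._Configuration: MatchesStructure accepts a matcher (not just a value) per attribute, so it composes with AllMatch and ContainedBy.

from collections import namedtuple
from testtools.matchers import (
    AllMatch, ContainedBy, Equals, MatchesStructure)

Config = namedtuple("Config", ["references", "peers"])  # stand-in type
observed = Config(references=frozenset({"ntp.example"}),
                  peers=["10.0.0.1", "10.0.0.2"])
matcher = MatchesStructure(
    references=Equals(frozenset({"ntp.example"})),
    peers=AllMatch(ContainedBy({"10.0.0.1", "10.0.0.2"})),
)
assert matcher.match(observed) is None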
Example #7
    def test_start_up_binds_first_of_real_endpoint_options(self):
        service = RegionService(sentinel.ipcWorker)

        # endpoint_1.listen(...) will bind to a random high-numbered port.
        endpoint_1 = TCP4ServerEndpoint(reactor, 0)
        # endpoint_2.listen(...), if attempted, will crash because only root
        # (or a user with explicit capabilities) can do stuff like that. It's
        # a reasonable assumption that the user running these tests is not
        # root, but we'll check the port number later too to be sure.
        endpoint_2 = TCP4ServerEndpoint(reactor, 1)

        service.endpoints = [[endpoint_1, endpoint_2]]

        yield service.startService()
        self.addCleanup(wait_for_reactor(service.stopService))

        # A single port has been bound.
        self.assertThat(service.ports, MatchesAll(
            HasLength(1), AllMatch(IsInstance(tcp.Port))))

        # The port is not listening on port 1; i.e. a belt-n-braces check that
        # endpoint_2 was not used.
        [port] = service.ports
        self.assertThat(port.getHost().port, Not(Equals(1)))
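
The "exactly one port, and all of them real ports" assertion above reduces to MatchesAll combining HasLength with AllMatch; a sketch with dummy data:

from testtools.matchers import AllMatch, HasLength, IsInstance, MatchesAll

ports = [8080]  # dummy stand-in for service.ports
matcher = MatchesAll(HasLength(1), AllMatch(IsInstance(int)))
assert matcher.match(ports) is None
assert matcher.match([8080, 8081]) is not None  # wrong length fails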
Example #8
    def test_unblinded_tokens_spent(
        self,
        logger,
        get_config,
        now,
        announcement,
        voucher,
        num_passes,
        public_key,
    ):
        """
        The ``ZKAPAuthorizerStorageServer`` returned by ``get_storage_client``
        spends unblinded tokens from the plugin database.
        """
        tempdir = self.useFixture(TempDir())
        node_config = get_config(
            tempdir.join(b"node"),
            b"tub.port",
        )

        store = VoucherStore.from_node_config(node_config, lambda: now)

        controller = PaymentController(
            store,
            DummyRedeemer(public_key),
            default_token_count=num_passes,
            num_redemption_groups=1,
            clock=Clock(),
        )
        # Get a token inserted into the store.
        redeeming = controller.redeem(voucher)
        self.assertThat(
            redeeming,
            succeeded(Always()),
        )

        storage_client = storage_server.get_storage_client(
            node_config,
            announcement,
            get_rref,
        )

        # None of the remote methods are implemented by our fake server and I
        # would like to continue to avoid having a real server in these
        # tests, at least until creating a real server doesn't involve so much
        # complex setup.  So avoid using any of the client APIs that make a
        # remote call ... which is all of them.
        pass_group = storage_client._get_passes(u"request binding message",
                                                num_passes)
        pass_group.mark_spent()

        # There should be no unblinded tokens left to extract.
        self.assertThat(
            lambda: storage_client._get_passes(u"request binding message", 1),
            raises(NotEnoughTokens),
        )

        messages = LoggedMessage.of_type(logger.messages, GET_PASSES)
        self.assertThat(
            messages,
            MatchesAll(
                HasLength(1),
                AllMatch(
                    AfterPreprocessing(
                        lambda logged_message: logged_message.message,
                        ContainsDict({
                            u"message":
                            Equals(u"request binding message"),
                            u"count":
                            Equals(num_passes),
                        }),
                    ), ),
            ),
        )
Example #9
    def test_complete(self, introducer_config, storage_config):
        """
        Introducer and storage configuration can be supplied via ``configure_tahoe``.
        """
        introducer_furl = introducer_config["introducer_furl"]

        config = marshal_tahoe_configuration(
            introducer_pem=introducer_config["node_pem"],
            storage_pem=storage_config["node_pem"],
            storage_privkey=storage_config["node_privkey"],
            introducer_port=introducer_config["port"],
            storageserver_port=storage_config["port"],
            bucket_name=storage_config["bucket_name"],
            key_prefix=storage_config["key_prefix"],
            publichost=storage_config["publichost"],
            privatehost=storage_config["privatehost"],
            introducer_furl=introducer_furl,
            s3_access_key_id=storage_config["s3_access_key_id"],
            s3_secret_key=storage_config["s3_secret_key"],
            log_gatherer_furl=introducer_config["log_gatherer_furl"],
            stats_gatherer_furl=introducer_config["stats_gatherer_furl"],
        )
        configure_tahoe({"introducer": config["introducer"]},
                        self.nodes.introducer.path)
        configure_tahoe({"storage": config["storage"]},
                        self.nodes.storage.path)

        intro_config_path = self.nodes.introducer.child(b"tahoe.cfg")
        storage_config_path = self.nodes.storage.child(b"tahoe.cfg")
        config_files = [intro_config_path, storage_config_path]

        # If the log and stats gatherers are given, the storage and
        # introducer configurations are written with those values for
        # those fields.
        self.expectThat(
            config_files,
            AllMatch(
                hasConfiguration({
                    ("node", "log_gatherer.furl",
                     introducer_config["log_gatherer_furl"]),
                    ("client", "stats_gatherer.furl",
                     introducer_config["stats_gatherer_furl"]),
                })))

        # The introducer furl in the introducer configuration is
        # written to the ``private/introducer.furl`` file in the
        # introducer's state/configuration directory and to the
        # storage node's configuration file.
        self.expectThat(
            self.nodes.introducer.descendant([b"private", b"introducer.furl"]),
            hasContents(introducer_furl),
        )
        tub_id, location_hints, name = decode_furl(introducer_furl)
        port = location_hints[0].split(":")[1]
        location_hints[:0] = [storage_config["privatehost"] + ":" + port]
        internal_introducer_furl = encode_furl(tub_id, location_hints, name)
        self.expectThat(
            storage_config_path,
            hasConfiguration({
                ("client", "introducer.furl", internal_introducer_furl),
            }),
        )
        self.expectThat(
            self.nodes.storage.child(b"announcement-seqnum"),
            hasContentsMatching(
                # The second hand could tick over between when the file is
                # written and when this test code runs.  In fact, it could
                # tick over multiple times... But the only way to really fix
                # that is to parameterize the clock and the structure of
                # configure_tahoe makes that tricky.  So just suppose that one
                # second is all the leeway we need to make this reliable.
                AfterPreprocessing(int, Not(LessThan(int(time() - 1)))), ),
        )
Example #10
 def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
     """Override testtools' version to prevent use of mappings."""
     self.assertThat((expected_seq, actual_seq),
                     AllMatch(Not(IsInstance(Mapping))),
                     "Mappings cannot be compared with assertItemsEqual")
     return super().assertItemsEqual(expected_seq, actual_seq, msg)
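
A sketch of the guard above: applying AllMatch to the two-tuple of arguments checks both sequences at once, and Not inverts the inner matcher.

from collections.abc import Mapping
from testtools.matchers import AllMatch, IsInstance, Not

guard = AllMatch(Not(IsInstance(Mapping)))
assert guard.match(([1, 2], [2, 1])) is None     # sequences pass
assert guard.match(({"a": 1}, [1])) is not None  # a mapping trips the guard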
Example #11
 def test_gen_configuration_options_for_setting(self):
     self.assertThat(
         config.gen_configuration_options_for_setting(),
         AllMatch(MatchesListwise([Not(Contains("_")),
                                   IsInstance(dict)]), ))
Example #12
            # ordinarily get squashed.
            return Failure(
                amp.UnknownRemoteError(
                    "%s: %s" %
                    (reflect.qual(error.type), reflect.safe_str(error.value))))

    d.addErrback(eb_massage_error)

    return d


are_valid_tls_parameters = MatchesDict({
    "tls_localCertificate":
    IsInstance(ssl.PrivateCertificate),
    "tls_verifyAuthorities":
    MatchesAll(IsInstance(Sequence), AllMatch(IsInstance(ssl.Certificate))),
})


class MockClusterToRegionRPCFixtureBase(fixtures.Fixture, metaclass=ABCMeta):
    """Patch in a stub region RPC implementation to enable end-to-end testing.

    This is an abstract base class. Derive concrete fixtures from this by
    implementing the `connect` method.
    """

    starting = None
    stopping = None

    def __init__(self, maas_url=None):
        self.maas_url = maas_url
Example #13
    def test_full_jitter(self, values):
        jittered = list(full_jitter(values))

        self.assertThat(jittered, AllMatch(IsInstance(float)))
        self.assertThat(jittered, AllMatch(MatchesAll(
            GreaterThanOrEqual(0.0), LessThan(10000.0))))
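
The range check above combines two bounds under MatchesAll. A sketch using only matchers certainly present in testtools, with Not(LessThan(lo)) standing in for "greater than or equal to lo":

from testtools.matchers import AllMatch, LessThan, MatchesAll, Not

in_range = AllMatch(MatchesAll(Not(LessThan(0.0)), LessThan(10000.0)))
assert in_range.match([0.0, 42.5, 9999.9]) is None
assert in_range.match([-0.1]) is not None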
Example #14
 def test_tftp_service(self):
     # A TFTP service is configured and added to the top-level service.
     interfaces = [factory.make_ipv4_address(), factory.make_ipv6_address()]
     self.patch(tftp_module, "get_all_interface_addresses",
                lambda: interfaces)
     example_root = self.make_dir()
     example_client_service = Mock()
     example_port = factory.pick_port()
     tftp_service = TFTPService(
         resource_root=example_root,
         client_service=example_client_service,
         port=example_port,
     )
     tftp_service.updateServers()
     # The "tftp" service is a multi-service containing UDP servers for
     # each interface defined by get_all_interface_addresses().
     self.assertIsInstance(tftp_service, MultiService)
     # There's also a TimerService that updates the servers every 45s.
     self.assertThat(
         tftp_service.refresher,
         MatchesStructure.byEquality(
             step=45,
             parent=tftp_service,
             name="refresher",
             call=(tftp_service.updateServers, (), {}),
         ),
     )
     expected_backend = MatchesAll(
         IsInstance(TFTPBackend),
         AfterPreprocessing(lambda backend: backend.base.path,
                            Equals(example_root)),
         AfterPreprocessing(
             lambda backend: backend.client_service,
             Equals(example_client_service),
         ),
     )
     expected_protocol = MatchesAll(
         IsInstance(TFTP),
         AfterPreprocessing(lambda protocol: protocol.backend,
                            expected_backend),
     )
     expected_server = MatchesAll(
         IsInstance(internet.UDPServer),
         AfterPreprocessing(lambda service: len(service.args), Equals(2)),
         AfterPreprocessing(
             lambda service: service.args[0],
             Equals(example_port)  # port
         ),
         AfterPreprocessing(
             lambda service: service.args[1],
             expected_protocol  # protocol
         ),
     )
     self.assertThat(tftp_service.getServers(), AllMatch(expected_server))
     # Only the interface used for each service differs.
     self.assertItemsEqual(
         [svc.kwargs for svc in tftp_service.getServers()],
         [{
             "interface": interface
         } for interface in interfaces],
     )
Example #15
 def test_list_live_nodes(self):
     """
     ``list_live_nodes`` returns an iterable of unicode values.
     """
     live_nodes = self.api.list_live_nodes()
     self.assertThat(live_nodes, AllMatch(IsInstance(unicode)))
Example #16
def StackHidden(is_hidden):
    return AllMatch(
        AfterPreprocessing(lambda module: safe_hasattr(module, '__unittest'),
                           Equals(is_hidden)))
Example #17
    def test_unblinded_tokens_extracted(
        self,
        logger,
        get_config,
        now,
        announcement,
        voucher,
        storage_index,
        renew_secret,
        cancel_secret,
        sharenums,
        size,
    ):
        """
        The ``ZKAPAuthorizerStorageServer`` returned by ``get_storage_client``
        extracts unblinded tokens from the plugin database.
        """
        tempdir = self.useFixture(TempDir())
        node_config = get_config(
            tempdir.join(b"node"),
            b"tub.port",
        )

        store = VoucherStore.from_node_config(node_config, lambda: now)
        # Give it enough for the allocate_buckets call below.
        expected_pass_cost = required_passes(store.pass_value,
                                             [size] * len(sharenums))
        # And few enough redemption groups given the number of tokens.
        num_redemption_groups = expected_pass_cost

        controller = PaymentController(
            store,
            DummyRedeemer(),
            default_token_count=expected_pass_cost,
            num_redemption_groups=num_redemption_groups,
        )
        # Get a token inserted into the store.
        redeeming = controller.redeem(voucher)
        self.assertThat(
            redeeming,
            succeeded(Always()),
        )

        storage_client = storage_server.get_storage_client(
            node_config,
            announcement,
            get_rref,
        )

        # For now, merely making the call spends the passes - regardless of
        # the ultimate success or failure of the operation.
        storage_client.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums,
            size,
            LocalReferenceable(None),
        )

        # There should be no unblinded tokens left to extract.
        self.assertThat(
            lambda: store.extract_unblinded_tokens(1),
            raises(NotEnoughTokens),
        )

        messages = LoggedMessage.of_type(logger.messages, GET_PASSES)
        self.assertThat(
            messages,
            MatchesAll(
                HasLength(1),
                AllMatch(
                    AfterPreprocessing(
                        lambda logged_message: logged_message.message,
                        ContainsDict({
                            u"message":
                            Equals(allocate_buckets_message(storage_index)),
                            u"count":
                            Equals(expected_pass_cost),
                        }),
                    ), ),
            ),
        )
Example #18
    def test_complete(self, introducer_config, storage_config):
        """
        Introducer and storage configuration can be supplied via ``configure_tahoe``.
        """
        introducer_furl = introducer_config["introducer_furl"]

        config = marshal_tahoe_configuration(
            introducer_pem=introducer_config["node_pem"],
            storage_pem=storage_config["node_pem"],
            storage_privkey=storage_config["node_privkey"],
            introducer_port=introducer_config["port"],
            storageserver_port=storage_config["port"],
            bucket_name=storage_config["bucket_name"],
            publichost=storage_config["publichost"],
            privatehost=storage_config["privatehost"],
            introducer_furl=introducer_furl,
            s3_access_key_id=storage_config["s3_access_key_id"],
            s3_secret_key=storage_config["s3_secret_key"],
            log_gatherer_furl=introducer_config["log_gatherer_furl"],
            stats_gatherer_furl=introducer_config["stats_gatherer_furl"],
        )
        configure_tahoe({"introducer": config["introducer"]},
                        self.nodes.introducer.path)
        configure_tahoe({"storage": config["storage"]},
                        self.nodes.storage.path)

        intro_config_path = self.nodes.introducer.child(b"tahoe.cfg")
        storage_config_path = self.nodes.storage.child(b"tahoe.cfg")
        config_files = [intro_config_path, storage_config_path]

        # If the log and stats gatherers are given, the storage and
        # introducer configurations are written with those values for
        # those fields.
        self.expectThat(
            config_files,
            AllMatch(
                hasConfiguration({
                    ("node", "log_gatherer.furl",
                     introducer_config["log_gatherer_furl"]),
                    ("client", "stats_gatherer.furl",
                     introducer_config["stats_gatherer_furl"]),
                })))

        # The introducer furl in the introducer configuration is
        # written to the ``private/introducer.furl`` file in the
        # introducer's state/configuration directory and to the
        # storage node's configuration file.
        self.expectThat(
            self.nodes.introducer.descendant([b"private", b"introducer.furl"]),
            hasContents(introducer_furl),
        )
        tub_id, location_hints, name = decode_furl(introducer_furl)
        port = location_hints[0].split(":")[1]
        location_hints[:0] = [storage_config["privatehost"] + ":" + port]
        internal_introducer_furl = encode_furl(tub_id, location_hints, name)
        self.expectThat(
            storage_config_path,
            hasConfiguration({
                ("client", "introducer.furl", internal_introducer_furl),
            }),
        )
Example #19
 def test_constantly(self):
     """
     Return the initial constant value regardless of any arguments passed.
     """
     f = constantly(42)
     self.assertThat([f(), f(1), f(1, 2), f(1, b=2)], AllMatch(Equals(42)))
Example #20
    def test_list(self, collective_contents, rw_collective_dircap):
        """
        ``IParticipants.list`` returns a ``Deferred`` that fires with a list of
        ``IParticipant`` providers with names matching the names of the child
        directories in the collective.
        """
        # The collective can't be anyone's DMD.
        assume(rw_collective_dircap not in collective_contents.values())

        # Pick someone in the collective to be us.
        author = sorted(collective_contents)[0]
        upload_dircap = collective_contents[author]

        root = create_fake_tahoe_root()
        http_client = create_tahoe_treq_client(root)
        tahoe_client = TahoeClient(
            DecodedURL.from_text(u"http://example.invalid./"),
            http_client,
        )

        root._uri.data[
            rw_collective_dircap.danger_real_capability_string()] = dumps([
                u"dirnode",
                {
                    u"children": {
                        name: format_filenode(cap, {})
                        for (name, cap) in collective_contents.items()
                    }
                },
            ]).encode("utf8")

        root._uri.data[upload_dircap.danger_real_capability_string()] = dumps([
            u"dirnode",
            {
                u"children": {}
            },
        ]).encode("utf8")

        participants = participants_from_collective(
            rw_collective_dircap,
            upload_dircap,
            tahoe_client,
        )

        self.assertThat(
            participants.list(),
            succeeded(
                MatchesAll(
                    IsInstance(list),
                    AllMatch(provides(IParticipant), ),
                    AfterPreprocessing(
                        lambda ps: sorted(p.name for p in ps),
                        Equals(sorted(collective_contents)),
                    ),
                    AfterPreprocessing(
                        # There should be exactly one participant that signals
                        # it is us.  We know it will be there because we
                        # selected our dircap from among all those DMDs in the
                        # collective at the top.
                        lambda ps: len({p
                                        for p in ps if p.is_self}),
                        Equals(1),
                    )), ),
        )
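
A reduced sketch of the Deferred assertion style above: succeeded, from testtools.twistedsupport, adapts an ordinary matcher to a Deferred that has already fired successfully.

from twisted.internet.defer import succeed
from testtools.matchers import AllMatch, IsInstance
from testtools.twistedsupport import succeeded

d = succeed(["alice", "bob"])  # stands in for participants.list()
assert succeeded(AllMatch(IsInstance(str))).match(d) is None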
Example #21
async def test_list_tahoe_objects(request, reactor, tahoe_venv, base_dir,
                                  introducer_furl, flog_gatherer):
    """
    The 'tahoe-objects' API works concurrently
    (see also ticket #570)
    """

    yolandi = await util.MagicFolderEnabledNode.create(
        reactor,
        tahoe_venv,
        request,
        base_dir,
        introducer_furl,
        flog_gatherer,
        name="yolandi",
        tahoe_web_port="tcp:9983:interface=localhost",
        magic_folder_web_port="tcp:19983:interface=localhost",
        storage=True,
    )
    number_of_folders = 20
    folder_names = ["workstuff{}".format(n) for n in range(number_of_folders)]

    # make a bunch of folders
    for folder_name in folder_names:
        magic_dir = FilePath(base_dir).child(folder_name)
        magic_dir.makedirs()

        await yolandi.client.add_folder(
            folder_name,
            author_name="yolandi",
            local_path=magic_dir,
            poll_interval=10,
            scan_interval=10,
        )

    # concurrently put 1 file into each folder and immediately create
    # a snapshot for it via an API call
    files = []
    for folder_num, folder_name in enumerate(folder_names):
        magic_dir = FilePath(base_dir).child(folder_name)
        with magic_dir.child("a_file_name").open("w") as f:
            f.write("data {:02d}\n".format(folder_num).encode("utf8") * 100)
        files.append(yolandi.client.add_snapshot(
            folder_name,
            "a_file_name",
        ))

    # Each folder should produce [416, 800, 190] for the sizes -- this
    # is (Snapshot-size, content-size and metadata-size) for the one
    # file we've put in.  .. except the first one depends on
    # Snapshot's implementation and the last one depends on metadata
    # details, so we only want to assert that they're all the same.
    # expected_results = [[416, 800, 190]] * number_of_folders

    # The "if res else None" clauses below are because we use this in
    # the loop (to potentially succeed early), and some of the results
    # may be empty for a few iterations / seconds
    matches_expected_results = MatchesAll(
        # this says that all the content capabilities (2nd item)
        # should be size 800
        AfterPreprocessing(
            lambda results: [res[1] if res else None for res in results],
            AllMatch(Equals(800))),
        # this says that there should be exactly one thing in the set
        # of all the pairs of the Snapshot (1st item) and metadata
        # (3rd item) sizes .. that is, that all the Snapshot sizes are
        # the same and all the metadata sizes are the same.
        AfterPreprocessing(
            lambda results: {(res[0], res[2]) if res else None
                             for res in results}, HasLength(1)))

    # try for 15 seconds to get what we expect. we're waiting for each
    # of the magic-folders to upload their single "a_file_name" items
    # so that they each have one Snapshot in Tahoe-LAFS
    for _ in range(15):
        await util.twisted_sleep(reactor, 1)
        results = await DeferredList([
            yolandi.client.tahoe_objects(folder_name)
            for folder_name in folder_names
        ])
        # if any of the queries fail, we fail the test
        errors = [fail for ok, fail in results if not ok]
        assert errors == [], "At least one /tahoe-objects query failed"

        actual_results = [result for ok, result in results if ok]
        # exit early if we'll pass the test
        if matches_expected_results.match(actual_results) is None:
            break

    # check the results
    assert_that(actual_results, matches_expected_results)
Example #22
    def test_add(self, collective_contents, rw_collective_dircap):
        """
        ``IParticipants.add`` correctly adds a new, previously unknown
        participant.
        """
        # The collective can't be anyone's DMD.
        assume(rw_collective_dircap not in collective_contents.values())

        # Pick someone in the collective to be us.
        author = sorted(collective_contents)[0]
        upload_dircap = collective_contents[author]
        upload_dircap_ro = upload_dircap.to_readonly()

        root = create_fake_tahoe_root()
        http_client = create_tahoe_treq_client(root)
        tahoe_client = TahoeClient(
            DecodedURL.from_text(u"http://example.invalid./"),
            http_client,
        )

        root._uri.data[
            rw_collective_dircap.danger_real_capability_string()] = dumps([
                u"dirnode",
                {
                    u"children": {
                        normalize(author):
                        format_filenode(upload_dircap_ro, {}),
                    },
                },
            ]).encode("utf8")

        root._uri.data[upload_dircap.danger_real_capability_string()] = dumps([
            u"dirnode",
            {
                u"children": {}
            },
        ]).encode("utf8")

        participants = participants_from_collective(
            rw_collective_dircap,
            upload_dircap,
            tahoe_client,
        )

        # add all the "other" participants using .add() API
        for name, dircap in collective_contents.items():
            if name == author:
                continue
            participants.add(
                RemoteAuthor(name, VerifyKey(os.urandom(32))),
                dircap.to_readonly(),
            )

        # confirm we added all the right participants by using the
        # list() API
        self.assertThat(
            participants.list(),
            succeeded(
                MatchesAll(
                    IsInstance(list),
                    AllMatch(provides(IParticipant), ),
                    AfterPreprocessing(
                        lambda ps: sorted(p.name for p in ps),
                        Equals(sorted(map(normalize, collective_contents))),
                    ),
                    AfterPreprocessing(
                        lambda ps: sorted(p.dircap for p in ps),
                        Equals(
                            sorted(c.to_readonly()
                                   for c in collective_contents.values())),
                    ),
                    AfterPreprocessing(
                        # There should be exactly one participant that signals
                        # it is us.  We know it will be there because we
                        # selected our dircap from among all those DMDs in the
                        # collective at the top.
                        lambda ps: len({p
                                        for p in ps if p.is_self}),
                        Equals(1),
                    )), ),
        )
Example #23
                amp.UnknownRemoteError(
                    "%s: %s" %
                    (reflect.qual(error.type), reflect.safe_str(error.value))))

    d.addErrback(eb_massage_error)

    return d


are_valid_tls_parameters = MatchesDict({
    "tls_localCertificate":
    IsInstance(ssl.PrivateCertificate),
    "tls_verifyAuthorities":
    MatchesAll(
        IsInstance(collections.abc.Sequence),
        AllMatch(IsInstance(ssl.Certificate)),
    ),
})


class MockClusterToRegionRPCFixtureBase(fixtures.Fixture, metaclass=ABCMeta):
    """Patch in a stub region RPC implementation to enable end-to-end testing.

    This is an abstract base class. Derive concrete fixtures from this by
    implementing the `connect` method.
    """

    starting = None
    stopping = None

    def __init__(self, maas_url=None):
Example #24
 def test__white_list_is_a_non_empty_set_of_file_names(self):
     self.assertThat(self.script.whitelist, IsInstance(set))
     self.assertThat(self.script.whitelist, Not(HasLength(0)))
     self.assertThat(self.script.whitelist, AllMatch(IsInstance(str)))
Example #25
 def test_allowed_list_is_a_non_empty_set_of_file_names(self):
     self.assertThat(self.script.DELETABLE_FILES, IsInstance(set))
     self.assertThat(self.script.DELETABLE_FILES, Not(HasLength(0)))
     self.assertThat(self.script.DELETABLE_FILES, AllMatch(IsInstance(str)))
Example #26
 def test_setting_defined(self):
     self.assertThat(settings.PRESEED_TEMPLATE_LOCATIONS,
                     AllMatch(IsInstance(unicode)))
Example #27
 def test_createForTeams(self):
     # Test createForTeams.
     teams = [self.factory.makeTeam()]
     policies = getUtility(IAccessPolicySource).createForTeams(teams)
     self.assertThat(policies, AllMatch(Provides(IAccessPolicy)))
     self.assertContentEqual(teams, [policy.person for policy in policies])
Example #28
    def test_duplicated_calls(self):
        """
        Verify that if every call to the :class:`GCEOperations` is
        duplicated that we handle the errors correctly.

        This should force some specific scheduling situations that resemble
        race conditions with another agent trying to converge to the same
        state, or a condition where the dataset agent as rebooted after a crash
        that happened in the middle of an :class:`IBlockDeviceAPI` call.

        In these situations we should verify that the second call to many of
        the underlying atomic methods would result in the correct underlying
        :class:`VolumeException`.
        """
        actual_api = gceblockdeviceapi_for_test(self)
        operations = actual_api._operations
        api = actual_api.set(
            '_operations',
            repeat_call_proxy_for(IGCEOperations, operations)
        )

        dataset_id = uuid4()

        # There is no :class:`VolumeException` for creating an already created
        # volume. Thus, GCE just raises its own custom exception in that case.
        self.assertThat(
            lambda: api.create_volume(
                dataset_id=dataset_id,
                size=get_minimum_allocatable_size()
            ),
            Raises(MatchesException(GCEVolumeException))
        )

        volumes = api.list_volumes()

        self.assertThat(
            volumes,
            AnyMatch(MatchesStructure(dataset_id=Equals(dataset_id)))
        )
        volume = next(v for v in volumes if v.dataset_id == dataset_id)

        compute_instance_id = api.compute_instance_id()

        self.assertThat(
            lambda: api.attach_volume(
                blockdevice_id=volume.blockdevice_id,
                attach_to=compute_instance_id,
            ),
            Raises(MatchesException(AlreadyAttachedVolume))
        )

        self.assertThat(
            api.get_device_path(volume.blockdevice_id).path,
            Contains('/dev/sd')
        )

        # Detach volume does not error out because we have cleanup code in our
        # acceptance tests that assumes that calls to detach_volume while the
        # volume is already being detached do not error out, and instead block
        # until the volume is detached.
        #
        # With the repeat call proxy, this manifests as neither call reporting
        # the unattached volume, but both calls merely block until the
        # blockdevice is detached.
        api.detach_volume(
            blockdevice_id=volume.blockdevice_id,
        )

        self.assertThat(
            lambda: api.destroy_volume(
                blockdevice_id=volume.blockdevice_id,
            ),
            Raises(MatchesException(UnknownVolume))
        )

        self.assertThat(
            api.list_volumes(),
            AllMatch(Not(MatchesStructure(dataset_id=Equals(dataset_id))))
        )
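
The list_volumes assertions above use both quantifiers: AnyMatch is the dual of AllMatch, succeeding when at least one element matches. A sketch with dummy volume ids:

from testtools.matchers import AllMatch, AnyMatch, Equals, Not

volumes = ["vol-a", "vol-b"]
assert AnyMatch(Equals("vol-a")).match(volumes) is None          # present
assert AllMatch(Not(Equals("vol-gone"))).match(volumes) is None  # absent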
Example #29
 def test_every_event_has_details(self):
     all_events = map_enum(EVENT_TYPES)
     self.assertItemsEqual(all_events.values(), EVENT_DETAILS.keys())
     self.assertThat(EVENT_DETAILS.values(),
                     AllMatch(IsInstance(EventDetail)))
Example #30
 def test_POWER_TYPE_PARAMETERS_DictCharField_objects_have_skip_check(self):
     self.assertThat(POWER_TYPE_PARAMETERS.values(),
                     AllMatch(MatchesStructure(skip_check=Equals(True))))
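
A final sketch of the pattern above: applying AllMatch to a mapping's values verifies every entry's structure in one assertion. The Param namedtuple is a stand-in for the real power-parameter objects.

from collections import namedtuple
from testtools.matchers import AllMatch, Equals, MatchesStructure

Param = namedtuple("Param", ["skip_check"])  # stand-in type
params = {"ipmi": Param(True), "amt": Param(True)}
matcher = AllMatch(MatchesStructure(skip_check=Equals(True)))
assert matcher.match(params.values()) is None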