Example #1
 def test_raise_on_general_mismatch(self):
     matcher = AfterPreprocessing(str, Equals('test'))
     value_error = ValueError('mismatch')
     try:
         with ExpectedException(ValueError, matcher):
             raise value_error
     except AssertionError as e:
         self.assertEqual(matcher.match(value_error).describe(), str(e))
     else:
         self.fail('AssertionError not raised.')
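A note on the matcher used above: AfterPreprocessing applies the callable first, then delegates to the wrapped matcher. A minimal runnable sketch, assuming only that testtools is installed:

from testtools.matchers import AfterPreprocessing, Equals

matcher = AfterPreprocessing(str, Equals('test'))
assert matcher.match(ValueError('test')) is None  # match() returns None on success
mismatch = matcher.match(ValueError('other'))     # ...and a Mismatch object on failure
print(mismatch.describe())                        # the text the test compares against str(e)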
Example #2
    def test_captures_errors(self):
        # _ErrorObserver stores all errors logged while it is active.
        from testtools.twistedsupport._runtest import (_ErrorObserver,
                                                       _LogObserver,
                                                       _NoTwistedLogObservers)

        log_observer = _LogObserver()
        error_observer = _ErrorObserver(log_observer)
        exception = ValueError('bar')

        class SomeTest(TestCase):
            def test_something(self):
                # Temporarily suppress default log observers to avoid spewing
                # to stderr.
                self.useFixture(_NoTwistedLogObservers())
                self.useFixture(error_observer)
                log.msg('foo')
                log.err(exception)

        SomeTest('test_something').run()
        self.assertThat(
            error_observer.flush_logged_errors(),
            MatchesListwise(
                [AfterPreprocessing(lambda x: x.value, Equals(exception))]))
Example #3
 def test_run_failed(self):
     # A failed run sets the job status to FAILED and records the error
     # message.
     distroseries, processors = self.makeSeriesAndProcessors(
         ["avr2001", "sparc64", "x32"])
     [git_ref] = self.factory.makeGitRefs()
     snap = self.factory.makeSnap(
         git_ref=git_ref, distroseries=distroseries, processors=processors)
     expected_date_created = get_transaction_timestamp(IStore(snap))
     job = SnapRequestBuildsJob.create(
         snap, snap.registrant, distroseries.main_archive,
         PackagePublishingPocket.RELEASE, {"core": "stable"})
     self.useFixture(GitHostingFixture()).getBlob.failure = (
         CannotParseSnapcraftYaml("Nonsense on stilts"))
     with dbuser(config.ISnapRequestBuildsJobSource.dbuser):
         JobRunner([job]).runAll()
     now = get_transaction_timestamp(IStore(snap))
     [notification] = self.assertEmailQueueLength(1)
     self.assertThat(dict(notification), ContainsDict({
         "From": Equals(config.canonical.noreply_from_address),
         "To": Equals(format_address_for_person(snap.registrant)),
         "Subject": Equals(
             "Launchpad error while requesting builds of %s" % snap.name),
         }))
     self.assertEqual(
         "Launchpad encountered an error during the following operation: "
         "requesting builds of %s.  Nonsense on stilts" % snap.name,
         notification.get_payload(decode=True))
     self.assertThat(job, MatchesStructure(
         job=MatchesStructure.byEquality(status=JobStatus.FAILED),
         date_created=Equals(expected_date_created),
         date_finished=MatchesAll(
             GreaterThan(expected_date_created), LessThan(now)),
         error_message=Equals("Nonsense on stilts"),
         builds=AfterPreprocessing(set, MatchesSetwise())))
Example #4
    def test_add_folder(self):
        d = self.service.create_folder(
            u"test",
            u"alice",
            self.magic_dir,
            60,
            60,
        )
        self.assertThat(d, succeeded(Always()))

        # confirm that we've added a magic-folder
        mf = self.config.get_magic_folder(u"test")

        # check the contents of the collective (should have alice's
        # read-only capability)
        collective_d = self.tahoe_client.directory_data(mf.collective_dircap)
        self.assertThat(collective_d, succeeded(Always()))

        metadata = collective_d.result

        # the collective should be a mutable directory and have "alice"
        # as a child pointing to a *read-only* directory.

        def extract_metadata(child_info):
            return child_info[1]  # ["dirnode", metadata]

        self.assertThat(
            metadata,
            ContainsDict({
                u"children":
                ContainsDict({
                    u"alice":
                    AfterPreprocessing(extract_metadata,
                                       Not(Contains("rw_uri")))
                }),
            }))
Example #5
 def test_directory_data_wrong_cap_type(self):
     """
     ``directory_data`` returns a ``Deferred`` that fails when given a
     non-directory capability.
     """
     self.setup_example()
     data = dumps([
         "filenode", {
             "mutable": False,
             "verify_uri":
             "URI:CHK-Verifier:vi5xqgkyo6ns46ksq44mzqy42u:lrimqiz4fyvqhfruf25rt56ncdsqojlu66hih3lkeen4lh3vgvjq:1:5:6798975",
             "format": "CHK",
             "ro_uri":
             "URI:CHK:lfnzol6woyz42falzttgxrvth4:lrimqiz4fyvqhfruf25rt56ncdsqojlu66hih3lkeen4lh3vgvjq:1:5:6798975",
             "size": 6798975
         }
     ])
     _, cap = self.root.add_data("URI:CHK:", data)
     self.assertThat(
         self.tahoe_client.directory_data(cap),
         failed(
             AfterPreprocessing(
                 lambda fail: str(fail.value),
                 Equals("{} is not a directory-capability".format(cap)))))
Example #6
    def test_stat_shares_immutable_wrong_version(self, storage_index, sharenum,
                                                 size, clock, leases, version):
        """
        If a share file with an unexpected version is found, ``stat_shares``
        declines to offer a result (by raising ``ValueError``).
        """
        assume(version != 1)

        # Hypothesis causes our storage server to be used many times.  Clean
        # up between iterations.
        cleanup_storage_server(self.anonymous_storage_server)

        sharedir = FilePath(
            self.anonymous_storage_server.sharedir
        ).preauthChild(
            # storage_index_to_dir likes to return multiple segments
            # joined by pathsep
            storage_index_to_dir(storage_index), )
        sharepath = sharedir.child(u"{}".format(sharenum))
        sharepath.parent().makedirs()
        whitebox_write_sparse_share(
            sharepath,
            version=version,
            size=size,
            leases=leases,
            now=clock.seconds(),
        )

        self.assertThat(
            self.client.stat_shares([storage_index]),
            failed(
                AfterPreprocessing(
                    lambda f: f.value,
                    IsInstance(ValueError),
                ), ),
        )
Example #7
 def test_when_certs_valid_certs_expired(self, fixture):
     """
     The deferred returned by ``when_certs_valid`` only fires once all
     panicking and expired certs have been renewed.
     """
     with fixture:
         service = fixture.service
         d = service.when_certs_valid()
         self.assertThat(d, has_no_result())
         service.startService()
         self.assertThat(d, succeeded(Is(None)))
         max_expiry = fixture.now + service.panic_interval
         self.assertThat(
             fixture.cert_store.as_dict(),
             succeeded(
                 AfterPreprocessing(
                     methodcaller('values'),
                     AllMatch(
                         AllMatch(
                             _match_certificate(
                                 MatchesStructure(
                                     not_valid_after=GreaterThan(
                                         max_expiry))))))))
         self.assertThat(fixture.responder.challenges, HasLength(0))
Example #8
    def test_delete_one_local_snapshot(self):
        """
        Given a chain of three snapshots deleting the oldest one results
        in a proper chain of two snapshots.
        """
        # we have a "leaf" snapshot "snap2" with parent "snap1" and
        # grandparent "snap0" snap2->snap1->snap0

        # pretend we uploaded the oldest ancestor, the only one we
        # _can_ upload (semantically)
        remote0 = RemoteSnapshot(
            self.snap0.relpath,
            self.snap0.author,
            {
                "relpath": self.snap0.relpath,
                "modification_time": 1234,
            },
            capability=random_immutable(directory=True),
            parents_raw=[],
            content_cap=random_immutable(),
            metadata_cap=random_immutable(),
        )

        self.db.delete_local_snapshot(self.snap0, remote0)

        # we should still have a 3-snapshot chain, but there should be
        # only 2 local snapshots and one remote

        # start with the "leaf", the most-local snapshot
        dbsnap2 = self.db.get_local_snapshot(self.snap0.relpath)
        self.assertThat(dbsnap2.content_path,
                        Equals(FilePath("snap2 content")))
        self.assertThat(
            dbsnap2.parents_local,
            AfterPreprocessing(len, Equals(1)),
        )
        self.assertThat(
            dbsnap2.parents_remote,
            AfterPreprocessing(len, Equals(0)),
        )

        # the leaf had just one parent, which is local -- examine it
        dbsnap1 = dbsnap2.parents_local[0]
        self.assertThat(dbsnap1.content_path,
                        Equals(FilePath("snap1 content")))
        self.assertThat(
            dbsnap1.parents_local,
            AfterPreprocessing(len, Equals(0)),
        )
        self.assertThat(
            dbsnap1.parents_remote,
            AfterPreprocessing(len, Equals(1)),
        )

        # the "middle" parent (above) has no local parents and one
        # remote, which is correct .. the final parent should be the
        # one we replaced the local with.
        self.assertThat(
            dbsnap1.parents_remote[0],
            Equals(remote0.capability),
        )
Example #9
    def test_rejected_passes_reported(self, storage_index, renew_secret,
                                      cancel_secret, sharenums, size, data):
        """
        Any passes rejected by the storage server are reported with a
        ``MorePassesRequired`` exception sent to the client.
        """
        # Hypothesis causes our storage server to be used many times.  Clean
        # up between iterations.
        cleanup_storage_server(self.anonymous_storage_server)

        num_passes = required_passes(self.pass_value, [size] * len(sharenums))

        # Pick some passes to mess with.
        bad_pass_indexes = data.draw(
            lists(
                integers(
                    min_value=0,
                    max_value=num_passes - 1,
                ),
                min_size=1,
                max_size=num_passes,
                unique=True,
            ), )

        # Make some passes with a key untrusted by the server.
        bad_passes = get_passes(
            allocate_buckets_message(storage_index),
            len(bad_pass_indexes),
            random_signing_key(),
        )

        # Make some passes with a key trusted by the server.
        good_passes = get_passes(
            allocate_buckets_message(storage_index),
            num_passes - len(bad_passes),
            self.signing_key,
        )

        all_passes = []
        for i in range(num_passes):
            if i in bad_pass_indexes:
                all_passes.append(bad_passes.pop())
            else:
                all_passes.append(good_passes.pop())

        # Sanity checks
        self.assertThat(bad_passes, Equals([]))
        self.assertThat(good_passes, Equals([]))
        self.assertThat(all_passes, HasLength(num_passes))

        self.assertThat(
            # Bypass the client handling of MorePassesRequired so we can see
            # it.
            self.local_remote_server.callRemote(
                "allocate_buckets",
                list(pass_.pass_text.encode("ascii") for pass_ in all_passes),
                storage_index,
                renew_secret,
                cancel_secret,
                sharenums,
                size,
                canary=self.canary,
            ),
            failed(
                AfterPreprocessing(
                    lambda f: f.value,
                    Equals(
                        MorePassesRequired(
                            valid_count=num_passes - len(bad_pass_indexes),
                            required_count=num_passes,
                            signature_check_failed=bad_pass_indexes,
                        ), ),
                ), ),
        )
Example #10
    def test_client_cannot_control_lease_behavior(
            self, storage_index, secrets, test_and_write_vectors_for_shares):
        """
        If the client passes ``renew_leases`` to *slot_testv_and_readv_and_writev*
        it fails with ``TypeError``, no lease is updated, and no share data is
        written.
        """
        # First, tell the client to let us violate the protocol.  It is the
        # server's responsibility to defend against this attack.
        self.local_remote_server.check_args = False

        # The nice Python API doesn't let you do this so we drop down to
        # the layer below.  We also use positional arguments because they
        # transit the network differently from keyword arguments.  Yay.
        d = self.local_remote_server.callRemote(
            "slot_testv_and_readv_and_writev",
            # passes
            _encode_passes(
                self.pass_factory.get(
                    slot_testv_and_readv_and_writev_message(storage_index),
                    1,
                ), ),
            # storage_index
            storage_index,
            # secrets
            secrets,
            # tw_vectors
            {
                k: v.for_call()
                for (k, v) in test_and_write_vectors_for_shares.items()
            },
            # r_vector
            [],
            # add_leases
            True,
        )

        # The operation should fail.
        self.expectThat(
            d,
            failed(
                AfterPreprocessing(
                    lambda f: f.value,
                    IsInstance(TypeError),
                ), ),
        )

        # There should be no shares at the given storage index.
        d = self.client.slot_readv(
            storage_index,
            # Surprise.  shares=None means all shares.
            shares=None,
            r_vector=list(
                list(map(write_vector_to_read_vector, vector.write_vector))
                for vector in test_and_write_vectors_for_shares.values()),
        )
        self.expectThat(
            d,
            succeeded(Equals({}), ),
        )

        # And there should be no leases on those non-shares.
        self.expectThat(
            list(self.anonymous_storage_server.get_slot_leases(storage_index)),
            Equals([]),
        )
Example #11
    def test_emailed_introducer_furl(
            self,
            customer_email,
            customer_id,
            subscription_id,
            old_secrets,
            introducer_port_number,
            storage_port_number,
    ):
        """
        The email signup mechanism sends an activation email including an
        introducer furl which points at the server and port identified by the
        activated subscription detail object.
        """
        assume(introducer_port_number != storage_port_number)

        emails = []

        def provision_subscription(
                smclient, subscription,
        ):
            return succeed(
                attr.assoc(
                    subscription,
                    introducer_port_number=introducer_port_number,
                    storage_port_number=storage_port_number,
                    oldsecrets=old_secrets,
                ),
            )

        def send_signup_confirmation(
                customer_email, external_introducer_furl, customer_keyinfo, stdout, stderr,
        ):
            emails.append((customer_email, "success", external_introducer_furl))
            return succeed(None)

        def send_notify_failure(
                reason, customer_email, logfilename, stdout, stderr,
        ):
            emails.append((customer_email, "failure", reason))
            return succeed(None)

        plan_identifier = u"foobar"

        reactor = object()
        signup = get_email_signup(
            reactor,
            get_provisioner(
                reactor,
                URL.fromText(u"http://subscription-manager/"),
                provision_subscription,
            ),
            send_signup_confirmation,
            send_notify_failure,
        )
        d = signup.signup(customer_email, customer_id, subscription_id, plan_identifier)
        self.successResultOf(d)

        [(recipient, result, rest)] = emails
        self.expectThat(recipient, Equals(customer_email))
        self.expectThat(result, Equals("success"))

        def get_hint_port(furl):
            tub_id, location_hints, name = decode_furl(furl)
            host, port = location_hints[0].split(u":")
            return int(port)

        self.expectThat(
            rest,
            AfterPreprocessing(
                get_hint_port,
                Equals(introducer_port_number),
            ),
        )
Example #12
 def test__representation(self):
     matcher = AfterPreprocessing(set, Equals({1, 2, "three"}))
     self.assertThat(
         Matches(matcher),
         AfterPreprocessing(repr, Equals("<Matches " + str(matcher) + ">")),
     )
Example #13
 def matches_tokens(num_passes, group):
     return AfterPreprocessing(
         # They've been reset so we should be able to re-get them.
         lambda store: store.get_unblinded_tokens(len(group.passes)),
         Equals(group.unblinded_tokens),
     )
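matches_tokens closes over objects from its own module (the token store and pass group), so it is not runnable in isolation; the underlying pattern, preprocessing a stateful object with a lambda, can be sketched with a hypothetical stand-in:

from testtools.matchers import AfterPreprocessing, Equals

class FakeStore:
    # Hypothetical stand-in for the real unblinded-token store.
    def get_unblinded_tokens(self, count):
        return list(range(count))

matcher = AfterPreprocessing(
    lambda store: store.get_unblinded_tokens(3), Equals([0, 1, 2]))
assert matcher.match(FakeStore()) is None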
Example #14
 def assertScriptsMatch(self, *matchers):
     self.assertThat(parallel.test, MockCalledOnceWith(ANY, ANY, ANY))
     suite, results, processes = parallel.test.call_args[0]
     self.assertThat(suite,
                     AfterPreprocessing(list, MatchesSetwise(*matchers)))
Example #15
def MatchesCIDRs(*cidrs):
    return Matches(AfterPreprocessing(CIDRSet, Equals(CIDRSet(cidrs))))
Example #16
async def test_list_tahoe_objects(request, reactor, tahoe_venv, base_dir,
                                  introducer_furl, flog_gatherer):
    """
    The 'tahoe-objects' API works concurrently
    (see also ticket #570)
    """

    yolandi = await util.MagicFolderEnabledNode.create(
        reactor,
        tahoe_venv,
        request,
        base_dir,
        introducer_furl,
        flog_gatherer,
        name="yolandi",
        tahoe_web_port="tcp:9983:interface=localhost",
        magic_folder_web_port="tcp:19983:interface=localhost",
        storage=True,
    )
    number_of_folders = 20
    folder_names = ["workstuff{}".format(n) for n in range(number_of_folders)]

    # make a bunch of folders
    for folder_name in folder_names:
        magic_dir = FilePath(base_dir).child(folder_name)
        magic_dir.makedirs()

        await yolandi.client.add_folder(
            folder_name,
            author_name="yolandi",
            local_path=magic_dir,
            poll_interval=10,
            scan_interval=10,
        )

    # concurrently put 1 file into each folder and immediately create
    # a snapshot for it via an API call
    files = []
    for folder_num, folder_name in enumerate(folder_names):
        magic_dir = FilePath(base_dir).child(folder_name)
        with magic_dir.child("a_file_name").open("w") as f:
            f.write("data {:02d}\n".format(folder_num).encode("utf8") * 100)
        files.append(yolandi.client.add_snapshot(
            folder_name,
            "a_file_name",
        ))

    # Each folder should produce [416, 800, 190] for the sizes -- this
    # is (Snapshot-size, content-size and metadata-size) for the one
    # file we've put in.  .. except the first one depends on
    # Snapshot's implementation and the last one depends on metadata
    # details, so we only want to assert that they're all the same.
    # expected_results = [[416, 800, 190]] * number_of_folders

    # The "if res else None" clauses below are because we use this in
    # the loop (to potentially succeed early), and some of the results
    # may be empty for a few iterations / seconds
    matches_expected_results = MatchesAll(
        # this says that all the content capabilities (2nd item)
        # should be size 800
        AfterPreprocessing(
            lambda results: [res[1] if res else None for res in results],
            AllMatch(Equals(800))),
        # this says that there should be exactly one thing in the set
        # of all the pairs of the Snapshot (1st item) and metadata
        # (3rd item) sizes .. that is, that all the Snapshot sizes are
        # the same and all the metadata sizes are the same.
        AfterPreprocessing(
            lambda results: {(res[0], res[2]) if res else None
                             for res in results}, HasLength(1)))

    # try for 15 seconds to get what we expect. we're waiting for each
    # of the magic-folders to upload their single "a_file_name" items
    # so that they each have one Snapshot in Tahoe-LAFS
    for _ in range(15):
        await util.twisted_sleep(reactor, 1)
        results = await DeferredList([
            yolandi.client.tahoe_objects(folder_name)
            for folder_name in folder_names
        ])
        # if any of the queries fail, we fail the test
        errors = [fail for ok, fail in results if not ok]
        assert errors == [], "At least one /tahoe-objects query failed"

        actual_results = [result for ok, result in results if ok]
        # exit early if we'll pass the test
        if matches_expected_results.match(actual_results) is None:
            break

    # check the results
    assert_that(actual_results, matches_expected_results)
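The "all the same" trick used above, collapsing the fields of interest into a set and asserting the set has exactly one member, also works in isolation; a minimal sketch:

from testtools.matchers import AfterPreprocessing, HasLength

all_same = AfterPreprocessing(
    lambda results: {(res[0], res[2]) for res in results},
    HasLength(1))
assert all_same.match([[416, 800, 190], [416, 801, 190]]) is None      # items 0 and 2 agree
assert all_same.match([[416, 800, 190], [417, 800, 190]]) is not None  # item 0 differs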
Example #17
def match_text_content(matcher):
    """
    Match the text of a ``Content`` instance.
    """
    return AfterPreprocessing(lambda content: content.as_text(), matcher)
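A usage sketch, assuming testtools' own text_content helper and assert_that function:

from testtools.assertions import assert_that
from testtools.content import text_content
from testtools.matchers import Equals

assert_that(text_content(u"hello"), match_text_content(Equals(u"hello")))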
Example #18
    def test_snapshot_local_parent(self, content, filename):
        """
        Create a local snapshot and then another local snapshot with the
        first as parent. Then upload both at once.
        """
        data = io.BytesIO(content)

        snapshots = []
        # create LocalSnapshot
        d = create_snapshot(
            name=filename,
            author=self.alice,
            data_producer=data,
            snapshot_stash_dir=self.stash_dir,
            parents=[],
        )
        d.addCallback(snapshots.append)
        self.assertThat(
            d,
            succeeded(Always()),
        )

        # snapshots[0] is a LocalSnapshot with no parents

        # create another LocalSnapshot with the first as parent
        d = create_snapshot(
            name=filename,
            author=self.alice,
            data_producer=data,
            snapshot_stash_dir=self.stash_dir,
            parents=[snapshots[0]],
        )
        d.addCallback(snapshots.append)
        self.assertThat(
            d,
            succeeded(Always()),
        )

        # turn them both into RemoteSnapshots
        d = write_snapshot_to_tahoe(snapshots[1], self.alice,
                                    self.tahoe_client)
        d.addCallback(snapshots.append)
        self.assertThat(d, succeeded(Always()))

        # ...the last thing we wrote is now a RemoteSnapshot and
        # should have a single parent.
        self.assertThat(
            snapshots[2],
            MatchesStructure(
                name=Equals(filename),
                parents_raw=AfterPreprocessing(len, Equals(1)),
            ))

        # turn the parent into a RemoteSnapshot
        d = snapshots[2].fetch_parent(self.tahoe_client, 0)
        d.addCallback(snapshots.append)
        self.assertThat(d, succeeded(Always()))
        self.assertThat(
            snapshots[3],
            MatchesStructure(
                name=Equals(filename),
                parents_raw=Equals([]),
            ))
Example #19
    def test_snapshot_remote_parent(self, content, filename):
        """
        Create a local snapshot, write into tahoe to create a remote
        snapshot, then create another local snapshot with a remote
        parent. This local snapshot retains its parent when converted
        to a remote.
        """
        data = io.BytesIO(content)

        snapshots = []
        # create LocalSnapshot
        d = create_snapshot(
            name=filename,
            author=self.alice,
            data_producer=data,
            snapshot_stash_dir=self.stash_dir,
            parents=[],
        )
        d.addCallback(snapshots.append)
        self.assertThat(
            d,
            succeeded(Always()),
        )

        # snapshots[0] is a LocalSnapshot with no parents

        # turn it into a remote snapshot by uploading
        d = write_snapshot_to_tahoe(snapshots[0], self.alice,
                                    self.tahoe_client)
        d.addCallback(snapshots.append)

        self.assertThat(
            d,
            succeeded(Always()),
        )

        # snapshots[1] is a RemoteSnapshot with no parents,
        # corresponding to snapshots[0]

        d = create_snapshot(
            name=filename,
            author=self.alice,
            data_producer=data,
            snapshot_stash_dir=self.stash_dir,
            parents=[snapshots[1]],
        )
        d.addCallback(snapshots.append)
        self.assertThat(
            d,
            succeeded(Always()),
        )
        self.assertThat(
            snapshots[2],
            MatchesStructure(
                name=Equals(filename),
                parents_remote=AfterPreprocessing(len, Equals(1)),
            ))

        # upload snapshots[2], turning it into a RemoteSnapshot
        # .. which should have one parent

        d = write_snapshot_to_tahoe(snapshots[2], self.alice,
                                    self.tahoe_client)
        d.addCallback(snapshots.append)

        self.assertThat(
            d,
            succeeded(Always()),
        )
        # ...the last thing we wrote is now a RemoteSnapshot and
        # should have a single parent
        self.assertThat(
            snapshots[3],
            MatchesStructure(
                name=Equals(filename),
                parents_raw=Equals([snapshots[1].capability]),
            ))
Example #20
def has_keys(keys):
    return AfterPreprocessing(
        lambda o: list(hierarchical_keys(o)),
        Equals(keys),
    )
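hierarchical_keys is a helper from the snippet's own module and is not shown here; purely to illustrate the shape of the pattern, a sketch with a hypothetical flattening helper (assuming it lives in the same module as has_keys, since the lambda resolves the name there):

def hierarchical_keys(obj, prefix=()):
    # Hypothetical: yield the key paths of a nested dict, depth-first.
    for key, value in obj.items():
        if isinstance(value, dict):
            yield from hierarchical_keys(value, prefix + (key,))
        else:
            yield prefix + (key,)

assert has_keys([("a",), ("b", "c")]).match({"a": 1, "b": {"c": 2}}) is None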
Example #21
    def test__yields_routes_with_lowest_metrics_first(self):
        space = factory.make_Space()
        # Ensure networks are disjoint but of the same family.
        networks = self.gen_disjoint_networks()

        # Create the node for the "left" that has two IP addresses, one in the
        # null space, one in a non-null space.
        origin = factory.make_Node(hostname="origin")
        origin_iface = factory.make_Interface(node=origin, disconnected=True)
        origin_subnet = factory.make_Subnet(space=space, cidr=next(networks))
        origin_subnet_null_space = factory.make_Subnet(space=None,
                                                       cidr=next(networks))
        origin_sip = factory.make_StaticIPAddress(interface=origin_iface,
                                                  subnet=origin_subnet)
        origin_sip_null_space = factory.make_StaticIPAddress(
            interface=origin_iface, subnet=origin_subnet_null_space)

        # Same subnet, different node.
        node_same_subnet = factory.make_Node(hostname="same-subnet")
        sip_same_subnet = factory.make_StaticIPAddress(
            interface=factory.make_Interface(node=node_same_subnet,
                                             disconnected=True),
            subnet=origin_subnet)

        # Same VLAN, different subnet, different node.
        node_same_vlan = factory.make_Node(hostname="same-vlan")
        sip_same_vlan = factory.make_StaticIPAddress(
            interface=factory.make_Interface(node=node_same_vlan,
                                             disconnected=True),
            subnet=factory.make_Subnet(space=space,
                                       vlan=origin_subnet.vlan,
                                       cidr=next(networks)))

        # Same space, different VLAN, subnet, and node.
        node_same_space = factory.make_Node(hostname="same-space")
        sip_same_space = factory.make_StaticIPAddress(
            interface=factory.make_Interface(node=node_same_space,
                                             disconnected=True),
            subnet=factory.make_Subnet(space=space, cidr=next(networks)))

        # Null space, different VLAN, subnet, and node.
        node_null_space = factory.make_Node(hostname="null-space")
        sip_null_space = factory.make_StaticIPAddress(
            interface=factory.make_Interface(node=node_null_space,
                                             disconnected=True),
            subnet=factory.make_Subnet(space=None, cidr=next(networks)))

        # We'll search for routes between `lefts` and `rights`.
        lefts = [
            origin,
        ]
        rights = [
            node_same_subnet,
            node_same_vlan,
            node_same_space,
            node_null_space,
        ]

        # This is in order, lowest "metric" first.
        expected = [
            (origin, origin_sip.get_ipaddress(), node_same_subnet,
             sip_same_subnet.get_ipaddress()),
            (origin, origin_sip.get_ipaddress(), node_same_vlan,
             sip_same_vlan.get_ipaddress()),
            (origin, origin_sip.get_ipaddress(), node_same_space,
             sip_same_space.get_ipaddress()),
            (origin, origin_sip_null_space.get_ipaddress(), node_null_space,
             sip_null_space.get_ipaddress()),
        ]
        self.assertThat(find_addresses_between_nodes(lefts, rights),
                        AfterPreprocessing(list, Equals(expected)))

        # Same node, same space, different VLAN and subnet. We did not add
        # this earlier because its existence allows for a large number of
        # additional routes between the origin and the other nodes, which
        # would have obscured the test.
        origin_sip_2 = factory.make_StaticIPAddress(
            interface=factory.make_Interface(node=origin, disconnected=True),
            subnet=factory.make_Subnet(space=space, cidr=next(networks)))

        # Now the first addresses returned are between those addresses we
        # created on the same node, in no particular order.
        origin_ips = origin_sip.get_ipaddress(), origin_sip_2.get_ipaddress()
        expected_mutual = {(origin, ip1, origin, ip2)
                           for ip1, ip2 in product(origin_ips, origin_ips)}
        # There's a mutual route for the null-space IP address too.
        expected_mutual.add(
            (origin, origin_sip_null_space.get_ipaddress(), origin,
             origin_sip_null_space.get_ipaddress()))
        observed_mutual = takewhile(
            (lambda route: route[0] == route[2]),  # Route is mutual.
            find_addresses_between_nodes(lefts, [origin, *rights]),
        )
        self.assertItemsEqual(expected_mutual, observed_mutual)
Example #22
    def test_multiple_domains_whitespace(self):
        """
        When the domain label contains multiple comma-separated domains with
        whitespace in between, the domains should be parsed into a list of
        domains without the whitespace.
        """
        domains = parse_domain_label(' example.com, example2.com ')
        assert_that(domains, Equals(['example.com', 'example2.com']))


is_marathon_lb_sigusr_response = MatchesListwise([  # Per marathon-lb instance
    MatchesAll(
        MatchesStructure(code=Equals(200)),
        AfterPreprocessing(
            lambda r: r.text(),
            succeeded(Equals('Sent SIGUSR1 signal to marathon-lb'))))
])


class FailableTxacmeClient(FakeClient):
    """
    A fake txacme client that raises an error during the CSR issuance phase if
    the 'issuance_error' attribute has been set. Used to very *very* roughly
    simulate an error while issuing a certificate.
    """
    def __init__(self, *args, **kwargs):
        super(FailableTxacmeClient, self).__init__(*args, **kwargs)
        # Patch on support for HTTP challenge types
        self._challenge_types.append(challenges.HTTP01)
        self.issuance_error = None
Example #23
 def has_digest(digest):
     return AfterPreprocessing(hexdigest, Equals(digest))
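hexdigest is likewise a module-local helper; a sketch of the idea with a hypothetical SHA-256 stand-in (again assuming the stand-in is defined in the same module as has_digest):

import hashlib

def hexdigest(data):
    # Hypothetical stand-in for the module's helper.
    return hashlib.sha256(data).hexdigest()

expected = hashlib.sha256(b"some bytes").hexdigest()
assert has_digest(expected).match(b"some bytes") is None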
Example #24
def AfterBeingDecoded(matcher):
    return AfterPreprocessing(
        (lambda content: content.decode(settings.DEFAULT_CHARSET)), matcher)
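A usage sketch, assuming Django settings are configured and DEFAULT_CHARSET is the usual "utf-8":

from testtools.matchers import Equals

matcher = AfterBeingDecoded(Equals(u"café"))
assert matcher.match(u"café".encode("utf-8")) is None  # bytes are decoded before comparison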
Example #25
def failed_with(matcher):
    """
    Match against the exception of a failure.
    """
    return failed(AfterPreprocessing(attrgetter('value'), matcher))
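A usage sketch, assuming failed is testtools.twistedsupport.failed and the value under test is an already-failed Deferred:

from testtools.assertions import assert_that
from testtools.matchers import IsInstance
from twisted.internet.defer import fail

assert_that(fail(ValueError("boom")), failed_with(IsInstance(ValueError)))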
Example #26
 def test_form_field_is_a_plain_field(self):
     self.assertThat(
         JSONObjectField().formfield(),
         AfterPreprocessing(type, Is(forms.Field)),
     )
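Note that AfterPreprocessing(type, Is(...)) asserts the exact class rather than an instance-of relationship, because Is compares by identity; a minimal sketch:

from testtools.matchers import AfterPreprocessing, Is

exact_int = AfterPreprocessing(type, Is(int))
assert exact_int.match(3) is None
assert exact_int.match(True) is not None  # bool is a subclass of int, not int itself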
Example #27
def IsSetOfServers(servers):
    return MatchesAll(
        IsInstance(frozenset), Equals(frozenset(servers)), first_only=True
    )


IsEmptySet = MatchesAll(
    IsInstance(frozenset), Equals(frozenset()), first_only=True
)


IsIPv6Address = AfterPreprocessing(
    IPAddress, MatchesStructure(version=Equals(6))
)


def populate_node_with_addresses(node, subnets):
    iface = factory.make_Interface(node=node)
    for subnet in subnets:
        factory.make_StaticIPAddress(interface=iface, subnet=subnet)


class TestGetServersFor_ExternalOnly(MAASServerTestCase):
    """Tests `get_servers_for` when `ntp_external_only` is set."""

    scenarios = (
        ("region", {"make_node": factory.make_RegionController}),
        ("region+rack", {"make_node": factory.make_RegionRackController}),
    )
Example #28
 def test_extraBuildArgs_git_private(self):
     # extraBuildArgs returns appropriate arguments if asked to build a
     # job for a private Git branch.
     self.useFixture(FeatureFixture({SNAP_PRIVATE_FEATURE_FLAG: "on"}))
     self.useFixture(InProcessAuthServerFixture())
     self.pushConfig("launchpad",
                     internal_macaroon_secret_key="some-secret")
     [
         ref
     ] = self.factory.makeGitRefs(information_type=InformationType.USERDATA)
     job = self.makeJob(git_ref=ref, private=True)
     expected_archives, expected_trusted_keys = (
         yield get_sources_list_for_building(job.build,
                                             job.build.distro_arch_series,
                                             None))
     args = yield job.extraBuildArgs()
     split_browse_root = urlsplit(config.codehosting.git_browse_root)
     self.assertThat(
         args,
         MatchesDict({
             "archive_private":
             Is(False),
             "archives":
             Equals(expected_archives),
             "arch_tag":
             Equals("i386"),
             "build_source_tarball":
             Is(False),
             "build_url":
             Equals(canonical_url(job.build)),
             "fast_cleanup":
             Is(True),
             "git_repository":
             AfterPreprocessing(
                 urlsplit,
                 MatchesStructure(
                     scheme=Equals(split_browse_root.scheme),
                     username=Equals(""),
                     password=AfterPreprocessing(
                         Macaroon.deserialize,
                         MatchesStructure(
                             location=Equals(
                                 config.vhost.mainsite.hostname),
                             identifier=Equals("snap-build"),
                             caveats=MatchesListwise([
                                 MatchesStructure.byEquality(
                                     caveat_id="lp.snap-build %s" %
                                     job.build.id),
                             ]))),
                     hostname=Equals(split_browse_root.hostname),
                     port=Equals(split_browse_root.port))),
             "git_path":
             Equals(ref.name),
             "name":
             Equals("test-snap"),
             "private":
             Is(True),
             "proxy_url":
             self.getProxyURLMatcher(job),
             "revocation_endpoint":
             self.getRevocationEndpointMatcher(job),
             "series":
             Equals("unstable"),
             "trusted_keys":
             Equals(expected_trusted_keys),
         }))
Example #29
 def test_tftp_service(self):
     # A TFTP service is configured and added to the top-level service.
     interfaces = [factory.make_ipv4_address(), factory.make_ipv6_address()]
     self.patch(tftp_module, "get_all_interface_addresses",
                lambda: interfaces)
     example_root = self.make_dir()
     example_client_service = Mock()
     example_port = factory.pick_port()
     tftp_service = TFTPService(
         resource_root=example_root,
         client_service=example_client_service,
         port=example_port,
     )
     tftp_service.updateServers()
     # The "tftp" service is a multi-service containing UDP servers for
     # each interface defined by get_all_interface_addresses().
     self.assertIsInstance(tftp_service, MultiService)
     # There's also a TimerService that updates the servers every 45s.
     self.assertThat(
         tftp_service.refresher,
         MatchesStructure.byEquality(
             step=45,
             parent=tftp_service,
             name="refresher",
             call=(tftp_service.updateServers, (), {}),
         ),
     )
     expected_backend = MatchesAll(
         IsInstance(TFTPBackend),
         AfterPreprocessing(lambda backend: backend.base.path,
                            Equals(example_root)),
         AfterPreprocessing(
             lambda backend: backend.client_service,
             Equals(example_client_service),
         ),
     )
     expected_protocol = MatchesAll(
         IsInstance(TFTP),
         AfterPreprocessing(lambda protocol: protocol.backend,
                            expected_backend),
     )
     expected_server = MatchesAll(
         IsInstance(internet.UDPServer),
         AfterPreprocessing(lambda service: len(service.args), Equals(2)),
         AfterPreprocessing(
             lambda service: service.args[0],
             Equals(example_port)  # port
         ),
         AfterPreprocessing(
             lambda service: service.args[1],
             expected_protocol  # protocol
         ),
     )
     self.assertThat(tftp_service.getServers(), AllMatch(expected_server))
     # Only the interface used for each service differs.
     self.assertItemsEqual(
         [svc.kwargs for svc in tftp_service.getServers()],
         [{
             "interface": interface
         } for interface in interfaces],
     )
Example #30
class TestGetSerial(MAASTestCase):
    def test_that_it_works_eh(self):
        nowish = datetime(2014, 3, 24, 16, 7, tzinfo=UTC)
        security_datetime = self.patch(security, "datetime")
        # Make security.datetime() work like regular datetime.
        security_datetime.side_effect = datetime
        # Make security.datetime.now() return a fixed value.
        security_datetime.now.return_value = nowish
        self.assertEqual(69005220, security.get_serial())


is_valid_region_certificate = MatchesAll(
    IsInstance(ssl.PrivateCertificate),
    AfterPreprocessing(
        lambda cert: cert.getSubject(), Equals({"commonName": b"MAAS Region"})
    ),
    AfterPreprocessing(
        lambda cert: cert.getPublicKey().original.bits(), Equals(2048)
    ),
    AfterPreprocessing(
        lambda cert: cert.privateKey.original.bits(), Equals(2048)
    ),
)


class TestCertificateFunctions(MAASServerTestCase):
    def patch_serial(self):
        serial = self.getUniqueInteger()
        self.patch(security, "get_serial").return_value = serial
        return serial
Example #31
def performed(perform, matcher):
    """
    Match against a result after performing a single action in a memory worker.
    """
    return MatchesAll(AfterPreprocessing(lambda _: perform(), Equals(True)),
                      matcher)
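Ordering matters here: MatchesAll runs its sub-matchers in the order given, so the AfterPreprocessing wrapper invokes perform() (and checks that it returned True) before matcher sees the value; a minimal sketch with a hypothetical action:

from testtools.matchers import Equals

log = []

def perform():
    # Hypothetical action standing in for the memory worker's perform().
    log.append("ran")
    return True

assert performed(perform, Equals(["ran"])).match(log) is None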