Example #1
0
    def setUpClass(cls):
        """Bind a consumer to a distributor.

        Do the following:

        1. Add a consumer.
        2. Add a repository.
        3. Add a distributor to the repository.
        4. Bind the consumer to the distributor.
        """
        super(BindConsumerTestCase, cls).setUpClass()

        # Steps 1–3: create a consumer, a repository and a distributor.
        client = api.Client(cls.cfg, api.json_handler)
        cls.consumer = client.post(CONSUMER_PATH, {'id': utils.uuid4()})
        repo = client.post(REPOSITORY_PATH, gen_repo())
        distributor = client.post(
            urljoin(repo['_href'], 'distributors/'), gen_distributor())
        cls.resources.add(repo['_href'])  # mark the repo for deletion

        # Step 4: bind the consumer. Keep the raw response (safe_handler) so
        # tests can inspect status codes and headers.
        client.response_handler = api.safe_handler
        bind_path = urljoin(
            urljoin(CONSUMER_PATH, cls.consumer['consumer']['id'] + '/'),
            'bindings/',
        )
        cls.request = {
            'binding_config': {'B': 21},
            'distributor_id': distributor['id'],
            'notify_agent': False,
            'repo_id': distributor['repo_id'],
        }
        cls.response = client.post(bind_path, cls.request)
Example #2
0
 def test_location_header(self):
     """Assert the Location header is correctly set in each response."""
     for body, response in zip(self.bodies, self.responses):
         with self.subTest(body=body):
             # Expected: base URL + repository path + repo ID, slash-ended.
             expected = urljoin(
                 urljoin(self.cfg.base_url, REPOSITORY_PATH), body['id']
             ) + '/'
             self.assertEqual(response.headers['Location'], expected)
    def setUpClass(cls):
        """Create an RPM repository, upload package groups, and publish."""
        super(UploadPackageGroupsTestCase, cls).setUpClass()

        # Create a repository and add a distributor to it.
        client = api.Client(cls.cfg, api.json_handler)
        repo = client.post(REPOSITORY_PATH, gen_repo())
        cls.resources.add(repo['_href'])  # mark the repo for deletion
        distributor = client.post(
            urljoin(repo['_href'], 'distributors/'),
            gen_distributor(),
        )

        # Generate several package groups, import them into the repository, and
        # publish the repository.
        cls.package_groups = {
            'minimal': _gen_minimal_group(),
            'realistic': _gen_realistic_group(),
        }
        cls.tasks = {}
        for key, package_group in cls.package_groups.items():
            report = _upload_import_package_group(cls.cfg, repo, package_group)
            # Block until the import tasks finish, so the publish below sees
            # all package groups.
            cls.tasks[key] = tuple(api.poll_spawned_tasks(cls.cfg, report))
        client.post(
            urljoin(repo['_href'], 'actions/publish/'),
            {'id': distributor['id']},
        )

        # Fetch the generated repodata of type 'group' (a.k.a. 'comps')
        cls.root_element = get_repomd_xml(
            cls.cfg,
            urljoin('/pulp/repos/', distributor['config']['relative_url']),
            'group'
        )
    def setUpClass(cls):
        """Create, sync and publish a repository. Fetch its ``comps.xml``."""
        super(SyncRepoTestCase, cls).setUpClass()
        client = api.Client(cls.cfg, api.json_handler)

        # Create a repo.
        body = gen_repo()
        body['importer_config']['feed'] = RPM_FEED_URL
        body['distributors'] = [gen_distributor()]
        repo = client.post(REPOSITORY_PATH, body)
        cls.resources.add(repo['_href'])  # mark the repo for deletion

        # Sync and publish the repo.
        # Re-read with details=True so distributor information is available.
        repo = client.get(repo['_href'], params={'details': True})
        utils.sync_repo(cls.cfg, repo['_href'])
        client.post(
            urljoin(repo['_href'], 'actions/publish/'),
            {'id': repo['distributors'][0]['id']},
        )
        repo = client.get(repo['_href'], params={'details': True})

        # Fetch and parse comps.xml.
        dist = repo['distributors'][0]
        dist_url = urljoin('/pulp/repos/', dist['config']['relative_url'])
        cls.root_element = get_repomd_xml(cls.cfg, dist_url, 'group')
        # Serialized copy, for tests that assert against raw XML text.
        cls.xml_as_str = ElementTree.tostring(cls.root_element)
Example #5
0
    def setUpClass(cls):
        """Create and publish a repository, then fetch its ``repomd.xml``."""
        super(RepoMDTestCase, cls).setUpClass()

        cls.tasks = {}
        cls.errata = {}

        client = api.Client(cls.cfg, api.json_handler)

        # Create a repository for use by the test, and mark it for deletion.
        repo = client.post(REPOSITORY_PATH, gen_repo())
        cls.resources.add(repo['_href'])

        # Give the repository a yum distributor, then publish through it.
        distribute = client.post(
            urljoin(repo['_href'], 'distributors/'),
            gen_distributor())
        client.post(
            urljoin(repo['_href'], 'actions/publish/'),
            {'id': distribute['id']})

        # Fetch and parse the published repomd.xml file.
        repomd_url = urljoin(
            urljoin('/pulp/repos/', distribute['config']['relative_url']),
            'repodata/repomd.xml',
        )
        client = api.Client(cls.cfg, xml_handler)
        cls.repomd_tree = client.get(repomd_url)
Example #6
0
def get_repomd_xml_path(distributor_rel_url):
    """Construct the path to a repository's ``repomd.xml`` file.

    :param distributor_rel_url: A distributor's ``relative_url`` option.
    :returns: A string path to a ``repomd.xml`` file.
    """
    repo_path = urljoin("/pulp/repos/", distributor_rel_url)
    return urljoin(repo_path, "repodata/repomd.xml")
Example #7
0
    def setUpClass(cls):
        """Publish a yum repo containing some updates."""
        super(UpdateInfoTestCase, cls).setUpClass()

        # NOTE(review): presumably populated by import_updates() and read by
        # the test methods — confirm against the rest of the class.
        cls.tasks = {}
        cls.errata = {}

        client = api.Client(cls.cfg, api.json_handler)

        # Create a repository for use by the test.
        repo = client.post(REPOSITORY_PATH, gen_repo())
        cls.resources.add(repo['_href'])  # mark the repo for deletion

        # add yum distributor to the repo
        distribute = client.post(urljoin(repo['_href'], 'distributors/'),
                                 gen_distributor())

        # import some errata
        cls.import_updates(client, repo)

        # ask for it to be published
        client.post(urljoin(repo['_href'], 'actions/publish/'),
                    {'id': distribute['id']})

        # Fetch and parse the published updateinfo data via repomd.xml.
        repo_url = urljoin('/pulp/repos/',
                           distribute['config']['relative_url'])
        cls.updateinfo_tree = cls.get_repodata_xml(repo_url, 'updateinfo')
Example #8
0
    def setUpClass(cls):
        """Bind a consumer to a distributor.

        Do the following:

        1. Add a consumer.
        2. Add a repository.
        3. Add a distributor to the repository.
        4. Bind the consumer to the distributor.
        """
        super(BindConsumerTestCase, cls).setUpClass()

        # Steps 1–3: create a consumer, a repository and a distributor.
        client = api.Client(cls.cfg, api.json_handler)
        cls.consumer = client.post(CONSUMER_PATH, {'id': utils.uuid4()})
        repository = client.post(REPOSITORY_PATH, gen_repo())
        distributor = client.post(
            urljoin(repository['_href'], 'distributors/'), gen_distributor())
        cls.resources.add(repository['_href'])  # mark the repo for deletion

        # Step 4. Use safe_handler so the raw response (status code, headers)
        # is preserved for the test methods.
        client.response_handler = api.safe_handler
        path = urljoin(CONSUMER_PATH, cls.consumer['consumer']['id'] + '/')
        path = urljoin(path, 'bindings/')
        cls.request = {
            'binding_config': {
                'B': 21
            },
            'distributor_id': distributor['id'],
            'notify_agent': False,
            'repo_id': distributor['repo_id'],
        }
        cls.response = client.post(path, cls.request)
    def setUpClass(cls):
        """Create an RPM repository with a valid feed and sync it.

        Do the following:

        1. Reset Pulp, including the Squid cache.
        2. Create a repository with the "on demand" download policy.
        3. Sync and publish the repository.
        4. Download an RPM from the published repository.
        5. Download the same RPM to ensure it is served by the cache.
        """
        super(OnDemandTestCase, cls).setUpClass()

        # Ensure `locally_stored_units` is 0 before we start.
        utils.reset_squid(cls.cfg)
        utils.reset_pulp(cls.cfg)

        # Create, sync and publish a repository.
        repo = _create_repo(cls.cfg, 'on_demand')
        cls.resources.add(repo['_href'])  # mark the repo for deletion
        utils.sync_repo(cls.cfg, repo['_href'])

        # Read the repository.
        client = api.Client(cls.cfg)
        cls.repo = client.get(repo['_href'], params={'details': True}).json()

        # Download the same RPM twice. Per the docstring, the second download
        # should be served by the cache.
        path = urljoin('/pulp/repos/', repo['id'] + '/')
        path = urljoin(path, RPM)
        cls.rpm = client.get(path)
        cls.same_rpm = client.get(path)
    def setUpClass(cls):
        """Create an RPM repository with a valid feed and sync it.

        Do the following:

        1. Reset Pulp, including the Squid cache.
        2. Create a repository with the "background" download policy.
        3. Sync and publish the repository.
        4. Download an RPM from the repository.
        """
        super(BackgroundTestCase, cls).setUpClass()
        # Skip on RHEL 6 while Pulp issue #1905 is unresolved.
        if (selectors.bug_is_untestable(1905, cls.cfg.version) and
                _os_is_rhel6(cls.cfg)):
            raise unittest2.SkipTest('https://pulp.plan.io/issues/1905')

        # Required to ensure content is actually downloaded.
        utils.reset_squid(cls.cfg)
        utils.reset_pulp(cls.cfg)

        # Create, sync and publish a repository.
        repo = _create_repo(cls.cfg, 'background')
        cls.resources.add(repo['_href'])  # mark the repo for deletion
        report = utils.sync_repo(cls.cfg, repo['_href']).json()

        # Record the tasks spawned when syncing the repository, and the state
        # of the repository itself after the sync.
        client = api.Client(cls.cfg)
        cls.repo = client.get(repo['_href'], params={'details': True}).json()
        cls.tasks = tuple(api.poll_spawned_tasks(cls.cfg, report))

        # Download an RPM.
        path = urljoin('/pulp/repos/', repo['id'] + '/')
        path = urljoin(path, RPM)
        cls.rpm = client.get(path)
Example #11
0
    def test_publish_to_web(self):
        """Publish the repository to the web, and fetch the ISO file.

        The ISO file should be available over both HTTP and HTTPS. Fetch it
        from both locations, and assert that the fetch was successful.
        """
        # Publish the repository, and re-read the distributor.
        client = api.Client(self.cfg, api.json_handler)
        publish_path = urljoin(self.repo['_href'], 'actions/publish/')
        client.post(publish_path, {'id': self.distributor['id']})
        distributor = client.get(self.distributor['_href'])

        # Build the path to the ISO file. By default, the file is named like
        # so: {repo_id}-{iso_creation_time}-{iso_number}.iso
        timestamp = parse(distributor['last_publish'])
        iso_name = '{}-{}-01.iso'.format(
            self.repo['id'], timestamp.strftime('%Y-%m-%dT%H.%M'))
        iso_path = urljoin(
            urljoin('/pulp/exports/repos/',
                    distributor['config']['relative_url']),
            iso_name,
        )

        # Fetch the ISO file via HTTP and HTTPS. Keep the original URL intact
        # and derive a per-scheme URL on each iteration.
        client.response_handler = api.safe_handler
        base_url = urljoin(self.cfg.base_url, iso_path)
        for scheme in ('http', 'https'):
            url = urlunparse((scheme,) + urlparse(base_url)[1:])
            with self.subTest(url=url):
                self.assertEqual(client.get(url).status_code, 200)
    def setUpClass(cls):
        """Create a schedule to publish the repository.

        Do the following:

        1. Create a repository with a valid feed
        2. Sync it
        3. Schedule publish to run every 30 seconds
        """
        super(CreateSuccessTestCase, cls).setUpClass()
        client = api.Client(cls.cfg)

        # Create a repository with a valid feed, and sync it.
        body = gen_repo()
        body['importer_config']['feed'] = RPM_FEED_URL
        repo = client.post(REPOSITORY_PATH, body).json()
        cls.resources.add(repo['_href'])  # mark the repo for deletion
        utils.sync_repo(cls.cfg, repo['_href'])

        # Add a distributor, and schedule it to publish every 30 seconds.
        distributor = gen_distributor()
        distributor_path = urljoin(repo['_href'], 'distributors/')
        client.post(distributor_path, distributor)
        schedule_path = urljoin(
            distributor_path,
            '{}/schedules/publish/'.format(distributor['distributor_id']),
        )
        cls.response = client.post(schedule_path, {'schedule': 'PT30S'})
        cls.attrs = cls.response.json()
Example #13
0
def get_repomd_xml(server_config, repo_path, repomd_type):
    """Retrieve XML of a particular type from a repo.

    Given a URL, fetch, parse and return the repository XML of type
    ``repomd_type``.

    :param pulp_smash.config.ServerConfig server_config: Information about the
        Pulp server being targeted.
    :param repo_path: The path to (or URL of) a repomd repository. This path
        should not include any segments past the repository itself, such as a
        path to a particular ``repodata`` directory.
    :param repomd_type: The name of a type of repomd data, as found in the
        top-level ``repomd.xml`` file of a repository. Valid values might be
        "updateinfo" or "group".
    :returns: An ``xml.etree.ElementTree.Element`` instance containing the
        parsed repository metadata of the requested type.
    """
    # Fetch and parse repomd.xml
    # The default response handler is used here: `.text` gives the raw XML,
    # from which the href of the requested data type is extracted.
    client = api.Client(server_config)
    repomd_xml = client.get(urljoin(repo_path, 'repodata/repomd.xml')).text
    repomd_xml_href = get_repomd_xml_href(repomd_xml, repomd_type)

    # Fetch, parse and return updateinfo.xml or updateinfo.xml.gz
    # Switch to xml_handler so the second response is returned parsed.
    client.response_handler = xml_handler
    return client.get(urljoin(repo_path, repomd_xml_href))
    def test_publish_override_config(self):
        """Use the ``packages_directory`` publish override option.

        Create a distributor with default options, and use it to publish the
        repository. Specify the ``packages_directory`` option during the
        publish as an override option. Verify packages end up in the specified
        directory, relative to the published repository's root.
        """
        if selectors.bug_is_untestable(1976, self.cfg.version):
            self.skipTest('https://pulp.plan.io/issues/1976')
        client = api.Client(self.cfg, api.json_handler)
        distributor = client.post(
            urljoin(self.repo_href, 'distributors/'),
            gen_distributor(),
        )
        # Use a random directory name so collisions with other runs are
        # practically impossible.
        packages_dir = utils.uuid4()
        client.post(urljoin(self.repo_href, 'actions/publish/'), {
            'id': distributor['id'],
            'override_config': {'packages_directory': packages_dir},
        })
        primary_xml = get_parse_repodata_primary_xml(self.cfg, distributor)
        package_hrefs = get_package_hrefs(primary_xml)
        # Guard: an empty href list would make the loop below vacuously pass.
        self.assertGreater(len(package_hrefs), 0)
        for package_href in package_hrefs:
            with self.subTest(package_href=package_href):
                self.assertEqual(os.path.dirname(package_href), packages_dir)
Example #15
0
    def setUpClass(cls):
        """Create distributors with legal and illegal relative paths."""
        super(CreateDistributorsTestCase, cls).setUpClass()
        cls.responses = []

        # Three fresh paths, then three conflicting/illegal variants of the
        # first: an exact duplicate, a path nested under it, and one with a
        # leading slash. Order matters: tests index into cls.responses.
        relative_paths = [_gen_rel_path(), _gen_rel_path(), _gen_rel_path(3)]
        relative_paths.append(relative_paths[0])
        relative_paths.append(relative_paths[0] + '/' + utils.uuid4())
        relative_paths.append('/' + relative_paths[0])

        # Create two repositories
        client = api.Client(cls.cfg, api.json_handler)
        repos = [client.post(REPOSITORY_PATH, gen_repo()) for _ in range(2)]
        for repo in repos:
            cls.resources.add(repo['_href'])  # mark for deletion

        # Create a distributor for the first repository
        # echo_handler keeps raw responses so failures can be asserted on.
        client.response_handler = api.echo_handler
        path = urljoin(repos[0]['_href'], 'distributors/')
        body = _gen_distributor(relative_paths[0])
        cls.responses.append(client.post(path, body))

        # Create distributors for the second repository
        path = urljoin(repos[1]['_href'], 'distributors/')
        for relative_path in relative_paths[1:]:
            body = _gen_distributor(relative_path)
            cls.responses.append(client.post(path, body))
    def setUpClass(cls):
        """Create a schedule to publish a repo, verify the ``total_run_count``.

        Do the following:

        1. Create a repository with a valid feed
        2. Sync it
        3. Schedule publish to run every 2 minutes
        4. Wait for 130 seconds and read the schedule to get the number of
           "publish" runs
        """
        super(ScheduledPublishTestCase, cls).setUpClass()
        client = api.Client(cls.cfg, api.json_handler)

        # Create a repository with a valid feed, and sync it.
        body = gen_repo()
        body["importer_config"]["feed"] = RPM_FEED_URL
        repo = client.post(REPOSITORY_PATH, body)
        cls.resources.add(repo["_href"])  # mark the repo for deletion
        utils.sync_repo(cls.cfg, repo["_href"])

        # Add a distributor, and schedule it to publish every two minutes.
        distributor = gen_distributor()
        client.post(urljoin(repo["_href"], "distributors/"), distributor)
        scheduling_url = "/".join([
            "distributors",
            distributor["distributor_id"],
            "schedules/publish/",
        ])
        schedule = client.post(
            urljoin(repo["_href"], scheduling_url),
            {"schedule": "PT2M"},
        )

        # Wait long enough for the scheduled publish to have run, then read
        # the schedule back.
        time.sleep(130)
        cls.response = client.get(schedule["_href"])
    def setUpClass(cls):
        """Upload an erratum to a repo, publish, and download the erratum.

        Do the following:

        1. Create an RPM repository with a distributor.
        2. Upload an erratum to the repository.
        3. Publish the repository.
        4. Fetch the repository's ``updateinfo.xml`` file.
        """
        super(UploadErratumTestCase, cls).setUpClass()
        # Kept on the class so tests can compare it to the published data.
        cls.erratum = gen_erratum()

        # Create an RPM repository with a feed and distributor.
        client = api.Client(cls.cfg, api.json_handler)
        body = gen_repo()
        body['importer_config']['feed'] = RPM_FEED_URL
        body['distributors'] = [gen_distributor()]
        repo = client.post(REPOSITORY_PATH, body)
        cls.resources.add(repo['_href'])  # mark the repo for deletion

        # Sync content into the repository, and give it an erratum.
        utils.sync_repo(cls.cfg, repo['_href'])
        utils.upload_import_erratum(cls.cfg, cls.erratum, repo['_href'])
        # Re-read with details=True so distributor information is available.
        repo = client.get(repo['_href'], params={'details': True})

        # Publish the repository, and fetch and parse updateinfo.xml
        distributor = repo['distributors'][0]
        client.post(
            urljoin(repo['_href'], 'actions/publish/'),
            {'id': distributor['id']},
        )
        path = urljoin('/pulp/repos/', distributor['config']['relative_url'])
        cls.updateinfo = get_repomd_xml(cls.cfg, path, 'updateinfo')
Example #18
0
    def setUpClass(cls):
        """Publish a yum repo containing some updates."""
        super(UpdateInfoTestCase, cls).setUpClass()

        # NOTE(review): presumably populated by import_updates() and read by
        # the test methods — confirm against the rest of the class.
        cls.tasks = {}
        cls.errata = {}

        client = api.Client(cls.cfg, api.json_handler)

        # Create a repository for use by the test.
        repo = client.post(REPOSITORY_PATH, gen_repo())
        cls.resources.add(repo['_href'])  # mark the repo for deletion

        # add yum distributor to the repo
        distribute = client.post(
            urljoin(repo['_href'], 'distributors/'),
            gen_distributor())

        # import some errata
        cls.import_updates(client, repo)

        # ask for it to be published
        client.post(
            urljoin(repo['_href'], 'actions/publish/'),
            {'id': distribute['id']})

        # Fetch and parse the published updateinfo data via repomd.xml.
        repo_url = urljoin('/pulp/repos/',
                           distribute['config']['relative_url'])
        cls.updateinfo_tree = cls.get_repodata_xml(repo_url, 'updateinfo')
Example #19
0
    def setUpClass(cls):
        """Generate, fetch and parse a ``repomd.xml`` file.

        Do the following:

        1. Create an RPM repository, add a YUM distributor, and publish the
           repository.
        2. Fetch the ``repomd.xml`` file from the distributor, and parse it.
        """
        super(RepoMDTestCase, cls).setUpClass()

        # Create a repository. Add a yum distributor and publish it.
        client = api.Client(cls.cfg, api.json_handler)
        repo = client.post(REPOSITORY_PATH, gen_repo())
        cls.resources.add(repo['_href'])  # mark the repo for deletion
        distributor = client.post(
            urljoin(repo['_href'], 'distributors/'),
            gen_distributor(),
        )
        client.post(
            urljoin(repo['_href'], 'actions/publish/'),
            {'id': distributor['id']},
        )

        # Fetch and parse repomd.xml
        # xml_handler returns the response parsed as an XML element tree.
        client.response_handler = xml_handler
        path = urljoin('/pulp/repos/', distributor['config']['relative_url'])
        path = urljoin(path, 'repodata/repomd.xml')
        cls.root_element = client.get(path)
    def setUpClass(cls):  # pylint:disable=arguments-differ
        """Create two repositories, first is feed of second one.

        Provides server config and set of iterable to delete. Following steps
        are executed:

        1. Create repository foo with feed, sync and publish it.
        2. Create repository bar with foo as a feed with
           ``retain_old_count=0``.
        3. Run sync of repo foo.
        4. Get information on both repositories.
        """
        super(RetainOldCountTestCase, cls).setUpClass()
        client = api.Client(cls.cfg)
        cls.responses = {}
        hrefs = []  # repository hrefs

        # Create and sync the first repository.
        body = gen_repo()
        body['importer_config']['feed'] = RPM_FEED_URL
        hrefs.append(client.post(REPOSITORY_PATH, body).json()['_href'])
        cls.responses['first sync'] = client.post(
            urljoin(hrefs[0], 'actions/sync/'),
            {'override_config': {}}
        )

        # Add distributor and publish
        cls.responses['distribute'] = client.post(
            urljoin(hrefs[0], 'distributors/'),
            gen_distributor(),
        )
        cls.responses['publish'] = client.post(
            urljoin(hrefs[0], 'actions/publish/'),
            {'id': cls.responses['distribute'].json()['id']},
        )

        # Create and sync the second repository. Ensure it fetches content from
        # the first, and that the `retain_old_count` option is set correctly.
        # We disable SSL validation for a practical reason: each HTTPS feed
        # must have a certificate to work, which is burdensome to do here.
        body = gen_repo()
        # Feed URL points at the first repo's published location.
        body['importer_config']['feed'] = urljoin(
            cls.cfg.base_url,
            _PUBLISH_DIR +
            cls.responses['distribute'].json()['config']['relative_url'],
        )
        body['importer_config']['retain_old_count'] = 0  # see docstring
        body['importer_config']['ssl_validation'] = False
        hrefs.append(client.post(REPOSITORY_PATH, body).json()['_href'])
        cls.responses['second sync'] = client.post(
            urljoin(hrefs[1], 'actions/sync/'),
            {'override_config': {}}
        )

        # Read the repositories and mark them for deletion.
        cls.repos = [client.get(href).json() for href in hrefs]
        cls.resources.update(set(hrefs))
 def test_unit_integrity(self):
     """Download and verify an RPM from each Pulp distributor."""
     for response in self.responses['distribute']:
         distributor = response.json()
         with self.subTest(distributor=distributor):
             # Build the published URL for the RPM under this distributor.
             rpm_url = urljoin(
                 urljoin('/pulp/repos/', distributor['config']['relative_url']),
                 RPM,
             )
             downloaded = api.Client(self.cfg).get(rpm_url).content
             self.assertEqual(downloaded, self.rpm)
    def setUpClass(cls):  # pylint:disable=arguments-differ
        """Create two repositories, first is feed of second one.

        Provides server config and set of iterable to delete. Following steps
        are executed:

        1. Create repository foo with feed, sync and publish it.
        2. Create repository bar with foo as a feed with
           ``retain_old_count=0``.
        3. Run sync of repo foo.
        4. Get information on both repositories.
        """
        super(RetainOldCountTestCase, cls).setUpClass()
        client = api.Client(cls.cfg)
        cls.responses = {}
        hrefs = []  # repository hrefs

        # Create and sync the first repository.
        body = gen_repo()
        body['importer_config']['feed'] = RPM_FEED_URL
        hrefs.append(client.post(REPOSITORY_PATH, body).json()['_href'])
        cls.responses['first sync'] = client.post(
            urljoin(hrefs[0], 'actions/sync/'), {'override_config': {}})

        # Add distributor and publish
        cls.responses['distribute'] = client.post(
            urljoin(hrefs[0], 'distributors/'),
            gen_distributor(),
        )
        cls.responses['publish'] = client.post(
            urljoin(hrefs[0], 'actions/publish/'),
            {'id': cls.responses['distribute'].json()['id']},
        )

        # Create and sync the second repository. Ensure it fetches content from
        # the first, and that the `retain_old_count` option is set correctly.
        # We disable SSL validation for a practical reason: each HTTPS feed
        # must have a certificate to work, which is burdensome to do here.
        body = gen_repo()
        # Feed URL points at the first repo's published location.
        body['importer_config']['feed'] = urljoin(
            cls.cfg.base_url,
            _PUBLISH_DIR +
            cls.responses['distribute'].json()['config']['relative_url'],
        )
        body['importer_config']['retain_old_count'] = 0  # see docstring
        body['importer_config']['ssl_validation'] = False
        hrefs.append(client.post(REPOSITORY_PATH, body).json()['_href'])
        cls.responses['second sync'] = client.post(
            urljoin(hrefs[1], 'actions/sync/'), {'override_config': {}})

        # Read the repositories and mark them for deletion.
        cls.repos = [client.get(href).json() for href in hrefs]
        cls.resources.update(set(hrefs))
Example #23
0
    def setUpClass(cls):
        """Publish a repository, change it, and publish it again."""
        super(ChangeRepoTestCase, cls).setUpClass()
        client = api.Client(cls.cfg)
        # Re-usable arguments for the two publish calls below.
        publish_args = (
            urljoin(cls.repo["_href"], "actions/publish/"),
            {"id": cls.repo["distributors"][0]["id"]},
        )
        relative_url = cls.repo["distributors"][0]["config"]["relative_url"]

        # Publish, remove a unit, and publish again
        cls.call_reports.append(client.post(*publish_args).json())
        cls.repomd_xmls.append(client.get(get_repomd_xml_path(relative_url)))
        client.post(
            urljoin(cls.repo["_href"], "actions/unassociate/"),
            {"criteria": {"type_ids": ["rpm"], "limit": 1}},
        )
        cls.call_reports.append(client.post(*publish_args).json())
        cls.repomd_xmls.append(client.get(get_repomd_xml_path(relative_url)))
def get_parse_repodata_xml(server_config, distributor, file_path):
    """Fetch, parse and return an XML file from a ``repodata`` directory.

    :param pulp_smash.config.ServerConfig server_config: Information about the
        Pulp server being targeted.
    :param distributor: Information about a distributor. It should be a dict
        containing at least ``{'config': {'relative_url': …}}``.
    :param file_path: The path to an XML file, relative to the distributor's
        relative URL. For example: ``repodata/repomd.xml``.
    :returns: The XML document, parsed as an ``xml.etree.ElementTree`` object.
    """
    url = urljoin(
        urljoin('/pulp/repos/', distributor['config']['relative_url']),
        file_path,
    )
    return api.Client(server_config, xml_handler).get(url)
    def setUpClass(cls):
        """Create three schedules and read, update and delete them."""
        super(ReadUpdateDeleteTestCase, cls).setUpClass()
        client = api.Client(cls.cfg, api.json_handler)

        # Create a repo with a valid feed and sync it
        body = gen_repo()
        body['importer_config']['feed'] = RPM_FEED_URL
        repo = client.post(REPOSITORY_PATH, body)
        cls.resources.add(repo['_href'])  # mark the repo for deletion
        utils.sync_repo(cls.cfg, repo['_href'])

        # Create schedules
        distributor = gen_distributor()
        client.post(
            urljoin(repo['_href'], 'distributors/'),
            distributor
        )
        scheduling_url = '/'.join([
            'distributors', distributor['distributor_id'], 'schedules/publish/'
        ])
        scheduling_path = urljoin(repo['_href'], scheduling_url)
        cls.schedules = tuple((
            client.post(scheduling_path, {'schedule': 'PT30S'})
            for _ in range(3)
        ))
        cls.responses = {}
        # Use safe_handler from here on so tests can inspect status codes.
        client.response_handler = api.safe_handler

        # Attributes that may be changed after creation
        cls.mutable_attrs = [
            'consecutive_failures', 'last_run_at', 'last_updated', 'next_run',
            'first_run', 'remaining_runs', 'total_run_count'
        ]

        # Read the first schedule
        cls.responses['read_one'] = client.get(cls.schedules[0]['_href'])

        # Read all schedules for the repo
        cls.responses['read_many'] = client.get(scheduling_path)

        # Update the second schedule
        cls.update_body = {'schedule': 'PT1M'}
        cls.responses['update'] = client.put(
            cls.schedules[1]['_href'], cls.update_body
        )

        # Delete the third schedule
        cls.responses['delete'] = client.delete(cls.schedules[2]['_href'])
    def setUpClass(cls):
        """Create RPM repository, delete a package, and publish the repository.

        More specifically, do the following:

        1. Create an RPM repository.
        2. Add a YUM distributor.
        3. Sync the created repository.
        4. Remove the ``gorilla`` package
        5. Publish the repository. Fetch the ``updateinfo.xml`` file from the
           distributor (via ``repomd.xml``), and parse it.
        """
        super(ErratumPkgListCountTestCase, cls).setUpClass()

        # Create a repository, sync it, and add a yum distributor.
        client = api.Client(cls.cfg, api.json_handler)
        body = gen_repo()
        body['importer_config']['feed'] = RPM_FEED_URL
        repo = client.post(REPOSITORY_PATH, body)
        cls.resources.add(repo['_href'])  # mark the repo for deletion
        utils.sync_repo(cls.cfg, repo['_href'])
        distributor = client.post(
            urljoin(repo['_href'], 'distributors/'),
            gen_distributor(),
        )

        # Remove the gorilla package unit
        client.post(
            urljoin(repo['_href'], 'actions/unassociate/'),
            {'criteria': get_unit_unassociate_criteria(RPM_ERRATUM_RPM_NAME)},
        )

        # Publish the repository
        client.post(
            urljoin(repo['_href'], 'actions/publish/'),
            {'id': distributor['id']},
        )

        # Fetch and parse updateinfo.xml (or updateinfo.xml.gz), via repomd.xml
        root_element = get_repomd_xml(
            cls.cfg,
            urljoin('/pulp/repos/', distributor['config']['relative_url']),
            'updateinfo'
        )

        # Fetch the erratum and erratum pkglist for the gorilla package
        updates = _get_updates_by_id(root_element)
        erratum = updates[RPM_ERRATUM_ID]
        cls.erratum_pkglists = erratum.findall('pkglist')
    def setUpClass(cls):
        """Create an RPM repository. Upload the same content into it twice."""
        super(DuplicateUploadsTestCase, cls).setUpClass()
        utils.reset_pulp(cls.cfg)

        # Fetch the RPM that will be uploaded.
        client = api.Client(cls.cfg)
        cls.rpm = client.get(urljoin(RPM_FEED_URL, RPM)).content

        # Create a feed-less repository.
        client = api.Client(cls.cfg, api.json_handler)
        body = {
            'id': utils.uuid4(),
            'importer_config': {},
            'importer_type_id': 'yum_importer',
            'notes': {'_repo-type': 'rpm-repo'},
        }
        repo = client.post(REPOSITORY_PATH, body)
        cls.resources.add(repo['_href'])  # mark the repo for deletion

        # Upload content and import it into the repository. Do it twice!
        cls.call_reports = tuple(
            _upload_import_rpm(cls.cfg, cls.rpm, repo['_href'])
            for _ in range(2)
        )
Example #28
0
    def setUpClass(cls):
        """Create and sync two puppet repositories."""
        super(SyncValidFeedTestCase, cls).setUpClass()
        utils.reset_pulp(cls.cfg)  # See: https://pulp.plan.io/issues/1406
        bodies = tuple((_gen_repo() for _ in range(2)))
        # The second repository's query uses underscores instead of hyphens.
        for i, query in enumerate((
                _PUPPET_QUERY, _PUPPET_QUERY.replace('-', '_'))):
            bodies[i]['importer_config'] = {
                'feed': _PUPPET_FEED,
                'queries': [query],
            }
        client = api.Client(cls.cfg, api.json_handler)
        repos = [client.post(REPOSITORY_PATH, body) for body in bodies]
        cls.resources.update({repo['_href'] for repo in repos})

        # Trigger repository sync and collect completed tasks.
        cls.reports = []  # raw responses to "start syncing" commands
        cls.tasks = []  # completed tasks
        client.response_handler = api.echo_handler
        for repo in repos:
            report = client.post(urljoin(repo['_href'], 'actions/sync/'))
            report.raise_for_status()
            cls.reports.append(report)
            for task in api.poll_spawned_tasks(cls.cfg, report.json()):
                cls.tasks.append(task)
Example #29
0
def poll_task(server_config, href):
    """Wait for a task and its children to complete. Yield response bodies.

    Poll the task at ``href``, waiting for the task to complete. When a
    response is received indicating that the task is complete, yield that
    response body and recursively poll each child task.

    :param server_config: A :class:`pulp_smash.config.ServerConfig` object.
    :param href: The path to a task you'd like to monitor recursively.
    :returns: A generator yielding response bodies.
    :raises pulp_smash.exceptions.TaskTimedOutError: If a task takes too
        long to complete.
    """
    poll_limit = 24  # 24 * 5s == 120s
    poll_counter = 0
    while True:
        response = requests.get(urljoin(server_config.base_url, href),
                                **server_config.get_requests_kwargs())
        response.raise_for_status()
        attrs = response.json()
        if attrs['state'] in _TASK_END_STATES:
            # This task has completed. Yield its final state, then iterate
            # through each of its children and yield their final states.
            # NOTE: use a distinct name for child hrefs — re-using ``href``
            # here would shadow this function's parameter.
            yield attrs
            for child_href in (task['_href'] for task in attrs['spawned_tasks']):
                for final_task_state in poll_task(server_config, child_href):
                    yield final_task_state
            break
        poll_counter += 1
        if poll_counter > poll_limit:
            raise exceptions.TaskTimedOutError(
                'Task {} is ongoing after {} polls.'.format(href, poll_limit))
        # This approach is dumb, in that we don't account for time spent
        # waiting for the Pulp server to respond to us.
        sleep(5)
Example #30
0
 def test_headers_location(self):
     """Verify the value of the response's Location header."""
     expected = urljoin(
         self.cfg.base_url,
         REPOSITORY_PATH + self.body['id'] + '/',
     )
     self.assertEqual(self.response.headers['Location'], expected)
Example #31
0
    def setUpClass(cls):
        """Create an RPM repository, sync it, and remove some units from it.

        After creating and syncing an RPM repository, we walk through the unit
        type IDs listed in
        :data:`pulp_smash.tests.rpm.api_v2.test_unassociate.RemoveAssociatedUnits.TYPE_IDS`
        and remove one unit of each kind from the repository. We verify Pulp's
        behaviour by recording repository contents pre and post removal.
        """
        super(RemoveAssociatedUnits, cls).setUpClass()
        client = api.Client(cls.cfg, api.json_handler)
        body = gen_repo()
        body['importer_config']['feed'] = RPM_FEED_URL
        repo = client.post(REPOSITORY_PATH, body)
        cls.resources.add(repo['_href'])  # mark for deletion
        sync_path = urljoin(repo['_href'], 'actions/sync/')
        client.post(sync_path, {'override_config': {}})

        # List starting content
        cls.before_units = {
            type_id: _list_repo_units_of_type(client, repo['_href'], type_id)
            for type_id in cls.TYPE_IDS
        }

        # Remove one of each unit and store its id for later assertions
        cls.removed_units = {}
        for type_id, units_list in cls.before_units.items():
            cls.removed_units[type_id] = units_list[0]
            _remove_unit(client, repo['_href'], type_id, units_list[0])

        # List final content
        cls.after_units = {
            type_id: _list_repo_units_of_type(client, repo['_href'], type_id)
            for type_id in cls.TYPE_IDS
        }
Example #32
0
    def setUpClass(cls):
        """Create an RPM repository, sync it, and remove some units from it.

        After creating and syncing an RPM repository, we walk through the unit
        type IDs listed in
        :data:`pulp_smash.tests.rpm.api_v2.test_unassociate.RemoveAssociatedUnits.TYPE_IDS`
        and remove one unit of each kind from the repository. We verify Pulp's
        behaviour by recording repository contents pre and post removal.
        """
        super(RemoveAssociatedUnits, cls).setUpClass()
        client = api.Client(cls.cfg, api.json_handler)
        body = gen_repo()
        body['importer_config']['feed'] = RPM_FEED_URL
        repo = client.post(REPOSITORY_PATH, body)
        cls.resources.add(repo['_href'])  # mark for deletion
        sync_path = urljoin(repo['_href'], 'actions/sync/')
        client.post(sync_path, {'override_config': {}})

        # List starting content
        cls.before_units = {
            type_id: _list_repo_units_of_type(client, repo['_href'], type_id)
            for type_id in cls.TYPE_IDS
        }

        # Remove one of each unit and store its id for later assertions
        cls.removed_units = {}
        for type_id, units_list in cls.before_units.items():
            cls.removed_units[type_id] = units_list[0]
            _remove_unit(client, repo['_href'], type_id, units_list[0])

        # List final content
        cls.after_units = {
            type_id: _list_repo_units_of_type(client, repo['_href'], type_id)
            for type_id in cls.TYPE_IDS
        }
Example #33
0
    def request(self, method, url, **kwargs):
        """Send an HTTP request.

        Arguments passed directly in to this method override (but do not
        overwrite!) arguments specified in ``self.request_kwargs``.
        """
        # The `self.request_kwargs` dict should *always* have a "url" argument.
        # This is enforced by `self.__init__`. This allows us to call the
        # `requests.request` function and satisfy its signature:
        #
        #     request(method, url, **kwargs)
        #
        # Work on a copy so `self.request_kwargs` itself is never mutated.
        request_kwargs = self.request_kwargs.copy()
        request_kwargs['url'] = urljoin(request_kwargs['url'], url)
        request_kwargs.update(kwargs)
        # Warn (but do not abort) when the request targets a host other than
        # the one this client was configured for, as per-host options (e.g.
        # the `verify` SSL option) may not apply to the other host.
        config_host = urlparse(self._cfg.base_url).netloc
        request_host = urlparse(request_kwargs['url']).netloc
        if request_host != config_host:
            warnings.warn(
                'This client is configured to make HTTP requests to {0}, but '
                'a request is being made to {1}. The request will be made, '
                'but some options may be incorrect. For example, an incorrect '
                'SSL certificate may be specified with the `verify` option. '
                'Request options: {2}'
                .format(config_host, request_host, request_kwargs),
                RuntimeWarning
            )
        return self.response_handler(
            self._cfg,
            requests.request(method, **request_kwargs),
        )
Example #34
0
    def setUpClass(cls):
        """Create and sync two puppet repositories.

        The two repositories share a feed and differ only in their query: one
        uses ``_PUPPET_QUERY`` as-is, the other uses it with hyphens replaced
        by underscores.
        """
        super(SyncValidFeedTestCase, cls).setUpClass()
        utils.reset_pulp(cls.cfg)  # See: https://pulp.plan.io/issues/1406
        bodies = tuple((_gen_repo() for _ in range(2)))
        for i, query in enumerate(
            (_PUPPET_QUERY, _PUPPET_QUERY.replace('-', '_'))):
            bodies[i]['importer_config'] = {
                'feed': _PUPPET_FEED,
                'queries': [query],
            }
        client = api.Client(cls.cfg, api.json_handler)
        repos = [client.post(REPOSITORY_PATH, body) for body in bodies]
        cls.resources.update({repo['_href'] for repo in repos})

        # Trigger repository sync and collect completed tasks.
        cls.reports = []  # raw responses to "start syncing" commands
        cls.tasks = []  # completed tasks
        client.response_handler = api.echo_handler
        for repo in repos:
            report = client.post(urljoin(repo['_href'], 'actions/sync/'))
            report.raise_for_status()
            cls.reports.append(report)
            for task in api.poll_spawned_tasks(cls.cfg, report.json()):
                cls.tasks.append(task)
    def setUpClass(cls):
        """Create an RPM repository, upload errata, and publish the repository.

        More specifically, do the following:

        1. Create an RPM repository.
        2. Add a YUM distributor.
        3. Generate a pair of errata. Upload them to Pulp and import them into
           the repository.
        4. Publish the repository. Fetch the ``updateinfo.xml`` file from the
           distributor (via ``repomd.xml``), and parse it.
        """
        super(UpdateInfoTestCase, cls).setUpClass()
        cls.errata = {
            'import_no_pkglist': _gen_errata_no_pkglist(),
            'import_typical': _gen_errata_typical(),
        }
        cls.tasks = {}  # {'import_no_pkglist': (…), 'import_typical': (…)}

        # Create a repository and add a yum distributor.
        client = api.Client(cls.cfg, api.json_handler)
        repo = client.post(REPOSITORY_PATH, gen_repo())
        cls.resources.add(repo['_href'])
        distributor = client.post(
            urljoin(repo['_href'], 'distributors/'),
            gen_distributor(),
        )

        # Import errata into our repository. Publish the repository.
        for key, erratum in cls.errata.items():
            report = utils.upload_import_erratum(
                cls.cfg,
                erratum,
                repo['_href'],
            )
            # Record the tasks spawned by each upload, for later assertions.
            cls.tasks[key] = tuple(api.poll_spawned_tasks(cls.cfg, report))
        client.post(
            urljoin(repo['_href'], 'actions/publish/'),
            {'id': distributor['id']},
        )

        # Fetch and parse updateinfo.xml (or updateinfo.xml.gz), via repomd.xml
        cls.root_element = get_repomd_xml(
            cls.cfg,
            urljoin('/pulp/repos/', distributor['config']['relative_url']),
            'updateinfo'
        )
Example #36
0
 def test_02_delete_by_href(self):
     """Delete an orphan via the href it exposes."""
     client = api.Client(self.cfg, api.json_handler)
     pre = client.get(ORPHANS_PATH)
     # Pick an arbitrary erratum orphan as the deletion target.
     erratum_orphans = client.get(urljoin(ORPHANS_PATH, 'erratum/'))
     victim = random.choice(erratum_orphans)
     client.delete(victim['_href'])
     post = client.get(ORPHANS_PATH)
     self.check_one_orphan_deleted(pre, post, victim)
def _upload_import_rpm(server_config, rpm, repo_href):
    """Upload an RPM to a Pulp server and import it into a repository.

    Create an upload request, upload ``rpm``, import it into the repository at
    ``repo_href``, and close the upload request. Return the call report
    returned when importing the RPM.
    """
    client = api.Client(server_config, api.json_handler)
    upload_request = client.post(CONTENT_UPLOAD_PATH)
    # Upload the whole RPM as a single chunk, at offset zero.
    client.put(urljoin(upload_request['_href'], '0/'), data=rpm)
    import_args = {
        'unit_key': {},
        'unit_type_id': 'rpm',
        'upload_id': upload_request['upload_id'],
    }
    call_report = client.post(
        urljoin(repo_href, 'actions/import_upload/'),
        import_args,
    )
    # The upload request is no longer needed once the import is requested.
    client.delete(upload_request['_href'])
    return call_report
 def test_02_delete_by_href(self):
     """Delete an orphan by its href."""
     client = api.Client(self.cfg, api.json_handler)
     orphans_pre = client.get(ORPHANS_PATH)
     # Pick an arbitrary erratum orphan and delete it by its href.
     orphan = random.choice(client.get(urljoin(ORPHANS_PATH, 'erratum/')))
     client.delete(orphan['_href'])
     orphans_post = client.get(ORPHANS_PATH)
     self.check_one_orphan_deleted(orphans_pre, orphans_post, orphan)
Example #39
0
    def setUpClass(cls):
        """Create an RPM repository, upload comps metadata, and publish.

        More specifically:

        1. Create a repository.
        2. Add yum distributor to it.
        3. Import fixture group units.
        4. Publish repository.
        5. Fetch and parse generated ``comps.xml``.
        """
        super(CompsGroupsTestCase, cls).setUpClass()

        # Create a repository and add a distributor to it.
        client = api.Client(cls.cfg, api.json_handler)
        repo = client.post(REPOSITORY_PATH, gen_repo())
        cls.resources.add(repo['_href'])
        distributor = client.post(
            urljoin(repo['_href'], 'distributors/'),
            gen_distributor(),
        )

        # Generate several package groups, import them into the repository, and
        # publish the repository.
        #
        # NOTE: The ordering of cls.package_groups matters to test methods! It
        # may be better to make this a dict in the form {label: package_group}.
        cls.package_groups = (_gen_minimal_group(), _gen_realistic_group())
        cls.tasks = {}  # spawned import tasks, keyed by package group id
        for package_group in cls.package_groups:
            report = _upload_import_package_group(cls.cfg, repo, package_group)
            cls.tasks[package_group['id']] = tuple(
                api.poll_spawned_tasks(cls.cfg, report)
            )
        client.post(
            urljoin(repo['_href'], 'actions/publish/'),
            {'id': distributor['id']},
        )

        # Fetch the generated repodata of type 'group' (a.k.a. 'comps')
        cls.root_element = get_repomd_xml(
            cls.cfg,
            urljoin('/pulp/repos/', distributor['config']['relative_url']),
            'group'
        )
Example #40
0
    def setUpClass(cls):
        """Create an RPM repository with a valid feed and sync it.

        Do the following:

        1. Reset Pulp, including the Squid cache.
        2. Create a repository. Sync and publish it using the 'on_demand'
           download policy.
        3. Download an RPM from the published repository.
        4. Download the same RPM to ensure it is served by the cache.
        """
        super(SyncOnDemandTestCase, cls).setUpClass()
        if cls.cfg.version < Version('2.8'):
            raise unittest2.SkipTest('This test requires Pulp 2.8 or greater.')

        # Ensure `locally_stored_units` is 0 before we start.
        utils.reset_squid(cls.cfg)
        utils.reset_pulp(cls.cfg)

        # Create a repository with an 'on_demand' importer and an
        # auto-publishing distributor rooted at the repository's own id.
        client = api.Client(cls.cfg, api.json_handler)
        body = gen_repo()
        body['importer_config'] = {
            'download_policy': 'on_demand',
            'feed': RPM_FEED_URL,
        }
        distributor = gen_distributor()
        distributor['auto_publish'] = True
        distributor['distributor_config']['relative_url'] = body['id']
        body['distributors'] = [distributor]

        repo = client.post(REPOSITORY_PATH, body)
        cls.resources.add(repo['_href'])

        # Sync and read the repository
        sync_path = urljoin(repo['_href'], 'actions/sync/')
        client.post(sync_path, {'override_config': {}})
        cls.repo = client.get(repo['_href'], params={'details': True})

        # Download the same RPM twice.
        client.response_handler = api.safe_handler
        path = urljoin('/pulp/repos/', repo['id'] + '/')
        path = urljoin(path, RPM)
        cls.rpm = client.get(path)
        cls.same_rpm = client.get(path)
Example #41
0
    def setUpClass(cls):
        """Create an RPM repository with a valid feed and sync it.

        Do the following:

        1. Reset Pulp, including the Squid cache.
        2. Create a repository. Sync and publish it using the 'on_demand'
           download policy.
        3. Download an RPM from the published repository.
        4. Download the same RPM to ensure it is served by the cache.
        """
        super(SyncOnDemandTestCase, cls).setUpClass()
        if cls.cfg.version < Version('2.8'):
            raise unittest2.SkipTest('This test requires Pulp 2.8 or greater.')

        # Ensure `locally_stored_units` is 0 before we start.
        utils.reset_squid(cls.cfg)
        utils.reset_pulp(cls.cfg)

        # Create a repository with an 'on_demand' importer and an
        # auto-publishing distributor rooted at the repository's own id.
        client = api.Client(cls.cfg, api.json_handler)
        body = gen_repo()
        body['importer_config'] = {
            'download_policy': 'on_demand',
            'feed': RPM_FEED_URL,
        }
        distributor = gen_distributor()
        distributor['auto_publish'] = True
        distributor['distributor_config']['relative_url'] = body['id']
        body['distributors'] = [distributor]

        repo = client.post(REPOSITORY_PATH, body)
        cls.resources.add(repo['_href'])

        # Sync and read the repository
        sync_path = urljoin(repo['_href'], 'actions/sync/')
        client.post(sync_path, {'override_config': {}})
        cls.repo = client.get(repo['_href'], params={'details': True})

        # Download the same RPM twice.
        client.response_handler = api.safe_handler
        path = urljoin('/pulp/repos/', repo['id'] + '/')
        path = urljoin(path, RPM)
        cls.rpm = client.get(path)
        cls.same_rpm = client.get(path)
Example #42
0
 def test_01_get_by_href(self):
     """Fetch an orphan via its own href and verify the response."""
     client = api.Client(self.cfg)
     candidates = client.get(urljoin(ORPHANS_PATH, 'erratum/')).json()
     chosen = random.choice(candidates)
     response = client.get(chosen['_href'])
     with self.subTest(comment='verify status code'):
         self.assertEqual(response.status_code, 200)
     with self.subTest(comment='verify href'):
         self.assertEqual(chosen['_href'], response.json()['_href'])
Example #43
0
def sync_repo(server_config, href):
    """Sync the referenced repository. Return the raw server response.

    :param pulp_smash.config.ServerConfig server_config: Information about the
        Pulp server being targeted.
    :param href: The path to the repository to sync.
    :returns: The server's response.
    """
    client = api.Client(server_config)
    sync_path = urljoin(href, 'actions/sync/')
    return client.post(sync_path, {'override_config': {}})
Example #44
0
    def health_check(self):
        """Execute step three of the test plan."""
        # Create an RPM repository with a feed, and schedule it for cleanup.
        client = api.Client(self.cfg, api.json_handler)
        body = gen_repo()
        body['importer_config']['feed'] = RPM_FEED_URL
        repo = client.post(REPOSITORY_PATH, body)
        self.addCleanup(api.Client(self.cfg).delete, repo['_href'])
        # Sync the repository, add a distributor, and publish the repository.
        client.post(
            urljoin(repo['_href'], 'actions/sync/'),
            {'override_config': {}},
        )
        distributor = client.post(
            urljoin(repo['_href'], 'distributors/'),
            gen_distributor(),
        )
        client.post(
            urljoin(repo['_href'], 'actions/publish/'),
            {'id': distributor['id']},
        )
        # Download the published copy of the RPM.
        client.response_handler = api.safe_handler
        url = urljoin('/pulp/repos/', distributor['config']['relative_url'])
        url = urljoin(url, RPM)
        pulp_rpm = client.get(url).content

        # Does this RPM match the original RPM?
        rpm = client.get(urljoin(RPM_FEED_URL, RPM)).content
        self.assertEqual(rpm, pulp_rpm)
Example #45
0
 def setUpClass(cls):
     """Create an RPM repository with an invalid feed and sync it."""
     super(SyncInvalidFeedTestCase, cls).setUpClass()
     client = api.Client(cls.cfg, api.json_handler)
     body = gen_repo()
     body['importer_config']['feed'] = utils.uuid4()  # a nonsense feed
     repo = client.post(REPOSITORY_PATH, body)
     # Use the echo handler so the raw sync response can be inspected.
     client.response_handler = api.echo_handler
     path = urljoin(repo['_href'], 'actions/sync/')
     cls.report = client.post(path, {'override_config': {}})
     cls.report.raise_for_status()
     cls.tasks = tuple(api.poll_spawned_tasks(cls.cfg, cls.report.json()))
     cls.resources.add(repo['_href'])  # mark for deletion
Example #46
0
 def setUpClass(cls):
     """Create two RPM repositories, with and without feed URLs."""
     super(CreateTestCase, cls).setUpClass()
     client = api.Client(cls.cfg, api.json_handler)
     cls.bodies = tuple((gen_repo() for _ in range(2)))
     # Only the second repository gets a (randomly generated) feed.
     cls.bodies[1]['importer_config'] = {'feed': utils.uuid4()}
     cls.repos = [client.post(REPOSITORY_PATH, body) for body in cls.bodies]
     # Read back each repository's importers, for later assertions.
     cls.importers_iter = [
         client.get(urljoin(repo['_href'], 'importers/'))
         for repo in cls.repos
     ]
     for repo in cls.repos:
         cls.resources.add(repo['_href'])  # mark for deletion
Example #47
0
def _sync_repo(server_config, href):
    """Sync a repository and wait for the sync to complete.

    Verify only the call report's status code. Do not verify each individual
    task, as the default response handler does. Return ``call_report, tasks``.
    """
    client = api.Client(server_config, api.echo_handler)
    call_report = client.post(
        urljoin(href, 'actions/sync/'),
        {'override_config': {}},
    )
    call_report.raise_for_status()
    completed_tasks = tuple(
        api.poll_spawned_tasks(server_config, call_report.json())
    )
    return call_report, completed_tasks
Example #48
0
 def test_03_delete_by_content_type(self):
     """Delete orphans by their content type."""
     client = api.Client(self.cfg, api.json_handler)
     orphans_pre = client.get(ORPHANS_PATH)
     # Delete every erratum orphan in one call.
     client.delete(urljoin(ORPHANS_PATH, 'erratum/'))
     orphans_post = client.get(ORPHANS_PATH)
     with self.subTest(comment='verify total count'):
         # Exactly the erratum orphans should be gone from the total.
         self.assertEqual(
             _count_orphans(orphans_pre) - orphans_pre['erratum']['count'],
             _count_orphans(orphans_post),
             orphans_post,
         )
     with self.subTest(comment='verify erratum count'):
         self.assertEqual(orphans_post['erratum']['count'], 0, orphans_post)
Example #49
0
    def setUpClass(cls):
        """Create a puppet repository with an invalid feed and sync it."""
        super(SyncInvalidFeedTestCase, cls).setUpClass()
        client = api.Client(cls.cfg, api.json_handler)
        body = _gen_repo()
        # A random hostname makes for an invalid feed URL.
        body['importer_config'] = {'feed': 'http://' + utils.uuid4()}
        repo = client.post(REPOSITORY_PATH, body)
        cls.resources.add(repo['_href'])

        # Trigger a repository sync and collect completed tasks.
        client.response_handler = api.echo_handler
        cls.report = client.post(urljoin(repo['_href'], 'actions/sync/'))
        cls.report.raise_for_status()
        cls.tasks = list(api.poll_spawned_tasks(cls.cfg, cls.report.json()))
Example #50
0
    def test_02_delete_by_type_and_id(self):
        """Delete an orphan by its ID and type.

        This test exercises `Pulp #1923 <https://pulp.plan.io/issues/1923>`_.
        """
        client = api.Client(self.cfg, api.json_handler)
        orphans_pre = client.get(ORPHANS_PATH)
        # Pick an arbitrary erratum orphan as the deletion target.
        orphan = random.choice(client.get(urljoin(ORPHANS_PATH, 'erratum/')))
        client.post('pulp/api/v2/content/actions/delete_orphans/',
                    [{
                        'content_type_id': 'erratum',
                        'unit_id': orphan['_id'],
                    }])
        orphans_post = client.get(ORPHANS_PATH)
        self.check_one_orphan_deleted(orphans_pre, orphans_post, orphan)
Example #51
0
    def setUpClass(cls):
        """Create an RPM repo, sync it, delete the repo, remove orphans."""
        super(OrphanRemoveAllTestCase, cls).setUpClass()
        client = api.Client(cls.cfg, api.json_handler)
        body = gen_repo()
        body['importer_config']['feed'] = RPM_FEED_URL
        repo = client.post(REPOSITORY_PATH, body)
        sync_path = urljoin(repo['_href'], 'actions/sync/')
        client.post(sync_path, {'override_config': {}})

        # Record the orphan count before and after deleting the repository,
        # and again after removing all orphans, for later assertions.
        cls.num_orphans_pre_repo_del = _count_orphans(client)
        client.delete(repo['_href'])
        cls.num_orphans_post_repo_del = _count_orphans(client)
        client.delete(ORPHANS_PATH)
        cls.num_orphans_after_rm = _count_orphans(client)
Example #52
0
    def setUpClass(cls):
        """Create an RPM repo with a valid feed, create a schedule to sync it.

        Do the following:

        1. Create a repository with a valid feed
        2. Schedule sync to run every 30 seconds
        """
        super(CreateSuccessTestCase, cls).setUpClass()
        href, importer_type_id = cls.create_repo()

        # Schedule a sync
        path = urljoin(href, _SCHEDULE_PATH.format(importer_type_id))
        cls.response = api.Client(cls.cfg).post(path, _SCHEDULE)
        cls.response_json = cls.response.json()  # parsed response body
Example #53
0
    def get_repodata_xml(cls, repo_url, repomd_type):
        """Fetch and return an XML reader for the XML of the requested type.

        repomd.xml from the given repo_url is parsed to resolve the
        reference to the specified type. An exception is raised if the
        requested repodata can't be fetched.
        """
        client = api.Client(cls.cfg, xml_handler)
        repomd_xml = client.get(urljoin(repo_url, 'repodata/repomd.xml'))

        # Find the single <location> element for the requested data type.
        namespace = 'http://linux.duke.edu/metadata/repo'
        xpath = "{%s}data[@type='%s']/{%s}location" % (
            namespace, repomd_type, namespace)
        location = repomd_xml.findall(xpath)
        if len(location) != 1:
            raise ValueError('%d location tags' % len(location))

        return client.get(urljoin(repo_url, location[0].get('href')))
Example #54
0
def _list_repo_units_of_type(client, repo_href, type_id):
    """List units of the specified type in the repository at the href.

    :param pulp_smash.api.Client client: object to make calls to API
    :param repo_href: url of the repo the unit is associated to
    :param type_id: type of unit that will be removed
    :return: list of unit identifiers
    """
    response = client.post(
        urljoin(repo_href, 'search/units/'),
        {'criteria': {
            'type_ids': [type_id],
            'filters': {
                'unit': {}
            }
        }},
    )
    key = 'name' if type_id == 'rpm' else 'id'
    return [unit['metadata'][key] for unit in response]
Example #55
0
    def request(self, method, url, **kwargs):
        """Send an HTTP request.

        Arguments passed directly in to this method override (but do not
        overwrite!) arguments specified in ``self.request_kwargs``.
        """
        # The `self.request_kwargs` dict should *always* have a "url" argument.
        # This is enforced by `self.__init__`. This allows us to call the
        # `requests.request` function and satisfy its signature:
        #
        #     request(method, url, **kwargs)
        #
        # Work on a copy so `self.request_kwargs` itself is never mutated.
        request_kwargs = self.request_kwargs.copy()
        request_kwargs['url'] = urljoin(request_kwargs['url'], url)
        request_kwargs.update(kwargs)
        return self.response_handler(
            self._cfg,
            requests.request(method, **request_kwargs),
        )
Example #56
0
    def import_erratum(cls, client, repo, erratum):
        """Import a single erratum to a repo.

        Returns the tasks created as a result of the import. There's expected
        to be only one task, but that's not verified here.
        """
        # Allocate an upload request, then import it into the repository.
        malloc = client.post(CONTENT_UPLOAD_PATH)
        import_response = client.post(
            urljoin(repo['_href'], 'actions/import_upload/'),
            {
                'upload_id': malloc['upload_id'],
                'unit_type_id': 'erratum',
                'unit_key': {'id': erratum['id']},
                'unit_metadata': erratum,
            },
        )
        return cls.get_spawned_tasks(client, import_response)
Example #57
0
    def setUpClass(cls):
        """Create a schedule to sync the repo, verify the ``total_run_count``.

        Do the following:

        1. Create a repository with a valid feed
        2. Schedule sync to run every 30 seconds
        3. Wait for 40 seconds and read the schedule to get the number of
           "sync" runs.

        """
        super(ScheduledSyncTestCase, cls).setUpClass()
        href, importer_type_id = cls.create_repo()

        # Schedule a sync to run every 30 seconds. Wait 40 seconds and read it.
        client = api.Client(cls.cfg, api.json_handler)
        schedule_path = urljoin(href, _SCHEDULE_PATH.format(importer_type_id))
        schedule = client.post(schedule_path, _SCHEDULE)
        time.sleep(40)  # long enough for at least one scheduled run
        cls.response = client.get(schedule['_href'])
Example #58
0
def _remove_unit(client, repo_href, type_id, unit_id):
    """Remove a unit from a repository.

    :param pulp_smash.api.Client client: object to make calls to API
    :param repo_href: url of the repo the unit is associated to
    :param type_id: type of unit that will be removed
    :param unit_id: id of the unit to be removed
    :return: response from server
    """
    rm_path = urljoin(repo_href, 'actions/unassociate/')
    search_field = 'name' if type_id == 'rpm' else 'id'
    rm_body = {
        'criteria': {
            'type_ids': [type_id],
            'filters': {
                'unit': {
                    search_field: {
                        '$in': [unit_id]
                    }
                }
            }
        }
    }
    return client.post(rm_path, rm_body)
Example #59
0
def create_sync_repo(server_config, body):
    """Create and sync a repository.

    :param pulp_smash.config.ServerConfig server_config: Information about the
        Pulp server being targeted.
    :param body: Data to encode as JSON and send as the body of the repository
        creation request.
    :returns: An iterable of ``(repo_href, sync_response)``. Note that
        ``sync_response.json()`` is a `call report`_.

    .. _call report:
        http://pulp.readthedocs.org/en/latest/dev-guide/conventions/sync-v-async.html#call-report
    """
    client = api.Client(server_config)
    repo = client.post(REPOSITORY_PATH, body).json()
    # When a sync is requested, the default response handler (api.safe_handler)
    # will inspect the response (a call report), poll tasks until completion,
    # and inspect completed tasks. It's in our interest to let that happen
    # rather than redundantly pushing all those checks into test cases.
    sync_response = client.post(
        urljoin(repo['_href'], 'actions/sync/'),
        {'override_config': {}},
    )
    return repo['_href'], sync_response