Example #1
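The imports below are not part of the original snippet; the laniakea.db path matches the other examples on this page, while the Repository and GpgException import paths are assumptions.

import os
import pytest
from laniakea.db import ArchiveSuite, ArchiveComponent, ArchiveArchitecture
from laniakea.repository import Repository, GpgException  # import paths assumed
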
def test_repo_local(samplesdir, localconfig):
    keyrings = localconfig.trusted_gpg_keyrings
    repo_location = os.path.join(samplesdir, 'samplerepo', 'dummy')

    suite = ArchiveSuite('testing')
    component = ArchiveComponent('main')
    arch = ArchiveArchitecture('amd64')
    arch_all = ArchiveArchitecture('all')
    repo = Repository(repo_location, 'Dummy', trusted_keyrings=[])

    # we have no keyrings set, so this should fail
    with pytest.raises(GpgException):
        src_pkgs = repo.source_packages(suite, component)

    # try again, this time with the trusted keyrings set
    repo = Repository(repo_location, 'Dummy', trusted_keyrings=keyrings)
    src_pkgs = repo.source_packages(suite, component)
    bin_pkgs = repo.binary_packages(suite, component, arch)
    assert len(bin_pkgs) == 4
    bin_pkgs.extend(repo.binary_packages(suite, component, arch_all))

    # check packages
    assert len(src_pkgs) == 8
    assert len(bin_pkgs) == 7

    validate_src_packages(src_pkgs)
    validate_bin_packages(bin_pkgs)
Example #2
    def _get_repo_source_package_map(self, repo, suite_name: str, component_name: str):
        ''' Get a dictionary of the newest source packages present in a repository. '''

        suite = ArchiveSuite(suite_name)
        component = ArchiveComponent(component_name)
        spkgs = repo.source_packages(suite, component)
        return make_newest_packages_dict(spkgs)
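
make_newest_packages_dict is not shown on this page. Below is a minimal sketch of its assumed behavior (keep only the highest version per package name), using python-apt for Debian version comparison; whether Laniakea compares versions this way is an assumption:

import apt_pkg

apt_pkg.init()  # must be called once before using apt_pkg.version_compare

def make_newest_packages_dict(pkgs):
    '''Map package name -> package, keeping only the newest version of each.'''
    res = {}
    for pkg in pkgs:
        epkg = res.get(pkg.name)
        if epkg is None or apt_pkg.version_compare(pkg.version, epkg.version) > 0:
            res[pkg.name] = pkg
    return res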
Example #3
    def _get_repo_binary_package_map(self, repo, suite_name: str, component_name: str,
                                     arch_name: str = None, with_installer: bool = True):
        ''' Get a dictionary of the newest binary packages present in a repository. '''

        suite = ArchiveSuite(suite_name)
        component = ArchiveComponent(component_name)
        arch = ArchiveArchitecture(arch_name)
        arch_all = ArchiveArchitecture('all')
        bpkgs = repo.binary_packages(suite, component, arch)
        bpkgs.extend(repo.binary_packages(suite, component, arch_all))  # always append arch:all packages

        if with_installer:
            # add d-i packages to the mix
            bpkgs.extend(repo.installer_packages(suite, component, arch))
            bpkgs.extend(repo.installer_packages(suite, component, arch_all))  # always append arch:all packages
        return make_newest_packages_dict(bpkgs)
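
A hypothetical call of this helper from within the same class, reusing the _source_repo attribute that appears in the autosync example below:

bin_map = self._get_repo_binary_package_map(self._source_repo, 'unstable', 'main',
                                            arch_name='amd64', with_installer=False)
newest_hello = bin_map.get('hello')  # newest 'hello' built for amd64 or arch:all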
Example #4
    def autosync(self, session, sync_conf, remove_cruft: bool = True):
        ''' Synchronize all packages that are newer in the source suite. '''

        self._synced_source_pkgs = []
        active_src_pkgs = []  # source packages which should have their binary packages updated
        res_issues = []

        target_suite = session.query(ArchiveSuite) \
                              .filter(ArchiveSuite.name == self._target_suite_name).one()
        # re-fetch the matching sync configuration from the database for this
        # source/target suite pair (this replaces the sync_conf that was passed in)
        sync_conf = session.query(SynchrotronConfig) \
                           .join(SynchrotronConfig.destination_suite) \
                           .join(SynchrotronConfig.source) \
                           .filter(ArchiveSuite.name == self._target_suite_name,
                                   SynchrotronSource.suite_name == self._source_suite_name).one_or_none()

        for component in target_suite.components:
            dest_pkg_map = self._get_target_source_packages(component.name)

            # The source package list contains many different versions, and some source
            # package versions are explicitly kept for GPL-compatibility.
            # Sometimes a binary package migrates into another suite, dragging the newer
            # source package it was built against into the target suite with it.
            # Such source packages then have a high version number, but might not have
            # any binaries in the target yet, because those migrate later.
            # We need to handle that case when doing binary syncs (TODO: and maybe
            # safeguard against it when doing source-only syncs too?). That's why we
            # don't filter out the newest packages in binary-sync mode.
            if sync_conf.sync_binaries:
                src_pkg_range = self._source_repo.source_packages(ArchiveSuite(self._source_suite_name), component)
            else:
                src_pkg_range = self._get_repo_source_package_map(self._source_repo,
                                                                  self._source_suite_name,
                                                                  component).values()

            for spkg in src_pkg_range:
                # ignore blacklisted packages in automatic sync
                if spkg.name in self._sync_blacklist:
                    continue

                dpkg = dest_pkg_map.get(spkg.name)
                if dpkg:
                    if version_compare(dpkg.version, spkg.version) >= 0:
                        log.debug('Skipped sync of {}: Target version \'{}\' is equal to or newer than source version \'{}\'.'
                                  .format(spkg.name, dpkg.version, spkg.version))
                        continue

                    # check if we have a modified target package,
                    # indicated via its Debian revision, e.g. "1.0-0tanglu1"
                    if self._distro_tag in version_revision(dpkg.version):
                        log.info('Not syncing {}/{}: Destination has modifications (found {}).'
                                 .format(spkg.name, spkg.version, dpkg.version))

                        # add information that this package needs to be merged to the issue list
                        issue = SynchrotronIssue()
                        issue.package_name = spkg.name
                        issue.source_version = spkg.version
                        issue.target_version = dpkg.version
                        issue.kind = SynchrotronIssueKind.MERGE_REQUIRED

                        res_issues.append(issue)
                        continue

                # sync source package
                # the source package must always be known to dak first
                ret = self._import_source_package(spkg, component.name)
                if not ret:
                    return False, []

                # a new source package is always active and needs its binary packages
                # synced, in case we are doing binary syncs.
                active_src_pkgs.append(spkg)

            # all packages in the target distribution are considered active, as long as they don't
            # have modifications.
            for spkg in dest_pkg_map.values():
                if self._distro_tag in version_revision(spkg.version):
                    active_src_pkgs.append(spkg)

            # import binaries as well. We test for binary updates for all available active source packages,
            # as binNMUs might have happened in the source distribution.
            # (an active package in this context is any source package which doesn't have modifications in the
            # target distribution)
            ret = self._import_binaries_for_source(sync_conf, target_suite, component.name, active_src_pkgs)
            if not ret:
                return False, []

        # test for cruft packages
        target_pkg_index = {}
        for component in target_suite.components:
            dest_pkg_map = self._get_repo_source_package_map(self._target_repo,
                                                             target_suite.name,
                                                             component.name)
            for pkgname, pkg in dest_pkg_map.items():
                target_pkg_index[pkgname] = pkg

        # check which packages are present in the target, but not in the source suite
        for component in target_suite.components:
            src_pkg_map = self._get_repo_source_package_map(self._source_repo,
                                                            self._source_suite_name,
                                                            component.name)
            for pkgname in src_pkg_map.keys():
                target_pkg_index.pop(pkgname, None)

        # remove cruft packages
        if remove_cruft:
            for pkgname, dpkg in target_pkg_index.items():
                dpkg_ver_revision = version_revision(dpkg.version, False)
                # native packages are never removed
                if not dpkg_ver_revision:
                    continue

                # check if the package was introduced as new in the distro, in which case we won't remove it
                if dpkg_ver_revision.startswith('0' + self._distro_tag):
                    continue

                # if this package was modified in the target distro, we will also not remove it, but flag it
                # as "potential cruft" for someone to look at.
                if self._distro_tag in dpkg_ver_revision:
                    issue = SynchrotronIssue()
                    issue.kind = SynchrotronIssueKind.MAYBE_CRUFT
                    issue.source_suite = self._source_suite_name
                    issue.target_suite = self._target_suite_name
                    issue.package_name = dpkg.name
                    issue.source_version = None
                    issue.target_version = dpkg.version

                    res_issues.append(issue)
                    continue

                # check if we can remove this package without breaking stuff
                if self._dak.package_is_removable(dpkg.name, target_suite.name):
                    # try to remove the package
                    try:
                        self._dak.remove_package(dpkg.name, target_suite.name)
                    except Exception as e:
                        issue = SynchrotronIssue()
                        issue.kind = SynchrotronIssueKind.REMOVAL_FAILED
                        issue.source_suite = self._source_suite_name
                        issue.target_suite = self._target_suite_name
                        issue.package_name = dpkg.name
                        issue.source_version = None
                        issue.target_version = dpkg.version
                        issue.details = str(e)

                        res_issues.append(issue)
                else:
                    # looks like we cannot remove this package
                    issue = SynchrotronIssue()
                    issue.kind = SynchrotronIssueKind.REMOVAL_FAILED
                    issue.source_suite = self._source_suite_name
                    issue.target_suite = self._target_suite_name
                    issue.package_name = dpkg.name
                    issue.source_version = None
                    issue.target_version = dpkg.version
                    issue.details = 'This package cannot be removed without breaking other packages. It needs manual removal.'

                    res_issues.append(issue)

        self._publish_synced_spkg_events(sync_conf.source.os_name,
                                         sync_conf.source.suite_name,
                                         sync_conf.destination_suite.name,
                                         False)

        return True, res_issues
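
A hypothetical caller, illustrating the (success, issues) return convention of autosync; the engine instance and open session here are assumptions:

ok, issues = engine.autosync(session, sync_conf, remove_cruft=True)
if not ok:
    raise RuntimeError('Package synchronization failed.')
for issue in issues:
    # each issue is a SynchrotronIssue, as constructed above
    log.warning('Sync issue for {}: {}'.format(issue.package_name, issue.kind))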
Example #5
def database(localconfig, podman_ip, podman_services):
    '''
    Retrieve a pristine, empty Laniakea database connection.
    This will wipe the global database, so tests using this can
    never run in parallel.
    '''
    import json
    from laniakea.db import Database, session_scope, ArchiveRepository, ArchiveSuite, \
        ArchiveComponent, ArchiveArchitecture
    from laniakea.db.core import config_set_project_name, config_set_distro_tag

    # get the host port mapped to our database container
    db_port = podman_services.port_for('postgres', 5432)

    # update database URL to use scratch database in our container
    pgdb_url = 'postgresql://*****:*****@{}:{}/laniakea_unittest'.format(
        podman_ip, db_port)
    LocalConfig.instance._database_url = pgdb_url
    assert localconfig.database_url == pgdb_url

    # update the on-disk configuration, we may pass that on to independent modules
    with open(localconfig.fname, 'r') as f:
        config_json = json.load(f)
    config_json['Database']['host'] = podman_ip
    config_json['Database']['port'] = db_port
    with open(localconfig.fname, 'w') as f:
        json.dump(config_json, f)

    # create database factory singleton, if it didn't exist yet
    db = Database(localconfig)

    # wait for the database to become available
    podman_services.wait_until_responsive(
        timeout=60.0,
        pause=0.5,
        check=lambda: pgsql_test_available(session_scope))

    # clear database tables so test function has a pristine database to work with
    with session_scope() as session:
        session.execute('DROP OWNED BY lkdbuser_test;')
    db.downgrade('base')
    db.create_tables()

    # add core configuration data to the database
    config_set_project_name('Test Project')
    config_set_distro_tag('test')
    with session_scope() as session:
        # master repository, currently the only one we support
        repo = ArchiveRepository('master')
        session.add(repo)

        # components
        acpt_main = ArchiveComponent('main')
        acpt_contrib = ArchiveComponent('contrib')
        acpt_nonfree = ArchiveComponent('non-free')
        acpt_contrib.parent_component = acpt_main
        acpt_nonfree.parent_component = acpt_main

        all_components = [acpt_main, acpt_contrib, acpt_nonfree]
        session.add_all(all_components)

        # architectures
        arch_all = ArchiveArchitecture('all')
        arch_amd64 = ArchiveArchitecture('amd64')
        arch_arm64 = ArchiveArchitecture('arm64')

        all_architectures = [arch_all, arch_amd64, arch_arm64]
        session.add_all(all_architectures)

        # add 'unstable' suite
        suite_us = ArchiveSuite('unstable')
        suite_us.repos = [repo]
        suite_us.components = all_components
        suite_us.architectures = all_architectures
        suite_us.accept_uploads = True
        session.add(suite_us)

        # add 'testing' suite
        suite_te = ArchiveSuite('testing')
        suite_te.repos = [repo]
        suite_te.components = all_components
        suite_te.architectures = all_architectures
        suite_te.devel_target = True
        session.add(suite_te)

        # add 'experimental' suite
        suite_ex = ArchiveSuite('experimental')
        suite_ex.repos = [repo]
        suite_ex.components = all_components
        suite_ex.architectures = [arch_all, arch_amd64]
        suite_ex.accept_uploads = True
        suite_ex.parent = suite_us
        session.add(suite_ex)

    return db
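
A hypothetical test using this fixture; the expected suite names follow directly from the setup above:

def test_default_suites(database):
    from laniakea.db import session_scope, ArchiveSuite

    with session_scope() as session:
        suites = session.query(ArchiveSuite).all()
        assert {s.name for s in suites} == {'unstable', 'testing', 'experimental'}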
Example #6
def _add_new_suite(session):
    '''
    Interactively register a new suite.
    '''
    from laniakea.db import ArchiveRepository, ArchiveSuite, ArchiveComponent, ArchiveArchitecture

    repo = session.query(ArchiveRepository) \
        .filter(ArchiveRepository.name == 'master').one()

    suite_name = input_str('Adding a new suite. Please set a name')

    suite = session.query(ArchiveSuite) \
        .filter(ArchiveSuite.name == suite_name).one_or_none()
    if suite:
        print_note('Removing existing suite with the same name ("{}")'.format(
            suite_name))
        session.delete(suite)
        session.commit()

    suite = ArchiveSuite(suite_name)
    suite.repos = [repo]

    component_names = input_list('List of components for suite "{}"'.format(
        suite.name))
    add_main_dep = 'main' in component_names
    if add_main_dep:
        component_names.remove('main')
        component_names.insert(0, 'main')

    suite.components = []
    main_component = None
    for cname in component_names:
        component = session.query(ArchiveComponent) \
            .filter(ArchiveComponent.name == cname).one_or_none()
        if not component:
            component = ArchiveComponent(cname)
            if add_main_dep and main_component and cname != 'main':
                component.parent_component = main_component
            session.add(component)
        if cname == 'main':
            main_component = component
        suite.components.append(component)

    arch_names = input_list('List of architectures for suite "{}"'.format(
        suite.name))
    # every suite has the "all" architecture, so add it straight away
    if 'all' not in arch_names:
        arch_names.insert(0, 'all')

    suite.architectures = []
    for aname in arch_names:
        arch = session.query(ArchiveArchitecture) \
            .filter(ArchiveArchitecture.name == aname).one_or_none()
        if not arch:
            arch = ArchiveArchitecture(aname)
            session.add(arch)
        suite.architectures.append(arch)

    parent_suite = None
    while not parent_suite:
        parent_suite_name = input_str(
            'Set the name of the suite this suite is an overlay to. Leave empty for a primary suite. (The suite being overlaid must have been added first!)',
            allow_empty=True)
        if not parent_suite_name:
            break

        parent_suite = session.query(ArchiveSuite) \
            .filter(ArchiveSuite.name == parent_suite_name).one_or_none()
        if not parent_suite:
            print_note(
                'Parent suite "{}" was not found.'.format(parent_suite_name))
        suite.parent = parent_suite

    session.add(suite)
    session.commit()
Example #7
def _add_new_suite(session):
    '''
    Interactively register a new suite.
    '''
    from laniakea.db import ArchiveRepository, ArchiveSuite, ArchiveComponent, ArchiveArchitecture

    repo = session.query(ArchiveRepository) \
        .filter(ArchiveRepository.name == 'master').one()

    suite_name = input_str('Adding a new suite. Please set a name')

    suite = session.query(ArchiveSuite) \
        .filter(ArchiveSuite.name == suite_name).one_or_none()
    if suite:
        print_note('Removing existing suite with the same name ("{}")'.format(suite_name))
        session.delete(suite)
        session.commit()

    suite = ArchiveSuite()
    suite.name = suite_name
    suite.repos = [repo]

    component_names = input_list('List of components for suite "{}"'.format(suite.name))
    add_main_dep = 'main' in component_names
    if add_main_dep:
        component_names.remove('main')
        component_names.insert(0, 'main')

    suite.components = []
    main_component = None
    for cname in component_names:
        component = session.query(ArchiveComponent) \
            .filter(ArchiveComponent.name == cname).one_or_none()
        if not component:
            component = ArchiveComponent(cname)
            if add_main_dep and main_component and cname != 'main':
                component.parent_component = main_component
            session.add(component)
        if cname == 'main':
            main_component = component
        suite.components.append(component)

    arch_names = input_list('List of architectures for suite "{}"'.format(suite.name))
    # every suite has the "all" architecture, so add it straight away
    if 'all' not in arch_names:
        arch_names.insert(0, 'all')

    suite.architectures = []
    for aname in arch_names:
        arch = session.query(ArchiveArchitecture) \
            .filter(ArchiveArchitecture.name == aname).one_or_none()
        if not arch:
            arch = ArchiveArchitecture(aname)
            session.add(arch)
        suite.architectures.append(arch)

    parent_suite = None
    while not parent_suite:
        parent_suite_name = input_str('Set the name of the suite this suite is an overlay to. Leave empty for a primary suite. (The suite being overlaid must have been added first!)',
                                      allow_empty=True)
        if not parent_suite_name:
            break

        parent_suite = session.query(ArchiveSuite) \
            .filter(ArchiveSuite.name == parent_suite_name).one_or_none()
        if not parent_suite:
            print_note('Parent suite "{}" was not found.'.format(parent_suite_name))
        suite.parent_suite = parent_suite

    session.add(suite)
    session.commit()
Example #8
def database(localconfig):
    '''
    Retrieve a pristine, empty Laniakea database connection.
    This will wipe the global database, so tests using this can
    never run in parallel.
    '''
    from laniakea.db import Database, session_scope, ArchiveRepository, ArchiveSuite, \
        ArchiveComponent, ArchiveArchitecture
    from laniakea.db.core import config_set_project_name, config_set_distro_tag
    db = Database(localconfig)  # create singleton, if it didn't exist yet

    # clear database tables so test function has a pristine database to work with
    with session_scope() as session:
        session.execute('DROP OWNED BY lkdbuser_test;')
    db.downgrade('base')
    db.create_tables()

    # add core configuration data to the database
    config_set_project_name('Test Project')
    config_set_distro_tag('test')
    with session_scope() as session:
        # master repository, currently the only one we support
        repo = ArchiveRepository('master')
        session.add(repo)

        # components
        acpt_main = ArchiveComponent('main')
        acpt_contrib = ArchiveComponent('contrib')
        acpt_nonfree = ArchiveComponent('non-free')
        acpt_contrib.parent_component = acpt_main
        acpt_nonfree.parent_component = acpt_main

        all_components = [acpt_main, acpt_contrib, acpt_nonfree]
        session.add_all(all_components)

        # architectures
        arch_all = ArchiveArchitecture('all')
        arch_amd64 = ArchiveArchitecture('amd64')
        arch_arm64 = ArchiveArchitecture('arm64')

        all_architectures = [arch_all, arch_amd64, arch_arm64]
        session.add_all(all_architectures)

        # add 'unstable' suite
        suite_us = ArchiveSuite()
        suite_us.name = 'unstable'
        suite_us.repos = [repo]
        suite_us.components = all_components
        suite_us.architectures = all_architectures
        suite_us.accept_uploads = True
        session.add(suite_us)

        # add 'testing' suite
        suite_te = ArchiveSuite()
        suite_te.name = 'testing'
        suite_te.repos = [repo]
        suite_te.components = all_components
        suite_te.architectures = all_architectures
        suite_te.devel_target = True
        session.add(suite_te)

        # add 'experimental' suite
        suite_ex = ArchiveSuite()
        suite_ex.name = 'experimental'
        suite_ex.repos = [repo]
        suite_ex.components = all_components
        suite_ex.architectures = [arch_all, arch_amd64]
        suite_ex.accept_uploads = True
        suite_ex.parent = suite_us
        session.add(suite_ex)

    return db