def updates_check(self) -> Dict[str, List[Tuple[str, str]]]:
        """
        Returns the list of packages to be updated/installed
        by a dist-upgrade.
        """
        install = []
        update = []
        remove = []

        apt = Cache(memonly=True)
        apt.update()
        apt.open()
        apt.clear()
        apt.upgrade(dist_upgrade=True)
        for pkg in apt.get_changes():
            if pkg.marked_install:
                install.append((pkg.name, pkg.candidate.version))
            if pkg.marked_upgrade:
                update.append((pkg.name, pkg.candidate.version))
            if pkg.marked_delete:
                remove.append((pkg.name, pkg.installed.version))

        return dict(
            update=sorted(update),
            install=sorted(install),
            remove=sorted(remove),
        )
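A standalone sketch of the same dist-upgrade simulation, kept to python-apt calls that already appear above; cache.update() needs root privileges, so this variant only inspects the package lists already on disk:

from apt import Cache

cache = Cache(memonly=True)
cache.upgrade(dist_upgrade=True)   # mark a dist-upgrade, but do not apply it
for pkg in cache.get_changes():
    if pkg.marked_upgrade:
        print("upgrade %s: %s -> %s"
              % (pkg.name, pkg.installed.version, pkg.candidate.version))
    elif pkg.marked_install:
        print("install %s %s" % (pkg.name, pkg.candidate.version))
    elif pkg.marked_delete:
        print("remove %s" % pkg.name)
cache.clear()                      # discard the simulated changes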
Example 2
def configure_grub(bootdev):
    print("Configuring GRUB.")
    os.system("mkdir /etc/default/grub.d")
    if len(bootdev) == 0:
        fipscfg = open("/etc/default/grub.d/99-fips.cfg", "w+")
        fipscfg.write(
            "GRUB_CMDLINE_LINUX_DEFAULT=\"$GRUB_CMDLINE_LINUX_DEFAULT fips=1\""
        )
        fipscfg.close()
    else:
        fipscfg = open("/etc/default/grub.d/99-fips.cfg", "w+")
        fipscfg.write(
            "GRUB_CMDLINE_LINUX_DEFAULT=\"$GRUB_CMDLINE_LINUX_DEFAULT fips=1 bootdev="
            + bootdev + "\"")
        fipscfg.close()
    cache = Cache()
    cache.open()
    kernelnumbers = str(str(
        cache["linux-fips"].versions[0]).split("=")[1]).split(".")
    kernelversion = str(kernelnumbers[0]) + "." + str(
        kernelnumbers[1]) + "." + str(kernelnumbers[2]) + "-" + str(
            kernelnumbers[3]) + "-fips"
    os.system("mv /etc/default/grub /etc/default/grub.pre-fips")
    grubcfg = open("/etc/default/grub.pre-fips", "r")
    newgrubcfg = open("/etc/default/grub", "w+")
    for line in grubcfg.readlines():
        if line.startswith("GRUB_DEFAULT"):
            newgrubcfg.write("#" + line)
            newgrubcfg.write(
                "GRUB_DEFAULT=\"Advanced options for Ubuntu>Ubuntu, with Linux "
                + str(kernelversion) + "\"\n")
        else:
            newgrubcfg.write(line)
    grubcfg.close()
    newgrubcfg.close()
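Version objects expose their version string directly, so the str()/split("=") round-trip above can be avoided. A small sketch under the same assumption that the linux-fips meta package carries an all-dots version such as 4.15.0.1050.54 (candidate is None when the package is unavailable):

from apt import Cache

cache = Cache()
candidate = cache["linux-fips"].candidate
if candidate is not None:
    major, minor, patch, abi = candidate.version.split(".")[:4]
    kernelversion = "%s.%s.%s-%s-fips" % (major, minor, patch, abi)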
Example 3
    def __init__(self,
                 rfs,
                 log,
                 arch,
                 notifier=None,
                 norecommend=False,
                 noauth=True):
        sys.stdout = open(log, 'a', buffering=0)
        sys.stderr = open(log, 'a', buffering=0)
        self.logfile = open(log, 'a', buffering=0)

        InChRootObject.__init__(self, rfs)

        self.notifier = notifier
        config.set("APT::Architecture", arch)
        if norecommend:
            config.set("APT::Install-Recommends", "0")
        else:
            config.set("APT::Install-Recommends", "1")

        if noauth:
            config.set("APT::Get::AllowUnauthenticated", "1")
        else:
            config.set("APT::Get::AllowUnauthenticated", "0")

        self.cache = Cache(progress=ElbeOpProgress())
        self.cache.open(progress=ElbeOpProgress())
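The snippet above relies on names imported elsewhere in elbe; roughly, config is the global apt_pkg configuration object and Cache is the high-level cache. A minimal sketch of that setup (the architecture string is just an example value):

from apt import Cache
from apt_pkg import config   # global apt configuration object

config.set("APT::Architecture", "armhf")      # example value
config.set("APT::Install-Recommends", "0")
cache = Cache()
cache.open()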
Example 4
    def start(self):
        os.environ["APT_LISTCHANGES_FRONTEND"] = "none"

        # Do not suspend during the update process
        self.sleep_cookie = inhibit_sleep()

        if self.action == self.ACTION_INSTALL:
            # Get the packages which should be installed and update
            pkgs_install = []
            pkgs_upgrade = []
            pkgs_remove = []
            # Get a fresh cache in case update-manager's is outdated to
            # skip operations that already took place
            fresh_cache = Cache(rootdir=self.window_main.cache.rootdir)
            for pkg in self.window_main.cache:
                try:
                    if pkg.marked_install and \
                       not fresh_cache[pkg.name].is_installed:
                        pkgname = pkg.name
                        if pkg.is_auto_installed:
                            pkgname += "#auto"
                        pkgs_install.append(pkgname)
                    elif (pkg.marked_upgrade and
                          fresh_cache[pkg.name].is_upgradable):
                        pkgs_upgrade.append(pkg.name)
                    elif (pkg.marked_delete and
                          fresh_cache[pkg.name].is_installed):
                        pkgs_remove.append(pkg.name)
                except KeyError:
                    # pkg missing from fresh_cache can't be modified
                    pass
            self.commit(pkgs_install, pkgs_upgrade, pkgs_remove)
        else:
            self.update()
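The "#auto" suffix is an internal convention of this frontend and its consumer is not shown here. A purely illustrative sketch of how such a list could be re-applied to a cache with python-apt so that the auto-installed flag survives (mark_from_list is a hypothetical helper):

def mark_from_list(cache, pkgs_install):
    for entry in pkgs_install:
        # names ending in "#auto" keep their auto-installed flag
        name, _, auto = entry.partition("#")
        pkg = cache[name]
        pkg.mark_install(from_user=not auto)
        if auto:
            pkg.mark_auto(True)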
Example 5
File: fisch.py Project: dadav/fisch
def get_installed_packages():
    try:
        from apt import Cache
    except ImportError as ie:
        logging.error(ie)
        return ['dnsmasq', 'hostapd']  # assume they are installed
    return [pkg.name for pkg in Cache() if pkg.is_installed]
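Usage sketch: thanks to the fallback list, callers can run simple membership tests even when python-apt is unavailable (for example inside a virtualenv):

packages = get_installed_packages()
if 'hostapd' not in packages:
    print('hostapd is not installed')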
Example 6
def get_initvm_pkglist():
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        cache = Cache()
        cache.open()
        pkglist = [APTPackage(p) for p in cache if p.is_installed]

    return pkglist
Example 7
    def __init__(self, db, doc=None, application=None):
        super(AppDetailsDebFile, self).__init__(db, doc, application)
        if doc:
            raise ValueError("doc must be None for deb files")

        try:
            # for some reason Cache() is much faster than "self._cache._cache"
            # on startup
            with ExecutionTime("create DebPackage"):
                self._deb = DebPackage(self._app.request, Cache())
        except:
            self._deb = None
            self._pkg = None
            if not os.path.exists(self._app.request):
                self._error = _("Not found")
                self._error_not_found = utf8(
                    _(u"The file \u201c%s\u201d does not exist.")) % utf8(
                        self._app.request)
            else:
                mimetype = guess_type(self._app.request)
                if mimetype[0] != "application/x-debian-package":
                    self._error = _("Not found")
                    self._error_not_found = utf8(
                        _(u"The file \u201c%s\u201d is not a software package."
                          )) % utf8(self._app.request)
                else:
                    # deb files which are corrupt
                    self._error = _("Internal Error")
                    self._error_not_found = utf8(
                        _(u"The file \u201c%s\u201d could not be opened.")
                    ) % utf8(self._app.request)
            return

        if self.pkgname and self.pkgname != self._app.pkgname:
            # this happens when the deb file has a quirky file name
            self._app.pkgname = self.pkgname

            # load pkg cache
            self._pkg = None
            if (self._app.pkgname in self._cache
                    and self._cache[self._app.pkgname].candidate):
                self._pkg = self._cache[self._app.pkgname]
            # load xapian document
            self._doc = None
            try:
                self._doc = self._db.get_xapian_document(
                    self._app.appname, self._app.pkgname)
            except:
                pass

        # check deb and set failure state on error
        with ExecutionTime("AppDetailsDebFile._deb.check()"):
            if not self._deb.check():
                self._error = self._deb._failure_string
Example 8
def security_status(cfg: UAConfig) -> Dict[str, Any]:
    """Returns the status of security updates on a system.

    The returned dict has a 'packages' key with a list of all installed
    packages which can receive security updates, with or without ESM,
    reflecting the availability of the update based on the UA status.

    There is also a summary with the UA information and the package counts.
    """
    ua_info = get_ua_info(cfg)

    summary = {"ua": ua_info}  # type: Dict[str, Any]
    packages = []
    cache = Cache()

    installed_packages = [package for package in cache if package.is_installed]
    summary["num_installed_packages"] = len(installed_packages)

    package_count = defaultdict(int)  # type: DefaultDict[str, int]
    update_count = defaultdict(int)  # type: DefaultDict[str, int]

    for package in installed_packages:
        package_origin = get_origin_for_package(package)
        package_count[package_origin] += 1

    security_upgradable_versions = filter_security_updates(installed_packages)

    for candidate in security_upgradable_versions:
        service_name, origin_site = get_service_name(candidate.origins)
        status = get_update_status(service_name, ua_info)
        update_count[service_name] += 1
        packages.append({
            "package": candidate.package.name,
            "version": candidate.version,
            "service_name": service_name,
            "status": status,
            "origin": origin_site,
        })

    summary["num_main_packages"] = package_count["main"]
    summary["num_restricted_packages"] = package_count["restricted"]
    summary["num_universe_packages"] = package_count["universe"]
    summary["num_multiverse_packages"] = package_count["multiverse"]
    summary["num_third_party_packages"] = package_count["third-party"]
    summary["num_unknown_packages"] = package_count["unknown"]
    summary["num_esm_infra_packages"] = package_count["esm-infra"]
    summary["num_esm_apps_packages"] = package_count["esm-apps"]

    summary["num_esm_infra_updates"] = update_count["esm-infra"]
    summary["num_esm_apps_updates"] = update_count["esm-apps"]
    summary["num_standard_security_updates"] = update_count[
        "standard-security"]

    return {"_schema_version": "0.1", "summary": summary, "packages": packages}
Example 9
def mk_source_cdrom(rfs,
                    arch,
                    codename,
                    init_codename,
                    target,
                    log,
                    cdrom_size=CDROM_SIZE):

    hostfs.mkdir_p('/var/cache/elbe/sources')
    rfs.mkdir_p('/var/cache/elbe/sources')

    repo = CdromSrcRepo(codename, init_codename,
                        os.path.join(target, "srcrepo"), log, cdrom_size)

    cache = get_rpcaptcache(rfs, "aptcache.log", arch)

    pkglist = cache.get_installed_pkgs()

    for pkg in pkglist:
        try:
            dsc = cache.download_source(pkg.name, '/var/cache/elbe/sources')
            repo.includedsc(dsc)
        except ValueError as ve:
            log.printo("No sources for Package " + pkg.name + "-" +
                       pkg.installed_version)
        except FetchError as fe:
            log.printo("Source for Package " + pkg.name + "-" +
                       pkg.installed_version + " could not be downloaded")

    pkglist = get_initvm_pkglist()
    cache = Cache()
    cache.open()

    for pkg in pkglist:
        try:
            p = cache[pkg.name]
            if pkg.name == 'elbe-bootstrap':
                pkgver = p.versions[0]
            else:
                pkgver = p.installed

            dsc = pkgver.fetch_source('/var/cache/elbe/sources',
                                      ElbeAcquireProgress(cb=None),
                                      unpack=False)
            repo.includedsc(dsc)
        except ValueError as ve:
            log.printo("No sources for Package " + pkg.name + "-" +
                       str(pkg.installed_version))
        except FetchError as fe:
            log.printo("Source for Package " + pkg.name + "-" +
                       pkgver.version + " could not be downloaded")

    return repo.buildiso(os.path.join(target, "src-cdrom.iso"))
Example 10
def get_initvm_pkglist():
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        cache = Cache()
        cache.open()
        pkglist = [APTPackage(p) for p in cache if p.is_installed]
        try:
            eb = APTPackage(cache['elbe-bootstrap'])
            pkglist.append(eb)
        # elbe-bootstrap is not installed on the machine running elbe
        except KeyError:
            pass

    return pkglist
Example 11
def main(ignored_packages):
    cache = Cache(memonly=True)
    not_in_repos = []
    ignore = parse_ignore(ignored_packages)
    for pkg in cache:
        if (pkg.is_installed and not pkg.is_upgradable
                and not check_ignored(pkg, ignore)
                and not any(pkg.installed.uris)):
            not_in_repos.append(pkg)

    if not_in_repos:
        print('WARNING: packages not in repositories: {}'.format(' '.join(
            pkg.name for pkg in not_in_repos)))
        exit(1)

    print('OK: All packages are found in repositories')
    exit(0)
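parse_ignore() and check_ignored() are project helpers that are not shown above; a minimal guess at their shape, purely for illustration:

def parse_ignore(ignored_packages):
    # accept a comma-separated string or any iterable of package names
    if not ignored_packages:
        return set()
    if isinstance(ignored_packages, str):
        return set(ignored_packages.split(','))
    return set(ignored_packages)


def check_ignored(pkg, ignore):
    return pkg.name in ignore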
Example 12
    def get_pt_installed_software(self):
        try:
            from apt import Cache
        except ModuleNotFoundError:
            # probably running in virtualenv. python3-pip is not pip installable
            print("Error: couldn't find python APT library. Skipping...")
            return []

        regex = "^pt-|-pt-|pitop|pi-top"
        apt_cache = Cache()

        pt_packages = []
        for pkg in apt_cache:
            match = search(regex, pkg.name)
            if apt_cache[pkg.name].is_installed and match:
                pt_packages.append((pkg.shortname, pkg.installed.version))
        return pt_packages
Example 13
    def __init__(self, rfs, logpath, arch,
                 notifier=None, norecommend=False, noauth=True):
        self.log = ASCIIDocLog(logpath)
        self.notifier = notifier
        InChRootObject.__init__(self, rfs)
        config.set("APT::Architecture", arch)
        if norecommend:
            config.set("APT::Install-Recommends", "0")
        else:
            config.set("APT::Install-Recommends", "1")

        if noauth:
            config.set("APT::Get::AllowUnauthenticated", "1")
        else:
            config.set("APT::Get::AllowUnauthenticated", "0")

        self.cache = Cache()
        self.cache.open()
Example 14
def prune(opt: Namespace) -> None:
    cache = Cache()

    cur = {PREFIX + uname()[2] + suffix for suffix in {"", "-signed"}}
    top = {
        "univention-kernel-image",
        PREFIX + "amd64",
        PREFIX + "rt-amd64",
        PREFIX + "686-pae",
        PREFIX + "686",
        PREFIX + "rt-686-pae",
    }
    meta = [
        pkg.installed for pkg in (cache[pkg] for pkg in top if pkg in cache)
        if pkg.is_installed
    ]
    if opt.verbose:
        print("Installed kernel meta packages:\n %s" %
              ("\n ".join(sorted(str(pkg) for pkg in meta)), ))

    keep = ({
        dep.name
        for pkg in meta for alt in pkg.dependencies
        for dep in alt if dep.name.startswith(PREFIX)
    }
            | cur
            | top)
    if opt.verbose:
        print("Exception list for kernel packages:\n %s" %
              ("\n ".join(sorted(keep)), ))

    cache.clear()
    for pkg in cache:
        if pkg.name.startswith(PREFIX) and pkg.is_installed:
            if pkg.name not in keep:
                if opt.verbose:
                    print("Purging kernel package: %s" % (pkg.name, ))
                if not opt.dry_run:
                    pkg.mark_delete(purge=True)
            else:
                if opt.verbose:
                    print("Keeping kernel package: %s" % (pkg.name, ))

    cache.commit()
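Usage sketch: prune() only needs an argparse-style namespace carrying verbose and dry_run; PREFIX and uname are module-level names of the original script (PREFIX is presumably something like "linux-image-"):

from argparse import Namespace

# dry run: report what would be purged without marking or committing anything
prune(Namespace(verbose=True, dry_run=True))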
Example 15
def restart_server():
    # This function will restart NginX, MariaDB, PHP5-FPM if installed.
    cache = Cache()
    if cache['nginx'].is_installed:
        system("sudo service nginx restart")
        if cache['mariadb-server'].is_installed:
            system("sudo service mysql restart")
            if cache['php5-fpm'].is_installed:
                system("sudo service php5-fpm restart")
            else:
                print("PHP isn't installed. So, it hasn't been started.")
        else:
            print("MariaDB isn't installed. So, it hasn't been started.")
    elif cache['mysql'].is_installed:
        system("sudo service mysql restart")
        if cache['php5-fpm'].is_installed:
            system("sudo service php5-fpm restart")
        else:
            print("PHP isn't installed. So, it hasn't been started.")
    else:
        system("sudo service php5-fpm restart")
Example 16
def checkInstall(pkgName, action, files):
    x, y, f_list, apt_cache = 0, 0, files.split(','), Cache()
    try:
        for e in pkgName.split('_'):
            if apt_cache[e].is_installed:
                x, y = x + 1, y + 1
    except KeyError:
        pass
    for i in f_list:
        if isfile(i) or isdir(i):
            x = x + 1
    if '--install' in action and x != 0:
        print("\033[0;31mERROR: {} is already installed.\033[0m".format(
            pkgName))
        return False
    elif '--remove' in action and x != len(f_list) + y:
        print("\033[0;31mERROR: {} is not installed.\033[0m\n".format(pkgName))
        return False
    print("%s\t%34s\033[0;32m %s \033[0m]" % (
        " * Check if {} is installed      ".format(pkgName), "[", "OK"))
    return True
Example 17
def main(ignored_packages):
    cache = Cache(memonly=True)
    not_in_repos = []
    ignore = parse_ignore(ignored_packages)
    for pkg in cache:
        if (pkg.is_installed and not pkg.is_upgradable
                and not check_ignored(pkg, ignore)
                and not any(pkg.installed.uris)):
            not_in_repos.append(pkg)

    if not_in_repos:
        pkgs = ' '.join([pkg.name for pkg in not_in_repos])
        msg = u'WARNING: {0} packages have no candidate in repositories!' \
                ' | {1}'.format(len(not_in_repos), pkgs)
        sig = 1
    else:
        msg = 'OK: All packages are found in repositories'
        sig = 0

    print(msg)
    exit(sig)
Example 18
def check_for_installed_packages(config, provider):
    """
    Checks if the required host packages have been installed
    :param dict config: The config provided by get_config()
    :param str provider: The provider to check the required host packages for
    :return bool: True if all required packages have been installed, False otherwise
    """
    logger.debug(
        "Checking if all required host packages have been installed for provider {}"
        .format(provider))
    all_installed = True
    cache = Cache()
    if "apt" not in modules:
        logger.warning(
            "Skipping host package requirements check, apt module missing")
    else:
        for package in config["providers"][provider]["required_host_packages"]:
            if not cache[package].is_installed:
                logger.error(
                    "Required host package {} for provider {} is not installed"
                    .format(package, provider))
                all_installed = False
    return all_installed
Example 19
    def train_model(self, pkgs_list, axi, save_files=True):
        cache = Cache()
        ml_data = MachineLearningData()

        pkgs_description, pkg_classification = self.prepare_data(
            pkgs_list, axi, cache, ml_data)
        pkg_features = self.vectorizer.fit_transform(pkgs_description)
        features_array = pkg_features.toarray()

        terms, debtags = self.get_used_terms_and_debtags(
            self.vectorizer.get_feature_names())

        self.classifier = GaussianNB()
        self.classifier.fit(features_array, pkg_classification)

        path = BagOfWords.BAG_OF_WORDS_PKGS_CLASSIFICATION

        if save_files:
            self.save_features(terms, BagOfWords.BAG_OF_WORDS_TERMS)
            self.save_features(debtags, BagOfWords.BAG_OF_WORDS_DEBTAGS)
            self.save_pkgs_features(path, pkgs_list, features_array,
                                    pkg_classification)

        return BagOfWords.CREATED_MODEL
Example 20
    def __init__(self,
                 rfs,
                 arch,
                 notifier=None,
                 norecommend=False,
                 noauth=True):

        # pylint: disable=too-many-arguments
        InChRootObject.__init__(self, rfs)

        self.notifier = notifier
        config.set("APT::Architecture", arch)
        if norecommend:
            config.set("APT::Install-Recommends", "0")
        else:
            config.set("APT::Install-Recommends", "1")

        if noauth:
            config.set("APT::Get::AllowUnauthenticated", "1")
        else:
            config.set("APT::Get::AllowUnauthenticated", "0")

        self.cache = Cache(progress=ElbeOpProgress())
        self.cache.open(progress=ElbeOpProgress())
Example 21
def main(argv=sys.argv[1:]):
    parser = argparse.ArgumentParser(
        description='Lists available binary packages and versions which are '
        'needed to satisfy rosdep keys for ROS packages in the workspace')

    # Positional
    add_argument_rosdistro_name(parser)
    add_argument_os_name(parser)
    add_argument_os_code_name(parser)

    add_argument_output_dir(parser)
    add_argument_package_selection_args(parser)
    add_argument_skip_rosdep_keys(parser)
    parser.add_argument('--package-root',
                        nargs='+',
                        help='The path to the directory containing packages')
    args = parser.parse_args(argv)

    workspace_root = args.package_root[-1]
    os.chdir(workspace_root)

    with Scope('SUBSECTION', 'mark packages with IGNORE files'):
        all_packages = locate_packages(workspace_root)
        selected_packages = all_packages
        if args.package_selection_args:
            print('Using package selection arguments:',
                  args.package_selection_args)
            selected_packages = locate_packages(
                workspace_root, extra_args=args.package_selection_args)

            to_ignore = all_packages.keys() - selected_packages.keys()
            print('Ignoring %d packages' % len(to_ignore))
            for package in sorted(to_ignore):
                print('-', package)
                package_root = all_packages[package]
                Path(package_root, 'COLCON_IGNORE').touch()

        print('There are %d packages which meet selection criteria' %
              len(selected_packages))

    with Scope('SUBSECTION', 'Enumerating packages needed to build'):
        # find all of the underlay packages
        underlay_pkgs = {}
        all_underlay_pkg_names = set()
        for package_root in args.package_root[0:-1]:
            print("Crawling for packages in '%s'" % package_root)
            underlay_pkgs.update(find_packages(package_root))

            # Check for a colcon index for non-ROS package detection
            colcon_index = os.path.join(package_root, 'colcon-core',
                                        'packages')
            try:
                all_underlay_pkg_names.update(os.listdir(colcon_index))
            except FileNotFoundError:
                pass

        underlay_pkg_names = [pkg.name for pkg in underlay_pkgs.values()]
        print('Found the following ROS underlay packages:')
        for pkg_name in sorted(underlay_pkg_names):
            print('  -', pkg_name)

        # get direct build dependencies
        package_root = args.package_root[-1]
        print("Crawling for packages in '%s'" % package_root)
        pkgs = find_packages(package_root)

        pkg_names = [pkg.name for pkg in pkgs.values()]
        print('Found the following ROS packages:')
        for pkg_name in sorted(pkg_names):
            print('  -', pkg_name)

        # get build dependencies and map them to binary packages
        all_pkgs = set(pkgs.values()).union(underlay_pkgs.values())

        for pkg in all_pkgs:
            pkg.evaluate_conditions(os.environ)
        for pkg in all_pkgs:
            for group_depend in pkg.group_depends:
                if group_depend.evaluated_condition:
                    group_depend.extract_group_members(all_pkgs)

        dependency_keys_build = get_dependencies(
            all_pkgs, 'build', _get_build_and_recursive_run_dependencies,
            pkgs.values())

        dependency_keys_test = get_dependencies(
            all_pkgs, 'run and test', _get_test_and_recursive_run_dependencies,
            pkgs.values())

        if args.skip_rosdep_keys:
            dependency_keys_build.difference_update(args.skip_rosdep_keys)
            dependency_keys_test.difference_update(args.skip_rosdep_keys)

        # remove all non-ROS packages and packages which are present but
        # specifically ignored
        every_package_name = all_packages.keys() | all_underlay_pkg_names
        dependency_keys_build -= every_package_name
        dependency_keys_test -= every_package_name

        context = initialize_resolver(args.rosdistro_name, args.os_name,
                                      args.os_code_name)

        os_pkg_names_build = resolve_names(dependency_keys_build, **context)
        os_pkg_names_test = resolve_names(dependency_keys_test, **context)

        os_pkg_names_test -= os_pkg_names_build

    with Scope('SUBSECTION', 'Resolving packages versions using apt cache'):
        apt_cache = Cache()
        os_pkg_versions = get_binary_package_versions(
            apt_cache, os_pkg_names_build | os_pkg_names_test)

    with open(os.path.join(args.output_dir, 'install_list_build.txt'),
              'w') as out_file:
        for package in sorted(os_pkg_names_build):
            out_file.write('%s=%s\n' % (package, os_pkg_versions[package]))

    with open(os.path.join(args.output_dir, 'install_list_test.txt'),
              'w') as out_file:
        for package in sorted(os_pkg_names_test):
            out_file.write('%s=%s\n' % (package, os_pkg_versions[package]))
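get_binary_package_versions() is a ros_buildfarm helper rather than part of python-apt; roughly, it maps each Debian package name to its candidate version in the apt cache. A simplified stand-in, ignoring the error handling the real helper performs for missing packages:

def get_binary_package_versions(apt_cache, debian_pkg_names):
    return {name: apt_cache[name].candidate.version
            for name in debian_pkg_names}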
Example 22
def main(argv=sys.argv[1:]):
    parser = argparse.ArgumentParser(
        description="Generate a 'Dockerfile' for building the binarydeb")
    add_argument_rosdistro_index_url(parser)
    add_argument_rosdistro_name(parser)
    add_argument_package_name(parser)
    add_argument_os_name(parser)
    add_argument_os_code_name(parser)
    add_argument_arch(parser)
    add_argument_distribution_repository_urls(parser)
    add_argument_distribution_repository_key_files(parser)
    add_argument_binarydeb_dir(parser)
    add_argument_dockerfile_dir(parser)
    add_argument_env_vars(parser)
    args = parser.parse_args(argv)

    debian_package_name = get_debian_package_name(
        args.rosdistro_name, args.package_name)

    # get expected package version from rosdistro
    index = get_index(args.rosdistro_index_url)
    dist_file = get_distribution_file(index, args.rosdistro_name)
    assert args.package_name in dist_file.release_packages
    pkg = dist_file.release_packages[args.package_name]
    repo = dist_file.repositories[pkg.repository_name]
    package_version = repo.release_repository.version

    debian_package_version = package_version

    # build_binarydeb dependencies
    debian_pkg_names = ['apt-src']

    # add build dependencies from .dsc file
    dsc_file = get_dsc_file(
        args.binarydeb_dir, debian_package_name, debian_package_version)
    debian_pkg_names += sorted(get_build_depends(dsc_file))

    # get versions for build dependencies
    apt_cache = Cache()
    debian_pkg_versions = get_binary_package_versions(
        apt_cache, debian_pkg_names)

    # generate Dockerfile
    data = {
        'os_name': args.os_name,
        'os_code_name': args.os_code_name,
        'arch': args.arch,

        'uid': get_user_id(),

        'distribution_repository_urls': args.distribution_repository_urls,
        'distribution_repository_keys': get_distribution_repository_keys(
            args.distribution_repository_urls,
            args.distribution_repository_key_files),

        'build_environment_variables': args.env_vars,

        'dependencies': debian_pkg_names,
        'dependency_versions': debian_pkg_versions,
        'install_lists': [],

        'rosdistro_name': args.rosdistro_name,
        'package_name': args.package_name,
        'binarydeb_dir': args.binarydeb_dir,
    }
    create_dockerfile(
        'release/binarydeb_task.Dockerfile.em', data, args.dockerfile_dir)

    # output hints about necessary volumes to mount
    ros_buildfarm_basepath = os.path.normpath(
        os.path.join(os.path.dirname(__file__), '..', '..'))
    print('Mount the following volumes when running the container:')
    print('  -v %s:/tmp/ros_buildfarm:ro' % ros_buildfarm_basepath)
    print('  -v %s:/tmp/binarydeb' % args.binarydeb_dir)
Example 23
def main(argv=sys.argv[1:]):
    parser = argparse.ArgumentParser(
        description="Generate a 'Dockerfile' for the CI job")

    # Positional
    add_argument_rosdistro_name(parser)
    add_argument_os_name(parser)
    add_argument_os_code_name(parser)
    add_argument_arch(parser)

    add_argument_distribution_repository_key_files(parser)
    add_argument_distribution_repository_urls(parser)
    add_argument_dockerfile_dir(parser)
    add_argument_env_vars(parser)
    add_argument_package_selection_args(parser)
    add_argument_repos_file_urls(parser)
    add_argument_repository_names(parser, optional=True)
    add_argument_skip_rosdep_keys(parser)
    add_argument_test_branch(parser)
    parser.add_argument('--workspace-root',
                        nargs='+',
                        help='The root path of the workspace to compile')
    args = parser.parse_args(argv)

    assert args.repos_file_urls or args.repository_names

    debian_pkg_names = [
        'git',
        'python3-apt',
        'python3-colcon-metadata',
        'python3-colcon-package-information',
        'python3-colcon-package-selection',
        'python3-colcon-recursive-crawl',
        'python3-colcon-ros',
        'python3-rosdep',
        'python3-vcstool',
    ]

    # get versions for build dependencies
    apt_cache = Cache()
    debian_pkg_versions = get_binary_package_versions(apt_cache,
                                                      debian_pkg_names)

    # generate Dockerfile
    data = {
        'os_name': args.os_name,
        'os_code_name': args.os_code_name,
        'arch': args.arch,

        'distribution_repository_urls': args.distribution_repository_urls,
        'distribution_repository_keys': get_distribution_repository_keys(
            args.distribution_repository_urls,
            args.distribution_repository_key_files),

        'rosdistro_name': args.rosdistro_name,
        'custom_rosdep_urls': [],

        'uid': get_user_id(),

        'build_environment_variables':
            ['%s=%s' % key_value for key_value in args.env_vars.items()],

        'dependencies': debian_pkg_names,
        'dependency_versions': debian_pkg_versions,

        'repos_file_urls': args.repos_file_urls,
        'repository_names': args.repository_names,
        'test_branch': args.test_branch,
        'skip_rosdep_keys': args.skip_rosdep_keys,
        'package_selection_args': args.package_selection_args,
        'workspace_root': args.workspace_root,
    }
    create_dockerfile('ci/create_workspace.Dockerfile.em', data,
                      args.dockerfile_dir)
Example 24
#!/usr/bin/python

from apt import Cache
import argparse
from collections import namedtuple
import re
import subprocess
import sys

APT_CACHE = Cache()
DEPENDS = ('Depends', 'PreDepends', 'Recommends')
DEPENDENCY_RE = re.compile(
    r' +(?P<type>[^:]+): (?P<pkg_name>\S+)(?: (?P<version>.+))')
DOTTY_STYLE = {
    'Reverse Recommends': '[style=dotted color="#999999"]',
}

PackageDependency = namedtuple('PackageDependency',
                               ('name', 'type', 'version'))
TreeNode = namedtuple('TreeNode', ('name', 'children'))


class Error(Exception):
    """Error ocurred."""


class DependencyTree(object):
    def __init__(self, package):
        self.package_ = package
        self.dependencies_ = None
        self.tree_ = None
Example 25
def main(argv=sys.argv[1:]):
    parser = argparse.ArgumentParser(
        description="Generate a 'Dockerfile' for the CI job")

    # Positional
    add_argument_rosdistro_name(parser)
    add_argument_os_name(parser)
    add_argument_os_code_name(parser)
    add_argument_arch(parser)

    add_argument_build_tool(parser, required=True)
    add_argument_build_tool_args(parser)
    add_argument_distribution_repository_key_files(parser)
    add_argument_distribution_repository_urls(parser)
    add_argument_dockerfile_dir(parser)
    add_argument_env_vars(parser)
    add_argument_install_packages(parser)
    add_argument_ros_version(parser)
    add_argument_testing(parser)
    parser.add_argument(
        '--workspace-root', nargs='*',
        action=check_len_action(1, 2),
        help='The root path of the workspace to compile')
    args = parser.parse_args(argv)

    apt_cache = Cache()

    debian_pkg_names = set(['build-essential'])
    debian_pkg_names.update(args.install_packages)
    if args.build_tool == 'colcon':
        debian_pkg_names.update([
            'python3-catkin-pkg-modules',
            'python3-colcon-output',
            'python3-colcon-parallel-executor',
            'python3-colcon-ros',
            'python3-colcon-test-result',
            'python3-rosdistro-modules',
        ])

    print('Always install the following generic dependencies:')
    for debian_pkg_name in sorted(debian_pkg_names):
        print('  -', debian_pkg_name)

    install_list = 'install_list.txt'
    write_install_list(
        os.path.join(args.dockerfile_dir, install_list),
        debian_pkg_names, apt_cache)
    install_lists = [install_list, 'install_list_build.txt']
    if args.testing:
        install_lists.append('install_list_test.txt')

    # generate Dockerfile
    data = {
        'os_name': args.os_name,
        'os_code_name': args.os_code_name,
        'arch': args.arch,

        'distribution_repository_urls': args.distribution_repository_urls,
        'distribution_repository_keys': get_distribution_repository_keys(
            args.distribution_repository_urls,
            args.distribution_repository_key_files),

        'rosdistro_name': args.rosdistro_name,

        'uid': get_user_id(),

        'build_tool': args.build_tool,
        'build_tool_args': args.build_tool_args,
        'ros_version': args.ros_version,

        'build_environment_variables': args.env_vars,

        'install_lists': install_lists,
        'dependencies': [],
        'dependency_versions': [],

        'testing': args.testing,
        'prerelease_overlay': len(args.workspace_root) > 1,
    }
    create_dockerfile(
        'devel/devel_task.Dockerfile.em', data, args.dockerfile_dir)

    # output hints about necessary volumes to mount
    ros_buildfarm_basepath = os.path.normpath(
        os.path.join(os.path.dirname(__file__), '..', '..'))
    print('Mount the following volumes when running the container:')
    print('  -v %s:/tmp/ros_buildfarm:ro' % ros_buildfarm_basepath)
    if len(args.workspace_root) == 1:
        print('  -v %s:/tmp/ws' % args.workspace_root[0])
    else:
        for i, workspace_root in enumerate(args.workspace_root[0:-1]):
            print('  -v %s:/tmp/ws%s' % (workspace_root, i or ''))
        print('  -v %s:/tmp/ws_overlay' % args.workspace_root[-1])
Example 26
def is_installed(package):
    cache = Cache()
    return cache[package].is_installed if package in cache else False
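Usage sketch; the membership test only matches real package names, so purely virtual packages report False here:

if is_installed('openssh-server'):
    print('openssh-server is installed')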
Example 27
def run_command(argv):

    # TODO - Set threshold and remove pylint directives
    #
    # We might want to make the threshold higher for certain
    # files/directories or just globaly.

    # pylint: disable=too-many-locals
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements

    oparser = OptionParser(
        usage="usage: %prog fetch_initvm_pkgs [options] <xmlfile>")

    oparser.add_option("-b",
                       "--binrepo",
                       dest="binrepo",
                       default="/var/cache/elbe/initvm-bin-repo",
                       help="directory where the bin repo should reside")

    oparser.add_option("-s",
                       "--srcrepo",
                       dest="srcrepo",
                       default="/var/cache/elbe/initvm-src-repo",
                       help="directory where the src repo should reside")

    oparser.add_option("--skip-validation",
                       action="store_true",
                       dest="skip_validation",
                       default=False,
                       help="Skip xml schema validation")

    oparser.add_option("--cdrom-mount-path",
                       dest="cdrom_path",
                       help="path where cdrom is mounted")

    oparser.add_option("--cdrom-device",
                       dest="cdrom_device",
                       help="cdrom device, in case it has to be mounted")

    oparser.add_option("--apt-archive",
                       dest="archive",
                       default="/var/cache/elbe/binaries/main",
                       help="path where binary packages are downloaded to.")

    oparser.add_option("--src-archive",
                       dest="srcarchive",
                       default="/var/cache/elbe/sources",
                       help="path where src packages are downloaded to.")

    oparser.add_option("--skip-build-sources",
                       action="store_false",
                       dest="build_sources",
                       default=True,
                       help="Skip downloading Source Packages")

    oparser.add_option("--skip-build-bin",
                       action="store_false",
                       dest="build_bin",
                       default=True,
                       help="Skip downloading binary packages")

    (opt, args) = oparser.parse_args(argv)

    if len(args) != 1:
        print("wrong number of arguments")
        oparser.print_help()
        sys.exit(20)

    try:
        xml = ElbeXML(args[0], skip_validate=opt.skip_validation)
    except ValidationError as e:
        print(str(e))
        print("xml validation failed. Bailing out")
        sys.exit(20)

    with elbe_logging({"streams": sys.stdout}):

        if opt.cdrom_path:
            if opt.cdrom_device:
                do('mount "%s" "%s"' % (opt.cdrom_device, opt.cdrom_path))

            # a cdrom build is identified by the cdrom option
            # the xml file that is copied into the initvm
            # by the initrd does not have the cdrom tags setup.
            mirror = "file://%s" % opt.cdrom_path
        else:
            mirror = xml.get_initvm_primary_mirror(opt.cdrom_path)

        init_codename = xml.get_initvm_codename()

        # Binary Repo
        #
        repo = CdromInitRepo(init_codename, opt.binrepo, mirror)

        hostfs.mkdir_p(opt.archive)

        # the package list and apt cache are needed for both the binary
        # and the source repo; build them unconditionally so that
        # --skip-build-bin does not leave them undefined further down
        pkglist = get_initvm_pkglist()
        cache = Cache()
        cache.open()

        if opt.build_bin:
            for pkg in pkglist:
                pkg_id = "%s-%s" % (pkg.name, pkg.installed_version)
                try:
                    p = cache[pkg.name]
                    pkgver = p.installed
                    deb = fetch_binary(pkgver, opt.archive,
                                       ElbeAcquireProgress(cb=None))
                    repo.includedeb(deb, 'main')
                except ValueError:
                    logging.exception('No package "%s"', pkg_id)
                except FetchError:
                    logging.exception(
                        'Package "%s-%s" could not be downloaded', pkg.name,
                        pkgver.version)
                except TypeError:
                    logging.exception('Package "%s" missing name or version',
                                      pkg_id)

        repo.finalize()

        # Source Repo
        #
        repo = CdromSrcRepo(init_codename, init_codename, opt.srcrepo, 0,
                            mirror)
        hostfs.mkdir_p(opt.srcarchive)

        # a cdrom build does not have sources
        # skip adding packages to the source repo
        #
        # FIXME: we need a way to add source cdroms later on
        if opt.cdrom_path:
            opt.build_sources = False

        if opt.build_sources:
            for pkg in pkglist:
                pkg_id = "%s-%s" % (pkg.name, pkg.installed_version)
                try:
                    p = cache[pkg.name]
                    pkgver = p.installed
                    dsc = pkgver.fetch_source(opt.srcarchive,
                                              ElbeAcquireProgress(cb=None),
                                              unpack=False)
                    repo.include_init_dsc(dsc, 'initvm')
                except ValueError:
                    logging.exception('No package "%s"', pkg_id)
                except FetchError:
                    logging.exception(
                        'Package "%s-%s" could not be downloaded', pkg.name,
                        pkgver.version)
                except TypeError:
                    logging.exception('Package "%s" missing name or version',
                                      pkg_id)

        repo.finalize()

        if opt.cdrom_device:
            do('umount "%s"' % opt.cdrom_device)
Example 28
def main(argv=sys.argv[1:]):
    parser = argparse.ArgumentParser(
        description="Generate a 'Dockerfile' for the doc job")
    add_argument_config_url(parser)
    parser.add_argument(
        '--rosdistro-name',
        required=True,
        help='The name of the ROS distro to identify the setup file to be '
        'sourced')
    add_argument_build_name(parser, 'doc')
    parser.add_argument('--workspace-root',
                        required=True,
                        help='The root path of the workspace to compile')
    parser.add_argument('--rosdoc-lite-dir',
                        required=True,
                        help='The root path of the rosdoc_lite repository')
    parser.add_argument('--catkin-sphinx-dir',
                        required=True,
                        help='The root path of the catkin-sphinx repository')
    parser.add_argument('--rosdoc-index-dir',
                        required=True,
                        help='The root path of the rosdoc_index folder')
    add_argument_repository_name(parser)
    parser.add_argument('--os-name',
                        required=True,
                        help="The OS name (e.g. 'ubuntu')")
    parser.add_argument('--os-code-name',
                        required=True,
                        help="The OS code name (e.g. 'xenial')")
    parser.add_argument('--arch',
                        required=True,
                        help="The architecture (e.g. 'amd64')")
    add_argument_build_tool(parser, required=True)
    add_argument_vcs_information(parser)
    add_argument_distribution_repository_urls(parser)
    add_argument_distribution_repository_key_files(parser)
    add_argument_force(parser)
    add_argument_output_dir(parser, required=True)
    add_argument_dockerfile_dir(parser)
    args = parser.parse_args(argv)

    config = get_config_index(args.config_url)
    index = get_index(config.rosdistro_index_url)

    condition_context = get_package_condition_context(index,
                                                      args.rosdistro_name)

    with Scope('SUBSECTION', 'packages'):
        # find packages in workspace
        source_space = os.path.join(args.workspace_root, 'src')
        print("Crawling for packages in workspace '%s'" % source_space)
        pkgs = find_packages(source_space)

        for pkg in pkgs.values():
            pkg.evaluate_conditions(condition_context)

        pkg_names = [pkg.name for pkg in pkgs.values()]
        print('Found the following packages:')
        for pkg_name in sorted(pkg_names):
            print('  -', pkg_name)

        maintainer_emails = set([])
        for pkg in pkgs.values():
            for m in pkg.maintainers:
                maintainer_emails.add(m.email)
        if maintainer_emails:
            print('Package maintainer emails: %s' %
                  ' '.join(sorted(maintainer_emails)))

    rosdoc_index = RosdocIndex(
        [os.path.join(args.rosdoc_index_dir, args.rosdistro_name)])

    vcs_type, vcs_version, vcs_url = args.vcs_info.split(' ', 2)

    with Scope('SUBSECTION', 'determine need to run documentation generation'):
        # compare hashes to determine if documentation needs to be regenerated
        current_hashes = {}
        current_hashes['ros_buildfarm'] = 2  # increase to retrigger doc jobs
        current_hashes['rosdoc_lite'] = get_git_hash(args.rosdoc_lite_dir)
        current_hashes['catkin-sphinx'] = get_git_hash(args.catkin_sphinx_dir)
        repo_dir = os.path.join(args.workspace_root, 'src',
                                args.repository_name)
        current_hashes[args.repository_name] = get_hash(repo_dir)
        print('Current repository hashes: %s' % current_hashes)
        tag_index_hashes = rosdoc_index.hashes.get(args.repository_name, {})
        print('Stored repository hashes: %s' % tag_index_hashes)
        skip_doc_generation = current_hashes == tag_index_hashes

    if skip_doc_generation:
        print('No changes to the source repository or any tooling repository')

        if not args.force:
            print('Skipping generation of documentation')

            # create stamp files
            print('Creating marker files to identify that documentation is ' +
                  'up-to-date')
            create_stamp_files(pkg_names, os.path.join(args.output_dir, 'api'))

            # check if any entry needs to be updated
            print('Creating update manifest.yaml files')
            for pkg_name in pkg_names:
                # update manifest.yaml files
                current_manifest_yaml_file = os.path.join(
                    args.rosdoc_index_dir, args.rosdistro_name, 'api',
                    pkg_name, 'manifest.yaml')
                if not os.path.exists(current_manifest_yaml_file):
                    print('- %s: skipping no manifest.yaml yet' % pkg_name)
                    continue
                with open(current_manifest_yaml_file, 'r') as h:
                    remote_data = yaml.safe_load(h)
                data = copy.deepcopy(remote_data)

                data['vcs'] = vcs_type
                data['vcs_uri'] = vcs_url
                data['vcs_version'] = vcs_version

                data['depends_on'] = sorted(
                    rosdoc_index.reverse_deps.get(pkg_name, []))

                if data == remote_data:
                    print('- %s: skipping same data' % pkg_name)
                    continue

                # write manifest.yaml if it has changes
                print('- %s: api/%s/manifest.yaml' % (pkg_name, pkg_name))
                dst = os.path.join(args.output_dir, 'api', pkg_name,
                                   'manifest.yaml')
                dst_dir = os.path.dirname(dst)
                if not os.path.exists(dst_dir):
                    os.makedirs(dst_dir)
                with open(dst, 'w') as h:
                    yaml.dump(data, h, default_flow_style=False)

            return 0

        print("But job was started with the 'force' parameter set")

    else:
        print('The source repository and/or a tooling repository has changed')

    print('Running generation of documentation')
    rosdoc_index.hashes[args.repository_name] = current_hashes
    rosdoc_index.write_modified_data(args.output_dir, ['hashes'])

    # create stamp files
    print('Creating marker files to identify that documentation is ' +
          'up-to-date')
    create_stamp_files(pkg_names, os.path.join(args.output_dir, 'api_rosdoc'))

    dist_file = get_distribution_file(index, args.rosdistro_name)
    assert args.repository_name in dist_file.repositories
    valid_package_names = \
        set(pkg_names) | set(dist_file.release_packages.keys())

    # update package deps and metapackage deps
    with Scope('SUBSECTION', 'updated rosdoc_index information'):
        for pkg in pkgs.values():
            print("Updating dependendencies for package '%s'" % pkg.name)
            depends = _get_build_run_doc_dependencies(pkg)
            ros_dependency_names = sorted(
                set([d.name for d in depends
                     if d.name in valid_package_names]))
            rosdoc_index.set_forward_deps(pkg.name, ros_dependency_names)

            if pkg.is_metapackage():
                print("Updating dependendencies for metapackage '%s'" %
                      pkg.name)
                depends = _get_run_dependencies(pkg)
                ros_dependency_names = sorted(
                    set([
                        d.name for d in depends
                        if d.name in valid_package_names
                    ]))
            else:
                ros_dependency_names = None
            rosdoc_index.set_metapackage_deps(pkg.name, ros_dependency_names)
        rosdoc_index.write_modified_data(args.output_dir,
                                         ['deps', 'metapackage_deps'])

    # generate changelog html from rst
    package_names_with_changelogs = set([])
    with Scope('SUBSECTION', 'generate changelog html from rst'):
        for pkg_path, pkg in pkgs.items():
            abs_pkg_path = os.path.join(source_space, pkg_path)
            assert os.path.exists(os.path.join(abs_pkg_path, 'package.xml'))
            changelog_file = os.path.join(abs_pkg_path, 'CHANGELOG.rst')
            if os.path.exists(changelog_file):
                print(("Package '%s' contains a CHANGELOG.rst, generating " +
                       "html") % pkg.name)
                package_names_with_changelogs.add(pkg.name)

                with open(changelog_file, 'r') as h:
                    rst_code = h.read()
                from docutils.core import publish_string
                html_code = publish_string(rst_code, writer_name='html')
                html_code = html_code.decode()

                # strip system message from html output
                open_tag = re.escape('<div class="first system-message">')
                close_tag = re.escape('</div>')
                pattern = '(' + open_tag + '.+?' + close_tag + ')'
                html_code = re.sub(pattern, '', html_code, flags=re.DOTALL)

                pkg_changelog_doc_path = os.path.join(args.output_dir,
                                                      'changelogs', pkg.name)
                os.makedirs(pkg_changelog_doc_path)
                with open(
                        os.path.join(pkg_changelog_doc_path, 'changelog.html'),
                        'w') as h:
                    h.write(html_code)

    ordered_pkg_tuples = topological_order_packages(pkgs)

    # create rosdoc tag list and location files
    with Scope('SUBSECTION', 'create rosdoc tag list and location files'):
        rosdoc_config_files = {}
        for pkg_path, pkg in pkgs.items():
            abs_pkg_path = os.path.join(source_space, pkg_path)

            rosdoc_exports = [
                e.attributes['content'] for e in pkg.exports
                if e.tagname == 'rosdoc' and 'content' in e.attributes
            ]
            prefix = '${prefix}'
            rosdoc_config_file = rosdoc_exports[-1] \
                if rosdoc_exports else '%s/rosdoc.yaml' % prefix
            rosdoc_config_file = rosdoc_config_file.replace(
                prefix, abs_pkg_path)
            if os.path.isfile(rosdoc_config_file):
                rosdoc_config_files[pkg.name] = rosdoc_config_file

        for _, pkg in ordered_pkg_tuples:
            dst = os.path.join(args.output_dir, 'rosdoc_tags',
                               '%s.yaml' % pkg.name)
            print("Generating rosdoc tag list file for package '%s'" %
                  pkg.name)

            dep_names = rosdoc_index.get_recursive_dependencies(pkg.name)
            # make sure that we don't pass our own tagfile to ourself
            # bad things happen when we do this
            assert pkg.name not in dep_names
            locations = []
            for dep_name in sorted(dep_names):
                if dep_name not in rosdoc_index.locations:
                    print("- skipping not existing location file of " +
                          "dependency '%s'" % dep_name)
                    continue
                print("- including location files of dependency '%s'" %
                      dep_name)
                dep_locations = rosdoc_index.locations[dep_name]
                if dep_locations:
                    for dep_location in dep_locations:
                        assert dep_location['package'] == dep_name
                        # update tag information to point to local location
                        location = copy.deepcopy(dep_location)
                        if not location['location'].startswith('file://'):
                            location['location'] = 'file://%s' % os.path.join(
                                args.rosdoc_index_dir, location['location'])
                        locations.append(location)

            dst_dir = os.path.dirname(dst)
            if not os.path.exists(dst_dir):
                os.makedirs(dst_dir)
            with open(dst, 'w') as h:
                yaml.dump(locations, h)

            print("Creating location file for package '%s'" % pkg.name)
            data = {
                'docs_url':
                '../../../api/%s/html' % pkg.name,
                'location':
                'file://%s' %
                os.path.join(args.output_dir, 'symbols', '%s.tag' % pkg.name),
                'package':
                pkg.name,
            }

            # fetch generator specific output folders from rosdoc_lite
            if pkg.name in rosdoc_config_files:
                output_folders = get_generator_output_folders(
                    rosdoc_config_files[pkg.name], pkg.name)
                if 'doxygen' in output_folders:
                    data['docs_url'] += '/' + output_folders['doxygen']

            rosdoc_index.locations[pkg.name] = [data]
            # do not write these local locations

    # used to determine all source and release jobs
    source_build_files = get_source_build_files(config, args.rosdistro_name)
    release_build_files = get_release_build_files(config, args.rosdistro_name)

    # TODO this should reuse the logic from the job generation
    used_source_build_names = []
    for source_build_name, build_file in source_build_files.items():
        repo_names = build_file.filter_repositories([args.repository_name])
        if not repo_names:
            continue
        matching_dist_file = get_distribution_file_matching_build_file(
            index, args.rosdistro_name, build_file)
        repo = matching_dist_file.repositories[args.repository_name]
        if not repo.source_repository:
            continue
        if not repo.source_repository.version:
            continue
        if build_file.test_commits_force is False:
            continue
        elif repo.source_repository.test_commits is False:
            continue
        elif repo.source_repository.test_commits is None and \
                not build_file.test_commits_default:
            continue
        used_source_build_names.append(source_build_name)

    doc_build_files = get_doc_build_files(config, args.rosdistro_name)
    doc_build_file = doc_build_files[args.doc_build_name]

    # create manifest.yaml files from repository / package meta information
    # will be merged with the manifest.yaml file generated by rosdoc_lite later
    repository = dist_file.repositories[args.repository_name]
    with Scope('SUBSECTION', 'create manifest.yaml files'):
        for pkg in pkgs.values():

            data = {}

            data['vcs'] = vcs_type
            data['vcs_uri'] = vcs_url
            data['vcs_version'] = vcs_version

            data['repo_name'] = args.repository_name
            data['timestamp'] = time.time()

            data['depends'] = sorted(
                rosdoc_index.forward_deps.get(pkg.name, []))
            data['depends_on'] = sorted(
                rosdoc_index.reverse_deps.get(pkg.name, []))

            if pkg.name in rosdoc_index.metapackage_index:
                data['metapackages'] = rosdoc_index.metapackage_index[pkg.name]

            if pkg.name in rosdoc_index.metapackage_deps:
                data['packages'] = rosdoc_index.metapackage_deps[pkg.name]

            if pkg.name in package_names_with_changelogs:
                data['has_changelog_rst'] = True

            data['api_documentation'] = '%s/%s/api/%s/html' % \
                (doc_build_file.canonical_base_url, args.rosdistro_name, pkg.name)

            pkg_status = None
            pkg_status_description = None
            # package level status information
            if pkg.name in repository.status_per_package:
                pkg_status_data = repository.status_per_package[pkg.name]
                pkg_status = pkg_status_data.get('status', None)
                pkg_status_description = pkg_status_data.get(
                    'status_description', None)
            # repository level status information
            if pkg_status is None:
                pkg_status = repository.status
            if pkg_status_description is None:
                pkg_status_description = repository.status_description
            if pkg_status is not None:
                data['maintainer_status'] = pkg_status
            if pkg_status_description is not None:
                data['maintainer_status_description'] = pkg_status_description

            # add doc job url
            data['doc_job'] = get_doc_job_url(config.jenkins_url,
                                              args.rosdistro_name,
                                              args.doc_build_name,
                                              args.repository_name,
                                              args.os_name, args.os_code_name,
                                              args.arch)

            # add devel job urls
            build_files = {}
            for build_name in used_source_build_names:
                build_files[build_name] = source_build_files[build_name]
            devel_job_urls = get_devel_job_urls(config.jenkins_url,
                                                build_files,
                                                args.rosdistro_name,
                                                args.repository_name)
            if devel_job_urls:
                data['devel_jobs'] = devel_job_urls

            # TODO this should reuse the logic from the job generation
            used_release_build_names = []
            for release_build_name, build_file in release_build_files.items():
                filtered_pkg_names = build_file.filter_packages([pkg.name])
                if not filtered_pkg_names:
                    continue
                matching_dist_file = get_distribution_file_matching_build_file(
                    index, args.rosdistro_name, build_file)
                repo = matching_dist_file.repositories[args.repository_name]
                if not repo.release_repository:
                    continue
                if not repo.release_repository.version:
                    continue
                used_release_build_names.append(release_build_name)

            # add release job urls
            build_files = {}
            for build_name in used_release_build_names:
                build_files[build_name] = release_build_files[build_name]
            release_job_urls = get_release_job_urls(config.jenkins_url,
                                                    build_files,
                                                    args.rosdistro_name,
                                                    pkg.name)
            if release_job_urls:
                data['release_jobs'] = release_job_urls

            # write manifest.yaml
            dst = os.path.join(args.output_dir, 'manifests', pkg.name,
                               'manifest.yaml')
            dst_dir = os.path.dirname(dst)
            if not os.path.exists(dst_dir):
                os.makedirs(dst_dir)
            with open(dst, 'w') as h:
                yaml.dump(data, h)
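            # For illustration, the written manifest.yaml is a flat mapping of
            # the keys collected above, e.g. (values are made-up examples):
            #
            #   vcs: git
            #   vcs_uri: https://example.com/repo.git
            #   vcs_version: master
            #   repo_name: my_repo
            #   timestamp: 1500000000.0
            #   depends: [std_msgs]
            #   depends_on: [other_pkg]
            #   api_documentation: <canonical_base_url>/<rosdistro>/api/<pkg>/html
            #   doc_job: <jenkins doc job url>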

    # overwrite CMakeLists.txt files of each package
    with Scope('SUBSECTION',
               'overwrite CMakeLists.txt files to only generate messages'):
        for pkg_path, pkg in pkgs.items():
            abs_pkg_path = os.path.join(source_space, pkg_path)

            build_types = [
                e.content for e in pkg.exports if e.tagname == 'build_type'
            ]
            build_type_cmake = build_types and build_types[0] == 'cmake'

            data = {
                'package_name': pkg.name,
                'build_type_cmake': build_type_cmake,
            }
            content = expand_template('doc/CMakeLists.txt.em', data)
            print("Generating 'CMakeLists.txt' for package '%s'" % pkg.name)
            cmakelist_file = os.path.join(abs_pkg_path, 'CMakeLists.txt')
            with open(cmakelist_file, 'w') as h:
                h.write(content)

    with Scope('SUBSECTION', 'determine dependencies and generate Dockerfile'):
        # initialize rosdep view
        context = initialize_resolver(args.rosdistro_name, args.os_name,
                                      args.os_code_name)

        apt_cache = Cache()

        debian_pkg_names = [
            'build-essential',
            'openssh-client',
            'python3',
            'python3-yaml',
            'rsync',
            # the following are required by rosdoc_lite
            'doxygen',
            # since catkin is not a run dependency but provides the setup files
            get_os_package_name(args.rosdistro_name, 'catkin'),
            # rosdoc_lite does not work without genmsg being importable
            get_os_package_name(args.rosdistro_name, 'genmsg'),
        ]

        if '3' == str(condition_context['ROS_PYTHON_VERSION']):
            # the following are required by rosdoc_lite
            debian_pkg_names.extend([
                'python3-catkin-pkg-modules', 'python3-kitchen',
                'python3-rospkg-modules', 'python3-sphinx', 'python3-yaml'
            ])
        else:
            if '2' != str(condition_context['ROS_PYTHON_VERSION']):
                print('Unknown Python version, falling back to Python 2:',
                      condition_context)
            # the following are required by rosdoc_lite
            debian_pkg_names.extend([
                'python-catkin-pkg-modules', 'python-epydoc', 'python-kitchen',
                'python-rospkg', 'python-sphinx', 'python-yaml'
            ])

        if args.build_tool == 'colcon':
            debian_pkg_names.append('python3-colcon-ros')
        if 'actionlib_msgs' in pkg_names:
            # to document actions in other packages in the same repository
            debian_pkg_names.append(
                get_os_package_name(args.rosdistro_name, 'actionlib_msgs'))
        print('Always install the following generic dependencies:')
        for debian_pkg_name in sorted(debian_pkg_names):
            print('  -', debian_pkg_name)

        debian_pkg_versions = {}

        # get build, run and doc dependencies and map them to binary packages
        depends = get_dependencies(pkgs.values(), 'build, run and doc',
                                   _get_build_run_doc_dependencies)
        debian_pkg_names_depends = resolve_names(depends, **context)
        debian_pkg_names_depends -= set(debian_pkg_names)
        debian_pkg_names += order_dependencies(debian_pkg_names_depends)
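        # order_dependencies is assumed to return the resolved names in a
        # stable order (e.g. simply sorted(debian_pkg_names_depends)), so
        # repeated runs generate an identical Dockerfile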
        missing_debian_pkg_names = []
        for debian_pkg_name in debian_pkg_names:
            try:
                debian_pkg_versions.update(
                    get_binary_package_versions(apt_cache, [debian_pkg_name]))
            except KeyError:
                missing_debian_pkg_names.append(debian_pkg_name)
        if missing_debian_pkg_names:
            # we allow missing dependencies to support basic documentation
            # of packages that depend on not yet released packages
            print(
                '# BEGIN SUBSECTION: MISSING DEPENDENCIES might result in failing build'
            )
            for debian_pkg_name in missing_debian_pkg_names:
                print("Could not find apt package '%s', skipping dependency" %
                      debian_pkg_name)
                debian_pkg_names.remove(debian_pkg_name)
            print('# END SUBSECTION')
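        # Note: get_binary_package_versions is assumed to look up each name in
        # the apt cache roughly like this (illustrative sketch only):
        #
        #   def get_binary_package_versions(apt_cache, debian_pkg_names):
        #       return {name: apt_cache[name].candidate.version
        #               for name in debian_pkg_names}
        #
        # so the KeyError handled above just means the package is unknown to apt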

        # generate Dockerfile
        data = {
            'os_name': args.os_name,
            'os_code_name': args.os_code_name,
            'arch': args.arch,
            'build_tool': doc_build_file.build_tool,
            'distribution_repository_urls': args.distribution_repository_urls,
            'distribution_repository_keys': get_distribution_repository_keys(
                args.distribution_repository_urls,
                args.distribution_repository_key_files),
            'environment_variables': [
                'ROS_PYTHON_VERSION={}'.format(
                    condition_context['ROS_PYTHON_VERSION'])
            ],
            'rosdistro_name': args.rosdistro_name,
            'uid': get_user_id(),
            'dependencies': debian_pkg_names,
            'dependency_versions': debian_pkg_versions,
            'install_lists': [],
            'canonical_base_url': doc_build_file.canonical_base_url,
            'ordered_pkg_tuples': ordered_pkg_tuples,
            'rosdoc_config_files': rosdoc_config_files,
        }
        create_dockerfile('doc/doc_task.Dockerfile.em', data,
                          args.dockerfile_dir)
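        # create_dockerfile is assumed to expand the given empy template with
        # 'data' and write the result as 'Dockerfile' into args.dockerfile_dir,
        # roughly (illustrative sketch, not the actual implementation):
        #
        #   content = expand_template('doc/doc_task.Dockerfile.em', data)
        #   with open(os.path.join(args.dockerfile_dir, 'Dockerfile'), 'w') as h:
        #       h.write(content)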
Esempio n. 29
0
def main(argv=sys.argv[1:]):
    parser = argparse.ArgumentParser(
        description="Generate a 'Dockerfile' for the devel job")
    parser.add_argument(
        '--rosdistro-name',
        required=True,
        help='The name of the ROS distro to identify the setup file to be '
        'sourced')
    parser.add_argument('--workspace-root',
                        nargs='+',
                        help='The root path of the workspace to compile')
    parser.add_argument('--os-name',
                        required=True,
                        help="The OS name (e.g. 'ubuntu')")
    parser.add_argument('--os-code-name',
                        required=True,
                        help="The OS code name (e.g. 'xenial')")
    parser.add_argument('--arch',
                        required=True,
                        help="The architecture (e.g. 'amd64')")
    add_argument_distribution_repository_urls(parser)
    add_argument_distribution_repository_key_files(parser)
    add_argument_build_tool(parser, required=True)
    add_argument_ros_version(parser)
    add_argument_env_vars(parser)
    add_argument_dockerfile_dir(parser)
    add_argument_run_abichecker(parser)
    add_argument_require_gpu_support(parser)
    a1 = add_argument_build_tool_args(parser)
    a2 = add_argument_build_tool_test_args(parser)
    parser.add_argument(
        '--testing',
        action='store_true',
        help='Build the workspace with tests enabled and run the tests '
        'instead of installing the workspace')

    remainder_args = extract_multiple_remainders(argv, (a1, a2))
    args = parser.parse_args(argv)
    for k, v in remainder_args.items():
        setattr(args, k, v)

    condition_context = dict(args.env_vars)
    condition_context['ROS_DISTRO'] = args.rosdistro_name
    condition_context['ROS_VERSION'] = args.ros_version

    # get direct build dependencies
    pkgs = get_packages_in_workspaces(args.workspace_root, condition_context)
    pkg_names = [pkg.name for pkg in pkgs.values()]
    print("Found the following packages:")
    for pkg_name in sorted(pkg_names):
        print('  -', pkg_name)

    maintainer_emails = set([])
    for pkg in pkgs.values():
        for m in pkg.maintainers:
            maintainer_emails.add(m.email)
    if maintainer_emails:
        print('Package maintainer emails: %s' %
              ' '.join(sorted(maintainer_emails)))

    context = initialize_resolver(args.rosdistro_name, args.os_name,
                                  args.os_code_name)

    apt_cache = Cache()

    debian_pkg_names = [
        'build-essential',
        'python3',
    ]
    if args.build_tool == 'colcon':
        debian_pkg_names += [
            'python3-colcon-metadata',
            'python3-colcon-output',
            'python3-colcon-parallel-executor',
            'python3-colcon-ros',
            'python3-colcon-test-result',
        ]
    elif 'catkin' not in pkg_names:
        debian_pkg_names += resolve_names(['catkin'], **context)
    print('Always install the following generic dependencies:')
    for debian_pkg_name in sorted(debian_pkg_names):
        print('  -', debian_pkg_name)

    debian_pkg_versions = {}

    # get build dependencies and map them to binary packages
    build_depends = get_dependencies(
        pkgs.values(), 'build', _get_build_and_recursive_run_dependencies)
    debian_pkg_names_building = resolve_names(build_depends, **context)
    debian_pkg_names_building -= set(debian_pkg_names)
    debian_pkg_names += order_dependencies(debian_pkg_names_building)
    debian_pkg_versions.update(
        get_binary_package_versions(apt_cache, debian_pkg_names))

    # get run and test dependencies and map them to binary packages
    run_and_test_depends = get_dependencies(pkgs.values(), 'run and test',
                                            _get_run_and_test_dependencies)
    debian_pkg_names_testing = resolve_names(run_and_test_depends, **context)
    # all additional run/test dependencies are added after the build
    # dependencies in order to reuse the existing Docker image layers
    debian_pkg_names_testing -= set(debian_pkg_names)
    debian_pkg_versions.update(
        get_binary_package_versions(apt_cache, debian_pkg_names_testing))
    if args.testing:
        debian_pkg_names += order_dependencies(debian_pkg_names_testing)

    mapped_workspaces = [
        (workspace_root, '/tmp/ws%s' % (index if index > 1 else ''))
        for index, workspace_root in enumerate(args.workspace_root, 1)
    ]

    parent_result_space = []
    if len(args.workspace_root) > 1:
        parent_result_space = ['/opt/ros/%s' % args.rosdistro_name] + \
            [mapping[1] for mapping in mapped_workspaces[:-1]]
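    # For illustration, with two (assumed) workspace roots this yields:
    #   mapped_workspaces = [('/path/to/underlay_ws', '/tmp/ws'),
    #                        ('/path/to/overlay_ws', '/tmp/ws2')]
    #   parent_result_space = ['/opt/ros/<rosdistro>', '/tmp/ws']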

    # generate Dockerfile
    data = {
        'os_name': args.os_name,
        'os_code_name': args.os_code_name,
        'arch': args.arch,
        'distribution_repository_urls': args.distribution_repository_urls,
        'distribution_repository_keys': get_distribution_repository_keys(
            args.distribution_repository_urls,
            args.distribution_repository_key_files),
        'rosdistro_name': args.rosdistro_name,
        'uid': get_user_id(),
        'build_tool': args.build_tool,
        'build_tool_args': args.build_tool_args,
        'build_tool_test_args': args.build_tool_test_args,
        'ros_version': args.ros_version,
        'build_environment_variables': [
            '%s=%s' % key_value for key_value in args.env_vars.items()],
        'dependencies': debian_pkg_names,
        'dependency_versions': debian_pkg_versions,
        'install_lists': [],
        'testing': args.testing,
        'run_abichecker': args.run_abichecker,
        'require_gpu_support': args.require_gpu_support,
        'workspace_root': mapped_workspaces[-1][1],
        'parent_result_space': parent_result_space,
    }
    create_dockerfile('devel/devel_task.Dockerfile.em', data,
                      args.dockerfile_dir)

    # output hints about necessary volumes to mount
    ros_buildfarm_basepath = os.path.normpath(
        os.path.join(os.path.dirname(__file__), '..', '..'))
    print('Mount the following volumes when running the container:')
    print('  -v %s:/tmp/ros_buildfarm:ro' % ros_buildfarm_basepath)
    for mapping in mapped_workspaces:
        print('  -v %s:%s' % mapping)
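    # These hints are meant to be passed to 'docker run', e.g. (paths assumed):
    #   docker run -v /path/to/ros_buildfarm:/tmp/ros_buildfarm:ro \
    #              -v /path/to/overlay_ws:/tmp/ws <image>
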
def main(argv=sys.argv[1:]):
    parser = argparse.ArgumentParser(
        description="Generate a 'Dockerfile' for the devel job")
    parser.add_argument(
        '--rosdistro-name',
        required=True,
        help='The name of the ROS distro to identify the setup file to be '
        'sourced')
    parser.add_argument('--workspace-root',
                        nargs='+',
                        help='The root path of the workspace to compile')
    parser.add_argument('--os-name',
                        required=True,
                        help="The OS name (e.g. 'ubuntu')")
    parser.add_argument('--os-code-name',
                        required=True,
                        help="The OS code name (e.g. 'trusty')")
    parser.add_argument('--arch',
                        required=True,
                        help="The architecture (e.g. 'amd64')")
    add_argument_distribution_repository_urls(parser)
    add_argument_distribution_repository_key_files(parser)
    add_argument_dockerfile_dir(parser)
    parser.add_argument(
        '--testing',
        action='store_true',
        help='Build the workspace with tests enabled and run the tests '
        'instead of installing the workspace')
    args = parser.parse_args(argv)

    # get direct build dependencies
    pkgs = {}
    for workspace_root in args.workspace_root:
        source_space = os.path.join(workspace_root, 'src')
        print("Crawling for packages in workspace '%s'" % source_space)
        pkgs.update(find_packages(source_space))

    pkg_names = [pkg.name for pkg in pkgs.values()]
    print("Found the following packages:")
    for pkg_name in sorted(pkg_names):
        print('  -', pkg_name)

    maintainer_emails = set([])
    for pkg in pkgs.values():
        for m in pkg.maintainers:
            maintainer_emails.add(m.email)
    if maintainer_emails:
        print('Package maintainer emails: %s' %
              ' '.join(sorted(maintainer_emails)))

    context = initialize_resolver(args.rosdistro_name, args.os_name,
                                  args.os_code_name)

    apt_cache = Cache()

    debian_pkg_names = [
        'build-essential',
        'python3',
    ]
    if 'catkin' not in pkg_names:
        debian_pkg_names.append(
            get_debian_package_name(args.rosdistro_name, 'catkin'))
    print('Always install the following generic dependencies:')
    for debian_pkg_name in sorted(debian_pkg_names):
        print('  -', debian_pkg_name)

    debian_pkg_versions = {}

    # get build dependencies and map them to binary packages
    build_depends = get_dependencies(
        pkgs.values(), 'build', _get_build_and_recursive_run_dependencies)
    debian_pkg_names_building = resolve_names(build_depends, **context)
    debian_pkg_names_building -= set(debian_pkg_names)
    debian_pkg_names += order_dependencies(debian_pkg_names_building)
    debian_pkg_versions.update(
        get_binary_package_versions(apt_cache, debian_pkg_names))

    # get run and test dependencies and map them to binary packages
    run_and_test_depends = get_dependencies(pkgs.values(), 'run and test',
                                            _get_run_and_test_dependencies)
    debian_pkg_names_testing = resolve_names(run_and_test_depends, **context)
    # all additional run/test dependencies are added after the build
    # dependencies in order to reuse the existing Docker image layers
    debian_pkg_names_testing -= set(debian_pkg_names)
    debian_pkg_versions.update(
        get_binary_package_versions(apt_cache, debian_pkg_names_testing))
    if args.testing:
        debian_pkg_names += order_dependencies(debian_pkg_names_testing)

    # generate Dockerfile
    data = {
        'os_name': args.os_name,
        'os_code_name': args.os_code_name,
        'arch': args.arch,
        'distribution_repository_urls': args.distribution_repository_urls,
        'distribution_repository_keys': get_distribution_repository_keys(
            args.distribution_repository_urls,
            args.distribution_repository_key_files),
        'rosdistro_name': args.rosdistro_name,
        'uid': get_user_id(),
        'dependencies': debian_pkg_names,
        'dependency_versions': debian_pkg_versions,
        'testing': args.testing,
        'prerelease_overlay': len(args.workspace_root) > 1,
    }
    create_dockerfile('devel/devel_task.Dockerfile.em', data,
                      args.dockerfile_dir)

    # output hints about necessary volumes to mount
    ros_buildfarm_basepath = os.path.normpath(
        os.path.join(os.path.dirname(__file__), '..', '..'))
    print('Mount the following volumes when running the container:')
    print('  -v %s:/tmp/ros_buildfarm:ro' % ros_buildfarm_basepath)
    print('  -v %s:/tmp/catkin_workspace' % args.workspace_root[-1])