def main(opts):
    """Entry point for the ``catkin config`` verb (legacy variant).

    Loads (or, when requested/implied, initializes) the workspace context
    described by ``opts``, persists it, and prints a summary.

    Args:
        opts: parsed argparse namespace for this verb.

    Returns:
        int: 0 on success, 1 when the workspace cannot be configured.
    """
    try:
        # Determine if the user is trying to perform some action, in which
        # case, the workspace should be automatically initialized
        ignored_opts = ['main', 'verb']
        actions = [v for k, v in vars(opts).items() if k not in ignored_opts]
        no_action = not any(actions)

        # Try to find a metadata directory to get context defaults
        # Otherwise use the specified directory
        context = Context.Load(opts.workspace, opts.profile, opts)

        do_init = opts.init or not no_action
        summary_notes = []

        if not context.initialized() and do_init:
            summary_notes.append(
                clr('@!@{cf}Initialized new catkin workspace in `%s`@|' % context.workspace))

        if context.initialized() or do_init:
            Context.Save(context)

        if opts.mkdirs and not context.source_space_exists():
            os.makedirs(context.source_space_abs)

        print(context.summary(notes=summary_notes))

    except IOError as exc:
        # Usually happens if workspace is already underneath another catkin_tools workspace
        # FIX: IOError has no `.message` attribute on Python 3 (raised
        # AttributeError inside the handler); format the exception itself.
        print('error: could not configure catkin workspace: %s' % exc)
        return 1
    return 0
def main(opts):
    """Entry point for the ``catkin init`` verb (legacy variant).

    Initializes the workspace at ``opts.workspace`` (or the current
    directory) unless it is already initialized, then prints a summary.

    Args:
        opts: parsed argparse namespace for this verb.

    Returns:
        int: 0 on success, 1 when initialization fails.
    """
    try:
        # Load a context with initialization
        ctx = Context.load(opts.workspace, strict=True)

        # Initialize the workspace if necessary
        if ctx:
            print('Catkin workspace `%s` is already initialized. No action taken.' % ctx.workspace)
        else:
            print('Initializing catkin workspace in `%s`.' % (opts.workspace or os.getcwd()))
            # initialize the workspace
            init_metadata_root(opts.workspace or os.getcwd(), opts.reset)
            ctx = Context.load(opts.workspace)

        print(ctx.summary())

    except IOError as exc:
        # Usually happens if workspace is already underneath another catkin_tools workspace
        # FIX: IOError has no `.message` attribute on Python 3 (raised
        # AttributeError inside the handler); format the exception itself.
        print('error: could not initialize catkin workspace: %s' % exc)
        return 1
    return 0
def run_rsync(args):
    """Sync the requested space of the enclosing workspace to a remote host."""
    cwd = os.getcwd()
    workspace = find_enclosing_workspace(cwd)
    # The context is loaded unconditionally, matching the original flow,
    # before checking whether a workspace was actually found.
    ctx = Context.load(workspace)
    if not workspace:
        logging.error(
            f'No catkin workspace found. Is "{cwd}" contained in a workspace?')
        return
    paths = get_sync_pathes(ctx, args.space)
    run_sync_pathes(paths, args.remote)
def setup_workspace(workspace):
    """Symlink each package's ``compile_commands.json`` from the build space
    into the package's source directory (so tooling like clangd finds it).

    Args:
        workspace: path hint used to locate the catkin workspace.
    """
    ctx = Context.load(workspace_hint=workspace, load_env=False)
    if not ctx.source_space_exists():
        print("Source space doesn't exist at {}".format(ctx.source_space_abs))
        return
    for path, package in find_packages(ctx.source_space_abs).items():
        # FIX: build the paths with os.path.join instead of manually
        # concatenating os.path.sep — same result, less error-prone.
        db_path = os.path.join(ctx.build_space_abs, package.name, 'compile_commands.json')
        link_dir = os.path.join(ctx.source_space_abs, path)
        # NOTE(review): the Popen result is not awaited or checked, so a
        # failed `ln` (e.g. link already exists) is silent — confirm this
        # best-effort behavior is intended.
        Popen(['ln', '-s', db_path, link_dir])
def main(opts):
    """Entry point for the ``catkin config`` verb.

    Loads (or, when requested/implied, initializes) the workspace context,
    applying append/remove semantics to list-type arguments, persists it,
    and prints a summary.

    Args:
        opts: parsed argparse namespace for this verb.

    Returns:
        int: 0 on success, 1 when the workspace cannot be configured.
    """
    try:
        # Determine if the user is trying to perform some action, in which
        # case, the workspace should be automatically initialized
        ignored_opts = ['main', 'verb']
        actions = [v for k, v in vars(opts).items() if k not in ignored_opts]
        no_action = not any(actions)

        # Try to find a metadata directory to get context defaults
        # Otherwise use the specified directory
        context = Context.load(
            opts.workspace, opts.profile, opts,
            append=opts.append_args, remove=opts.remove_args)

        do_init = opts.init or not no_action
        summary_notes = []

        if not context.initialized() and do_init:
            summary_notes.append(
                clr('@!@{cf}Initialized new catkin workspace in `%s`@|' % sanitize(context.workspace)))

        if context.initialized() or do_init:
            Context.save(context)

        if opts.mkdirs and not context.source_space_exists():
            os.makedirs(context.source_space_abs)

        print(context.summary(notes=summary_notes))

    except IOError as exc:
        # Usually happens if workspace is already underneath another catkin_tools workspace
        # FIX: IOError has no `.message` attribute on Python 3 (raised
        # AttributeError inside the handler); format the exception itself.
        print('error: could not configure catkin workspace: %s' % exc)
        return 1
    return 0
def main(opts):
    """Entry point for the ``catkin init`` verb.

    Initializes the workspace at ``opts.workspace`` (or the current
    directory) unless it is already initialized, then prints a summary.

    Args:
        opts: parsed argparse namespace for this verb.

    Returns:
        int: 0 on success, 1 when initialization fails.
    """
    try:
        # Load a context with initialization
        ctx = Context.load(opts.workspace, strict=True)

        # Initialize the workspace if necessary
        if ctx:
            print('Catkin workspace `%s` is already initialized. No action taken.' % (ctx.workspace))
        else:
            print('Initializing catkin workspace in `%s`.' % (opts.workspace or os.getcwd()))
            # initialize the workspace
            init_metadata_root(opts.workspace or os.getcwd(), opts.reset)
            ctx = Context.load(opts.workspace)

        print(ctx.summary())

    except IOError as exc:
        # Usually happens if workspace is already underneath another catkin_tools workspace
        # FIX: IOError has no `.message` attribute on Python 3 (raised
        # AttributeError inside the handler); format the exception itself.
        print('error: could not initialize catkin workspace: %s' % exc)
        return 1
    return 0
def main(opts):
    """List packages found in the given folders (or the workspace source space),
    optionally filtered by direct dependency on ``opts.depends_on``."""
    if opts.folders:
        folders = opts.folders
    else:
        # No folders given: derive the source space from the workspace context.
        ctx = Context.load(opts.workspace, opts.profile, load_env=False)
        if not ctx:
            print(clr("@{rf}ERROR: Could not determine workspace.@|"), file=sys.stderr)
            sys.exit(1)
        folders = [ctx.source_space_abs]

    list_entry_format = '%s' if opts.unformatted else '@{pf}-@| @{cf}%s@|'
    opts.depends_on = set(opts.depends_on or [])
    warnings = []
    try:
        for folder in folders:
            for _, pkg in find_packages(folder, warnings=warnings).items():
                build_depend_names = [d.name for d in pkg.build_depends]
                run_depend_names = [d.name for d in pkg.run_depends]
                wanted = (not opts.depends_on
                          or opts.depends_on.intersection(build_depend_names)
                          or opts.depends_on.intersection(run_depend_names))
                if not wanted:
                    continue
                print(clr(list_entry_format % pkg.name))
                if not opts.deps:
                    continue
                if build_depend_names:
                    print(clr(' @{yf}build_depend:@|'))
                    for dep in build_depend_names:
                        print(clr(' @{pf}-@| %s' % dep))
                if run_depend_names:
                    print(clr(' @{yf}run_depend:@|'))
                    for dep in run_depend_names:
                        print(clr(' @{pf}-@| %s' % dep))
    except InvalidPackage as ex:
        message = '\n'.join(ex.args)
        print(clr("@{rf}Error:@| The directory %s contains an invalid package."
                  " See below for details:\n\n%s" % (folder, message)))

    # Print out warnings
    if not opts.quiet:
        for warning in warnings:
            print(clr("@{yf}Warning:@| %s" % warning), file=sys.stderr)
def try_find_ros_compilation_database(filename):
    """Best-effort lookup of the compile_commands.json for the ROS package
    containing ``filename``.

    Returns:
        The path to the compilation database, or None when it cannot be
        determined (missing workspace, missing catkin_tools/rospkg, etc.).
    """
    try:
        from catkin_tools.metadata import find_enclosing_workspace
        from catkin_tools.context import Context
        import rospkg
        workspace = find_enclosing_workspace(filename)
        ctx = Context.load(workspace, {}, {}, load_env=False)
        package = rospkg.get_package_name(filename)
        path = os.path.join(ctx.build_space_abs, package)
        candidate = os.path.join(path, 'compile_commands.json')
        if os.path.isfile(candidate) or os.path.isdir(candidate):
            logging.info("Found ROS compilation database for " + filename + " at " + candidate)
            return candidate
    except Exception:
        # FIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
        # narrow to Exception and record the failure for debugging.
        logging.debug("Could not determine ROS compilation database for %s", filename, exc_info=True)
    return None
def main(opts):
    """Run the script.

    Args:
        opts (dict): Options populated by an arg parser.

    Returns:
        int: Return code
    """
    # Configure log verbosity before emitting any messages.
    if opts.verbose:
        log.setLevel(logging.getLevelName("DEBUG"))
        log.debug(" Enabling DEBUG output.")
    else:
        log.setLevel(logging.getLevelName("INFO"))

    use_preprint = not opts.no_status
    if use_preprint:
        log.info(" Will print status messages while cloning.")
    else:
        log.info(" Not printing status messages while cloning.")
    log.info(" Using %s threads.", opts.num_threads)

    context = Context.load(opts.workspace, opts.profile, opts, append=True)

    # Fold an explicitly given default url into the comma-separated list.
    if opts.default_url != Tools.PACKAGE_TAG:
        opts.default_urls += "," + opts.default_url
    # Prepare the set of default urls
    default_urls = Tools.prepare_default_urls(opts.default_urls)

    if not opts.workspace:
        log.critical(" Workspace undefined! Abort!")
        return 1

    if opts.verb == 'fetch' or opts.subverb == 'fetch':
        return fetch(packages=opts.packages,
                     workspace=opts.workspace,
                     context=context,
                     default_urls=default_urls,
                     use_preprint=use_preprint,
                     num_threads=opts.num_threads,
                     pull_after_fetch=opts.update)
    if opts.subverb == 'update':
        return update(packages=opts.packages,
                      workspace=opts.workspace,
                      context=context,
                      use_preprint=use_preprint,
                      num_threads=opts.num_threads)
def main(opts):
    """List packages in ``opts.folders`` or the workspace source space,
    filtered by direct build/run dependency on ``opts.depends_on``."""
    folders = opts.folders
    if not folders:
        # Load the context to locate the source space.
        ctx = Context.load(opts.workspace, opts.profile, load_env=False)
        if not ctx:
            print(clr("@{rf}ERROR: Could not determine workspace.@|"), file=sys.stderr)
            sys.exit(1)
        folders = [ctx.source_space_abs]

    list_entry_format = '@{pf}-@| @{cf}%s@|' if not opts.unformatted else '%s'
    opts.depends_on = set(opts.depends_on) if opts.depends_on else set()
    warnings = []
    try:
        for folder in folders:
            packages = find_packages(folder, warnings=warnings)
            for pkg_pth, pkg in packages.items():
                build_depend_names = [d.name for d in pkg.build_depends]
                run_depend_names = [d.name for d in pkg.run_depends]
                is_build_dep = opts.depends_on.intersection(build_depend_names)
                is_run_dep = opts.depends_on.intersection(run_depend_names)
                if opts.depends_on and not (is_build_dep or is_run_dep):
                    continue
                print(clr(list_entry_format % pkg.name))
                if opts.deps:
                    if build_depend_names:
                        print(clr(' @{yf}build_depend:@|'))
                        for dep in build_depend_names:
                            print(clr(' @{pf}-@| %s' % dep))
                    if run_depend_names:
                        print(clr(' @{yf}run_depend:@|'))
                        for dep in run_depend_names:
                            print(clr(' @{pf}-@| %s' % dep))
    except InvalidPackage as ex:
        message = '\n'.join(ex.args)
        print(clr("@{rf}Error:@| The directory %s contains an invalid package."
                  " See below for details:\n\n%s" % (folder, message)))

    # Print out warnings
    if not opts.quiet:
        for warning in warnings:
            print(clr("@{yf}Warning:@| %s" % warning), file=sys.stderr)
def main(opts):
    """Entry point for documenting the workspace packages."""
    ctx = Context.load(opts.workspace, opts.profile, opts, append=True)
    # The job server bounds parallelism for the documentation jobs.
    job_server.initialize(max_jobs=4, max_load=None, gnu_make_enabled=False)
    parallel_jobs = int(opts.parallel_jobs or 4)
    return document_workspace(
        ctx,
        packages=opts.packages,
        start_with=opts.start_with,
        no_deps=opts.no_deps,
        n_jobs=parallel_jobs,
        force_color=opts.force_color,
        quiet=not opts.verbose,
        interleave_output=opts.interleave_output,
        no_status=opts.no_status,
        limit_status_rate=opts.limit_status_rate,
        no_notify=opts.no_notify,
        continue_on_failure=opts.continue_on_failure,
        summarize_build=opts.summarize,
    )
def main(opts):
    """Entry point for running the workspace tests."""
    ctx = Context.load(opts.workspace, opts.profile, opts, append=True)

    # --get-env short-circuits: just print the environment and exit.
    if opts.get_env:
        return print_test_env(ctx, opts.get_env)

    job_server.initialize(max_jobs=4, max_load=None, gnu_make_enabled=False)
    parallel_jobs = int(opts.parallel_jobs or 4)
    return test_workspace(
        ctx,
        packages=opts.packages,
        tests=opts.tests,
        list_tests=opts.list_tests,
        n_jobs=parallel_jobs,
        force_color=opts.force_color,
        quiet=not opts.verbose,
        interleave_output=opts.interleave_output,
        no_status=opts.no_status,
        limit_status_rate=opts.limit_status_rate,
        no_notify=opts.no_notify,
    )
def main(opts):
    """Run clang-tidy over one package of the enclosing catkin workspace.

    Returns:
        int: 0 on success; exits with a non-zero status when the workspace,
        package, compilation database, or source files cannot be found.
    """
    # NOTE(review): when `opts` is None this substitutes a raw argv list,
    # which has none of the attributes accessed below — presumably callers
    # always pass a parsed namespace; confirm before relying on this path.
    opts = sys.argv[1:] if opts is None else opts

    workspace = os.getcwd() if opts.workspace is None else opts.workspace
    workspace = find_enclosing_workspace(workspace)
    if not workspace:
        print("No workspace found")
        sys.exit(1)

    ctx = Context.load(workspace, opts.profile, opts, load_env=False)
    packages = find_packages(ctx.source_space_abs)
    pkg_path = [pkg_path for pkg_path, p in packages.items() if p.name == opts.package]
    pkg_path = None if not pkg_path else pkg_path[0]
    if not pkg_path:
        print("Package '{}' not found!".format(opts.package))
        sys.exit(2)

    pkg_name = packages[pkg_path].name
    # FIX: build the paths with os.path.join instead of manual
    # `+ os.path.sep +` concatenation.
    build_space = os.path.join(ctx.build_space_abs, pkg_name)
    compile_db = os.path.join(build_space, "compile_commands.json")
    if not os.path.isfile(compile_db):
        print("No compile_commands.json in {}".format(build_space))
        sys.exit(3)

    pkg_root = os.path.join(ctx.source_space_abs, pkg_path)
    if len(opts.src_file) == 0:
        opts.src_file = findSrcFiles(pkg_root)
    if len(opts.src_file) == 0:
        print("No .cpp files found!")
        sys.exit(4)

    export_file = None if opts.export is None else opts.export[0]
    if export_file:
        # Exporting fixes to a file supersedes applying them in place.
        opts.fix = False
    runClangTidy(clang_binary=opts.clang_tidy,
                 pkg_root=pkg_root,
                 package=opts.package,
                 filenames=opts.src_file,
                 cfg=None,
                 compile_db=build_space,
                 fix=opts.fix,
                 export_file=export_file,
                 dry_run=False)
    return 0
def load(self):
    """Read in .rosinstall from workspace"""
    # Parse the wstool configuration, then pull in the catkin context
    # and the set of catkin packages.
    wstool_cfg = multiproject_cli.multiproject_cmd.get_config(
        self.src, config_filename=".rosinstall")
    self.wstool_config = wstool_cfg
    self.catkin_config = Context.load()
    self.catkin_pkgs = self.get_catkin_packages()
def _split_person(entry):
    """Split a ``"Name Parts email"`` string into a ``[name, email]`` pair.

    The last whitespace-separated token is taken as the e-mail address and
    everything before it as the name.
    """
    tokens = entry.split()
    return [' '.join(tokens[:-1]), tokens[-1]]


def main(opts):
    """Entry point for the package-creation verb.

    Creates one package skeleton per name in ``opts.name`` under
    ``opts.path``, defaulting maintainers/authors/licenses from the
    workspace context when not given explicitly.

    Returns:
        int: 0 on success, 1 when package creation raises ValueError.
    """
    # Load the context
    ctx = Context.load(opts.workspace, opts.profile, opts, append=True)
    try:
        # Get absolute path to directory containing package
        package_dest_path = os.path.abspath(opts.path)

        # Sort list of maintainers and authors (it will also be sorted inside
        # PackageTemplate so by sorting it here, we ensure that the same order
        # is used. This is important later when email addresses are assigned.
        # FIX: the identical name/email parsing loop was duplicated for
        # maintainers and authors — factored into _split_person above.
        if not opts.maintainers:
            opts.maintainers = [_split_person(x) for x in ctx.maintainers]
        if opts.maintainers:
            opts.maintainers.sort(key=lambda x: x[0])
        if not opts.authors:
            opts.authors = [_split_person(x) for x in ctx.authors]
        if opts.authors:
            opts.authors.sort(key=lambda x: x[0])
        if not opts.license:
            opts.license = ctx.licenses or []

        for package_name in opts.name:
            print('Creating package "%s" in "%s"...' % (package_name, package_dest_path))
            target_path = os.path.join(package_dest_path, package_name)
            package_template = PackageTemplate._create_package_template(
                package_name=package_name,
                description=opts.description,
                licenses=opts.license,
                maintainer_names=[m[0] for m in opts.maintainers] if opts.maintainers else [],
                author_names=[a[0] for a in opts.authors] if opts.authors else [],
                version=opts.version,
                catkin_deps=opts.catkin_deps,
                system_deps=opts.system_deps,
                boost_comps=opts.boost_components)
            # Add maintainer and author e-mails
            if opts.maintainers:
                for (pm, om) in zip(package_template.maintainers, opts.maintainers):
                    pm.email = om[1]
            if opts.authors:
                for (pa, oa) in zip(package_template.authors, opts.authors):
                    pa.email = oa[1]
            # (Removed long-dead commented-out build_type export code.)
            create_package_files(target_path=target_path,
                                 package_template=package_template,
                                 rosdistro=opts.rosdistro,
                                 newfiles={})
            print('Successfully created package files in %s.' % target_path)
    except ValueError as vae:
        print(str(vae))
        return 1
    return 0
def main(opts):
    """List workspace packages, optionally filtered by (reverse) dependencies.

    Supports filtering by direct dependency (``--depends-on``), recursive
    dependents (``--rdepends-on``), or the package containing the current
    directory (``--this``), and printing direct or recursive dependencies.
    """
    # Load the context
    ctx = Context.load(opts.workspace, opts.profile, load_env=False)
    if not ctx:
        print(clr("@{rf}ERROR: Could not determine workspace.@|"), file=sys.stderr)
        sys.exit(1)
    folders = [ctx.source_space_abs]
    list_entry_format = '@{pf}-@| @{cf}%s@|' if not opts.unformatted else '%s'
    opts.depends_on = set(opts.depends_on) if opts.depends_on else set()
    warnings = []
    for folder in folders:
        try:
            packages = find_packages(folder, warnings=warnings)
            ordered_packages = topological_order_packages(packages)
            packages_by_name = {pkg.name: (pth, pkg) for pth, pkg in ordered_packages}
            if opts.depends_on or opts.rdepends_on:
                # Collect packages that directly depend on the given names
                dependents = set()
                for pth, pkg in ordered_packages:
                    is_dep = opts.depends_on.intersection([
                        p.name for p in pkg.build_depends + pkg.run_depends])
                    if is_dep:
                        dependents.add(pkg.name)
                # FIX: `packages_by_name.get(n)` returned None for unknown
                # names, which raised TypeError while unpacking into
                # (pth, pkg) before the `if pkg is None` guard could run.
                # Supplying a (None, None) default makes the guard effective.
                for pth, pkg in [packages_by_name.get(n, (None, None)) for n in opts.rdepends_on]:
                    if pkg is None:
                        continue
                    # Recursive dependents (both build and run)
                    rbd = get_recursive_build_dependents_in_workspace(pkg.name, ordered_packages)
                    rrd = get_recursive_run_dependents_in_workspace(pkg.name, ordered_packages)
                    dependents.update([p.name for _, p in rbd])
                    dependents.update([p.name for _, p in rrd])
                filtered_packages = [
                    (pth, pkg)
                    for pth, pkg in ordered_packages
                    if pkg.name in dependents]
            elif opts.this:
                # Restrict to the package containing the current directory
                this_package = find_enclosing_package(
                    search_start_path=getcwd(),
                    ws_path=ctx.workspace,
                    warnings=[])
                if this_package is None:
                    sys.exit(1)
                if this_package in packages_by_name:
                    filtered_packages = [packages_by_name[this_package]]
                else:
                    filtered_packages = []
            else:
                filtered_packages = ordered_packages
            for pkg_pth, pkg in filtered_packages:
                print(clr(list_entry_format % pkg.name))
                if opts.rdeps:
                    build_deps = [
                        p for dp, p in get_recursive_build_depends_in_workspace(pkg, ordered_packages)]
                    run_deps = [
                        p for dp, p in get_recursive_run_depends_in_workspace([pkg], ordered_packages)]
                else:
                    build_deps = pkg.build_depends
                    run_deps = pkg.run_depends
                if opts.deps or opts.rdeps:
                    if len(build_deps) > 0:
                        print(clr(' @{yf}build_depend:@|'))
                        for dep in build_deps:
                            print(clr(' @{pf}-@| %s' % dep.name))
                    if len(run_deps) > 0:
                        print(clr(' @{yf}run_depend:@|'))
                        for dep in run_deps:
                            print(clr(' @{pf}-@| %s' % dep.name))
        except InvalidPackage as ex:
            message = '\n'.join(ex.args)
            print(clr("@{rf}Error:@| The directory %s contains an invalid package."
                      " See below for details:\n\n%s" % (folder, message)))

    # Print out warnings
    if not opts.quiet:
        for warning in warnings:
            print(clr("@{yf}Warning:@| %s" % warning), file=sys.stderr)
def main(opts):
    """Entry point for the ``catkin profile`` verb.

    Dispatches on ``opts.subcommand`` to list, add, set, rename, or remove
    metadata profiles of the enclosing workspace.

    Returns:
        int: 0 on success, 1 on any error.
    """
    try:
        # Load a context with initialization
        ctx = Context.load(opts.workspace, load_env=False)
        if not ctx.initialized():
            print("A catkin workspace must be initialized before profiles can be managed.")
            return 1
        profiles = get_profile_names(ctx.workspace)
        active_profile = get_active_profile(ctx.workspace)
        if opts.subcommand == 'list':
            print(list_profiles(profiles,
                                active_profile,
                                unformatted=opts.unformatted,
                                active=opts.active))
        elif opts.subcommand == 'add':
            if opts.name in profiles:
                if opts.force:
                    print(clr('[profile] @{yf}Warning:@| Overwriting existing profile named @{cf}%s@|'
                              % (opts.name)))
                else:
                    print(clr('catkin profile: error: A profile named '
                              '@{cf}%s@| already exists. Use `--force` to '
                              'overwrite.' % (opts.name)))
                    return 1
            if opts.copy_active:
                # New profile is a copy of the currently active one
                ctx.profile = opts.name
                Context.save(ctx)
                print(clr('[profile] Created a new profile named @{cf}%s@| '
                          'based on active profile @{cf}%s@|' % (opts.name, active_profile)))
            elif opts.copy:
                if opts.copy in profiles:
                    new_ctx = Context.load(opts.workspace, profile=opts.copy)
                    new_ctx.profile = opts.name
                    Context.save(new_ctx)
                    print(clr('[profile] Created a new profile named @{cf}%s@| '
                              'based on profile @{cf}%s@|' % (opts.name, opts.copy)))
                else:
                    print(clr('[profile] @{rf}A profile with this name does not exist: %s@|' % opts.copy))
            else:
                new_ctx = Context(workspace=ctx.workspace, profile=opts.name)
                Context.save(new_ctx)
                print(clr('[profile] Created a new profile named @{cf}%s@| with default settings.'
                          % (opts.name)))
            profiles = get_profile_names(ctx.workspace)
            active_profile = get_active_profile(ctx.workspace)
            print(list_profiles(profiles, active_profile))
        elif opts.subcommand == 'set':
            if opts.name in profiles:
                set_active_profile(ctx.workspace, opts.name)
                active_profile = get_active_profile(ctx.workspace)
                print(clr('[profile] Activated catkin metadata profile: @{cf}%s@|' % active_profile))
            else:
                print('catkin profile: error: Profile `%s` does not exist in workspace `%s`.'
                      % (opts.name, ctx.workspace))
                return 1
            profiles = get_profile_names(ctx.workspace)
            active_profile = get_active_profile(ctx.workspace)
            print(list_profiles(profiles, active_profile))
        elif opts.subcommand == 'rename':
            if opts.current_name in profiles:
                if opts.new_name in profiles:
                    if opts.force:
                        print(clr('[profile] @{yf}Warning:@| Overwriting '
                                  'existing profile named @{cf}%s@|' % (opts.new_name)))
                    else:
                        print(clr('catkin profile: error: A profile named '
                                  '@{cf}%s@| already exists. Use `--force` to '
                                  'overwrite.' % (opts.new_name)))
                        return 1
                ctx.profile = opts.new_name
                Context.save(ctx)
                remove_profile(ctx.workspace, opts.current_name)
                if opts.current_name == active_profile:
                    set_active_profile(ctx.workspace, opts.new_name)
                print(clr('[profile] Renamed profile @{cf}%s@| to @{cf}%s@|'
                          % (opts.current_name, opts.new_name)))
            else:
                print('catkin profile: error: Profile `%s` does not exist in workspace `%s`.'
                      % (opts.current_name, ctx.workspace))
                return 1
            profiles = get_profile_names(ctx.workspace)
            active_profile = get_active_profile(ctx.workspace)
            print(list_profiles(profiles, active_profile))
        elif opts.subcommand == 'remove':
            for name in opts.name:
                if name == active_profile:
                    print('Profile `%s` is currently active. Re-setting active profile to `%s`.'
                          % (name, DEFAULT_PROFILE_NAME))
                    set_active_profile(ctx.workspace, DEFAULT_PROFILE_NAME)
                if name in profiles:
                    remove_profile(ctx.workspace, name)
                else:
                    print('catkin profile: error: Profile `%s` does not exist in workspace `%s`.'
                          % (name, ctx.workspace))
                    return 1
                print(clr('[profile] Removed profile: @{rf}%s@|' % name))
            profiles = get_profile_names(ctx.workspace)
            active_profile = get_active_profile(ctx.workspace)
            print(list_profiles(profiles, active_profile))
    except IOError as exc:
        # Usually happens if workspace is already underneath another catkin_tools workspace
        # FIX: IOError has no `.message` attribute on Python 3 (raised
        # AttributeError inside the handler); format the exception itself.
        print('error: could not %s catkin profile: %s' % (opts.subcommand, exc))
        return 1
    return 0
def main(opts):
    """Entry point for the ``catkin profile`` verb (compact variant).

    Dispatches on ``opts.subcommand`` to list, add, set, rename, or remove
    metadata profiles of the enclosing workspace.

    Returns:
        int: 0 on success, 1 on any error.
    """
    try:
        # Load a context with initialization
        ctx = Context.load(opts.workspace)
        if not ctx.initialized():
            print("A catkin workspace must be initialized before profiles can be managed.")
            return 1
        profiles = get_profile_names(ctx.workspace)
        active_profile = get_active_profile(ctx.workspace)
        if opts.subcommand == 'list':
            print(list_profiles(profiles, active_profile, unformatted=opts.unformatted))
        elif opts.subcommand == 'add':
            if opts.name in profiles:
                if opts.force:
                    print(clr('[profile] @{yf}Warning:@| Overwriting existing profile named @{cf}%s@|'
                              % (opts.name)))
                else:
                    print(clr('catkin profile: error: A profile named '
                              '@{cf}%s@| already exists. Use `--force` to '
                              'overwrite.' % (opts.name)))
                    return 1
            if opts.copy_active:
                # New profile is a copy of the currently active one
                ctx.profile = opts.name
                Context.save(ctx)
                print(clr('[profile] Created a new profile named @{cf}%s@| '
                          'based on active profile @{cf}%s@|' % (opts.name, active_profile)))
            elif opts.copy:
                if opts.copy in profiles:
                    new_ctx = Context.load(opts.workspace, profile=opts.copy)
                    new_ctx.profile = opts.name
                    Context.save(new_ctx)
                    print(clr('[profile] Created a new profile named @{cf}%s@| '
                              'based on profile @{cf}%s@|' % (opts.name, opts.copy)))
                else:
                    print(clr('[profile] @{rf}A profile with this name does not exist: %s@|' % opts.copy))
            else:
                new_ctx = Context(workspace=ctx.workspace, profile=opts.name)
                Context.save(new_ctx)
                print(clr('[profile] Created a new profile named @{cf}%s@| with default settings.'
                          % (opts.name)))
            profiles = get_profile_names(ctx.workspace)
            active_profile = get_active_profile(ctx.workspace)
            print(list_profiles(profiles, active_profile))
        elif opts.subcommand == 'set':
            if opts.name in profiles:
                set_active_profile(ctx.workspace, opts.name)
                active_profile = get_active_profile(ctx.workspace)
                print(clr('[profile] Activated catkin metadata profile: @{cf}%s@|' % active_profile))
            else:
                # NOTE(review): this variant formats `opts.name[0]` while the
                # membership test above uses `opts.name` — presumably `name`
                # is a single-element list here; confirm against the parser.
                print('catkin profile: error: Profile `%s` does not exist in workspace `%s`.'
                      % (opts.name[0], ctx.workspace))
                return 1
            profiles = get_profile_names(ctx.workspace)
            active_profile = get_active_profile(ctx.workspace)
            print(list_profiles(profiles, active_profile))
        elif opts.subcommand == 'rename':
            if opts.current_name in profiles:
                if opts.new_name in profiles:
                    if opts.force:
                        print(clr('[profile] @{yf}Warning:@| Overwriting '
                                  'existing profile named @{cf}%s@|' % (opts.new_name)))
                    else:
                        print(clr('catkin profile: error: A profile named '
                                  '@{cf}%s@| already exists. Use `--force` to '
                                  'overwrite.' % (opts.new_name)))
                        return 1
                ctx.profile = opts.new_name
                Context.save(ctx)
                remove_profile(ctx.workspace, opts.current_name)
                if opts.current_name == active_profile:
                    set_active_profile(ctx.workspace, opts.new_name)
                print(clr('[profile] Renamed profile @{cf}%s@| to @{cf}%s@|'
                          % (opts.current_name, opts.new_name)))
            else:
                print('catkin profile: error: Profile `%s` does not exist in workspace `%s`.'
                      % (opts.current_name, ctx.workspace))
                return 1
            profiles = get_profile_names(ctx.workspace)
            active_profile = get_active_profile(ctx.workspace)
            print(list_profiles(profiles, active_profile))
        elif opts.subcommand == 'remove':
            for name in opts.name:
                if name == active_profile:
                    print('Profile `%s` is currently active. Re-setting active profile to `%s`.'
                          % (name, DEFAULT_PROFILE_NAME))
                    set_active_profile(ctx.workspace, DEFAULT_PROFILE_NAME)
                if name in profiles:
                    remove_profile(ctx.workspace, name)
                else:
                    print('catkin profile: error: Profile `%s` does not exist in workspace `%s`.'
                          % (name, ctx.workspace))
                    return 1
                print(clr('[profile] Removed profile: @{rf}%s@|' % name))
            profiles = get_profile_names(ctx.workspace)
            active_profile = get_active_profile(ctx.workspace)
            print(list_profiles(profiles, active_profile))
    except IOError as exc:
        # Usually happens if workspace is already underneath another catkin_tools workspace
        # FIX: IOError has no `.message` attribute on Python 3 (raised
        # AttributeError inside the handler); format the exception itself.
        print('error: could not %s catkin profile: %s' % (opts.subcommand, exc))
        return 1
    return 0
def main(opts):
    """Entry point for the ``catkin clean`` verb.

    Removes the requested workspace subdirectories (build/devel/install),
    orphaned build directories, per-package CMake caches, and/or generated
    setup files, then records whether the next build must force CMake.

    Returns:
        int: 0 on success (or no action), 1 when no workspace is found.
    """
    # Names of the boolean options that count as "an action was requested".
    actions = ['all', 'build', 'devel', 'install', 'cmake_cache', 'orphans', 'setup_files']
    if not any([v for (k, v) in vars(opts).items() if k in actions]):
        print("[clean] No actions performed. See `catkin clean -h` for usage.")
        return 0

    # Set when something was removed that invalidates the CMake configuration.
    needs_force = False

    # Load the context
    ctx = Context.load(opts.workspace, opts.profile, opts, strict=True, load_env=False)

    if not ctx:
        if not opts.workspace:
            print(
                "catkin clean: error: The current or desired workspace could not be "
                "determined. Please run `catkin clean` from within a catkin "
                "workspace or specify the workspace explicitly with the "
                "`--workspace` option.")
        else:
            print(
                "catkin clean: error: Could not clean workspace \"%s\" because it "
                "either does not exist or it has no catkin_tools metadata." % opts.workspace)
        return 1

    # Remove the requested spaces
    if opts.all:
        opts.build = opts.devel = opts.install = True

    if opts.build:
        # Whole-buildspace removal supersedes the per-package cleanups below.
        if os.path.exists(ctx.build_space_abs):
            print("[clean] Removing buildspace: %s" % ctx.build_space_abs)
            shutil.rmtree(ctx.build_space_abs)
    else:
        # Orphan removal
        if opts.orphans:
            if os.path.exists(ctx.build_space_abs):
                # TODO: Check for merged build and report error
                # Get all enabled packages in source space
                # Suppress warnings since this is looking for packages which no longer exist
                found_source_packages = [
                    pkg.name for (path, pkg) in find_packages(ctx.source_space_abs, warnings=[]).items()]

                # Iterate over all packages with build dirs
                print("[clean] Removing orphaned build directories from %s" % ctx.build_space_abs)
                no_orphans = True
                for pkg_build_name in os.listdir(ctx.build_space_abs):
                    if pkg_build_name not in exempt_build_files:
                        pkg_build_path = os.path.join(ctx.build_space_abs, pkg_build_name)
                        # Remove package build dir if not found
                        if pkg_build_name not in found_source_packages:
                            no_orphans = False
                            print(" - Removing %s" % pkg_build_path)
                            shutil.rmtree(pkg_build_path)

                if no_orphans:
                    print("[clean] No orphans found, nothing removed from buildspace.")
                else:
                    # Remove the develspace
                    # TODO: For isolated devel, this could just remove individual packages
                    if os.path.exists(ctx.devel_space_abs):
                        # NOTE(review): this message lacks the "[clean] " prefix
                        # used by every other status line — confirm if intended.
                        print("Removing develspace: %s" % ctx.devel_space_abs)
                        shutil.rmtree(ctx.devel_space_abs)
                        needs_force = True
            else:
                print("[clean] No buildspace exists, no potential for orphans.")
                return 0

        # CMake Cache removal
        if opts.cmake_cache:
            # Clear the CMakeCache for each package
            if os.path.exists(ctx.build_space_abs):
                # Remove CMakeCaches
                print("[clean] Removing CMakeCache.txt files from %s" % ctx.build_space_abs)
                for pkg_build_name in os.listdir(ctx.build_space_abs):
                    if pkg_build_name not in exempt_build_files:
                        pkg_build_path = os.path.join(ctx.build_space_abs, pkg_build_name)
                        ccache_path = os.path.join(pkg_build_path, 'CMakeCache.txt')

                        if os.path.exists(ccache_path):
                            print(" - Removing %s" % ccache_path)
                            os.remove(ccache_path)
                            needs_force = True
            else:
                print("[clean] No buildspace exists, no CMake caches to clear.")

    if opts.devel:
        if os.path.exists(ctx.devel_space_abs):
            print("[clean] Removing develspace: %s" % ctx.devel_space_abs)
            shutil.rmtree(ctx.devel_space_abs)
    else:
        # Only the generated setup files, not the whole develspace.
        if opts.setup_files:
            print("[clean] Removing setup files from develspace: %s" % ctx.devel_space_abs)
            for filename in setup_files:
                full_path = os.path.join(ctx.devel_space_abs, filename)
                if os.path.exists(full_path):
                    print(" - Removing %s" % full_path)
                    os.remove(full_path)
                    needs_force = True

    if opts.install:
        if os.path.exists(ctx.install_space_abs):
            print("[clean] Removing installspace: %s" % ctx.install_space_abs)
            shutil.rmtree(ctx.install_space_abs)

    if needs_force:
        print(
            "NOTE: Parts of the workspace have been cleaned which will "
            "necessitate re-configuring CMake on the next build.")
        update_metadata(ctx.workspace, ctx.profile, 'build', {'needs_force': True})

    return 0
def prepare_arguments(parser):
    """Register all ``catkin config`` command-line arguments on *parser*.

    Returns:
        The same parser, with behavior, context, package-list, space,
        devel-layout, install, and build option groups attached.
    """
    parser.description = "This verb is used to configure a catkin workspace's\
 configuration and layout. Calling `catkin config` with no arguments will\
 display the current config and affect no changes if a config already exists\
 for the current workspace and profile."

    # Workspace / profile args
    add_context_args(parser)

    # How list-type arguments are merged with the stored config.
    behavior_group = parser.add_argument_group('Behavior', 'Options affecting argument handling.')
    add = behavior_group.add_mutually_exclusive_group().add_argument
    add('--append-args', '-a', action='store_true', default=False,
        help='For list-type arguments, append elements.')
    add('--remove-args', '-r', action='store_true', default=False,
        help='For list-type arguments, remove elements.')

    context_group = parser.add_argument_group('Workspace Context', 'Options affecting the context of the workspace.')
    add = context_group.add_argument
    add('--init', action='store_true', default=False,
        help='Initialize a workspace if it does not yet exist.')
    # --extend / --no-extend are mutually exclusive; --no-extend stores ''.
    add = context_group.add_mutually_exclusive_group().add_argument
    add('--extend', '-e', dest='extend_path', type=str,
        help='Explicitly extend the result-space of another catkin workspace, '
        'overriding the value of $CMAKE_PREFIX_PATH.')
    add('--no-extend', dest='extend_path', action='store_const', const='',
        help='Un-set the explicit extension of another workspace as set by --extend.')
    add = context_group.add_argument
    add('--mkdirs', action='store_true', default=False,
        help='Create directories required by the configuration (e.g. source space) if they do not already exist.')

    lists_group = parser.add_argument_group(
        'Package Build Defaults', 'Packages to include or exclude from default build behavior.')
    # Whitelist flags: setting vs clearing are mutually exclusive.
    add = lists_group.add_mutually_exclusive_group().add_argument
    add('--whitelist', metavar="PKG", dest='whitelist', nargs="+", required=False, type=str, default=None,
        help='Set the packages on the whitelist. If the whitelist is non-empty, '
        'only the packages on the whitelist are built with a bare call to '
        '`catkin build`.')
    add('--no-whitelist', dest='whitelist', action='store_const', const=[], default=None,
        help='Clear all packages from the whitelist.')
    # Blacklist flags mirror the whitelist flags.
    add = lists_group.add_mutually_exclusive_group().add_argument
    add('--blacklist', metavar="PKG", dest='blacklist', nargs="+", required=False, type=str, default=None,
        help='Set the packages on the blacklist. Packages on the blacklist are '
        'not built with a bare call to `catkin build`.')
    add('--no-blacklist', dest='blacklist', action='store_const', const=[], default=None,
        help='Clear all packages from the blacklist.')

    spaces_group = parser.add_argument_group('Spaces', 'Location of parts of the catkin workspace.')
    # Generate a --<space>-space / --default-<space>-space flag pair for each
    # space registered on the Context class.
    Context.setup_space_keys()
    for space, space_dict in Context.SPACES.items():
        add = spaces_group.add_mutually_exclusive_group().add_argument
        flags = ['--{}-space'.format(space)]
        flags.extend([space_dict['short_flag']] if 'short_flag' in space_dict else [])
        add(*flags, default=None,
            help='The path to the {} space.'.format(space))
        add('--default-{}-space'.format(space),
            action='store_const', dest='{}_space'.format(space), default=None, const=space_dict['default'],
            help='Use the default path to the {} space ("{}")'.format(space, space_dict['default']))
    add = spaces_group.add_argument
    add('-x', '--space-suffix',
        help='Suffix for build, devel, and install space if they are not otherwise explicitly set.')

    devel_group = parser.add_argument_group(
        'Devel Space', 'Options for configuring the structure of the devel space.')
    # The three devel layouts all write to the same dest and are exclusive.
    add = devel_group.add_mutually_exclusive_group().add_argument
    add('--link-devel', dest='devel_layout', action='store_const', const='linked', default=None,
        help='Build products from each catkin package into isolated spaces,'
        ' then symbolically link them into a merged devel space.')
    add('--merge-devel', dest='devel_layout', action='store_const', const='merged', default=None,
        help='Build products from each catkin package into a single merged devel spaces.')
    add('--isolate-devel', dest='devel_layout', action='store_const', const='isolated', default=None,
        help='Build products from each catkin package into isolated devel spaces.')

    install_group = parser.add_argument_group(
        'Install Space', 'Options for configuring the structure of the install space.')
    add = install_group.add_mutually_exclusive_group().add_argument
    add('--install', action='store_true', default=None,
        help='Causes each package to be installed to the install space.')
    add('--no-install', dest='install', action='store_false', default=None,
        help='Disables installing each package into the install space.')
    add = install_group.add_mutually_exclusive_group().add_argument
    add('--isolate-install', action='store_true', default=None,
        help='Install each catkin package into a separate install space.')
    add('--merge-install', dest='isolate_install', action='store_false', default=None,
        help='Install each catkin package into a single merged install space.')

    build_group = parser.add_argument_group('Build Options', 'Options for configuring the way packages are built.')
    add_cmake_and_make_and_catkin_make_args(build_group)

    return parser
def prepare_arguments(parser):
    """Add the ``catkin clean`` command-line options to *parser*.

    Registers the shared workspace/profile arguments plus option groups for
    full cleaning, per-space cleaning, per-package cleaning, and advanced
    cleaning, then returns the same parser for chaining.

    :param parser: an ``argparse.ArgumentParser`` (or compatible) to populate
    :returns: the populated parser
    """
    # Workspace / profile args
    add_context_args(parser)

    add = parser.add_argument
    add('--dry-run', '-n', action='store_true', default=False,
        help='Show the effects of the clean action without modifying the workspace.')
    add('--verbose', '-v', action='store_true', default=False,
        help='Verbose status output.')
    add('--yes', '-y', action='store_true', default=False,
        help='Assume "yes" to all interactive checks.')
    add('--force', '-f', action='store_true', default=False,
        help='Allow cleaning files outside of the workspace root.')
    add('--all-profiles', action='store_true', default=False,
        help='Apply the specified clean operation for all profiles in this workspace.')

    # Full group: remove everything except the source space
    full_group = parser.add_argument_group(
        'Full', 'Remove everything except the source space.')
    add = full_group.add_argument
    add('--deinit', action='store_true', default=False,
        help='De-initialize the workspace, delete all build profiles and'
             ' configuration. This will also clean subdirectories for all profiles in'
             ' the workspace.')

    # Basic group: one removal flag per cleanable space (source is never cleaned)
    spaces_group = parser.add_argument_group(
        'Spaces', 'Clean workspace subdirectories for the selected profile.')
    Context.setup_space_keys()
    add = spaces_group.add_argument
    for space, space_dict in Context.SPACES.items():
        if space == 'source':
            continue
        flags = [space_dict['short_flag']] if 'short_flag' in space_dict else []
        flags.append('--{}'.format(space_dict['default']))
        flags.append('--{}-space'.format(space))
        add(*flags, dest='spaces', action='append_const', const=space,
            help='Remove the entire {} space.'.format(space))

    # Packages group: per-package cleaning (only valid for linked devel layouts)
    packages_group = parser.add_argument_group(
        'Packages',
        "Clean products from specific packages in the workspace. Note that"
        " these options are only available in a `linked` devel space layout."
        " These options will also automatically enable the --force-cmake"
        " option for the next build invocation.")
    add = packages_group.add_argument
    # FIX: corrected help-text typo "Explicilty" -> "Explicitly"
    add('packages', metavar='PKGNAME', nargs='*',
        help='Explicitly specify a list of specific packages to clean from the build, devel, and install space.')
    add('--this', dest='clean_this', action='store_true', default=False,
        help='Clean the package containing the current working directory from the build, devel, and install space.')
    add('--dependents', '--deps', action='store_true', default=False,
        help='Clean the packages which depend on the packages to be cleaned.')
    # FIX: corrected help-text grammar "packages are no longer" -> "packages which are no longer"
    add('--orphans', action='store_true', default=False,
        help='Remove products from packages which are no longer in the source space. '
             'Note that this also removes packages which are '
             'skiplisted or which contain `CATKIN_IGNORE` marker files.')

    # Advanced group: miscellaneous targeted cleaning
    advanced_group = parser.add_argument_group(
        'Advanced', "Clean other specific parts of the workspace.")
    add = advanced_group.add_argument
    add('--setup-files', action='store_true', default=False,
        help='Clear the catkin-generated setup files from the devel and install spaces.')

    return parser
def clean_profile(opts, profile):
    """Clean products from a single profile of the workspace.

    Depending on *opts*, removes whole spaces (build/devel/install/logs),
    setup files, orphaned package products, and/or specific packages.
    Interactively confirms destructive operations unless --yes/--dry-run.

    :param opts: parsed command-line options namespace (mutated: packages
        and spaces lists may be extended)
    :param profile: name of the profile to clean (overridden by the loaded
        context's profile)
    :returns: True on success or no-op, False on failure
    """
    # Load the context
    ctx = Context.load(opts.workspace, profile, opts, strict=True, load_env=False)

    if not ctx:
        if not opts.workspace:
            log(clr("[clean] @!@{rf}Error:@| The current or desired workspace could not be "
                    "determined. Please run `catkin clean` from within a catkin "
                    "workspace or specify the workspace explicitly with the "
                    "`--workspace` option."))
        else:
            log(clr("[clean] @!@{rf}Error:@| Could not clean workspace \"%s\" because it "
                    "either does not exist or it has no catkin_tools metadata." %
                    opts.workspace))
        return False

    profile = ctx.profile

    # Check if the user wants to do something explicit
    actions = ['spaces', 'packages', 'clean_this', 'orphans', 'deinit', 'setup_files']

    # Absolute path and existence flag for every known space
    paths = {}  # noqa
    paths_exists = {}  # noqa

    # The install space may be relocated under DESTDIR
    paths['install'] = (
        os.path.join(ctx.destdir, ctx.install_space_abs.lstrip(os.sep))
        if ctx.destdir else ctx.install_space_abs)
    paths_exists['install'] = os.path.exists(paths['install']) and os.path.isdir(paths['install'])

    for space in Context.SPACES.keys():
        if space in paths:
            continue
        paths[space] = getattr(ctx, '{}_space_abs'.format(space))
        paths_exists[space] = getattr(ctx, '{}_space_exists'.format(space))()

    # Default is to clean all products for this profile
    no_specific_action = not any([
        v for (k, v) in vars(opts).items()
        if k in actions])
    clean_all = opts.deinit or no_specific_action

    # Initialize action options: clean every space except source
    if clean_all:
        opts.spaces = [k for k in Context.SPACES.keys() if k != 'source']

    # Make sure the user intends to clean everything
    spaces_to_clean_msgs = []

    if opts.spaces and not (opts.yes or opts.dry_run):
        for space in opts.spaces:
            if getattr(ctx, '{}_space_exists'.format(space))():
                space_name = Context.SPACES[space]['space']
                space_abs = getattr(ctx, '{}_space_abs'.format(space))
                spaces_to_clean_msgs.append(clr("[clean] {:14} @{yf}{}").format(space_name + ':', space_abs))

        if len(spaces_to_clean_msgs) == 0 and not opts.deinit:
            log("[clean] Nothing to be cleaned for profile: `{}`".format(profile))
            return True

    if len(spaces_to_clean_msgs) > 0:
        log("")
        log(clr("[clean] @!@{yf}Warning:@| This will completely remove the "
                "following directories. (Use `--yes` to skip this check)"))
        for msg in spaces_to_clean_msgs:
            log(msg)
        try:
            yes = yes_no_loop(
                "\n[clean] Are you sure you want to completely remove the directories listed above?")
            if not yes:
                log(clr("[clean] Not removing any workspace directories for"
                        " this profile."))
                return True
        except KeyboardInterrupt:
            log("\n[clean] No actions performed.")
            sys.exit(0)

    # Initialize flag to be used on the next invocation
    needs_force = False

    try:
        for space in opts.spaces:
            if space == 'devel':
                # Remove all develspace files
                if paths_exists['devel']:
                    log("[clean] Removing {}: {}".format(Context.SPACES['devel']['space'], ctx.devel_space_abs))
                    if not opts.dry_run:
                        safe_rmtree(ctx.devel_space_abs, ctx.workspace, opts.force)
                # Clear the cached metadata from the last build run
                _, build_metadata_file = get_metadata_paths(ctx.workspace, profile, 'build')
                if os.path.exists(build_metadata_file):
                    os.unlink(build_metadata_file)
                # Clear the cached packages data, if it exists
                packages_metadata_path = ctx.package_metadata_path()
                if os.path.exists(packages_metadata_path):
                    safe_rmtree(packages_metadata_path, ctx.workspace, opts.force)
            else:
                if paths_exists[space]:
                    space_name = Context.SPACES[space]['space']
                    space_path = paths[space]
                    log("[clean] Removing {}: {}".format(space_name, space_path))
                    if not opts.dry_run:
                        safe_rmtree(space_path, ctx.workspace, opts.force)

        # Setup file removal: delegated to per-package cleaning of the two
        # bootstrap packages which generate the setup files
        if opts.setup_files:
            if paths_exists['devel']:
                log("[clean] Removing setup files from {}: {}".format(Context.SPACES['devel']['space'], paths['devel']))
                opts.packages.append('catkin')
                opts.packages.append('catkin_tools_prebuild')
            else:
                log("[clean] No {} exists, no setup files to clean.".format(Context.SPACES['devel']['space']))

        # Find orphaned packages
        # NOTE(review): `or` binds looser than `and`, so this reads as
        # `link_devel or (isolate_devel and not ...)` — the intent looks like
        # `(link_devel or isolate_devel) and not ...`; confirm before relying on it.
        if ctx.link_devel or ctx.isolate_devel and not ('devel' in opts.spaces or 'build' in opts.spaces):
            if opts.orphans:
                if os.path.exists(ctx.build_space_abs):
                    log("[clean] Determining orphaned packages...")

                    # Get all existing packages in source space and the
                    # Suppress warnings since this is looking for packages which no longer exist
                    found_source_packages = [
                        pkg.name for (path, pkg) in find_packages(ctx.source_space_abs, warnings=[]).items()]
                    built_packages = [
                        pkg.name for (path, pkg) in find_packages(ctx.package_metadata_path(), warnings=[]).items()]

                    # Look for orphaned products in the build space
                    orphans = [p for p in built_packages
                               if (p not in found_source_packages and p != 'catkin_tools_prebuild')]

                    if len(orphans) > 0:
                        opts.packages.extend(list(orphans))
                    else:
                        log("[clean] No orphans in the workspace.")
                else:
                    log("[clean] No {} exists, no potential for orphans.".format(Context.SPACES['build']['space']))

            # Remove specific packages
            if len(opts.packages) > 0 or opts.clean_this:
                # Determine the enclosing package
                try:
                    ws_path = find_enclosing_workspace(getcwd())
                    # Suppress warnings since this won't necessarily find all packages
                    # in the workspace (it stops when it finds one package), and
                    # relying on it for warnings could mislead people.
                    this_package = find_enclosing_package(
                        search_start_path=getcwd(),
                        ws_path=ws_path,
                        warnings=[])
                except InvalidPackage as ex:
                    sys.exit(clr("[clean] @!@{rf}Error:@| The file {} is an invalid package.xml file."
                                 " See below for details:\n\n{}").format(ex.package_path, ex.msg))

                # Handle context-based package cleaning
                if opts.clean_this:
                    if this_package:
                        opts.packages += [this_package]
                    else:
                        sys.exit(
                            clr("[clean] @!@{rf}Error:@| In order to use --this, the current directory"
                                " must be part of a catkin package."))
                try:
                    # Clean the packages
                    needs_force = clean_packages(
                        ctx,
                        opts.packages,
                        opts.dependents,
                        opts.verbose,
                        opts.dry_run)
                except KeyboardInterrupt:
                    wide_log("[clean] User interrupted!")
                    return False
        elif opts.orphans or len(opts.packages) > 0 or opts.clean_this:
            log(clr("[clean] @!@{rf}Error:@| Individual packages cannot be cleaned from "
                    "workspaces with merged develspaces, use a symbolically-linked "
                    "or isolated develspace instead."))

    except:  # noqa: E722
        # Silencing E722 here since we immediately re-raise the exception.
        log("[clean] Failed to clean profile `{}`".format(profile))
        needs_force = True
        raise
    finally:
        # Anything partially cleaned forces a CMake re-configure on the next build
        if needs_force:
            log(clr(
                "[clean] @/@!Note:@| @/Parts of the workspace have been cleaned which will "
                "necessitate re-configuring CMake on the next build.@|"))
            update_metadata(ctx.workspace, ctx.profile, 'build', {'needs_force': True})

    return True
def main(opts):
    """Entry point of the ``catkin test`` verb.

    Resolves --this to the enclosing package, loads the workspace context,
    extends the parent result space if configured, then runs the tests via
    ``test_workspace``.

    :param opts: parsed command-line options namespace
    :returns: result of ``test_workspace`` (exit code); may ``sys.exit`` early
    """
    # Set color options (env var overrides the CLI flag)
    opts.force_color = os.environ.get('CATKIN_TOOLS_FORCE_COLOR', opts.force_color)
    if (opts.force_color or is_tty(sys.stdout)) and not opts.no_color:
        set_color(True)
    else:
        set_color(False)

    # Context-aware args
    if opts.build_this:
        # Determine the enclosing package
        try:
            ws_path = find_enclosing_workspace(getcwd())
            # Suppress warnings since this won't necessarily find all packages
            # in the workspace (it stops when it finds one package), and
            # relying on it for warnings could mislead people.
            this_package = find_enclosing_package(search_start_path=getcwd(), ws_path=ws_path, warnings=[])
        except InvalidPackage as ex:
            sys.exit(
                clr("[test] @!@{rf}Error:@| The file {} is an invalid package.xml file."
                    " See below for details:\n\n{}").format(
                        ex.package_path, ex.msg))

        # Handle context-based package building
        if this_package:
            opts.packages += [this_package]
        else:
            sys.exit(
                clr("[test] @!@{rf}Error:@| In order to use --this, "
                    "the current directory must be part of a catkin package."))

    # Load the context
    ctx = Context.load(opts.workspace, opts.profile, opts, append=True)

    # Load the environment of the workspace to extend
    if ctx.extend_path is not None:
        try:
            load_resultspace_environment(ctx.extend_path)
        except IOError as exc:
            sys.exit(
                clr("[test] @!@{rf}Error:@| Unable to extend workspace from \"{}\": {}"
                    ).format(ctx.extend_path, str(exc)))

    # Check if the context is valid before writing any metadata
    if not ctx.source_space_exists():
        sys.exit(
            clr("[test] @!@{rf}Error:@| Unable to find source space `{}`").format(ctx.source_space_abs))

    # Extract make arguments
    make_args, _, _, _ = configure_make_args(ctx.make_args, ctx.jobs_args, ctx.use_internal_make_jobserver)
    ctx.make_args = make_args

    # Get parallel toplevel jobs
    # int(None) raises TypeError -> fall back to "no limit".
    # NOTE(review): a non-numeric string would raise ValueError and escape
    # this handler — confirm opts.parallel_jobs is always int-like or None.
    try:
        parallel_jobs = int(opts.parallel_jobs)
    except TypeError:
        parallel_jobs = None

    # Set VERBOSE environment variable (respects a pre-existing value)
    if opts.verbose and 'VERBOSE' not in os.environ:
        os.environ['VERBOSE'] = '1'

    # Get test targets: --test-target overrides both; --catkin-test-target
    # then overrides the catkin target only
    catkin_test_target = 'run_tests'
    cmake_test_target = 'test'
    if opts.test_target:
        catkin_test_target = opts.test_target
        cmake_test_target = opts.test_target
    if opts.catkin_test_target:
        catkin_test_target = opts.catkin_test_target

    return test_workspace(
        ctx,
        packages=opts.packages,
        n_jobs=parallel_jobs,
        quiet=not opts.verbose,
        interleave_output=opts.interleave_output,
        no_status=opts.no_status,
        limit_status_rate=opts.limit_status_rate,
        no_notify=opts.no_notify,
        continue_on_failure=opts.continue_on_failure,
        summarize_build=opts.summarize,
        catkin_test_target=catkin_test_target,
        cmake_test_target=cmake_test_target,
    )
def main(opts):
    """Entry point of the ``catkin locate`` verb.

    Prints the path to a workspace, a space within it, or a package,
    depending on the options given; exits non-zero on lookup failure.

    :param opts: parsed command-line options namespace
    """
    # Initialize dictionary version of opts namespace
    opts_vars = vars(opts) if opts else {}

    # Check for special locations shipped with catkin_tools itself
    root_resource_path = os.path.join(os.path.dirname(__file__), '..', '..')
    if opts.shell_verbs:
        shell_verbs = os.path.join(root_resource_path, 'verbs', 'catkin_shell_verbs.bash')
        print(os.path.normpath(shell_verbs))
        sys.exit(0)
    elif opts.examples:
        shell_verbs = os.path.join(root_resource_path, '..', 'docs', 'examples')
        print(os.path.normpath(shell_verbs))
        sys.exit(0)

    # Get the workspace (either the given directory or the enclosing ws)
    workspace_hint = opts_vars.get('workspace', None) or getcwd()
    workspace = find_enclosing_workspace(workspace_hint)

    if not workspace:
        if not opts.quiet:
            print(clr(
                "[locate] @!@{rf}Error:@| No workspace found containing '{}'").
                format(workspace_hint), file=sys.stderr)
        sys.exit(1)

    # Load the context to get the subspaces
    ctx = Context.load(workspace, opts.profile, opts, load_env=False)

    path = None
    if opts.space:
        # Resolve the requested space to its absolute path attribute
        path = getattr(ctx, "{}_space_abs".format(opts.space))

    package = None
    if opts.package or opts.this:
        if opts.this:
            try:
                package = find_enclosing_package(search_start_path=getcwd(),
                                                 ws_path=ctx.workspace,
                                                 warnings=[])
                if package is None:
                    sys.exit(
                        clr("[locate] @!@{rf}Error:@| Passed '--this' but could not determine enclosing package. "
                            "Is '{}' in a package in '{}' workspace?").format(
                                getcwd(), ctx.workspace))
            except InvalidPackage as ex:
                sys.exit(
                    clr("[locate] @!@{rf}Error:@| The file {} is an invalid package.xml file."
                        " See below for details:\n\n{}").format(
                            ex.package_path, ex.msg))
        else:
            package = opts.package

        # Get the path to the given package
        path = path or ctx.source_space_abs
        if not opts.space or opts.space == 'source':
            # Search the source space for the package directory by name
            try:
                packages = find_packages(path, warnings=[])
                catkin_package = [
                    pkg_path for pkg_path, p in packages.items()
                    if p.name == package
                ]
                if catkin_package:
                    path = os.path.join(path, catkin_package[0])
                else:
                    sys.exit(
                        clr("[locate] @!@{rf}Error:@| Could not locate a package named '{}' in path '{}'"
                            ).format(package, path))
            except RuntimeError as e:
                sys.exit(clr('[locate] @!@{rf}Error:@| {}').format(str(e)))
        elif opts.space in ['devel', 'install']:
            # Built/installed packages live under share/<pkg> in these spaces
            path = os.path.join(path, 'share', package)
        else:
            path = os.path.join(path, package)

    if not opts.space and package is None:
        # Get the path to the workspace root
        path = workspace

    # Check if the path exists
    if opts.existing_only and not os.path.exists(path):
        sys.exit(
            clr("[locate] @!@{rf}Error:@| Requested path '{}' does not exist."
                ).format(path))

    # Make the path relative if desired
    if opts.relative:
        path = os.path.relpath(path, getcwd())

    # Print the path
    print(path)
def main(opts):
    """Entry point of the ``catkin build`` verb.

    Resolves --this, loads the workspace context, configures make arguments
    (and an optional jobserver memory limit), extends the parent result
    space, persists build metadata, and runs ``build_isolated_workspace``.

    :param opts: parsed command-line options namespace
    :returns: build result / exit code; may ``sys.exit`` early
    """
    # Context-aware args
    if opts.build_this or opts.start_with_this:
        # Determine the enclosing package
        try:
            ws_path = find_enclosing_workspace(getcwd())
            # Suppress warnings since this won't necessarily find all packages
            # in the workspace (it stops when it finds one package), and
            # relying on it for warnings could mislead people.
            this_package = find_enclosing_package(search_start_path=getcwd(), ws_path=ws_path, warnings=[])
        except (InvalidPackage, RuntimeError):
            this_package = None

        # Handle context-based package building
        if opts.build_this:
            if this_package:
                opts.packages += [this_package]
            else:
                sys.exit(
                    "catkin build: --this was specified, but this directory is not in a catkin package."
                )

        # If --start--with was used without any packages and --this was specified, start with this package
        if opts.start_with_this:
            if this_package:
                opts.start_with = this_package
            else:
                sys.exit(
                    "catkin build: --this was specified, but this directory is not in a catkin package."
                )

    if opts.no_deps and not opts.packages:
        sys.exit("With --no-deps, you must specify packages to build.")

    # Load the context
    ctx = Context.load(opts.workspace, opts.profile, opts, append=True)

    # Initialize the build configuration
    make_args, makeflags, cli_flags, jobserver = configure_make_args(
        ctx.make_args, ctx.use_internal_make_jobserver)

    # Set the jobserver memory limit
    if jobserver and opts.mem_limit:
        log(
            clr("@!@{pf}EXPERIMENTAL: limit memory to '%s'@|" % str(opts.mem_limit)))
        # At this point psutil will be required, check for it and bail out if not set
        try:
            import psutil  # noqa
        except ImportError as exc:
            log("Could not import psutil, but psutil is required when using --mem-limit.")
            log("Please either install psutil or avoid using --mem-limit.")
            sys.exit("Exception: {0}".format(exc))
        set_jobserver_max_mem(opts.mem_limit)

    ctx.make_args = make_args

    # Load the environment of the workspace to extend
    if ctx.extend_path is not None:
        try:
            load_resultspace_environment(ctx.extend_path)
        except IOError as exc:
            # FIX: IOError has no `.message` attribute in Python 3 (it raised
            # AttributeError here) — format the exception object directly.
            log(
                clr("@!@{rf}Error:@| Unable to extend workspace from \"%s\": %s" %
                    (ctx.extend_path, exc)))
            return 1

    # Display list and leave the file system untouched
    if opts.dry_run:
        dry_run(ctx, opts.packages, opts.no_deps, opts.start_with)
        return

    # Check if the context is valid before writing any metadata
    if not ctx.source_space_exists():
        print("catkin build: error: Unable to find source space `%s`" % ctx.source_space_abs)
        return 1

    # Always save the last context under the build verb
    update_metadata(ctx.workspace, ctx.profile, 'build', ctx.get_stored_dict())

    # Honor (and clear) a force-CMake request left by a previous clean
    build_metadata = get_metadata(ctx.workspace, ctx.profile, 'build')
    if build_metadata.get('needs_force', False):
        opts.force_cmake = True
        update_metadata(ctx.workspace, ctx.profile, 'build', {'needs_force': False})

    # Save the context as the configuration
    if opts.save_config:
        Context.save(ctx)

    start = time.time()
    try:
        return build_isolated_workspace(
            ctx,
            packages=opts.packages,
            start_with=opts.start_with,
            no_deps=opts.no_deps,
            jobs=opts.parallel_jobs,
            force_cmake=opts.force_cmake,
            force_color=opts.force_color,
            quiet=not opts.verbose,
            interleave_output=opts.interleave_output,
            no_status=opts.no_status,
            limit_status_rate=opts.limit_status_rate,
            lock_install=not opts.no_install_lock,
            no_notify=opts.no_notify,
            continue_on_failure=opts.continue_on_failure,
            summarize_build=opts.summarize  # Can be True, False, or None
        )
    finally:
        log("[build] Runtime: {0}".format(
            format_time_delta(time.time() - start)))
def main(opts):
    """Entry point of an older ``catkin build`` verb implementation.

    Resolves --this, loads the workspace context, extends the parent result
    space, persists build metadata, and runs ``build_isolated_workspace``.

    :param opts: parsed command-line options namespace
    :returns: build result / exit code; may ``sys.exit`` early
    """
    # Context-aware args
    if opts.build_this or opts.start_with_this:
        # Determine the enclosing package
        try:
            this_package = find_enclosing_package()
        except InvalidPackage:
            # FIX: was `pass`, which left `this_package` unbound and raised
            # NameError below when the lookup failed.
            this_package = None

        # Handle context-based package building
        if opts.build_this:
            if this_package:
                opts.packages += [this_package]
            else:
                sys.exit(
                    "catkin build: --this was specified, but this directory is not in a catkin package."
                )

        # If --start--with was used without any packages and --this was specified, start with this package
        if opts.start_with_this:
            if this_package:
                opts.start_with = this_package
            else:
                sys.exit(
                    "catkin build: --this was specified, but this directory is not in a catkin package."
                )

    if opts.no_deps and not opts.packages:
        sys.exit("With --no-deps, you must specify packages to build.")

    # Disable colors unless forced or attached to a terminal
    if not opts.force_color and not is_tty(sys.stdout):
        set_color(False)

    # Load the context
    ctx = Context.Load(opts.workspace, opts.profile, opts)

    # Load the environment of the workspace to extend
    if ctx.extend_path is not None:
        try:
            load_resultspace_environment(ctx.extend_path)
        except IOError as exc:
            # FIX: IOError has no `.message` attribute in Python 3 (it raised
            # AttributeError here) — format the exception object directly.
            log(
                clr("@!@{rf}Error:@| Unable to extend workspace from \"%s\": %s" %
                    (ctx.extend_path, exc)))
            return 1

    # Display list and leave the filesystem untouched
    if opts.dry_run:
        dry_run(ctx, opts.packages, opts.no_deps, opts.start_with)
        return

    # Check if the context is valid before writing any metadata
    if not ctx.source_space_exists():
        print("catkin build: error: Unable to find source space `%s`" % ctx.source_space_abs)
        return 1

    # Always save the last context under the build verb
    update_metadata(ctx.workspace, ctx.profile, 'build', ctx.get_stored_dict())

    # Honor (and clear) a force-CMake request left by a previous clean
    build_metadata = get_metadata(ctx.workspace, ctx.profile, 'build')
    if build_metadata.get('needs_force', False):
        opts.force_cmake = True
        update_metadata(ctx.workspace, ctx.profile, 'build', {'needs_force': False})

    # Save the context as the configuration
    if opts.save_config:
        Context.Save(ctx)

    start = time.time()
    try:
        return build_isolated_workspace(
            ctx,
            packages=opts.packages,
            start_with=opts.start_with,
            no_deps=opts.no_deps,
            jobs=opts.parallel_jobs,
            force_cmake=opts.force_cmake,
            force_color=opts.force_color,
            quiet=not opts.verbose,
            interleave_output=opts.interleave_output,
            no_status=opts.no_status,
            limit_status_rate=opts.limit_status_rate,
            lock_install=not opts.no_install_lock,
            no_notify=opts.no_notify)
    finally:
        log("[build] Runtime: {0}".format(
            format_time_delta(time.time() - start)))
def clean_profile(opts, profile):
    """Clean products from a single profile (older boolean-flag variant).

    Depending on *opts*, removes the build/devel/install/log spaces, setup
    files, orphaned package products, and/or specific packages.
    Interactively confirms destructive operations unless --yes/--dry-run.

    :param opts: parsed command-line options namespace (mutated: packages
        list and per-space flags may be set)
    :param profile: name of the profile to clean (overridden by the loaded
        context's profile)
    :returns: True on success or no-op, False on failure
    """
    # Load the context
    ctx = Context.load(opts.workspace, profile, opts, strict=True, load_env=False)

    if not ctx:
        if not opts.workspace:
            log(
                "[clean] Error: The current or desired workspace could not be "
                "determined. Please run `catkin clean` from within a catkin "
                "workspace or specify the workspace explicitly with the "
                "`--workspace` option.")
        else:
            log(
                "[clean] Error: Could not clean workspace \"%s\" because it "
                "either does not exist or it has no catkin_tools metadata." %
                opts.workspace)
        return False

    profile = ctx.profile

    # Check if the user wants to do something explicit
    actions = [
        'build', 'devel', 'install', 'logs',
        'packages', 'orphans', 'deinit', 'setup_files']

    # Existence checks performed up front so the confirmation prompt and the
    # removal steps agree on what is present
    logs_exists = os.path.exists(ctx.log_space_abs)
    build_exists = os.path.exists(ctx.build_space_abs)
    devel_exists = os.path.exists(ctx.devel_space_abs)

    # The install space may be relocated under DESTDIR
    install_path = (
        os.path.join(ctx.destdir, ctx.install_space_abs.lstrip(os.sep))
        if ctx.destdir else ctx.install_space_abs)
    install_exists = os.path.exists(install_path)

    # Default is to clean all products for this profile
    no_specific_action = not any([
        v for (k, v) in vars(opts).items()
        if k in actions])
    clean_all = opts.deinit or no_specific_action

    # Initialize action options
    if clean_all:
        opts.logs = opts.build = opts.devel = opts.install = True

    # Make sure the user intends to clean everything
    spaces_to_clean = (opts.logs or opts.build or opts.devel or opts.install)
    spaces_to_clean_msgs = []

    if spaces_to_clean and not (opts.yes or opts.dry_run):
        if opts.logs and logs_exists:
            spaces_to_clean_msgs.append(clr("[clean] Log Space: @{yf}{}").format(ctx.log_space_abs))
        if opts.build and build_exists:
            spaces_to_clean_msgs.append(clr("[clean] Build Space: @{yf}{}").format(ctx.build_space_abs))
        if opts.devel and devel_exists:
            spaces_to_clean_msgs.append(clr("[clean] Devel Space: @{yf}{}").format(ctx.devel_space_abs))
        if opts.install and install_exists:
            spaces_to_clean_msgs.append(clr("[clean] Install Space: @{yf}{}").format(install_path))

        if len(spaces_to_clean_msgs) == 0 and not opts.deinit:
            log("[clean] Nothing to be cleaned for profile: `{}`".format(profile))
            return True

    if len(spaces_to_clean_msgs) > 0:
        log("")
        log(clr("[clean] @!@{yf}Warning:@| This will completely remove the "
                "following directories. (Use `--yes` to skip this check)"))
        for msg in spaces_to_clean_msgs:
            log(msg)
        try:
            yes = yes_no_loop(
                "\n[clean] Are you sure you want to completely remove the directories listed above?")
            if not yes:
                log(clr("[clean] Not removing any workspace directories for"
                        " this profile."))
                return True
        except KeyboardInterrupt:
            log("\n[clean] No actions performed.")
            sys.exit(0)

    # Initialize flag to be used on the next invocation
    needs_force = False

    try:
        # Remove all installspace files
        if opts.install and install_exists:
            log("[clean] Removing installspace: %s" % install_path)
            if not opts.dry_run:
                safe_rmtree(install_path, ctx.workspace, opts.force)

        # Remove all develspace files
        if opts.devel:
            if devel_exists:
                log("[clean] Removing develspace: %s" % ctx.devel_space_abs)
                if not opts.dry_run:
                    safe_rmtree(ctx.devel_space_abs, ctx.workspace, opts.force)
            # Clear the cached metadata from the last build run
            _, build_metadata_file = get_metadata_paths(ctx.workspace, profile, 'build')
            if os.path.exists(build_metadata_file):
                os.unlink(build_metadata_file)
            # Clear the cached packages data, if it exists
            packages_metadata_path = ctx.package_metadata_path()
            if os.path.exists(packages_metadata_path):
                safe_rmtree(packages_metadata_path, ctx.workspace, opts.force)

        # Remove all buildspace files
        if opts.build and build_exists:
            log("[clean] Removing buildspace: %s" % ctx.build_space_abs)
            if not opts.dry_run:
                safe_rmtree(ctx.build_space_abs, ctx.workspace, opts.force)

        # Setup file removal: delegated to per-package cleaning of the two
        # bootstrap packages which generate the setup files
        if opts.setup_files:
            if devel_exists:
                log("[clean] Removing setup files from develspace: %s" % ctx.devel_space_abs)
                opts.packages.append('catkin')
                opts.packages.append('catkin_tools_prebuild')
            else:
                log("[clean] No develspace exists, no setup files to clean.")

        # Clean log files
        if opts.logs and logs_exists:
            log("[clean] Removing log space: {}".format(ctx.log_space_abs))
            if not opts.dry_run:
                safe_rmtree(ctx.log_space_abs, ctx.workspace, opts.force)

        # Find orphaned packages (only meaningful for linked develspaces, and
        # pointless if the build/devel spaces are being removed wholesale)
        if ctx.link_devel and not any([opts.build, opts.devel]):
            if opts.orphans:
                if os.path.exists(ctx.build_space_abs):
                    log("[clean] Determining orphaned packages...")

                    # Get all existing packages in source space and the
                    # Suppress warnings since this is looking for packages which no longer exist
                    found_source_packages = [
                        pkg.name for (path, pkg) in find_packages(ctx.source_space_abs, warnings=[]).items()]
                    built_packages = [
                        pkg.name for (path, pkg) in find_packages(ctx.package_metadata_path(), warnings=[]).items()]

                    # Look for orphaned products in the build space
                    orphans = [p for p in built_packages
                               if (p not in found_source_packages and p != 'catkin_tools_prebuild')]

                    if len(orphans) > 0:
                        opts.packages.extend(list(orphans))
                    else:
                        log("[clean] No orphans in the workspace.")
                else:
                    log("[clean] No buildspace exists, no potential for orphans.")

            # Remove specific packages
            if len(opts.packages) > 0:
                try:
                    # Clean the packages
                    needs_force = clean_packages(
                        ctx,
                        opts.packages,
                        opts.dependents,
                        opts.verbose,
                        opts.dry_run)
                except KeyboardInterrupt:
                    # NOTE(review): tagged "[build]" in the clean verb —
                    # presumably copy-paste; confirm before changing the text.
                    wide_log("[build] User interrupted!")
                    return False
        elif opts.orphans or len(opts.packages) > 0:
            log("[clean] Error: Individual packages can only be cleaned from "
                "workspaces with symbolically-linked develspaces (`catkin "
                "config --link-devel`).")
    except:  # noqa: E722 -- intentional: log the failure, then re-raise unchanged
        log("[clean] Failed to clean profile `{}`".format(profile))
        needs_force = True
        raise
    finally:
        # Anything partially cleaned forces a CMake re-configure on the next build
        if needs_force:
            log(clr(
                "[clean] @/@!Note:@| @/Parts of the workspace have been cleaned which will "
                "necessitate re-configuring CMake on the next build.@|"))
            update_metadata(ctx.workspace, ctx.profile, 'build', {'needs_force': True})

    return True
def main(opts):
    """Entry point of an older ``catkin locate`` verb implementation.

    Prints the path to a workspace, a space within it, or a package,
    depending on the options given; exits non-zero on lookup failure.

    :param opts: parsed command-line options namespace
    """
    # Initialize dictionary version of opts namespace
    opts_vars = vars(opts) if opts else {}

    # Check for special locations shipped with catkin_tools itself
    root_resource_path = os.path.join(os.path.dirname(__file__), '..', '..')
    if opts.shell_verbs:
        shell_verbs = os.path.join(root_resource_path, 'verbs', 'catkin_shell_verbs.bash')
        print(os.path.normpath(shell_verbs))
        sys.exit(0)
    elif opts.examples:
        shell_verbs = os.path.join(root_resource_path, '..', 'docs', 'examples')
        print(os.path.normpath(shell_verbs))
        sys.exit(0)

    # Get the workspace (either the given directory or the enclosing ws)
    workspace_hint = opts_vars.get('workspace', None) or os.getcwd()
    workspace = find_enclosing_workspace(workspace_hint)
    if not workspace:
        if not opts.quiet:
            print(clr("@{rf}ERROR: No workspace found containing '%s'@|" % workspace_hint),
                  file=sys.stderr)
        sys.exit(1)

    # Load the context to get the subspaces
    ctx = Context.load(workspace, opts.profile, opts, load_env=False)

    path = None
    if opts.space:
        # Get the subspace
        if opts.space == 'src':
            path = ctx.source_space_abs
        elif opts.space == 'build':
            path = ctx.build_space_abs
        elif opts.space == 'devel':
            path = ctx.devel_space_abs
        elif opts.space == 'install':
            path = ctx.install_space_abs

    package = None
    if opts.package or opts.this:
        if opts.this:
            package = find_enclosing_package(
                search_start_path=getcwd(),
                ws_path=ctx.workspace,
                warnings=[])
            if package is None:
                print(clr("@{rf}ERROR: Passed '--this' but could not determine enclosing package. "
                          "Is '%s' in a package in '%s' workspace?@|" % (getcwd(), ctx.workspace)),
                      file=sys.stderr)
                sys.exit(2)
        else:
            package = opts.package

        # Get the path to the given package
        path = path or ctx.source_space_abs
        if opts.space == 'build':
            # Build space: packages live directly under <build>/<pkg>
            path = os.path.join(path, package)
        elif opts.space in ['devel', 'install']:
            # Devel/install spaces: packages live under share/<pkg>
            path = os.path.join(path, 'share', package)
        else:
            # Source space: search for the package directory by name
            try:
                packages = find_packages(path, warnings=[])
                catkin_package = [pkg_path for pkg_path, p in packages.items() if p.name == package]
                if catkin_package:
                    path = os.path.join(path, catkin_package[0])
                else:
                    print(clr("@{rf}ERROR: Could not locate a package named '%s' in path '%s'@|" %
                              (package, path)), file=sys.stderr)
                    sys.exit(2)
            except RuntimeError as e:
                print(clr('@{rf}ERROR: %s@|' % str(e)), file=sys.stderr)
                sys.exit(1)

    if not opts.space and package is None:
        # Get the path to the workspace root
        path = workspace

    # Check if the path exists
    if opts.existing_only and not os.path.exists(path):
        print(clr("@{rf}ERROR: Requested path '%s' does not exist.@|" % path), file=sys.stderr)
        sys.exit(1)

    # Make the path relative if desired
    if opts.relative:
        path = os.path.relpath(path, os.getcwd())

    # Print the path
    print(path)
def main(opts):
    """Entry point of the ``catkin list`` verb.

    Lists packages in the workspace (or in explicitly given directories),
    optionally filtered to (recursive) dependents of given packages or to
    the enclosing package, and optionally printing each package's
    (recursive) dependencies.

    :param opts: parsed command-line options namespace
    """
    # Load the context
    ctx = Context.load(opts.workspace, opts.profile, load_env=False)

    if not ctx:
        # FIX: sys.exit() takes no `file` keyword (this raised TypeError).
        # A string argument is printed to stderr by sys.exit itself.
        sys.exit(clr("@{rf}ERROR: Could not determine workspace.@|"))

    # Folders to scan: explicit directories, else the source space
    if opts.directory:
        folders = opts.directory
    else:
        folders = [ctx.source_space_abs]

    list_entry_format = '@{pf}-@| @{cf}%s@|' if not opts.unformatted else '%s'

    opts.depends_on = set(opts.depends_on) if opts.depends_on else set()
    warnings = []
    for folder in folders:
        try:
            packages = find_packages(folder, warnings=warnings)
            ordered_packages = topological_order_packages(packages)
            if ordered_packages and ordered_packages[-1][0] is None:
                # FIX: sys.exit() takes no `file` keyword (this raised
                # TypeError); sys.exit prints its string argument to stderr.
                sys.exit(
                    clr("@{rf}ERROR: Circular dependency within packages:@| " +
                        ordered_packages[-1][1]))
            packages_by_name = {
                pkg.name: (pth, pkg)
                for pth, pkg in ordered_packages
            }

            if opts.depends_on or opts.rdepends_on:
                # Collect direct dependents of --depends-on targets and
                # recursive dependents of --rdepends-on targets
                dependents = set()
                for pth, pkg in ordered_packages:
                    is_dep = opts.depends_on.intersection(
                        [p.name for p in pkg.build_depends + pkg.run_depends])
                    if is_dep:
                        dependents.add(pkg.name)
                for pth, pkg in [
                        packages_by_name.get(n) for n in opts.rdepends_on
                ]:
                    if pkg is None:
                        continue
                    rbd = get_recursive_build_dependents_in_workspace(
                        pkg.name, ordered_packages)
                    rrd = get_recursive_run_dependents_in_workspace(
                        pkg.name, ordered_packages)
                    dependents.update([p.name for _, p in rbd])
                    dependents.update([p.name for _, p in rrd])
                filtered_packages = [(pth, pkg)
                                     for pth, pkg in ordered_packages
                                     if pkg.name in dependents]
            elif opts.this:
                # Restrict to the package enclosing the current directory
                this_package = find_enclosing_package(
                    search_start_path=getcwd(),
                    ws_path=ctx.workspace,
                    warnings=[])
                if this_package is None:
                    sys.exit(1)
                if this_package in packages_by_name:
                    filtered_packages = [packages_by_name[this_package]]
                else:
                    filtered_packages = []
            else:
                filtered_packages = ordered_packages

            for pkg_pth, pkg in filtered_packages:
                print(clr(list_entry_format % pkg.name))
                if opts.rdeps:
                    build_deps = [
                        p for dp, p in get_recursive_build_depends_in_workspace(
                            pkg, ordered_packages)
                    ]
                    run_deps = [
                        p for dp, p in get_recursive_run_depends_in_workspace(
                            [pkg], ordered_packages)
                    ]
                else:
                    # Direct dependencies only, honoring package.xml conditions
                    build_deps = [
                        dep for dep in pkg.build_depends if dep.evaluated_condition
                    ]
                    run_deps = [
                        dep for dep in pkg.run_depends if dep.evaluated_condition
                    ]

                if opts.deps or opts.rdeps:
                    if len(build_deps) > 0:
                        print(clr(' @{yf}build_depend:@|'))
                        for dep in build_deps:
                            print(clr(' @{pf}-@| %s' % dep.name))
                    if len(run_deps) > 0:
                        print(clr(' @{yf}run_depend:@|'))
                        for dep in run_deps:
                            print(clr(' @{pf}-@| %s' % dep.name))
        except InvalidPackage as ex:
            sys.exit(
                clr("@{rf}Error:@| The file %s is an invalid package.xml file."
                    " See below for details:\n\n%s" % (ex.package_path, ex.msg)))

    # Print out warnings
    if not opts.quiet:
        for warning in warnings:
            print(clr("@{yf}Warning:@| %s" % warning), file=sys.stderr)
def main(opts):
    """Entry point for ``catkin build`` (older variant): resolve context-aware
    options, configure make/jobserver settings, validate the workspace, and run
    the isolated build.

    Returns the build's return code, or 1 on configuration errors; may call
    sys.exit() for fatal argument errors.
    """
    # Context-aware args
    if opts.build_this or opts.start_with_this:
        # Determine the enclosing package
        try:
            ws_path = find_enclosing_workspace(getcwd())
            # Suppress warnings since this won't necessarily find all packages
            # in the workspace (it stops when it finds one package), and
            # relying on it for warnings could mislead people.
            this_package = find_enclosing_package(
                search_start_path=getcwd(),
                ws_path=ws_path,
                warnings=[])
        except (InvalidPackage, RuntimeError):
            this_package = None

        # Handle context-based package building
        if opts.build_this:
            if this_package:
                opts.packages += [this_package]
            else:
                sys.exit("catkin build: --this was specified, but this directory is not in a catkin package.")

        # If --start--with was used without any packages and --this was specified, start with this package
        if opts.start_with_this:
            if this_package:
                opts.start_with = this_package
            else:
                sys.exit("catkin build: --this was specified, but this directory is not in a catkin package.")

    if opts.no_deps and not opts.packages:
        sys.exit("With --no-deps, you must specify packages to build.")

    # Load the context
    ctx = Context.load(opts.workspace, opts.profile, opts, append=True)

    # Initialize the build configuration
    make_args, makeflags, cli_flags, jobserver = configure_make_args(ctx.make_args, ctx.use_internal_make_jobserver)

    # Set the jobserver memory limit
    if jobserver and opts.mem_limit:
        log(clr("@!@{pf}EXPERIMENTAL: limit memory to '%s'@|" % str(opts.mem_limit)))
        # At this point psutil will be required, check for it and bail out if not set
        try:
            import psutil  # noqa
        except ImportError as exc:
            log("Could not import psutil, but psutil is required when using --mem-limit.")
            log("Please either install psutil or avoid using --mem-limit.")
            sys.exit("Exception: {0}".format(exc))
        set_jobserver_max_mem(opts.mem_limit)

    ctx.make_args = make_args

    # Load the environment of the workspace to extend
    if ctx.extend_path is not None:
        try:
            load_resultspace_environment(ctx.extend_path)
        except IOError as exc:
            # BUG FIX: Python 3 exceptions have no `.message` attribute; the
            # original `exc.message` raised AttributeError and masked the real
            # error. Format the exception object itself instead.
            log(clr("@!@{rf}Error:@| Unable to extend workspace from \"%s\": %s" % (ctx.extend_path, exc)))
            return 1

    # Display list and leave the file system untouched
    if opts.dry_run:
        dry_run(ctx, opts.packages, opts.no_deps, opts.start_with)
        return

    # Check if the context is valid before writing any metadata
    if not ctx.source_space_exists():
        print("catkin build: error: Unable to find source space `%s`" % ctx.source_space_abs)
        return 1

    # Always save the last context under the build verb
    update_metadata(ctx.workspace, ctx.profile, 'build', ctx.get_stored_dict())
    build_metadata = get_metadata(ctx.workspace, ctx.profile, 'build')
    if build_metadata.get('needs_force', False):
        # Another verb flagged the build as stale; force CMake once and clear the flag.
        opts.force_cmake = True
        update_metadata(ctx.workspace, ctx.profile, 'build', {'needs_force': False})

    # Save the context as the configuration
    if opts.save_config:
        Context.save(ctx)

    start = time.time()
    try:
        return build_isolated_workspace(
            ctx,
            packages=opts.packages,
            start_with=opts.start_with,
            no_deps=opts.no_deps,
            jobs=opts.parallel_jobs,
            force_cmake=opts.force_cmake,
            force_color=opts.force_color,
            quiet=not opts.verbose,
            interleave_output=opts.interleave_output,
            no_status=opts.no_status,
            limit_status_rate=opts.limit_status_rate,
            lock_install=not opts.no_install_lock,
            no_notify=opts.no_notify,
            continue_on_failure=opts.continue_on_failure,
            summarize_build=opts.summarize  # Can be True, False, or None
        )
    finally:
        log("[build] Runtime: {0}".format(format_time_delta(time.time() - start)))
def prepare_arguments(parser):
    """Register all ``catkin config`` command-line options on *parser*.

    Adds context/profile args plus option groups for behavior, workspace
    context, package build defaults, space locations, devel-space layout,
    install-space layout, and build options. Returns the same parser.
    """
    parser.description = "This verb is used to configure a catkin workspace's\
 configuration and layout. Calling `catkin config` with no arguments will\
 display the current config and affect no changes if a config already exists\
 for the current workspace and profile."

    # Workspace / profile args
    add_context_args(parser)

    behavior_group = parser.add_argument_group(
        'Behavior',
        'Options affecting argument handling.')
    # Rebinding `add` to each group's add_argument keeps the calls short.
    add = behavior_group.add_mutually_exclusive_group().add_argument
    add('--append-args', '-a', action='store_true', default=False,
        help='For list-type arguments, append elements.')
    add('--remove-args', '-r', action='store_true', default=False,
        help='For list-type arguments, remove elements.')

    context_group = parser.add_argument_group(
        'Workspace Context',
        'Options affecting the context of the workspace.')
    add = context_group.add_argument
    add('--init', action='store_true', default=False,
        help='Initialize a workspace if it does not yet exist.')
    add = context_group.add_mutually_exclusive_group().add_argument
    add('--extend', '-e', dest='extend_path', type=str,
        help='Explicitly extend the result-space of another catkin workspace, '
        'overriding the value of $CMAKE_PREFIX_PATH.')
    # `const=''` (empty string) is the sentinel that clears the extend path.
    add('--no-extend', dest='extend_path', action='store_const', const='',
        help='Un-set the explicit extension of another workspace as set by --extend.')
    add = context_group.add_argument
    add('--mkdirs', action='store_true', default=False,
        help='Create directories required by the configuration (e.g. source space) if they do not already exist.')

    lists_group = parser.add_argument_group(
        'Package Build Defaults',
        'Packages to include or exclude from default build behavior.')
    add = lists_group.add_mutually_exclusive_group().add_argument
    add('--whitelist', metavar="PKG", dest='whitelist', nargs="+", required=False, type=str, default=None,
        help='Set the packages on the whitelist. If the whitelist is non-empty, '
        'only the packages on the whitelist are built with a bare call to '
        '`catkin build`.')
    # `const=[]` distinguishes "explicitly cleared" from the `None` default.
    add('--no-whitelist', dest='whitelist', action='store_const', const=[], default=None,
        help='Clear all packages from the whitelist.')
    add = lists_group.add_mutually_exclusive_group().add_argument
    add('--blacklist', metavar="PKG", dest='blacklist', nargs="+", required=False, type=str, default=None,
        help='Set the packages on the blacklist. Packages on the blacklist are '
        'not built with a bare call to `catkin build`.')
    add('--no-blacklist', dest='blacklist', action='store_const', const=[], default=None,
        help='Clear all packages from the blacklist.')

    spaces_group = parser.add_argument_group(
        'Spaces',
        'Location of parts of the catkin workspace.')
    Context.setup_space_keys()
    # One --<space>-space/--default-<space>-space pair per space known to Context.
    for space, space_dict in Context.SPACES.items():
        add = spaces_group.add_mutually_exclusive_group().add_argument
        flags = ['--{}-space'.format(space)]
        flags.extend([space_dict['short_flag']] if 'short_flag' in space_dict else [])
        add(*flags, default=None,
            help='The path to the {} space.'.format(space))
        add('--default-{}-space'.format(space),
            action='store_const', dest='{}_space'.format(space), default=None,
            const=space_dict['default'],
            help='Use the default path to the {} space ("{}")'.format(
                space, space_dict['default']))
    add = spaces_group.add_argument
    add('-x', '--space-suffix',
        help='Suffix for build, devel, and install space if they are not otherwise explicitly set.')

    devel_group = parser.add_argument_group(
        'Devel Space',
        'Options for configuring the structure of the devel space.')
    add = devel_group.add_mutually_exclusive_group().add_argument
    add('--link-devel', dest='devel_layout', action='store_const', const='linked', default=None,
        help='Build products from each catkin package into isolated spaces,'
        ' then symbolically link them into a merged devel space.')
    add('--merge-devel', dest='devel_layout', action='store_const', const='merged', default=None,
        help='Build products from each catkin package into a single merged devel spaces.')
    add('--isolate-devel', dest='devel_layout', action='store_const', const='isolated', default=None,
        help='Build products from each catkin package into isolated devel spaces.')

    install_group = parser.add_argument_group(
        'Install Space',
        'Options for configuring the structure of the install space.')
    add = install_group.add_mutually_exclusive_group().add_argument
    add('--install', action='store_true', default=None,
        help='Causes each package to be installed to the install space.')
    add('--no-install', dest='install', action='store_false', default=None,
        help='Disables installing each package into the install space.')
    add = install_group.add_mutually_exclusive_group().add_argument
    add('--isolate-install', action='store_true', default=None,
        help='Install each catkin package into a separate install space.')
    # NOTE: --merge-install stores into `isolate_install` (False), not its own dest.
    add('--merge-install', dest='isolate_install', action='store_false', default=None,
        help='Install each catkin package into a single merged install space.')

    build_group = parser.add_argument_group(
        'Build Options',
        'Options for configuring the way packages are built.')
    add_cmake_and_make_and_catkin_make_args(build_group)

    return parser
def main(opts): try: sysargs = sys.argv[1:] # Deprecated options deprecated_args = [ ('--blacklist', '--skiplist',), ('--no-blacklist', '--no-skiplist'), ('--whitelist', '--buildlist'), ('--no-whitelist', '--no-buildlist')] used_deprecated_args = [(old, new) for old, new in deprecated_args if old in sysargs] if any(used_deprecated_args): print(fmt('@!@{rf}WARNING:@| Some arguments are deprecated and will be' ' removed in a future release.\n')) print('Please switch to using their replacements as follows:') for old_arg, new_arg in used_deprecated_args: print(" - '{}' is deprecated, use '{}' instead".format(old_arg, new_arg)) print() # Determine if the user is trying to perform some action, in which # case, the workspace should be automatically initialized ignored_opts = ['main', 'verb'] actions = [v for k, v in vars(opts).items() if k not in ignored_opts] no_action = not any(actions) # Handle old argument names necessary for Context.load if opts.buildlist is not None: opts.whitelist = opts.buildlist del opts.buildlist if opts.skiplist is not None: opts.blacklist = opts.skiplist del opts.skiplist # Try to find a metadata directory to get context defaults # Otherwise use the specified directory context = Context.load( opts.workspace, opts.profile, opts, append=opts.append_args, remove=opts.remove_args) do_init = opts.init or not no_action summary_notes = [] if not context.initialized() and do_init: summary_notes.append(clr('@!@{cf}Initialized new catkin workspace in `{}`@|').format(context.workspace)) if context.initialized() or do_init: Context.save(context) if opts.mkdirs and not context.source_space_exists(): os.makedirs(context.source_space_abs) print(context.summary(notes=summary_notes)) except IOError as exc: # Usually happens if workspace is already underneath another catkin_tools workspace print(clr("@!@{rf}Error:@| Could not configure catkin workspace: {}").format(exc), file=sys.stderr) return 1 return 0
def main(opts):
    """Entry point for ``catkin locate``: resolve and print a workspace path,
    a sub-space path, a package path within a space, or a special resource path.

    Exits nonzero when the workspace, package, or requested path is missing.
    """
    # Dict view of the options namespace (tolerates opts being None).
    opt_dict = vars(opts) if opts else {}

    # Special locations live relative to this module's install location.
    resource_root = os.path.join(os.path.dirname(__file__), '..', '..')
    if opts.shell_verbs:
        target = os.path.join(resource_root, 'verbs', 'catkin_shell_verbs.bash')
        print(os.path.normpath(target))
        sys.exit(0)
    elif opts.examples:
        target = os.path.join(resource_root, '..', 'docs', 'examples')
        print(os.path.normpath(target))
        sys.exit(0)

    # Locate the enclosing workspace (starting from --workspace or the cwd).
    ws_hint = opt_dict.get('workspace', None) or os.getcwd()
    enclosing_ws = find_enclosing_workspace(ws_hint)
    if not enclosing_ws:
        if not opts.quiet:
            print(clr("@{rf}ERROR: No workspace found containing '%s'@|" % ws_hint),
                  file=sys.stderr)
        sys.exit(1)

    # Load the context to resolve sub-space locations.
    ctx = Context.load(enclosing_ws, opts.profile, opts, load_env=False)

    path = None
    if opts.space:
        # Dispatch table replaces the if/elif chain; unknown spaces leave path None.
        attr_by_space = {
            'src': 'source_space_abs',
            'build': 'build_space_abs',
            'devel': 'devel_space_abs',
            'install': 'install_space_abs',
        }
        attr = attr_by_space.get(opts.space)
        if attr is not None:
            path = getattr(ctx, attr)

    if opts.package:
        # Resolve the package directory inside the selected (or source) space.
        path = path or ctx.source_space_abs
        if opts.space == 'build':
            path = os.path.join(path, opts.package)
        elif opts.space in ['devel', 'install']:
            path = os.path.join(path, 'share', opts.package)
        else:
            try:
                packages = find_packages(path, warnings=[])
                matches = [pkg_path for pkg_path, p in packages.items()
                           if p.name == opts.package]
                if matches:
                    path = os.path.join(path, matches[0])
                else:
                    print(clr("@{rf}ERROR: Could not locate a package named '%s' in path '%s'@|"
                              % (opts.package, path)),
                          file=sys.stderr)
                    sys.exit(2)
            except RuntimeError as e:
                print(clr('@{rf}ERROR: %s@|' % str(e)), file=sys.stderr)
                sys.exit(1)
    elif not opts.space:
        # Neither space nor package given: the workspace root itself.
        path = enclosing_ws

    # Optionally require the target to exist on disk.
    if opts.existing_only and not os.path.exists(path):
        print(clr("@{rf}ERROR: Requested path '%s' does not exist.@|" % path),
              file=sys.stderr)
        sys.exit(1)

    # Optionally print relative to the cwd.
    if opts.relative:
        path = os.path.relpath(path, os.getcwd())

    print(path)
def main(opts):
    """Entry point for ``catkin build`` (newer variant): set up debugging/color,
    resolve context-aware options, validate the workspace and prior build tool,
    persist metadata, and run the isolated build.

    Returns the build's return code; may call sys.exit() on fatal errors.
    """
    # Check for develdebug mode
    if opts.develdebug is not None:
        os.environ['TROLLIUSDEBUG'] = opts.develdebug.lower()
        logging.basicConfig(level=opts.develdebug.upper())

    # Set color options (env var can force color on)
    opts.force_color = os.environ.get('CATKIN_TOOLS_FORCE_COLOR', opts.force_color)
    if (opts.force_color or is_tty(sys.stdout)) and not opts.no_color:
        set_color(True)
    else:
        set_color(False)

    # Context-aware args
    if opts.build_this or opts.start_with_this:
        # Determine the enclosing package
        try:
            ws_path = find_enclosing_workspace(getcwd())
            # Suppress warnings since this won't necessarily find all packages
            # in the workspace (it stops when it finds one package), and
            # relying on it for warnings could mislead people.
            this_package = find_enclosing_package(
                search_start_path=getcwd(), ws_path=ws_path, warnings=[])
        except (InvalidPackage, RuntimeError):
            this_package = None

        # Handle context-based package building
        if opts.build_this:
            if this_package:
                opts.packages += [this_package]
            else:
                sys.exit(
                    "[build] Error: In order to use --this, the current directory must be part of a catkin package."
                )

        # If --start--with was used without any packages and --this was specified, start with this package
        if opts.start_with_this:
            if this_package:
                opts.start_with = this_package
            else:
                sys.exit(
                    "[build] Error: In order to use --this, the current directory must be part of a catkin package."
                )

    if opts.no_deps and not opts.packages and not opts.unbuilt:
        sys.exit(
            clr("[build] @!@{rf}Error:@| With --no-deps, you must specify packages to build."))

    # Load the context
    ctx = Context.load(opts.workspace, opts.profile, opts, append=True)

    # Initialize the build configuration
    make_args, makeflags, cli_flags, jobserver = configure_make_args(
        ctx.make_args, ctx.jobs_args, ctx.use_internal_make_jobserver)

    # Set the jobserver memory limit
    if jobserver and opts.mem_limit:
        log(clr("@!@{pf}EXPERIMENTAL: limit memory to '%s'@|" % str(opts.mem_limit)))
        # At this point psutil will be required, check for it and bail out if not set
        try:
            import psutil  # noqa
        except ImportError as exc:
            log("Could not import psutil, but psutil is required when using --mem-limit.")
            log("Please either install psutil or avoid using --mem-limit.")
            sys.exit("Exception: {0}".format(exc))
        job_server.set_max_mem(opts.mem_limit)

    ctx.make_args = make_args

    # Load the environment of the workspace to extend
    if ctx.extend_path is not None:
        try:
            load_resultspace_environment(ctx.extend_path)
        except IOError as exc:
            # BUG FIX: Python 3 exceptions have no `.message`; the original
            # `exc.message` raised AttributeError here. Use the exception itself.
            sys.exit(
                clr("[build] @!@{rf}Error:@| Unable to extend workspace from \"%s\": %s"
                    % (ctx.extend_path, exc)))

    # Check if the context is valid before writing any metadata
    if not ctx.source_space_exists():
        sys.exit(
            clr("[build] @!@{rf}Error:@| Unable to find source space `%s`")
            % ctx.source_space_abs)

    # ensure the build space was previously built by catkin_tools
    previous_tool = get_previous_tool_used_on_the_space(ctx.build_space_abs)
    if previous_tool is not None and previous_tool != 'catkin build':
        if opts.override_build_tool_check:
            log(clr("@{yf}Warning: build space at '%s' was previously built by '%s', "
                    "but --override-build-tool-check was passed so continuing anyways."
                    % (ctx.build_space_abs, previous_tool)))
        else:
            sys.exit(clr("@{rf}The build space at '%s' was previously built by '%s'. "
                         "Please remove the build space or pick a different build space."
                         % (ctx.build_space_abs, previous_tool)))
    # the build space will be marked as catkin build's if dry run doesn't return

    # ensure the devel space was previously built by catkin_tools
    previous_tool = get_previous_tool_used_on_the_space(ctx.devel_space_abs)
    if previous_tool is not None and previous_tool != 'catkin build':
        if opts.override_build_tool_check:
            log(clr("@{yf}Warning: devel space at '%s' was previously built by '%s', "
                    "but --override-build-tool-check was passed so continuing anyways."
                    % (ctx.devel_space_abs, previous_tool)))
        else:
            sys.exit(clr("@{rf}The devel space at '%s' was previously built by '%s'. "
                         "Please remove the devel space or pick a different devel space."
                         % (ctx.devel_space_abs, previous_tool)))
    # the devel space will be marked as catkin build's if dry run doesn't return

    # Display list and leave the file system untouched
    if opts.dry_run:
        # TODO: Add unbuilt
        dry_run(ctx, opts.packages, opts.no_deps, opts.start_with)
        return

    # Print the build environment for a given package and leave the filesystem untouched
    if opts.get_env:
        return print_build_env(ctx, opts.get_env[0])

    # Now mark the build and devel spaces as catkin build's since dry run didn't return.
    mark_space_as_built_by(ctx.build_space_abs, 'catkin build')
    mark_space_as_built_by(ctx.devel_space_abs, 'catkin build')

    # Get the last build context
    build_metadata = get_metadata(ctx.workspace, ctx.profile, 'build')

    # Force cmake if the CMake arguments have changed
    if build_metadata.get('cmake_args') != ctx.cmake_args:
        opts.force_cmake = True

    # Check the devel layout compatibility
    last_devel_layout = build_metadata.get('devel_layout', ctx.devel_layout)
    if last_devel_layout != ctx.devel_layout:
        sys.exit(
            clr("@{rf}@!Error:@|@{rf} The current devel space layout, `{}`,"
                "is incompatible with the configured layout, `{}`.@|").format(
                    last_devel_layout, ctx.devel_layout))

    # Check if some other verb has changed the workspace in such a way that it needs to be forced
    if build_metadata.get('needs_force', False):
        opts.force_cmake = True
        update_metadata(ctx.workspace, ctx.profile, 'build', {'needs_force': False})

    # Always save the last context under the build verb
    update_metadata(ctx.workspace, ctx.profile, 'build', ctx.get_stored_dict())

    # Save the context as the configuration
    if opts.save_config:
        Context.save(ctx)

    # Get parallel toplevel jobs (None when --jobs was not given)
    try:
        parallel_jobs = int(opts.parallel_jobs)
    except TypeError:
        parallel_jobs = None

    # Set VERBOSE environment variable
    if opts.verbose:
        os.environ['VERBOSE'] = '1'

    return build_isolated_workspace(
        ctx,
        packages=opts.packages,
        start_with=opts.start_with,
        no_deps=opts.no_deps,
        unbuilt=opts.unbuilt,
        n_jobs=parallel_jobs,
        force_cmake=opts.force_cmake,
        pre_clean=opts.pre_clean,
        force_color=opts.force_color,
        quiet=not opts.verbose,
        interleave_output=opts.interleave_output,
        no_status=opts.no_status,
        limit_status_rate=opts.limit_status_rate,
        lock_install=not opts.no_install_lock,
        no_notify=opts.no_notify,
        continue_on_failure=opts.continue_on_failure,
        summarize_build=opts.summarize  # Can be True, False, or None
    )
def main(opts): # Initialize dictionary version of opts namespace opts_vars = vars(opts) if opts else {} # Get the workspace (either the given directory or the enclosing ws) workspace_hint = opts_vars.get('workspace', None) or os.getcwd() workspace = find_enclosing_workspace(workspace_hint) if not workspace: print(clr("@{rf}ERROR: No workspace found containing '%s'@|" % workspace_hint), file=sys.stderr) sys.exit(1) # Load the context to get the subspaces ctx = Context.load(workspace, opts.profile, opts, load_env=False) path = None if opts.space: # Get the subspace if opts.space == 'src': path = ctx.source_space_abs elif opts.space == 'build': path = ctx.build_space_abs elif opts.space == 'devel': path = ctx.devel_space_abs elif opts.space == 'install': path = ctx.install_space_abs if opts.package: # Get the path to the given package path = path or ctx.source_space_abs if opts.space == 'build': path = os.path.join(path, opts.package) elif opts.space in ['devel', 'install']: path = os.path.join(path, 'share', opts.package) else: try: packages = find_packages(path, warnings=[]) catkin_package = [ pkg_path for pkg_path, p in packages.items() if p.name == opts.package ] if catkin_package: path = os.path.join(path, catkin_package[0]) else: print(clr( "@{rf}ERROR: Could not locate a package named '%s' in path '%s'@|" % (opts.package, path)), file=sys.stderr) sys.exit(2) except RuntimeError as e: print(clr('@{rf}ERROR: %s@|' % str(e)), file=sys.stderr) sys.exit(1) elif not opts.space: # Get the path to the workspace root path = workspace # Check if the path exists if opts.existing_only and not os.path.exists(path): print(clr("@{rf}ERROR: Requested path '%s' does not exist.@|" % path), file=sys.stderr) sys.exit(1) # Make the path relative if desired if opts.relative: path = os.path.relpath(path, os.getcwd()) # Print the path print(path)
def main(opts):
    """Entry point for ``catkin build`` (third variant): configure debug/color,
    resolve context-aware options, validate workspace and prior build tool,
    persist metadata, and run the isolated build.

    Returns the build's return code; may call sys.exit() on fatal errors.
    """
    # Check for develdebug mode
    if opts.develdebug is not None:
        os.environ['TROLLIUSDEBUG'] = opts.develdebug.lower()
        logging.basicConfig(level=opts.develdebug.upper())

    # Set color options
    if (opts.force_color or is_tty(sys.stdout)) and not opts.no_color:
        set_color(True)
    else:
        set_color(False)

    # Context-aware args
    if opts.build_this or opts.start_with_this:
        # Determine the enclosing package
        try:
            ws_path = find_enclosing_workspace(getcwd())
            # Suppress warnings since this won't necessarily find all packages
            # in the workspace (it stops when it finds one package), and
            # relying on it for warnings could mislead people.
            this_package = find_enclosing_package(
                search_start_path=getcwd(), ws_path=ws_path, warnings=[])
        except (InvalidPackage, RuntimeError):
            this_package = None

        # Handle context-based package building
        if opts.build_this:
            if this_package:
                opts.packages += [this_package]
            else:
                sys.exit(
                    "[build] Error: In order to use --this, the current directory must be part of a catkin package.")

        # If --start--with was used without any packages and --this was specified, start with this package
        if opts.start_with_this:
            if this_package:
                opts.start_with = this_package
            else:
                sys.exit(
                    "[build] Error: In order to use --this, the current directory must be part of a catkin package.")

    if opts.no_deps and not opts.packages and not opts.unbuilt:
        sys.exit(clr("[build] @!@{rf}Error:@| With --no-deps, you must specify packages to build."))

    # Load the context
    ctx = Context.load(opts.workspace, opts.profile, opts, append=True)

    # Initialize the build configuration
    make_args, makeflags, cli_flags, jobserver = configure_make_args(
        ctx.make_args, ctx.jobs_args, ctx.use_internal_make_jobserver)

    # Set the jobserver memory limit
    if jobserver and opts.mem_limit:
        log(clr("@!@{pf}EXPERIMENTAL: limit memory to '%s'@|" % str(opts.mem_limit)))
        # At this point psutil will be required, check for it and bail out if not set
        try:
            import psutil  # noqa
        except ImportError as exc:
            log("Could not import psutil, but psutil is required when using --mem-limit.")
            log("Please either install psutil or avoid using --mem-limit.")
            sys.exit("Exception: {0}".format(exc))
        job_server.set_max_mem(opts.mem_limit)

    ctx.make_args = make_args

    # Load the environment of the workspace to extend
    if ctx.extend_path is not None:
        try:
            load_resultspace_environment(ctx.extend_path)
        except IOError as exc:
            # BUG FIX: Python 3 exceptions have no `.message`; the original
            # `exc.message` raised AttributeError here. Use the exception itself.
            sys.exit(clr("[build] @!@{rf}Error:@| Unable to extend workspace from \"%s\": %s"
                         % (ctx.extend_path, exc)))

    # Check if the context is valid before writing any metadata
    if not ctx.source_space_exists():
        sys.exit(clr("[build] @!@{rf}Error:@| Unable to find source space `%s`")
                 % ctx.source_space_abs)

    # ensure the build space was previously built by catkin_tools
    previous_tool = get_previous_tool_used_on_the_space(ctx.build_space_abs)
    if previous_tool is not None and previous_tool != 'catkin build':
        if opts.override_build_tool_check:
            log(clr(
                "@{yf}Warning: build space at '%s' was previously built by '%s', "
                "but --override-build-tool-check was passed so continuing anyways."
                % (ctx.build_space_abs, previous_tool)))
        else:
            sys.exit(clr(
                "@{rf}The build space at '%s' was previously built by '%s'. "
                "Please remove the build space or pick a different build space."
                % (ctx.build_space_abs, previous_tool)))
    # the build space will be marked as catkin build's if dry run doesn't return

    # ensure the devel space was previously built by catkin_tools
    previous_tool = get_previous_tool_used_on_the_space(ctx.devel_space_abs)
    if previous_tool is not None and previous_tool != 'catkin build':
        if opts.override_build_tool_check:
            log(clr(
                "@{yf}Warning: devel space at '%s' was previously built by '%s', "
                "but --override-build-tool-check was passed so continuing anyways."
                % (ctx.devel_space_abs, previous_tool)))
        else:
            sys.exit(clr(
                "@{rf}The devel space at '%s' was previously built by '%s'. "
                "Please remove the devel space or pick a different devel space."
                % (ctx.devel_space_abs, previous_tool)))
    # the devel space will be marked as catkin build's if dry run doesn't return

    # Display list and leave the file system untouched
    if opts.dry_run:
        # TODO: Add unbuilt
        dry_run(ctx, opts.packages, opts.no_deps, opts.start_with)
        return

    # Print the build environment for a given package and leave the filesystem untouched
    if opts.get_env:
        return print_build_env(ctx, opts.get_env[0])

    # Now mark the build and devel spaces as catkin build's since dry run didn't return.
    mark_space_as_built_by(ctx.build_space_abs, 'catkin build')
    mark_space_as_built_by(ctx.devel_space_abs, 'catkin build')

    # Get the last build context
    build_metadata = get_metadata(ctx.workspace, ctx.profile, 'build')

    # Force CMake if either the stored or the requested CMake args changed.
    if build_metadata.get('cmake_args') != ctx.cmake_args or build_metadata.get('cmake_args') != opts.cmake_args:
        opts.force_cmake = True

    # Check if some other verb has changed the workspace such that it needs a forced CMake run.
    if build_metadata.get('needs_force', False):
        opts.force_cmake = True
        update_metadata(ctx.workspace, ctx.profile, 'build', {'needs_force': False})

    # Always save the last context under the build verb
    update_metadata(ctx.workspace, ctx.profile, 'build', ctx.get_stored_dict())

    # Save the context as the configuration
    if opts.save_config:
        Context.save(ctx)

    # Get parallel toplevel jobs (None when --jobs was not given)
    try:
        parallel_jobs = int(opts.parallel_jobs)
    except TypeError:
        parallel_jobs = None

    # Set VERBOSE environment variable
    if opts.verbose:
        os.environ['VERBOSE'] = '1'

    return build_isolated_workspace(
        ctx,
        packages=opts.packages,
        start_with=opts.start_with,
        no_deps=opts.no_deps,
        unbuilt=opts.unbuilt,
        n_jobs=parallel_jobs,
        force_cmake=opts.force_cmake,
        pre_clean=opts.pre_clean,
        force_color=opts.force_color,
        quiet=not opts.verbose,
        interleave_output=opts.interleave_output,
        no_status=opts.no_status,
        limit_status_rate=opts.limit_status_rate,
        lock_install=not opts.no_install_lock,
        no_notify=opts.no_notify,
        continue_on_failure=opts.continue_on_failure,
        summarize_build=opts.summarize  # Can be True, False, or None
    )
def main(opts): # Check for exclusivity full_options = opts.deinit space_options = opts.logs or opts.build or opts.devel or opts.install package_options = len(opts.packages) > 0 or opts.orphans advanced_options = opts.setup_files if full_options: if space_options or package_options or advanced_options: log("[clean] Error: Using `--deinit` will remove all spaces, so" " additional partial cleaning options will be ignored.") elif space_options: if package_options: log("[clean] Error: Package arguments are not allowed with space" " arguments (--build, --devel, --install, --logs). See usage.") elif advanced_options: log("[clean] Error: Advanced arguments are not allowed with space" " arguments (--build, --devel, --install, --logs). See usage.") # Check for all profiles option if opts.all_profiles: profiles = get_profile_names(opts.workspace or os.getcwd()) else: profiles = [opts.profile] # Initialize job server job_server.initialize( max_jobs=1, max_load=None, gnu_make_enabled=False) # Clean the requested profiles retcode = 0 for profile in profiles: if not clean_profile(opts, profile): retcode = 1 # Warn before nuking .catkin_tools if retcode == 0: if opts.deinit and not opts.yes: log("") log(clr("[clean] @!@{yf}Warning:@| If you deinitialize this workspace" " you will lose all profiles and all saved build" " configuration. (Use `--yes` to skip this check)")) try: opts.deinit = yes_no_loop("\n[clean] Are you sure you want to deinitialize this workspace?") if not opts.deinit: log(clr("[clean] Not deinitializing workspace.")) except KeyboardInterrupt: log("\n[clean] No actions performed.") sys.exit(0) # Nuke .catkin_tools if opts.deinit: ctx = Context.load(opts.workspace, profile, opts, strict=True, load_env=False) metadata_dir = os.path.join(ctx.workspace, METADATA_DIR_NAME) log("[clean] Deinitializing workspace by removing catkin_tools config: %s" % metadata_dir) if not opts.dry_run: safe_rmtree(metadata_dir, ctx.workspace, opts.force) return retcode
def prepare_arguments(parser):
    """Register all ``catkin locate`` command-line options on *parser*.

    Adds behavior flags, mutually-exclusive sub-space selectors, package
    arguments, and special-directory flags. Returns the same parser.
    """
    add_context_args(parser)  # Adds the --profile option, possibly other things.

    # Behavior
    behavior_group = parser.add_argument_group('Behavior')
    add = behavior_group.add_argument
    add('-e', '--existing-only', action='store_true',
        help="Only print paths to existing directories.")
    add('-r', '--relative', action='store_true',
        help="Print relative paths instead of the absolute paths.")
    add('-q', '--quiet', action='store_true',
        help="Suppress warning output.")

    # Path options
    spaces_group = parser.add_argument_group(
        'Sub-Space Options',
        'Get the absolute path to one of the following locations in the given '
        'workspace with the given profile.')
    Context.setup_space_keys()
    # All space flags share dest='space'; store_const records which one was chosen.
    add = spaces_group.add_mutually_exclusive_group().add_argument
    for space, space_dict in Context.SPACES.items():
        flags = [space_dict['short_flag']] if 'short_flag' in space_dict else []
        flags.append('--{}'.format(space_dict['default']))
        flags.append('--{}-space'.format(space))
        add(*flags, dest='space', action='store_const', const=space,
            help='Get the path to the {} space.'.format(space))

    pkg_group = parser.add_argument_group(
        'Package Directories',
        "Get the absolute path to package directories in the given workspace "
        "and sub-space. By default this will output paths in the workspace's "
        "source space. If the -b (--build) flag is given, it will output the "
        "path to the package's build directory. If the -d or -i (--devel or "
        "--install) flags are given, it will output the path to the package's "
        "share directory in that space. If no package is provided, the base "
        "space paths are printed, e.g. `catkin locate -s` might return "
        "`/path/to/ws/src` and `catkin locate -s foo` might return "
        "`/path/to/ws/src/foo`.")
    # A positional PACKAGE and --this are mutually exclusive.
    pkg_group_mut = pkg_group.add_mutually_exclusive_group()
    add = pkg_group_mut.add_argument
    add('package', metavar='PACKAGE', nargs='?',
        help="The name of a package to locate.")
    add('--this', action="store_true",
        help="Locate package containing current working directory.")

    special_group = parser.add_argument_group(
        'Special Directories',
        'Get the absolute path to a special catkin location')
    add = special_group.add_argument
    add('--shell-verbs', action='store_true',
        help="Get the path to the shell verbs script.")
    add('--examples', action='store_true',
        help="Get the path to the examples directory.")

    return parser