def parseCMakeCacheFile(filePath):
    log.dbg(f"parsing CMake cache file at {filePath}")
    kv = {}
    try:
        with open(filePath, "r") as f:
            # should be a short file, so we'll use readlines
            lines = f.readlines()

            # walk through and look for non-comment, non-empty lines
            for line in lines:
                sline = line.strip()
                if sline == "":
                    continue
                if sline.startswith("#") or sline.startswith("//"):
                    continue

                # parse out : and = characters
                pline1 = sline.split(":", maxsplit=1)
                if len(pline1) != 2:
                    continue
                pline2 = pline1[1].split("=", maxsplit=1)
                if len(pline2) != 2:
                    continue
                kv[pline1[0]] = pline2[1]
            return kv
    except OSError as e:
        log.err(f"Error loading {filePath}: {str(e)}")
        return {}
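# Hedged usage sketch (illustrative, not part of the original sources).
# CMake cache entries have the form KEY:TYPE=VALUE, and lines starting with
# "#" or "//" are comments. parseCMakeCacheFile() keeps the key and the value
# and discards the TYPE field, so "CMAKE_C_COMPILER:FILEPATH=/usr/bin/gcc"
# yields kv["CMAKE_C_COMPILER"] == "/usr/bin/gcc". The helper name and the
# cache file path argument below are hypothetical.
def _example_dump_cmake_cache(cache_file_path):
    kv = parseCMakeCacheFile(cache_file_path)
    for key, value in kv.items():
        log.dbg(f"{key} = {value}")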
def do_run(self, args, user_args):
    if args.exclude_west:
        log.wrn('ignoring --exclude-west')

    failed_rebases = []

    for project in _projects(args, listed_must_be_cloned=False,
                             exclude_manifest=True):
        _banner(project.format('updating {name_and_path}:'))

        returncode = _update(project, args.rebase, args.keep_descendants)
        if returncode:
            failed_rebases.append(project)
            log.err(project.format('{name_and_path} failed to rebase'))

    if failed_rebases:
        # Avoid printing this message if exactly one project
        # was specified on the command line.
        if len(args.projects) != 1:
            log.err(('The following project{} failed to rebase; '
                     'see above for details: {}').format(
                         's' if len(failed_rebases) > 1 else '',
                         ', '.join(p.format('{name_and_path}')
                                   for p in failed_rebases)))

        raise CommandError(1)
def use_runner_cls(command, board, args, runners_yaml, cache):
    # Get the ZephyrBinaryRunner class from its name, and make sure it
    # supports the command. Print a message about the choice, and
    # return the class.
    runner = args.runner or runners_yaml.get(command.runner_key)
    if runner is None:
        log.die(f'no {command.name} runner available for board {board}. '
                "Check the board's documentation for instructions.")

    _banner(f'west {command.name}: using runner {runner}')

    available = runners_yaml.get('runners', [])
    if runner not in available:
        if 'BOARD_DIR' in cache:
            board_cmake = Path(cache['BOARD_DIR']) / 'board.cmake'
        else:
            board_cmake = 'board.cmake'
        log.err(f'board {board} does not support runner {runner}',
                fatal=True)
        log.inf(f'To fix, configure this runner in {board_cmake} and rebuild.')
        sys.exit(1)

    try:
        runner_cls = get_runner_cls(runner)
    except ValueError as e:
        log.die(e)

    if command.name not in runner_cls.capabilities().commands:
        log.die(f'runner {runner} does not support command {command.name}')

    return runner_cls
def makeDocuments(self):
    # parse CMake cache file and get compiler path
    log.inf("parsing CMake Cache file")
    self.getCacheFile()

    # parse codemodel from Walker cfg's build dir
    log.inf("parsing CMake Codemodel files")
    self.cm = self.getCodemodel()
    if not self.cm:
        log.err("could not parse codemodel from CMake API reply; bailing")
        return False

    # set up Documents
    log.inf("setting up SPDX documents")
    retval = self.setupDocuments()
    if not retval:
        return False

    # walk through targets in codemodel to gather information
    log.inf("walking through targets")
    self.walkTargets()

    # walk through pending sources and create corresponding files
    log.inf("walking through pending source files")
    self.walkPendingSources()

    # walk through pending relationship data and create relationships
    log.inf("walking through pending relationships")
    self.walkRelationships()

    return True
def do_run_spdx(args):
    if not args.build_dir:
        log.die("Build directory not specified; call `west spdx --build-dir=BUILD_DIR`")

    # create the SPDX files
    cfg = SBOMConfig()
    cfg.buildDir = args.build_dir
    if args.namespace_prefix:
        cfg.namespacePrefix = args.namespace_prefix
    else:
        # create default namespace according to SPDX spec
        # note that this is intentionally _not_ an actual URL where
        # this document will be stored
        cfg.namespacePrefix = f"http://spdx.org/spdxdocs/zephyr-{str(uuid.uuid4())}"
    if args.spdx_dir:
        cfg.spdxDir = args.spdx_dir
    else:
        cfg.spdxDir = os.path.join(args.build_dir, "spdx")
    if args.analyze_includes:
        cfg.analyzeIncludes = True
    if args.include_sdk:
        cfg.includeSDK = True

    # make sure SPDX directory exists, or create it if it doesn't
    if os.path.exists(cfg.spdxDir):
        if not os.path.isdir(cfg.spdxDir):
            log.err(f'SPDX output directory {cfg.spdxDir} exists but is not a directory')
            return
        # directory exists, we're good
    else:
        # create the directory
        os.makedirs(cfg.spdxDir, exist_ok=False)

    makeSPDX(cfg)
def main(argv=None):
    # Makes ANSI color escapes work on Windows, and strips them when
    # stdout/stderr isn't a terminal
    colorama.init()

    if argv is None:
        argv = sys.argv[1:]
    args, unknown = parse_args(argv)

    # Read the configuration files
    config.read_config()

    for_stack_trace = 'run as "west -v ... {} ..." for a stack trace'.format(
        args.command)
    try:
        args.handler(args, unknown)
    except WestUpdated:
        # West has been automatically updated. Restart ourselves to run the
        # latest version, with the same arguments that we were given.
        os.execv(sys.executable, [sys.executable] + sys.argv)
    except KeyboardInterrupt:
        sys.exit(0)
    except CalledProcessError as cpe:
        log.err('command exited with status {}: {}'.format(
            cpe.args[0], quote_sh_list(cpe.args[1])))
        if args.verbose:
            raise
        else:
            log.inf(for_stack_trace)
    except CommandContextError as cce:
        log.die('command', args.command, 'cannot be run in this context:',
                *cce.args)
def setupCmakeQuery(build_dir):
    # check that query dir exists as a directory, or else create it
    cmakeApiDirPath = os.path.join(build_dir, ".cmake", "api", "v1", "query")
    if os.path.exists(cmakeApiDirPath):
        if not os.path.isdir(cmakeApiDirPath):
            log.err(f'cmake api query directory {cmakeApiDirPath} exists and is not a directory')
            return False
        # directory exists, we're good
    else:
        # create the directory
        os.makedirs(cmakeApiDirPath, exist_ok=False)

    # check that codemodel-v2 exists as a file, or else create it
    queryFilePath = os.path.join(cmakeApiDirPath, "codemodel-v2")
    if os.path.exists(queryFilePath):
        if not os.path.isfile(queryFilePath):
            log.err(f'cmake api query file {queryFilePath} exists and is not a file')
            return False
        # file exists, we're good
        return True
    else:
        # file doesn't exist, let's create it
        os.mknod(queryFilePath)
        return True
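# Hedged usage sketch (illustrative, not part of the original sources): the
# same empty query file can be created with pathlib, which also works on
# platforms where os.mknod() is unavailable. The helper name and build
# directory argument are hypothetical.
def _example_setup_query_portable(build_dir):
    from pathlib import Path
    query_dir = Path(build_dir) / ".cmake" / "api" / "v1" / "query"
    query_dir.mkdir(parents=True, exist_ok=True)
    # an empty "codemodel-v2" file asks CMake to write codemodel reply files
    # under <build_dir>/.cmake/api/v1/reply on the next build
    (query_dir / "codemodel-v2").touch()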
def makeSPDX(cfg):
    # report any odd configuration settings
    if cfg.analyzeIncludes and not cfg.includeSDK:
        log.wrn("config: requested to analyze includes but not to generate SDK SPDX document;")
        log.wrn("config: will proceed but will discard detected includes for SDK header files")

    # set up walker configuration
    walkerCfg = WalkerConfig()
    walkerCfg.namespacePrefix = cfg.namespacePrefix
    walkerCfg.buildDir = cfg.buildDir
    walkerCfg.analyzeIncludes = cfg.analyzeIncludes
    walkerCfg.includeSDK = cfg.includeSDK

    # make and run the walker
    w = Walker(walkerCfg)
    retval = w.makeDocuments()
    if not retval:
        log.err("SPDX walker failed; bailing")
        return False

    # set up scanner configuration
    scannerCfg = ScannerConfig()

    # scan each document from walker
    if cfg.includeSDK:
        scanDocument(scannerCfg, w.docSDK)
    scanDocument(scannerCfg, w.docApp)
    scanDocument(scannerCfg, w.docZephyr)
    scanDocument(scannerCfg, w.docBuild)

    # write each document, in this particular order so that the
    # hashes for external references are calculated

    # write SDK document, if we made one
    if cfg.includeSDK:
        retval = writeSPDX(os.path.join(cfg.spdxDir, "sdk.spdx"), w.docSDK)
        if not retval:
            log.err("SPDX writer failed for SDK document; bailing")
            return False

    # write app document
    retval = writeSPDX(os.path.join(cfg.spdxDir, "app.spdx"), w.docApp)
    if not retval:
        log.err("SPDX writer failed for app document; bailing")
        return False

    # write zephyr document
    retval = writeSPDX(os.path.join(cfg.spdxDir, "zephyr.spdx"), w.docZephyr)
    if not retval:
        log.err("SPDX writer failed for zephyr document; bailing")
        return False

    # write build document
    retval = writeSPDX(os.path.join(cfg.spdxDir, "build.spdx"), w.docBuild)
    if not retval:
        log.err("SPDX writer failed for build document; bailing")
        return False

    return True
def main(argv=None):
    # Makes ANSI color escapes work on Windows, and strips them when
    # stdout/stderr isn't a terminal
    colorama.init()

    # See if we're in an installation.
    try:
        topdir = west_topdir()
    except WestNotFound:
        topdir = None

    # Read the configuration files before looking for extensions.
    # We need this to find the manifest path in order to load extensions.
    config.read_config()

    # Load any extension command specs if we're in an installation.
    if topdir:
        try:
            extensions = get_extension_commands()
        except (MalformedConfig, FileNotFoundError):
            extensions = {}
    else:
        extensions = {}

    if argv is None:
        argv = sys.argv[1:]
    args, unknown = parse_args(argv, extensions, topdir)

    try:
        args.handler(args, unknown)
    except KeyboardInterrupt:
        sys.exit(0)
    except CalledProcessError as cpe:
        log.err('command exited with status {}: {}'.format(
            cpe.returncode, quote_sh_list(cpe.cmd)))
        if args.verbose:
            traceback.print_exc()
        sys.exit(cpe.returncode)
    except ExtensionCommandError as ece:
        msg = 'extension command "{}" could not be run{}.'.format(
            args.command, ': ' + ece.hint if ece.hint else '')
        if args.verbose:
            log.err(msg)
            traceback.print_exc()
        else:
            log.err(msg, 'See {} for a traceback.'.format(dump_traceback()))
        sys.exit(ece.returncode)
    except CommandContextError as cce:
        log.err('command', args.command, 'cannot be run in this context:',
                *cce.args)
        log.err('see {} for a traceback.'.format(dump_traceback()))
        sys.exit(cce.returncode)
    except CommandError as ce:
        # No need to dump_traceback() here. The command is responsible
        # for logging its own errors.
        sys.exit(ce.returncode)
def do_run(self, args, user_args):
    if args.update:
        _update_west()

    failed_rebases = []

    for project in _projects(args, listed_must_be_cloned=False,
                             exclude_manifest=True):
        _fetch(project)

        branch = _current_branch(project)
        sha = _sha(project, _MANIFEST_REV_BRANCH)
        if branch is not None:
            is_ancestor = _is_ancestor_of(project, sha, branch)
            try_rebase = args.rebase
        else:
            # If no branch is checked out, -k and -r don't matter.
            is_ancestor = False
            try_rebase = False

        if args.keep_descendants and is_ancestor:
            # A descendant is currently checked out and -k was
            # given, so there's nothing more to do.
            _inf(project,
                 'Left branch "{}", a descendant of {}, checked out'.format(
                     branch, sha))
        elif try_rebase:
            # Attempt a rebase. Don't exit the program on error;
            # instead, append to the list of failed rebases and
            # continue trying to update the other projects. We'll
            # tell the user a complete list of errors when we're done.
            cp = _rebase(project, check=False)
            if cp.returncode:
                failed_rebases.append(project)
                _err(project, '{name_and_path} failed to rebase')
        else:
            # We can't keep a descendant or rebase, so just check
            # out the new detached HEAD and print helpful
            # information about things they can do with any
            # locally checked out branch.
            _checkout_detach(project, _MANIFEST_REV_BRANCH)
            self._post_checkout_help(args, project, branch, sha, is_ancestor)

    if failed_rebases:
        # Avoid printing this message if exactly one project
        # was specified on the command line.
        if len(args.projects) != 1:
            log.err(('The following project{} failed to rebase; '
                     'see above for details: {}').format(
                         's' if len(failed_rebases) > 1 else '',
                         ', '.join(_expand_shorthands(p, '{name_and_path}')
                                   for p in failed_rebases)))

        raise CommandError(1)
def parse_args(argv): # The prog='west' override avoids the absolute path of the main.py script # showing up when West is run via the wrapper west_parser = argparse.ArgumentParser( prog='west', description='The Zephyr RTOS meta-tool.', epilog='Run "west <command> -h" for help on each command.') # Remember to update scripts/west-completion.bash if you add or remove # flags west_parser.add_argument('-z', '--zephyr-base', default=None, help='''Override the Zephyr base directory. The default is the manifest project with path "zephyr".''') west_parser.add_argument('-v', '--verbose', default=0, action='count', help='''Display verbose output. May be given multiple times to increase verbosity.''') west_parser.add_argument('-V', '--version', action='store_true') subparser_gen = west_parser.add_subparsers(title='commands', dest='command') for command in COMMANDS: parser = command.add_parser(subparser_gen) parser.set_defaults(handler=partial(command_handler, command)) args, unknown = west_parser.parse_known_args(args=argv) if args.version: print_version_info() sys.exit(0) # Set up logging verbosity before doing anything else, so # e.g. verbose messages related to argument handling errors # work properly. log.set_verbosity(args.verbose) if IN_MULTIREPO_INSTALL: set_zephyr_base(args) if 'handler' not in args: log.err('west installation found (in {}), but no command given'.format( west_dir()), fatal=True) west_parser.print_help(file=sys.stderr) sys.exit(1) return args, unknown
def _load_manifest(self, path):
    try:
        return manifest.Manifest.from_file(path)
    except manifest.MalformedManifest:
        print(path, 'is a malformed manifest!', file=sys.stderr)
        sys.exit(1)
    except manifest.MalformedConfig:
        # re-raise so the caller sees the configuration error
        log.err("can't load manifest due to bad configuration settings")
        raise
def parseCodemodel(replyDir, codemodelFile):
    codemodelPath = os.path.join(replyDir, codemodelFile)

    try:
        with open(codemodelPath, 'r') as cmFile:
            js = json.load(cmFile)

            cm = zspdx.cmakefileapi.Codemodel()

            # for correctness, check kind and version
            kind = js.get("kind", "")
            if kind != "codemodel":
                log.err(f"Error loading CMake API reply: expected \"kind\":\"codemodel\" in {codemodelPath}, got {kind}")
                return None
            version = js.get("version", {})
            versionMajor = version.get("major", -1)
            if versionMajor != 2:
                if versionMajor == -1:
                    log.err(f"Error loading CMake API reply: expected major version 2 in {codemodelPath}, no version found")
                    return None
                log.err(f"Error loading CMake API reply: expected major version 2 in {codemodelPath}, got {versionMajor}")
                return None

            # get paths
            paths_dict = js.get("paths", {})
            cm.paths_source = paths_dict.get("source", "")
            cm.paths_build = paths_dict.get("build", "")

            # get configurations
            configs_arr = js.get("configurations", [])
            for cfg_dict in configs_arr:
                cfg = parseConfig(cfg_dict, replyDir)
                if cfg:
                    cm.configurations.append(cfg)

            # and after parsing is done, link all the indices
            linkCodemodel(cm)

            return cm
    except OSError as e:
        log.err(f"Error loading {codemodelPath}: {str(e)}")
        return None
    except json.decoder.JSONDecodeError as e:
        log.err(f"Error parsing JSON in {codemodelPath}: {str(e)}")
        return None
def check_force(self, cond, msg):
    '''Abort if the command needs to be forced and hasn't been.

    The "force" predicate must be in self.args.force.

    If cond and self.args.force are both False, scream and die with
    message msg. Otherwise, return. That is, "cond" is a condition
    which means everything is OK; if it's False, only self.args.force
    being True can allow execution to proceed.
    '''
    if not (cond or self.args.force):
        log.err(msg)
        log.die('refusing to proceed without --force due to above error')
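# Hedged usage sketch (illustrative, not part of the original sources): inside
# a command's do_run(), check_force() lets a sanity check be bypassed with
# --force. The directory check and message below are hypothetical.
#
#     self.check_force(os.path.isdir(args.build_dir),
#                      f'build directory {args.build_dir} does not exist')
#
# If the condition is False and --force was not given, the message is logged
# with log.err() and the command aborts via log.die(); with --force, execution
# continues past the check.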
def do_run_init(args):
    log.inf("initializing CMake file-based API prior to build")

    if not args.build_dir:
        log.die("Build directory not specified; call `west spdx --init --build-dir=BUILD_DIR`")

    # initialize CMake file-based API - empty query file
    query_ready = setupCmakeQuery(args.build_dir)
    if query_ready:
        log.inf("initialized; run `west build` then run `west spdx`")
    else:
        log.err("Couldn't create CMake file-based API query directory")
        log.err("You can manually create an empty file at $BUILDDIR/.cmake/api/v1/query/codemodel-v2")
def _update(project, fetch, rebase, keep_descendants):
    if not project.is_cloned():
        _clone(project)

    if fetch == 'always' or _rev_type(project) not in ('tag', 'commit'):
        _fetch(project)
    else:
        log.dbg('skipping unnecessary fetch')
        project.git('update-ref ' + QUAL_MANIFEST_REV +
                    ' {revision}^{{commit}}')

    try:
        sha = project.sha(QUAL_MANIFEST_REV)
    except subprocess.CalledProcessError:
        # This is a sign something's really wrong. Add more help.
        log.err(project.format(
            "no SHA for branch {mr} in {name_and_path}; "
            'was the branch deleted?', mr=MANIFEST_REV))
        raise

    cp = project.git('rev-parse --abbrev-ref HEAD', capture_stdout=True)
    current_branch = cp.stdout.decode('utf-8').strip()
    if current_branch != 'HEAD':
        is_ancestor = project.is_ancestor_of(sha, current_branch)
        try_rebase = rebase
    else:
        # HEAD means no branch is checked out.
        # If no branch is checked out, 'rebase' and 'keep_descendants'
        # don't matter.
        is_ancestor = False
        try_rebase = False

    if keep_descendants and is_ancestor:
        # A descendant is currently checked out and keep_descendants was
        # given, so there's nothing more to do.
        log.small_banner(project.format(
            '{name}: left descendant branch "{b}" checked out',
            b=current_branch))
    elif try_rebase:
        # Attempt a rebase.
        log.small_banner(project.format('{name}: rebasing to ' + MANIFEST_REV))
        project.git('rebase ' + QUAL_MANIFEST_REV)
    else:
        # We can't keep a descendant or rebase, so just check
        # out the new detached HEAD, then print some helpful context.
        project.git('checkout --detach --quiet ' + sha)
        log.small_banner(project.format(
            "{name}: checked out {r} as detached HEAD", r=sha))
        _post_checkout_help(project, current_branch, sha, is_ancestor)
def getCodemodel(self):
    log.dbg("getting codemodel from CMake API reply files")

    # make sure the reply directory exists
    cmakeReplyDirPath = os.path.join(self.cfg.buildDir, ".cmake", "api", "v1", "reply")
    if not os.path.exists(cmakeReplyDirPath):
        log.err(f'cmake api reply directory {cmakeReplyDirPath} does not exist')
        log.err('was query directory created before cmake build ran?')
        return None
    if not os.path.isdir(cmakeReplyDirPath):
        log.err(f'cmake api reply directory {cmakeReplyDirPath} exists but is not a directory')
        return None

    # find file with "index" prefix; there should only be one
    indexFilePath = ""
    for f in os.listdir(cmakeReplyDirPath):
        if f.startswith("index"):
            indexFilePath = os.path.join(cmakeReplyDirPath, f)
            break
    if indexFilePath == "":
        # didn't find it
        log.err(f'cmake api reply index file not found in {cmakeReplyDirPath}')
        return None

    # parse it
    return parseReply(indexFilePath)
def detect_file(file: FileInfo) -> 'set(str)':
    '''Read the input file and try to detect licenses from its content.'''
    try:
        # '8859' is an alias for Latin-1, so decoding cannot fail on arbitrary bytes
        with open(file.file_path, 'r', encoding='8859') as fd:
            content = fd.read()
    except Exception as e:
        log.err(f'Error reading file "{file.file_path}": {e}')
        raise SbomException() from e
    results = set()
    for m in SPDX_TAG_RE.finditer(content):
        id = m.group(1).strip()
        if id != '':
            results.add(id.upper())
    return results
def _handle_failed(self, args, failed):
    # Shared code for commands (like status, diff, update) that need
    # to do the same thing to multiple projects, but collect
    # and report errors if anything failed.
    if not failed:
        return
    elif len(failed) < 20:
        s = 's:' if len(failed) > 1 else ''
        projects = ', '.join(f'{p.name}' for p in failed)
        log.err(f'{self.name} failed for project{s} {projects}')
    else:
        log.err(f'{self.name} failed for multiple projects; see above')
    raise CommandError(1)
def emit(self, record):
    fmt = self.format(record)
    lvl = record.levelno
    if lvl > logging.CRITICAL:
        log.die(fmt)
    elif lvl >= logging.ERROR:
        log.err(fmt)
    elif lvl >= logging.WARNING:
        log.wrn(fmt)
    elif lvl >= logging.INFO:
        _banner(fmt)
    elif lvl >= logging.DEBUG:
        log.dbg(fmt)
    else:
        log.dbg(fmt, level=log.VERBOSE_EXTREME)
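# Hedged note (not part of the original sources): this emit() belongs to a
# logging.Handler subclass, referred to elsewhere in this section as
# WestLogHandler. Attaching it routes Python logging records from the
# 'runners' package to the west.log functions above, as do_run_common() does:
#
#     logger = logging.getLogger('runners')
#     logger.setLevel(LOG_LEVEL)
#     logger.addHandler(WestLogHandler())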
def parseTarget(targetPath):
    try:
        with open(targetPath, 'r') as targetFile:
            js = json.load(targetFile)

            target = zspdx.cmakefileapi.Target()

            target.name = js.get("name", "")
            target.id = js.get("id", "")
            target.type = parseTargetType(js.get("type", "UNKNOWN"))
            target.backtrace = js.get("backtrace", -1)
            target.folder = js.get("folder", "")

            # get paths
            paths_dict = js.get("paths", {})
            target.paths_source = paths_dict.get("source", "")
            target.paths_build = paths_dict.get("build", "")

            target.nameOnDisk = js.get("nameOnDisk", "")

            # parse artifacts if present
            artifacts_arr = js.get("artifacts", [])
            target.artifacts = []
            for artifact_dict in artifacts_arr:
                artifact_path = artifact_dict.get("path", "")
                if artifact_path != "":
                    target.artifacts.append(artifact_path)

            target.isGeneratorProvided = js.get("isGeneratorProvided", False)

            # call separate functions to parse subsections
            parseTargetInstall(target, js)
            parseTargetLink(target, js)
            parseTargetArchive(target, js)
            parseTargetDependencies(target, js)
            parseTargetSources(target, js)
            parseTargetSourceGroups(target, js)
            parseTargetCompileGroups(target, js)
            parseTargetBacktraceGraph(target, js)

            return target
    except OSError as e:
        log.err(f"Error loading {targetPath}: {str(e)}")
        return None
    except json.decoder.JSONDecodeError as e:
        log.err(f"Error parsing JSON in {targetPath}: {str(e)}")
        return None
def parse_args(argv): # The prog='west' override avoids the absolute path of the main.py script # showing up when West is run via the wrapper west_parser = argparse.ArgumentParser( prog='west', description='The Zephyr RTOS meta-tool.', epilog='Run "west <command> -h" for help on each command.') west_parser.add_argument('-z', '--zephyr-base', default=None, help='''Path to the Zephyr base directory. If not given, ZEPHYR_BASE must be defined in the environment, and will be used instead.''') west_parser.add_argument('-v', '--verbose', default=0, action='count', help='''Display verbose output. May be given multiple times to increase verbosity.''') west_parser.add_argument('-V', '--version', action='store_true') subparser_gen = west_parser.add_subparsers(title='commands', dest='command') for command in COMMANDS: parser = command.add_parser(subparser_gen) parser.set_defaults(handler=partial(command_handler, command)) args, unknown = west_parser.parse_known_args(args=argv) if args.version: print_version_info() sys.exit(0) # Set up logging verbosity before doing anything else, so # e.g. verbose messages related to argument handling errors # work properly. log.set_verbosity(args.verbose) if IN_MULTIREPO_INSTALL: set_zephyr_base(args) if 'handler' not in args: log.err('you must specify a command', fatal=True) west_parser.print_usage(file=sys.stderr) sys.exit(1) return args, unknown
def writeSPDX(spdxPath, doc):
    # create and write document to disk
    try:
        log.inf(f"Writing SPDX document {doc.cfg.name} to {spdxPath}")
        with open(spdxPath, "w") as f:
            writeDocumentSPDX(f, doc)
    except OSError as e:
        log.err(f"Error: Unable to write to {spdxPath}: {str(e)}")
        return False

    # calculate hash of the document we just wrote
    hashes = getHashes(spdxPath)
    if not hashes:
        log.err("Error: created document but unable to calculate hash values")
        return False
    doc.myDocSHA1 = hashes[0]

    return True
def main(argv=None):
    # Makes ANSI color escapes work on Windows, and strips them when
    # stdout/stderr isn't a terminal
    colorama.init()

    # Read the configuration files
    config.read_config()

    # Load any external command specs. If the config file isn't
    # fully set up yet, ignore the error. This allows west init to
    # work properly.
    try:
        externals = get_external_commands()
    except MalformedConfig:
        externals = {}

    if argv is None:
        argv = sys.argv[1:]
    args, unknown = parse_args(argv, externals)

    for_stack_trace = 'run as "west -v ... {} ..." for a stack trace'.format(
        args.command)
    try:
        args.handler(args, unknown)
    except WestUpdated:
        # West has been automatically updated. Restart ourselves to run the
        # latest version, with the same arguments that we were given.
        os.execv(sys.executable, [sys.executable] + sys.argv)
    except KeyboardInterrupt:
        sys.exit(0)
    except CalledProcessError as cpe:
        log.err('command exited with status {}: {}'.format(
            cpe.args[0], quote_sh_list(cpe.args[1])))
        if args.verbose:
            raise
        else:
            log.inf(for_stack_trace)
    except CommandContextError as cce:
        log.err('command', args.command, 'cannot be run in this context:',
                *cce.args)
        sys.exit(cce.returncode)
    except CommandError as ce:
        sys.exit(ce.returncode)
def resolve_globs(path: Path, globs: 'list[str]') -> 'set(Path)':
    '''Resolve a list of globs (optionally prefixed with "!" to exclude)
    and return a set of files.'''
    result = set()
    for glob in globs:
        if glob.startswith('!'):
            for file in glob_with_abs_patterns(path, glob[1:]):
                result.discard(file)
        else:
            added_files = 0
            for file in glob_with_abs_patterns(path, glob):
                if file.is_file():
                    result.add(file)
                    added_files += 1
            if added_files == 0:
                if is_glob(glob):
                    log.wrn(f'Input glob "{glob}" does not match any file.')
                else:
                    log.err(f'Input file "{glob}" does not exist.')
                    raise SbomException('Invalid input')
    return result
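# Hedged usage sketch (illustrative, not part of the original sources): globs
# are applied in order, and a leading "!" removes previously matched files.
# The base path and patterns below are hypothetical.
#
#     files = resolve_globs(Path('zephyr'),
#                           ['include/**/*.h',            # add all headers
#                            '!include/generated/*.h'])   # then drop generated ones
#
# A non-glob entry naming a missing file raises SbomException, while a glob
# that matches nothing only produces a warning.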
def do_run(self, args, extra_args):
    env = Manifest.from_file()
    for query in extra_args:
        try:
            prj = env.get_projects([query])
            print(prj[0].abspath)
        except ValueError as e:
            # check if `manifest` is the kAFL repo..
            if query != 'kafl':
                log.err(
                    "Could not find %s in west projects. Try `west list`." % query)
                return
            try:
                # check if manifest repo is kAFL
                kafl_path = env.get_projects(['manifest'])[0].abspath
                if os.path.exists(kafl_path + '/kafl_fuzz.py'):
                    log.wrn(
                        "Returning `manifest` repo path for query `%s`.." % query)
                    print(kafl_path)
            except ValueError as e:
                log.err(
                    "Could not find %s in west projects. Try `west list`." % query)
        except Exception as e:
            log.err(str(e))
def main(argv=None):
    # Makes ANSI color escapes work on Windows, and strips them when
    # stdout/stderr isn't a terminal
    colorama.init()

    # See if we're in an installation.
    try:
        topdir = west_topdir()
    except WestNotFound:
        topdir = None

    # Read the configuration files before looking for extensions.
    # We need this to find the manifest path in order to load extensions.
    config.read_config()

    # Load any extension command specs if we're in an installation.
    if topdir:
        try:
            extensions = get_extension_commands()
        except (MalformedConfig, FileNotFoundError):
            extensions = {}
    else:
        extensions = {}

    if argv is None:
        argv = sys.argv[1:]
    args, unknown = parse_args(argv, extensions, topdir)

    for_stack_trace = 'run as "west -v {}" for a stack trace'.format(
        quote_sh_list(argv))
    try:
        args.handler(args, unknown)
    except KeyboardInterrupt:
        sys.exit(0)
    except CalledProcessError as cpe:
        log.err('command exited with status {}: {}'.format(
            cpe.args[0], quote_sh_list(cpe.args[1])))
        if args.verbose:
            traceback.print_exc()
        else:
            log.inf(for_stack_trace)
        sys.exit(cpe.returncode)
    except ExtensionCommandError as ece:
        log.err(
            'extension command', args.command,
            'was improperly defined and could not be run{}'.format(
                ': ' + ece.hint if ece.hint else ''))
        if args.verbose:
            traceback.print_exc()
        else:
            log.inf(for_stack_trace)
        sys.exit(ece.returncode)
    except CommandContextError as cce:
        log.err('command', args.command, 'cannot be run in this context:',
                *cce.args)
        sys.exit(cce.returncode)
    except CommandError as ce:
        sys.exit(ce.returncode)
def parseReply(replyIndexPath):
    replyDir, _ = os.path.split(replyIndexPath)

    # first we need to find the codemodel reply file
    try:
        with open(replyIndexPath, 'r') as indexFile:
            js = json.load(indexFile)

            # get reply object
            reply_dict = js.get("reply", {})
            if reply_dict == {}:
                log.err("no \"reply\" field found in index file")
                return None

            # get codemodel object
            cm_dict = reply_dict.get("codemodel-v2", {})
            if cm_dict == {}:
                log.err("no \"codemodel-v2\" field found in \"reply\" object in index file")
                return None

            # and get codemodel filename
            jsonFile = cm_dict.get("jsonFile", "")
            if jsonFile == "":
                log.err("no \"jsonFile\" field found in \"codemodel-v2\" object in index file")
                return None

            return parseCodemodel(replyDir, jsonFile)
    except OSError as e:
        log.err(f"Error loading {replyIndexPath}: {str(e)}")
        return None
    except json.decoder.JSONDecodeError as e:
        log.err(f"Error parsing JSON in {replyIndexPath}: {str(e)}")
        return None
def do_run_common(command, args, unknown_args):
    # This is the main routine for all the "west flash", "west debug",
    # etc. commands.

    if args.context:
        dump_context(command, args, unknown_args)
        return

    command_name = command.name
    build_dir = get_build_dir(args)
    cache = load_cmake_cache(build_dir, args)
    board = cache['CACHED_BOARD']

    if not args.skip_rebuild:
        rebuild(command, build_dir, args)

    # Load runners.yaml.
    runners_yaml = runners_yaml_path(cache)
    runner_config = load_runners_yaml(runners_yaml, args)

    # Get a concrete ZephyrBinaryRunner subclass to use based on
    # runners.yaml and command line arguments.
    runner_cls = use_runner_cls(command, board, args, runner_config)
    runner_name = runner_cls.name()

    # Set up runner logging to delegate to west.log commands.
    logger = logging.getLogger('runners')
    logger.setLevel(LOG_LEVEL)
    logger.addHandler(WestLogHandler())

    # If the user passed -- to force the parent argument parser to stop
    # parsing, it will show up here, and needs to be filtered out.
    runner_args = [arg for arg in unknown_args if arg != '--']

    # Arguments are provided in this order to allow the specific to
    # override the general:
    #
    # - common runners.yaml arguments
    # - runner-specific runners.yaml arguments
    # - command line arguments
    final_argv = (runner_config['args']['common'] +
                  runner_config['args'][runner_name] +
                  runner_args)

    # At this point, 'args' contains parsed arguments which are both:
    #
    # 1. provided on the command line
    # 2. handled by add_parser_common()
    #
    # This doesn't include runner specific arguments on the command line or
    # anything from runners.yaml.
    #
    # We therefore have to re-parse now that we know everything,
    # including the final runner.
    parser = argparse.ArgumentParser(prog=runner_name)
    add_parser_common(command, parser=parser)
    runner_cls.add_parser(parser)
    final_args, unknown = parser.parse_known_args(args=final_argv)
    if unknown:
        log.die(f'runner {runner_name} received unknown arguments: {unknown}')

    # Create the RunnerConfig from the values assigned to common
    # arguments. This is a hacky way to go about this; probably
    # ZephyrBinaryRunner should define what it needs to make this
    # happen by itself. That would be a larger refactoring of the
    # runners package than there's time for right now, though.
    #
    # Use that RunnerConfig to create the ZephyrBinaryRunner instance
    # and call its run().
    runner = runner_cls.create(runner_cfg_from_args(final_args, build_dir),
                               final_args)
    try:
        runner.run(command_name)
    except ValueError as ve:
        log.err(str(ve), fatal=True)
        dump_traceback()
        raise CommandError(1)
    except MissingProgram as e:
        log.die('required program', e.filename,
                'not found; install it or add its location to PATH')
    except RuntimeError as re:
        if not args.verbose:
            log.die(re)
        else:
            log.err('verbose mode enabled, dumping stack:', fatal=True)
            raise
def do_run_common(command, user_args, user_runner_args):
    # This is the main routine for all the "west flash", "west debug",
    # etc. commands.

    if user_args.context:
        dump_context(command, user_args, user_runner_args)
        return

    command_name = command.name
    build_dir = get_build_dir(user_args)
    cache = load_cmake_cache(build_dir, user_args)
    board = cache['CACHED_BOARD']

    if not user_args.skip_rebuild:
        rebuild(command, build_dir, user_args)

    # Load runners.yaml.
    yaml_path = runners_yaml_path(build_dir, board)
    runners_yaml = load_runners_yaml(yaml_path)

    # Get a concrete ZephyrBinaryRunner subclass to use based on
    # runners.yaml and command line arguments.
    runner_cls = use_runner_cls(command, board, user_args, runners_yaml,
                                cache)
    runner_name = runner_cls.name()

    # Set up runner logging to delegate to west.log commands.
    logger = logging.getLogger('runners')
    logger.setLevel(LOG_LEVEL)
    logger.addHandler(WestLogHandler())

    # If the user passed -- to force the parent argument parser to stop
    # parsing, it will show up here, and needs to be filtered out.
    runner_args = [arg for arg in user_runner_args if arg != '--']

    # Arguments in this order to allow specific to override general:
    #
    # - runner-specific runners.yaml arguments
    # - user-provided command line arguments
    final_argv = runners_yaml['args'][runner_name] + runner_args

    # 'user_args' contains parsed arguments which are:
    #
    # 1. provided on the command line, and
    # 2. handled by add_parser_common(), and
    # 3. *not* runner-specific
    #
    # 'final_argv' contains unparsed arguments from either:
    #
    # 1. runners.yaml, or
    # 2. the command line
    #
    # We next have to:
    #
    # - parse 'final_argv' now that we have all the command line
    #   arguments
    # - create a RunnerConfig using 'user_args' and the result
    #   of parsing 'final_argv'
    parser = argparse.ArgumentParser(prog=runner_name)
    add_parser_common(command, parser=parser)
    runner_cls.add_parser(parser)
    args, unknown = parser.parse_known_args(args=final_argv)
    if unknown:
        log.die(f'runner {runner_name} received unknown arguments: {unknown}')

    # Override args with any user_args. The latter must take
    # precedence, or e.g. --hex-file on the command line would be
    # ignored in favor of a board.cmake setting.
    for a, v in vars(user_args).items():
        if v is not None:
            setattr(args, a, v)

    # Create the RunnerConfig from runners.yaml and any command line
    # overrides.
    runner_config = get_runner_config(build_dir, yaml_path, runners_yaml,
                                      args)
    log.dbg(f'runner_config: {runner_config}', level=log.VERBOSE_VERY)

    # Use that RunnerConfig to create the ZephyrBinaryRunner instance
    # and call its run().
    try:
        runner = runner_cls.create(runner_config, args)
        runner.run(command_name)
    except ValueError as ve:
        log.err(str(ve), fatal=True)
        dump_traceback()
        raise CommandError(1)
    except MissingProgram as e:
        log.die('required program', e.filename,
                'not found; install it or add its location to PATH')
    except RuntimeError as re:
        if not user_args.verbose:
            log.die(re)
        else:
            log.err('verbose mode enabled, dumping stack:', fatal=True)
            raise