def kotlin_build(src, reports_dir, lang_tools):
    """
    Automatically build a Kotlin project

    :param src: Source directory
    :param reports_dir: Reports directory to store any logs
    :param lang_tools: Language specific build tools

    :return: boolean status from the build. True if the command executed
             successfully. False otherwise
    """
    # Check if this is an Android Kotlin project
    gradle_kts_files = [p.as_posix() for p in Path(src).rglob("build.gradle.kts")]
    if find_files(src, "proguard-rules.pro", False, True) or find_files(
        src, "AndroidManifest.xml", False, True
    ):
        return android_build(src, reports_dir, lang_tools)
    if gradle_kts_files:
        cmd_args = get_gradle_cmd(src, lang_tools.get("gradle"))
        cp = exec_tool(
            "auto-build", cmd_args, src, env=get_env(), stdout=subprocess.PIPE
        )
        if cp:
            LOG.debug(cp.stdout)
            return cp.returncode == 0
    else:
        return java_build(src, reports_dir, lang_tools)
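# Several snippets in this section call a find_files helper that is not shown.
# The call sites suggest a signature like find_files(src, name_part,
# use_start=False, quick=False). A minimal sketch under that assumption — a
# hypothetical reconstruction, not the actual utility:
import os


def find_files(src, name_part, use_start=False, quick=False):
    """Hypothetical reconstruction based on the call sites in this section."""
    result = []
    for root, _, files in os.walk(src):
        for fname in files:
            matched = fname.startswith(name_part) if use_start else name_part in fname
            if matched:
                result.append(os.path.join(root, fname))
                if quick:
                    return result  # an existence check only needs the first hit
    return result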
def populate_www_offsets(self, tablename="www"):
    """
    :param tablename: Name of the table to store the offsets under
    :return: True once the offsets have been generated
    """
    for architecture, versions in self._available_versions.items():
        if "x86" not in architecture:
            # LOCK to x86 for now
            print(
                "Skipping architecture {0} because this version currently "
                "works with x86 only".format(architecture)
            )
        else:
            temp_table = self["www"][architecture]
            with print_progress("Generating offsets for version") as progress:
                for version in versions:
                    progress.status(version)
                    for firmware_path in find_files(
                        defines.BASE_STORAGE_PATH,
                        defines.WWW_BIN_FMT.format(architecture, version),
                    ):
                        ropper = MikroROP(firmware_path)
                        temp_table[version]["offsets"] = ropper.offsets._asdict()
                progress.success()
            self.update_nested_key(tablename, architecture, temp_table)
    return True
def populate_www_table(self, tablename="www"):
    """
    :param tablename: Name of the table to populate
    :return: True once the table has been populated
    """
    if not self._available_versions:
        self.prepare_versions()
    if tablename not in self:
        self[tablename] = dict()
    for architecture, versions in self._available_versions.items():
        if not versions:
            continue
        www_temptable = dict()
        if architecture not in self[tablename]:
            self.update_nested_key(tablename, architecture, dict())
        for version in versions:
            for shafile in find_files(
                defines.BASE_STORAGE_PATH,
                defines.WWW_BIN_SHA256_FMT.format(architecture, version),
            ):
                # The with block closes the file; no explicit close() needed
                with open(shafile) as shafile_fd:
                    sha256hash = shafile_fd.read()
                www_temptable[version] = {"sha256hash": sha256hash}
        self.update_nested_key(tablename, architecture, www_temptable)
    return True
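# update_nested_key is not defined in this section. Given how it is called
# above (first with an empty dict, then with a populated temp table), it
# plausibly merges a value into self[tablename][key] on a dict-backed store.
# A minimal sketch under that assumption; the merge semantics are guessed:
def update_nested_key(self, tablename, key, value):
    """Hypothetical sketch: merge `value` under self[tablename][key]."""
    if tablename not in self:
        self[tablename] = dict()
    existing = self[tablename].get(key)
    if isinstance(value, dict) and isinstance(existing, dict):
        existing.update(value)  # merge instead of clobbering prior entries
    else:
        self[tablename][key] = value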
def kotlin_build(src, reports_dir, lang_tools):
    """
    Automatically build a Kotlin project

    :param src: Source directory
    :param reports_dir: Reports directory to store any logs
    :param lang_tools: Language specific build tools

    :return: boolean status from the build. True if the command executed
             successfully. False otherwise
    """
    # Check if this is an Android Kotlin project
    gradle_kts_files = [p.as_posix() for p in Path(src).rglob("build.gradle.kts")]
    if (
        gradle_kts_files
        or find_files(src, "proguard-rules.pro", False, True)
        or find_files(src, "AndroidManifest.xml", False, True)
    ):
        return android_build(src, reports_dir, lang_tools)
    return java_build(src, reports_dir, lang_tools)
def test_summary():
    test_reports_dir = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "data"
    )
    test_sarif_files = utils.find_files(test_reports_dir, ".sarif")
    report_summary, build_status = analysis.summary(test_sarif_files)
    assert len(report_summary.keys()) == 7
    for k, v in report_summary.items():
        if k == "findsecbugs":
            assert v["status"] == "❌"
        elif k == "nodejsscan":
            assert v["status"] == "✅"
    assert build_status == "fail"
def test_summary_with_agg():
    test_reports_dir = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "data"
    )
    test_sarif_files = utils.find_files(test_reports_dir, ".sarif")
    with tempfile.NamedTemporaryFile(
        mode="w", encoding="utf-8", delete=False
    ) as afile:
        report_summary, build_status = analysis.summary(test_sarif_files, afile.name)
        assert len(report_summary.keys()) == 7
        afile.close()
        with open(afile.name, "r") as outfile:
            data = outfile.read()
            assert data
        os.unlink(afile.name)
def populate_www_storage(self):
    """
    :return: True once the www binaries have been extracted and stored
    """
    if not self._available_versions:
        self.prepare_versions()
    for architecture, versions in self._available_versions.items():
        if not versions:
            continue
        architecture_www_dir = self.generate_base_dir(architecture, "www")
        for version in versions:
            for firmware_path in find_files(
                defines.BASE_STORAGE_PATH,
                defines.ROS_NPK_FMT.format(architecture, version),
            ):
                filepath = os.path.join(
                    architecture_www_dir,
                    defines.WWW_BIN_FMT.format(architecture, version),
                )
                offset = (
                    defines.SQUASHFS_OFFSET
                    if architecture != "tile"
                    else defines.SQUASHFS_TILE_OFFSET
                )
                if not check_squashfs_offset(firmware_path, offset=offset):
                    raise RuntimeWarning(
                        "Unaccounted error occurred during squashfs offset validation"
                    )
                squashfs = SquashFsImage(firmware_path, offset=offset)
                www_search = [
                    www_bin.getContent()
                    for www_bin in squashfs.root.findAll()
                    if www_bin.name == b"www" and www_bin.hasAttribute(0o100000)
                ]
                if not www_search:
                    raise RuntimeWarning(
                        "Could not locate www binary for npk: {}".format(
                            firmware_path.split("/")[-1]
                        )
                    )
                write_to_file(www_search[0], filepath)
                sha256hash = sha256(www_search[0]).hexdigest()
                write_to_file(
                    "{}".format(sha256hash).encode(), "{}.sha256".format(filepath)
                )
    return True
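# check_squashfs_offset is also external to this section. A squashfs superblock
# begins with the little-endian magic bytes b"hsqs", so a plausible minimal
# check — assuming the function only validates that a squashfs filesystem
# really starts at the expected offset inside the .npk container:
SQUASHFS_MAGIC = b"hsqs"  # little-endian squashfs superblock magic


def check_squashfs_offset(firmware_path, offset=0):
    """Hypothetical sketch: verify a squashfs superblock begins at `offset`."""
    with open(firmware_path, "rb") as fd:
        fd.seek(offset)
        return fd.read(4) == SQUASHFS_MAGIC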
def test_summary_strict():
    test_reports_dir = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "data"
    )
    test_sarif_files = utils.find_files(test_reports_dir, ".sarif")
    report_summary, build_status = analysis.summary(
        test_sarif_files,
        None,
        {"max_critical": 0, "max_high": 0, "max_medium": 0, "max_low": 0},
    )
    assert len(report_summary.keys()) == 7
    for k, v in report_summary.items():
        assert v["status"] == "❌"
    assert build_status == "fail"
def test_aggregate():
    test_reports_dir = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "data"
    )
    test_sarif_files = utils.find_files(test_reports_dir, ".sarif")
    run_data_list = []
    for sf in test_sarif_files:
        with open(sf, mode="r") as report_file:
            report_data = json.loads(report_file.read())
            run_data_list += report_data["runs"]
    with tempfile.NamedTemporaryFile(
        mode="w", encoding="utf-8", delete=False
    ) as afile:
        aggregate.jsonl_aggregate(run_data_list, afile.name)
        afile.close()
        with open(afile.name, "r") as outfile:
            data = outfile.read()
            assert data
        os.unlink(afile.name)
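# aggregate.jsonl_aggregate is exercised by the test above but not shown. The
# name suggests JSON Lines output (one JSON document per line); a minimal
# sketch under that assumption:
import json


def jsonl_aggregate(run_data_list, out_file_name):
    """Hypothetical sketch: write each SARIF run as one line of JSON."""
    with open(out_file_name, mode="w") as out:
        for run in run_data_list:
            out.write(json.dumps(run) + "\n")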
def _get_statistics_file(self):
    files = utils.find_files(
        os.path.join(
            self.config.get("rdiff_backup_path", "/mnt/backup/system"),
            "rdiff-backup-data",
        ),
        "session_statistics",
    )
    return utils.determine_newest_file(files)
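# utils.determine_newest_file is not shown here. Given its use above, it
# presumably picks the most recently modified path from a list; a minimal
# sketch under that assumption:
import os


def determine_newest_file(files):
    """Hypothetical sketch: return the path with the latest mtime, or None."""
    return max(files, key=os.path.getmtime) if files else None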
def execute_default_cmd(
    cmd_map_list,
    type_str,
    tool_name,
    src,
    reports_dir,
    convert,
):
    """
    Method to execute the default command for the given type

    Args:
        cmd_map_list Default commands in the form of a dict (multiple) or list
        type_str Project type
        tool_name Tool name
        src Project dir
        reports_dir Directory for output reports
        convert Boolean to enable normalisation of reports json
    """
    # Check if there is a default command specified for the given type
    # Create the reports dir
    os.makedirs(reports_dir, exist_ok=True)
    report_fname_prefix = os.path.join(reports_dir, tool_name + "-report")
    default_cmd = " ".join(cmd_map_list) % dict(
        src=src,
        reports_dir=reports_dir,
        report_fname_prefix=report_fname_prefix,
        type=type_str,
    )
    # Try to detect if the output could be json
    outext = ".out"
    if default_cmd.find("json") > -1:
        outext = ".json"
    if default_cmd.find("csv") > -1:
        outext = ".csv"
    if default_cmd.find("sarif") > -1:
        outext = ".sarif"
    report_fname = report_fname_prefix + outext
    # If the command doesn't support file output then redirect stdout automatically
    stdout = None
    if reports_dir and default_cmd.find(report_fname_prefix) == -1:
        report_fname = report_fname_prefix + outext
        stdout = io.open(report_fname, "w")
        LOG.debug("Output will be written to {}".format(report_fname))
    # If the command is requesting a list of files then construct the argument
    filelist_prefix = "(filelist="
    if default_cmd.find(filelist_prefix) > -1:
        si = default_cmd.find(filelist_prefix)
        ei = default_cmd.find(")", si + 10)
        ext = default_cmd[si + 10 : ei]
        filelist = utils.find_files(src, ext)
        default_cmd = default_cmd.replace(
            filelist_prefix + ext + ")", " ".join(filelist)
        )
    cmd_with_args = default_cmd.split(" ")
    exec_tool(cmd_with_args, cwd=src, stdout=stdout)
    # Should we attempt to convert the report to sarif format
    if (
        convert
        and config.tool_purpose_message.get(cmd_with_args[0])
        and os.path.isfile(report_fname)
    ):
        crep_fname = utils.get_report_file(
            tool_name, reports_dir, convert, ext_name="sarif"
        )
        convertLib.convert_file(
            cmd_with_args[0],
            cmd_with_args[1:],
            src,
            report_fname,
            crep_fname,
        )
def execute_default_cmd(  # scan:ignore
    cmd_map_list,
    type_str,
    tool_name,
    src,
    reports_dir,
    convert,
    scan_mode,
    repo_context,
):
    """
    Method to execute the default command for the given type

    Args:
        cmd_map_list Default commands in the form of a dict (multiple) or list
        type_str Project type
        tool_name Tool name
        src Project dir
        reports_dir Directory for output reports
        convert Boolean to enable normalisation of reports json
        scan_mode Scan mode string
        repo_context Repo context
    """
    # Check if there is a default command specified for the given type
    report_fname_prefix = os.path.join(reports_dir, tool_name + "-report")
    # Look for any additional direct arguments for the tool and inject them
    if config.get(tool_name + "_direct_args"):
        direct_args = config.get(tool_name + "_direct_args").split(" ")
        if direct_args:
            cmd_map_list += direct_args
    src_or_file = src
    if config.get("SHIFTLEFT_ANALYZE_FILE"):
        src_or_file = config.get("SHIFTLEFT_ANALYZE_FILE")
    default_cmd = " ".join(cmd_map_list) % dict(
        src=src,
        src_or_file=src_or_file,
        reports_dir=reports_dir,
        report_fname_prefix=report_fname_prefix,
        type=type_str,
        scan_mode=scan_mode,
    )
    # Try to detect if the output could be json
    outext = ".out"
    if "json" in default_cmd:
        outext = ".json"
    elif "csv" in default_cmd:
        outext = ".csv"
    elif "sarif" in default_cmd:
        outext = ".sarif"
    elif "xml" in default_cmd:
        outext = ".xml"
    report_fname = report_fname_prefix + outext
    # If the command doesn't support file output then redirect stdout automatically
    stdout = None
    if LOG.isEnabledFor(DEBUG):
        stdout = None
    if reports_dir and report_fname_prefix not in default_cmd:
        report_fname = report_fname_prefix + outext
        stdout = io.open(report_fname, "w")
        LOG.debug("Output will be written to {}".format(report_fname))
    # If the command is requesting a list of files then construct the argument
    filelist_prefix = "(filelist="
    if default_cmd.find(filelist_prefix) > -1:
        si = default_cmd.find(filelist_prefix)
        ei = default_cmd.find(")", si + 10)
        ext = default_cmd[si + 10 : ei]
        filelist = utils.find_files(src, ext)
        # Temporary fix for the yaml issue
        if ext == "yaml":
            yml_list = utils.find_files(src, "yml")
            if yml_list:
                filelist.extend(yml_list)
        delim = " "
        default_cmd = default_cmd.replace(
            filelist_prefix + ext + ")", delim.join(filelist)
        )
    cmd_with_args = default_cmd.split(" ")
    # Suppress psalm output
    if should_suppress_output(type_str, cmd_with_args[0]):
        stdout = subprocess.DEVNULL
    exec_tool(tool_name, cmd_with_args, cwd=src, stdout=stdout)
    # Should we attempt to convert the report to sarif format
    if should_convert(convert, tool_name, cmd_with_args[0], report_fname):
        crep_fname = utils.get_report_file(
            tool_name, reports_dir, convert, ext_name="sarif"
        )
        if (
            cmd_with_args[0] == "java"
            or "pmd-bin" in cmd_with_args[0]
            or "php" in tool_name
        ):
            convertLib.convert_file(
                tool_name,
                cmd_with_args,
                src,
                report_fname,
                crep_fname,
            )
        else:
            convertLib.convert_file(
                cmd_with_args[0],
                cmd_with_args[1:],
                src,
                report_fname,
                crep_fname,
            )
        try:
            if not LOG.isEnabledFor(DEBUG):
                os.remove(report_fname)
        except Exception:
            LOG.debug("Unable to remove file {}".format(report_fname))
    elif type_str == "depscan":
        # Convert depscan and license scan files to html
        depscan_files = utils.find_files(reports_dir, "depscan", True)
        for df in depscan_files:
            if not df.endswith(".html"):
                depscan_data = grafeas.parse(df)
                if depscan_data and len(depscan_data):
                    html_fname = df.replace(".json", ".html")
                    grafeas.render_html(depscan_data, html_fname)
                    track(
                        {
                            "id": config.get("run_uuid"),
                            "depscan_summary": depscan_data,
                        }
                    )
                    LOG.debug(
                        "Depscan and HTML report written to file: %s, %s :thumbsup:",
                        df,
                        html_fname,
                    )
        licence_files = utils.find_files(reports_dir, "license", True)
        for lf in licence_files:
            if not lf.endswith(".html"):
                licence_data = licence.parse(lf)
                if licence_data and len(licence_data):
                    html_fname = lf.replace(".json", ".html")
                    licence.render_html(licence_data, html_fname)
                    track(
                        {
                            "id": config.get("run_uuid"),
                            "license_summary": licence_data,
                        }
                    )
                    LOG.debug(
                        "License check and HTML report written to file: %s, %s :thumbsup:",
                        lf,
                        html_fname,
                    )
def execute_default_cmd(
    cmd_map_list,
    type_str,
    tool_name,
    src,
    reports_dir,
    convert,
    scan_mode,
    repo_context,
):
    """
    Method to execute the default command for the given type

    Args:
        cmd_map_list Default commands in the form of a dict (multiple) or list
        type_str Project type
        tool_name Tool name
        src Project dir
        reports_dir Directory for output reports
        convert Boolean to enable normalisation of reports json
        scan_mode Scan mode string
        repo_context Repo context
    """
    # Check if there is a default command specified for the given type
    # Create the reports dir
    os.makedirs(reports_dir, exist_ok=True)
    report_fname_prefix = os.path.join(reports_dir, tool_name + "-report")
    default_cmd = " ".join(cmd_map_list) % dict(
        src=src,
        reports_dir=reports_dir,
        report_fname_prefix=report_fname_prefix,
        type=type_str,
        scan_mode=scan_mode,
    )
    # Try to detect if the output could be json
    outext = ".out"
    if default_cmd.find("json") > -1:
        outext = ".json"
    if default_cmd.find("csv") > -1:
        outext = ".csv"
    if default_cmd.find("sarif") > -1:
        outext = ".sarif"
    report_fname = report_fname_prefix + outext
    # If the command doesn't support file output then redirect stdout automatically
    stdout = None
    if reports_dir and default_cmd.find(report_fname_prefix) == -1:
        report_fname = report_fname_prefix + outext
        stdout = io.open(report_fname, "w")
        LOG.debug("Output will be written to {}".format(report_fname))
    # If the command is requesting a list of files then construct the argument
    filelist_prefix = "(filelist="
    if default_cmd.find(filelist_prefix) > -1:
        si = default_cmd.find(filelist_prefix)
        ei = default_cmd.find(")", si + 10)
        ext = default_cmd[si + 10 : ei]
        filelist = utils.find_files(src, ext)
        delim = " "
        default_cmd = default_cmd.replace(
            filelist_prefix + ext + ")", delim.join(filelist)
        )
    cmd_with_args = default_cmd.split(" ")
    exec_tool(cmd_with_args, cwd=src, stdout=stdout)
    # Should we attempt to convert the report to sarif format
    if (
        convert
        and config.tool_purpose_message.get(cmd_with_args[0])
        and os.path.isfile(report_fname)
    ):
        crep_fname = utils.get_report_file(
            tool_name, reports_dir, convert, ext_name="sarif"
        )
        convertLib.convert_file(
            cmd_with_args[0],
            cmd_with_args[1:],
            src,
            report_fname,
            crep_fname,
        )
        try:
            if not os.environ.get("SCAN_DEBUG_MODE") == "debug":
                os.remove(report_fname)
        except Exception:
            LOG.debug("Unable to remove file {}".format(report_fname))
    elif type_str == "depscan":
        # Convert depscan and license scan files to html
        depscan_files = utils.find_files(reports_dir, "depscan", True)
        for df in depscan_files:
            if not df.endswith(".html"):
                depscan_data = grafeas.parse(df)
                if depscan_data and len(depscan_data):
                    html_fname = df.replace(".json", ".html")
                    grafeas.render_html(depscan_data, html_fname)
                    track(
                        {"id": config.get("run_uuid"), "depscan_summary": depscan_data}
                    )
                    LOG.debug(
                        "Depscan and HTML report written to file: %s, %s 👍",
                        df,
                        html_fname,
                    )
        licence_files = utils.find_files(reports_dir, "license", True)
        for lf in licence_files:
            if not lf.endswith(".html"):
                licence_data = licence.parse(lf)
                if licence_data and len(licence_data):
                    html_fname = lf.replace(".json", ".html")
                    licence.render_html(licence_data, html_fname)
                    track(
                        {"id": config.get("run_uuid"), "license_summary": licence_data}
                    )
                    LOG.debug(
                        "License check and HTML report written to file: %s, %s 👍",
                        lf,
                        html_fname,
                    )
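# All three execute_default_cmd variants build the command line the same way:
# "%"-style interpolation over the joined cmd_map_list, plus the special
# "(filelist=ext)" placeholder expanded from utils.find_files. A small
# illustration with a made-up template (the tool name and paths here are
# hypothetical, not from this section):
cmd_map_list = ["sometool", "--out", "%(report_fname_prefix)s.json", "(filelist=py)"]
default_cmd = " ".join(cmd_map_list) % dict(
    src="/app",
    reports_dir="/reports",
    report_fname_prefix="/reports/sometool-report",
    type="python",
)
# default_cmd is now: "sometool --out /reports/sometool-report.json (filelist=py)"
# The "(filelist=py)" token is then replaced by the space-joined result of
# utils.find_files("/app", "py") before the command is split and executed.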
def inspect_scan(language, src, reports_dir, convert, repo_context):
    """
    Method to perform inspect cloud scan

    Args:
        language Project language
        src Project dir
        reports_dir Directory for output reports
        convert Boolean to enable normalisation of reports json
        repo_context Repo context
    """
    convert_args = []
    report_fname = utils.get_report_file(
        "inspect", reports_dir, convert, ext_name="json"
    )
    sl_cmd = config.get("SHIFTLEFT_INSPECT_CMD")
    java_target_dir = config.get("SHIFTLEFT_ANALYZE_DIR", os.path.join(src, "target"))
    jar_files = utils.find_files(java_target_dir, ".jar")
    app_name = config.get("SHIFTLEFT_APP", repo_context.get("repositoryName"))
    if not app_name:
        app_name = os.path.dirname(src)
    repository_uri = repo_context.get("repositoryUri")
    branch = repo_context.get("revisionId")
    if not jar_files:
        LOG.warning(
            "Unable to find any jar files in {}. Run mvn package or a similar "
            "command before invoking inspect scan".format(java_target_dir)
        )
        return
    if len(jar_files) > 1:
        LOG.warning(
            "Multiple jar files found in {}. Only {} will be analyzed".format(
                java_target_dir, jar_files[0]
            )
        )
    sl_args = [
        sl_cmd,
        "analyze",
        "--no-auto-update",
        "--wait",
        "--java",
        "--app",
        app_name,
    ]
    if repository_uri:
        sl_args += ["--git-remote-name", repository_uri]
    if branch:
        sl_args += ["--tag", "branch=" + branch]
    sl_args += [jar_files[0]]
    env = os.environ.copy()
    env["JAVA_HOME"] = os.environ.get("JAVA_8_HOME")
    LOG.info(
        "About to perform Inspect cloud analyze. This might take a few minutes ..."
    )
    cp = exec_tool(sl_args, src, env=env)
    if cp.returncode != 0:
        LOG.warning("Inspect cloud analyze has failed with the below logs")
        LOG.info(cp.stdout)
        LOG.info(cp.stderr)
        return
    findings_data = fetch_findings(app_name, branch, report_fname)
    if findings_data and convert:
        crep_fname = utils.get_report_file(
            "inspect", reports_dir, convert, ext_name="sarif"
        )
        convertLib.convert_file(
            "inspect",
            sl_args[1:],
            src,
            report_fname,
            crep_fname,
        )