def internal_server_error(exc):
    """Adjust 500 page to be consistent with errors reported back from API."""
    if _REPORT_EXCEPTIONS:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        return (
            jsonify(
                {
                    "error": "Internal server error occurred",
                    "details": {
                        "type": exc.__class__.__name__,
                        "datetime": datetime2datetime_str(datetime.utcnow()),
                        "exception": {
                            "cause": exc.__cause__,
                            "context": exc.__context__,
                            "traceback": traceback.format_exception(exc_type, exc_value, exc_traceback),
                            "args": list(exc.args),
                        },
                    },
                }
            ),
            500,
        )

    # Provide some additional information so we can easily find exceptions in logs (time and exception type).
    # Later we should remove exception type (for security reasons).
    return (
        jsonify(
            {
                "error": "Internal server error occurred, please contact administrator with provided details.",
                "details": {"type": exc.__class__.__name__, "datetime": datetime2datetime_str(datetime.utcnow())},
            }
        ),
        500,
    )
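# A minimal sketch (not part of the original module) of how such a handler is typically
# wired into a Flask application; the `app` object below is an assumption, while
# `register_error_handler` is standard Flask API.
from flask import Flask

app = Flask(__name__)
# Route any unhandled 500 responses through the JSON error handler defined above.
app.register_error_handler(500, internal_server_error)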
def print_command_result(click_ctx: click.core.Command, result: typing.Union[dict, list], analyzer: str,
                         analyzer_version: str, output: str = None, duration: float = None,
                         pretty: bool = True, dry_run: bool = False) -> None:
    """Print or submit results, nicely if requested."""
    metadata = {
        'analyzer': analyzer,
        'datetime': datetime2datetime_str(datetime.datetime.utcnow()),
        'timestamp': int(time.time()),
        'hostname': platform.node(),
        'analyzer_version': analyzer_version,
        'distribution': distro.info(),
        'arguments': _get_click_arguments(click_ctx),
        'duration': int(duration) if duration is not None else None,
        'python': {
            'major': sys.version_info.major,
            'minor': sys.version_info.minor,
            'micro': sys.version_info.micro,
            'releaselevel': sys.version_info.releaselevel,
            'serial': sys.version_info.serial,
            'api_version': sys.api_version,
            'implementation_name': sys.implementation.name
        }
    }

    content = {'result': result, 'metadata': metadata}

    if dry_run:
        _LOG.info("Printing results to log")
        _LOG.info(content)
        return

    if isinstance(output, str) and output.startswith(('http://', 'https://')):
        _LOG.info("Submitting results to %r", output)
        response = requests.post(output, json=content)
        response.raise_for_status()
        _LOG.info("Successfully submitted results to %r, response: %s", output, response.json())  # Ignore PycodestyleBear (E501)
        return

    kwargs = {}
    if pretty:
        kwargs['sort_keys'] = True
        kwargs['separators'] = (',', ': ')
        kwargs['indent'] = 2

    content = json.dumps(content, **kwargs, cls=SafeJSONEncoder)
    if output is None or output == '-':
        sys.stdout.write(content)
    else:
        _LOG.info("Writing results to %r", output)
        with open(output, 'w') as output_file:
            output_file.write(content)
def internal_server_error(exc):
    """Adjust 500 page to be consistent with errors reported back from API."""
    # Provide some additional information so we can easily find exceptions in logs (time and exception type).
    # Later we should remove exception type (for security reasons).
    return (
        jsonify(
            {
                "error": "Internal server error occurred, please contact administrator with provided details.",
                "details": {"type": exc.__class__.__name__, "datetime": datetime2datetime_str(datetime.utcnow())},
            }
        ),
        500,
    )
def post_inspection(specification: dict) -> tuple:
    """Create new inspection for the given software stack."""
    # Generate first Dockerfile so we do not end up with an empty imagestream if Dockerfile creation fails.
    dockerfile, run_job_or_error = _do_create_dockerfile(specification)
    if dockerfile is None:
        return (
            {
                "parameters": specification,
                # If no Dockerfile is produced, run_job_or_error holds the error message.
                "error": run_job_or_error,
            },
            400,
        )

    run_job = run_job_or_error

    specification = _parse_specification(specification)
    _adjust_default_requests(specification["run"])
    _adjust_default_requests(specification["build"])
    parameters, use_hw_template = _construct_parameters_dict(specification.get("build", {}))

    # Mark this for later use - in get_inspection_specification().
    specification["@created"] = datetime2datetime_str()

    target = "inspection-run-result" if run_job else "inspection-build"
    dockerfile = dockerfile.replace("'", "''")

    workflow_id = _OPENSHIFT.schedule_inspection(
        dockerfile=dockerfile, specification=specification, target=target, parameters=parameters
    )
    # The scheduled workflow name doubles as the inspection identifier.
    inspection_id = workflow_id

    # TODO: Check whether the workflow spec has been resolved successfully
    # The resolution happens on the server side, therefore even if the WF
    # is submitted successfully, it may fail due to an invalid spec later on
    return (
        {
            "inspection_id": inspection_id,
            "parameters": specification,
            "workflow_id": workflow_id,
            "workflow_target": target,
        },
        202,
    )
def post_inspection(specification: dict) -> tuple:
    """Create new inspection for the given software stack."""
    from amun.entrypoint import __service_version__ as __service_version__

    # Generate first Dockerfile so we do not end up with an empty imagestream if Dockerfile creation fails.
    dockerfile, run_job_or_error = _do_create_dockerfile(specification)
    if dockerfile is None:
        return (
            {
                "parameters": specification,
                # If no Dockerfile is produced, run_job_or_error holds the error message.
                "error": run_job_or_error,
            },
            400,
        )

    run_job = run_job_or_error

    if "build" not in specification:
        specification["build"] = {}

    if "run" not in specification:
        specification["run"] = {}

    _adjust_default_requests(specification["run"])
    _adjust_default_requests(specification["build"])

    # Mark this for later use - in get_inspection_specification().
    specification["@created"] = datetime2datetime_str()
    specification["@amun_service_version"] = __service_version__
    specification["@amun_api_url"] = _AMUN_API_URL
    specification["@amun_deployment_name"] = _AMUN_DEPLOYMENT_NAME

    # Without escaped characters, as retrieved on endpoint with defaults.
    raw_specification = copy.deepcopy(specification)

    if "batch_size" in specification:
        # Convert to a string due to serialization when submitting to Argo Workflows.
        specification["batch_size"] = str(specification["batch_size"])
    else:
        specification["batch_size"] = "1"
        raw_specification["batch_size"] = 1

    specification = _parse_specification(specification)
    parameters, use_hw_template = _construct_parameters_dict(specification.get("build", {}))

    target = "inspection-run-result" if run_job else "inspection-build"
    dockerfile = dockerfile.replace("'", "''")

    inspection_id = _OPENSHIFT.schedule_inspection(
        dockerfile=dockerfile,
        specification=specification,
        target=target,
        parameters=parameters,
        raw_specification=raw_specification,
    )

    # TODO: Check whether the workflow spec has been resolved successfully
    # The resolution happens on the server side, therefore even if the WF
    # is submitted successfully, it may fail due to an invalid spec later on
    return (
        {"inspection_id": inspection_id, "parameters": raw_specification},
        202,
    )
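# A hypothetical client-side sketch of submitting an inspection specification to the
# endpoint backed by post_inspection(); the URL and the payload fields shown are
# illustrative assumptions, not the authoritative Amun API schema.
import requests

specification = {
    "base": "fedora:33",          # assumed base image for the inspection build
    "batch_size": 2,              # accepted as an integer; converted to a string internally
    "python": {"requirements": {}, "requirements_locked": {}},
}

response = requests.post("https://amun.example.com/api/v1/inspect", json=specification)
response.raise_for_status()
# On success (HTTP 202) the response carries the inspection id and the resolved parameters.
print(response.json()["inspection_id"])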
def cli(advisory_db: str) -> None:
    """Aggregate CVE information and store it in Thoth's database."""
    cve_timestamp = datetime.utcnow()

    with tempfile.TemporaryDirectory() as repo_dir:
        Repo.clone_from(advisory_db, repo_dir, depth=1)
        vulnerabilities_dir = os.path.join(repo_dir, "vulns")

        cve_messages_sent = 0
        for package_name in os.listdir(vulnerabilities_dir):
            if package_name.startswith("."):
                continue

            _LOGGER.info("Parsing vulnerabilities for %r", package_name)
            for vulnerability_file in os.listdir(os.path.join(vulnerabilities_dir, package_name)):
                _LOGGER.info("Loading vulnerability file %r", vulnerability_file)
                vulnerability_file_path = os.path.join(vulnerabilities_dir, package_name, vulnerability_file)
                try:
                    with open(vulnerability_file_path) as f:
                        vulnerability_file_content = yaml.safe_load(f)
                except Exception:
                    _LOGGER.exception(
                        "Failed to parse vulnerability file %r, skipping...",
                        vulnerability_file_path,
                    )
                else:
                    try:
                        cve_messages_sent += _record_vulnerability(
                            _GRAPH_DB, vulnerability_file_content, cve_messages_sent
                        )
                    except Exception:
                        _LOGGER.exception(
                            "Failed to record vulnerability from file %r, skipping...",
                            vulnerability_file_path,
                        )

    _METRIC_MESSSAGES_SENT.labels(
        message_type=cve_provided_message.topic_name,
        env=THOTH_DEPLOYMENT_NAME,
        version=__component_version__,
    ).inc(cve_messages_sent)

    if _THOTH_METRICS_PUSHGATEWAY_URL:
        try:
            _LOGGER.debug(
                "Submitting metrics to Prometheus pushgateway %s",
                _THOTH_METRICS_PUSHGATEWAY_URL,
            )
            push_to_gateway(
                _THOTH_METRICS_PUSHGATEWAY_URL,
                job="cve-update",
                registry=prometheus_registry,
            )
        except Exception as e:
            _LOGGER.exception(f"An error occurred pushing the metrics: {str(e)}")

    _LOGGER.info("Flushing pending messages")
    _PRODUCER.flush()

    _LOGGER.info("Setting CVE timestamp to %r", datetime2datetime_str(cve_timestamp))
    _GRAPH_DB.set_cve_timestamp(cve_timestamp)
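# A hypothetical direct invocation of the aggregation routine above; in the original
# module `cli` is most likely exposed via Click options (decorators are not shown here),
# and the PyPA advisory-database URL is an assumed example input.
if __name__ == "__main__":
    cli("https://github.com/pypa/advisory-database")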
def print_command_result(
    click_ctx: click.core.Command,
    result: typing.Union[dict, list],
    analyzer: str,
    analyzer_version: str,
    output: str = None,
    duration: float = None,
    pretty: bool = True,
    dry_run: bool = False,
) -> None:
    """Print or submit results, nicely if requested."""
    metadata = {
        "analyzer": analyzer,
        "datetime": datetime2datetime_str(datetime.datetime.utcnow()),
        "document_id": os.getenv("THOTH_DOCUMENT_ID"),
        "timestamp": int(time.time()),
        "hostname": platform.node(),
        "analyzer_version": analyzer_version,
        "distribution": distro.info(),
        "arguments": _get_click_arguments(click_ctx),
        "duration": int(duration) if duration is not None else None,
        "python": {
            "major": sys.version_info.major,
            "minor": sys.version_info.minor,
            "micro": sys.version_info.micro,
            "releaselevel": sys.version_info.releaselevel,
            "serial": sys.version_info.serial,
            "api_version": sys.api_version,
            "implementation_name": sys.implementation.name,
        },
        "os_release": _gather_os_release(),
        "thoth_deployment_name": os.getenv("THOTH_DEPLOYMENT_NAME"),
    }

    content = {"result": result, "metadata": metadata}

    if dry_run:
        _LOG.info("Printing results to log")
        _LOG.info(content)
        return

    if isinstance(output, str) and output.startswith(("http://", "https://")):
        _LOG.info("Submitting results to %r", output)
        response = requests.post(output, json=content)
        response.raise_for_status()
        _LOG.info(
            "Successfully submitted results to %r, response: %s",
            output,
            response.json(),
        )  # Ignore PycodestyleBear (E501)
        return

    kwargs = {}
    if pretty:
        kwargs["sort_keys"] = True
        kwargs["separators"] = (",", ": ")
        kwargs["indent"] = 2

    content = json.dumps(content, **kwargs, cls=SafeJSONEncoder)
    if output is None or output == "-":
        sys.stdout.write(content)
    else:
        _LOG.info("Writing results to %r", output)
        with open(output, "w") as output_file:
            output_file.write(content)
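# A hypothetical usage sketch for print_command_result() from a Click-based analyzer
# entrypoint; the command name, option, analyzer name/version and the produced result are
# illustrative, and passing the Click context as `click_ctx` is an assumption here.
import time
import click

@click.command()
@click.option("--output", type=str, default="-", help="File path, URL, or '-' for stdout.")
@click.pass_context
def analyze(click_ctx, output: str) -> None:
    start = time.monotonic()
    result = {"packages": []}  # placeholder analyzer output
    print_command_result(
        click_ctx,
        result,
        analyzer="example-analyzer",
        analyzer_version="0.1.0",
        output=output,
        duration=time.monotonic() - start,
    )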