def set_failure(message, *args):
    """Flag the whole plug-in run as failed, logging *message*.

    :param str message: the message to log
    :param list[*] args: arguments of the `message` format string
    """
    # Expand the %-format first, then delegate to the console helper with
    # this module's name as the message prefix.
    formatted = message % args
    Console.set_failure(formatted, prefix=MODULE)
def warn(message, *args):
    """Log a warning message, prefixed with the plug-in name.

    :param str message: the message to log
    :param list[*] args: arguments of the `message` format string
    """
    # Expand the %-format first, then delegate to the console helper with
    # this module's name as the message prefix.
    formatted = message % args
    Console.warn(formatted, prefix=MODULE)
def report(self):
    """Parse GNATmetric XML report and save data to the database.

    Returns according to the success of the analysis:

        * ``GNAThub.EXEC_SUCCESS``: transactions committed to database
        * ``GNAThub.EXEC_FAILURE``: error while parsing the xml report
    """
    # Drop any results left over from a previous run of this tool.
    self.info('clear existing results if any')
    GNAThub.Tool.clear_references(self.name)
    self.info('analyse report')
    self.tool = GNAThub.Tool(self.name)
    self.log.debug('parse XML report: %s', self.output)
    try:
        tree = ElementTree.parse(self.output)
        # Fetch all per-file entries of the GNATmetric report
        files = tree.findall('./file')
        total = len(files)
        # List of resource messages suitable for tool level bulk insertion
        resources_messages = []
        for index, node in enumerate(files, start=1):
            resource = GNAThub.Resource.get(node.attrib.get('name'))
            # Save file level metrics; skip files unknown to the database
            if not resource:
                self.warn('skip "%s" message (file not found)'
                          % node.attrib.get('name'))
                continue
            # NOTE(review): `firstunit` looks like parser state consumed by
            # parse_units — confirm against that method.
            self.firstunit = True
            resources_messages.append([resource, self.parse_metrics(node)])
            # Unit-level metrics are inserted immediately; file-level
            # metrics are accumulated for one bulk insert below.
            self.tool.add_messages([], self.parse_units(node, resource))
            Console.progress(index, total, new_line=(index == total))
        # Retrieve the project-wide metrics (attached to the project
        # resource rather than to an individual source file).
        resource = GNAThub.Resource(GNAThub.Project.name(),
                                    GNAThub.PROJECT_KIND)
        resources_messages.append([resource, self.parse_metrics(tree)])
        self.tool.add_messages(resources_messages, [])
    except ParseError as why:
        # Malformed XML: report the offending location and bail out.
        self.log.exception('failed to parse XML report')
        self.error('%s (%s:%s)' % (why, why.filename, why.lineno))
        return GNAThub.EXEC_FAILURE
    else:
        return GNAThub.EXEC_SUCCESS
def report(self):
    """Parse GNATcheck output file report.

    Returns according to the success of the analysis:

        * ``GNAThub.EXEC_SUCCESS``: on successful execution and analysis
        * ``GNAThub.EXEC_FAILURE``: on any error

    Identify two type of messages with different format:

        * basic message
        * message for package instantiation
    """
    # Drop any results left over from a previous run of this tool.
    self.info('clear existing results if any')
    GNAThub.Tool.clear_references(self.name)
    self.info('analyse report')
    self.tool = GNAThub.Tool(self.name)
    self.log.debug('parse report: %s', self.output)
    if not os.path.exists(self.output):
        self.error('no report found')
        return GNAThub.EXEC_FAILURE
    # BUG FIX: bind `total` before the try block. It was previously only
    # assigned inside the `with` body, so an IOError raised by open()
    # made the handler below fail with NameError instead of reporting.
    total = 0
    try:
        with open(self.output, 'r') as output:
            lines = output.readlines()
            total = len(lines)
            for index, line in enumerate(lines, start=1):
                self.log.debug('parse line: %s', line)
                # Try the basic message format first...
                match = self._MESSAGE.match(line)
                if match:
                    self.log.debug('matched: %s', str(match.groups()))
                    self.__parse_line(match)
                else:
                    # ...then the package-instantiation message format.
                    match2 = self._MESSAGE_INST.match(line)
                    if match2:
                        self.log.debug('matched 2: %s',
                                       str(match2.groups()))
                        self.__parse_line_inst(match2)
                Console.progress(index, total, new_line=(index == total))
    except IOError as why:
        self.log.exception('failed to parse report')
        self.error('%s (%s:%d)' % (
            why, os.path.basename(self.output), total))
        return GNAThub.EXEC_FAILURE
    else:
        # Commit all parsed messages to the database in one transaction.
        self.__do_bulk_insert()
        return GNAThub.EXEC_SUCCESS
def mainloop(self):
    """Plugin main loop.

    Instantiate and execute every registered plug-in in order, recording
    each plug-in's elapsed time and success status in a backlog that is
    serialized to ``gnathub.backlog`` under the GNAThub root directory.
    A per-plug-in summary is displayed unless running in dry-run or
    quiet mode.
    """
    LOG.info('registered %d plugins', len(self.plugins))
    backlog = []
    # Early exit if no plug-in are scheduled to be run
    if not self.plugins:
        self.info('nothing to do')
        return
    # Execute each plug-in in order
    try:
        for cls in self.plugins:
            # BUG FIX: pre-bind both names. If cls() itself raised, the
            # `except` clause (plugin.name) and the `finally` clause
            # (plugin.exec_status) crashed with NameError.
            plugin, elapsed = None, None
            try:
                # Create a new instance
                plugin = cls()
                # Execute the plug-in
                elapsed = self.execute(plugin)
            except KeyboardInterrupt:
                raise
            except Exception as why:
                LOG.exception('plug-in execution failed')
                self.error(
                    '%s: unexpected error: %s',
                    plugin.name if plugin is not None else cls.__name__,
                    why)
            finally:
                # A plugin could not have been executed depending on
                # the command line (--runners-only/--reporters-only).
                if (plugin is not None and
                        plugin.exec_status != GNAThub.NOT_EXECUTED):
                    backlog.append((plugin.name, {
                        'time': elapsed or 0,
                        'success': (
                            plugin.exec_status == GNAThub.EXEC_SUCCESS)
                    }))
    except KeyboardInterrupt:
        self.info(os.linesep + 'Interrupt caught...')
    # Write results to file
    fname = os.path.join(GNAThub.root(), 'gnathub.backlog')
    try:
        with open(fname, 'w') as fd:
            fd.write(json.dumps(backlog))
    except IOError as why:
        LOG.exception('could not write result file %s', fname)
        self.error('%s: unexpected error: %s', fname, why)
    if not GNAThub.dry_run() and not GNAThub.quiet():
        # Display a summary
        for plugin, results in backlog:
            if results['success']:
                Console.ok(plugin)
            else:
                Console.ko(plugin)
def mainloop(self):
    """Plugin main loop.

    Instantiate and execute every registered plug-in in order, recording
    each plug-in's elapsed time and success status in a backlog that is
    serialized to ``gnathub.backlog`` under the GNAThub root directory.
    A per-plug-in summary is displayed unless running in dry-run or
    quiet mode.
    """
    LOG.info('registered %d plugins', len(self.plugins))
    backlog = []
    # Early exit if no plug-in are scheduled to be run
    if not self.plugins:
        self.info('nothing to do')
        return
    # Execute each plug-in in order
    try:
        for cls in self.plugins:
            # BUG FIX: pre-bind both names. If cls() itself raised, the
            # `except` clause (plugin.name) and the `finally` clause
            # (plugin.exec_status) crashed with NameError.
            plugin, elapsed = None, None
            try:
                # Create a new instance
                plugin = cls()
                # Execute the plug-in
                elapsed = self.execute(plugin)
            except KeyboardInterrupt:
                raise
            except Exception as why:
                LOG.exception('plug-in execution failed')
                self.error(
                    '%s: unexpected error: %s',
                    plugin.name if plugin is not None else cls.__name__,
                    why)
            finally:
                # A plugin could not have been executed depending on
                # the command line (--runners-only/--reporters-only).
                if (plugin is not None and
                        plugin.exec_status != GNAThub.NOT_EXECUTED):
                    backlog.append((plugin.name, {
                        'time': elapsed or 0,
                        'success': (
                            plugin.exec_status == GNAThub.EXEC_SUCCESS)
                    }))
    except KeyboardInterrupt:
        self.info(os.linesep + 'Interrupt caught...')
    # Write results to file
    fname = os.path.join(GNAThub.root(), 'gnathub.backlog')
    try:
        with open(fname, 'w') as fd:
            fd.write(json.dumps(backlog))
    except IOError as why:
        LOG.exception('could not write result file %s', fname)
        self.error('%s: unexpected error: %s', fname, why)
    if not GNAThub.dry_run() and not GNAThub.quiet():
        # Display a summary
        for plugin, results in backlog:
            if results['success']:
                Console.ok(plugin)
            else:
                Console.ko(plugin)
def info(self, message):
    """Display an informative message.

    :param str message: the message to display
    """
    # Route through the shared console, tagged with this plug-in's name.
    prefix = SonarScannerProperties.CONSOLE_NAME
    Console.info(message, prefix=prefix)
def report(self):
    """Parse GNATcheck output file report.

    Returns according to the success of the analysis:

        * ``GNAThub.EXEC_SUCCESS``: on successful execution and analysis
        * ``GNAThub.EXEC_FAILURE``: on any error

    Identify two type of messages with different format:

        * basic message
        * message for package instantiation
    """
    # Clear existing references only if not incremental run
    if not GNAThub.incremental():
        self.info('clear existing results if any')
        GNAThub.Tool.clear_references(self.name)
    self.info('analyse report')
    self.tool = GNAThub.Tool(self.name)
    self.log.debug('parse report: %s', self.output)
    if not os.path.exists(self.output):
        self.error('no report found')
        return GNAThub.EXEC_FAILURE
    # NOTE(review): if open() below raises IOError, `total` is unbound in
    # the handler and the error report itself fails with NameError —
    # confirm and bind total before the try block.
    try:
        with open(self.output, 'r') as output:
            lines = output.readlines()
            total = len(lines)
            # Local variables used for exemptions handling:
            # `exempted_violation` tracks whether the current report
            # section is an "Exempted" section.
            exempted_violation = False
            hide_exempted = GNAThub.gnatcheck_hide_exempted()
            # Add the tag "exempted" for GNATcheck exempted violations
            exempt_tag = GNAThub.Property('gnatcheck:exempted', 'Exempted')
            # One-line buffer: an exempted violation may be followed by a
            # justification on the next line, so insertion is deferred.
            prev_line = ""
            for index, line in enumerate(lines, start=1):
                self.log.debug('parse line: %s', line)
                # Check whether this line is a section title; section
                # titles flip the exempted/non-exempted parsing mode.
                matchTitle = self._TITLE.match(line)
                if matchTitle:
                    stitle = matchTitle.group('stitle')
                    exempted_violation = stitle in ('Exempted', 'EXEMPTED')
                # Filter out messages that occur in an exempted violation
                # section when the user asked to hide exempted messages.
                import_violation = not exempted_violation or (
                    exempted_violation and not hide_exempted)
                handle_exempted = exempted_violation and not hide_exempted
                if import_violation:
                    if handle_exempted:
                        match1 = self._MESSAGE.match(line)
                        if match1:
                            self.log.debug('matched: %s',
                                           str(match1.groups()))
                            # Store this line in order to gather next line
                            # justification if any
                            if prev_line == "":
                                prev_line = line
                            else:
                                # Second line is a new violation report:
                                # flush the buffered one without a
                                # justification, then buffer this line.
                                match_prev = self._MESSAGE.match(prev_line)
                                if match_prev:
                                    self.__parse_line_exempted(
                                        match_prev, [exempt_tag])
                                prev_line = line
                        else:
                            if prev_line != "":
                                if len(line.strip()) != 0:
                                    # Non-empty, non-violation line: treat
                                    # it as the justification for the
                                    # buffered violation.
                                    pmatch = self._MESSAGE.match(prev_line)
                                    if pmatch:
                                        self.__parse_line_exempted(
                                            pmatch, [exempt_tag],
                                            line.strip())
                                # Reset previous line value
                                prev_line = ""
                    else:
                        # Regular (non-exempted) section: try the basic
                        # message format first, then the package
                        # instantiation format.
                        match = self._MESSAGE.match(line)
                        if match:
                            self.log.debug('matched: %s',
                                           str(match.groups()))
                            self.__parse_line(match)
                        else:
                            match2 = self._MESSAGE_INST.match(line)
                            if match2:
                                self.log.debug('matched 2: %s',
                                               str(match2.groups()))
                                self.__parse_line_inst(match2)
                Console.progress(index, total, new_line=(index == total))
    except IOError as why:
        self.log.exception('failed to parse report')
        self.error('%s (%s:%d)' % (
            why, os.path.basename(self.output), total))
        return GNAThub.EXEC_FAILURE
    else:
        # Commit all parsed messages to the database in one transaction.
        self.__do_bulk_insert()
        return GNAThub.EXEC_SUCCESS
def report(self):
    """Execute CodePeer message reader and parses the output.

    Sets the exec_status property according to the success of the
    analysis:

        * ``GNAThub.EXEC_SUCCESS``: on successful execution and analysis
        * ``GNAThub.EXEC_FAILURE``: on any error
    """
    # Drop any results left over from a previous run of this tool.
    self.info('clear existing results if any')
    GNAThub.Tool.clear_references(self.name)
    # Run codepeer's msg_reader to produce the CSV report to parse.
    self.info('extract results with msg_reader')
    proc = GNAThub.Run(self.output_dir, self.__msg_reader_cmd_line(),
                       out=self.csv_report, capture_stderr=False)
    if proc.status != 0:
        return GNAThub.EXEC_FAILURE
    self.info('analyse CSV report')
    self.tool = GNAThub.Tool(self.name)
    self.log.debug('parse report: %s', self.csv_report)
    if not os.path.isfile(self.csv_report):
        self.error('no report found')
        return GNAThub.EXEC_FAILURE
    # NOTE(review): binary mode + reader.next() is Python-2-only; under
    # Python 3 csv needs a text-mode file and next(reader).
    with open(self.csv_report, 'rb') as report:
        # Compute the total number of lines for progress report (-1 because
        # the first line in irrelevant to the analysis).
        index, total = 0, len(report.readlines()) - 1
        # Reset the read cursor to the first byte
        report.seek(0)
        # Properties used to tag each message with its history status.
        added_tag = GNAThub.Property('codepeer:added', 'Added')
        removed_tag = GNAThub.Property('codepeer:removed', 'Removed')
        unchanged_tag = GNAThub.Property('codepeer:unchanged', 'Unchanged')
        try:
            reader = csv.reader(report, quotechar='\"')
            # Drop the first line (containing the columns name)
            header = reader.next()
            self.log.debug('drop header line: %s', header)
            # Iterate over each relevant record
            for index, record in enumerate(reader, start=1):
                self.log.debug('parse record: %r', record)
                # Each row is a list of strings:
                #
                #   File, Line, Column, Category, History, Has_Review,
                #   Ranking, Kind, Message, Classification, CWE, Checks,
                #   Primary_Checks, Subp, Timestamp, Approved By, Comment,
                #   Message_Id
                (source, line, column, rule, history, has_review, severity,
                 category, message, classification, cwe, checks, pchecks,
                 subp, timestamp, app_by, comment, message_id) = record[:18]
                if not severity or severity == 'suppressed':
                    # Some versions of codepeer report an empty severity
                    # for suppressed messages: map this to 'info'.
                    severity = 'info'
                rule_id = rule.lower()
                # Tag the message according to its history column.
                self.__add_message(source, line, column, rule_id, message,
                                   severity, message_id, [
                                       added_tag if history == 'added' else
                                       (removed_tag
                                        if history == 'removed'
                                        else unchanged_tag)
                                   ])
                # Throttle progress output to every 100 records.
                if index % 100 == 1 or index == total:
                    Console.progress(index, total,
                                     new_line=(index == total))
        except csv.Error as why:
            self.log.exception('failed to parse CSV report')
            self.error('%s (%s:%d)' % (why,
                                       os.path.basename(self.csv_report),
                                       index))
            return GNAThub.EXEC_FAILURE
        else:
            # Commit all parsed messages in one transaction.
            self.__do_bulk_insert()
            return GNAThub.EXEC_SUCCESS
LOG = logging.getLogger(MODULE) # Define default path to server.py script DEFAULT_SCRIPT_PATH = GNAThub.engine_repository() SCRIPT_NAME = 'server.py' # Default port value DEFAULT_PORT = 8080 # Determine script path and check is different of default value script_path = DEFAULT_SCRIPT_PATH # TO DO : Add handling when path is given via --server-dir if not os.path.exists(script_path): repo_msg = script_path + ' repository does not exist' Console.error(repo_msg, prefix=MODULE) else: msg = 'load script from ' + script_path + ' repository' Console.info(msg, prefix=MODULE) # Build server script full path server_script_path = os.path.join(script_path, SCRIPT_NAME) if os.path.exists(server_script_path): if os.path.isfile(server_script_path): try: port = DEFAULT_PORT if GNAThub.port(): port = GNAThub.port() msg_exec = 'execute ' + SCRIPT_NAME msg_exec = msg_exec + ' (PORT NUMBER: ' + str(port) + ')'
def report(self):
    """Analyse the report files generated by :program:`Gcov`.

    Finds all .gcov files in the object directory and parses them.

    Sets the exec_status property according to the success of the
    analysis:

        * ``GNAThub.EXEC_SUCCESS``: on successful execution and analysis
        * ``GNAThub.EXEC_FAILURE``: on any error
    """
    # Drop any results left over from a previous run of this tool.
    self.log.info('clear existing results if any')
    GNAThub.Tool.clear_references(self.name)
    self.info('parse coverage reports (%s)' % self.GCOV_EXT)
    # Handle multiple object directories
    if GNAThub.Project.object_dirs():
        # If there are object directories defined in the project tree, look
        # for .gcov files there.
        files = []
        for obj_dir in GNAThub.Project.object_dirs():
            # Fetch all files in project object directories and retrieve
            # only .gcov files, absolute path
            for filename in os.listdir(obj_dir):
                if filename.endswith(self.GCOV_EXT):
                    files.append(os.path.join(obj_dir, filename))
    else:
        # If any object directory is defined in .gpr, fetch all files in
        # default project object directory and retrieve only .gcov files,
        # absolute path
        files = [
            os.path.join(GNAThub.Project.object_dir(), filename)
            for filename in os.listdir(GNAThub.Project.object_dir())
            if filename.endswith(self.GCOV_EXT)
        ]
    # If no .gcov file found, plugin returns on failure
    if not files:
        self.error('no %s file in object directory' % self.GCOV_EXT)
        return GNAThub.EXEC_FAILURE
    self.tool = GNAThub.Tool(self.name)
    # All coverage messages are attached to a single "coverage" rule.
    self.rule = GNAThub.Rule('coverage', 'coverage', GNAThub.METRIC_KIND,
                             self.tool)
    total = len(files)
    # List of resource messages suitable for tool level bulk insertion
    resources_messages = []
    try:
        for index, filename in enumerate(files, start=1):
            # Retrieve source fullname (`filename` is the *.gcov report
            # file); its basename minus the extension is the source name.
            base, _ = os.path.splitext(os.path.basename(filename))
            src = GNAThub.Project.source_file(base)
            resource = GNAThub.Resource.get(src)
            # Silently skip reports whose source is not in the database.
            if resource:
                self.__process_file(resource, filename, resources_messages)
            Console.progress(index, total, new_line=(index == total))
        # Tool level insert for resources messages
        self.tool.add_messages(resources_messages, [])
    except IOError as why:
        self.log.exception('failed to parse reports')
        self.error(str(why))
        return GNAThub.EXEC_FAILURE
    else:
        return GNAThub.EXEC_SUCCESS
def report(self):
    """Generate JSON-encoded representation of the data collected.

    Copies the generic web application next to the generated data, then
    writes one JSON file per project source plus the message, filter and
    code indexes used by the HTML report front-end.

    Returns ``GNAThub.EXEC_SUCCESS`` on success, ``GNAThub.EXEC_FAILURE``
    on any I/O error.
    """
    # The output directory for the JSON-encoded report data
    data_output_dir = os.path.join(self.output_dir, 'data')
    data_src_output_dir = os.path.join(data_output_dir, 'src')
    try:
        self.info('generate JSON-encoded report')
        # Create directory structure if needed
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        else:
            self.log.warn('%s: already exists', self.output_dir)
            self.log.warn('existing report may be overriden')
        # Copy the generic web application files; directories are
        # replaced wholesale (remove then copy) to avoid stale content.
        for entry in os.listdir(self.webapp_dir):
            path = os.path.join(self.webapp_dir, entry)
            dest = os.path.join(self.output_dir, entry)
            if os.path.isdir(path):
                self.log.debug('rm -r "%s"', dest)
                if os.path.isdir(dest):
                    rmtree(dest)
                self.log.debug('cp -r "%s" "%s"', path, dest)
                copytree(path, dest)
            else:
                self.log.debug('cp "%s" "%s"', path, dest)
                copy2(path, dest)
        # Create the JSON-encoded report output directory
        for directory in (data_output_dir, data_src_output_dir):
            if not os.path.exists(directory):
                os.makedirs(directory)
        # The report builder initially starts empty. The more sources
        # processed, the more complete the report.
        report = ReportBuilder()
        # Generate the JSON-representation of each source of the project.
        for count, source in enumerate(report.iter_sources(), start=1):
            dest = '{}.json'.format(
                os.path.join(data_src_output_dir, source.filename))
            source.save_as(dest)
            self.log.debug('%s: saved as %s', source.filename, dest)
            Console.progress(count, report.index.source_file_count, False)
        # Generate the JSON-encoded report for message navigation.
        dest = os.path.join(data_output_dir, 'message.json')
        report.index.message_to_json(dest)
        self.log.debug('message index saved as %s', dest)
        self.info('HTML report message generated in %s', dest)
        # Generate the JSON-encoded report for filter panel.
        dest = os.path.join(data_output_dir, 'filter.json')
        report.index.filter_to_json(dest)
        self.log.debug('filter index saved as %s', dest)
        self.info('HTML report filter generated in %s', dest)
        # Generate the JSON-encoded report for code navigation.
        dest = os.path.join(data_output_dir, 'code.json')
        report.index.code_to_json(dest)
        self.log.debug('code index saved as %s', dest)
        self.info('HTML report code generated in %s', dest)
    except IOError as why:
        self.log.exception('failed to generate the HTML report')
        self.error(str(why))
        return GNAThub.EXEC_FAILURE
    else:
        return GNAThub.EXEC_SUCCESS
def report(self):
    """Generate JSON-encoded representation of the data collected.

    Copies the generic web application next to the generated data, writes
    one JSON file per project source plus the message/filter/code/custom
    review indexes, and — when a codepeer object directory exists —
    exports CodePeer reviews and support files for offline mode.

    Returns ``GNAThub.EXEC_SUCCESS`` on success, ``GNAThub.EXEC_FAILURE``
    on any error.
    """
    # The output directory for the JSON-encoded report data
    data_output_dir = os.path.join(self.output_dir, 'data')
    data_src_output_dir = os.path.join(data_output_dir, 'src')
    try:
        self.info('generate JSON-encoded report')
        # Create directory structure if needed
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        else:
            self.log.warn('%s: already exists', self.output_dir)
            self.log.warn('existing report may be overriden')
        # Copy the generic web application files; directories are
        # replaced wholesale (remove then copy) to avoid stale content.
        for entry in os.listdir(self.webapp_dir):
            path = os.path.join(self.webapp_dir, entry)
            dest = os.path.join(self.output_dir, entry)
            if os.path.isdir(path):
                self.log.debug('rm -r "%s"', dest)
                if os.path.isdir(dest):
                    rmtree(dest)
                self.log.debug('cp -r "%s" "%s"', path, dest)
                copytree(path, dest)
            else:
                self.log.debug('cp "%s" "%s"', path, dest)
                copy2(path, dest)
        # Create the JSON-encoded report output directory
        for directory in (data_output_dir, data_src_output_dir):
            if not os.path.exists(directory):
                os.makedirs(directory)
        # The report builder initially starts empty. The more sources
        # processed, the more complete the report.
        report = ReportBuilder()
        # Generate the JSON-representation of each source of the project.
        for count, source in enumerate(report.iter_sources(), start=1):
            dest = '{}.json'.format(
                os.path.join(data_src_output_dir, source.filename))
            source.save_as(dest)
            self.log.debug('%s: saved as %s', source.filename, dest)
            Console.progress(count, report.index.source_file_count, False)
        # Generate the JSON-encoded report for message navigation.
        dest = os.path.join(data_output_dir, 'message.json')
        report.index.message_to_json(dest)
        self.log.debug('message index saved as %s', dest)
        self.verbose_info('HTML report message generated in ' + dest)
        # Generate the JSON-encoded report for filter panel.
        dest = os.path.join(data_output_dir, 'filter.json')
        report.index.filter_to_json(dest)
        self.log.debug('filter index saved as %s', dest)
        self.verbose_info('HTML report filter generated in ' + dest)
        # Generate the JSON-encoded report for code navigation.
        dest = os.path.join(data_output_dir, 'code.json')
        report.index.code_to_json(dest)
        self.log.debug('code index saved as %s', dest)
        self.verbose_info('HTML report code generated in ' + dest)
        # Generate the JSON-encoded report for custom review status.
        dest = os.path.join(data_output_dir, 'custom_status.json')
        report.index.custom_review_to_json(dest)
        self.log.debug('custom review status saved as %s', dest)
        self.verbose_info(
            'HTML report custom review status generated in ' + dest)
        codepeer_obj_dir = os.path.join(GNAThub.Project.object_dir(),
                                        'codepeer')
        if os.path.isdir(codepeer_obj_dir):
            # Call to codepeer_bridge for offline mode
            self.log.debug("Export info from codepeer_bridge")
            dest = os.path.join(GNAThub.Project.object_dir(),
                                'gnathub', 'html-report', 'data',
                                'codepeer_review.xml')
            name = 'codepeer_bridge'
            cmd = ['codepeer_bridge',
                   '--output-dir=' + GNAThub.output_dir(),
                   '--db-dir=' + GNAThub.db_dir(),
                   '--export-reviews=' + dest]
            self.log.debug('Codepeer_bridge file generated in %s', dest)
            self.verbose_info('Codepeer_bridge file generated in ' + dest)
            GNAThub.Run(name, cmd)
            # Get codepeer_run file
            copy_file = os.path.join(GNAThub.Project.object_dir(),
                                     'codepeer', 'codepeer_run')
            dest = os.path.join(GNAThub.Project.object_dir(),
                                'gnathub', 'html-report', 'data')
            copy2(copy_file, dest)
            self.log.debug('Codepeer_run file copied in %s', dest)
            self.verbose_info('Codepeer_run file copied in ' + dest)
            # Get race_condition file
            copy_file = os.path.join(
                GNAThub.Project.object_dir(), 'codepeer',
                GNAThub.Project.name().lower() + '.output',
                'race_conditions.xml')
            if os.path.isfile(copy_file):
                dest = os.path.join(GNAThub.Project.object_dir(),
                                    'gnathub', 'html-report', 'data')
                copy2(copy_file, dest)
                self.log.debug('%s file copied in %s', copy_file, dest)
                self.verbose_info(copy_file + ' file copied in ' + dest)
    # BUG FIX: the narrower IOError handler must precede the generic
    # Exception handler — the original order made it unreachable dead
    # code, since IOError subclasses Exception.
    except IOError as why:
        self.log.exception('failed to generate the HTML report')
        self.error(str(why))
        return GNAThub.EXEC_FAILURE
    except Exception as why:
        self.log.exception('failed to generate the HTML report')
        self.error(str(why))
        return GNAThub.EXEC_FAILURE
    else:
        return GNAThub.EXEC_SUCCESS
def error(self, message):
    """Display an error message.

    :param str message: the message to display
    """
    # Route through the shared console, tagged with this plug-in's name.
    prefix = SonarScannerProperties.CONSOLE_NAME
    Console.error(message, prefix=prefix)
LOG = logging.getLogger(MODULE) # Define default path to server.py script DEFAULT_SCRIPT_PATH = GNAThub.engine_repository() SCRIPT_NAME = 'server.py' # Default port value DEFAULT_PORT = 8080 # Determine script path and check is different of default value script_path = DEFAULT_SCRIPT_PATH # TO DO : Add handling when path is given via --server-dir if not os.path.exists(script_path): repo_msg = script_path + ' repository does not exist' Console.error(repo_msg, prefix=MODULE) else: msg = 'load script from ' + script_path + ' repository' Console.info(msg, prefix=MODULE) # Build server script full path server_script_path = os.path.join(script_path, SCRIPT_NAME) if os.path.exists(server_script_path): if os.path.isfile(server_script_path): try: port = DEFAULT_PORT if GNAThub.port(): port = GNAThub.port() msg_exec = 'execute ' + SCRIPT_NAME
def report(self):
    """Execute GNATprove message reader and parses the output.

    Sets the exec_status property according to the success of the
    analysis:

        * ``GNAThub.EXEC_SUCCESS``: on successful execution and analysis
        * ``GNAThub.EXEC_FAILURE``: on any error
    """
    # Clear existing references only if not incremental run
    if not GNAThub.incremental():
        self.info('clear existing results if any')
        GNAThub.Tool.clear_references(self.name)
    # Run gnatprove's msg_reader to produce the text report to parse.
    self.info('extract results with msg_reader')
    proc = GNAThub.Run(self.output_dir, self.__msg_reader_cmd_line(),
                       out=self.output)
    if proc.status != 0:
        return GNAThub.EXEC_FAILURE
    self.info('analyse report')
    self.tool = GNAThub.Tool(self.name)
    # Handle multiple object directories for a given project
    if GNAThub.Project.object_dirs():
        # If there are multiple object directories defined in the project
        # tree will pass here and will look for all .spark files that was
        # generated under the object dirs gnatprove's folder
        for obj_dir in GNAThub.Project.object_dirs():
            # Fetch all files in project object directories and retrieve
            # only .spark files from gnatprove folders
            gnatprove_dir = os.path.join(obj_dir, 'gnatprove')
            if os.path.isdir(gnatprove_dir):
                self.log.debug('parse report: %s', gnatprove_dir)
                self.__parse_spark_files(gnatprove_dir)
    else:
        self.log.debug('parse report: %s', self.output_dir)
        if not os.path.isdir(self.output_dir):
            self.error('no report found')
            return GNAThub.EXEC_FAILURE
        self.__parse_spark_files(self.output_dir)
    # BUG FIX: bind `total` (and `index`) before the try block. They were
    # previously only assigned inside the `with` body, so an IOError
    # raised by open() made the handler below fail with NameError.
    index, total = 0, 0
    try:
        with open(self.output, 'r') as fdin:
            # Compute the total number of lines for progress report
            lines = fdin.readlines()
            total = len(lines)
            for index, line in enumerate(lines, start=1):
                self.log.debug('parse line: %r', line)
                match = self._MESSAGE.match(line)
                if match:
                    self.log.debug('matched: %s', str(match.groups()))
                    self.__parse_line(match)
                Console.progress(index, total, new_line=(index == total))
    except IOError as why:
        self.log.exception('failed to parse GNATprove output')
        self.error('%s (%s:%d)' % (why,
                                   os.path.basename(self.output), total))
        return GNAThub.EXEC_FAILURE
    else:
        # Commit all parsed messages to the database in one transaction.
        self.__do_bulk_insert()
        return GNAThub.EXEC_SUCCESS
def report(self):
    """Execute GNATprove message reader and parses the output.

    Sets the exec_status property according to the success of the
    analysis:

        * ``GNAThub.EXEC_SUCCESS``: on successful execution and analysis
        * ``GNAThub.EXEC_FAILURE``: on any error
    """
    # Clear existing references only if not incremental run
    if not GNAThub.incremental():
        self.info('clear existing results if any')
        GNAThub.Tool.clear_references(self.name)
    # Run gnatprove's msg_reader to produce the text report to parse.
    self.info('extract results with msg_reader')
    proc = GNAThub.Run(
        self.output_dir, self.__msg_reader_cmd_line(), out=self.output)
    if proc.status != 0:
        return GNAThub.EXEC_FAILURE
    self.info('analyse report')
    self.tool = GNAThub.Tool(self.name)
    self.log.debug('parse report: %s', self.output_dir)
    if not os.path.isdir(self.output_dir):
        self.error('no report found')
        return GNAThub.EXEC_FAILURE
    # Index every flow/proof record carrying a msg_id, keyed by
    # (file, msg_id), for later correlation with report lines.
    for entry in os.listdir(self.output_dir):
        filename, ext = os.path.splitext(entry)
        if not ext == '.spark':
            continue
        self.log.debug('parse file: %s', entry)
        try:
            with open(os.path.join(self.output_dir, entry), 'rb') as spark:
                results = json.load(spark)
                for record in chain(results['flow'], results['proof']):
                    if 'msg_id' not in record or 'file' not in record:
                        continue
                    self.log.debug('found record %s', json.dumps(record))
                    msg_id = record['msg_id']
                    filename = record['file']
                    self.msg_ids[(filename, msg_id)] = record
        except IOError as why:
            self.log.exception('failed to parse GNATprove .spark file')
            # BUG FIX: the original format string '%s (%s:%d)' had three
            # placeholders but only two arguments, so the handler itself
            # raised TypeError instead of reporting the error.
            self.error('%s (%s)' % (why, os.path.basename(self.output)))
    # BUG FIX: bind `total` (and `index`) before the try block so the
    # IOError handler below cannot hit a NameError when open() fails.
    index, total = 0, 0
    try:
        with open(self.output, 'rb') as fdin:
            # Compute the total number of lines for progress report
            lines = fdin.readlines()
            total = len(lines)
            for index, line in enumerate(lines, start=1):
                self.log.debug('parse line: %r', line)
                match = self._MESSAGE.match(line)
                if match:
                    self.log.debug('matched: %s', str(match.groups()))
                    self.__parse_line(match)
                Console.progress(index, total, new_line=(index == total))
    except IOError as why:
        self.log.exception('failed to parse GNATprove output')
        self.error('%s (%s:%d)' % (
            why, os.path.basename(self.output), total))
        return GNAThub.EXEC_FAILURE
    else:
        # Commit all parsed messages to the database in one transaction.
        self.__do_bulk_insert()
        return GNAThub.EXEC_SUCCESS
def report(self):
    """Execute CodePeer message reader and parses the output.

    Sets the exec_status property according to the success of the
    analysis:

        * ``GNAThub.EXEC_SUCCESS``: on successful execution and analysis
        * ``GNAThub.EXEC_FAILURE``: on any error
    """
    # Clear existing references only if not incremental run
    if not GNAThub.incremental():
        self.info('clear existing results if any')
        GNAThub.Tool.clear_references(self.name)
    # Run codepeer's msg_reader to produce the CSV report to parse.
    self.info('extract results with msg_reader to %s' % self.csv_report)
    proc = GNAThub.Run(
        self.output_dir, self.__msg_reader_cmd_line(self.csv_report))
    if proc.status != 0:
        return GNAThub.EXEC_FAILURE
    # BUG FIX: message typo — was 'analyse CSV report form %s'.
    self.info('analyse CSV report from %s' % self.csv_report)
    self.tool = GNAThub.Tool(self.name)
    self.log.debug('parse report: %s', self.csv_report)
    if not os.path.isfile(self.csv_report):
        self.error('no report found')
        return GNAThub.EXEC_FAILURE
    # NOTE(review): binary mode + reader.next() is Python-2-only; under
    # Python 3 csv needs a text-mode file and next(reader).
    with open(self.csv_report, 'rb') as report:
        # Compute the total number of lines for progress report (-1 because
        # the first line in irrelevant to the analysis).
        index, total = 0, len(report.readlines()) - 1
        # Reset the read cursor to the first byte
        report.seek(0)
        # Properties used to tag each message with its history status.
        added_tag = GNAThub.Property('codepeer:added', 'Added')
        removed_tag = GNAThub.Property('codepeer:removed', 'Removed')
        unchanged_tag = GNAThub.Property('codepeer:unchanged', 'Unchanged')
        try:
            reader = csv.reader(report, quotechar='\"')
            # Drop the first line (containing the columns name)
            header = reader.next()
            self.log.debug('drop header line: %s', header)
            # Iterate over each relevant record
            for index, record in enumerate(reader, start=1):
                self.log.debug('parse record: %r', record)
                # Each row is a list of strings:
                #
                #   File, Line, Column, Category, History, Has_Review,
                #   Ranking, Kind, Message, Classification, CWE, Checks,
                #   Primary_Checks, Subp, Timestamp, Approved By, Comment,
                #   Message_Id
                (
                    source, line, column, rule, history, has_review,
                    severity, category, message, classification, cwe,
                    checks, pchecks, subp, timestamp, app_by, comment,
                    message_id
                ) = record[:18]
                if not severity or severity == 'suppressed':
                    # Some versions of codepeer report an empty severity
                    # for suppressed messages: map this to 'info'.
                    severity = 'info'
                rule_id = rule.lower()
                # Tag the message according to its history column.
                self.__add_message(
                    source, line, column, rule_id, message, severity,
                    message_id,
                    [added_tag if history == 'added' else
                     (removed_tag if history == 'removed'
                      else unchanged_tag)]
                )
                # Throttle progress output to every 100 records.
                if index % 100 == 1 or index == total:
                    Console.progress(
                        index, total, new_line=(index == total))
        except csv.Error as why:
            self.log.exception('failed to parse CSV report')
            self.error('%s (%s:%d)' % (
                why, os.path.basename(self.csv_report), index))
            return GNAThub.EXEC_FAILURE
        else:
            # Commit all parsed messages in one transaction.
            self.__do_bulk_insert()
            return GNAThub.EXEC_SUCCESS
def report(self):
    """Analyse the report files generated by :program:`GNATcoverage`.

    Finds all .xcov files in the object directory and parses them.

    Sets the exec_status property according to the success of the
    analysis:

        * ``GNAThub.EXEC_SUCCESS``: on successful execution and analysis
        * ``GNAThub.EXEC_FAILURE``: on any error
    """
    # Drop any results left over from a previous run of this tool.
    self.log.info('clear tool references in the database')
    GNAThub.Tool.clear_references(self.name)
    self.info('parse coverage reports (%s)' % self.GNATCOV_EXT)
    # Fetch all files in project object directory and retrieve only
    # .xcov files, absolute path
    files = [os.path.join(GNAThub.Project.object_dir(), filename)
             for filename in os.listdir(GNAThub.Project.object_dir())
             if filename.endswith(self.GNATCOV_EXT)]
    # If no .xcov file found, plugin returns on failure
    if not files:
        self.error('no %s file in object directory' % self.GNATCOV_EXT)
        return GNAThub.EXEC_FAILURE
    self.tool = GNAThub.Tool(self.name)
    # One rule per supported coverage level.
    for cov_level in ('stmt', 'decision', 'mcdc'):
        self.issue_rules[cov_level] = GNAThub.Rule(
            cov_level, cov_level, GNAThub.RULE_KIND, self.tool)
    total = len(files)
    # List of resource messages suitable for tool level bulk insertion
    resources_messages = []
    try:
        for index, filename in enumerate(files, start=1):
            # Retrieve source fullname (`filename` is the *.xcov report
            # file); its basename minus the extension is the source name.
            base, _ = os.path.splitext(os.path.basename(filename))
            src = GNAThub.Project.source_file(base)
            resource = GNAThub.Resource.get(src)
            # Silently skip reports whose source is not in the database.
            if resource:
                self.__process_file(resource, filename, resources_messages)
            Console.progress(index, total, new_line=(index == total))
        # Tool level insert for resources messages
        self.tool.add_messages(resources_messages, [])
    except (IOError, ValueError) as why:
        self.log.exception('failed to parse reports')
        self.error(str(why))
        return GNAThub.EXEC_FAILURE
    else:
        return GNAThub.EXEC_SUCCESS
def _generate_source_dirs(self, modules):
    """Generate the source directories configuration.

    Copy over all sources in a temporary directory before running the
    Sonar Scanner. This is to work around recent versions of SonarQube
    source importer implementation that looks recursively in source
    directories (which is inconsistent with GPR files semantic).

    :param modules: project modules and their associated source
        directories
    :type modules: dict[str,list[str]]
    :return: the path to the root source directory and a copy of the
        input ``modules`` directory with updated path to source
        directories (pointing to the local copy)
    :rtype: (str, dict[str,list[str]])
    """
    self.log.debug('caching source dirs prior to sonar-scanner execution')

    # Compute the total dirs count to copy to display progress
    # (progress is reported per source directory, not per file)
    count = 0
    total = sum([len(dirs) for dirs in modules.itervalues()])

    root_src_dir = SonarQube.src_cache()
    self.info('prepare source dirs for sonar-scanner')
    self.log.info(
        'copy source files from the project closure to %s',
        os.path.relpath(root_src_dir))

    # Remove any previous analysis left-over
    shutil.rmtree(root_src_dir, ignore_errors=True)

    # Maps each module name to the local copy of its root source dir;
    # ordered so the generated configuration is deterministic.
    new_modules_mapping = collections.OrderedDict()

    for module_name in modules:
        module_root_src_dir = os.path.join(root_src_dir, module_name)
        new_modules_mapping[module_name] = _escpath(module_root_src_dir)

        # Add an additional subdirectory.
        # NOTE: SonarQube uses "[root]" as the root directory name, which
        # means that when we have a flat list of source files, they all end
        # up in different "[root]" dirs in SonarQube UI. Since each
        # project can have a "[root]" directory, it makes it harder to read
        # the hierarchy. To work around that, we interpose an additional
        # "<module-name>-src" directory to make things clearer from a UI
        # point of view.
        module_root_src_dir = os.path.join(
            module_root_src_dir, module_name.lower() + '-src')

        # Create the local source directory
        if not os.path.exists(module_root_src_dir):
            os.makedirs(module_root_src_dir)

        # Compute the base directory for source dirs.
        # NOTE: The GNAT Project language allows the user to specify a list
        # of source directories that are not necessarily all under the same
        # hierarchy. Furthermore, and this is the default behavior, it
        # allows the user to specify non-recursive source directories:
        # source directories which contain sub-directories which are not
        # themselves source directories. The SonarQube model requires all
        # sources in specified source directories to be in the source
        # closure: to satisfy this requirement, we gather all sources under
        # a same tree, and do a best effort to mimic the original
        # organization of sources.
        module_src_dirs = modules[module_name]
        dirs_commonprefix = os.path.commonprefix(module_src_dirs)
        self.log.info('source dirs common prefix: %s', dirs_commonprefix)

        # Use a dict to ensure we don't have duplicated names
        src_files = {}

        # Copy each source dir content
        self.info('prepare files from module: %s' % module_name)
        for src_dir in modules[module_name]:
            # Mirror the directory layout relative to the common prefix
            # under the module's local source root.
            src_dir_relpath = os.path.relpath(src_dir, dirs_commonprefix)
            src_dir_path = os.path.join(
                module_root_src_dir, src_dir_relpath)
            self.log.info(' + %s' % src_dir_path)
            if not os.path.exists(src_dir_path):
                os.makedirs(src_dir_path)

            # Copy over all files (sub-directories are skipped: GPR
            # source dirs are non-recursive by default)
            for entry in os.listdir(src_dir):
                entry_path = os.path.join(src_dir, entry)
                if not os.path.isfile(entry_path):
                    continue
                if entry in src_files:
                    # Report the collision but keep going: the last copy
                    # wins, mirroring a best-effort behavior.
                    self.error('duplicated source file: %s' % entry)
                    self.error(' + %s' % src_files[entry])
                    self.error(' + %s' % entry_path)
                src_files[entry] = entry_path
                new_path = os.path.join(src_dir_path, entry)
                self.log.debug('%s -> %s', entry_path, new_path)
                shutil.copy(entry_path, new_path)
                # Remember original -> copy mapping so results can be
                # reported against the original paths later.
                self.src_mapping[_escpath(entry_path)] = \
                    _escpath(os.path.normpath(new_path))
            count = count + 1
            Console.progress(count, total, count == total)
    return root_src_dir, new_modules_mapping
def report(self):
    """Analyse the XML report files generated by :program:`GNATcoverage`.

    Reads ``index.xml`` from the GNATcoverage output directory and
    parses every per-source file entry it references.

    Sets the exec_status property according to the success of the
    analysis:

        * ``GNAThub.EXEC_SUCCESS``: on successful execution and analysis
        * ``GNAThub.EXEC_FAILURE``: on any error
    """
    # Clear existing references only if not incremental run
    if not GNAThub.incremental():
        self.log.info('clear existing results if any')
        GNAThub.Tool.clear_references(self.name)

    # Check if gnatcoverage output folder exists
    if not os.path.exists(self.GNATCOVERAGE_OUTPUT):
        self.log.info('No gnatcoverage folder in object directory')
        return GNAThub.EXEC_FAILURE

    self.info('parse coverage reports (%s)' % self.XML_EXT)

    # The index.xml file lists every per-source coverage report
    file_path = os.path.join(self.GNATCOVERAGE_OUTPUT, 'index.xml')
    if not os.path.exists(file_path):
        self.error('no index.xml file in object directory')
        return GNAThub.EXEC_FAILURE

    # NOTE: minidom.parse raises on a malformed document and otherwise
    # always returns a truthy Document, so the former dead
    # `if not index_xml` check has been removed.
    index_xml = minidom.parse(file_path)
    files = index_xml.getElementsByTagName('file')

    self.tool = GNAThub.Tool(self.name)
    for cov_level in ('statement', 'decision', 'condition', 'coverage'):
        self.issue_rules[cov_level] = GNAThub.Rule(
            cov_level, cov_level, GNAThub.RULE_KIND, self.tool)
    total = files.length

    # List of resource messages suitable for tool level bulk insertion
    resources_messages = []
    try:
        # `node` (renamed from `file`, which shadowed the builtin) is a
        # <file> DOM element from index.xml.
        for index, node in enumerate(files, start=1):
            # Retrieve source fullname
            filename = node.attributes['name'].value
            src = GNAThub.Project.source_file(filename)
            resource = GNAThub.Resource.get(src)

            if resource:
                self.__process_file(resource, filename, resources_messages)

            Console.progress(index, total, new_line=(index == total))

        # Tool level insert for resources messages
        self.tool.add_messages(resources_messages, [])
    except (IOError, ValueError) as why:
        self.log.exception('failed to parse reports')
        self.error(str(why))
        return GNAThub.EXEC_FAILURE
    else:
        return GNAThub.EXEC_SUCCESS
def report(self):
    """Parse the GNATmetric XML report and save data to the database.

    :return: ``GNAThub.EXEC_SUCCESS`` when all transactions are
        committed to the database, ``GNAThub.EXEC_FAILURE`` on an
        error while parsing the XML report
    """
    # Clear existing references only if not incremental run
    if not GNAThub.incremental():
        self.info('clear existing results if any')
        GNAThub.Tool.clear_references(self.name)

    self.info('analyse report')
    self.tool = GNAThub.Tool(self.name)
    self.log.debug('parse XML report: %s', self.output)

    try:
        tree = ElementTree.parse(self.output)

        # Parse the config first to create the GNAThub rules
        self.parse_config(tree)

        # Fetch all files
        file_nodes = tree.findall('./file')
        total = len(file_nodes)

        # Resource-level messages collected for one bulk insertion
        resources_messages = []

        for count, node in enumerate(file_nodes, start=1):
            path = node.attrib.get('name')
            resource = GNAThub.Resource.get(path)

            if not resource:
                # No matching resource in the database: the metrics
                # cannot be attached, skip the file.
                self.warn('skip "%s" message (file not found)' % path)
                continue

            self.firstunit = True
            resources_messages.append(
                [resource, self.parse_metrics(node)])
            self.tool.add_messages([], self.parse_units(node, resource))
            Console.progress(count, total, new_line=(count == total))

        # Retrieve the project metrics
        project_resource = GNAThub.Resource(GNAThub.Project.name(),
                                            GNAThub.PROJECT_KIND)
        resources_messages.append(
            [project_resource, self.parse_metrics(tree)])
        self.tool.add_messages(resources_messages, [])
    except ParseError as why:
        self.log.exception('failed to parse XML report')
        self.error('%s (%s:%s)' % (why, why.filename, why.lineno))
        return GNAThub.EXEC_FAILURE
    else:
        return GNAThub.EXEC_SUCCESS
def report(self):
    """Analyse the report files generated by :program:`Gcov`.

    Collects every .gcov file from the project object directories and
    parses them.

    Sets the exec_status property according to the success of the
    analysis:

        * ``GNAThub.EXEC_SUCCESS``: on successful execution and analysis
        * ``GNAThub.EXEC_FAILURE``: on any error
    """
    # Clear existing references only if not incremental run
    if not GNAThub.incremental():
        self.log.info('clear existing results if any')
        GNAThub.Tool.clear_references(self.name)

    self.info('parse coverage reports (%s)' % self.GCOV_EXT)

    # Look for *.gcov reports in every object directory declared in the
    # project tree; when none is declared, fall back to the default
    # project object directory (same file set as iterating the two
    # cases separately).
    obj_dirs = (GNAThub.Project.object_dirs() or
                [GNAThub.Project.object_dir()])
    files = [
        os.path.join(obj_dir, entry)
        for obj_dir in obj_dirs
        for entry in os.listdir(obj_dir)
        if entry.endswith(self.GCOV_EXT)
    ]

    # If no .gcov file found, plugin returns on failure
    if not files:
        self.error('no %s file in object directory' % self.GCOV_EXT)
        return GNAThub.EXEC_FAILURE

    self.tool = GNAThub.Tool(self.name)
    self.rule = GNAThub.Rule('coverage', 'coverage',
                             GNAThub.METRIC_KIND, self.tool)

    total = len(files)

    # List of resource messages suitable for tool level bulk insertion
    resources_messages = []
    try:
        for count, gcov_report in enumerate(files, start=1):
            # `gcov_report` is the *.gcov file; its base name with the
            # extension stripped is the name of the covered source.
            base, _ = os.path.splitext(os.path.basename(gcov_report))
            resource = GNAThub.Resource.get(
                GNAThub.Project.source_file(base))

            if resource:
                self.__process_file(
                    resource, gcov_report, resources_messages)

            Console.progress(count, total, new_line=(count == total))

        # Tool level insert for resources messages
        self.tool.add_messages(resources_messages, [])
    except IOError as why:
        self.log.exception('failed to parse reports')
        self.error(str(why))
        return GNAThub.EXEC_FAILURE
    else:
        return GNAThub.EXEC_SUCCESS
def report(self):
    """Execute GNATprove message reader and parse the output.

    Sets the exec_status property according to the success of the
    analysis:

        * ``GNAThub.EXEC_SUCCESS``: on successful execution and analysis
        * ``GNAThub.EXEC_FAILURE``: on any error
    """
    self.info('clear existing results if any')
    GNAThub.Tool.clear_references(self.name)

    self.info('extract results with msg_reader')
    proc = GNAThub.Run(
        self.output_dir, self.__msg_reader_cmd_line(), out=self.output)

    if proc.status != 0:
        return GNAThub.EXEC_FAILURE

    self.info('analyse report')
    self.tool = GNAThub.Tool(self.name)
    self.log.debug('parse report: %s', self.output_dir)

    if not os.path.isdir(self.output_dir):
        self.error('no report found')
        return GNAThub.EXEC_FAILURE

    # Index every record from the *.spark JSON files by (file, msg_id)
    # so messages from the msg_reader output can be matched back.
    for entry in os.listdir(self.output_dir):
        _, ext = os.path.splitext(entry)
        if not ext == '.spark':
            continue
        self.log.debug('parse file: %s', entry)
        try:
            with open(os.path.join(self.output_dir, entry), 'rb') as spark:
                results = json.load(spark)
                for record in chain(results['flow'], results['proof']):
                    if 'msg_id' not in record or 'file' not in record:
                        continue
                    self.log.debug('found record %s', json.dumps(record))
                    msg_id = record['msg_id']
                    filename = record['file']
                    self.msg_ids[(filename, msg_id)] = record
        except IOError as why:
            self.log.exception('failed to parse GNATprove .spark file')
            # BUGFIX: the original format string '%s (%s:%d)' expected
            # three arguments but only two were supplied, raising a
            # TypeError from inside the error handler.
            self.error('%s (%s)' % (why, entry))

    # BUGFIX: initialized before the try block so the IOError handler
    # below can report `total` even when open() itself fails (it was
    # previously unbound in that case, raising a NameError).
    index, total = 0, 0
    try:
        with open(self.output, 'rb') as fdin:
            # Compute the total number of lines for progress report
            lines = fdin.readlines()
            total = len(lines)
            for index, line in enumerate(lines, start=1):
                self.log.debug('parse line: %r', line)
                match = self._MESSAGE.match(line)
                if match:
                    self.log.debug('matched: %s', str(match.groups()))
                    self.__parse_line(match)
                Console.progress(index, total, new_line=(index == total))
    except IOError as why:
        self.log.exception('failed to parse GNATprove output')
        self.error(
            '%s (%s:%d)' % (why, os.path.basename(self.output), total))
        return GNAThub.EXEC_FAILURE
    else:
        self.__do_bulk_insert()
        return GNAThub.EXEC_SUCCESS
def report(self):
    """Analyse the report files generated by :program:`GNATcoverage`.

    Finds all .xcov files in the object directory and parses them.

    Sets the exec_status property according to the success of the
    analysis:

        * ``GNAThub.EXEC_SUCCESS``: on successful execution and analysis
        * ``GNAThub.EXEC_FAILURE``: on any error
    """
    # Clear existing references only if not incremental run
    if not GNAThub.incremental():
        self.log.info('clear existing results if any')
        GNAThub.Tool.clear_references(self.name)

    self.info('parse coverage reports (%s)' % self.GNATCOV_EXT)

    # Gather the absolute paths of all *.xcov reports found in the
    # project object directory.
    obj_dir = GNAThub.Project.object_dir()
    xcov_reports = [os.path.join(obj_dir, name)
                    for name in os.listdir(obj_dir)
                    if name.endswith(self.GNATCOV_EXT)]

    # If no .xcov file found, plugin returns on failure
    if not xcov_reports:
        self.error('no %s file in object directory' % self.GNATCOV_EXT)
        return GNAThub.EXEC_FAILURE

    self.tool = GNAThub.Tool(self.name)
    for level in ('stmt', 'decision', 'mcdc'):
        self.issue_rules[level] = GNAThub.Rule(
            level, level, GNAThub.RULE_KIND, self.tool)

    total = len(xcov_reports)
    resources_messages = []  # accumulated for one tool-level bulk insert

    try:
        for position, path in enumerate(xcov_reports, start=1):
            # The covered source name is the report base name with the
            # *.xcov extension stripped.
            source_name = os.path.splitext(os.path.basename(path))[0]
            resource = GNAThub.Resource.get(
                GNAThub.Project.source_file(source_name))

            if resource:
                self.__process_file(resource, path, resources_messages)

            Console.progress(position, total,
                             new_line=(position == total))

        self.tool.add_messages(resources_messages, [])
    except (IOError, ValueError) as why:
        self.log.exception('failed to parse reports')
        self.error(str(why))
        return GNAThub.EXEC_FAILURE
    else:
        return GNAThub.EXEC_SUCCESS