def _import_codepeer_bridge(filename):
    """Import user reviews into the CodePeer database.

    Delegates to the ``codepeer_bridge`` executable, appending its
    output to the server log.

    :param filename: path to the file holding the reviews to import
    """
    app.logger.info("Import info into codepeer_bridge")
    bridge_cmd = [
        'codepeer_bridge',
        '--output-dir=' + OUTPUT_DIR,
        '--db-dir=' + DB_DIR,
        '--import-reviews=' + filename,
    ]
    GNAThub.Run('codepeer_bridge', bridge_cmd,
                out=SERVER_LOG, append_out=True)
def run(self):
    """Execute GNATstack.

    Returns according to the success of the execution of the tool:

        * ``GNAThub.EXEC_SUCCESS``: on successful execution
        * ``GNAThub.EXEC_FAILURE``: on any error
    """
    # GNATstack may legitimately exit with several codes; anything in
    # VALID_EXIT_CODES counts as success.
    status = GNAThub.Run(self.name, self.__cmd_line()).status
    if status in GNATstack.VALID_EXIT_CODES:
        return GNAThub.EXEC_SUCCESS
    return GNAThub.EXEC_FAILURE
def _export_codeper_bridge(filename):
    """Export user reviews from the CodePeer database.

    Delegates to the ``codepeer_bridge`` executable, appending its
    output to the server log.

    NOTE(review): the function name misspells "codepeer" ("codeper");
    kept as-is for compatibility with existing callers.

    :param filename: base name of the destination file, created under
        the html-report data directory of the project's object dir
    """
    app.logger.info("Export info from codepeer_bridge")
    target = os.path.join(
        GNAThub.Project.object_dir(),
        'gnathub', 'html-report', 'data', filename)
    bridge_cmd = [
        'codepeer_bridge',
        '--output-dir=' + OUTPUT_DIR,
        '--db-dir=' + DB_DIR,
        '--export-reviews=' + target,
    ]
    GNAThub.Run('codepeer_bridge', bridge_cmd,
                out=SERVER_LOG, append_out=True)
def run(self):
    """Execute GNATmetric.

    Returns according to the success of the execution of the tool:

        * ``GNAThub.EXEC_SUCCESS``: on successful execution
        * ``GNAThub.EXEC_FAILURE``: on any error
    """
    process = GNAThub.Run(self.name, self.__cmd_line())
    return (GNAThub.EXEC_SUCCESS if process.status == 0
            else GNAThub.EXEC_FAILURE)
def report(self):
    """Execute the SonarQube Scanner.

    Returns according to the successful of the analysis:

        * ``GNAThub.EXEC_SUCCESS``: on successful execution and analysis
        * ``GNAThub.EXEC_FAILURE``: on any error
    """
    # The scanner must run from the SonarQube working directory.
    scanner = GNAThub.Run(self.name, self.__cmd_line(),
                          workdir=SonarQube.workdir())
    if scanner.status == 0:
        return GNAThub.EXEC_SUCCESS
    return GNAThub.EXEC_FAILURE
def run(self):
    """Execute GNATprove.

    Sets the exec_status property according to the success of the
    execution of the tool:

        * ``GNAThub.EXEC_SUCCESS``: on successful execution
        * ``GNAThub.EXEC_FAILURE``: on any error
    """
    status = GNAThub.Run(self.name, self.__cmd_line()).status
    return (GNAThub.EXEC_SUCCESS if status == 0
            else GNAThub.EXEC_FAILURE)
def report(self):
    """Generate JSON-encoded representation of the data collected.

    Builds the HTML report directory tree (webapp files plus the
    JSON data files), then, when a CodePeer object directory exists,
    exports CodePeer review data for offline use.

    :return: ``GNAThub.EXEC_SUCCESS`` on success,
        ``GNAThub.EXEC_FAILURE`` on any error
    """
    # The output directory for the JSON-encoded report data
    data_output_dir = os.path.join(self.output_dir, 'data')
    data_src_output_dir = os.path.join(data_output_dir, 'src')
    try:
        self.info('generate JSON-encoded report')
        # Create directory structure if needed
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        else:
            self.log.warn('%s: already exists', self.output_dir)
            self.log.warn('existing report may be overriden')
        # Copy the generic web application files
        for entry in os.listdir(self.webapp_dir):
            path = os.path.join(self.webapp_dir, entry)
            dest = os.path.join(self.output_dir, entry)
            if os.path.isdir(path):
                self.log.debug('rm -r "%s"', dest)
                if os.path.isdir(dest):
                    rmtree(dest)
                self.log.debug('cp -r "%s" "%s"', path, dest)
                copytree(path, dest)
            else:
                self.log.debug('cp "%s" "%s"', path, dest)
                copy2(path, dest)
        # Create the JSON-encoded report output directory
        for directory in (data_output_dir, data_src_output_dir):
            if not os.path.exists(directory):
                os.makedirs(directory)
        # The report builder initially starts empty. The more sources
        # processed, the more complete the report.
        report = ReportBuilder()
        # Generate the JSON-representation of each source of the project.
        for count, source in enumerate(report.iter_sources(), start=1):
            dest = '{}.json'.format(
                os.path.join(data_src_output_dir, source.filename))
            source.save_as(dest)
            self.log.debug('%s: saved as %s', source.filename, dest)
            Console.progress(count, report.index.source_file_count, False)
        # Generate the JSON-encoded report for message navigation.
        dest = os.path.join(data_output_dir, 'message.json')
        report.index.message_to_json(dest)
        self.log.debug('message index saved as %s', dest)
        self.verbose_info('HTML report message generated in ' + dest)
        # Generate the JSON-encoded report for filter panel.
        dest = os.path.join(data_output_dir, 'filter.json')
        report.index.filter_to_json(dest)
        self.log.debug('filter index saved as %s', dest)
        self.verbose_info('HTML report filter generated in ' + dest)
        # Generate the JSON-encoded report for code navigation.
        dest = os.path.join(data_output_dir, 'code.json')
        report.index.code_to_json(dest)
        self.log.debug('code index saved as %s', dest)
        self.verbose_info('HTML report code generated in ' + dest)
        # Generate the JSON-encoded report for custom review status.
        dest = os.path.join(data_output_dir, 'custom_status.json')
        report.index.custom_review_to_json(dest)
        self.log.debug('custom review status saved as %s', dest)
        self.verbose_info(
            'HTML report custom review status generated in ' + dest)
        codepeer_obj_dir = os.path.join(GNAThub.Project.object_dir(),
                                        'codepeer')
        if os.path.isdir(codepeer_obj_dir):
            # Call to codepeer_bridge for offline mode
            self.log.debug("Export info from codepeer_bridge")
            dest = os.path.join(GNAThub.Project.object_dir(),
                                'gnathub', 'html-report', 'data',
                                'codepeer_review.xml')
            name = 'codepeer_bridge'
            cmd = [
                'codepeer_bridge',
                '--output-dir=' + GNAThub.output_dir(),
                '--db-dir=' + GNAThub.db_dir(),
                '--export-reviews=' + dest
            ]
            self.log.debug('Codepeer_bridge file generated in %s', dest)
            self.verbose_info('Codepeer_bridge file generated in ' + dest)
            GNAThub.Run(name, cmd)
            # Get codepeer_run file
            copy_file = os.path.join(GNAThub.Project.object_dir(),
                                     'codepeer', 'codepeer_run')
            dest = os.path.join(GNAThub.Project.object_dir(),
                                'gnathub', 'html-report', 'data')
            copy2(copy_file, dest)
            self.log.debug('Codepeer_run file copied in %s', dest)
            self.verbose_info('Codepeer_run file copied in ' + dest)
            # Get race_condition file
            copy_file = os.path.join(
                GNAThub.Project.object_dir(), 'codepeer',
                GNAThub.Project.name().lower() + '.output',
                'race_conditions.xml')
            if os.path.isfile(copy_file):
                dest = os.path.join(GNAThub.Project.object_dir(),
                                    'gnathub', 'html-report', 'data')
                copy2(copy_file, dest)
                self.log.debug('%s file copied in %s', copy_file, dest)
                self.verbose_info(copy_file + ' file copied in ' + dest)
    except Exception as why:
        # BUG FIX: the original had a second, identical
        # "except IOError" clause AFTER this one; since IOError is a
        # subclass of Exception it was unreachable dead code.  A single
        # handler covers both cases with the same behavior.
        self.log.exception('failed to generate the HTML report')
        self.error(str(why))
        return GNAThub.EXEC_FAILURE
    else:
        return GNAThub.EXEC_SUCCESS
def report(self):
    """Execute CodePeer message reader and parses the output.

    Sets the exec_status property according to the success of the
    analysis:

        * ``GNAThub.EXEC_SUCCESS``: on successful execution and analysis
        * ``GNAThub.EXEC_FAILURE``: on any error
    """
    self.info('clear existing results if any')
    GNAThub.Tool.clear_references(self.name)
    self.info('extract results with msg_reader')
    proc = GNAThub.Run(self.output_dir, self.__msg_reader_cmd_line(),
                       out=self.csv_report, capture_stderr=False)
    if proc.status != 0:
        return GNAThub.EXEC_FAILURE
    self.info('analyse CSV report')
    self.tool = GNAThub.Tool(self.name)
    self.log.debug('parse report: %s', self.csv_report)
    if not os.path.isfile(self.csv_report):
        self.error('no report found')
        return GNAThub.EXEC_FAILURE
    with open(self.csv_report, 'rb') as report:
        # Compute the total number of lines for progress report (-1 because
        # the first line in irrelevant to the analysis).
        index, total = 0, len(report.readlines()) - 1
        # Reset the read cursor to the first byte
        report.seek(0)
        # Create the tag "New" for new CodePeer messages
        added_tag = GNAThub.Property('codepeer:added', 'Added')
        removed_tag = GNAThub.Property('codepeer:removed', 'Removed')
        unchanged_tag = GNAThub.Property('codepeer:unchanged', 'Unchanged')
        try:
            # Parse the file and drop the first line (containing the
            # columns name).
            reader = csv.reader(report, quotechar='"')
            # Drop the first line (containing the columns name).
            # BUG FIX: use the builtin next() instead of the Python 2
            # only reader.next() method, which does not exist in
            # Python 3 (next() works on both).
            header = next(reader)
            self.log.debug('drop header line: %s', header)
            # Iterate over each relevant record
            for index, record in enumerate(reader, start=1):
                self.log.debug('parse record: %r', record)
                # Each row is a list of strings:
                #
                #   File, Line, Column, Category, History, Has_Review,
                #   Ranking, Kind, Message, Classification, CWE, Checks,
                #   Primary_Checks, Subp, Timestamp, Approved By, Comment,
                #   Message_Id
                (source, line, column, rule, history, has_review,
                 severity, category, message, classification, cwe,
                 checks, pchecks, subp, timestamp, app_by, comment,
                 message_id) = record[:18]
                if not severity or severity == 'suppressed':
                    # Some versions of codepeer report an empty severity
                    # for suppressed messages: map this to 'info'.
                    severity = 'info'
                rule_id = rule.lower()
                self.__add_message(
                    source, line, column, rule_id, message, severity,
                    message_id, [
                        added_tag if history == 'added' else
                        (removed_tag if history == 'removed'
                         else unchanged_tag)
                    ])
                if index % 100 == 1 or index == total:
                    Console.progress(index, total,
                                     new_line=(index == total))
        except csv.Error as why:
            self.log.exception('failed to parse CSV report')
            self.error('%s (%s:%d)' %
                       (why, os.path.basename(self.csv_report), index))
            return GNAThub.EXEC_FAILURE
        else:
            self.__do_bulk_insert()
            return GNAThub.EXEC_SUCCESS
def report(self):
    """Execute GNATprove message reader and parses the output.

    Sets the exec_status property according to the success of the
    analysis:

        * ``GNAThub.EXEC_SUCCESS``: on successful execution and analysis
        * ``GNAThub.EXEC_FAILURE``: on any error
    """
    self.info('clear existing results if any')
    GNAThub.Tool.clear_references(self.name)
    self.info('extract results with msg_reader')
    proc = GNAThub.Run(self.output_dir, self.__msg_reader_cmd_line(),
                       out=self.output)
    if proc.status != 0:
        return GNAThub.EXEC_FAILURE
    self.info('analyse report')
    self.tool = GNAThub.Tool(self.name)
    self.log.debug('parse report: %s', self.output_dir)
    if not os.path.isdir(self.output_dir):
        self.error('no report found')
        return GNAThub.EXEC_FAILURE
    # Collect per-message records from the .spark files so that
    # messages in the textual output can be cross-referenced.
    for entry in os.listdir(self.output_dir):
        filename, ext = os.path.splitext(entry)
        if not ext == '.spark':
            continue
        self.log.debug('parse file: %s', entry)
        try:
            with open(os.path.join(self.output_dir, entry), 'rb') as spark:
                results = json.load(spark)
                for record in chain(results['flow'], results['proof']):
                    if 'msg_id' not in record or 'file' not in record:
                        continue
                    self.log.debug('found record %s', json.dumps(record))
                    msg_id = record['msg_id']
                    filename = record['file']
                    self.msg_ids[(filename, msg_id)] = record
        except IOError as why:
            self.log.exception('failed to parse GNATprove .spark file')
            # BUG FIX: the original format string '%s (%s:%d)' had three
            # placeholders but only two arguments, raising a TypeError
            # while reporting the error.
            self.error('%s (%s)' % (why, os.path.basename(self.output)))
    # BUG FIX: bind index/total before the try block; the original bound
    # them inside the "with", so an IOError raised by open() made the
    # except handler fail with a NameError on "total".
    index, total = 0, 0
    try:
        with open(self.output, 'rb') as fdin:
            # Compute the total number of lines for progress report
            lines = fdin.readlines()
            total = len(lines)
            for index, line in enumerate(lines, start=1):
                self.log.debug('parse line: %r', line)
                match = self._MESSAGE.match(line)
                if match:
                    self.log.debug('matched: %s', str(match.groups()))
                    self.__parse_line(match)
                Console.progress(index, total, new_line=(index == total))
    except IOError as why:
        self.log.exception('failed to parse GNATprove output')
        self.error('%s (%s:%d)' %
                   (why, os.path.basename(self.output), total))
        return GNAThub.EXEC_FAILURE
    else:
        self.__do_bulk_insert()
        return GNAThub.EXEC_SUCCESS
def report(self):
    """Execute GNATprove message reader and parses the output.

    Sets the exec_status property according to the success of the
    analysis:

        * ``GNAThub.EXEC_SUCCESS``: on successful execution and analysis
        * ``GNAThub.EXEC_FAILURE``: on any error
    """
    # Clear existing references only if not incremental run
    if not GNAThub.incremental():
        self.info('clear existing results if any')
        GNAThub.Tool.clear_references(self.name)
    self.info('extract results with msg_reader')
    proc = GNAThub.Run(self.output_dir, self.__msg_reader_cmd_line(),
                       out=self.output)
    if proc.status != 0:
        return GNAThub.EXEC_FAILURE
    self.info('analyse report')
    self.tool = GNAThub.Tool(self.name)
    # Handle multiple object directories for a given project
    if GNAThub.Project.object_dirs():
        # If there are multiple object directories defined in the project
        # tree will pass here and will look for all .spark files that was
        # generated under the object dirs gnatprove's folder
        for obj_dir in GNAThub.Project.object_dirs():
            # Fetch all files in project object directories and retrieve
            # only .spark files from gnatprove folders
            gnatprove_dir = os.path.join(obj_dir, 'gnatprove')
            if os.path.isdir(gnatprove_dir):
                self.log.debug('parse report: %s', gnatprove_dir)
                self.__parse_spark_files(gnatprove_dir)
    else:
        self.log.debug('parse report: %s', self.output_dir)
        if not os.path.isdir(self.output_dir):
            self.error('no report found')
            return GNAThub.EXEC_FAILURE
        self.__parse_spark_files(self.output_dir)
    # BUG FIX: bind index/total before the try block; the original bound
    # them inside the "with", so an IOError raised by open() made the
    # except handler fail with a NameError on "total".
    index, total = 0, 0
    try:
        with open(self.output, 'r') as fdin:
            # Compute the total number of lines for progress report
            lines = fdin.readlines()
            total = len(lines)
            for index, line in enumerate(lines, start=1):
                self.log.debug('parse line: %r', line)
                match = self._MESSAGE.match(line)
                if match:
                    self.log.debug('matched: %s', str(match.groups()))
                    self.__parse_line(match)
                Console.progress(index, total, new_line=(index == total))
    except IOError as why:
        self.log.exception('failed to parse GNATprove output')
        self.error('%s (%s:%d)' %
                   (why, os.path.basename(self.output), total))
        return GNAThub.EXEC_FAILURE
    else:
        self.__do_bulk_insert()
        return GNAThub.EXEC_SUCCESS
# Default for jobs number is 0 assertEqual(GNAThub.jobs(), 0) # The plugin list is expected to be empty assertEqual(len(GNAThub.plugins()), 0) # We ensure that the core and extra plugins directories exist repos = GNAThub.repositories() for kind in ('system', 'global'): assertTrue(os.path.isdir(repos[kind])) # GNAThub.run TO_BE_ECHOED = 'this is the message to display on the standard output' process = GNAThub.Run('echo', ('echo', TO_BE_ECHOED)) assertEqual(process.wait(), 0) assertEqual(process.status, 0) assertEqual(process.name, 'echo') assertEqual(process.cmdline_image(), "echo '%s'" % TO_BE_ECHOED) assertEqual(process.output(), os.path.join(GNAThub.logs(), 'echo.log')) assertTrue(os.path.isfile(process.output())) with open(process.output(), 'r') as logs: content = logs.read().strip() assertEqual(content, TO_BE_ECHOED) assertListUnorderedEqual( GNAThub.tool_args('codepeer'),