    def _display_start(self):
        '''
        Called once, before the job is started
        '''
        if self._quiet:
            return
        self._print(STR_Divider)
        self._print(STR_FolderMeasured.format(", ".join(
                [os.path.abspath(path) for path in self._jobOpt.pathsToMeasure])))
        if self._jobOpt.deltaPath is not None:
            self._print(STR_DeltaFolder.format(os.path.abspath(self._jobOpt.deltaPath)))
        if self._jobOpt.fileFilters:
            self._print(STR_FileFilter.format(self._jobOpt.fileFilters))
        if self._jobOpt.skipFolders:
            self._print(STR_DirFilter.format(self._jobOpt.skipFolders))
        if self._jobOpt.includeFolders:
            self._print(STR_IncludeFolders.format(self._jobOpt.includeFolders))
        if not self._summaryOnly:
            if (len(self._jobOpt.pathsToMeasure) > 1 or
                    self._jobOpt.pathsToMeasure[0] != self._outFileDir):
                self._print(STR_LocationOfMeasurements.format(
                                os.path.abspath(self._outFileDir)))
        if self._detailed:
            self._print(STR_CmdArgs.format(self._args.args))
        self._print(STR_Divider)
        if log.level():
            self._print(" ==> Debug Trace <==\n")
            self._print(" Level: {}  Modes: {}\n".format(
                    log.level(), log.modes()))
            self._print(" Debug output:    {}\n".format(str(log.out()).split(',')[0]))
            self._print(" Surveyor folder: {}\n".format(surveyor_dir()))
            self._print(" CWD for job:     {}\n\n".format(runtime_dir()))
    def _initialize_output(self):
        # Do not run display meter if doing heavy debug output
        self._quiet = self._quiet or (
                        log.out() == sys.stdout and log.level() > 2)

        # Init the meter so it shows up right away (really big dirs can
        # cause a delay in feedback)
        if not self._quiet:
            self._write_display_feedback_line()

        # Initialize the writer and note the default outfile path
        # (other output files may be open based on config file settings).
        # If sending measures to stdout, make sure quiet mode is on.
        typeLookup = {
            CMDARG_OUTPUT_TYPE_CSV:  ',',
            CMDARG_OUTPUT_TYPE_TAB:  '\t',
            CMDARG_OUTPUT_TYPE_PARA: '\xB6',
            CMDARG_OUTPUT_TYPE_XML:  'xml',
            }
        self._writer = writer.get_writer(
                typeLookup[self._outType],
                self.status_callback,
                self._outFileDir,
                self._outFileName,
                self._outFileOverride,
                self.ItemColumnOrder)
        if self._writer.using_console():
            self._quiet = True
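
# A standalone sketch of the delimiter lookup above for the delimiter-based
# output types, using only the standard csv module (CMDARG_* and
# writer.get_writer are Surveyor internals; the names below are illustrative
# assumptions, not the project's API).
import csv
import sys

DELIMITERS = {'csv': ',', 'tab': '\t', 'para': '\xB6'}

def make_console_writer(outType):
    # csv.writer accepts any single-character delimiter
    return csv.writer(sys.stdout, delimiter=DELIMITERS[outType])

# make_console_writer('tab').writerow(['file.py', 120, 30])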
Example #3
def _file_match(fileName, fileFilter):
    '''
    Performs the match check of a file name against a filter
    In the case of blank-extension detection, look for no extension
    Otherwise do a regex comparison, using a cached compile of either the
    re from fnmatch.translate or a custom RE string provided in the filter
    '''
    if BLANK_FILE_EXT == fileFilter:
        root, ext = os.path.splitext(fileName)
        return '' == ext and not root.startswith('.')

    filterRe = None
    try:
        filterRe = _FilterCache[fileFilter]
    except KeyError:
        if fileFilter.startswith(CUSTOM_FILE_REGEX):
            filterRe = re.compile(fileFilter.replace(CUSTOM_FILE_REGEX, ''), RE_OPTIONS)
        else:
            filterRe = re.compile(fnmatch.translate(fileFilter), RE_OPTIONS)
        _FilterCache[fileFilter] = filterRe

    filterMatch = filterRe.match(fileName)

    if log.level() > 3 and filterMatch is None:
        log.file(4, "FilterExtFilter: %s, no match:  %s" % (filterRe.pattern[:10], fileName))

    return filterMatch is not None
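
# A minimal standalone sketch of the same caching pattern: translate a glob
# to a regex once via fnmatch.translate, then reuse the compiled form
# (re.IGNORECASE stands in here for the module's RE_OPTIONS, an assumption).
import fnmatch
import re

_patternCache = {}

def glob_match(fileName, pattern):
    try:
        patternRe = _patternCache[pattern]
    except KeyError:
        patternRe = re.compile(fnmatch.translate(pattern), re.IGNORECASE)
        _patternCache[pattern] = patternRe
    return patternRe.match(fileName) is not None

# glob_match("Main.PY", "*.py")  ==> True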
    def _first_match(self,
                     searchTarget,
                     positiveSearches,
                     negativeSearches,
                     negativeFirst=False):
        '''
        Returns the first positive match that has no negative matches,
        with an option for which direction to check first
        If there is no positive match (or there is a negative hit), returns None
        Otherwise returns the keyName of the match and the match object
        The search dicts carry counts that are incremented in place
        '''
        if log.level(): log.search(4, "Searching: {}".format(searchTarget))
        matchTuple = None
        if negativeFirst:
            if not self._is_negative_match(searchTarget, negativeSearches):
                matchTuple = self._find_positive_match(searchTarget,
                                                       positiveSearches)
        else:
            matchTuple = self._find_positive_match(searchTarget,
                                                   positiveSearches)
            if matchTuple:
                if self._is_negative_match(searchTarget, negativeSearches):
                    matchTuple = None
        return matchTuple
    def _is_negative_match(self, searchTarget, negativeSearches):
        for negString, (negRegExp, negCount) in negativeSearches.items():
            if log.level():
                log.search(
                    4,
                    "  NegativeCheck: {} > {}".format(negRegExp.pattern,
                                                      searchTarget))

            negMatch = negRegExp.search(searchTarget)

            if negMatch:
                negativeSearches[negString][1] = negCount + 1
                if log.level():
                    log.search(
                        4, "  NegativeHit: {} > {}".format(
                            str(negMatch.group()), negRegExp.pattern))
                return True

        return False
    def _find_positive_match(self, searchTarget, positiveSearches):
        for posString, (posRegExp, posCount) in positiveSearches.items():
            if log.level():
                log.search(
                    4,
                    "  PositiveCheck: {} > {}".format(searchTarget,
                                                      posRegExp.pattern))

            match = posRegExp.search(searchTarget)

            if match:
                positiveSearches[posString][1] = posCount + 1
                if log.level():
                    log.search(
                        2,
                        "PositveHit: {} > {}".format(str(match.group()),
                                                     posRegExp.pattern))
                return posString, match

        return None
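
# A self-contained sketch of the positive/negative search flow above: each
# search dict maps a key name to a mutable [compiled_regex, hit_count] pair,
# so hit counts can be bumped in place (the dict shape is inferred from the
# in-place count updates in the methods above; names are illustrative).
import re

def first_match(target, positives, negatives):
    for name, entry in positives.items():
        match = entry[0].search(target)
        if match:
            entry[1] += 1
            # Reject the hit if any negative pattern also matches
            if any(neg[0].search(target) for neg in negatives.values()):
                return None
            return name, match
    return None

positives = {"todo": [re.compile(r"TODO"), 0]}
negatives = {"done": [re.compile(r"TODO\s+DONE"), 0]}
# first_match("# TODO fix this", positives, negatives) ==> ("todo", <match>)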
Example #7
    def __init__(self, options):
        super(NBNC, self).__init__(options)

        # Cache the log level for checks inside the core file-processing loop,
        # since some log statements pay for format calls even in non-debug mode
        self._logLevel = log.level()

        # Identify what this module can do, for config file validation
        self.verbs = [self.VERB_MEASURE]
        self.measures = [self.LINES_CODE, self.LINES_COMMENT, self.LINES_TOTAL]

        # Flag whether block detection should be used in the file
        self._use_block_detection = True
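
# A minimal sketch of the hoisting optimization noted above, assuming the
# project's log module is importable here: caching log.level() in a local
# lets a hot loop skip the str.format work entirely when tracing is off.
def count_lines(lines):
    logLevel = log.level()      # looked up once, outside the hot loop
    total = 0
    for lineNum, line in enumerate(lines):
        if logLevel:            # guard avoids the format call in non-debug runs
            log.file(3, "Line {}: {}".format(lineNum, line))
        total += 1
    return total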
Example #8
    def _get_files_to_process(self, folderName, fileNames, fileFilters, configPath):
        '''
        Filter the list of files based on command line options and active
        config file filters
        '''
        # If fileFilters is empty it means an empty config file, so skip all files
        if not fileFilters:
            return []

        # Optimize the most common matching of extensions by creating cache of
        # simple '*.xxx' extensions from config filters for each config file
        filterExts = []
        try:
            filterExts = self._configFilterCache[configPath]
        except KeyError:
            filterSplits = [os.path.splitext(fileFilter) for fileFilter in fileFilters if
                                os.path.splitext(fileFilter)[0] == '*']
            filterExts = [ext for _root, ext in filterSplits]
            self._configFilterCache[configPath] = filterExts

        # Select files based on matching filters
        filesToProcess = []
        for fileName in fileNames:

            # Filter the file list by the command-line positive filter, if provided
            if fileext.file_matches_filters(fileName, self._fileExtFilters):

                # Optimize most common case of direct match of file extension, then
                # fall back to doing a full filter match on config file filter
                _root, fileExt = os.path.splitext(fileName)
                fileFilter = None
                if fileExt in filterExts:
                    fileFilter = '*' + fileExt
                else:
                    fileFilter = fileext.file_matches_filters(fileName, fileFilters)
                if fileFilter is not None:
                    filesToProcess.append((fileName, fileFilter))

        # Remove files that should be skipped
        if self._skipFiles:
            filesToProcess = [(fileName, fileFilter) for fileName, fileFilter in filesToProcess if
                                not fileext.file_matches_filters(fileName, self._skipFiles)]

        # Debug tracing of files that were not measured
        if log.level():
            filesSkipped = set(fileNames) - set([f for f, _filter in filesToProcess])
            if filesSkipped:
                log.file(2, "SkippingFiles: %s" % filesSkipped)

        return filesToProcess
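
# A trimmed sketch of the extension fast path above: harvest the plain
# '*.ext' filters once, then test extension membership before falling back
# to the full (slower) filter match (names here are illustrative).
import os

def simple_extensions(fileFilters):
    splits = (os.path.splitext(f) for f in fileFilters)
    return set(ext for root, ext in splits if root == '*')

# simple_extensions(['*.py', '*.c', 'Makefile*']) ==> {'.py', '.c'}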
    def _cleanup(self):
        if self._writer is not None:
            self._writer.close_files()
        self._display_profile_info()
        if self._keyboardInterrupt is not None:
            self._print(STR_UserInterrupt)
        if self._finalException is not None:
            exc = self._finalException
            # Don't use log or print output here, to avoid causing more errors
            self._out.write(STR_Error)
            if log.level():
                dump = getattr(exc, '_stack_trace', "".join(
                        traceback.format_exception(type(exc), exc, exc.__traceback__)))
                self._out.write(dump)
            else:
                self._out.write(str(exc) + "\n")
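
# A standalone sketch of the fallback used above: prefer a stack trace the
# exception may already carry (the _stack_trace attribute is a Surveyor
# convention), otherwise format one from the exception itself.
import traceback

def exception_text(exc):
    return getattr(exc, '_stack_trace', "".join(
            traceback.format_exception(type(exc), exc, exc.__traceback__)))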
    def _add_metric_to_summary(self, filePath, metricName, metric):
        if metricName not in self._totals:
            self._totals[metricName] = {}

        # If a scalar value, add it to the total; otherwise increment the count
        MEASURE_TOTAL_KEY = ''
        increment = 1
        if isinstance(metric, Number):
            increment = metric
        newValue = self._totals[metricName].get(MEASURE_TOTAL_KEY, 0) + increment
        self._totals[metricName][MEASURE_TOTAL_KEY] = newValue

        # For detailed measures, stash metrics on a per-file-type basis, honoring exclusions
        if self._detailed and (
                metricName in self.SummaryToInclude or log.level() >= 2) and (
                True not in [metricName.startswith(prefix) for prefix in self.SummaryPrefixToExclude]):
            (_not_used_, fileType) = os.path.splitext(filePath)
            fileType = fileType.lower() if fileType else NO_EXTENSION_NAME
            self._totals[metricName][fileType] = (
                self._totals[metricName].get(fileType, 0) + increment)
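
# A compact sketch of the two-level totals structure built above:
# {metricName: {'': grand_total, '.ext': per-type total, ...}}, where the
# empty-string key holds the overall sum (mirroring MEASURE_TOTAL_KEY).
totals = {}

def add_metric(totals, metricName, fileType, increment):
    byType = totals.setdefault(metricName, {})
    byType[''] = byType.get('', 0) + increment
    byType[fileType] = byType.get(fileType, 0) + increment

# add_metric(totals, 'lines.code', '.py', 120)
# totals ==> {'lines.code': {'': 120, '.py': 120}}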
    def _stash_summary_metrics(self, filePath, measures, analysisItems):
        '''
        Keep summary metrics on the measures for command-line display
        Use a dictionary of dictionaries to capture each measure along with
        the breakdown per file type
        '''
        itemsToStash = []
        itemsToStash.extend(list(measures.items()))
        # For detailed or higher trace levels, show everything collected except exclusions
        # Otherwise show only key summary items
        if self._detailed or log.level() > 1:
            for analysis in analysisItems:
                itemsToStash.extend(list(analysis.items()))
            itemsToStash = [(n, v) for n, v in itemsToStash if
                    True not in [n.startswith(prefix) for prefix in self.SummaryPrefixToExclude]]
        else:
            itemsToStash = [(n, v) for n, v in itemsToStash if n in self.SummaryToInclude]

        for itemName, itemValue in itemsToStash:
            self._add_metric_to_summary(filePath, itemName, itemValue)
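
# A minimal sketch of the include/exclude selection performed above, using
# any() in place of the `True not in [...]` idiom (same behavior; it covers
# only the selection step, and the names are illustrative).
def select_items(items, include, excludePrefixes, detailed):
    if detailed:
        return [(n, v) for n, v in items
                if not any(n.startswith(p) for p in excludePrefixes)]
    return [(n, v) for n, v in items if n in include]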