Exemple #1
0
 def _display_start(self):
     '''
     Called once before the job is started.
     Writes the job configuration banner (paths, filters, output location)
     to the display; suppressed entirely in quiet mode.
     '''
     if self._quiet:
         return

     opt = self._jobOpt
     self._print(STR_Divider)
     measuredPaths = ", ".join(os.path.abspath(p) for p in opt.pathsToMeasure)
     self._print(STR_FolderMeasured.format(measuredPaths))
     if opt.deltaPath is not None:
         self._print(STR_DeltaFolder.format(os.path.abspath(opt.deltaPath)))
     if opt.fileFilters:
         self._print(STR_FileFilter.format(opt.fileFilters))
     if opt.skipFolders:
         self._print(STR_DirFilter.format(opt.skipFolders))
     if opt.includeFolders:
         self._print(STR_IncludeFolders.format(opt.includeFolders))

     # Show where measures will be written, unless we only display a summary
     # or the output lands in the single folder being measured
     if not self._summaryOnly and (
             len(opt.pathsToMeasure) > 1 or
             opt.pathsToMeasure[0] != self._outFileDir):
         self._print(STR_LocationOfMeasurements.format(
                 os.path.abspath(self._outFileDir)))

     if self._detailed:
         self._print(STR_CmdArgs.format(self._args.args))
     self._print(STR_Divider)

     # Extra banner with debug configuration when tracing is active
     if trace.level():
         self._print(" ==> Debug Trace <==\n")
         self._print(" Level: {0}  Modes: {1}\n".format(
                 trace.level(), trace.modes()))
         self._print(" Debug output:    {0}\n".format(str(trace.out()).split(',')[0]))
         self._print(" Surveyor folder: {0}\n".format(utils.surveyor_dir()))
         self._print(" CWD for job:     {0}\n\n".format(utils.runtime_dir()))
Exemple #2
0
def _file_match(fileName, fileFilter):
    '''
    Performs the match check of filename to filter
    In the case of blank detection, look for no extension
    Otherwise use regex comparison using cached version of either the
    re from fnmatch.translate or custom RE string provided in filter
    Returns True if fileName matches fileFilter, False otherwise
    '''
    if BLANK_FILE_EXT == fileFilter:
        # BUGFIX: this branch previously computed the match but fell through
        # without a return, so blank-extension checks always yielded None.
        # Blank filter matches a file with no extension that is not a dotfile.
        root, ext = os.path.splitext(fileName)
        return '' == ext and not root.startswith('.')

    # Look up (or build and cache) the compiled regex for this filter
    try:
        filterRe = _FilterCache[fileFilter]
    except KeyError:
        if fileFilter.startswith(CUSTOM_FILE_REGEX):
            filterRe = re.compile(fileFilter.replace(CUSTOM_FILE_REGEX, ''), RE_OPTIONS)
        else:
            filterRe = re.compile(fnmatch.translate(fileFilter), RE_OPTIONS)
        _FilterCache[fileFilter] = filterRe

    filterMatch = filterRe.match(fileName)

    if trace.level() and filterMatch is None:
        trace.file(3, "FilterExtFilter: %s, no match:  %s" % (filterRe.pattern[:10], fileName))

    return filterMatch is not None
Exemple #3
0
 def _first_match(self,
                  searchTarget,
                  positiveSearches,
                  negativeSearches,
                  negativeFirst=False):
     '''
     Find the first positive match that has no negative matches, with an
     option on which set is checked first.
     Returns (keyName, matchObject) for a surviving positive match, or
     None when nothing matches (including a negative hit).
     Hit counts inside the search dicts are incremented in place.
     '''
     if trace.level():
         trace.search(4, "Searching: {0}".format(searchTarget))

     if negativeFirst:
         # Cheap rejection first: any negative hit short-circuits the search
         if self._is_negative_match(searchTarget, negativeSearches):
             return None
         return self._find_positive_match(searchTarget, positiveSearches)

     # Positive first: only positives that survive the negative check count
     result = self._find_positive_match(searchTarget, positiveSearches)
     if result and self._is_negative_match(searchTarget, negativeSearches):
         return None
     return result
Exemple #4
0
    def _initialize_output(self):
        '''
        Set up display feedback and the measurement output writer.
        '''
        # Do not run the display meter when heavy debug output goes to stdout
        if trace.out() == sys.stdout and trace.level() > 2:
            self._quiet = True

        # Prime the meter right away; really big dirs can otherwise delay
        # the first feedback
        if not self._quiet:
            self._write_display_feedback_line()

        # Initialize the writer and note the default outfile path
        # (there may be other output files open based on config file settings)
        delimiterByType = {
            CMDARG_OUTPUT_TYPE_CSV:  ',',
            CMDARG_OUTPUT_TYPE_TAB:  '\t',
            CMDARG_OUTPUT_TYPE_PARA: '\xB6',
            CMDARG_OUTPUT_TYPE_XML:  'xml',
            }
        self._writer = writer.get_writer(
                delimiterByType[self._outType], self.status_callback,
                self._outFileDir, self._outFileName, self._outFileOverride,
                self.ItemColumnOrder)

        # If measures are sent to stdout, force quiet mode so the display
        # meter does not interleave with measurement output
        if self._writer.using_console():
            self._quiet = True
Exemple #5
0
    def _is_negative_match(self, searchTarget, negativeSearches):
        '''
        Return True if any negative search expression hits searchTarget.
        The hit count stored with the matching entry is incremented in place.
        '''
        for searchKey, (searchRe, hitCount) in negativeSearches.iteritems():
            if trace.level():
                trace.search(
                    2, "  NegativeCheck: {0} > {1}".format(
                        searchRe.pattern, searchTarget))

            hit = searchRe.search(searchTarget)
            if hit is None:
                continue

            # Entries are (regex, count) lists so the count mutates in place
            negativeSearches[searchKey][1] = hitCount + 1
            if trace.level():
                trace.search(
                    1, "  NegativeHit: {0} > {1}".format(
                        str(hit.group()), searchRe.pattern))
            return True

        return False
Exemple #6
0
    def __init__(self, options):
        '''
        Set up the NBNC module: declare its verbs/measures and cache the
        active trace level.
        '''
        super(NBNC, self).__init__(options)

        # Identify what measures we can do, for config file validation
        self.verbs = [self.VERB_MEASURE]
        self.measures = [self.LINES_CODE, self.LINES_COMMENT, self.LINES_TOTAL]

        # Trace level is checked inside the core file processing loop; cache
        # it here because some trace statements call format even when
        # debugging is off
        self._traceLevel = trace.level()
Exemple #7
0
    def _find_positive_match(self, searchTarget, positiveSearches):
        '''
        Return (keyName, matchObject) for the first positive search that
        hits searchTarget, or None if nothing matches.
        The hit count stored with the matching entry is incremented in place.
        '''
        for posString, (posRegExp, posCount) in positiveSearches.iteritems():
            if trace.level():
                trace.search(
                    3, "  PositiveCheck: {0} > {1}".format(
                        searchTarget, posRegExp.pattern))

            match = posRegExp.search(searchTarget)

            if match:
                # Entries are (regex, count) lists so the count mutates in place
                positiveSearches[posString][1] = posCount + 1
                if trace.level():
                    # FIX: corrected "PositveHit" typo and added the leading
                    # spaces used by the sibling NegativeHit/PositiveCheck traces
                    trace.search(
                        1,
                        "  PositiveHit: {0} > {1}".format(str(match.group()),
                                                          posRegExp.pattern))
                return posString, match

        return None
Exemple #8
0
    def __init__(self, options):
        '''
        Initialize the NBNC module, declaring what it can measure and
        caching the trace level.
        '''
        super(NBNC, self).__init__(options)

        # Declare supported verbs and measures for config file validation
        self.verbs = [self.VERB_MEASURE]
        self.measures = [self.LINES_CODE, self.LINES_COMMENT, self.LINES_TOTAL]

        # Cache the trace level: it is checked in the core file processing
        # loop, and some trace statements format strings even in non-debug mode
        self._traceLevel = trace.level()
Exemple #9
0
    def _get_files_to_process(self, folderName, fileNames, fileFilters, configPath):
        '''
        Filter the list of files based on command line options and active
        config file filters
        Returns a list of (fileName, fileFilter) tuples to be measured
        '''
        # If fileFilters is empty it means an empty config file, so skip all files
        if not fileFilters:
            return []

        # Optimize the most common matching of extensions by creating cache of
        # simple '*.xxx' extensions from config filters for each config file
        try:
            filterExts = self._configFilterCache[configPath]
        except KeyError:
            # Split each filter only once (was previously split twice per filter)
            filterSplits = [os.path.splitext(fileFilter) for fileFilter in fileFilters]
            filterExts = [ext for root, ext in filterSplits if root == '*']
            self._configFilterCache[configPath] = filterExts

        # Select files based on matching filters
        filesToProcess = []
        for fileName in fileNames:

            # Filter file list by command-line positive filter, if provided
            if fileext.file_matches_filters(fileName, self._fileExtFilters):

                # Optimize most common case of direct match of file extension, then
                # fall back to doing a full filter match on config file filter
                _root, fileExt = os.path.splitext(fileName)
                if fileExt in filterExts:
                    fileFilter = '*' + fileExt
                else:
                    fileFilter = fileext.file_matches_filters(fileName, fileFilters)
                if fileFilter is not None:
                    filesToProcess.append((fileName, fileFilter))

        # Remove files that should be skipped
        if self._skipFiles:
            filesToProcess = [(fileName, fileFilter) for fileName, fileFilter in filesToProcess if
                                not fileext.file_matches_filters(fileName, self._skipFiles)]

        # Debug tracing of files that were not measured
        if trace.level():
            filesSkipped = set(fileNames) - set(f for f, _filter in filesToProcess)
            if filesSkipped:
                trace.file(2, "SkippingFiles: %s" % filesSkipped)

        return filesToProcess
Exemple #10
0
 def _cleanup(self):
     '''
     Final shutdown: close output files, show profiling info, and report
     any interrupt or terminal exception from the job.
     '''
     if self._writer is not None:
         self._writer.close_files()
     self._display_profile_info()
     if self._keyboardInterrupt is not None:
         self._print(STR_UserInterrupt)

     if self._finalException is None:
         return

     # Exception reporting writes directly to the output stream, not via
     # our tracing or print helpers
     self._out.write(STR_Error)
     if not isinstance(self._finalException, utils.SurveyorException):
         self._out.write(str(type(self._finalException)) + "\n")
     self._out.write(str(self._finalException) + "\n")
     if trace.level() >= 1:
         import traceback
         self._out.write(traceback.format_exc())
Exemple #11
0
    def _add_metric_to_summary(self, filePath, metricName, metric):
        if metricName not in self._totals:
            self._totals[metricName] = {}

        # If scalar value, add it to total, otherise increment count
        MEASURE_TOTAL_KEY = ''
        increment = 1
        if isinstance(metric, Number):
            increment = metric
        newValue = self._totals[metricName].get(MEASURE_TOTAL_KEY, 0) + increment
        self._totals[metricName][MEASURE_TOTAL_KEY] = newValue

        # For detailed measures stash metrics on per-file basis, according to exclusions
        if self._detailed and (
                metricName in self.SummaryToInclude or trace.level() >= 2) and (
                True not in [metricName.startswith(prefix) for prefix in self.SummaryPrefixToExclude]):
            (_not_used_, fileType) = os.path.splitext(filePath)
            fileType = fileType.lower() if fileType else NO_EXTENSION_NAME
            self._totals[metricName][fileType] = (
                self._totals[metricName].get(fileType, 0) + increment)
Exemple #12
0
    def _stash_summary_metrics(self, filePath, measures, analysisItems):
        '''
        Keep summary metrics on the measures for command-line display
        Use a dictionary of dictionaries to capture each measure along with
        the break-down on per-file type
        '''
        itemsToStash = []
        itemsToStash.extend(measures.items())
        # For detailed or higher trace levels, show everything we collected except exclusions
        # Otherwise show only key summary items
        if self._detailed or trace.level() > 1:
            for analysis in analysisItems:
                itemsToStash.extend(analysis.items())
            itemsToStash = [(n, v) for n, v in itemsToStash if
                    True not in [n.startswith(prefix) for prefix in self.SummaryPrefixToExclude]]
        else:
            itemsToStash = [(n, v) for n, v in itemsToStash if n in self.SummaryToInclude]

        for itemName, itemValue in itemsToStash:
            self._add_metric_to_summary(filePath, itemName, itemValue)
Exemple #13
0
    def __init__(self, options):
        '''
        Set up the Document module: declares its verbs and measures and maps
        document file types to their counting methods.
        '''
        super(Document, self).__init__(options)

        self.verbs = [self.VERB_MEASURE]
        self.measures = ['doc.*',
                self.NO_MEASURE,
                self.LINES_TOTAL,
                self.LINES_BLANK,
                self.LINES_CONTENT]

        # Matches lines containing only whitespace
        self.reBlankLine = re.compile(r"^\s*$")

        # Cache the trace level: it is checked in the core file processing
        # loop, and some trace statements format strings even in non-debug mode
        self._traceLevel = trace.level()


        # Lookup to associate file type with counting method.
        # This could be exposed to the end user as modules or a config option,
        # but document file types tend to be much more stable, so encoding
        # them here should be fine
        self.filetypeMeasures = {
                '.pdf': _measure_pdf,
                }