Example #1
0
def cyclomaticComplexity(code):
    """Compute the average cyclomatic complexity of *code*.

    :param code: Python source code, as a string.
    :return: a two-element list ``[average, rank]`` where ``average`` is the
        mean complexity over all blocks found in *code* and ``rank`` is its
        A-F letter rank from :func:`cc_rank`.
    """
    # Visit the source once and reuse the result.  The original version
    # called cc_visit() twice and average_complexity() twice, and also ran a
    # loop whose variables (letter, complexity, rank, fullname) were never
    # used — that dead code has been removed.
    avg = average_complexity(cc_visit(code))
    return [avg, cc_rank(avg)]
Example #2
0
File: tools.py Project: lamby/radon
def cc_to_terminal(results, show_complexity, min, max, total_average):
    '''Transform Cyclomatic Complexity results into a 3-elements tuple:

        ``(res, total_cc, counted)``

    `res` is a list holding strings that are specifically formatted to be
    printed to a terminal.
    `total_cc` is a number representing the total analyzed cyclomatic
    complexity.
    `counted` holds the number of the analyzed blocks.

    If *show_complexity* is `True`, then the complexity of a block will be
    shown in the terminal line alongside its rank.
    *min* and *max* are used to control which blocks are shown in the resulting
    list. A block is formatted only if its rank is `min <= rank <= max`.
    If *total_average* is `True`, the `total_cc` and `counted` count every
    block, regardless of the fact that they are formatted in `res` or not.
    '''
    res = []
    counted = 0
    total_cc = 0.0
    for line in results:
        ranked = cc_rank(line.complexity)
        # Letter ranks compare lexicographically, so 'A' <= rank <= 'F' works.
        in_range = min <= ranked <= max
        if in_range or total_average:
            # Totals cover either every block (total_average) or only the
            # blocks whose rank falls within [min, max] — previously this
            # accumulation was duplicated across an if/elif pair.
            total_cc += line.complexity
            counted += 1
        if in_range:
            res.append(_format_line(line, ranked, show_complexity))
    return res, total_cc, counted
Example #3
0
    def to_terminal(self):
        '''Yield lines to be printed in a terminal.

        Each yielded item is a ``(text, format_args, options)`` 3-tuple that
        the caller is expected to render; ``options`` carries flags such as
        ``error`` or ``indent``.
        '''
        # Running sum of complexities and count of analyzed blocks, used for
        # the optional trailing "Average complexity" summary.
        average_cc = 0.0
        analyzed = 0
        for name, blocks in self.results:
            if 'error' in blocks:
                # Parsing failed for this module: report the error and move on.
                yield name, (blocks['error'], ), {'error': True}
                continue
            res, cc, n = cc_to_terminal(
                blocks,
                self.config.show_complexity,
                self.config.min,
                self.config.max,
                self.config.total_average,
            )
            average_cc += cc
            analyzed += n
            if res:
                # Module name first, then its formatted blocks indented once.
                yield name, (), {}
                yield res, (), {'indent': 1}

        if (self.config.average or self.config.total_average) and analyzed:
            # analyzed > 0 guarantees the division is safe.
            cc = average_cc / analyzed
            ranked_cc = cc_rank(cc)
            yield (
                '\n{0} blocks (classes, functions, methods) analyzed.',
                (analyzed, ),
                {},
            )
            yield (
                'Average complexity: {0}{1} ({2}){3}',
                (RANKS_COLORS[ranked_cc], ranked_cc, cc, RESET),
                {},
            )
Example #4
0
def cc_to_terminal(results, show_complexity, min, max, total_average):
    '''Transform Cyclomatic Complexity results into a 3-elements tuple:

        ``(res, total_cc, counted)``

    `res` is a list holding strings that are specifically formatted to be
    printed to a terminal.
    `total_cc` is a number representing the total analyzed cyclomatic
    complexity.
    `counted` holds the number of the analyzed blocks.

    If *show_complexity* is `True`, then the complexity of a block will be
    shown in the terminal line alongside its rank.
    *min* and *max* are used to control which blocks are shown in the resulting
    list. A block is formatted only if its rank is `min <= rank <= max`.
    If *total_average* is `True`, the `total_cc` and `counted` count every
    block, regardless of the fact that they are formatted in `res` or not.
    '''
    res = []
    counted = 0
    total_cc = 0.0
    for line in results:
        ranked = cc_rank(line.complexity)
        # Letter ranks compare lexicographically, so 'A' <= rank <= 'F' works.
        in_range = min <= ranked <= max
        if in_range or total_average:
            # Single accumulation site replaces the duplicated if/elif bodies
            # of the original version.
            total_cc += line.complexity
            counted += 1
        if in_range:
            res.append(_format_line(line, ranked, show_complexity))
    return res, total_cc, counted
Example #5
0
def compute_complexity(source):
    """Return a list of '<name>: Rank:<rank>' strings, one per block in *source*.

    :param source: Python source code as a string.
    """
    # NOTE(review): the original also computed ``mix_path = mi_visit(source,
    # True)`` and never used it; the dead call has been removed.
    blocks = cc_visit(source)
    return [func.name + ": Rank:" + cc_rank(func.complexity) for func in blocks]
Example #6
0
    def __showCCContextMenu(self, pos):
        """Triggered when the cc icon context menu is requested.

        Builds a context menu listing every block of the current buffer whose
        cyclomatic-complexity rank is worse than 'A' and pops it up at *pos*.
        """
        # No current buffer, or no flakes/cc results for it yet: nothing to show.
        if self.__currentUUID is None:
            return
        if self.__currentUUID not in self.__flakesResults:
            return

        count = 0
        contextMenu = QMenu(self.__ccLabel)
        for item in self.__flakesResults[self.__currentUUID].ccMessages:
            complexity = cc_rank(item.complexity)

            # Rank 'A' is considered fine and is not listed.
            if complexity != 'A':
                count += 1
                title = complexity + '(' + str(item.complexity) + ') ' + \
                        item.fullname
                # Letters 'F' (function) and 'M' (method) get call parentheses.
                if item.letter in ('F', 'M'):
                    title += '()'
                act = contextMenu.addAction(getIcon('ccmarker.png'), title)
                # Store the line number so the handler can jump to the block.
                act.setData(item.lineno)
        if count > 0:
            contextMenu.triggered.connect(self.__onContextMenu)
            contextMenu.popup(self.__ccLabel.mapToGlobal(pos))
        else:
            # No entries: drop the unused menu instead of popping it up.
            del contextMenu
Example #7
0
def find_infractions(args, logger, results):
    '''Analyze the results and find if the thresholds are surpassed.

    *args* and *logger* are the same as in :func:`~xenon.core.analyze`, while
    *results* is a dictionary holding the results of the complexity analysis.

    The number of infractions with respect to the threshold values is returned.
    '''
    infractions = 0
    module_averages = []
    total_cc = 0.
    total_blocks = 0
    for module, blocks in results.items():
        module_cc = 0.
        # Modules that failed to parse carry an 'error' entry; skip them.
        if isinstance(blocks, dict) and blocks.get('error'):
            logger.warning('cannot parse %s: %s', module, blocks['error'])
            continue
        for block in blocks:
            module_cc += block['complexity']
            r = cc_rank(block['complexity'])
            # Per-block check against the absolute rank threshold.
            if check(r, args.absolute):
                logger.error('block "%s:%s %s" has a rank of %s', module,
                             block['lineno'], block['name'], r)
                infractions += 1
        # av() presumably guards against len(blocks) == 0 — confirm.
        module_averages.append((module, av(module_cc, len(blocks))))
        total_cc += module_cc
        total_blocks += len(blocks)

    av_cc = av(total_cc, total_blocks)
    ar = cc_rank(av_cc)

    # Numeric average threshold (--averagenum), checked before the rank one.
    if args.averagenum is not None and av_cc > args.averagenum:
        logger.error('total average complexity is %s', av_cc)
        infractions += 1

    # Letter-rank average threshold (--average).
    if check(ar, args.average):
        logger.error('average complexity is ranked %s', ar)
        infractions += 1
    # Per-module average threshold (--modules).
    for module, ma in module_averages:
        mar = cc_rank(ma)
        if check(mar, args.modules):
            logger.error('module %r has a rank of %s', module, mar)
            infractions += 1
    return infractions
Example #8
0
File: cli.py Project: b4dtR1p/radon
def cc(path, min='A', max='F', show_complexity=False, average=False,
       exclude=None, ignore=None, order='SCORE', json=False, no_assert=False,
       total_average=False, *more_paths):
    '''Analyze the given Python modules and compute Cyclomatic
    Complexity (CC).

    The output can be filtered using the *min* and *max* flags. In addition
    to that, by default complexity score is not displayed.

    :param path: The path where to find modules or packages to analyze.
    :param -n, --min <str>: The minimum complexity to display (default to A).
    :param -x, --max <str>: The maximum complexity to display (default to F).
    :param -e, --exclude <str>: Comma separated list of patterns to exclude.
        By default hidden directories (those starting with '.') are excluded.
    :param -i, --ignore <str>: Comma separated list of patterns to ignore.
        If they are directory names, radon won't even descend into them.
    :param -s, --show-complexity: Whether or not to show the actual complexity
        score together with the A-F rank. Default to False.
    :param -a, --average: If True, at the end of the analysis display the
        average complexity. Default to False.
    :param --total-average: Like `-a, --average`, but it is not influenced by
        `min` and `max`. Every analyzed block is counted, no matter whether it
        is displayed or not.
    :param -o, --order <str>: The ordering function. Can be SCORE, LINES or
        ALPHA.
    :param -j, --json: Format results in JSON.
    :param --no-assert: Do not count `assert` statements when computing
        complexity.
    :param more_paths: Additional paths to analyze.
    '''
    paths = [path] + list(more_paths)
    # Ranks are upper-case letters; normalize user input.
    min = min.upper()
    max = max.upper()
    average_cc = .0
    analyzed = 0
    # Fall back to SCORE ordering when *order* names no known function.
    order_function = getattr(cc_mod, order.upper(), getattr(cc_mod, 'SCORE'))
    cc_data = analyze_cc(paths, exclude, ignore, order_function,
                         no_assert)
    if json:
        # JSON mode: dump everything, no terminal formatting.
        result = {}
        for key, data in cc_data:
            result[key] = list(map(cc_to_dict, data))
        log(json_mod.dumps(result), noformat=True)
    else:
        for name, results in cc_data:
            cc, blocks = _print_cc_results(name, results, show_complexity, min,
                                           max, total_average)
            average_cc += cc
            analyzed += blocks

    # NOTE: in JSON mode `analyzed` stays 0, so no summary is printed.
    if (average or total_average) and analyzed:
        cc = average_cc / analyzed
        ranked_cc = cc_rank(cc)
        log('\n{0} blocks (classes, functions, methods) analyzed.', analyzed)
        log('Average complexity: {0}{1} ({2}){3}', RANKS_COLORS[ranked_cc],
            ranked_cc, cc, RESET)
Example #9
0
def compute_complexity(source):
    """Return '<name>-Rank:<rank>' strings for every block found in *source*.

    :param source: Python source code as a string.
    """
    # get complexity blocks
    blocks = cc_visit(source)
    # NOTE(review): the original also computed ``mi = mi_visit(source, True)``
    # and never used it; the dead call has been removed.
    return [slave.name + "-Rank:" + cc_rank(slave.complexity) for slave in blocks]
Example #10
0
def compute_complexity(source):
    """Return '<name>- CC Rank:<rank>' strings for every block in *source*.

    :param source: Python source code as a string.
    """
    # get cc blocks
    blocks = cc_visit(source)
    # NOTE(review): the original also computed ``mi = mi_visit(source, True)``
    # and never used it; the dead call has been removed.
    return [func.name + "- CC Rank:" + cc_rank(func.complexity) for func in blocks]
Example #11
0
    def _complexity(self, filepaths):
        """Sum the per-file average block complexity and rank the total.

        :param filepaths: iterable of paths relative to ``self.repo_dir_path``.
        :return: ``(total, rank)`` where ``total`` is the sum over files of
            the mean complexity of that file's blocks and ``rank`` is the
            cc_rank letter for that total.
        """
        all_complexity = 0
        for filepath in filepaths:
            # Use a context manager so the handle is closed deterministically
            # (the original leaked every file object).
            with open(join(self.repo_dir_path, filepath)) as file_obj:
                results = sorted_results(cc_visit(file_obj.read()))
            complexities = [i.complexity for i in results]
            # `or 1` guards against files containing no blocks at all.
            all_complexity += sum(complexities) / (len(complexities) or 1)

        return all_complexity, cc_rank(all_complexity)
Example #12
0
def radon_test(org_name, repo_name, file_name):
    """Read ``./<org>/<repo>/<file>`` and print its radon cc rank.

    NOTE(review): ``rd.cc_rank`` expects a *numeric* complexity score, yet this
    passes the raw file contents string — confirm the intended call (most
    likely the source should go through ``rd.cc_visit`` first).
    """
    file_name = './' + org_name + '/' + repo_name + '/' + file_name
    print(file_name)

    # Close the handle deterministically (the original leaked it).
    with open(file_name) as source_file:
        file_data = source_file.read()
    print(file_data)
    cc = rd.cc_rank(file_data)
    print(cc)
Example #13
0
    def setAnalysisResults(label, results, ccLabel, ccResults, editor):
        """Displays the appropriate icon:

        - pyflakes has no complains
        - pyflakes found errors

        Also updates *ccLabel* with a cyclomatic-complexity summary icon and
        tooltip, and forwards both result sets to *editor* when one is open.
        """
        if editor is not None:
            # Refresh the in-editor markers before updating the status icons.
            editor.clearAnalysisMessages()
            editor.setAnalysisMessages(results, ccResults)

        if results:
            # There are complains
            complains = "Buffer checked: there are pyflakes complains<br/>"
            lineNumbers = list(results.keys())
            lineNumbers.sort()
            for lineNo in lineNumbers:
                for item in results[lineNo]:
                    complains += '<br/>'
                    if lineNo == -1:
                        # Special case: compilation error
                        complains += escape(item)
                    else:
                        complains += "Line " + str(lineNo) + \
                                     ": " + escape(item)
            # Non-breaking spaces keep the tooltip formatting intact.
            label.setToolTip(complains.replace(' ', '&nbsp;'))
            label.setPixmap(getPixmap('flakeserrors.png'))
        else:
            # There are no complains
            label.setToolTip('Buffer checked: no pyflakes complains')
            label.setPixmap(getPixmap('flakesok.png'))

        if ccResults:
            complains = 'Buffer cyclomatic complexity:<br/>'
            worstComplexity = 'A'
            for item in ccResults:
                complexity = cc_rank(item.complexity)
                # Ranks are letters A-F, so max() picks the worst one.
                worstComplexity = max(complexity, worstComplexity)

                # Only ranks worse than 'A' are listed in the tooltip.
                if complexity != 'A':
                    complains += '<br/>' + complexity + \
                                 '(' + str(item.complexity) + ') ' + \
                                 escape(item.fullname)
                    # 'F' (function) and 'M' (method) blocks get call parens.
                    if item.letter in ('F', 'M'):
                        complains += '()'

            if worstComplexity == 'A':
                ccLabel.setToolTip(
                    'Buffer cyclomatic complexity: no complains')
            else:
                ccLabel.setToolTip(complains.replace(' ', '&nbsp;'))
            # Icon reflects the worst rank found in the buffer.
            ccLabel.setPixmap(getPixmap(COMPLEXITY_PIXMAPS[worstComplexity]))
        else:
            ccLabel.setToolTip('No complexity information available')
            ccLabel.setPixmap(getPixmap('ccmarker.png'))
def files_complexity_print(dictionary: OrderedDict) -> None:
    """Print a table of project files ranked by cyclomatic complexity.

    :param dictionary: mapping whose sorted entries each carry a
        ``(rank, file_name, halstead_vol, logic_lines, lines_comment)`` tuple
        at index 1 — assumed from the unpacking below; TODO confirm.

    NOTE(review): the return annotation was ``NoReturn``, which means "never
    returns" — this function plainly returns, so ``None`` is correct.
    """
    sorted_dict = sort_dict(dictionary)
    print('3. PROJECT FILES RANKED AND SORTED BY CYCLOMATIC COMPLEXITY')
    print('N | Rank | LLOC |  HV  |   % of LC   |    Filename')
    print('-------------------------------------------------')
    space = '  '  # hoisted: loop-invariant column separator
    # enumerate() replaces the range(len(...)) anti-pattern.
    for n, entry in enumerate(sorted_dict):
        rank, file_name, halstead_vol, logic_lines, lines_comment = entry[1]
        rank = cc_rank(rank)
        halstead_vol, lines_comment = np.round(halstead_vol, 2), np.round(lines_comment, 2)
        print(n, space, rank, space, logic_lines, space, halstead_vol, space,
              lines_comment, space, file_name)
Example #15
0
File: core.py Project: rubik/xenon
def find_infractions(args, logger, results):
    '''Analyze the results and find if the thresholds are surpassed.

    *args* and *logger* are the same as in :func:`~xenon.core.analyze`, while
    *results* is a dictionary holding the results of the complexity analysis.

    The number of infractions with respect to the threshold values is returned.
    '''
    infractions = 0
    module_averages = []
    total_cc = 0.
    total_blocks = 0
    for module, blocks in results.items():
        module_cc = 0.
        # Modules that failed to parse carry an 'error' entry; skip them.
        if isinstance(blocks, dict) and blocks.get('error'):
            logger.warning('cannot parse %s: %s', module, blocks['error'])
            continue
        for block in blocks:
            module_cc += block['complexity']
            r = cc_rank(block['complexity'])
            # Per-block check against the absolute rank threshold.
            if check(r, args.absolute):
                logger.error('block "%s:%s %s" has a rank of %s', module,
                             block['lineno'], block['name'], r)
                infractions += 1
        # av() presumably guards against len(blocks) == 0 — confirm.
        module_averages.append((module, av(module_cc, len(blocks))))
        total_cc += module_cc
        total_blocks += len(blocks)

    # Overall average rank vs the --average threshold.
    ar = cc_rank(av(total_cc, total_blocks))
    if check(ar, args.average):
        logger.error('average complexity is ranked %s', ar)
        infractions += 1
    # Per-module average rank vs the --modules threshold.
    for module, ma in module_averages:
        mar = cc_rank(ma)
        if check(mar, args.modules):
            logger.error('module %r has a rank of %s', module, mar)
            infractions += 1
    return infractions
Example #16
0
    def calc_sym_color(self, symbol):
        """Return the color associated with *symbol*'s complexity rank.

        Looks for a complexity value first under ``progpmccabe.mccabe``,
        then under ``progradon.complexity``; falls back to
        ``COLOR_CC_UNKNOWN`` when neither exists or the rank is unmapped.
        """
        cc_value = None
        # First attribute path that resolves wins, even if its value is None.
        for owner, attr in (('progpmccabe', 'mccabe'),
                            ('progradon', 'complexity')):
            try:
                cc_value = getattr(getattr(symbol, owner), attr)
                break
            except AttributeError:
                continue

        try:
            return self.colormap[cc_rank(cc_value)]
        except (KeyError, TypeError):
            # Unknown rank, or cc_value was None / non-numeric.
            return self.COLOR_CC_UNKNOWN
Example #17
0
def _print_cc_results(path, results, show_complexity):
    '''Print Cyclomatic Complexity results.

    :param path: the path of the module that has been analyzed
    :param results: iterable of analyzed blocks
    :param show_complexity: if True, show the complexity score in addition to
        the complexity rank
    :return: ``(summed_complexity, number_of_blocks)``
    '''
    total = 0.0
    formatted = []
    for block in results:
        total += block.complexity
        formatted.append(
            _format_line(block, cc_rank(block.complexity), show_complexity))
    # Only print a header for modules that produced at least one line.
    if formatted:
        log(path)
        log_list(formatted, indent=1)
    return total, len(results)
Example #18
0
def cc(min='A',
       max='F',
       show_complexity=False,
       average=False,
       exclude=None,
       order='SCORE',
       *paths):
    '''Analyze the given Python modules and compute Cyclomatic
    Complexity (CC).

    The output can be filtered using the *min* and *max* flags. In addition
    to that, by default complexity score is not displayed.

    :param min: The minimum complexity to display (default to A).
    :param max: The maximum complexity to display (default to F).
    :param show_complexity: Whether or not to show the actual complexity
        score together with the A-F rank. Default to False.
    :param average: If True, at the end of the analysis display the average
        complexity. Default to False.
    :param paths: The modules or packages to analyze.
    '''
    # Ranks are upper-case letters; normalize user input.
    min = min.upper()
    max = max.upper()
    average_cc = .0
    analyzed = 0
    # Fall back to SCORE ordering when *order* names no known function.
    order_function = getattr(cc_mod, order.upper(), getattr(cc_mod, 'SCORE'))
    for name in iter_filenames(paths, exclude or []):
        with open(name) as fobj:
            try:
                results = sorted_results(cc_visit(fobj.read()), order_function)
            except Exception as e:
                # Unparsable file: report and keep going with the rest.
                log('{0}\n{1}ERROR: {2}', name, ' ' * 4, str(e))
                continue
        cc, blocks = _print_cc_results(name, results, min, max,
                                       show_complexity)
        average_cc += cc
        analyzed += blocks

    if average and analyzed:
        # analyzed > 0 guarantees the division is safe.
        cc = average_cc / analyzed
        ranked_cc = cc_rank(cc)
        log('\n{0} blocks (classes, functions, methods) analyzed.', analyzed)
        log('Average complexity: {0}{1} ({2}){3}', RANKS_COLORS[ranked_cc],
            ranked_cc, cc, RESET)
Example #19
0
def cc(min='A', max='F', show_complexity=False, average=False,
       exclude=None, order='SCORE', json=False, *paths):
    '''Analyze the given Python modules and compute Cyclomatic
    Complexity (CC).

    The output can be filtered using the *min* and *max* flags. In addition
    to that, by default complexity score is not displayed.

    -n, --min  The minimum complexity to display (default to A).
    -x, --max  The maximum complexity to display (default to F).
    -e, --exclude  Comma separated list of patterns to exclude. By default
        hidden directories (those starting with '.') are excluded.
    -s, --show_complexity  Whether or not to show the actual complexity score
        together with the A-F rank. Default to False.
    -a, --average  If True, at the end of the analysis display the average
        complexity. Default to False.
    -o, --order  The ordering function. Can be SCORE, LINES or ALPHA.
    -j, --json  Format results in JSON.
    paths  The modules or packages to analyze.
    '''
    # Ranks are upper-case letters; normalize user input.
    min = min.upper()
    max = max.upper()
    average_cc = .0
    analyzed = 0
    # Fall back to SCORE ordering when *order* names no known function.
    order_function = getattr(cc_mod, order.upper(), getattr(cc_mod, 'SCORE'))
    cc_data = analyze_cc(paths, exclude, min, max, order_function)
    if json:
        result = {}
        for key, data in cc_data:
            # list(...) fixes JSON serialization on Python 3, where map()
            # returns an iterator that json.dumps cannot encode; on Python 2
            # it is a no-op on the resulting list.
            result[key] = list(map(cc_to_dict, data))
        log(json_mod.dumps(result), noformat=True)
    else:
        for name, results in cc_data:
            cc, blocks = _print_cc_results(name, results, show_complexity)
            average_cc += cc
            analyzed += blocks

    # NOTE: in JSON mode `analyzed` stays 0, so no summary is printed.
    if average and analyzed:
        cc = average_cc / analyzed
        ranked_cc = cc_rank(cc)
        log('\n{0} blocks (classes, functions, methods) analyzed.', analyzed)
        log('Average complexity: {0}{1} ({2}){3}', RANKS_COLORS[ranked_cc],
            ranked_cc, cc, RESET)
 def analyze_complexity(self, args):
     """Run radon CC and MI analysis over ``args.path``.

     :param args: object carrying ``path``, ``exclude``, ``ignore``,
         ``no_assert`` and ``multi`` attributes.
     :return: ``(module_averages, mi_results)`` where ``module_averages`` is
         a list of ``(module, average_cc)`` pairs and ``mi_results`` a list
         of ``(mi_score, rank)`` pairs; ``(None, None)`` on any failure.
     """

     # NOTE(review): the parameter name `len` shadows the builtin inside av().
     def av(mod_cc, len):
         # Mean complexity, guarding against empty modules.
         return mod_cc / len if len != 0 else 0

     config = Config(
         exclude=args.exclude,
         ignore=args.ignore,
         order=SCORE,
         no_assert=args.no_assert,
         multi=args.multi,
         show_closures=False,
         min='A',
         max='F')
     total_cc = 0.
     total_blocks = 0
     module_averages = []

     try:
         h = CCHarvester([args.path], config)
         m = MIHarvester([args.path], config)
         cc_results = h._to_dicts()
         mi_results = []
         for filename, mi_data in m.results:
             if mi_data:
                 # continue
                 mi_results.append((mi_data['mi'], mi_data['rank']))
         for module, blocks in cc_results.items():
             module_cc = 0.
             if len(blocks) != 0:
                 for block in blocks:
                     # Skip the sentinel "error" entry for unparsable files.
                     if block != "error":
                         module_cc += block['complexity']
                         # NOTE(review): `r` is computed but never used here.
                         r = cc_rank(block['complexity'])
             module_averages.append((module, av(module_cc, len(blocks))))
             total_cc += module_cc
             total_blocks += len(blocks)
         return module_averages, mi_results
     except Exception as e:
         # Broad catch: report and signal failure to the caller.
         print (exc_info()[0], e)
         return None, None
Example #21
0
def cc_to_dict(obj):
    '''Convert a single analysis result into a plain dictionary suitable
    for JSON dumping; nested methods/closures are converted recursively.'''
    # Classify the block: Function objects are methods or functions,
    # everything else is a class.
    if isinstance(obj, Function):
        block_type = 'method' if obj.is_method else 'function'
    else:
        block_type = 'class'

    result = {
        'type': block_type,
        'rank': cc_rank(obj.complexity),
    }
    # Copy every remaining Function field that is present and non-None.
    for field in set(Function._fields) - {'is_method', 'clojures'}:
        value = getattr(obj, field, None)
        if value is not None:
            result[field] = value
    # Recurse into nested blocks, when the object exposes them.
    for key in ('methods', 'clojures'):
        if hasattr(obj, key):
            result[key] = [cc_to_dict(child) for child in getattr(obj, key)]
    return result
Example #22
0
def cc_to_dict(obj):
    '''Serialize an analysis result object into a JSON-friendly dictionary,
    recursing into any nested methods or closures.'''
    kind = 'class'
    if isinstance(obj, Function):
        kind = 'method' if obj.is_method else 'function'

    result = {'type': kind, 'rank': cc_rank(obj.complexity)}
    # All Function fields except the two handled specially above/below.
    wanted = set(Function._fields).difference(('is_method', 'clojures'))
    for field in wanted:
        value = getattr(obj, field, None)
        if value is not None:
            result[field] = value
    # Children are serialized with the same function, recursively.
    for key in ('methods', 'clojures'):
        if hasattr(obj, key):
            result[key] = [cc_to_dict(child) for child in getattr(obj, key)]
    return result
Example #23
0
def _print_cc_results(path, results, min, max, show_complexity):
    '''Print Cyclomatic Complexity results.

    :param path: the path of the module that has been analyzed
    :param min: the minimum complexity rank to show
    :param max: the maximum complexity rank to show
    :param show_complexity: if True, show the complexity score in addition to
        the complexity rank
    :return: ``(summed_complexity, number_of_blocks)`` — the sum covers every
        block, not only the displayed ones
    '''
    shown = []
    total = 0.0
    for block in results:
        rank = cc_rank(block.complexity)
        total += block.complexity
        # Only blocks whose rank lies within [min, max] are displayed.
        if min <= rank <= max:
            shown.append('{0}{1}'.format(
                ' ' * 4, _format_line(block, rank, show_complexity)))
    if shown:
        log(path)
        log_list(shown)
    return total, len(results)
Example #24
0
def cc(min='A', max='F', show_complexity=False, average=False,
       exclude=None, order='SCORE', *paths):
    '''Analyze the given Python modules and compute Cyclomatic
    Complexity (CC).

    The output can be filtered using the *min* and *max* flags. In addition
    to that, by default complexity score is not displayed.

    :param min: The minimum complexity to display (default to A).
    :param max: The maximum complexity to display (default to F).
    :param show_complexity: Whether or not to show the actual complexity
        score together with the A-F rank. Default to False.
    :param average: If True, at the end of the analysis display the average
        complexity. Default to False.
    :param paths: The modules or packages to analyze.
    '''
    # Ranks are upper-case letters; normalize user input.
    min = min.upper()
    max = max.upper()
    average_cc = .0
    analyzed = 0
    # Fall back to SCORE ordering when *order* names no known function.
    order_function = getattr(cc_mod, order.upper(), getattr(cc_mod, 'SCORE'))
    for name in iter_filenames(paths, exclude or []):
        with open(name) as fobj:
            try:
                results = sorted_results(cc_visit(fobj.read()), order_function)
            except Exception as e:
                # Unparsable file: report and keep going with the rest.
                log('{0}\n{1}ERROR: {2}', name, ' ' * 4, str(e))
                continue
        cc, blocks = _print_cc_results(name, results, min, max,
                                       show_complexity)
        average_cc += cc
        analyzed += blocks

    if average and analyzed:
        # analyzed > 0 guarantees the division is safe.
        cc = average_cc / analyzed
        ranked_cc = cc_rank(cc)
        log('\n{0} blocks (classes, functions, methods) analyzed.', analyzed)
        log('Average complexity: {0}{1} ({2}){3}', RANKS_COLORS[ranked_cc],
            ranked_cc, cc, RESET)
Example #25
0
    def setAnalysisMessages(self, messages, ccMessages):
        """Sets a new set of messages.

        *messages* maps line numbers to pyflakes diagnostics; *ccMessages*
        is an iterable of cyclomatic-complexity records carrying ``lineno``,
        ``complexity`` and related attributes.
        """
        self.__messages = dict(messages)

        for lineno in self.__messages:
            # lineno <= 0 marks buffer-wide problems with no anchor line.
            if lineno > 0:
                # Editor blocks are 0-based, message line numbers 1-based.
                self.setBlockValue(
                    self._qpart.document().findBlockByNumber(lineno - 1), 1)

        self.__ccMessages = {}
        for item in ccMessages:
            # Pyflakes messages take precedence over cc markers on a line.
            if item.lineno not in self.__messages:
                complexity = cc_rank(item.complexity)
                # Rank 'A' is fine; only worse ranks get a marker.
                if complexity != 'A':
                    msg = 'Cyclomatic complexity is ' + complexity + \
                          ' (value: ' + str(item.complexity) + ')'
                    # ord(complexity) keeps ranks comparable numerically.
                    self.__ccMessages[item.lineno] = (msg, ord(complexity))
                    self.setBlockValue(
                        self._qpart.document().findBlockByNumber(item.lineno - 1), 1)

        self.__noTooltip = False
        self.update()
Example #26
0
def _print_cc_results(path, results, min, max, show_complexity):
    '''Print Cyclomatic Complexity results.

    :param path: the path of the module that has been analyzed
    :param min: the minimum complexity rank to show
    :param max: the maximum complexity rank to show
    :param show_complexity: if True, show the complexity score in addition to
        the complexity rank
    :return: ``(summed_complexity, number_of_blocks)`` — the sum covers every
        block, not only the displayed ones
    '''
    total = 0.0
    displayed = []
    for block in results:
        rank = cc_rank(block.complexity)
        total += block.complexity
        # Skip blocks whose rank falls outside the requested [min, max] window.
        if min <= rank <= max:
            line = _format_line(block, rank, show_complexity)
            displayed.append('{0}{1}'.format(' ' * 4, line))
    if displayed:
        log(path)
        log_list(displayed)
    return total, len(results)
Example #27
0
def _print_cc_results(path, results, show_complexity, min, max, total_average):
    '''Print Cyclomatic Complexity results.

    :param path: the path of the module that has been analyzed
    :param show_complexity: if True, show the complexity score in addition to
        the complexity rank
    :param min: minimum rank (letter) a block must have to be displayed
    :param max: maximum rank (letter) a block may have to be displayed
    :param total_average: if True, every block contributes to the returned
        totals, not only the displayed ones
    :return: ``(total_complexity, counted_blocks)``
    '''
    res = []
    counted = 0
    average_cc = 0.0
    for line in results:
        ranked = cc_rank(line.complexity)
        # Letter ranks compare lexicographically, so 'A' <= rank <= 'F' works.
        in_range = min <= ranked <= max
        if in_range or total_average:
            # Single accumulation site replaces the duplicated if/elif bodies
            # of the original version.
            average_cc += line.complexity
            counted += 1
        if in_range:
            res.append(_format_line(line, ranked, show_complexity))
    if res:
        log(path)
        log_list(res, indent=1)
    return average_cc, counted
Example #28
0
File: cli.py Project: b4dtR1p/radon
def _print_cc_results(path, results, show_complexity, min, max, total_average):
    '''Print Cyclomatic Complexity results.

    :param path: the path of the module that has been analyzed
    :param show_complexity: if True, show the complexity score in addition to
        the complexity rank
    :param min: minimum rank (letter) a block must have to be displayed
    :param max: maximum rank (letter) a block may have to be displayed
    :param total_average: if True, every block contributes to the returned
        totals, not only the displayed ones
    :return: ``(total_complexity, counted_blocks)``
    '''
    res = []
    counted = 0
    average_cc = 0.0
    for line in results:
        ranked = cc_rank(line.complexity)
        # Letter ranks compare lexicographically, so 'A' <= rank <= 'F' works.
        in_range = min <= ranked <= max
        if in_range or total_average:
            # Single accumulation site replaces the duplicated if/elif bodies
            # of the original version.
            average_cc += line.complexity
            counted += 1
        if in_range:
            res.append(_format_line(line, ranked, show_complexity))
    if res:
        log(path)
        log_list(res, indent=1)
    return average_cc, counted
Example #29
0
def cc(min='A', max='F', show_complexity=False, average=False,
       exclude=None, order='SCORE', json=False, *paths):
    '''Analyze the given Python modules and compute Cyclomatic
    Complexity (CC).

    The output can be filtered using the *min* and *max* flags. In addition
    to that, by default complexity score is not displayed.

    :param min: The minimum complexity to display (default to A).
    :param max: The maximum complexity to display (default to F).
    :param show_complexity: Whether or not to show the actual complexity
        score together with the A-F rank. Default to False.
    :param average: If True, at the end of the analysis display the average
        complexity. Default to False.
    :param paths: The modules or packages to analyze.

    NOTE(review): this variant uses ``dict.iteritems`` and is Python 2 only.
    '''
    if json:
        # JSON output is handled entirely by a separate code path.
        return cc_json(*paths)
    # Ranks are upper-case letters; normalize user input.
    min = min.upper()
    max = max.upper()
    average_cc = .0
    analyzed = 0
    # Fall back to SCORE ordering when *order* names no known function.
    order_function = getattr(cc_mod, order.upper(), getattr(cc_mod, 'SCORE'))
    cc_data = analyze_cc(*paths)
    for name, results in cc_data.iteritems():
        results = sorted_results(results, order_function)
        cc, blocks = _print_cc_results(name, results, min, max,
                                       show_complexity)
        average_cc += cc
        analyzed += blocks

    if average and analyzed:
        # analyzed > 0 guarantees the division is safe.
        cc = average_cc / analyzed
        ranked_cc = cc_rank(cc)
        log('\n{0} blocks (classes, functions, methods) analyzed.', analyzed)
        log('Average complexity: {0}{1} ({2}){3}', RANKS_COLORS[ranked_cc],
            ranked_cc, cc, RESET)
def create_complexity_analysis_report():
    # Walk every student submission under the assignment folder, run the
    # radon-based complexity analysis on each production .py file, and
    # write a CSV of per-module CC ranks plus a per-student
    # average/median/max summary row.
    config_args = RadonAnalysis()

    my_drive = "/Users/shammond/GoogleDrive"
    my_semester = "6700Spring17"
    my_assignment = "Assignment5"
    # Hoist the shared prefix instead of rebuilding it three times.
    assignment_dir = my_drive + os.sep + my_semester + os.sep + my_assignment
    # 'with' guarantees the CSV handle is closed even when the analysis
    # raises (the original leaked the handle on any exception).
    with open(assignment_dir + os.sep + "code_complexity.csv", "w+") as out_file:
        out_file.write("Module Name, CC Rank, CC Index\n")
        for root, myDir, files in os.walk(assignment_dir + os.sep + "submissions"):
            name_split = root.split(os.sep)
            my_complexity_analysis = CodeComplexity.CodeComplexity(my_drive, my_semester)
            # Only directories that belong to the course package are analyzed.
            if re.search("softwareprocess", root):
                cum_score = 0.
                nbr_prod_files = 0
                complexity_score_list = []
                for myFile in files:
                    # Skip editor junk ("._*") and package markers.
                    if myFile.endswith(".py") and (not myFile.startswith("._")) and (myFile != '__init__.py'):
                        # Parenthesized so output is identical under
                        # Python 2 (print statement) and Python 3.
                        print(root + os.sep + myFile)
                        config_args.path = os.path.join(root, myFile)
                        results, mi_results = my_complexity_analysis.analyze_complexity(config_args)
                        if results is not None and len(results) > 0:
                            for module, ma in results:
                                cum_score += ma
                                nbr_prod_files += 1
                                complexity_score_list.append(ma)
                                mar = cc_rank(ma)
                                out_file.write(module + "," + mar + "," + format(ma, ".2f") + "\n")
                        else:
                            # NOTE(review): name_split[7] assumes a fixed
                            # directory depth for the student folder -- confirm
                            # against the actual GoogleDrive layout.
                            out_file.write(name_split[7] + os.sep + myFile + ", Missing, " + "\n")
                if nbr_prod_files > 0:
                    avg_score = cum_score / nbr_prod_files
                    med_score = median(complexity_score_list)
                    max_score = max(complexity_score_list)
                    out_file.write(name_split[7] + ", Average:  ," + format(avg_score, ".2f") + ", Median:  ,"
                               + format(med_score, ".2f") + ", Max:  ," + format(max_score, ".2f") + "\n")
Example #31
0
    def to_terminal(self):
        '''Yield (template, args, kwargs) triples ready to be rendered to
        a terminal, one per module, followed by an optional average line.'''
        total_cc = 0.0
        block_count = 0
        for module_name, blocks in self.results:
            # Analysis errors are forwarded as-is, flagged for the renderer.
            if 'error' in blocks:
                yield module_name, (blocks['error'],), {'error': True}
                continue
            cfg = self.config
            lines, module_cc, module_blocks = cc_to_terminal(
                blocks, cfg.show_complexity, cfg.min, cfg.max,
                cfg.total_average)
            total_cc += module_cc
            block_count += module_blocks
            if lines:
                yield module_name, (), {}
                yield lines, (), {'indent': 1}

        want_average = self.config.average or self.config.total_average
        if want_average and block_count:
            mean_cc = total_cc / block_count
            rank = cc_rank(mean_cc)
            yield ('\n{0} blocks (classes, functions, methods) analyzed.',
                   (block_count,), {})
            yield ('Average complexity: {0}{1} ({2}){3}',
                   (RANKS_COLORS[rank], rank, mean_cc, RESET), {})
Example #32
0
def compute_complexity(source):
    '''Return one "name- CC Rank:X" string per block found in *source*.'''
    return ["{0}- CC Rank:{1}".format(block.name, cc_rank(block.complexity))
            for block in cc_visit(source)]
Example #33
0
def cc(path,
       min='A',
       max='F',
       show_complexity=False,
       average=False,
       exclude=None,
       ignore=None,
       order='SCORE',
       json=False,
       no_assert=False,
       total_average=False,
       *more_paths):
    '''Analyze the given Python modules and compute Cyclomatic
    Complexity (CC).

    The output can be filtered using the *min* and *max* flags. In addition
    to that, by default complexity score is not displayed.

    :param path: The path where to find modules or packages to analyze.
    :param -n, --min <str>: The minimum complexity to display (default to A).
    :param -x, --max <str>: The maximum complexity to display (default to F).
    :param -e, --exclude <str>: Comma separated list of patterns to exclude.
        By default hidden directories (those starting with '.') are excluded.
    :param -i, --ignore <str>: Comma separated list of patterns to ignore.
        If they are directory names, radon won't even descend into them.
    :param -s, --show-complexity: Whether or not to show the actual complexity
        score together with the A-F rank. Default to False.
    :param -a, --average: If True, at the end of the analysis display the
        average complexity. Default to False.
    :param --total-average: Like `-a, --average`, but it is not influenced by
        `min` and `max`. Every analyzed block is counted, no matter whether it
        is displayed or not.
    :param -o, --order <str>: The ordering function. Can be SCORE, LINES or
        ALPHA.
    :param -j, --json: Format results in JSON.
    :param --no-assert: Do not count `assert` statements when computing
        complexity.
    :param more_paths: Additional paths to analyze.
    '''
    # NOTE: the docstring above doubles as CLI help text, so it is kept
    # verbatim.
    targets = [path]
    targets.extend(more_paths)
    min = min.upper()
    max = max.upper()
    total_cc = 0.0
    block_count = 0
    # Unknown *order* names silently fall back to SCORE ordering.
    sort_key = getattr(cc_mod, order.upper(), getattr(cc_mod, 'SCORE'))
    cc_data = analyze_cc(targets, exclude, ignore, sort_key, no_assert)
    if json:
        payload = {}
        for name, blocks in cc_data:
            payload[name] = [cc_to_dict(block) for block in blocks]
        log(json_mod.dumps(payload), noformat=True)
    else:
        for name, results in cc_data:
            module_cc, module_blocks = _print_cc_results(
                name, results, show_complexity, min, max, total_average)
            total_cc += module_cc
            block_count += module_blocks

    if (average or total_average) and block_count:
        mean_cc = total_cc / block_count
        rank = cc_rank(mean_cc)
        log('\n{0} blocks (classes, functions, methods) analyzed.',
            block_count)
        log('Average complexity: {0}{1} ({2}){3}', RANKS_COLORS[rank],
            rank, mean_cc, RESET)
Example #34
0
def _filter_by_rank(results, min, max):
    '''Generate only those results whose CC rank falls in [`min`, `max`].'''
    for block in results:
        rank = cc_rank(block.complexity)
        if min <= rank <= max:
            yield block