Example #1
def analyze(args, logger):
    '''Analyze the files as specified in *args*. Logging is done through the
    given *logger*.
    The *args* object should have the following attributes:

        * ``path``: the file to analyze.
        * ``exclude`` and ``ignore``: the patterns specifying which files to
            exclude and which directories to ignore.
        * ``no_assert``: if ``True``, ``assert`` statements will not be counted
            towards increasing the cyclomatic complexity.
        * ``absolute``, ``modules`` and ``average``: the thresholds for the
            complexity.
    '''
    config = Config(
        exclude=args.exclude,
        ignore=args.ignore,
        order=SCORE,
        no_assert=args.no_assert,
        show_closures=False,
        min='A',
        max='F',
    )
    h = CCHarvester(args.path, config)
    results = h._to_dicts()
    return find_infractions(args, logger, results), results
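A minimal sketch of how this helper might be called, assuming the surrounding module already imports Config, CCHarvester, SCORE and find_infractions as used above; the argparse.Namespace below simply stands in for the *args* object described in the docstring, and the attribute values are illustrative:

import argparse
import logging

args = argparse.Namespace(
    path='my_module.py',       # hypothetical file to analyze
    exclude=None,
    ignore=None,
    no_assert=False,
    absolute=10, modules=5, average=3,  # hypothetical complexity thresholds
)
logger = logging.getLogger('cc-check')
infractions, results = analyze(args, logger)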
Example #2
def get_stats(paths):
    cc = CCHarvester(paths, cc_config)
    raw = RawHarvester(paths, cc_config)
    cc.run()
    raw.run()
    
    header = ['Filename', 'SLOC', '#Functions', '#Intercepts', 'Max CC',
              'Ave CC', 'Median CC', 'Min CC']
    data = {}
    for file_data in cc.results:
        filename, cc_results = file_data
        complexity = [x.complexity for x in cc_results if hasattr(x, 'is_method') and x.is_method]
        if len(complexity) > 0:
            print "Getting Complexity for:", filename
            data[filename] = {}
            data[filename]['Filename'] = filename
            data[filename]['Max CC'] = max(complexity)
            data[filename]['Min CC'] = min(complexity)
            data[filename]['Med CC'] = np.median(complexity)
            data[filename]['Ave CC'] = np.mean(complexity)
            data[filename]['#Functions'] = len(complexity)
        else:
            print "Skipping ", filename

    for file_data in raw.results:
        filename, results = file_data
        if filename in data:
            data[filename]['SLOC'] = results['sloc']
        else:
            print "Skipping ", filename

    return data
Example #3
    def analyze(paths,
                min='A',
                max='F',
                exclude=None,
                ignore=None,
                order='SCORE',
                no_assert=False,
                include_ipynb=False,
                ipynb_cells=False):

        config = Config(
            min=min.upper(),
            max=max.upper(),
            exclude=exclude,
            ignore=ignore,
            show_complexity=False,
            average=False,
            total_average=False,
            order=getattr(cc_mod, order.upper(), getattr(cc_mod, 'SCORE')),
            no_assert=no_assert,
            show_closures=False,
            include_ipynb=include_ipynb,
            ipynb_cells=ipynb_cells,
        )

        harvester = CCHarvester(paths, config)

        return harvester._to_dicts()
Example #4
    def get_average_cc(self):
        '''
        Calculate the CC of every file in the tmp directory of Blobs
        CCHarvester returns a block of metrics for each function, class and method
        in a given file in dict format. CC is contained in this dict and is totalled
        for the file. Tmp directory is cleaned up afterwards
        '''

        results = CCHarvester(self.cc_path, self.cc_config)._to_dicts()
        if results == {}:
            rmtree('tmp')
            return 0

        total_cc = 0
        for blocks in results.values():
            file_cc = 0
            for block in blocks:
                try:
                    file_cc += block['complexity']

                except TypeError:
                    print(
                        "CC failed, file in script format with no classes/functions"
                    )

            total_cc += file_cc

        rmtree('tmp')
        return total_cc / len(results)
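For reference, _to_dicts() maps each analyzed filename to a list of block dictionaries (one per function, class or method), so the loop above is summing over entries of roughly this shape; the keys and values below are illustrative, not exhaustive:

results = {
    'tmp/example.py': [
        {'name': 'parse', 'lineno': 3, 'complexity': 4, 'rank': 'A'},
        {'name': 'render', 'lineno': 20, 'complexity': 7, 'rank': 'B'},
    ],
}
# per-file total, as accumulated in the inner loop above
file_cc = sum(block['complexity'] for block in results['tmp/example.py'])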
Example #5
def ccjson(paths):
    """
    Modified version of radon.cli.cc.

    This silently returns the digest as a JSON (string).
    """

    config = Config(
        min=_cfg.get_value('cc_min', str, 'A').upper(),
        max=_cfg.get_value('cc_max', str, 'F').upper(),
        exclude=_cfg.get_value('exclude', str, None),
        ignore=_cfg.get_value('ignore', str, None),
        show_complexity=_cfg.get_value('show_complexity', bool, False),
        average=_cfg.get_value('average', bool, False),
        total_average=_cfg.get_value('total_average', bool, False),
        order=getattr(cc_mod,
                      _cfg.get_value('order', str, 'SCORE').upper(),
                      getattr(cc_mod, 'SCORE')),
        no_assert=_cfg.get_value('no_assert', bool, False),
        show_closures=_cfg.get_value('show_closures', bool, False),
        include_ipynb=_cfg.get_value('include_ipynb', bool, False),
        ipynb_cells=_cfg.get_value('ipynb_cells', bool, False)
    )

    harvester = CCHarvester(paths, config)

    return harvester.as_json()
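Usage is then a single call; the returned string can be passed straight to json.loads (a sketch, assuming the module-level _cfg is already configured as above and the path is illustrative):

import json

digest = json.loads(ccjson(['my_package/']))
for filename, blocks in digest.items():
    print(filename, len(blocks))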
Example #6
def radon_test(f):
    filename = 'a1/a1_solution_' + f + '.py'
    with open(filename) as file:
        source = file.read()
        cv = ComplexityVisitor.from_code(source)
        res = sorted_results(cv.functions + cv.classes, order=LINES)
        output = {}
        for r in res:
            # print(f'Function: {r.name}, CC: {r.complexity}')
            output['CC'] = r.complexity

        res = analyze(source)
        # pprint(res)

        basic = {'loc': res[0],
                 'lloc': res[1],
                 'sloc': res[2],
                 'comments': res[3],
                 'multi': res[4],
                 'blank': res[5],
                 'single_comment': res[6]}
        output['Lines'] = basic

        config = Config(min='A',
                        max='F',
                        exclude=None,
                        ignore=None,
                        no_assert=False,
                        show_closures=False,
                        order=LINES)

        ch = CCHarvester([filename], config)
        res = ch.results
        x = json.loads(ch.as_json())
        # pprint(x)

        res = h_visit(source)

        hals = {'h1': res[0],
                'h2': res[1],
                'N1': res[2],
                'N2': res[3],
                'vocabulary': res[4],
                'length': res[5],
                'calculated_length': res[6],
                'volume': res[7],
                'difficulty': res[8],
                'effort': res[9],
                'time': res[10],
                'bugs': res[11]}

    output['Halstead'] = hals
    pprint({f: output})
Example #7
def get_files_complexity_data(paths, ignore):
    config = Config(
        min="A",
        max="F",
        exclude=ignore,
        ignore=ignore,
        show_complexity=True,
        average=False,
        total_average=False,
        order=getattr(cc_mod, 'SCORE'),
        no_assert=False,
        show_closures=False,
    )

    harvester = CCHarvester(paths, config)
    data = []
    for filename, functions in harvester.results:
        if not functions:
            continue
        scores = [
            function_obj.complexity
            for function_obj in functions
        ]
        average = float(sum(scores)) / len(scores)
        data.append((filename, average,))

    sorted_scores = sorted(data, key=operator.itemgetter(1), reverse=True)
    return OrderedDict(sorted_scores)
Example #8
def getCC(files):
    config = Config(exclude="",
                    ignore="",
                    order=SCORE,
                    no_assert=True,
                    show_complexity=True,
                    average=True,
                    total_average=True,
                    show_closures=True,
                    min='A',
                    max='F')  #found online
    commit_complexity = 0
    numfiles = 0
    for path in files:  # for every file, get its complexity and add it to the total
        with open(path, 'r') as f:
            results = CCHarvester(path, config).gobble(f)
        numfiles += 1
        for result in results:
            commit_complexity += int(result.complexity)

    if numfiles != 0:
        return commit_complexity / numfiles
    else:
        return None  # to avoid dividing by zero for an empty commit
Example #9
    def calcCC(self, blobUrl):

        print(blobUrl)
        url = blobUrl.split('|')[0]
        filename = blobUrl.split('|')[1]

        headers = self.getHeader()

        flag = self.checkPy(filename)

        if flag:

            resp = requests.get(url, params=headers[0], headers=headers[1])

            filePath = filename + '.py'

            with open(filePath, 'w') as tmpFile:
                tmpFile.write(resp.text)

            getFile = open(filePath, 'r')
            results = CCHarvester(filePath, self.ccConfig).gobble(getFile)
            getFile.close()
            os.remove(filePath)

            fileCC = 0

            for x in results:
                print(x.complexity)
                fileCC += int(x.complexity)

            print("Complexity of file: " + str(fileCC))
            return fileCC
        else:
            return 0
Example #10
def cc(paths,
       min='A',
       max='F',
       show_complexity=False,
       average=False,
       exclude=None,
       ignore=None,
       order='SCORE',
       json=False,
       no_assert=False,
       show_closures=False,
       total_average=False,
       xml=False,
       codeclimate=False):
    '''Analyze the given Python modules and compute Cyclomatic
    Complexity (CC).

    The output can be filtered using the *min* and *max* flags. In addition
    to that, by default complexity score is not displayed.

    :param paths: The paths where to find modules or packages to analyze. More
        than one path is allowed.
    :param -n, --min <str>: The minimum complexity to display (default to A).
    :param -x, --max <str>: The maximum complexity to display (default to F).
    :param -e, --exclude <str>: Exclude files only when their path matches one
        of these glob patterns. Usually needs quoting at the command line.
    :param -i, --ignore <str>: Ignore directories when their name matches one
        of these glob patterns: radon won't even descend into them. By default,
        hidden directories (starting with '.') are ignored.
    :param -s, --show-complexity: Whether or not to show the actual complexity
        score together with the A-F rank. Default to False.
    :param -a, --average: If True, at the end of the analysis display the
        average complexity. Default to False.
    :param --total-average: Like `-a, --average`, but it is not influenced by
        `min` and `max`. Every analyzed block is counted, no matter whether it
        is displayed or not.
    :param -o, --order <str>: The ordering function. Can be SCORE, LINES or
        ALPHA.
    :param -j, --json: Format results in JSON.
    :param --xml: Format results in XML (compatible with CCM).
    :param --codeclimate: Format results for Code Climate.
    :param --no-assert: Do not count `assert` statements when computing
        complexity.
    :param --show-closures: Add closures to the output.
    '''
    config = Config(
        min=min.upper(),
        max=max.upper(),
        exclude=exclude,
        ignore=ignore,
        show_complexity=show_complexity,
        average=average,
        total_average=total_average,
        order=getattr(cc_mod, order.upper(), getattr(cc_mod, 'SCORE')),
        no_assert=no_assert,
        show_closures=show_closures,
    )
    harvester = CCHarvester(paths, config)
    log_result(harvester, json=json, xml=xml, codeclimate=codeclimate)
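Called programmatically, this behaves like the radon command line; a minimal sketch of one invocation (paths and rank thresholds are illustrative), with log_result writing the report to stdout:

# show only blocks ranked B through F, with their numeric scores and the average
cc(['src/', 'tests/'], min='B', max='F', show_complexity=True, average=True)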
Example #11
 def radon_cc(self, f, config):
     result = True
     for ccr in CCHarvester([f], config).results:
         for r in ccr[1]:
             # Cyclomatic Complexity greater than 5 is not allowed
             if r.complexity > 5:
                 print('%s: %s' % (ccr[0], r))
                 result = False
     return result
Example #12
 def radon_cc(self, filename, config):
     passed = True
     for ccr in CCHarvester([filename], config).results:
         for block in ccr[1]:
             # Cyclomatic Complexity greater than 5 is not allowed
             if block.complexity > 5:
                 print('%s: High cyclomatic complexity - %s' %
                       (ccr[0], block))
                 passed = False
     return passed
Example #13
 def analyze_complexity(self, args):
     
     def av(mod_cc, count):
         return mod_cc / count if count != 0 else 0
     
     config = Config(
         exclude=args.exclude,
         ignore=args.ignore,
         order=SCORE,
         no_assert=args.no_assert,
         multi=args.multi,
         show_closures=False,
         min='A',
         max='F')
     total_cc = 0.
     total_blocks = 0
     module_averages = []
     
     try:
         h = CCHarvester([args.path], config)
         m = MIHarvester([args.path], config)
         cc_results = h._to_dicts()
         mi_results = []
         for filename, mi_data in m.results:
             if mi_data:
                 # continue
                 mi_results.append((mi_data['mi'], mi_data['rank']))
         for module, blocks in cc_results.items():
             module_cc = 0.
             if len(blocks) != 0:
                 for block in blocks:
                     if block != "error":
                         module_cc += block['complexity']
                         r = cc_rank(block['complexity'])
             module_averages.append((module, av(module_cc, len(blocks))))
             total_cc += module_cc
             total_blocks += len(blocks)
         return module_averages, mi_results
     except Exception as e:
         print (exc_info()[0], e)
         return None, None
Example #14
    def calc_CC(self, blob_url):

        url = blob_url.split('|')[0]  # parse out the actual file url
        filename = blob_url.split('|')[
            1]  # parse out the file name from the url

        payload_headers = self.get__params_headers()

        flag = self.check_python_file(
            filename)  # check if file is a python file
        if flag:

            resp = requests.get(
                url, params=payload_headers[0],
                headers=payload_headers[1])  # get the data from the file

            curr_time = str(time.perf_counter())  # time.clock() was removed in Python 3.8
            curr_time = curr_time.split('.')[1]
            sha = url.split(
                '/blobs/'
            )[1]  # give the temp file a unique name (sha + current processor time)

            file_path = sha + curr_time + '.py'

            with open(file_path, 'w') as tmp_file:  # temporarily write out the file's data
                tmp_file.write(resp.text)

            CC_file_get = open(file_path, 'r')  # read in the file's data
            results = CCHarvester(file_path, self.cc_config).gobble(
                CC_file_get)  # calculate the CC of the temp file
            CC_file_get.close()
            os.remove(file_path)  # delete the temp file

            file_cc = 0

            for i in results:
                file_cc += int(
                    i.complexity
                )  # append CC of all parts of the file to a total CC for the file

            print("Complexity of file: " + str(file_cc))

            return file_cc
        else:
            return 0  # if file is not a python file
Example #15
    def doWork(self):
        print(3)
        config = Config(exclude='',
                        ignore='venv',
                        order=SCORE,
                        no_assert=True,
                        show_closures=False,
                        min='A',
                        max='F')
        # CCHarvester expects an iterable of paths
        complexity = CCHarvester(['./worker1tempfolder'], config)._to_dicts()
        print(4)
        print(complexity)
        print(5)
        print(complexity.values())

        totalComplexity = 0
        for doc in complexity.values():
            docComplexity = 0
            for codeBlock in doc:
                docComplexity = docComplexity + codeBlock['complexity']
            # accumulate inside the loop so every file is counted
            totalComplexity = totalComplexity + docComplexity

        self.deletefiles()
        return totalComplexity / len(complexity)
Example #16
    def calculate_file_complexity(self, file_name):
        utils.print_to_console(
            'Worker' + WORKER_ID,
            'Calculating complexity for file {0}'.format(file_name))
        file_complexity = 0
        with open(file_name, 'r') as source_file:
            results = CCHarvester(file_name,
                                  utils.get_CCHarvester_config()).gobble(source_file)

        for result in results:
            file_complexity += int(result.complexity)

        utils.print_to_console(
            'Worker' + WORKER_ID,
            "Total complexity of {0}: {1}".format(file_name,
                                                  str(file_complexity)))
        return file_complexity
Example #17
def analyze(paths):
    """
    Analyze the files from the path specified
    """
    config = Config(
        exclude=None,
        ignore=None,
        order=SCORE,
        no_assert=True,
        show_closures=False,
        min="A",
        max="F",
        show_complexity=True,
        total_average=True,
        average=True,
    )
    h = CCHarvester(paths, config)
    log_result(h, stream=sys.stdout)
Example #18
def cyclomatic_complexity(code_path, results):
    """
    Compute cyclomatic complexity metrics.

    :param code_path: Path to the source code.
    :param results: Dictionary with the results.
    """
    h = CCHarvester([code_path],
                    Config(min='A',
                           max='F',
                           exclude=None,
                           ignore=None,
                           show_complexity=False,
                           average=False,
                           total_average=False,
                           order=SCORE,
                           no_assert=False,
                           show_closures=False))

    max_complexity = 0
    max_complexity_function = ""  # Maximum complexity function pointer
    avg_complexity = 0  # Weighted average complexity by the number of functions
    avg_weight = 0  # Total weight used to compute the average complexity
    for file_path, metrics in h.results:
        for m in metrics:
            # Average
            avg_complexity += m.complexity
            avg_weight += 1

            # Maximum
            if max_complexity < m.complexity:
                max_complexity = m.complexity
                max_complexity_function = "%s in %s:%s with complexity %s" % (
                    m.fullname, file_path, m.lineno, m.complexity)

    # Finish the weighted average
    if avg_weight > 0.0:
        avg_complexity /= avg_weight

    # Populate results
    results[AVERAGE_CYCLOMATIC_COMPLEXITY] = avg_complexity
    results[MAX_CYCLOMATIC_COMPLEXITY] = max_complexity
    results[MAX_CYCLOMATIC_COMPLEXITY_FUNCTION] = max_complexity_function
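A sketch of how this helper might be driven from the same module, assuming the three result-key constants are defined there; the placeholder values below are only for illustration:

# hypothetical stand-ins for the module-level result keys
AVERAGE_CYCLOMATIC_COMPLEXITY = 'avg_cc'
MAX_CYCLOMATIC_COMPLEXITY = 'max_cc'
MAX_CYCLOMATIC_COMPLEXITY_FUNCTION = 'max_cc_function'

results = {}
cyclomatic_complexity('path/to/module.py', results)
print(results[MAX_CYCLOMATIC_COMPLEXITY_FUNCTION])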
Example #19
    def CC_Calculator(self, blob_url):
        url = blob_url.split('|')[0]
        file_name = blob_url.split('|')[1]

        payload = {'access_token': self.token}
        header_s = {'Accept': 'application/vnd.github.v3.raw'}
        
        x = self.is_py(file_name)

        if x:

            # fetch from the parsed blob URL rather than the raw 'url|filename' string
            resp = requests.get(url, params=payload, headers=header_s)

            current_time = str(time.perf_counter())  # time.clock() was removed in Python 3.8
            current_time = current_time.split('.')[1]
            SHA = url.split('/blobs/')[1]

            path = SHA + current_time + '.py'
            with open(path, 'w') as tf:
                tf.write(resp.text)

            CC_file = open(path, 'r')
            results = CCHarvester(path, self.cc_config).gobble(CC_file)
            CC_file.close()
            os.remove(path)

            cc_file = 0

            for y in results:
                cc_file += int(y.complexity)

            return cc_file
        else:
            return 0
Example #20
def run_radon_analysis(file_path, radon_config):
    harvester = CCHarvester([file_path], radon_config)
    return harvester._to_dicts()
Example #22
def cc(paths, min=_cfg.get_value('cc_min', str, 'A'),
       max=_cfg.get_value('cc_max', str, 'F'),
       show_complexity=_cfg.get_value('show_complexity', bool, False),
       average=_cfg.get_value('average', bool, False),
       exclude=_cfg.get_value('exclude', str, None),
       ignore=_cfg.get_value('ignore', str, None),
       order=_cfg.get_value('order', str, 'SCORE'),
       json=False,
       no_assert=_cfg.get_value('no_assert', bool, False),
       show_closures=_cfg.get_value('show_closures', bool, False),
       total_average=_cfg.get_value('total_average', bool, False),
       xml=False,
       codeclimate=False,
       output_file=_cfg.get_value('output_file', str, None),
       include_ipynb=_cfg.get_value('include_ipynb', bool, False),
       ipynb_cells=_cfg.get_value('ipynb_cells', bool, False),):
    '''Analyze the given Python modules and compute Cyclomatic
    Complexity (CC).

    The output can be filtered using the *min* and *max* flags. In addition
    to that, by default complexity score is not displayed.

    :param paths: The paths where to find modules or packages to analyze. More
        than one path is allowed.
    :param -n, --min <str>: The minimum complexity to display (default to A).
    :param -x, --max <str>: The maximum complexity to display (default to F).
    :param -e, --exclude <str>: Exclude files only when their path matches one
        of these glob patterns. Usually needs quoting at the command line.
    :param -i, --ignore <str>: Ignore directories when their name matches one
        of these glob patterns: radon won't even descend into them. By default,
        hidden directories (starting with '.') are ignored.
    :param -s, --show-complexity: Whether or not to show the actual complexity
        score together with the A-F rank. Default to False.
    :param -a, --average: If True, at the end of the analysis display the
        average complexity. Default to False.
    :param --total-average: Like `-a, --average`, but it is not influenced by
        `min` and `max`. Every analyzed block is counted, no matter whether it
        is displayed or not.
    :param -o, --order <str>: The ordering function. Can be SCORE, LINES or
        ALPHA.
    :param -j, --json: Format results in JSON.
    :param --xml: Format results in XML (compatible with CCM).
    :param --codeclimate: Format results for Code Climate.
    :param --no-assert: Do not count `assert` statements when computing
        complexity.
    :param --show-closures: Add closures/inner classes to the output.
    :param -O, --output-file <str>: The output file (default to stdout).
    :param --include-ipynb: Include IPython Notebook files
    :param --ipynb-cells: Include reports for individual IPYNB cells
    '''
    config = Config(
        min=min.upper(),
        max=max.upper(),
        exclude=exclude,
        ignore=ignore,
        show_complexity=show_complexity,
        average=average,
        total_average=total_average,
        order=getattr(cc_mod, order.upper(), getattr(cc_mod, 'SCORE')),
        no_assert=no_assert,
        show_closures=show_closures,
        include_ipynb=include_ipynb,
        ipynb_cells=ipynb_cells,
    )
    harvester = CCHarvester(paths, config)
    with outstream(output_file) as stream:
        log_result(harvester, json=json, xml=xml, codeclimate=codeclimate,
                   stream=stream)
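Compared with Example #10, this variant also honours output_file; a sketch that writes the JSON digest to a file instead of stdout (the package path and filename are illustrative):

cc(['my_package/'], json=True, output_file='cc_report.json')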
Example #23
	# worker has url, get data from web
	address = get_url
	filee = requests.get(str(address), params=payload, headers=headers)
	data = filee.text
	# save the data into a file
	i = i + 1
	filename = str(filedirname) + '/' + str(i) + '.py'
	with open(str(filename), "w+") as f:
		f.write(data)

	# process for getting avg cyclomatic complexity
	cc = 0
	lent = 1
	h = CCHarvester([filename], config)._to_dicts()
	if len(h) == 0:
		cc = 0
	else:
		for m in h:
			for n in h[m]:
				lent = lent + 1
				try:
					com = n['complexity']
					cc = cc + com
				except (KeyError, TypeError):
					cc = 0
	avg_comp = float(cc) / float(lent)
	print('cc', avg_comp)

	return_value = {'cc': avg_comp}
Example #24
def analyse(paths):

    # Set up the configuration for each harvester
    config = Config(
        exclude=[],
        ignore=[],
        order=SCORE,
        no_assert= False,
        show_closures=False,
        min='A',
        max='F',
    )

    config2 = Config(
        exclude=[],
        ignore=[],
        by_function= False
    )

    config3 = Config(
        min= 'A',
        max= 'C',
        exclude=[],
        ignore=[],
        multi=True,
        show=True,
    )

    config4 = Config(
        exclude=[],
        ignore=[],
        summary=False,
        json=True,
    )
    


    """
    ----------------------
    Cyclomatic Complexity 
    ---------------------
    Cyclomatic Complexity corresponds to the number of decisions a block of code contains plus 1.
    This number (also called the McCabe number) is equal to the number of linearly independent paths
    through the code, and can be used as a guide when testing conditional logic in blocks. Radon
    analyzes the AST of a Python program to compute Cyclomatic Complexity.
    """
    
    h = CCHarvester(paths, config)
    ccResults = h._to_dicts()
    # print(ccResults)
    numOfFunctions = 0
    complexity = 0

    # for result in ccResults.values():
    #     numOfFunctions += 1
    #     complexity += result['complexity'] if isinstance(result, dict) else 0

    for path in paths:
        for i in ccResults.get(path, []):
            numOfFunctions += 1
            complexity += i["complexity"] if isinstance(i, dict) else 0

    cc = complexity/numOfFunctions if numOfFunctions != 0 else 0

    """
    -------------------
    Halstead's Metrics
    ------------------
    Halstead's goal was to identify measurable properties of software and the relations between them.
    These numbers are statically computed from the source code: Effort, Bugs, Length, Difficulty, Time, Vocabulary, Volume.
    """
    i = HCHarvester(paths, config2)
    hcResults = i._to_dicts()
  
    halsteadEffort = 0
    halsteadBugs = 0
    halsteadLength = 0 
    halsteadDifficulty = 0
    halsteadTime = 0
    halsteadVocabulary = 0 
    halsteadVolume = 0
    numberOfFiles = 0

    for result in hcResults.values():
        if 'total' in result:
            halsteadEffort += result["total"][9]
            halsteadBugs  += result["total"][11]
            halsteadLength  += result["total"][5]
            halsteadDifficulty += result["total"][8]
            halsteadTime += result["total"][10]
            halsteadVocabulary += result["total"][4]
            halsteadVolume += result["total"][7]
        numberOfFiles += 1

    avgHalsteadEffort = halsteadEffort/numberOfFiles
    avgHalsteadBugs = halsteadBugs/numberOfFiles
    avgHalsteadLength = halsteadLength /numberOfFiles
    avgHalsteadDifficulty = halsteadDifficulty/numberOfFiles
    avgHalsteadTime = halsteadTime/numberOfFiles
    avgHalsteadVocabulary = halsteadVocabulary/numberOfFiles
    avgHalsteadVolume = halsteadVolume/numberOfFiles

    
    """
    ---------------------------------------
    MI Harvester for Maintainability index
    --------------------------------------
    Maintainability Index is a software metric which measures how maintainable (easy to support and change)
    the source code is. The maintainability index is calculated as a factored formula consisting of SLOC (Source Lines Of Code),
    Cyclomatic Complexity and Halstead volume. It is used in several automated software metric tools, including the Microsoft 
    Visual Studio 2010 development environment, which uses a shifted scale (0 to 100) derivative.
    """
    j = MIHarvester(paths, config3)
    miResults = dict(j.filtered_results)

    miVal = 0 
    numOfFiles = 0
    for result in miResults.values():
        if 'mi' in result:
            miVal += result["mi"] 
        numOfFiles += 1

    mi = miVal/numOfFiles
    


    """
    ------------
    Raw Metrics
    -----------
    The following are the definitions employed by Radon:
     - LOC: The total number of lines of code. It does not necessarily correspond to the number of lines in the file.
     - LLOC: The number of logical lines of code. Every logical line of code contains exactly one statement.
     - SLOC: The number of source lines of code - not necessarily corresponding to the LLOC. [sloc]
     - Comments: The number of comment lines. Multi-line strings are not counted as comment since, to the Python interpreter, they are just strings.
     - Multi: The number of lines which represent multi-line strings.  [multi] 
     - Blanks: The number of blank lines (or whitespace-only ones).  [blank]
    """
    k = RawHarvester(paths, config4)
    rawResults = (dict(k.results))

    comments = 0
    lloc = 0
    loc = 0
    for result in rawResults.values():
        if 'comments' in result:
            comments += result['comments']
        if 'lloc' in result:
            lloc += result['lloc']
        if 'loc' in result:
            loc += result['loc']


    data = {
        "numberOfFiles" : len(paths),
        "numberOfLines" : loc,
        "numberOfLogicalLines" : lloc, 
        "numberOfComments" : comments, 
        "cyclomaticComplexity" : cc, 
        "maintainabilityIndex" : mi,
        "halsteadEffort" : avgHalsteadEffort,
        "halsteadBugs" : avgHalsteadBugs,
        "halsteadLength" :avgHalsteadLength,
        "halsteadDifficulty" : avgHalsteadDifficulty,
        "halsteadTime" : avgHalsteadTime, 
        "halsteadVocabulary" : avgHalsteadVocabulary, 
        "halsteadVolume" : avgHalsteadVolume
    }

    return data
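A sketch of how the aggregated report might be consumed, assuming the module's imports (CCHarvester, HCHarvester, MIHarvester, RawHarvester, Config, SCORE) are in place; the paths are illustrative:

import json

report = analyse(['my_package/core.py', 'my_package/utils.py'])
print(json.dumps(report, indent=2))  # cyclomaticComplexity, maintainabilityIndex, halstead*, ...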
Example #25
    def start(self, paths):

        self.paths = paths
        cc_harvester = CCHarvester(paths, self.config)
        self.cc_output = cc_harvester._to_dicts()