def find_complex_functions(data, filtered_files):
    """Scan every commit in *data* and collect functions whose cyclomatic
    complexity ever exceeds 15.

    Args:
        data: sized iterable of commit wrappers, each exposing ``.commit``
              (a GitPython commit object).
        filtered_files: collection of blob paths to skip entirely.

    Returns:
        dict mapping "<blob path>.<function name>" to the maximum cyclomatic
        complexity observed for that function across all commits.
    """
    print("Finding complex functions...", flush=True)

    complex_functions = {}

    # enumerate replaces the original manual counter; the unused
    # `first_commit` flag has been dropped.
    for i, c in enumerate(data, start=1):
        blobs_to_scan = []
        if i == 1:
            # First commit: scan the entire tree.
            append_blobs(blobs_to_scan, c.commit.tree)
        else:
            # Later commits: only scan blobs changed against the first parent.
            for diff in c.commit.parents[0].diff(c.commit):
                if diff.b_blob and extensions.extension_supported(diff.b_blob.path):
                    blobs_to_scan.append(diff.b_blob)

        # Trailing spaces overwrite leftovers of a longer previous status line.
        print(f"    {i}/{len(data)} ({c.commit}, {len(blobs_to_scan)} blobs)      ",
              end="\r", flush=True)

        for blob in blobs_to_scan:
            if blob.path in filtered_files:
                continue
            content = blob.data_stream.read().decode("utf-8", "backslashreplace")
            f = lizard.FileAnalyzer(lizard.get_extensions([])).analyze_source_code(blob.name, content)

            for function in f.function_list:
                if function.cyclomatic_complexity > 15:
                    distinguished_function_name = blob.path + "." + function.name
                    # Keep the worst (highest) complexity ever observed.
                    complex_functions[distinguished_function_name] = max(
                        function.cyclomatic_complexity,
                        complex_functions.get(distinguished_function_name, 0))

    print("")
    print("Done.", flush=True)
    return complex_functions
Esempio n. 2
0
 def setUp(self):
     """Prepare a captured-stdout fixture: options, one sample function,
     its file summary, and the default output scheme."""
     StreamStdoutTestCase.setUp(self)
     self.option = parse_args("app")
     sample = FunctionInfo("foo", 'FILENAME', 100)
     self.foo = sample
     self.fileSummary = FileInformation("FILENAME", 123, [sample])
     exts = get_extensions([])
     self.extensions = exts
     self.scheme = OutputScheme(exts)
Esempio n. 3
0
 def test_function_info_header_should_have_the_captions_of_external_extensions(self):
     """Header line must include the caption contributed by an external extension."""
     # ordering_index=-1 appends the extension's column at the end.
     external_extension = Mock(FUNCTION_INFO = {"xx": {"caption":"*external_extension*"}}, ordering_index=-1)
     extensions = get_extensions([external_extension])
     scheme = OutputScheme(extensions)
     print_and_save_modules([], extensions, scheme)
     # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
     self.assertEqual("  NLOC    CCN   token  PARAM  length *external_extension* location  ", sys.stdout.stream.splitlines()[1])
     self.assertFalse(scheme.any_regression())
Esempio n. 4
0
 def setUp(self):
     """Prepare a captured-stdout fixture: options, one sample function,
     its file summary, and the default output scheme."""
     StreamStdoutTestCase.setUp(self)
     self.option = parse_args("app")
     sample = FunctionInfo("foo", 'FILENAME', 100)
     self.foo = sample
     self.fileSummary = FileInformation("FILENAME", 123, [sample])
     exts = get_extensions([])
     self.extensions = exts
     self.scheme = OutputScheme(exts)
Esempio n. 5
0
 def test_function_info_header_should_have_the_captions_of_external_extensions(self):
     """Header line must include the caption contributed by an external extension."""
     # ordering_index=-1 appends the extension's column at the end.
     external_extension = Mock(FUNCTION_INFO = {"xx": {"caption":"*external_extension*"}}, ordering_index=-1)
     extensions = get_extensions([external_extension])
     scheme = OutputScheme(extensions)
     print_and_save_modules([], extensions, scheme)
     # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
     self.assertEqual("  NLOC    CCN   token  PARAM  length *external_extension* location  ", sys.stdout.stream.splitlines()[1])
     self.assertFalse(scheme.any_regression())
Esempio n. 6
0
 def test_function_info_header_should_have_the_captions_of_external_extensions(self):
     """Header must include the caption of an extension declared via the
     legacy FUNCTION_CAPTION / FUNCTION_INFO_PART attributes."""
     external_extension = Mock(FUNCTION_CAPTION = "*external_extension*", FUNCTION_INFO_PART ="xx", ordering_index=-1)
     # Mocks auto-create attributes; remove AVERAGE_CAPTION so the scheme
     # treats this extension as having no average column.
     del external_extension.AVERAGE_CAPTION
     extensions = get_extensions([external_extension])
     scheme = OutputScheme(extensions)
     print_and_save_modules([], extensions, scheme)
     # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
     self.assertEqual("  NLOC    CCN   token  PARAM  length *external_extension* location  ", sys.stdout.stream.splitlines()[1])
Esempio n. 7
0
 def test_should_insert_extension_at_the_index_when_specified(self, mock_import):
     """An extension carrying ordering_index must land at exactly that index."""
     fake_ext = Mock(ordering_index=1)
     # Mocks auto-create attributes; drop AVERAGE_CAPTION so it reads as absent.
     del fake_ext.AVERAGE_CAPTION
     mock_import.return_value = fake_ext
     result = get_extensions([fake_ext])
     self.assertEqual(fake_ext, result[1])
Esempio n. 8
0
 def test_lizard_analyze(self, auto_read):
     """Analyzing this test file itself yields one FileInformation with
     the expected nloc and average complexity."""
     infos = list(lz.analyze_files([__file__], exts=lz.get_extensions([])))
     self.assertEqual(len(infos), 1)
     info = infos[0]
     self.assertEqual(4, info.nloc)
     self.assertEqual(2.0, info.average_cyclomatic_complexity)
Esempio n. 9
0
 def test_should_use_clang_format_with_function_end_line_number_for_warning(self):
     """Warnings carry a start-end line range when display_fn_end_line is on."""
     fun = FunctionInfo("foo", 100)
     fun.end_line = 100
     fun.cyclomatic_complexity = 16
     file_stat = FileInformation("FILENAME", 1, [fun])
     option = Mock(display_fn_end_line=True, extensions=get_extensions([]))
     print_warnings(option, [(fun, "FILENAME")])
     self.assertIn("FILENAME:100-100: warning: foo has 16 CCN and 0 params (0 NLOC, 1 tokens)\n", sys.stdout.stream)
Esempio n. 10
0
 def test_sort_warning(self, print_function_info):
     """With sorting by CCN, the higher-complexity function is printed first."""
     option = Mock(display_fn_end_line=False, extensions=get_extensions([]))
     option.sorting = ['cyclomatic_complexity']
     low = FunctionInfo("foo", 100)
     low.cyclomatic_complexity = 10
     high = FunctionInfo("bar", 100)
     high.cyclomatic_complexity = 15
     print_warnings(option, [(low, "FILENAME"),(high, "FILENAME")])
     self.assertEqual('bar', print_function_info.call_args_list[0][0][0].name)
Esempio n. 11
0
 def test_schema_should_exhaust_the_result_if_there_is_regression_data(self):
     """any_regression() reports True when an extension declares regression info."""
     ext = Mock(FUNCTION_INFO={"xx": {"regression": True}}, ordering_index=-1)
     schema = OutputScheme(get_extensions([ext]))
     self.assertTrue(schema.any_regression())
Esempio n. 12
0
 def setUp(self):
     """Analyze a tiny embedded C++ sample with the fan-in/out ("io"),
     namespace ("ns") and nesting-depth ("nd") extensions enabled so each
     test can inspect the resulting lizard file object."""
     self.ext = FanInOut()
     # NOTE(review): the C++ snippet's odd indentation is inside a string
     # literal and is preserved byte-for-byte.
     self.lizard_object = FileAnalyzer(get_extensions(
         ["io", "ns", "nd"])).analyze_source_code("a.cpp", """int foo(){
                                                                           bar();
                                                                           if(a){
                                                                               b;
                                                                           }
                                                                       }
                                                           int bar(){foo();};""")
Esempio n. 13
0
 def setUp(self):
     """Analyze a tiny embedded C++ sample with the fan-in/out ("io"),
     namespace ("ns") and nesting-depth ("nd") extensions enabled so each
     test can inspect the resulting lizard file object."""
     self.ext = FanInOut()
     # NOTE(review): the C++ snippet's odd indentation is inside a string
     # literal and is preserved byte-for-byte.
     self.lizard_object = FileAnalyzer(get_extensions(
         ["io", "ns", "nd"])).analyze_source_code("a.cpp", """int foo(){
                                                                           bar();
                                                                           if(a){
                                                                               b;
                                                                           }
                                                                       }
                                                           int bar(){foo();};""")
Esempio n. 14
0
def get_token_counts(file_path, code, target_line_no):
    """Return the token_counts of the first function enclosing *target_line_no*.

    Falls back to an empty list when no function spans that line.
    """
    exts = lizard.get_extensions([]) + [LizardExtension()]
    file_info = lizard.FileAnalyzer(exts).analyze_source_code(file_path, code)
    match = next(
        (fn for fn in file_info.function_list
         if fn.start_line <= target_line_no <= fn.end_line),
        None)
    if match is not None:
        return match.token_counts
    # FIXME: How can I access global_pseudo_function here?
    return []
Esempio n. 15
0
def get_func_info_from_stream(file_path, code, line_no):
    """Analyze *code* and return the lizard function object whose line span
    contains *line_no*, or None when no function encloses it."""
    exts = lizard.get_extensions([]) + [LizardExtension()]
    file_info = lizard.FileAnalyzer(exts).analyze_source_code(file_path, code)
    for fn in file_info.function_list:
        if fn.start_line <= line_no <= fn.end_line:
            return fn
    # FIXME: How can I access global_pseudo_function here?
    return None
Esempio n. 16
0
def calculate_function_complexities(tree):
    """Compute cyclomatic complexity for every function in *tree*.

    Args:
        tree: mapping of path -> git blob.

    Returns:
        dict mapping "<path>.<function name>" -> cyclomatic complexity.
    """
    # The analyzer configuration is identical for every blob, so build it
    # once instead of re-creating it on every loop iteration.
    analyzer = lizard.FileAnalyzer(lizard.get_extensions([]))
    function_complexities = {}
    for path, blob in tree.items():
        # `codec` and `errorHandler` are module-level settings defined
        # outside this chunk — presumably "utf-8" plus an error policy.
        content = blob.data_stream.read().decode(codec, errorHandler)
        f = analyzer.analyze_source_code(blob.name, content)
        for function in f.function_list:
            function_complexities[
                path + "." + function.name] = function.cyclomatic_complexity

    return function_complexities
Esempio n. 17
0
def run_lizard(folder, concurrency=4, find_duplicates=True):
    """Analyze complexity (and optionally duplicate code) for all files in *folder*.

    Args:
        folder: root directory to scan for source files.
        concurrency: number of worker processes for the analysis pool.
        find_duplicates: when True, also run the duplicate-code extension.

    Returns:
        dict with 'complexity_by_file' and, when requested, duplicate-code
        metrics (blocks, counts and rates).
    """
    duplcode_ext = architect.utils.DuplcodeExtension()
    from importlib import import_module as im
    wordcount_ext = im('lizard_ext.lizardwordcount').LizardExtension()

    extensions = lizard.get_extensions(['mccabe', 'nd']) + [wordcount_ext]
    if find_duplicates:
        extensions.append(duplcode_ext)

    files = find_files(folder, for_lizard=True, lang_ext='')
    file_analyzer = lizard.FileAnalyzer(extensions)

    with cf.ProcessPoolExecutor(max_workers=concurrency) as executor:
        futures = {executor.submit(file_analyzer, file): file for file in files}
        complexity_by_file = {}

        for future in tqdm(cf.as_completed(futures), total=len(futures), desc='Analyzing complexity in files'):
            file = futures[future]
            logging.debug(f'Analyzed complexity for file: {file}')
            # Use the public Future API instead of the private `_exception`
            # attribute (implementation detail, may change between versions).
            if future.exception() is not None:
                logging.warning(f'Failed to analyze complexity for file: {file}')
            else:
                file_info: lizard.FileInformation = future.result()
                complexity_by_file[file] = file_info

        lizard_metrics = {'complexity_by_file': {k: file_info_to_dict(v) for k, v in complexity_by_file.items()}}
        if find_duplicates:
            # cross_file_process is a generator; exhaust it for its side effects.
            list(duplcode_ext.cross_file_process(complexity_by_file.values()))

            duplicate_blocks = []
            for duplicate_block in tqdm(duplcode_ext.get_duplicates(), desc='Analyzing duplicate code:'):
                duplicate_blocks.append([snippet_to_dict(snippet) for snippet in duplicate_block])

            logging.info(f'Total duplicate blocks: {len(duplicate_blocks)}')
            logging.info("Total duplicate rate: %.2f%%" % (duplcode_ext.duplicate_rate() * 100))
            logging.info("Total unique rate: %.2f%%" % (duplcode_ext.unique_rate() * 100))

            lizard_metrics.update({
                'duplicate_blocks': duplicate_blocks,
                'duplicate_blocks_count': len(duplicate_blocks),
                'duplicate_rate': duplcode_ext.duplicate_rate(),
                'unique_rate': duplcode_ext.unique_rate()
            })
        return lizard_metrics
Esempio n. 18
0
def runLizard(filenameList):
    """Analyze *filenameList* with the duplicate-detector, C-preprocessor,
    namespace, nesting-depth and fan-in/out extensions.

    Returns a dict with the per-file results ('fileList') and duplicate-code
    statistics ('dupInfo').
    """
    duplicates = DuplicateDetector()
    extensions = lizard.get_extensions(
        [duplicates, lizardcpre(), lizardns(), lizardnd(), lizardio()])
    outList = list(lizard.analyze_files(filenameList, exts=extensions))
    snippets = list(duplicates.get_duplicates(min_duplicate_tokens=MIN_DUP_TOKENS))
    dupInfo = {
        'duplicates': [dupInfoToDict(d) for d in snippets],
        'duplicateRate': duplicates.saved_duplicate_rate,
        'uniqueRate': duplicates.saved_unique_rate,
    }
    return {'fileList': outList, 'dupInfo': dupInfo}
Esempio n. 19
0
def analyse_repository(self, repository_name, repository_url, user_name):
    """Celery task: analyze an already-cloned repository with lizard and
    persist files, functions and aggregates, emitting socket.io progress
    events to the task's room along the way.
    """
    print('started')
    celerysocketio = SocketIO(message_queue='redis://localhost:6379/')
    self.update_state(state='RUNNING', meta={'current': 0, 'total': 100})

    repository_id = create_repository(repository_name, self.request.id,
                                      user_name)
    print(self.request.id)

    files = lizard.analyze(
        paths=[base_dir + repository_name],
        exclude_pattern=['*/node_modules/*', '*/build/*', '*/build-api/*'],
        exts=lizard.get_extensions([]))

    files_list = list(files)
    for idx, repository_file in enumerate(files_list):
        # NOTE(review): the original expression was the tautology
        # `(idx if idx != 0 else idx)` — simplified to `idx`. It may have
        # been intended as `idx + 1` so progress can reach 100%; confirm
        # intent before changing further.
        celerysocketio.emit(
            'update', {
                'state': 'RUNNING',
                'complete': (idx / len(files_list)) * 100,
                'repositoryId': repository_id,
            },
            room=self.request.id)
        self.update_state(state='RUNNING',
                          meta={
                              'current': idx,
                              'total': len(files_list)
                          })

        file_id = create_file(repository_name, repository_file, repository_id)

        create_functions(repository_file, file_id)

    create_aggregate_tables(repository_id)

    celerysocketio.emit('update', {
        'state': 'SUCCESS',
        'complete': 100,
        'repositoryId': repository_id
    },
                        room=self.request.id)

    update_repository_status(repository_name, user_name)
Esempio n. 20
0
def run_lizard(filename,
               symbolic_filename_translator=lambda file_: file_,
               filecontent=None):
    '''
    Runs lizard with the possibility of using a symbolic file translation.

    Terms:
    'nloc': non-commented lines of code (source lines of code). NOT the same
            definition as elsewhere in the maintainer_scripts, where nloc
            means every line regardless of comments.
    'token_count': number of tokens (words)
    'parameter_count': number of arguments passed to the function

    Args:
            filename(str):    The file to be analyzed; read unless
                              filecontent is given.
            filecontent(str): If not None, this str is analyzed instead.
    Returns:
            functions[dict]: all functions, including global scope ("") —
            each with 'name', 'nloc', 'cyclomatic_complexity', 'token_count',
            'parameter_count', 'start_line' and 'end_line'.
            An empty list is returned when parsing fails.
    '''
    if filecontent is None:
        with open(filename) as source:
            filecontent = source.read()

    analyzer = _SymbolicFileAnalyzer(extensions=lizard.get_extensions([]))
    analyzed = analyzer.analyze_file(symbolic_filename_translator(filename),
                                     filecontent)

    basename = os.path.basename(filename)
    if not analyzed:
        logger.debug("Unable to parse file %s", basename)
        return []
    logger.debug("Parsed file %s ", basename)
    return [func.__dict__ for func in analyzed.function_list]
Esempio n. 21
0
    def __analyze_repository(self, repository_path, files_affected, details):
        """Add code complexity information for a given repository
        using Lizard and CLOC.

        Current information includes cyclomatic complexity (ccn),
        lines of code, number of functions, tokens, blanks and comments.

        :param repository_path: repository path
        :param files_affected: paths touched by the commit under analysis;
            used to flag each analyzed file as in/out of the commit
        :param details: if True, it returns fine-grained results

        :returns  result: list of the results of the analysis
        """
        analysis_result = []

        repository_analysis = lizard.analyze(
            paths=[repository_path],
            threads=1,
            exts=lizard.get_extensions([]),
        )
        cloc = Cloc()

        for analysis in repository_analysis:
            cloc_analysis = cloc.analyze(file_path=analysis.filename)
            # Strip the repository prefix to get a repo-relative path.
            file_path = analysis.filename.replace(repository_path + "/", '')
            # A membership test already yields a bool; the original
            # `True if x else False` ternary was redundant.
            in_commit = file_path in files_affected

            result = {
                'loc': analysis.nloc,
                'ccn': analysis.CCN,
                'tokens': analysis.token_count,
                'num_funs': len(analysis.function_list),
                'file_path': file_path,
                'in_commit': in_commit,
                'blanks': cloc_analysis['blanks'],
                'comments': cloc_analysis['comments']
            }
            analysis_result.append(result)

        # TODO: implement details option

        return analysis_result
Esempio n. 22
0
def run_lizard(filename, symbolic_filename_translator=lambda file_: file_, filecontent=None):
    '''
    Runs lizard with the possibility of using a symbolic file translation.

    Terms:
    'nloc': non-commented lines of code (source lines of code). NOT the same
            definition as elsewhere in the maintainer_scripts, where nloc
            means every line regardless of comments.
    'token_count': number of tokens (words)
    'parameter_count': number of arguments passed to the function

    Args:
            filename(str):    The file to be analyzed; read unless
                              filecontent is given.
            filecontent(str): If not None, this str is analyzed instead.
    Returns:
            functions[dict]: all functions, including global scope ("") —
            each with 'name', 'nloc', 'cyclomatic_complexity', 'token_count',
            'parameter_count', 'start_line' and 'end_line'.
            An empty list is returned when parsing fails.
    '''
    if filecontent is None:
        with open(filename) as source:
            filecontent = source.read()

    # Nesting-depth ("nd") and fan-in/out ("io") extensions are enabled here.
    analyzer = _SymbolicFileAnalyzer(extensions=lizard.get_extensions(['nd', 'io']))
    analyzed = analyzer.analyze_file(symbolic_filename_translator(filename), filecontent)

    basename = os.path.basename(filename)
    if not analyzed:
        logger.debug("Unable to parse file %s", basename)
        return []
    logger.debug("Parsed file %s ", basename)
    return [func.__dict__ for func in analyzed.function_list]
Esempio n. 23
0
def main(path, function):
    """The main function for this script."""
    # Build the option namespace in one go instead of attribute-by-attribute.
    options = argparse.Namespace(
        paths=path,
        extensions=lizard.get_extensions(["cpre"], False),
        warnings_only=False,
        whitelist="whitelizard.txt",
        verbose=True,
        sorting=[],
        CCN=30,           # default maximum cyclomatic complexity
        length=300,       # default maximum function length
        arguments=8,      # default maximum number of arguments
        number=0,
        working_threads=4,
    )

    analysis = lizard.analyze(path,
                              threads=options.working_threads,
                              extensions=options.extensions)

    print(process_code_info(analysis, options.extensions))
Esempio n. 24
0
 def setUp(self):
     """Capture stdout and provide default (non-warnings-only) options."""
     StreamStdoutTestCase.setUp(self)
     self.options = Mock(warnings_only=False, extensions=get_extensions([]))
Esempio n. 25
0
 def test_should_use_clang_format_for_warning(self):
     """Warnings use the single-line clang FILE:LINE prefix by default."""
     option = Mock(display_fn_end_line=False, extensions=get_extensions([]))
     print_warnings(option, [(FunctionInfo("foo", 100), "FILENAME")])
     self.assertIn("FILENAME:100: warning: foo has 1 CCN and 0 params (0 NLOC, 1 tokens)\n", sys.stdout.stream)
Esempio n. 26
0
 def test_function_info_header_should_have_the_captions_of_external_extensions(self):
     """Detail header must include the external extension's caption column."""
     external_extension = Mock(FUNCTION_CAPTION = "*external_extension*")
     self.options = Mock(warnings_only=False, extensions = get_extensions([external_extension]))
     print_and_save_detail_information([], self.options)
     # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
     # NOTE(review): "CNN" in the expected header looks like a typo for "CCN",
     # but it is the string the code under test emits — left unchanged.
     self.assertEqual("  NLOC    CNN   token  PARAM *external_extension* function@line@filename          ", sys.stdout.stream.splitlines()[1])
Esempio n. 27
0
def get_cpp_fileinfo_with_extension(source_code, extension):
    """Analyze *source_code* as C++ with *extension* enabled; return the file info."""
    analyzer = FileAnalyzer(get_extensions([extension]))
    return analyzer.analyze_source_code("a.cpp", source_code)
Esempio n. 28
0
 def test_should_append_extension_at_the_end_by_default(self, mock_import):
     """Without an ordering_index an extension goes to the end of the list."""
     mock_import.return_value = FakeExtension()
     result = get_extensions(["my_ext"])
     self.assertEqual("fake extension", result[-1])
Esempio n. 29
0
 def test_schema_should_exhaust_the_result_if_there_is_regression_data(self):
     """any_regression() reports True when an extension declares regression info."""
     ext = Mock(FUNCTION_INFO={"xx": {"regression": True}}, ordering_index=-1)
     schema = OutputScheme(get_extensions([ext]))
     self.assertTrue(schema.any_regression())
Esempio n. 30
0
 def detect(self, source_files, auto_read):
     """Run duplicate detection over *source_files*; return detected duplicates."""
     auto_read.side_effect = lambda name: source_files[name]
     exts = get_extensions([self.detector])
     # analyze_files is lazy; exhaust it so the detector sees every file.
     list(analyze_files(source_files.keys(), exts=exts))
     return list(self.detector.get_duplicates(30))
Esempio n. 31
0
 def test_function_info_header_should_have_the_captions_of_external_extensions(self):
     """Module header must include the external extension's caption column."""
     external_extension = Mock(FUNCTION_CAPTION = "*external_extension*")
     extensions = get_extensions([external_extension])
     scheme = OutputScheme(extensions)
     print_and_save_modules([], extensions, scheme)
     # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
     self.assertEqual("  NLOC    CCN   token  PARAM *external_extension* location  ", sys.stdout.stream.splitlines()[1])
Esempio n. 32
0
def analyse_repo(repo: git.Repo, analysis_settings: 'lizard_mon.config.AnalysisSettings',
                 verbosity: int) -> 'TargetResultCache':
    """Run lizard over *repo*'s working tree and tally per-file limit violations.

    Args:
        repo: repository whose working tree is analyzed.
        analysis_settings: languages, exclusion patterns and limit thresholds.
        verbosity: 0 = silent, 1 = per-file summary, 2 = per-violation detail.

    Returns:
        A TargetResultCache with the merged overall result and one
        AnalysisResult per analyzed file.
    """
    result = TargetResultCache(AnalysisResult(), {})

    analysis_dir = os.path.relpath(repo.working_tree_dir)

    def patch_relative_exclude_patterns(pattern):
        # Anchor "./foo" / ".\foo" patterns to the analysis directory and
        # normalize separators so lizard matches them on any platform.
        if pattern.startswith("./") or pattern.startswith(".\\"):
            patched = os.path.join(analysis_dir, pattern[2:])
        else:
            patched = pattern
        patched = patched.replace("\\", "/")
        return patched

    exclusion_patterns = [
        patch_relative_exclude_patterns(pattern)
        for pattern in analysis_settings.exclusion_patterns
    ]
    for pattern in exclusion_patterns:
        print("  excluding:", pattern)
    analysis = lizard.analyze(
        paths=[analysis_dir],
        exclude_pattern=exclusion_patterns,
        threads=os.cpu_count(),
        exts=lizard.get_extensions([]),
        lans=analysis_settings.languages,
    )
    file_analysis = typing.cast(typing.Iterator[lizard.FileInformation], analysis)
    thresholds = analysis_settings.limits
    for analysed_file in file_analysis:
        if verbosity > 0:
            print(f"  - file: {analysed_file.filename} (NLOC={analysed_file.nloc})")

        violations_in_this_file = 0
        for fn in analysed_file.function_list:
            values = lizard_mon.config.AnalysisLimits(
                fn.cyclomatic_complexity,
                fn.nloc,
                len(fn.parameters),
            )

            if not values.exceeds(thresholds):
                continue

            violations_in_this_file += 1

            if verbosity > 1:
                # Only format the violation list when it will actually be
                # printed (the original computed it unconditionally).
                violations = lizard_mon.config.list_limit_violations(values, thresholds)
                print(f"    - {fn.long_name} [{fn.start_line}:{fn.end_line}]")
                print(f"      violations: {', '.join(violations)}")

        file_result = AnalysisResult(
            violation_count=violations_in_this_file,
            lines_of_code=analysed_file.nloc,
            file_count=1,
        )
        if verbosity > 0:
            print(f"    results for this file: {file_result}")
        result.overall.merge_with(file_result)
        result.files[analysed_file.filename] = file_result

    return result
Esempio n. 33
0
 def process(ext, code):
     """Analyze *code* as C++ with *ext* enabled, run its reduce step, and
     return the resulting file info."""
     file_info = FileAnalyzer(get_extensions([ext])).analyze_source_code("a.cpp", code)
     ext.reduce(file_info)
     return file_info
Esempio n. 34
0
 def process(ext, code):
     """Analyze *code* as C++ with *ext*, drive its cross-file pass, and
     return the resulting file info."""
     file_info = FileAnalyzer(get_extensions([ext])).analyze_source_code("a.cpp", code)
     # cross_file_process is a generator; exhaust it for its side effects.
     list(ext.cross_file_process([file_info]))
     return file_info
Esempio n. 35
0
 def process(ext, code):
     """Analyze *code* as C++ with *ext*, drive its cross-file pass, and
     return the resulting file info."""
     file_info = FileAnalyzer(get_extensions([ext])).analyze_source_code("a.cpp", code)
     # cross_file_process is a generator; exhaust it for its side effects.
     list(ext.cross_file_process([file_info]))
     return file_info
Esempio n. 36
0
 def setUp(self):
     """Capture stdout and build a default scheme plus one sample function."""
     StreamStdoutTestCase.setUp(self)
     exts = get_extensions([])
     self.extensions = exts
     self.scheme = OutputScheme(exts)
     self.foo = FunctionInfo("foo", 'FILENAME', 100)
Esempio n. 37
0
 def detect(self, source_files, auto_read):
     """Run duplicate detection over *source_files* in deterministic
     (sorted) order; return the detected duplicates."""
     auto_read.side_effect = lambda name: source_files[name]
     exts = get_extensions([self.detector])
     # analyze_files is lazy; exhaust it so the detector sees every file.
     list(analyze_files(sorted(source_files.keys()), exts=exts))
     return list(self.detector.get_duplicates(30))
Esempio n. 38
0
def get_python_function_list_with_extension(source_code, extension):
    """Analyze *source_code* as Python with *extension*; return its function list."""
    analyzer = FileAnalyzer(get_extensions([extension]))
    return analyzer.analyze_source_code("a.py", source_code).function_list
Esempio n. 39
0
 def test_should_append_extension_at_the_end_by_default(self, mock_import):
     """Without an ordering_index an extension goes to the end of the list."""
     mock_import.return_value = FakeExtension()
     result = get_extensions(["my_ext"])
     self.assertEqual("fake extension", result[-1])
Esempio n. 40
0
def get_cpp_fileinfo_with_extension(source_code, extension):
    """Analyze *source_code* as C++ with *extension* enabled; return the file info."""
    analyzer = FileAnalyzer(get_extensions([extension]))
    return analyzer.analyze_source_code("a.cpp", source_code)
Esempio n. 41
0
 def test_should_insert_extension_at_the_index_when_specified(self, mock_import):
     """An extension carrying ordering_index must land at exactly that index."""
     fake_ext = Mock(ordering_index=1)
     # Mocks auto-create attributes; drop AVERAGE_CAPTION so it reads as absent.
     del fake_ext.AVERAGE_CAPTION
     mock_import.return_value = fake_ext
     result = get_extensions([fake_ext])
     self.assertEqual(fake_ext, result[1])
Esempio n. 42
0
def get_cpp_function_list_with_extnesion(source_code, extension):
    """Analyze *source_code* as C++ with *extension*; return its function list.

    NOTE(review): the "extnesion" typo is part of the public name and is
    kept so existing callers keep working.
    """
    analyzer = FileAnalyzer(get_extensions([extension]))
    return analyzer.analyze_source_code("a.cpp", source_code).function_list
Esempio n. 43
0
 def test_sort_warning_with_generator(self):
     """Sorting must cope with a generator (non-list) source of warnings."""
     option = Mock(display_fn_end_line=False, extensions=get_extensions([]))
     option.sorting = ['cyclomatic_complexity']
     print_warnings(option, (x for x in []))
Esempio n. 44
0
 def test_should_use_clang_format_for_warning(self):
     """Warnings use the single-line clang FILE:LINE prefix when end lines
     are disabled."""
     self.option = Mock(display_fn_end_line=False, extensions=get_extensions([]))
     print_warnings(self.option, self.scheme, [self.foo])
     self.assertIn("FILENAME:100: warning: foo has 1 CCN and 0 params (1 NLOC, 1 tokens)\n", sys.stdout.stream)
Esempio n. 45
0
def analyze(file_paths):
    """Run lizard (with the C-preprocessor extension) over *file_paths* and
    return the processed code info."""
    exts = lizard.get_extensions(["cpre"], False)
    report = lizard.analyze(file_paths,
                            threads=4,
                            extensions=exts)
    return process_code_info(report, exts)
Esempio n. 46
0
 def setUp(self):
     """Capture stdout and build a default scheme plus one sample function."""
     StreamStdoutTestCase.setUp(self)
     exts = get_extensions([])
     self.extensions = exts
     self.scheme = OutputScheme(exts)
     self.foo = FunctionInfo("foo", 'FILENAME', 100)
Esempio n. 47
0
def get_python_function_list_with_extnesion(source_code, extension):
    """Analyze *source_code* as Python with *extension*; return its function list.

    NOTE(review): the "extnesion" typo is part of the public name and is
    kept so existing callers keep working.
    """
    analyzer = FileAnalyzer(get_extensions([extension]))
    return analyzer.analyze_source_code("a.py", source_code).function_list