Example #1
    def run(self, args):
        # get the data file reader via the standard metrixpp.mpp.dbf plugin
        loader = self.get_plugin('metrixpp.mpp.dbf').get_loader()

        # iterate and print file length for every path in args
        exit_code = 0
        for path in (args if len(args) > 0 else [""]):
            file_iterator = loader.iterate_file_data(path)
            if file_iterator is None:
                utils.report_bad_path(path)
                exit_code += 1
                continue
            for file_data in file_iterator:
                print(file_data.get_path())
        return exit_code
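
The run() methods in these examples are members of a larger plugin class that is not shown. For orientation, a minimal host class might look like the sketch below; the base classes (api.Plugin, api.IRunable) and the import path are assumptions drawn from the Metrix++ extension API, so verify them against the installed version.

# Hypothetical host class for the run() snippet above; the base
# classes and import path are assumptions, not shown in these examples.
from metrixpp.mpp import api

class Plugin(api.Plugin, api.IRunable):

    def run(self, args):
        # the body of Example #1 goes here
        return 0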
Example #2
    def run(self, args):
        loader = self.get_plugin('metrixpp.mpp.dbf').get_loader()
        # get the loader for the previous database file
        loader_prev = self.get_plugin('metrixpp.mpp.dbf').get_loader_prev()

        exit_code = 0
        for path in (args if len(args) > 0 else [""]):
            added_lines = 0
            file_iterator = loader.iterate_file_data(path)
            if file_iterator is None:
                utils.report_bad_path(path)
                exit_code += 1
                continue
            for file_data in file_iterator:
                added_lines += self._compare_file(file_data, loader,
                                                  loader_prev)
            cout.notify(path, '', cout.SEVERITY_INFO, "Change trend report",
                        [('Added lines', added_lines)])
        return exit_code
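
The _compare_file() helper called above is not shown. A hedged sketch of one possible implementation follows, reusing only calls that appear in the other examples (load_file_data, get_checksum, utils.FileRegionsMatcher, api.DiffData, region.get_data); the metric name 'std.code.lines' and its 'total' field are assumptions and depend on which metrics were actually collected.

    # Hedged sketch; 'std.code.lines'/'total' are assumed metric names,
    # and 'loader' is unused here but kept to match the call above.
    def _compare_file(self, file_data, loader, loader_prev):
        file_data_prev = loader_prev.load_file_data(file_data.get_path())
        if file_data_prev is None:
            # new file: count every region's lines as added
            return sum(region.get_data('std.code.lines', 'total') or 0
                       for region in file_data.iterate_regions())
        if file_data.get_checksum() == file_data_prev.get_checksum():
            # unchanged file
            return 0
        # modified file: match regions against the previous snapshot
        matcher = utils.FileRegionsMatcher(file_data, file_data_prev)
        added = 0
        for region in file_data.iterate_regions():
            if not matcher.is_matched(region.get_id()):
                # region is new in this snapshot
                added += region.get_data('std.code.lines', 'total') or 0
            elif matcher.is_modified(region.get_id()):
                prev_id = matcher.get_prev_id(region.get_id())
                diff = api.DiffData(
                    region, file_data_prev.get_region(prev_id)).get_data(
                        'std.code.lines', 'total')
                if diff is not None and diff > 0:
                    added += diff
        return added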
Example #3
import logging

# utils, cout and api are Metrix++ helper modules (from the 'mpp'
# package); their exact import paths depend on the Metrix++ version.

def main(plugin, args):
    exit_code = 0

    loader_prev = plugin.get_plugin('metrixpp.mpp.dbf').get_loader_prev()
    loader = plugin.get_plugin('metrixpp.mpp.dbf').get_loader()
    
    paths = args if len(args) > 0 else [""]

    # Try to optimise iterative change scans
    modified_file_ids = None
    if plugin.mode != plugin.MODE_ALL:
        modified_file_ids = get_list_of_modified_files(loader, loader_prev)
        
    for path in paths:
        path = utils.preprocess_path(path)
        
        for limit in plugin.iterate_limits():
            warns_count = 0
            logging.info("Applying limit: " + str(limit))
            filters = [limit.filter]
            if modified_file_ids is not None:
                filters.append(('file_id', 'IN', modified_file_ids))
            sort_by = None
            limit_by = None
            limit_warnings = None
            if plugin.hotspots is not None:
                sort_by = limit.field
                if limit.type == "max":
                    sort_by = "-" + sort_by
                if plugin.mode == plugin.MODE_ALL:
                    # in other modes the printed warnings are counted below,
                    # so only ALL mode can push the limit into the query
                    limit_by = plugin.hotspots
                limit_warnings = plugin.hotspots
            selected_data = loader.load_selected_data(limit.namespace,
                                                      fields=[limit.field],
                                                      path=path,
                                                      filters=filters,
                                                      sort_by=sort_by,
                                                      limit_by=limit_by)
            if selected_data is None:
                utils.report_bad_path(path)
                exit_code += 1
                continue
            
            for select_data in selected_data:
                if limit_warnings is not None and limit_warnings <= 0:
                    break
                
                is_modified = None
                diff = None
                file_data = loader.load_file_data(select_data.get_path())
                file_data_prev = loader_prev.load_file_data(select_data.get_path())
                if file_data_prev is not None:
                    if file_data.get_checksum() == file_data_prev.get_checksum():
                        diff = 0
                        is_modified = False
                    else:
                        matcher = utils.FileRegionsMatcher(file_data, file_data_prev)
                        region_id = select_data.get_region().get_id()
                        if matcher.is_matched(region_id):
                            is_modified = matcher.is_modified(region_id)
                            prev_id = matcher.get_prev_id(region_id)
                            diff = api.DiffData(
                                select_data,
                                file_data_prev.get_region(prev_id)).get_data(
                                    limit.namespace, limit.field)

                if not plugin.is_mode_matched(limit.limit,
                                              select_data.get_data(limit.namespace, limit.field),
                                              diff,
                                              is_modified):
                    continue
                
                is_sup = is_metric_suppressed(limit.namespace, limit.field, loader, select_data)
                if is_sup and not plugin.no_suppress:
                    continue
                
                region_cursor = 0
                region_name = None
                if select_data.get_region() is not None:
                    if select_data.get_region().get_type() & limit.region_types == 0:
                        continue
                    region_cursor = select_data.get_region().cursor
                    region_name = select_data.get_region().name
                warns_count += 1
                exit_code += 1
                report_limit_exceeded(select_data.get_path(),
                                      region_cursor,
                                      limit.namespace,
                                      limit.field,
                                      region_name,
                                      select_data.get_data(limit.namespace, limit.field),
                                      diff,
                                      limit.limit,
                                      is_modified,
                                      is_sup)
                if limit_warnings is not None:
                    limit_warnings -= 1
                    
            cout.notify(path, None, cout.SEVERITY_INFO, "{0} regions exceeded the limit {1}".format(warns_count, str(limit)))

    return exit_code
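
get_list_of_modified_files() is referenced above but not defined in this excerpt. One plausible shape, built only from loader calls used elsewhere in these examples, is sketched below; it assumes FileData exposes a get_id() that yields the ids expected by the ('file_id', 'IN', ...) filter, which should be verified against mpp.api.

# Hedged sketch; FileData.get_id() is an assumption.
def get_list_of_modified_files(loader, loader_prev):
    modified_ids = []
    for file_data in loader.iterate_file_data(""):
        file_data_prev = loader_prev.load_file_data(file_data.get_path())
        if (file_data_prev is None
                or file_data.get_checksum() != file_data_prev.get_checksum()):
            modified_ids.append(file_data.get_id())
    return modified_ids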
Example #4
def export_to_str(out_format, paths, loader, loader_prev, nest_regions,
                  dist_columns, mode):
    exit_code = 0
    result = ""
    if out_format == 'xml':
        result += "<view>\n"
    elif out_format == 'python':
        result += "{'view': ["

    for (ind, path) in enumerate(paths):
        path = utils.preprocess_path(path)

        aggregated_data, aggregated_data_prev = load_aggregated_data_with_mode(
            loader, loader_prev, path, mode)

        aggregated_data_tree = {}
        subdirs = []
        subfiles = []
        if aggregated_data is not None:
            aggregated_data_tree = aggregated_data.get_data_tree()
            subdirs = sorted(aggregated_data.get_subdirs())
            subfiles = sorted(aggregated_data.get_subfiles())
        else:
            utils.report_bad_path(path)
            exit_code += 1
        aggregated_data_tree = append_suppressions(path, aggregated_data_tree,
                                                   loader, mode)

        if aggregated_data_prev is not None:
            aggregated_data_prev_tree = aggregated_data_prev.get_data_tree()
            aggregated_data_prev_tree = append_suppressions(
                path, aggregated_data_prev_tree, loader_prev, mode)
            aggregated_data_tree = append_diff(aggregated_data_tree,
                                               aggregated_data_prev_tree)

        aggregated_data_tree = compress_dist(aggregated_data_tree,
                                             dist_columns)

        file_data = loader.load_file_data(path)
        file_data_tree = {}
        if file_data is not None:
            file_data_tree = file_data.get_data_tree()
            file_data_prev = loader_prev.load_file_data(path)
            append_regions(file_data_tree, file_data, file_data_prev,
                           nest_regions)

        data = {
            "info": {
                "path": path,
                "id": ind + 1
            },
            "aggregated-data": aggregated_data_tree,
            "file-data": file_data_tree,
            "subdirs": subdirs,
            "subfiles": subfiles
        }

        if out_format == 'txt':
            cout_txt(data, loader)
        elif out_format == 'xml':
            result += utils.serialize_to_xml(
                data, root_name="data", digitCount=DIGIT_COUNT) + "\n"
        elif out_format == 'python':
            postfix = ""
            if ind < len(paths) - 1:
                postfix = ", "
            result += utils.serialize_to_python(data,
                                                root_name="data") + postfix

    if out_format == 'xml':
        result += "</view>"
    elif out_format == 'python':
        result += "]}"

    return (result, exit_code)
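
A call site for export_to_str() might look like the following; every argument value here is a placeholder for illustration (an empty path selects the project root, as in the other examples), and the right values for dist_columns and mode depend on the plugin's options.

# Hypothetical invocation; all argument values are placeholders.
text, rc = export_to_str('xml', [""], loader, loader_prev,
                         nest_regions=False, dist_columns=0, mode=None)
print(text)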
Example #5
import html
import logging

# utils is the Metrix++ helper module (from the 'mpp' package); the
# exact import path depends on the Metrix++ version.

def dumphtml(args, loader):
    exit_code = 0
    result = ""
    result += '<html><body>'
    for path in args:
        path = utils.preprocess_path(path)

        data = loader.load_file_data(path)
        if data is None:
            utils.report_bad_path(path)
            exit_code += 1
            continue

        file_name = data.get_path()
        # open() raises on failure instead of returning None, so guard
        # the read with try/except
        try:
            with open(file_name, 'r') as fh:
                text = fh.read()
        except (IOError, OSError):
            logging.error("can not open file '" + path + "' for reading")
            exit_code += 1
            continue

        # TODO fix highlighting of markers
        #        result += '<table><tr><td><pre>'
        #        last_pos = 0
        #        for marker in data.iterate_markers(filter_group= api.Marker.T.COMMENT |
        #                                           api.Marker.T.STRING |
        #                                           api.Marker.T.PREPROCESSOR):
        #            result += (cgi.escape(text[last_pos:marker.begin]))
        #            if marker.get_type() == api.Marker.T.STRING:
        #                result += ('<span style="color:#0000FF">')
        #            elif marker.get_type() == api.Marker.T.COMMENT:
        #                result += ('<span style="color:#009900">')
        #            elif marker.get_type() == api.Marker.T.PREPROCESSOR:
        #                result += ('<span style="color:#990000">')
        #            else:
        #                assert False, "Unknown marker type"
        #            result += (cgi.escape(text[marker.begin:marker.end]))
        #            result += ('</span>')
        #            last_pos = marker.end
        #        result += (cgi.escape(text[last_pos:]))
        #        result += ('</pre></td><td><pre>')
        result += '<table><tr><td><pre>'
        styles = [('<span style="background-color:#F0F010">',
                   '<span style="background-color:#F010F0">'),
                  ('<span style="background-color:#F0F030">',
                   '<span style="background-color:#F030F0">'),
                  ('<span style="background-color:#F0F050">',
                   '<span style="background-color:#F050F0">'),
                  ('<span style="background-color:#F0F070">',
                   '<span style="background-color:#F070F0">'),
                  ('<span style="background-color:#F0F090">',
                   '<span style="background-color:#F090F0">'),
                  ('<span style="background-color:#F0F0B0">',
                   '<span style="background-color:#F0B0F0">'),
                  ('<span style="background-color:#F0F0D0">',
                   '<span style="background-color:#F0D0F0">'),
                  ('<span style="background-color:#F0F0E0">',
                   '<span style="background-color:#F0E0F0">')]

        def proc_rec(region_id, file_data, styles, indent, pos):
            result = (styles[indent % len(styles)][pos % 2])
            region = file_data.get_region(region_id)
            result += ('<a href="#line' + str(region.get_cursor()) +
                       '" id="line' + str(region.get_cursor()) + '"></a>')
            last_pos = region.get_offset_begin()
            for (ind, sub_id) in enumerate(
                    file_data.get_region(region_id).iterate_subregion_ids()):
                subregion = file_data.get_region(sub_id)
                # html.escape(..., quote=False) matches the behaviour of
                # the removed cgi.escape
                result += (html.escape(
                    text[last_pos:subregion.get_offset_begin()], quote=False))
                result += proc_rec(sub_id, file_data, styles, indent + 3, ind)
                last_pos = subregion.get_offset_end()
            result += (html.escape(text[last_pos:region.get_offset_end()],
                                   quote=False))
            result += ('</span>')
            return result

        # region id 1 is the top-level region covering the whole file
        result += proc_rec(1, data, styles, 0, 0)
        result += ('</pre></td></tr></table>')
    result += ('</body></html>')
    print(result)
    return exit_code
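
A driver for dumphtml() would obtain the loader the same way as the other examples do; a hedged sketch with a placeholder path:

# Hypothetical driver; the path argument is a placeholder, and
# 'plugin' is the same plugin handle used in Example #3.
loader = plugin.get_plugin('metrixpp.mpp.dbf').get_loader()
exit_code = dumphtml(["src/module.cpp"], loader)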
Example #6
    def run(self, args):
        exit_code = 0

        loader_prev = self.get_plugin('metrixpp.mpp.dbf').get_loader_prev(
            none_if_empty=True)
        loader = self.get_plugin('metrixpp.mpp.dbf').get_loader()

        details = []
        for each in loader.iterate_properties():
            prev_value_str = ""
            if loader_prev is not None:
                prev = loader_prev.get_property(each.name)
                if prev is None:
                    prev_value_str = " [new]"
                elif prev != each.value:
                    prev_value_str = " [modified (was: " + prev + ")]"
            details.append((each.name, each.value + prev_value_str))
        path = self.get_plugin('metrixpp.mpp.dbf').get_dbfile_path()
        if os.environ.get('METRIXPLUSPLUS_TEST_MODE') == "True":
            # in tests paths come in as full paths; strip to the basename
            # so the gold files stay consistent
            # TODO: if other path-like arguments appear, a more general
            # solution is needed
            path = os.path.basename(path)
        cout.notify(path, '', cout.SEVERITY_INFO,
                    'Created using plugins and settings:', details)

        details = []
        for each in sorted(loader.iterate_namespace_names()):
            for field in sorted(
                    loader.get_namespace(each).iterate_field_names()):
                prev_value_str = ""
                if loader_prev is not None:
                    prev = False
                    prev_namespace = loader_prev.get_namespace(each)
                    if prev_namespace is not None:
                        prev = prev_namespace.check_field(field)
                    if not prev:
                        prev_value_str = " [new]"
                        prev_value_str = " [new]"
                details.append((each + ':' + field, prev_value_str))
        cout.notify(path, '', cout.SEVERITY_INFO, 'Collected metrics:',
                    details)

        paths = args if len(args) > 0 else [""]
        for path in paths:
            details = []
            path = utils.preprocess_path(path)

            file_iterator = loader.iterate_file_data(path=path)
            if file_iterator is None:
                utils.report_bad_path(path)
                exit_code += 1
                continue
            for each in file_iterator:
                prev_value_str = ""
                if loader_prev is not None:
                    prev = loader_prev.load_file_data(each.get_path())
                    if prev is None:
                        prev_value_str = " [new]"
                    elif prev.get_checksum() != each.get_checksum():
                        prev_value_str = " [modified]"
                details.append(
                    (each.get_path(),
                     '{0:#x}'.format(each.get_checksum()) + prev_value_str))
            cout.notify(path, '', cout.SEVERITY_INFO,
                        'Processed files and checksums:', details)

        return exit_code
Example #7
    def _export_to_stdout(self, paths):
        class StdoutWriter(object):
            # csv.writer terminates rows with '\r\n'; strip it and let
            # print() supply a plain '\n'
            def write(self, *args, **kwargs):
                print(args[0].strip())

        exit_code = 0

        columns = []
        columnNames = [
            "file", "region", "type", "modified", "line start", "line end"
        ]
        for name in sorted(self.loader.iterate_namespace_names()):
            namespace = self.loader.get_namespace(name)
            for field in sorted(namespace.iterate_field_names()):
                columns.append((name, field))
                columnNames.append(name + ":" + field)

        writer = StdoutWriter()
        csvWriter = csv.writer(writer)
        csvWriter.writerow(columnNames)

        for path in paths:
            path = utils.preprocess_path(path)

            files = self.loader.iterate_file_data(path)
            if files is None:
                utils.report_bad_path(path)
                exit_code += 1
                continue

            for file_data in files:
                matcher = None
                file_data_prev = self.loader_prev.load_file_data(
                    file_data.get_path())
                if file_data_prev is not None:
                    matcher = utils.FileRegionsMatcher(file_data,
                                                       file_data_prev)
                for reg in file_data.iterate_regions():
                    per_reg_data = []
                    per_reg_data.append(api.Region.T().to_str(reg.get_type()))
                    if matcher is not None and matcher.is_matched(reg.get_id()):
                        per_reg_data.append(matcher.is_modified(reg.get_id()))
                    else:
                        per_reg_data.append(None)
                    per_reg_data.append(reg.get_line_begin())
                    per_reg_data.append(reg.get_line_end())
                    for column in columns:
                        per_reg_data.append(reg.get_data(column[0], column[1]))
                    csvWriter.writerow([file_data.get_path(),
                                        reg.get_name()] + per_reg_data)
                per_file_data = []
                per_file_data.append('file')
                if file_data_prev is not None:
                    per_file_data.append(file_data.get_checksum() !=
                                         file_data_prev.get_checksum())
                else:
                    per_file_data.append(None)
                per_file_data.append(file_data.get_region(1).get_line_begin())
                per_file_data.append(file_data.get_region(1).get_line_end())
                for column in columns:
                    per_file_data.append(
                        file_data.get_data(column[0], column[1]))
                csvWriter.writerow([file_data.get_path(), None] +
                                   per_file_data)

        return exit_code
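
The StdoutWriter shim above exists because csv.writer only needs an object with a write() method and terminates rows with '\r\n' by default; stripping the row and re-printing it normalizes the line ending. An equivalent, shim-free wiring writes straight to sys.stdout:

# Alternative to the StdoutWriter shim: write directly to sys.stdout
# and pin the row terminator.
import csv
import sys

csvWriter = csv.writer(sys.stdout, lineterminator='\n')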