Example #1
    def setUp(self):
        super().setUp()
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, "file")
        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.context = {
            "filename": self.tmp_fname,
            "lineno": 4,
            "linerange": [4],
        }
        self.check_name = "hardcoded_bind_all_interfaces"
        self.issue = issue.Issue(bandit.MEDIUM, bandit.MEDIUM,
                                 "Possible binding to all interfaces.")

        self.candidates = [
            issue.Issue(bandit.LOW, bandit.LOW, "Candidate A", lineno=1),
            issue.Issue(bandit.HIGH, bandit.HIGH, "Candidate B", lineno=2),
        ]

        self.manager.out_file = self.tmp_fname

        self.issue.fname = self.context["filename"]
        self.issue.lineno = self.context["lineno"]
        self.issue.linerange = self.context["linerange"]
        self.issue.test = self.check_name

        self.manager.results.append(self.issue)
        self.manager.metrics = metrics.Metrics()

        # mock up the metrics
        for key in ["_totals", "binding.py"]:
            self.manager.metrics.data[key] = {"loc": 4, "nosec": 2}
            for (criteria, default) in constants.CRITERIA:
                for rank in constants.RANKING:
                    self.manager.metrics.data[key][f"{criteria}.{rank}"] = 0
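The setUp fixture above references several names whose imports are not shown. A plausible import block, assuming bandit's usual bandit.core module layout (as used throughout bandit's own test suite), would be:

import tempfile

import bandit
from bandit.core import config
from bandit.core import constants
from bandit.core import issue
from bandit.core import manager
from bandit.core import metrics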
Example #2
    def setUp(self):
        super(YamlFormatterTests, self).setUp()
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, 'file')
        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.context = {'filename': self.tmp_fname,
                        'lineno': 4,
                        'linerange': [4]}
        self.check_name = 'hardcoded_bind_all_interfaces'
        self.issue = issue.Issue(bandit.MEDIUM, bandit.MEDIUM,
                                 'Possible binding to all interfaces.')

        self.candidates = [issue.Issue(bandit.LOW, bandit.LOW, 'Candidate A',
                                       lineno=1),
                           issue.Issue(bandit.HIGH, bandit.HIGH, 'Candidate B',
                                       lineno=2)]

        self.manager.out_file = self.tmp_fname

        self.issue.fname = self.context['filename']
        self.issue.lineno = self.context['lineno']
        self.issue.linerange = self.context['linerange']
        self.issue.test = self.check_name

        self.manager.results.append(self.issue)
        self.manager.metrics = metrics.Metrics()

        # mock up the metrics
        for key in ['_totals', 'binding.py']:
            self.manager.metrics.data[key] = {'loc': 4, 'nosec': 2}
            for (criteria, default) in constants.CRITERIA:
                for rank in constants.RANKING:
                    self.manager.metrics.data[key]['{0}.{1}'.format(
                        criteria, rank
                    )] = 0
Example #3
    def __init__(self, config, agg_type, debug=False, verbose=False,
                 profile=None, ignore_nosec=False):
        '''Get logger, config, AST handler, and result store ready

        :param config: config options object
        :type config: bandit.core.BanditConfig
        :param agg_type: aggregation type
        :param debug: Whether to show debug messages or not
        :param verbose: Whether to show verbose output
        :param profile: Optional profile dict to use (from cmd line)
        :param ignore_nosec: Whether to ignore #nosec or not
        :return:
        '''
        self.debug = debug
        self.verbose = verbose
        if not profile:
            profile = {}
        self.ignore_nosec = ignore_nosec
        self.b_conf = config
        self.files_list = []
        self.excluded_files = []
        self.b_ma = b_meta_ast.BanditMetaAst()
        self.skipped = []
        self.results = []
        self.baseline = []
        self.agg_type = agg_type
        self.metrics = metrics.Metrics()
        self.b_ts = b_test_set.BanditTestSet(config, profile)

        # set how many files to process between progress updates
        self.progress = b_constants.progress_increment
        self.scores = []
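For the profile parameter accepted by this constructor, a minimal sketch of a profile dict follows; the 'include'/'exclude' keys mirror bandit's profile format, and the check name is only an example taken from the snippets above.

from bandit.core import config, manager

profile = {
    "include": ["hardcoded_bind_all_interfaces"],  # run only this check
    "exclude": [],                                 # nothing explicitly disabled
}
mgr = manager.BanditManager(config.BanditConfig(), "file", profile=profile)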
Example #4
    def __init__(self,
                 config,
                 agg_type,
                 debug=False,
                 verbose=False,
                 profile_name=None,
                 ignore_nosec=False):
        '''Get logger, config, AST handler, and result store ready

        :param config: config options object
        :type config: bandit.core.BanditConfig
        :param agg_type: aggregation type
        :param debug: Whether to show debug messages or not
        :param verbose: Whether to show verbose output
        :param profile_name: Optional name of profile to use (from cmd line)
        :param ignore_nosec: Whether to ignore #nosec or not
        :return:
        '''
        self.debug = debug
        self.verbose = verbose
        self.ignore_nosec = ignore_nosec
        self.b_conf = config
        self.files_list = []
        self.excluded_files = []
        self.b_ma = b_meta_ast.BanditMetaAst()
        self.skipped = []
        self.results = []
        self.baseline = []
        self.agg_type = agg_type
        self.metrics = metrics.Metrics()

        # if the profile name was specified, try to find it in the config
        if profile_name:
            if profile_name in self.b_conf.config['profiles']:
                profile = self.b_conf.config['profiles'][profile_name]
                logger.debug("read in profile '%s': %s", profile_name, profile)
            else:
                raise utils.ProfileNotFound(self.b_conf.config_file,
                                            profile_name)
        else:
            profile = None

        self.b_ts = b_test_set.BanditTestSet(config=self.b_conf,
                                             profile=profile)

        # set how many files to process between progress updates
        self.progress = b_constants.progress_increment
        self.scores = []
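Under the older profile_name signature shown above, the name is resolved against the 'profiles' section of the loaded config, and utils.ProfileNotFound is raised when it is missing. A minimal sketch, assuming a config file that defines such a section (the file and profile names are hypothetical):

from bandit.core import config, manager, utils

conf = config.BanditConfig("bandit.yaml")  # assumed to contain a 'profiles' section
try:
    mgr = manager.BanditManager(conf, "file", profile_name="my_profile")
except utils.ProfileNotFound:
    # raised by the constructor when 'my_profile' is absent from conf.config['profiles']
    raise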
Example #5
    def __init__(
        self,
        config,
        agg_type,
        debug=False,
        verbose=False,
        quiet=False,
        profile=None,
        ignore_nosec=False,
    ):
        """Get logger, config, AST handler, and result store ready

        :param config: config options object
        :type config: bandit.core.BanditConfig
        :param agg_type: aggregation type
        :param debug: Whether to show debug messages or not
        :param verbose: Whether to show verbose output
        :param quiet: Whether to only show output in the case of an error
        :param profile: Optional profile dict to use (from cmd line)
        :param ignore_nosec: Whether to ignore #nosec or not
        :return:
        """
        self.debug = debug
        self.verbose = verbose
        self.quiet = quiet
        if not profile:
            profile = {}
        self.ignore_nosec = ignore_nosec
        self.b_conf = config
        self.files_list = []
        self.excluded_files = []
        self.b_ma = b_meta_ast.BanditMetaAst()
        self.skipped = []
        self.results = []
        self.baseline = []
        self.agg_type = agg_type
        self.metrics = metrics.Metrics()
        self.b_ts = b_test_set.BanditTestSet(config, profile)
        self.scores = []
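A minimal end-to-end sketch of driving the manager built above; discover_files, run_tests, and get_issue_list reflect bandit's manager API as commonly used, but the target path and the exact keyword defaults should be treated as assumptions.

from bandit.core import config, constants, manager

conf = config.BanditConfig()
mgr = manager.BanditManager(conf, "file")           # aggregate results per file
mgr.discover_files(["examples/"], recursive=True)   # hypothetical target directory
mgr.run_tests()
issues = mgr.get_issue_list(sev_level=constants.LOW, conf_level=constants.LOW)
print(mgr.metrics.data["_totals"], len(issues))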
Example #6
    def check_metrics(self, example_script, expect):
        '''A helper method to test the metrics being returned.

        :param example_script: Filename of an example script to test
        :param expect: dict with expected values of metrics
        '''
        self.b_mgr.metrics = metrics.Metrics()
        self.b_mgr.scores = []
        self.run_example(example_script)

        # test general metrics (excludes issue counts)
        m = self.b_mgr.metrics.data
        for k in expect:
            if k != 'issues':
                self.assertEqual(expect[k], m['_totals'][k])
        # test issue counts
        if 'issues' in expect:
            for (criteria, default) in C.CRITERIA:
                for rank in C.RANKING:
                    label = '{0}.{1}'.format(criteria, rank)
                    expected = 0
                    if expect['issues'].get(criteria, {}).get(rank):
                        expected = expect['issues'][criteria][rank]
                    self.assertEqual(expected, m['_totals'][label])
Example #7
    def check_metrics(self, example_script, expect):
        """A helper method to test the metrics being returned.

        :param example_script: Filename of an example script to test
        :param expect: dict with expected values of metrics
        """
        self.b_mgr.metrics = metrics.Metrics()
        self.b_mgr.scores = []
        self.run_example(example_script)

        # test general metrics (excludes issue counts)
        m = self.b_mgr.metrics.data
        for k in expect:
            if k != "issues":
                self.assertEqual(expect[k], m["_totals"][k])
        # test issue counts
        if "issues" in expect:
            for (criteria, default) in C.CRITERIA:
                for rank in C.RANKING:
                    label = f"{criteria}.{rank}"
                    expected = 0
                    if expect["issues"].get(criteria, {}).get(rank):
                        expected = expect["issues"][criteria][rank]
                    self.assertEqual(expected, m["_totals"][label])
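A hypothetical call to the helper above, as it might appear inside a functional test; the example script name and the counts in expect are illustrative only.

        expect = {
            "loc": 3,
            "nosec": 0,
            "issues": {"SEVERITY": {"MEDIUM": 1}, "CONFIDENCE": {"MEDIUM": 1}},
        }
        self.check_metrics("binding.py", expect)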