Example #1
    def setUp(self):
        super(HtmlFormatterTests, self).setUp()
        conf = config.PantherConfig()
        self.manager = manager.PantherManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()

        self.manager.out_file = self.tmp_fname
Example #2
 def setUp(self):
     super(FunctionalTests, self).setUp()
     # NOTE(tkelsey): panther is very sensitive to paths, so stitch
     # them up here for the testing environment.
     #
     path = os.path.join(os.getcwd(), 'panther', 'plugins')
     p_conf = p_config.PantherConfig()
     self.p_mgr = p_manager.PantherManager(p_conf, 'file')
     self.p_mgr.p_conf._settings['plugins_dir'] = path
     self.p_mgr.p_ts = p_test_set.PantherTestSet(config=p_conf)
Example #3
    def setUp(self):
        super(ManagerTests, self).setUp()
        self.profile = {}
        self.profile['include'] = {
            'any_other_function_with_shell_equals_true', 'assert_used'
        }

        self.config = config.PantherConfig()
        self.manager = manager.PantherManager(config=self.config,
                                              agg_type='file',
                                              debug=False,
                                              verbose=False)
Example #4
    def test_settings(self):
        # Can initialize a PantherConfig.

        example_key = uuid.uuid4().hex
        example_value = self.getUniqueString()
        contents = '%s: %s' % (example_key, example_value)
        f = self.useFixture(TempFile(contents))
        p_config = config.PantherConfig(f.name)

        # After initialization, can get settings.
        self.assertEqual('*.py', p_config.get_setting('plugin_name_pattern'))

        self.assertEqual({example_key: example_value}, p_config.config)
        self.assertEqual(example_value, p_config.get_option(example_key))
Example #5
    def setUp(self):
        super(TestGetOption, self).setUp()

        self.example_key = uuid.uuid4().hex
        self.example_subkey = uuid.uuid4().hex
        self.example_subvalue = uuid.uuid4().hex
        sample_yaml = textwrap.dedent(
            """
            %s:
                %s: %s
            """ %
            (self.example_key, self.example_subkey, self.example_subvalue))

        f = self.useFixture(TempFile(sample_yaml))

        self.p_config = config.PantherConfig(f.name)
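
A follow-on check for this fixture (a sketch only, assuming get_option resolves the dotted 'key.subkey' path that the key/subkey naming above suggests) might read:

    def test_get_option_subkey(self):
        # hypothetical test name; verifies that the nested value set up in
        # setUp() is reachable through a dotted option path
        option_name = '%s.%s' % (self.example_key, self.example_subkey)
        self.assertEqual(self.example_subvalue,
                         self.p_config.get_option(option_name))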
Example #6
    def test_no_issues(self, get_issue_list):
        conf = config.PantherConfig()
        self.manager = manager.PantherManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        get_issue_list.return_value = collections.OrderedDict()
        with mock.patch('panther.formatters.screen.do_print') as m:
            tmp_file = open(self.tmp_fname, 'w')
            screen.report(self.manager,
                          tmp_file,
                          panther.LOW,
                          panther.LOW,
                          lines=5)
            self.assertIn('No issues identified.',
                          '\n'.join([str(a) for a in m.call_args]))
Example #7
    def test_no_issues(self, get_issue_list):
        conf = config.PantherConfig()
        self.manager = manager.PantherManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        get_issue_list.return_value = collections.OrderedDict()
        tmp_file = open(self.tmp_fname, 'w')
        p_text.report(self.manager,
                      tmp_file,
                      panther.LOW,
                      panther.LOW,
                      lines=5)

        with open(self.tmp_fname) as f:
            data = f.read()
            self.assertIn('No issues identified.', data)
Example #8
    def test_report_baseline(self, get_issue_list):
        conf = config.PantherConfig()
        self.manager = manager.PantherManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()

        issue_x = _get_issue_instance()
        issue_x.fname = 'x'
        issue_y = _get_issue_instance()
        issue_y.fname = 'y'
        issue_z = _get_issue_instance()
        issue_z.fname = 'z'

        get_issue_list.return_value = collections.OrderedDict([
            (issue_a, [issue_x]), (issue_b, [issue_y, issue_z])
        ])

        # Validate that we're outputting the correct issues
        indent_val = ' ' * 10
        output_str_fn = 'panther.formatters.screen._output_issue_str'
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = 'ISSUE_OUTPUT_TEXT'

            tmp_file = open(self.tmp_fname, 'w')
            screen.report(self.manager,
                          tmp_file,
                          panther.LOW,
                          panther.LOW,
                          lines=5)

            calls = [
                mock.call(issue_a, '', lines=5),
                mock.call(issue_b, '', show_code=False, show_lineno=False),
                mock.call(issue_y, indent_val, lines=5),
                mock.call(issue_z, indent_val, lines=5)
            ]

            output_str.assert_has_calls(calls, any_order=True)
Example #9
    def setUp(self):
        super(CsvFormatterTests, self).setUp()
        conf = config.PantherConfig()
        self.manager = manager.PantherManager(conf, 'file')
        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.context = {
            'filename': self.tmp_fname,
            'lineno': 4,
            'linerange': [4]
        }
        self.check_name = 'hardcoded_bind_all_interfaces'
        self.issue = issue.Issue(panther.MEDIUM, panther.MEDIUM,
                                 'Possible binding to all interfaces.')
        self.manager.out_file = self.tmp_fname

        self.issue.fname = self.context['filename']
        self.issue.lineno = self.context['lineno']
        self.issue.linerange = self.context['linerange']
        self.issue.test = self.check_name

        self.manager.results.append(self.issue)
Example #10
    def setUp(self):
        super(JsonFormatterTests, self).setUp()
        conf = config.PantherConfig()
        self.manager = manager.PantherManager(conf, 'file')
        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.context = {
            'filename': self.tmp_fname,
            'lineno': 4,
            'linerange': [4]
        }
        self.check_name = 'hardcoded_bind_all_interfaces'
        self.issue = issue.Issue(panther.MEDIUM, panther.MEDIUM,
                                 'Possible binding to all interfaces.')

        self.candidates = [
            issue.Issue(panther.LOW, panther.LOW, 'Candidate A', lineno=1),
            issue.Issue(panther.HIGH, panther.HIGH, 'Candidate B', lineno=2)
        ]

        self.manager.out_file = self.tmp_fname

        self.issue.fname = self.context['filename']
        self.issue.lineno = self.context['lineno']
        self.issue.linerange = self.context['linerange']
        self.issue.test = self.check_name

        self.manager.results.append(self.issue)
        self.manager.metrics = metrics.Metrics()

        # mock up the metrics
        for key in ['_totals', 'binding.js']:
            self.manager.metrics.data[key] = {'loc': 4, 'nosec': 2}
            for (criteria, default) in constants.CRITERIA:
                for rank in constants.RANKING:
                    self.manager.metrics.data[key]['{0}.{1}'.format(
                        criteria, rank)] = 0
Example #11
 def test_bad_yaml(self):
     f = self.useFixture(TempFile("[]"))
     try:
         self.config = config.PantherConfig(f.name)
     except utils.ConfigError as e:
         self.assertIn("Error parsing file.", e.message)
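
As written, test_bad_yaml passes silently if no ConfigError is raised at all. A stricter variant (a sketch, not part of the original) would make the expected failure explicit:

    def test_bad_yaml_strict(self):
        # hypothetical tightening of the test above: parsing the list-style
        # YAML "[]" must raise utils.ConfigError, not merely be tolerated
        f = self.useFixture(TempFile("[]"))
        self.assertRaises(utils.ConfigError, config.PantherConfig, f.name)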
Example #12
 def setUp(self):
     super(TestConfigCompat, self).setUp()
     f = self.useFixture(TempFile(self.sample_yaml))
     self.config = config.PantherConfig(f.name)
Example #13
 def setUp(self):
     super(TestGetSetting, self).setUp()
     test_yaml = 'key: value'
     f = self.useFixture(TempFile(test_yaml))
     self.p_config = config.PantherConfig(f.name)
Example #14
    def test_report_nobaseline(self, get_issue_list):
        conf = config.PantherConfig()
        self.manager = manager.PantherManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        self.manager.verbose = True
        self.manager.files_list = ['binding.js']

        self.manager.scores = [{
            'SEVERITY': [0, 0, 0, 1],
            'CONFIDENCE': [0, 0, 0, 1]
        }]

        self.manager.skipped = [('abc.js', 'File is bad')]
        self.manager.excluded_files = ['def.js']

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()

        get_issue_list.return_value = [issue_a, issue_b]

        self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50}
        for category in ['SEVERITY', 'CONFIDENCE']:
            for level in ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']:
                self.manager.metrics.data['_totals']['%s.%s' %
                                                     (category, level)] = 1

        # Validate that we're outputting the correct issues
        output_str_fn = 'panther.formatters.screen._output_issue_str'
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = 'ISSUE_OUTPUT_TEXT'

            tmp_file = open(self.tmp_fname, 'w')
            screen.report(self.manager,
                          tmp_file,
                          panther.LOW,
                          panther.LOW,
                          lines=5)

            calls = [
                mock.call(issue_a, '', lines=5),
                mock.call(issue_b, '', lines=5)
            ]

            output_str.assert_has_calls(calls, any_order=True)

        # Validate that we're outputting all of the expected fields and the
        # correct values
        with mock.patch('panther.formatters.screen.do_print') as m:
            tmp_file = open(self.tmp_fname, 'w')
            screen.report(self.manager,
                          tmp_file,
                          panther.LOW,
                          panther.LOW,
                          lines=5)

            data = '\n'.join([str(a) for a in m.call_args[0][0]])

            expected = 'Run started'
            self.assertIn(expected, data)

            expected_items = [
                screen.header('Files in scope (1):'),
                '\n\tbinding.js (score: {SEVERITY: 1, CONFIDENCE: 1})'
            ]

            for item in expected_items:
                self.assertIn(item, data)

            expected = screen.header('Files excluded (1):') + '\n\tdef.js'
            self.assertIn(expected, data)

            expected = ('Total lines of code: 1000\n\tTotal lines skipped '
                        '(//nosec): 50')
            self.assertIn(expected, data)

            expected = ('Total issues (by severity):\n\t\tUndefined: 1\n\t\t'
                        'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1')
            self.assertIn(expected, data)

            expected = ('Total issues (by confidence):\n\t\tUndefined: 1\n\t\t'
                        'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1')
            self.assertIn(expected, data)

            expected = (screen.header('Files skipped (1):') +
                        '\n\tabc.js (File is bad)')
            self.assertIn(expected, data)
Example #15
def main():
    # bring our logging stuff up as early as possible
    debug = ('-d' in sys.argv or '--debug' in sys.argv)
    _init_logger(debug)
    extension_mgr = _init_extensions()

    baseline_formatters = [
        f.name
        for f in filter(lambda x: hasattr(x.plugin, '_accepts_baseline'),
                        extension_mgr.formatters)
    ]

    # now do normal startup
    parser = argparse.ArgumentParser(
        description='Panther - a Node.js source code security analyzer',
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('targets',
                        metavar='targets',
                        type=str,
                        nargs='*',
                        help='source file(s) or directories to be tested')
    parser.add_argument('-r',
                        '--recursive',
                        dest='recursive',
                        action='store_true',
                        help='find and process files in subdirectories')
    parser.add_argument(
        '-a',
        '--aggregate',
        dest='agg_type',
        action='store',
        default='file',
        type=str,
        choices=['file', 'vuln'],
        help='aggregate output by filename (default) or by vulnerability')
    parser.add_argument(
        '-n',
        '--number',
        dest='context_lines',
        action='store',
        default=3,
        type=int,
        help='maximum number of code lines to output for each issue')
    parser.add_argument(
        '-c',
        '--configfile',
        dest='config_file',
        action='store',
        default=None,
        type=str,
        help='optional config file to use for selecting plugins and '
        'overriding defaults')
    parser.add_argument(
        '-p',
        '--profile',
        dest='profile',
        action='store',
        default=None,
        type=str,
        help='profile to use (defaults to executing all tests)')
    parser.add_argument('-t',
                        '--tests',
                        dest='tests',
                        action='store',
                        default=None,
                        type=str,
                        help='comma-separated list of test IDs to run')
    parser.add_argument('-s',
                        '--skip',
                        dest='skips',
                        action='store',
                        default=None,
                        type=str,
                        help='comma-separated list of test IDs to skip')
    parser.add_argument('-l',
                        '--level',
                        dest='severity',
                        action='count',
                        default=1,
                        help='report only issues of a given severity level or '
                        'higher (-l for LOW, -ll for MEDIUM, -lll for HIGH)')
    parser.add_argument(
        '-i',
        '--confidence',
        dest='confidence',
        action='count',
        default=1,
        help='report only issues of a given confidence level or '
        'higher (-i for LOW, -ii for MEDIUM, -iii for HIGH)')
    output_format = 'screen' if sys.stdout.isatty() else 'txt'
    parser.add_argument('-f',
                        '--format',
                        dest='output_format',
                        action='store',
                        default=output_format,
                        help='specify output format',
                        choices=sorted(extension_mgr.formatter_names))
    parser.add_argument(
        '--msg-template',
        action='store',
        default=None,
        help='specify output message template'
        ' (only usable with --format custom),'
        ' see CUSTOM FORMAT section'
        ' for list of available values',
    )
    parser.add_argument('-o',
                        '--output',
                        dest='output_file',
                        action='store',
                        nargs='?',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        help='write report to filename')
    parser.add_argument(
        '-v',
        '--verbose',
        dest='verbose',
        action='store_true',
        help='output extra information like excluded and included files')
    parser.add_argument('-d',
                        '--debug',
                        dest='debug',
                        action='store_true',
                        help='turn on debug mode')
    parser.add_argument('--ignore-nosec',
                        dest='ignore_nosec',
                        action='store_true',
                        help='do not skip lines with // nosec comments')
    parser.add_argument(
        '-x',
        '--exclude',
        dest='excluded_paths',
        action='store',
        default='',
        help='comma-separated list of paths to exclude from scan '
        '(note that these are in addition to the excluded '
        'paths provided in the config file)')
    parser.add_argument('-b',
                        '--baseline',
                        dest='baseline',
                        action='store',
                        default=None,
                        help='path of a baseline report to compare against '
                        '(only JSON-formatted files are accepted)')
    parser.add_argument(
        '--ini',
        dest='ini_path',
        action='store',
        default=None,
        help='path to a .panther file that supplies command line arguments')
    parser.add_argument(
        '--version',
        action='version',
        version='%(prog)s {version}'.format(version=panther.__version__))
    parser.add_argument(
        '--nsp',
        dest='nsp',
        action='store_true',
        help='scan the package.json to find vulnerable dependencies')
    parser.set_defaults(debug=False)
    parser.set_defaults(verbose=False)
    parser.set_defaults(ignore_nosec=False)

    plugin_info = [
        "%s\t%s" % (a[0], a[1].name)
        for a in extension_mgr.plugins_by_id.items()
    ]

    plugin_list = '\n\t'.join(sorted(set(plugin_info)))
    dedent_text = textwrap.dedent('''
    CUSTOM FORMATTING
    -----------------

    Available tags:

        {abspath}, {relpath}, {line},  {test_id},
        {severity}, {msg}, {confidence}, {range}

    Example usage:

        Default template:
        panther -r examples/ --format custom --msg-template \\
        "{abspath}:{line}: {test_id}[panther]: {severity}: {msg}"

        Provides same output as:
        panther -r examples/ --format custom

        Tags can also be formatted in python string.format() style:
        panther -r examples/ --format custom --msg-template \\
        "{relpath:20.20s}: {line:03}: {test_id:^8}: DEFECT: {msg:>20}"

        See python documentation for more information about formatting style:
        https://docs.python.org/3.4/library/string.html

    The following tests were discovered and loaded:
    -----------------------------------------------
    ''')
    parser.epilog = dedent_text + "\t{0}".format(plugin_list)

    # setup work - parse arguments, and initialize PantherManager
    args = parser.parse_args()
    # --msg-template is only valid together with --format=custom
    if args.output_format != 'custom' and args.msg_template is not None:
        parser.error("--msg-template can only be used with --format=custom")

    try:
        p_conf = p_config.PantherConfig(config_file=args.config_file)
    except utils.ConfigError as e:
        LOG.error(e)
        sys.exit(2)

    # Handle .panther files in projects to pass cmdline args from file
    ini_options = _get_options_from_ini(args.ini_path, args.targets)
    if ini_options:
        # prefer command line, then ini file
        args.excluded_paths = _log_option_source(args.excluded_paths,
                                                 ini_options.get('exclude'),
                                                 'excluded paths')

        args.skips = _log_option_source(args.skips, ini_options.get('skips'),
                                        'skipped tests')

        args.tests = _log_option_source(args.tests, ini_options.get('tests'),
                                        'selected tests')
        ini_targets = ini_options.get('targets')
        if ini_targets:
            ini_targets = ini_targets.split(',')
        args.targets = _log_option_source(args.targets, ini_targets,
                                          'selected targets')
        # TODO(tmcpeak): any other useful options to pass from .panther?

    if not args.targets:
        LOG.error("No targets found in CLI or ini files, exiting.")
        sys.exit(2)
    # if the log format string was set in the options, reinitialize
    if p_conf.get_option('log_format'):
        log_format = p_conf.get_option('log_format')
        _init_logger(debug, log_format=log_format)

    try:
        profile = _get_profile(p_conf, args.profile, args.config_file)
        _log_info(args, profile)

        profile['include'].update(args.tests.split(',') if args.tests else [])
        profile['exclude'].update(args.skips.split(',') if args.skips else [])
        extension_mgr.validate_profile(profile)

    except (utils.ProfileNotFound, ValueError) as e:
        LOG.error(e)
        sys.exit(2)

    p_mgr = p_manager.PantherManager(p_conf,
                                     args.agg_type,
                                     args.debug,
                                     profile=profile,
                                     verbose=args.verbose,
                                     ignore_nosec=args.ignore_nosec)

    if args.baseline is not None:
        try:
            with open(args.baseline) as bl:
                data = bl.read()
                p_mgr.populate_baseline(data)
        except IOError:
            LOG.warning("Could not open baseline report: %s", args.baseline)
            sys.exit(2)

        if args.output_format not in baseline_formatters:
            LOG.warning('Baseline must be used with one of the following '
                        'formats: ' + str(baseline_formatters))
            sys.exit(2)

    if args.output_format != "json":
        if args.config_file:
            LOG.info("using config: %s", args.config_file)

        LOG.info("running on Python %d.%d.%d", sys.version_info.major,
                 sys.version_info.minor, sys.version_info.micro)

    # initiate file discovery step within Panther Manager
    p_mgr.discover_files(args.targets, args.recursive, args.excluded_paths)

    if not p_mgr.p_ts.tests:
        LOG.error('No tests would be run, please check the profile.')
        sys.exit(2)

    # initiate execution of tests within Panther Manager
    p_mgr.run_tests()
    LOG.debug(p_mgr.p_ma)
    LOG.debug(p_mgr.metrics)

    # initiate execution of tests within Nsp Manager
    if args.nsp:
        nsp_mgr = n_manager.NspManager(results=p_mgr.results)
        nsp_mgr.update_issues()

    # trigger output of results by Panther Manager
    sev_level = constants.RANKING[args.severity - 1]
    conf_level = constants.RANKING[args.confidence - 1]
    p_mgr.output_results(args.context_lines, sev_level, conf_level,
                         args.output_file, args.output_format,
                         args.msg_template)

    # return an exit code of 1 if there are results, 0 otherwise
    if p_mgr.results_count(sev_filter=sev_level, conf_filter=conf_level) > 0:
        sys.exit(1)
    else:
        sys.exit(0)
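
Read end to end, main() builds a PantherConfig, hands it to a PantherManager, discovers target files, runs the tests, and writes a report. A minimal programmatic sketch of that flow follows; the panther.core import paths, the 'examples/' target, and the 'txt' output choice are illustrative assumptions rather than values taken from the snippet:

import sys

from panther.core import config as p_config    # assumed module layout,
from panther.core import manager as p_manager  # mirroring the aliases above

p_conf = p_config.PantherConfig()                  # default settings, no config file
p_mgr = p_manager.PantherManager(p_conf, 'file')   # aggregate results by file
p_mgr.discover_files(['examples/'], True, '')      # targets, recursive, excluded paths
p_mgr.run_tests()
# context lines, severity/confidence filters, output stream, format, msg template
p_mgr.output_results(3, 'LOW', 'LOW', sys.stdout, 'txt', None)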
Example #16
    def test_report_nobaseline(self, get_issue_list):
        conf = config.PantherConfig()
        self.manager = manager.PantherManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        self.manager.verbose = True
        self.manager.files_list = ['binding.js']

        self.manager.scores = [{
            'SEVERITY': [0, 0, 0, 1],
            'CONFIDENCE': [0, 0, 0, 1]
        }]

        self.manager.skipped = [('abc.js', 'File is bad')]
        self.manager.excluded_files = ['def.js']

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()

        get_issue_list.return_value = [issue_a, issue_b]

        self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50}
        for category in ['SEVERITY', 'CONFIDENCE']:
            for level in ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']:
                self.manager.metrics.data['_totals']['%s.%s' %
                                                     (category, level)] = 1

        # Validate that we're outputting the correct issues
        output_str_fn = 'panther.formatters.text._output_issue_str'
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = 'ISSUE_OUTPUT_TEXT'

            tmp_file = open(self.tmp_fname, 'w')
            p_text.report(self.manager,
                          tmp_file,
                          panther.LOW,
                          panther.LOW,
                          lines=5)

            calls = [
                mock.call(issue_a, '', lines=5),
                mock.call(issue_b, '', lines=5)
            ]

            output_str.assert_has_calls(calls, any_order=True)

        # Validate that we're outputting all of the expected fields and the
        # correct values
        tmp_file = open(self.tmp_fname, 'w')
        p_text.report(self.manager,
                      tmp_file,
                      panther.LOW,
                      panther.LOW,
                      lines=5)
        with open(self.tmp_fname) as f:
            data = f.read()

            expected_items = [
                'Run started', 'Files in scope (1)', 'binding.js (score: ',
                "CONFIDENCE: 1", "SEVERITY: 1", 'Files excluded (1):',
                'def.js', 'Undefined: 1', 'Low: 1', 'Medium: 1', 'High: 1',
                'Total lines skipped ', '(//nosec): 50',
                'Total issues (by severity)', 'Total issues (by confidence)',
                'Files skipped (1)', 'abc.js (File is bad)'
            ]
            for item in expected_items:
                self.assertIn(item, data)