def test_generate_report_from_json(self):
        """Can we read in JSON results and make a report.

        Reference datasets are from a v13.0 validation_data_hsc run.

        Check for the default (srd_level, release_level) = ('design', 'FY17') and ('minimum', 'FY18')
        """
        # Manually use temporary directories here,
        #  because I can't figure out how to get py.test tmpdir fixture
        #  to work in the unittest.TestCase context.
        tmp_dir = tempfile.mkdtemp()

        srd_release_report = zip(self.srd_levels, self.release_levels,
                                 self.report_files)
        for srd_level, release_level, ref_file in srd_release_report:
            out_file_name = os.path.join(
                tmp_dir, "report_performance_test_{}_{}.rst".format(
                    srd_level, release_level))
            report_performance.run(
                [self.json_file],
                out_file_name,
                srd_level=srd_level,
                release_metrics_file=self.release_metrics_file,
                release_level=release_level)

            assert os.path.exists(out_file_name)
            assert filecmp.cmp(out_file_name, ref_file)
            # Cleanup our temp file
            os.remove(out_file_name)

        # Cleanup our temp directory
        os.removedirs(tmp_dir)
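
The test above relies on fixture attributes (self.srd_levels, self.release_levels, self.report_files, self.json_file, self.release_metrics_file) that are defined outside this snippet. A minimal setUp sketch follows; every file name is a placeholder and the import path for report_performance is an assumption, not the package's confirmed layout.

import filecmp
import os
import tempfile
import unittest

# Import path is an assumption; adjust to wherever report_performance lives.
from lsst.validate.drp import report_performance


class ReportPerformanceTestCase(unittest.TestCase):
    def setUp(self):
        # Placeholder fixture values; the real test points these at
        # reference outputs from a v13.0 validation_data_hsc run.
        self.json_file = 'validation_data_hsc_output.json'
        self.release_metrics_file = 'release_metrics.yaml'
        self.srd_levels = ['design', 'minimum']
        self.release_levels = ['FY17', 'FY18']
        self.report_files = ['report_performance_design_FY17.rst',
                             'report_performance_minimum_FY18.rst']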
Example 2
    def test_generate_report_from_json(self):
        """Can we read in JSON results and make a report.

        Reference datasets are from a v13.0 validation_data_hsc run.

        Check for the default (srd_level, release_level) = ('design', 'FY17') and ('minimum', 'FY18')
        """
        # Manually use temporary directories here,
        #  because I can't figure out how to get py.test tmpdir fixture
        #  to work in the unittest.TestCase context.
        tmp_dir = tempfile.mkdtemp()

        srd_release_report = zip(self.srd_levels, self.release_levels,
                                 self.report_files)
        for srd_level, release_level, ref_file in srd_release_report:
            out_file_name = os.path.join(
                tmp_dir, "report_performance_test_{}_{}.rst".format(
                    srd_level, release_level))
            report_performance.run([self.json_file],
                                   out_file_name,
                                   srd_level=srd_level,
                                   release_specs_package='verify_metrics',
                                   release_level=release_level)

            assert os.path.exists(out_file_name)
            with open(out_file_name) as fh:
                of_lines = fh.readlines()
            with open(ref_file) as fh:
                rf_lines = fh.readlines()
            self.maxDiff = None
            self.assertEqual(''.join(of_lines),
                             ''.join(rf_lines),
                             msg=f"Files are {out_file_name} and {ref_file}")
            # Cleanup our temp file
            os.remove(out_file_name)

        # Cleanup our temp directory
        os.removedirs(tmp_dir)
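
The manual mkdtemp / os.remove / os.removedirs bookkeeping in both tests can be avoided with tempfile.TemporaryDirectory, which cleans up even when an assertion fails. A minimal sketch, assuming report_performance is imported as in the examples above:

import os
import tempfile


def write_report_in_tmpdir(json_file, srd_level='design', release_level='FY17'):
    # The directory (and any report written into it) is removed automatically
    # when the with-block exits, including on assertion failure.
    with tempfile.TemporaryDirectory() as tmp_dir:
        out_file_name = os.path.join(
            tmp_dir, "report_performance_test_{}_{}.rst".format(
                srd_level, release_level))
        report_performance.run([json_file],
                               out_file_name,
                               srd_level=srd_level,
                               release_specs_package='verify_metrics',
                               release_level=release_level)
        assert os.path.exists(out_file_name)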
Example 3
    # 2017-07-21: MWV
    # I'm not enforcing the value of 'srd_level' with a restricted 'choices'
    # because I want to preserve the ability for somewhat arbitrary levels,
    # even in the SRD, for increased general utility of the function.
    # The acceptable values of 'choices' are defined by what's available
    # in the 'srd_metrics' file.
    parser.add_argument(
        '--srd_level',
        type=str,
        default='design',
        help='Level of srd_metric requirement to meet: '
             '["design", "minimum", "stretch"]')
    parser.add_argument('--release_specs_package',
                        default='verify_metrics',
                        help='Package with release specifications.')
    parser.add_argument(
        '--release_level',
        type=str,
        default='FY17',
        help='Level of release_metric requirement to meet: '
             '["FY17", "FY18", ...]')

    args = parser.parse_args()

    report_performance.run(args.json_files,
                           args.output_file,
                           srd_level=args.srd_level,
                           release_specs_package=args.release_specs_package,
                           release_level=args.release_level)
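
This snippet starts after the parser is created, so args.json_files and args.output_file are defined outside it. A hypothetical sketch of that front matter (argument names are inferred from the call above; descriptions and defaults are assumptions, not the package's actual wording):

import argparse

parser = argparse.ArgumentParser(
    description='Produce a performance report from validate_drp JSON output.')
# Positional list of result files; name inferred from args.json_files above.
parser.add_argument('json_files', nargs='+',
                    help='JSON files of validation results.')
# Output path; name inferred from args.output_file above.
parser.add_argument('--output_file', default='performance_report.rst',
                    help='File to write the reStructuredText report to.')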
Example 4
    # I'm not enforcing the value of 'srd_level' with a restricted 'choices'
    # because I want to preserve the ability for somewhat arbitrary levels,
    # even in the SRD, for increased general utility of the function.
    # The acceptable values of 'choices' are defined by what's available
    # in the 'srd_metrics' file.
    parser.add_argument(
        '--srd_level',
        type=str,
        default='design',
        help='Level of srd_metric requirement to meet: '
             '["design", "minimum", "stretch"]')
    parser.add_argument(
        '--release_metrics',
        default=os.path.join(getPackageDir('validate_drp'), 'etc',
                             'release_metrics.yaml'),
        help='Path of YAML file with the release specifications.')
    parser.add_argument(
        '--release_level',
        type=str,
        default='FY17',
        help='Level of release_metric requirement to meet: '
             '["FY17", "FY18", ...]')

    args = parser.parse_args()

    report_performance.run(args.json_files,
                           args.output_file,
                           srd_level=args.srd_level,
                           release_metrics_file=args.release_metrics,
                           release_level=args.release_level)