Exemple #1
0
    def _handler(self, request, response):
        """Run the IOOS compliance checker on the requested dataset.

        Resolves the dataset from either an OPeNDAP URL or an uploaded
        file, validates every requested checker name, writes the report
        into the working directory and attaches it to the response.

        :param request: PyWPS request with 'dataset'/'dataset_opendap',
            'test', 'criteria' and 'format' inputs.
        :param response: PyWPS response; 'output' receives the report file.
        :raises ProcessError: if no dataset or an unknown checker is given.
        """
        dataset = None
        if 'dataset_opendap' in request.inputs:
            dataset = request.inputs['dataset_opendap'][0].url
            LOGGER.debug("opendap dataset url: {}".format(dataset))
        elif 'dataset' in request.inputs:
            dataset = request.inputs['dataset'][0].file
            LOGGER.debug("opendap dataset file: {}".format(dataset))

        if not dataset:
            raise ProcessError("You need to provide a Dataset.")

        output_format = request.inputs['format'][0].data

        check_suite = CheckSuite()
        check_suite.load_all_available_checkers()
        # Validate every requested checker, not only the first one, since
        # all of them are passed to run_checker below.
        for test in request.inputs['test']:
            if test.data not in check_suite.checkers:
                raise ProcessError("Test {} is not available.".format(test.data))

        output_file = os.path.join(
            self.workdir,
            "check_report.{}".format(output_format))

        LOGGER.info("checking dataset {}".format(dataset))
        ComplianceChecker.run_checker(
            dataset,
            checker_names=[checker.data for checker in request.inputs['test']],
            verbose=True,
            criteria=request.inputs['criteria'][0].data,
            output_filename=output_file,
            output_format=output_format)
        response.outputs['output'].file = output_file
        # Typo fix in the user-visible status message ("finshed").
        response.update_status("compliance checker finished.", 100)
        return response
async def cc_report(args):
    """Run compliance checks for one OPeNDAP URL and report the result.

    NOTE(review): ``url`` and ``report_fmt`` are free variables here —
    they must be bound in an enclosing/global scope; confirm at call site.

    :param args: argparse.Namespace with ``verbose``, ``format``,
        ``criteria``, ``test`` and ``output`` attributes.
    """
    if args.verbose > 1:
        print(f'Checking OPeNDAP URL: {url}')

    if args.format == 'summary':
        cs = CheckSuite()
        # Map the criteria name to the scoring limit used by get_points();
        # default to 'normal' (2) so `limit` can never be unbound.
        limit = {'strict': 1, 'normal': 2, 'lenient': 3}.get(args.criteria, 2)

        ds = cs.load_dataset(url)
        skip_checks = ()
        score_groups = cs.run(ds, skip_checks, *args.test)

        # Always use sorted tests (groups) so they print in correct order
        reports = {}
        for checker, rpair in sorted(score_groups.items()):
            groups, _ = rpair
            _, points, out_of = cs.get_points(groups, limit)
            reports[checker] = 100 * float(points) / float(out_of)

        print(report_fmt.format(url, *[reports[t] for t in sorted(args.test)]))
        sys.stdout.flush()
    else:
        # Send the compliance report to stdout
        ComplianceChecker.run_checker(url, args.test, args.verbose, args.criteria,
                                      args.output, args.format)
Exemple #3
0
    def test_multi_checker_return_value(self):
        """A failing checker in a multi-checker run must fail the whole run."""
        # The CF 1.6 check alone passes on the gold-standard point file.
        passed, _ = ComplianceChecker.run_checker(
            ds_loc=STATIC_FILES["ncei_gold_point_1"],
            checker_names=["cf:1.6"],
            criteria="strict",
            verbose=0,
            output_filename=self.path,
            output_format="text",
        )
        self.assertTrue(passed)

        # ACDD produces errors on the same file, so the combined CF+ACDD
        # run must report failure overall.
        passed, _ = ComplianceChecker.run_checker(
            ds_loc=STATIC_FILES["ncei_gold_point_1"],
            checker_names=["acdd", "cf"],
            criteria="strict",
            verbose=0,
            output_filename=self.path,
            output_format="text",
        )
        self.assertFalse(passed)
Exemple #4
0
    def test_multi_checker_return_value(self):
        '''Any failing checker makes the overall return status a failure.'''
        kwargs = dict(
            ds_loc=STATIC_FILES['ncei_gold_point_1'],
            verbose=0,
            criteria='strict',
            output_filename=self.path,
            output_format='text',
        )

        # CF alone passes on this file.
        ok, _ = ComplianceChecker.run_checker(checker_names=['cf'], **kwargs)
        self.assertTrue(ok)

        # Adding ACDD introduces errors; the combined status must fail.
        ok, _ = ComplianceChecker.run_checker(checker_names=['acdd', 'cf'], **kwargs)
        self.assertFalse(ok)
Exemple #5
0
    def test_multi_checker_return_value(self):
        '''One failing checker must turn the combined run into a failure.'''
        def run(names):
            # Helper: run the given checkers against the gold point file.
            status, _ = ComplianceChecker.run_checker(
                ds_loc=STATIC_FILES['ncei_gold_point_1'],
                verbose=0,
                criteria='strict',
                checker_names=names,
                output_filename=self.path,
                output_format='text')
            return status

        # CF alone is expected to pass.
        self.assertTrue(run(['cf']))
        # ACDD adds errors, so CF+ACDD must fail overall.
        self.assertFalse(run(['acdd', 'cf']))
    def _handler(self, request, response):
        """Run the compliance checker on the requested dataset.

        The dataset is taken either from an OPeNDAP URL or an uploaded
        file; the report is written into the working directory and
        attached to the response output.

        :raises ProcessError: if no dataset input is provided.
        """
        dataset = None
        if 'dataset_opendap' in request.inputs:
            dataset = request.inputs['dataset_opendap'][0].url
            LOGGER.debug("opendap dataset url: {}".format(dataset))
        elif 'dataset' in request.inputs:
            dataset = request.inputs['dataset'][0].file
            LOGGER.debug("opendap dataset file: {}".format(dataset))

        if not dataset:
            raise ProcessError("You need to provide a Dataset.")

        output_format = request.inputs['format'][0].data

        check_suite = CheckSuite()
        check_suite.load_all_available_checkers()

        output_file = os.path.join(
            self.workdir,
            "check_report.{}".format(output_format))

        LOGGER.info("checking dataset {}".format(dataset))
        ComplianceChecker.run_checker(
            dataset,
            checker_names=[checker.data for checker in request.inputs['test']],
            verbose=True,
            criteria=request.inputs['criteria'][0].data,
            output_filename=output_file,
            output_format=output_format)
        response.outputs['output'].file = output_file
        # Typo fix in the user-visible status message ("finshed").
        response.update_status("compliance checker finished.", 100)
        return response
Exemple #7
0
 def test_retrieve_getcaps(self):
     """Simulate retrieving an SOS GetCapabilities document."""
     url = "http://data.oceansmap.com/thredds/sos/caricoos_ag/VIA/VIA.ncml"
     httpretty.register_uri(httpretty.GET, url,
                            content_type="text/xml",
                            body=self.resp)
     # The HEAD response is mocked so the compliance checker sees an XML
     # document rather than an OPeNDAP source.
     ComplianceChecker.run_checker(url, ['ioos'], 1, 'normal')
 def test_retrieve_describesensor(self):
     """Simulate retrieving an SOS DescribeSensor document."""
     url = "http://data.oceansmap.com/thredds/sos/caricoos_ag/VIA/VIA.ncml?request=describesensor&service=sos&procedure=urn:ioos:station:ncsos:VIA&outputFormat=text/xml%3Bsubtype%3D%22sensorML/1.0.1/profiles/ioos_sos/1.0%22&version=1.0.0"
     httpretty.register_uri(httpretty.GET, url,
                            content_type="text/xml",
                            body=self.resp)
     # The HEAD response is mocked so the compliance checker sees an XML
     # document rather than an OPeNDAP source.
     ComplianceChecker.run_checker(url, ['ioos'], 1, 'normal')
def main(args):
    """Check every OPeNDAP URL found in the given THREDDS catalogs.

    In 'summary' mode a CSV-style score line is printed per URL;
    otherwise the full compliance report is written via run_checker.

    :param args: argparse.Namespace with ``catalog_urls``, ``test``,
        ``criteria``, ``format``, ``output`` and ``verbose`` attributes.
    """
    if args.format == 'summary':
        hdr_fmt = '{},' * len(args.test)
        rpt_fmt = '{:.1f},' * len(args.test)
        report_fmt = '{},' + rpt_fmt[:-1]
        print(('{},' + hdr_fmt[:-1]).format('url', *sorted(args.test)))

    for cat in args.catalog_urls:
        if args.verbose > 1:
            print(f'Opening catalog_url: {cat}')
        for url in get_opendap_urls(cat):

            if args.verbose > 1:
                print(f'Checking OPeNDAP URL: {url}')

            if args.format == 'summary':
                cs = CheckSuite()
                # Map criteria to the scoring limit used by get_points();
                # default to 'normal' (2) so `limit` can never be unbound.
                limit = {'strict': 1, 'normal': 2, 'lenient': 3}.get(args.criteria, 2)

                try:
                    ds = cs.load_dataset(url)
                except ValueError as e:
                    print(f'Failed to get report for {url}')
                    print(str(e))
                    continue

                skip_checks = ()
                score_groups = cs.run(ds, skip_checks, *args.test)

                # Always use sorted tests (groups) so they print in correct order
                reports = {}
                for checker, rpair in sorted(score_groups.items()):
                    groups, _ = rpair
                    _, points, out_of = cs.get_points(groups, limit)
                    reports[checker] = 100 * float(points) / float(out_of)

                print(report_fmt.format(url, *[reports[t] for t in sorted(args.test)]))
                sys.stdout.flush()

            else:
                # Send the compliance report to stdout
                ComplianceChecker.run_checker(url, args.test, args.verbose, args.criteria,
                                              args.output, args.format)
def main():
    """CLI entry point: run the compliance checker on one dataset."""
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset_location', nargs=1,
                        help="Defines the location of the dataset to be checked.")
    parser.add_argument('--test', '-t', '--test=', '-t=',
                        help="Select the Checks you want to perform.  Either all (default), cf, ioos, or acdd.",
                        nargs='+', default=[],
                        choices=ComplianceCheckerCheckSuite.checkers.keys())
    parser.add_argument('--criteria', '-c',
                        help="Define the criteria for the checks.  Either Strict, Normal, or Lenient.  Defaults to Normal.",
                        nargs='?', default='normal',
                        choices=['lenient', 'normal', 'strict'])
    parser.add_argument('--verbose', '-v',
                        help="Increase output. May be specified up to three times.",
                        action="count")

    args = parser.parse_args()

    # print() with a single argument works on both Python 2 and 3; the
    # original used the Python 2 print statement, a SyntaxError on 3.
    print("Running Compliance Checker on the dataset from: %s" % args.dataset_location[0])

    ComplianceChecker.run_checker(args.dataset_location[0],
                                  args.test,
                                  args.verbose,
                                  args.criteria)
Exemple #11
0
    def _run_check(self, file_path, check):
        """
        Run a single check suite on the given file.

        :param str file_path: Full path to the file
        :param str check: Name of check suite to run.
        :return: :py:class:`aodncore.pipeline.CheckResult` object
        """
        stdout_log, stderr_log = [], []
        try:
            with CaptureStdIO() as (stdout_log, stderr_log):
                compliant, errors = ComplianceChecker.run_checker(
                    file_path, [check], self.verbosity, self.criteria,
                    self.skip_checks, output_format=self.output_format)
        except Exception as e:  # pragma: no cover
            errors = True
            stderr_log.append(
                'WARNING: compliance checks did not complete due to error. {e}'.format(e=format_exception(e)))

        # Any exception during checking means the file is treated as
        # non-compliant.
        if errors:
            compliant = False

        # Only a non-compliant result carries the captured output.
        compliance_log = [] if compliant else stdout_log + stderr_log

        return CheckResult(compliant, compliance_log, errors)
def main():
    """CLI entry point: run the compliance checker on each dataset given.

    :return: 0 when every dataset passes, 1 otherwise.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--test', '-t', '--test=', '-t=', action='append',
                        help="Select the Checks you want to perform.",
                        choices=ComplianceCheckerCheckSuite.checkers.keys())
    parser.add_argument('--criteria', '-c',
                        help="Define the criteria for the checks.  Either Strict, Normal, or Lenient.  Defaults to Normal.",
                        nargs='?', default='normal',
                        choices=['lenient', 'normal', 'strict'])
    parser.add_argument('--verbose', '-v',
                        help="Increase output. May be specified up to three times.",
                        action="count")
    parser.add_argument('-f', '--format', default='text', choices=['text', 'html'],
                        help='Output format')
    parser.add_argument('-o', '--output', default='-', action='store',
                        help='Output filename')
    parser.add_argument('dataset_location', nargs='+',
                        help="Defines the location of the dataset to be checked.")

    args = parser.parse_args()
    args.test = args.test or ['acdd']

    return_values = []
    for dataset in args.dataset_location:
        # print() form works on Python 2 and 3; the original used the
        # Python 2 print statement, a SyntaxError on Python 3.
        print("Running Compliance Checker on the dataset from: %s" % dataset)
        # Bug fix: check the dataset of the current loop iteration, not
        # always the first one (`args.dataset_location[0]`).
        return_value = ComplianceChecker.run_checker(dataset,
                                                     args.test,
                                                     args.verbose,
                                                     args.criteria,
                                                     args.output,
                                                     args.format)
        return_values.append(return_value)

    if all(return_values):
        return 0
    return 1
Exemple #13
0
def profile_compliance(filepath):
    """Run the 'gliderdac' compliance check on *filepath*.

    :param filepath: path of the NetCDF file to check.
    :return: True when the check completes without errors, False
        otherwise; failure messages are logged at debug level.  The
        temporary JSON report file is always removed.
    """
    check_suite = CheckSuite()
    check_suite.load_all_available_checkers()

    _, outfile = tempfile.mkstemp()

    try:
        return_value, errors = ComplianceChecker.run_checker(
            ds_loc=filepath,
            checker_names=['gliderdac'],
            verbose=True,
            criteria='normal',
            output_format='json',
            output_filename=outfile
        )
        # Explicit test instead of `assert`: assertions are stripped under
        # `python -O`, which would make this function report success even
        # when the checker recorded errors.
        if errors is False:
            return True
        with open(outfile, 'rt') as f:
            ers = json.loads(f.read())
        for k, v in ers.items():
            if isinstance(v, list):
                for x in v:
                    if 'msgs' in x and x['msgs']:
                        logger.debug(x['msgs'])
        return False
    except BaseException as e:
        logger.warning(e)
        return False
    finally:
        if os.path.isfile(outfile):
            os.remove(outfile)
def check_cf_compliance(dataset):
    """Run the CF compliance suite on *dataset*, skipping selected checks.

    Warns and returns early when compliance_checker is unavailable or
    older than version 4; otherwise asserts that the dataset passes.
    """
    try:
        from compliance_checker.runner import CheckSuite, ComplianceChecker
        import compliance_checker
    except ImportError:
        warnings.warn('compliance_checker unavailable, skipping NetCDF-CF Compliance Checks')
        return

    # Compare version components numerically: the original string
    # comparison would e.g. treat '10.0.0' as older than '4.0.0'.
    version_parts = []
    for part in compliance_checker.__version__.split('.'):
        if not part.isdigit():
            break
        version_parts.append(int(part))
    if tuple(version_parts) < (4,):
        warnings.warn('Please upgrade compliance-checker to 4+ version')
        warnings.warn('compliance_checker version is too old, skipping NetCDF-CF Compliance Checks')
        return

    # CF checks excluded from this run.
    skip = ['check_dimension_order',
            'check_all_features_are_same_type',
            'check_conventions_version',
            'check_appendix_a']

    cs = CheckSuite()
    cs.load_all_available_checkers()
    score_groups = cs.run(dataset, skip, 'cf')
    score_dict = {dataset.filepath(): score_groups}

    groups = ComplianceChecker.stdout_output(cs, score_dict, verbose=1, limit=COMPLIANCE_CHECKER_NORMAL_LIMIT)
    assert cs.passtree(groups, limit=COMPLIANCE_CHECKER_NORMAL_LIMIT)
Exemple #15
0
def pass_netcdf_checker(netcdf_file_path, tests=('cf:latest', 'imos:latest')):
    """Calls the netcdf checker and runs the IMOS and CF latest version tests
    by default.

    :param netcdf_file_path: path to the NetCDF file to check.
    :param tests: iterable of checker names to run.  The default is now an
        immutable tuple — a mutable default list is a shared-state hazard.
    :return: True if all tests pass, False otherwise.
    """
    from compliance_checker.runner import ComplianceChecker, CheckSuite
    import tempfile
    import os

    tmp_json_checker_output = tempfile.mkstemp()
    return_values = []
    had_errors = []
    CheckSuite.load_all_available_checkers()

    for test in tests:
        # A tmp json file is the only way (besides html) to create output
        # that is not written to stdout by default.
        return_value, errors = ComplianceChecker.run_checker(
            netcdf_file_path, [test],
            1,
            'normal',
            output_filename=tmp_json_checker_output[1],
            output_format='json')
        had_errors.append(errors)
        return_values.append(return_value)

    os.close(
        tmp_json_checker_output[0]
    )  # file object needs to be closed or can end up with too many open files
    os.remove(tmp_json_checker_output[1])

    if any(had_errors):
        return False  # checker exceptions
    if all(return_values):
        return True  # all tests passed
    return False  # at least one did not pass
Exemple #16
0
def run_cc():
    """Flask view: run the compliance checker against a user-supplied URL.

    Reads 'dataset-url' and 'checkers' from the submitted form, validates
    them (flashing errors and redirecting to the index on failure), then
    runs the checker with verbosity 2 and 'strict' criteria while
    capturing stdout, and renders the captured report.

    NOTE(review): uses `urlparse.urlparse` and `StringIO` in Python 2
    style — confirm the surrounding module's imports/Python version.
    """

    # ensure vars
    err = False
    url = None
    if 'dataset-url' not in request.form or not request.form['dataset-url']:
        err = True
        flash("No dataset URL specified", 'error')
    else:
        url = request.form['dataset-url']
        try:
            u = urlparse.urlparse(url)
            # A URL without a scheme (e.g. a bare hostname) is rejected.
            assert u.scheme != ''
        except Exception as ex:
            err = True
            flash("Could not parse URL: %s" % ex, 'error')

    checkers = request.form.getlist('checkers')
    if len(checkers) == 0:
        err = True
        flash("You must specify one or more checkers", 'error')

    if err:
        return redirect(url_for('index'))

    ########
    # RUN CC

    output = ""

    csio = StringIO()
    try:
        # magic to wrap stdout
        with stdout_redirected(csio):
            ComplianceChecker.run_checker(url, checkers, 2, 'strict')

        output = csio.getvalue()

    except Exception as e:
        flash("Error while running Compliance Checker: %s" % e, 'error')
        return redirect(url_for('index'))
    finally:
        csio.close()

    return render_template('results.html', output=output)
def main():
    """CLI entry point: run the compliance checker on every dataset given.

    :return: 0 when all datasets pass, 1 when at least one fails,
        2 when the checker itself reported errors.
    """
    # Load all available checker classes
    check_suite = CheckSuite()
    check_suite.load_all_available_checkers()

    parser = argparse.ArgumentParser()
    parser.add_argument('--test', '-t', '--test=', '-t=', default=('acdd',),
                        nargs='+',
                        choices=sorted(check_suite.checkers.keys()),
                        help="Select the Checks you want to perform.  Defaults to 'acdd' if unspecified")

    parser.add_argument('--criteria', '-c',
                        help="Define the criteria for the checks.  Either Strict, Normal, or Lenient.  Defaults to Normal.",
                        nargs='?', default='normal',
                        choices = ['lenient', 'normal', 'strict'])

    parser.add_argument('--verbose', '-v',
                        help="Increase output. May be specified up to three times.",
                        action="count",
                        default=0)

    parser.add_argument('-f', '--format', default='text',
                        choices=['text', 'html', 'json'], help='Output format')
    parser.add_argument('-o', '--output', default='-', action='store',
                        help='Output filename')
    parser.add_argument('-V', '--version', action='store_true',
                        help='Display the IOOS Compliance Checker version information.')
    parser.add_argument('dataset_location', nargs='*',
                        help="Defines the location of the dataset to be checked.")

    args = parser.parse_args()

    if args.version:
        print("IOOS compliance checker version %s" % __version__)
        return 0

    return_values = []
    had_errors = []
    for dataset in args.dataset_location:
        if args.format != 'json':
            print("Running Compliance Checker on the dataset from: {}".format(dataset), file=sys.stderr)
        # Bug fix: run the checker on the current dataset, not always the
        # first positional argument (`args.dataset_location[0]`).
        return_value, errors = ComplianceChecker.run_checker(dataset,
                                                             args.test,
                                                             args.verbose,
                                                             args.criteria,
                                                             args.output,
                                                             args.format)
        return_values.append(return_value)
        had_errors.append(errors)

    if any(had_errors):
        return 2
    if all(return_values):
        return 0
    return 1
    def _handler(self, request, response):
        """Run the 'spotchecker' process on the requested dataset.

        Produces an ncdump listing first, then dispatches on the 'test'
        input: the IOOS compliance checker for CF, the CMOR checker for
        CMIP6, or the hummingbird QA checker otherwise.

        :raises ProcessError: if no dataset input is provided.
        """
        if 'dataset_opendap' in request.inputs:
            dataset = request.inputs['dataset_opendap'][0].url
        elif 'dataset' in request.inputs:
            dataset = request.inputs['dataset'][0].file
        else:
            raise ProcessError("You need to provide a Dataset.")

        checker = request.inputs['test'][0].data

        # Write the ncdump listing and expose it as the 'ncdump' output.
        with open(os.path.join(self.workdir, "nc_dump.txt"), 'w') as fp:
            response.outputs['ncdump'].file = fp.name
            fp.writelines(ncdump(dataset))
            response.update_status('ncdump done.', 10)

        if 'CF' in checker:
            check_suite = CheckSuite()
            check_suite.load_all_available_checkers()

            with open(os.path.join(self.workdir, "report.html"), 'w') as fp:
                response.update_status("cfchecker ...", 20)
                response.outputs['output'].file = fp.name
                # run_checker writes the HTML report to fp.name itself.
                ComplianceChecker.run_checker(
                    dataset,
                    checker_names=['cf'],
                    verbose=True,
                    criteria="normal",
                    output_filename=fp.name,
                    output_format="html")
        elif 'CMIP6' in checker:
            with open(os.path.join(self.workdir, "cmip6-cmor.txt"), 'w') as fp:
                response.outputs['output'].file = fp.name
                response.update_status("cmip6 checker ...", 20)
                cmor_checker(dataset, output_filename=fp.name)
        else:
            response.update_status("qa checker ...", 20)
            # Imported lazily so the hummingbird dependency is only needed
            # on the QA-checker path.
            from hummingbird.processing import hdh_qa_checker
            logfile, _ = hdh_qa_checker(dataset, project=request.inputs['test'][0].data)
            response.outputs['output'].file = logfile

        response.update_status('spotchecker done.', 100)
        return response
Exemple #19
0
def pass_netcdf_checker(netcdf_file_path,
                        tests=('cf:latest', 'imos:latest'),
                        criteria='normal',
                        skip_checks="",
                        output_format='json',
                        keep_outfile=False):
    """Calls the netcdf checker and runs the IMOS and CF latest version tests
    by default.

    :param netcdf_file_path: path to the NetCDF file to check.
    :param tests: iterable of checker names to run.  The default is now an
        immutable tuple — a mutable default list is a shared-state hazard.
    :param criteria: checker criteria ('lenient', 'normal' or 'strict').
    :param skip_checks: checks to skip, passed through to run_checker.
    :param output_format: format of the report written to the tmp file.
    :param keep_outfile: when True, also return the report file path
        instead of deleting it.
    :return: bool, or (bool, outfile_path) when keep_outfile is True.
    """
    from compliance_checker.runner import ComplianceChecker, CheckSuite
    import tempfile
    import os

    tmp_json_checker_output_fd, tmp_json_checker_output_filename = tempfile.mkstemp(
    )
    return_values = []
    had_errors = []
    CheckSuite.load_all_available_checkers()

    for test in tests:
        # A tmp json file is the only way (besides html) to create output
        # that is not written to stdout by default.
        return_value, errors = ComplianceChecker.run_checker(
            netcdf_file_path, [test],
            1,
            criteria,
            skip_checks=skip_checks,
            output_filename=tmp_json_checker_output_filename,
            output_format=output_format)
        had_errors.append(errors)
        return_values.append(return_value)

    os.close(
        tmp_json_checker_output_fd
    )  # file object needs to be closed or can end up with too many open files

    if keep_outfile:  # optional output
        if any(had_errors):
            return False, tmp_json_checker_output_filename  # checker exceptions
        if all(return_values):
            return True, tmp_json_checker_output_filename  # all tests passed
        return False, tmp_json_checker_output_filename  # at least one did not pass
    else:
        os.remove(tmp_json_checker_output_filename)

        if any(had_errors):
            return False  # checker exceptions
        if all(return_values):
            return True  # all tests passed
        return False  # at least one did not pass
Exemple #20
0
def check_cf_compliance(dataset):
    """Run the CF checks on *dataset* and assert that they pass."""
    try:
        from compliance_checker.runner import CheckSuite, ComplianceChecker
    except ImportError:
        warnings.warn('compliance_checker unavailable, skipping NetCDF-CF Compliance Checks')
        return

    suite = CheckSuite()
    suite.load_all_available_checkers()
    results = suite.run(dataset, 'cf')

    groups = ComplianceChecker.stdout_output(suite, results, verbose=1,
                                             limit=COMPLIANCE_CHECKER_NORMAL_LIMIT)
    assert suite.passtree(groups, limit=COMPLIANCE_CHECKER_NORMAL_LIMIT)
def check_cf_compliance(dataset):
    """Assert that *dataset* passes the CF compliance suite."""
    try:
        from compliance_checker.runner import CheckSuite, ComplianceChecker
    except ImportError:
        warnings.warn('compliance_checker unavailable, skipping NetCDF-CF Compliance Checks')
        return

    checker_suite = CheckSuite()
    checker_suite.load_all_available_checkers()
    scored = checker_suite.run(dataset, 'cf')

    result_groups = ComplianceChecker.stdout_output(
        checker_suite, scored, verbose=1, limit=COMPLIANCE_CHECKER_NORMAL_LIMIT)
    assert checker_suite.passtree(result_groups, limit=COMPLIANCE_CHECKER_NORMAL_LIMIT)
Exemple #22
0
    def test_unicode_cf_html(self):
        '''The CF checker must be able to write HTML containing unicode.'''
        ComplianceChecker.run_checker(
            ds_loc=STATIC_FILES['2dim'],
            checker_names=['cf'],
            criteria='strict',
            verbose=0,
            output_filename=self.path,
            output_format='html')

        # A non-empty file shows the HTML report was actually rendered.
        assert os.stat(self.path).st_size > 0
Exemple #23
0
    def test_unicode_cf_html(self):
        """CF checker HTML output must handle unicode characters."""
        status, failures = ComplianceChecker.run_checker(
            ds_loc=STATIC_FILES["2dim"],
            checker_names=["cf"],
            criteria="strict",
            verbose=0,
            output_filename=self.path,
            output_format="html",
        )

        # The rendered report must not be empty.
        assert os.stat(self.path).st_size > 0
    def test_unicode_cf_html(self):
        '''HTML output from the CF checker supports unicode characters.'''
        opts = dict(
            ds_loc=STATIC_FILES['2dim'],
            verbose=0,
            criteria='strict',
            checker_names=['cf'],
            output_filename=self.path,
            output_format='html',
        )
        ComplianceChecker.run_checker(**opts)

        assert os.stat(self.path).st_size > 0
Exemple #25
0
    def test_text_output(self):
        '''The 'text' report can be redirected to a file via arguments.'''
        ComplianceChecker.run_checker(
            ds_loc=STATIC_FILES['conv_bad'],
            checker_names=['acdd', 'cf'],
            criteria='strict',
            verbose=0,
            output_filename=self.path,
            output_format='text')

        # The redirected report file must contain data.
        assert os.stat(self.path).st_size > 0
Exemple #26
0
    def test_text_output(self):
        """Text reports can be redirected to a file from the command line."""
        status, failures = ComplianceChecker.run_checker(
            ds_loc=STATIC_FILES["conv_bad"],
            checker_names=["acdd", "cf"],
            criteria="strict",
            verbose=0,
            output_filename=self.path,
            output_format="text",
        )

        assert os.stat(self.path).st_size > 0
    def test_text_output(self):
        '''Redirecting the 'text' output to a file must produce content.'''
        run_args = dict(
            ds_loc=STATIC_FILES['conv_bad'],
            verbose=0,
            criteria='strict',
            checker_names=['acdd', 'cf'],
            output_filename=self.path,
            output_format='text',
        )
        ComplianceChecker.run_checker(**run_args)

        assert os.stat(self.path).st_size > 0
Exemple #28
0
    def test_single_json_output(self):
        '''A suite can write its JSON report to a file.'''
        ComplianceChecker.run_checker(
            ds_loc=STATIC_FILES['conv_bad'],
            checker_names=['cf'],
            criteria='strict',
            verbose=0,
            output_filename=self.path,
            output_format='json')

        assert os.stat(self.path).st_size > 0
        # The JSON report is keyed by checker name.
        with open(self.path) as fp:
            report = json.load(fp)
        assert 'cf' in report
Exemple #29
0
def check_dataset(args):
    """Run the 'gliderdac' compliance check on ``args.file``.

    Logs the checker messages (debug level on success, warning level on
    failure) and returns a shell-style exit code: 0 when the check ran
    without errors, 1 otherwise (including checker exceptions).
    """
    check_suite = CheckSuite()
    check_suite.load_all_available_checkers()

    outhandle, outfile = tempfile.mkstemp()

    def show_messages(jn, log):
        # Collect every non-empty 'msgs' entry from the JSON report and
        # emit them as one bulleted log record.
        out_messages = []
        for k, v in jn.items():
            if isinstance(v, list):
                for x in v:
                    if 'msgs' in x and x['msgs']:
                        out_messages += x['msgs']
        log(
            '{}:\n{}'.format(args.file, '\n'.join(['  * {}'.format(
                m) for m in out_messages ])
            )
        )

    try:
        return_value, errors = ComplianceChecker.run_checker(
            ds_loc=args.file,
            checker_names=['gliderdac'],
            verbose=True,
            criteria='normal',
            output_format='json',
            output_filename=outfile
        )
    except BaseException as e:
        L.warning('{} - {}'.format(args.file, e))
        return 1
    else:
        # Map the checker outcome to an exit code and a log level.
        if errors is False:
            return_value = 0
            log = L.debug
        else:
            return_value = 1
            log = L.warning

        with open(outfile, 'rt') as f:
            show_messages(json.loads(f.read())['gliderdac'], log)

        return return_value
    finally:
        # Runs even though the branches above return: close the fd and
        # remove the temporary report file.
        os.close(outhandle)
        if os.path.isfile(outfile):
            os.remove(outfile)
Exemple #30
0
    def test_single_json_output(self):
        """JSON output from a suite can be written to a file."""
        status, failures = ComplianceChecker.run_checker(
            ds_loc=STATIC_FILES["conv_bad"],
            checker_names=["cf"],
            criteria="strict",
            verbose=0,
            output_filename=self.path,
            output_format="json",
        )

        assert os.stat(self.path).st_size > 0
        with open(self.path) as fp:
            report = json.load(fp)
        assert "cf" in report
    def test_single_json_output(self):
        '''The suite's JSON report can be written to a file.'''
        run_args = dict(
            ds_loc=STATIC_FILES['conv_bad'],
            verbose=0,
            criteria='strict',
            checker_names=['cf'],
            output_filename=self.path,
            output_format='json',
        )
        ComplianceChecker.run_checker(**run_args)

        assert os.stat(self.path).st_size > 0
        with open(self.path) as fp:
            assert 'cf' in json.load(fp)
Exemple #32
0
    def test_unicode_acdd_html(self):
        '''The ACDD checker can render HTML containing unicode characters.'''
        fd, path = tempfile.mkstemp()
        os.close(fd)
        self.addCleanup(os.remove, path)

        ComplianceChecker.run_checker(
            ds_loc=STATIC_FILES['2dim'],
            checker_names=['acdd'],
            criteria='strict',
            verbose=0,
            output_filename=path,
            output_format='html')

        # A non-empty file proves the HTML report was rendered.
        assert os.stat(path).st_size > 0
Exemple #33
0
 def test_single_json_output_stdout(self):
     '''A suite can write its JSON report to stdout.'''
     saved = sys.stdout
     try:
         fake_stdout = io.StringIO()
         sys.stdout = fake_stdout
         ComplianceChecker.run_checker(
             ds_loc=STATIC_FILES['conv_bad'],
             checker_names=['cf'],
             criteria='strict',
             verbose=0,
             output_filename='-',
             output_format='json')
         report = json.loads(fake_stdout.getvalue().strip())
         assert 'cf' in report
     finally:
         # Restore stdout even if the checker raises.
         sys.stdout = saved
Exemple #34
0
    def test_json_output(self):
        '''The CF checker can produce JSON output to a temporary file.'''
        fd, path = tempfile.mkstemp()
        os.close(fd)
        self.addCleanup(os.remove, path)

        ComplianceChecker.run_checker(
            ds_loc=STATIC_FILES['conv_bad'],
            checker_names=['cf'],
            criteria='strict',
            verbose=0,
            output_filename=path,
            output_format='json')

        # A non-empty file shows the JSON report was written.
        assert os.stat(path).st_size > 0
 def test_single_json_output_stdout(self):
     '''JSON output goes to stdout when output_filename is "-".'''
     saved = sys.stdout
     try:
         fake_stdout = io.StringIO()
         sys.stdout = fake_stdout
         run_args = dict(
             ds_loc=STATIC_FILES['conv_bad'],
             verbose=0,
             criteria='strict',
             checker_names=['cf'],
             output_filename='-',
             output_format='json',
         )
         ComplianceChecker.run_checker(**run_args)
         assert 'cf' in json.loads(fake_stdout.getvalue().strip())
     finally:
         # Restore stdout even when the checker raises.
         sys.stdout = saved
Exemple #36
0
def check_compliance(netcdf_file):
    """Run the 'cf' and 'acdd' compliance checkers against *netcdf_file*.

    Returns a list of ``ComplianceScore`` namedtuples
    ``(name, passed, score, possible)``, one per checker, or ``None`` if
    the checker run itself raised.  Each score is logged at info level
    (error level on failure).  The temporary JSON report file is always
    removed.
    """
    check_suite = CheckSuite()
    check_suite.load_all_available_checkers()

    fd, outfile = tempfile.mkstemp(prefix='scobs-cc-', suffix='.json')
    # run_checker opens the report by name; close our descriptor so it
    # is not leaked (it was previously discarded without closing).
    os.close(fd)

    try:
        return_value, errors = ComplianceChecker.run_checker(
            ds_loc=netcdf_file,
            checker_names=['cf', 'acdd'],
            verbose=True,
            criteria='strict',
            output_format='json',
            output_filename=outfile
        )
        score = namedtuple('ComplianceScore', 'name passed score possible')
        scores = []
        with open(outfile, 'rt') as f:
            for checker, results in json.load(f).items():
                scores.append(
                    score(
                        name=checker,
                        passed=not errors,
                        score=results['scored_points'],
                        possible=results['possible_points']
                    )
                )

        for s in scores:
            # Failures are logged at error level so they stand out.
            level = 'info' if s.passed else 'error'
            getattr(logger, level)(s)

        return scores

    except Exception as e:
        # Narrowed from `except BaseException`, which also swallowed
        # KeyboardInterrupt/SystemExit; log and fall through (returns None).
        logger.warning(e)
    finally:
        if os.path.isfile(outfile):
            os.remove(outfile)
Example #37
0
def main():
    """Parse command-line arguments and run the Compliance Checker once.

    Returns 0 if the dataset passed, 1 otherwise (suitable as an exit code).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'dataset_location',
        nargs=1,
        help="Defines the location of the dataset to be checked.")
    parser.add_argument(
        '--test',
        '-t',
        '--test=',
        '-t=',
        help="Select the Checks you want to perform.  Either all (default), cf, ioos, or acdd.",
        nargs='+',
        default=[],
        choices=ComplianceCheckerCheckSuite.checkers.keys())
    parser.add_argument(
        '--criteria',
        '-c',
        help="Define the criteria for the checks.  Either Strict, Normal, or Lenient.  Defaults to Normal.",
        nargs='?',
        default='normal',
        choices=['lenient', 'normal', 'strict'])
    parser.add_argument(
        '--verbose',
        '-v',
        help="Increase output. May be specified up to three times.",
        action="count")

    args = parser.parse_args()

    # Was a Python 2 `print` statement, which is a SyntaxError on Python 3.
    print("Running Compliance Checker on the dataset from: %s"
          % args.dataset_location[0])

    return_value = ComplianceChecker.run_checker(args.dataset_location[0],
                                                 args.test, args.verbose,
                                                 args.criteria)
    if return_value is not True:
        return 1
    return 0
def check_cf_compliance(dataset):
    """Run the IOOS compliance-checker 'cf' suite against *dataset*.

    Silently skips (with a warning) when compliance_checker is not
    installed.  Asserts that the dataset passes at the "normal" limit.
    """
    try:
        from compliance_checker.runner import CheckSuite, ComplianceChecker
        import compliance_checker
    except ImportError:
        warnings.warn('compliance_checker unavailable, skipping NetCDF-CF Compliance Checks')
        return

    def _numeric_version(version_string):
        # Leading numeric dotted components only: '2.3.0' -> (2, 3, 0),
        # '3.1.0b1' -> (3, 1).  The previous plain string comparison
        # mis-ordered versions, e.g. '10.0.0' < '2.3.0'.
        parts = []
        for part in version_string.split('.'):
            if not part.isdigit():
                break
            parts.append(int(part))
        return tuple(parts)

    cs = CheckSuite()
    cs.load_all_available_checkers()
    if _numeric_version(compliance_checker.__version__) >= (2, 3, 0):
        # This skips a failing compliance check. Our files don't contain all the lats/lons
        # as an auxiliary cordinate var as it's unnecessary for any software we've tried.
        # It may be added at some point in the future, and this check should be re-enabled.
        score_groups = cs.run(dataset, ['check_dimension_order'], 'cf')
    else:
        warnings.warn('Please upgrade to compliance-checker 2.3.0 or higher.')
        score_groups = cs.run(dataset, 'cf')

    groups = ComplianceChecker.stdout_output(cs, score_groups, verbose=1, limit=COMPLIANCE_CHECKER_NORMAL_LIMIT)
    assert cs.passtree(groups, limit=COMPLIANCE_CHECKER_NORMAL_LIMIT)
Example #39
0
 def test_single_json_output_stdout(self):
     """
     Tests that a suite can produce JSON output to stdout
     """
     # Capture stdout in a StringIO so the "-" report target can be
     # read back and parsed as JSON.
     real_stdout = sys.stdout
     buffer = io.StringIO()
     sys.stdout = buffer
     try:
         return_value, errors = ComplianceChecker.run_checker(
             ds_loc=STATIC_FILES["conv_bad"],
             verbose=0,
             criteria="strict",
             checker_names=["cf"],
             output_filename="-",
             output_format="json",
         )
         parsed = json.loads(buffer.getvalue().strip())
         assert "cf" in parsed
     finally:
         # Restore stdout and release the capture buffer.
         sys.stdout = real_stdout
         buffer.close()
Example #40
0
def process_deployment(dep):
    """Run the 'gliderdac' compliance check for one deployment record.

    Builds the deployment's ERDDAP tabledap URL, runs the checker with
    lenient criteria, stores the pass/fail flag (and the report text on
    failure) on the deployment document, and returns
    ``(compliance_passed, final_message)``.
    """
    erddap_fmt_string = "erddap/tabledap/{}.nc?&time%3Emax(time)-1%20day"
    base_url = app.config["PRIVATE_ERDDAP"]
    # FIXME: determine a more robust way of getting scheme
    if not base_url.startswith("http"):
        base_url = "http://{}".format(base_url)
    url_path = "/".join([base_url, erddap_fmt_string.format(dep["name"])])
    # TODO: would be better if we didn't have to write to a temp file
    outhandle, outfile = tempfile.mkstemp()
    # run_checker opens the report by name; close our descriptor so it
    # is not leaked (previously left open).
    os.close(outhandle)
    try:
        try:
            failures, errors = ComplianceChecker.run_checker(
                ds_loc=url_path,
                checker_names=['gliderdac'],
                verbose=True,
                criteria='lenient',
                output_format='text',
                output_filename=outfile)
        except Exception as e:
            root_logger.exception(e)
            errs = "Other error - possibly can't read ERDDAP"
        else:
            with open(outfile, 'r') as f:
                errs = f.read()
    finally:
        # Previously the temp file was never removed.
        if os.path.isfile(outfile):
            os.remove(outfile)

    # janky way of testing if passing -- consider using JSON format instead
    compliance_passed = "All tests passed!" in errs

    update_fields = {"compliance_check_passed": compliance_passed}
    if compliance_passed:
        final_message = "All files passed compliance check on glider deployment {}".format(
            dep['name'])
    else:
        final_message = ("Deployment {} has issues:\n".format(dep['name']) +
                         errs)
        update_fields["compliance_check_report"] = errs
    # Set fields.  Don't use upsert as deployment ought to exist prior to write.
    db.deployments.update({"_id": dep["_id"]}, {"$set": update_fields})
    return compliance_passed, final_message
Example #41
0
def main():
    """Command-line entry point: run the selected checker suites on each dataset.

    Exit status: 0 when all datasets pass, 1 when any fail, 2 when the
    checker itself reported errors.
    """
    # Load all available checker classes
    check_suite = CheckSuite()
    check_suite.load_all_available_checkers()

    parser = argparse.ArgumentParser()
    parser.add_argument('--test', '-t', '--test=', '-t=', action='append',
                        help="Select the Checks you want to perform.",
                        choices=check_suite.checkers.keys())
    parser.add_argument('--criteria', '-c',
                        help="Define the criteria for the checks.  Either Strict, Normal, or Lenient.  Defaults to Normal.",
                        nargs='?', default='normal',
                        choices=['lenient', 'normal', 'strict'])
    parser.add_argument('--verbose', '-v',
                        help="Increase output. May be specified up to three times.",
                        action="count")
    parser.add_argument('-f', '--format', default='text',
                        choices=['text', 'html', 'json'], help='Output format')
    parser.add_argument('-o', '--output', default='-', action='store',
                        help='Output filename')
    parser.add_argument('-V', '--version', action='store_true',
                        help='Display the IOOS Compliance Checker version information.')
    parser.add_argument('dataset_location', nargs='*',
                        help="Defines the location of the dataset to be checked.")

    args = parser.parse_args()
    args.test = args.test or ['acdd']

    if args.version:
        print("IOOS compliance checker version %s" % __version__)
        return 0

    return_values = []
    had_errors = []
    for dataset in args.dataset_location:
        if args.format != 'json':
            print("Running Compliance Checker on the dataset from: %s" % dataset, file=sys.stderr)
        # BUG FIX: previously every iteration checked args.dataset_location[0]
        # instead of the current `dataset`.
        return_value, errors = ComplianceChecker.run_checker(dataset,
                                                             args.test,
                                                             args.verbose,
                                                             args.criteria,
                                                             args.output,
                                                             args.format)
        return_values.append(return_value)
        had_errors.append(errors)

    if any(had_errors):
        return 2
    if all(return_values):
        return 0
    return 1
Example #42
0
def _compliance_check(nc_path: Path, results_path: Path = None):
    """
    Run the 'cf' check with lenient criteria, text format.

    Output goes to *results_path* when given; otherwise to a temp file
    that is removed afterwards (used purely as a sink so results don't
    spew into stdout).  Returns ``(was_success, errors_occurred)``.
    """
    if results_path:
        out_file = str(results_path)
    else:
        # tempfile.mktemp() is deprecated and race-prone; create the file
        # atomically and close our handle (run_checker reopens it by name).
        fd, out_file = tempfile.mkstemp(prefix='compliance-log-')
        os.close(fd)

    try:
        was_success, errors_occurred = ComplianceChecker.run_checker(
            ds_loc=str(nc_path),
            checker_names=['cf'],
            verbose=0,
            criteria='lenient',
            skip_checks=['check_dimension_order'],
            output_filename=out_file,
            output_format='text')
    finally:
        if not results_path and os.path.exists(out_file):
            os.remove(out_file)

    return was_success, errors_occurred
Example #43
0
def check_compliance(nc_file, checker_names=["cf:1.6"]):
    """Run the given checker suites on *nc_file*.

    Writes a text report to ``<nc_file>.cfcheck.txt`` and returns
    ``(return_value, errors)`` from ComplianceChecker.run_checker
    (previously computed but discarded).  NOTE: the list default is
    never mutated here, so the shared-default pitfall does not apply.
    """
    from compliance_checker.runner import ComplianceChecker, CheckSuite

    check_suite = CheckSuite()
    check_suite.load_all_available_checkers()

    verbose = 1
    criteria = "normal"
    output_filename = nc_file + ".cfcheck.txt"
    output_format = "text"

    print(
        f"*** Checking CF compliance. Please view contents of {output_filename} for any compliance issues."
    )

    return_value, errors = ComplianceChecker.run_checker(
        nc_file,
        checker_names,
        verbose,
        criteria,
        output_filename=output_filename,
        output_format=output_format,
    )
    return return_value, errors
Example #44
0
def main():
    """Command-line entry point for the IOOS Compliance Checker.

    Supports multiple datasets, multiple output formats, and either one
    concatenated output file or one output file per dataset.  Exit
    status: 0 when all datasets pass, 1 when any fail, 2 when the
    checker reported errors.
    """
    # Load all available checker classes
    check_suite = CheckSuite()
    check_suite.load_all_available_checkers()

    parser = argparse.ArgumentParser()
    parser.add_argument('--test', '-t', '--test=', '-t=', default=[],
                        action='append',
                        help=("Select the Checks you want to perform. Defaults to 'acdd'"
                              " if unspecified.  Versions of standards can be specified via "
                              "`-t <test_standard>:<version>`.  If `<version>` is omitted, or "
                              "is \"latest\", the latest version of the test standard is used."))

    parser.add_argument('--criteria', '-c',
                        help=("Define the criteria for the checks. "
                              "Either Strict, Normal, or Lenient.  Defaults to Normal."),
                        nargs='?', default='normal',
                        choices=['lenient', 'normal', 'strict'])

    parser.add_argument('--verbose', '-v',
                        help="Increase output. May be specified up to three times.",
                        action="count",
                        default=0)

    parser.add_argument('--skip-checks', '-s',
                        help=dedent("""
                                    Specifies tests to skip. Can take the form
                                    of either `<check_name>` or
                                    `<check_name>:<skip_level>`.  The first
                                    form skips any checks matching the name.
                                    In the second form <skip_level> may be
                                    specified as "A", "M", or "L".  "A" skips
                                    all checks and is equivalent to calling
                                    the first form. "M" will only show high
                                    priority output from the given check and
                                    will skip medium and low.  "L" will show
                                    both high and medium priority issues, while
                                    skipping low priority issues.
                                    """),
                        action='append')

    parser.add_argument('-f', '--format', default=[], action='append',
                        help=("Output format(s). Options are 'text', 'html', 'json', 'json_new'."
                              " The difference between the 'json' and the 'json_new'"
                              " formats is that the 'json' format has the check as the top level"
                              " key, whereas the 'json_new' format has the dataset name(s) as the"
                              " main key in the output follow by any checks as subkeys.  Also, "
                              "'json' format can be only be run against one input file, whereas "
                              "'json_new' can be run against multiple files."),
                        choices=['text', 'html', 'json', 'json_new'])

    parser.add_argument('-o', '--output', default=[], action='append',
                        help=("Output filename(s).  If '-' is supplied, output to stdout."
                              " Can either be one or many files.  If one file is supplied,"
                              " but the checker is run against many files, all the output"
                              " from the checks goes to that file (does not presently work "
                              "with 'json' format).  If more than one output file is "
                              "supplied, the number of input datasets supplied must match "
                              "the number of output files."))

    parser.add_argument('-V', '--version', action='store_true',
                        help='Display the IOOS Compliance Checker version information.')

    parser.add_argument('dataset_location', nargs='*',
                        help="Defines the location of the dataset to be checked.")

    parser.add_argument('-l', '--list-tests', action='store_true',
                        help='List the available tests')

    parser.add_argument('-d', '--download-standard-names',
                        help=("Specify a version of the cf standard name table"
                              " to download as packaged version"))

    # Add command line args from generator plugins
    check_suite.add_plugin_args(parser)

    args = parser.parse_args()

    check_suite.load_generated_checkers(args)

    if args.version:
        print("IOOS compliance checker version %s" % __version__)
        return 0

    if args.list_tests:
        print("IOOS compliance checker available checker suites:")
        for checker in sorted(check_suite.checkers.keys()):
            version = getattr(check_suite.checkers[checker],
                              '_cc_checker_version', "???")
            if args.verbose:
                print(" - {} (v{})".format(checker, version))
            elif ':' in checker and not checker.endswith(':latest'):  # Skip the "latest" output
                print(" - {}".format(checker))
        return 0

    if args.download_standard_names:
        download_cf_standard_name_table(args.download_standard_names)

    if len(args.dataset_location) == 0:
        parser.print_help()
        return 1

    # Check the number of output files.  args.output is a list
    # (action='append'); keep it a list when defaulting so indexing and
    # zip() below behave consistently (was previously set to the bare
    # string '-', which only worked by accident of len('-') == 1).
    if not args.output:
        args.output = ['-']
    output_len = len(args.output)
    if not (output_len == 1 or output_len == len(args.dataset_location)):
        print('The number of output files must either be one or the same as the number of datasets', file=sys.stderr)
        sys.exit(2)

    # Run the compliance checker
    # 2 modes, concatenated output file or multiple output files
    return_values = []
    had_errors = []
    if output_len == 1:
        # BUG FIX: args.format is a list, so `args.format != 'json'` was
        # always True; test membership instead.
        if 'json' not in args.format:
            print("Running Compliance Checker on the datasets from: {}".format(args.dataset_location), file=sys.stderr)
        return_value, errors = ComplianceChecker.run_checker(args.dataset_location,
                                                             args.test or ['acdd'],
                                                             args.verbose,
                                                             args.criteria,
                                                             args.skip_checks,
                                                             args.output[0],
                                                             args.format or ['text'])
        return_values.append(return_value)
        had_errors.append(errors)
    else:
        for output, dataset in zip(args.output, args.dataset_location):
            if 'json' not in args.format:
                print("Running Compliance Checker on the dataset from: {}".format(dataset), file=sys.stderr)
            return_value, errors = ComplianceChecker.run_checker([dataset],
                                                                 args.test or ['acdd'],
                                                                 args.verbose,
                                                                 args.criteria,
                                                                 args.skip_checks,
                                                                 output,
                                                                 args.format or ['text'])
            return_values.append(return_value)
            had_errors.append(errors)

    if any(had_errors):
        return 2
    if all(return_values):
        return 0
    return 1
def main():
    """Command-line entry point: run selected checker suites on each dataset.

    Exit status: 0 when all datasets pass, 1 when any fail, 2 when the
    checker reported errors.
    """
    # Load all available checker classes
    check_suite = CheckSuite()
    check_suite.load_all_available_checkers()

    parser = argparse.ArgumentParser()
    parser.add_argument('--test', '-t', '--test=', '-t=', default=[],
                        action='append',
                        help="Select the Checks you want to perform.  Defaults to 'acdd' if unspecified")

    parser.add_argument('--criteria', '-c',
                        help="Define the criteria for the checks.  Either Strict, Normal, or Lenient.  Defaults to Normal.",
                        nargs='?', default='normal',
                        choices=['lenient', 'normal', 'strict'])

    parser.add_argument('--verbose', '-v',
                        help="Increase output. May be specified up to three times.",
                        action="count",
                        default=0)

    parser.add_argument('--skip-checks', '-s',
                        help="Specifies tests to skip",
                        action='append')

    parser.add_argument('-f', '--format', default='text',
                        choices=['text', 'html', 'json'], help='Output format')
    parser.add_argument('-o', '--output', default='-', action='store',
                        help='Output filename')
    parser.add_argument('-V', '--version', action='store_true',
                        help='Display the IOOS Compliance Checker version information.')
    parser.add_argument('dataset_location', nargs='*',
                        help="Defines the location of the dataset to be checked.")
    parser.add_argument('-l', '--list-tests', action='store_true', help='List the available tests')
    parser.add_argument('-d', '--download-standard-names', help='Specify a version of the cf standard name table to download as packaged version')

    args = parser.parse_args()

    if args.version:
        print("IOOS compliance checker version %s" % __version__)
        return 0

    if args.list_tests:
        print("IOOS compliance checker available checker suites (code version):")
        for checker in sorted(check_suite.checkers.keys()):
            version = getattr(check_suite.checkers[checker], '_cc_checker_version', "???")
            print(" - {} ({})".format(checker, version))
        return 0

    if args.download_standard_names:
        download_cf_standard_name_table(args.download_standard_names)

    return_values = []
    had_errors = []
    for dataset in args.dataset_location:
        if args.format != 'json':
            print("Running Compliance Checker on the dataset from: {}".format(dataset), file=sys.stderr)
        # BUG FIX: previously every iteration checked args.dataset_location[0]
        # instead of the current `dataset`.
        return_value, errors = ComplianceChecker.run_checker(dataset,
                                                             args.test or ['acdd'],
                                                             args.verbose,
                                                             args.criteria,
                                                             args.skip_checks,
                                                             args.output,
                                                             args.format)
        return_values.append(return_value)
        had_errors.append(errors)

    if any(had_errors):
        return 2
    if all(return_values):
        return 0
    return 1