import contextlib
import hashlib
import logging
import os
from typing import Dict

import numpy as np
import pytest


# The two functions below are methods of a savepoint translate class in the
# fv3core test framework; helpers and constants (`_serialize_slice`, `utils`,
# `gt_utils`, `success`, `sample_wherefail`, `save_netcdf`, `OUTDIR`,
# `GPU_MAX_ERR`, `GPU_NEAR_ZERO`, ...) come from the surrounding package.
def state_from_inputs(self, inputs: dict, grid=None) -> dict:
    """Build a state dict of Quantities from raw serialized input arrays."""
    if grid is None:
        grid = self.grid
    state = {}
    for name, properties in self.inputs.items():
        standard_name = properties.get("name", name)
        if len(properties["dims"]) > 0:
            # Allocate a quantity and copy the serialized data into its
            # compute-plus-halo slice.
            state[standard_name] = grid.quantity_factory.empty(
                properties["dims"],
                properties["units"],
                dtype=inputs[name].dtype,
            )
            input_slice = _serialize_slice(
                state[standard_name], properties.get("n_halo", utils.halo))
            state[standard_name].data[input_slice] = utils.asarray(
                inputs[name], to_type=type(state[standard_name].data))
            if name in self._base.in_vars["data_vars"]:
                if "kaxis" in self._base.in_vars["data_vars"][name]:
                    # Serialized data can carry the vertical (k) axis in a
                    # non-standard position; move it to the last axis.
                    kaxis = int(
                        self._base.in_vars["data_vars"][name]["kaxis"])
                    dims = list(state[standard_name].dims)
                    dims.append(dims.pop(kaxis))
                    state[standard_name] = state[standard_name].transpose(
                        dims)
        else:
            # Scalar inputs pass through unchanged.
            state[standard_name] = inputs[name]
    return state
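

# Hypothetical standalone demonstration (names and shapes invented) of the
# k-axis reordering performed in state_from_inputs: data serialized with the
# vertical axis in position `kaxis` is rotated so that axis ends up last.
def _example_kaxis_reorder():
    data = np.zeros((79, 12, 12))  # (z, x, y) as serialized; kaxis == 0
    dims = ["z", "x", "y"]
    dims.append(dims.pop(0))  # -> ["x", "y", "z"], mirroring the dims shuffle
    reordered = np.transpose(data, (1, 2, 0))
    assert reordered.shape == (12, 12, 79)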


def outputs_from_state(self, state: dict):
    """Extract raw output arrays from a state dict for comparison."""
    return_dict: Dict[str, np.ndarray] = {}
    if len(self.outputs) == 0:
        return return_dict
    for name, properties in self.outputs.items():
        standard_name = properties["name"]
        if name in self._base.in_vars["data_vars"]:
            if "kaxis" in self._base.in_vars["data_vars"][name]:
                # Undo the k-axis reordering applied in state_from_inputs so
                # outputs match the serialized reference layout.
                kaxis = int(self._base.in_vars["data_vars"][name]["kaxis"])
                dims = list(state[standard_name].dims)
                dims.insert(kaxis, dims.pop(-1))
                state[standard_name] = state[standard_name].transpose(dims)
        if len(properties["dims"]) > 0:
            # Slice out the same compute-plus-halo region used on input.
            output_slice = _serialize_slice(
                state[standard_name], properties.get("n_halo", utils.halo))
            return_dict[name] = utils.asarray(
                state[standard_name].data[output_slice])
        else:
            return_dict[name] = state[standard_name]
    return return_dict
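

# Hedged sketch of the `_serialize_slice` helper used above. Its actual
# fv3core implementation is not shown in this listing; this version is
# inferred from the call sites: select each dimension's compute extent,
# widened by `n_halo` points on horizontal dimensions, so serialized halo
# data round-trips between state_from_inputs and outputs_from_state.
def _example_serialize_slice(quantity, n_halo, horizontal_dims=("x", "y")):
    slices = []
    for dim, origin, extent in zip(
            quantity.dims, quantity.origin, quantity.extent):
        halo = n_halo if dim in horizontal_dims else 0
        slices.append(slice(origin - halo, origin + extent + halo))
    return tuple(slices)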


def hash_result_data(result, data_keys):
    """Return a SHA-1 hex digest per output variable for regression files."""
    hashes = {}
    for k in data_keys:
        hashes[k] = hashlib.sha1(
            np.ascontiguousarray(gt_utils.asarray(result[k]))
        ).hexdigest()
    return hashes
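

# Hypothetical usage of hash_result_data (toy arrays, invented names):
# arrays are made contiguous and converted to host memory before hashing,
# so the digests are comparable across backends.
def _example_hash_usage():
    result = {"pt": np.arange(6.0).reshape(2, 3), "delp": np.ones(4)}
    hashes = hash_result_data(result, ["pt", "delp"])
    assert sorted(hashes) == ["delp", "pt"]
    assert all(len(h) == 40 for h in hashes.values())  # SHA-1 hex digests

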
def test_mock_parallel_savepoint(
    testobj,
    test_name,
    grid,
    mock_communicator_list,
    serializer_list,
    savepoint_in_list,
    savepoint_out_list,
    backend,
    print_failures,
    failure_stride,
    subtests,
    caplog,
    threshold_overrides,
    xy_indices=False,
):
    caplog.set_level(logging.DEBUG, logger="fv3core")
    caplog.set_level(logging.DEBUG, logger="fv3util")
    if testobj is None:
        pytest.xfail(
            f"no translate object available for savepoint {test_name}")
    # Relax error thresholds for GPU backends
    if backend.endswith("cuda"):
        testobj.max_error = max(testobj.max_error, GPU_MAX_ERR)
        testobj.near_zero = max(testobj.near_zero, GPU_NEAR_ZERO)
    if threshold_overrides is not None:
        process_override(threshold_overrides, testobj, test_name, backend)
    fv3core._config.set_grid(grid)
    inputs_list = []
    for savepoint_in, serializer in zip(savepoint_in_list, serializer_list):
        inputs_list.append(testobj.collect_input_data(serializer,
                                                      savepoint_in))
    output_list = testobj.compute_sequential(inputs_list,
                                             mock_communicator_list)
    failing_names = []
    ref_data = {}
    for varname in testobj.outputs.keys():
        ref_data[varname] = []
        ignore_near_zero = testobj.ignore_near_zero_errors.get(varname, False)
        with _subtest(failing_names, subtests, varname=varname):
            failing_ranks = []
            for rank, (savepoint_out, serializer, output) in enumerate(
                    zip(savepoint_out_list, serializer_list, output_list)):
                with _subtest(failing_ranks,
                              subtests,
                              varname=varname,
                              rank=rank):
                    ref_data[varname].append(
                        serializer.read(varname, savepoint_out))
                    assert success(
                        gt_utils.asarray(output[varname]),
                        ref_data[varname][-1],
                        testobj.max_error,
                        ignore_near_zero,
                        testobj.near_zero,
                    ), sample_wherefail(
                        output[varname],
                        ref_data[varname][-1],
                        testobj.max_error,
                        print_failures,
                        failure_stride,
                        test_name,
                        ignore_near_zero,
                        testobj.near_zero,
                        xy_indices,
                    )
            assert failing_ranks == []
    failing_names = [item["varname"] for item in failing_names]
    if len(failing_names) > 0:
        out_filename = os.path.join(OUTDIR, f"{test_name}.nc")
        try:
            save_netcdf(testobj, inputs_list, output_list, ref_data,
                        failing_names, out_filename)
        except Exception as error:
            print(error)
    assert failing_names == [], f"names tested: {list(testobj.outputs.keys())}"
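

# Hedged sketch of the `_subtest` context manager used in
# test_mock_parallel_savepoint, inferred from its call sites: it records the
# subtest's identifying kwargs up front and pops them back off only if the
# wrapped block finishes without an assertion error, leaving one dict per
# failing subtest on `failure_list`.
@contextlib.contextmanager
def _example_subtest(failure_list, subtests, **kwargs):
    failure_list.append(kwargs)
    with subtests.test(**kwargs):
        yield
        failure_list.pop()  # reached only when no assertion failed

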
def test_parallel_savepoint(
    data_regression,
    data_path,
    testobj,
    test_name,
    test_case,
    grid,
    serializer,
    savepoint_in,
    savepoint_out,
    communicator,
    backend,
    print_failures,
    failure_stride,
    subtests,
    caplog,
    python_regression,
    threshold_overrides,
    print_domains,
    xy_indices=True,
):
    caplog.set_level(logging.DEBUG, logger="fv3core")
    if testobj is None:
        pytest.xfail(
            f"no translate object available for savepoint {test_name}")
    if python_regression and not testobj.python_regression:
        pytest.xfail(f"python_regression not set for test {test_name}")
    # Relax error thresholds for GPU backends
    if config.is_gpu_backend():
        testobj.max_error = max(testobj.max_error, GPU_MAX_ERR)
        testobj.near_zero = max(testobj.near_zero, GPU_NEAR_ZERO)
    if threshold_overrides is not None:
        process_override(threshold_overrides, testobj, test_name, backend)
    fv3core._config.set_grid(grid[0])
    input_data = testobj.collect_input_data(serializer, savepoint_in)
    # run python version of functionality
    output = testobj.compute_parallel(input_data, communicator)
    out_vars = set(testobj.outputs.keys())
    out_vars.update(testobj._base.out_vars.keys())
    if python_regression and testobj.python_regression:
        filename = f"python_regressions/{test_case}_{backend}_{platform()}.yml"
        filename = filename.replace("=", "_")
        data_regression.check(
            hash_result_data(output, out_vars),
            fullpath=os.path.join(data_path, filename),
        )
        return
    failing_names = []
    passing_names = []
    ref_data = {}
    for varname in out_vars:
        ref_data[varname] = []
        new_ref_data = serializer.read(varname, savepoint_out)
        if hasattr(testobj, "subset_output"):
            new_ref_data = testobj.subset_output(varname, new_ref_data)
        ref_data[varname].append(new_ref_data)
        ignore_near_zero = testobj.ignore_near_zero_errors.get(varname, False)
        with subtests.test(varname=varname):
            failing_names.append(varname)
            output_data = gt_utils.asarray(output[varname])
            assert success(
                output_data,
                ref_data[varname][0],
                testobj.max_error,
                ignore_near_zero,
                testobj.near_zero,
            ), sample_wherefail(
                output_data,
                ref_data[varname][0],
                testobj.max_error,
                print_failures,
                failure_stride,
                test_name,
                ignore_near_zero,
                testobj.near_zero,
                xy_indices,
            )
            passing_names.append(failing_names.pop())
    if len(failing_names) > 0:
        out_filename = os.path.join(OUTDIR, f"{test_name}-{grid[0].rank}.nc")
        try:
            save_netcdf(testobj, [input_data], [output], ref_data,
                        failing_names, out_filename)
        except Exception as error:
            print(f"TestParallel SaveNetCDF Error: {error}")
    assert failing_names == [], f"only the following variables passed: {passing_names}"
    assert len(passing_names) > 0, "No tests passed"
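

# Miniature demonstration (hypothetical data) of the pass/fail bookkeeping in
# the loops above: pytest-subtests records an assertion failure inside
# `subtests.test(...)` without aborting the test function, so the
# `failing_names.pop()` line runs only for passing variables and each failure
# leaves its name on the list.
def _example_subtest_bookkeeping(subtests):
    failing, passing = [], []
    for varname, ok in [("pt", True), ("delp", False)]:
        with subtests.test(varname=varname):
            failing.append(varname)
            assert ok, f"{varname} mismatch"
            passing.append(failing.pop())
    # After the loop: failing == ["delp"], passing == ["pt"].

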
def test_sequential_savepoint(
    testobj,
    test_name,
    grid,
    serializer,
    savepoint_in,
    savepoint_out,
    rank,
    backend,
    print_failures,
    failure_stride,
    subtests,
    caplog,
    threshold_overrides,
    print_domains,
    xy_indices=True,
):
    caplog.set_level(logging.DEBUG, logger="fv3core")
    if testobj is None:
        pytest.xfail(
            f"no translate object available for savepoint {test_name}")
    # Relax error thresholds for GPU backends
    if config.is_gpu_backend():
        testobj.max_error = max(testobj.max_error, GPU_MAX_ERR)
        testobj.near_zero = max(testobj.near_zero, GPU_NEAR_ZERO)
    if threshold_overrides is not None:
        process_override(threshold_overrides, testobj, test_name, backend)
    fv3core._config.set_grid(grid)
    input_data = testobj.collect_input_data(serializer, savepoint_in)
    # run python version of functionality
    output = testobj.compute(input_data)
    failing_names = []
    passing_names = []
    ref_data = {}
    for varname in testobj.serialnames(testobj.out_vars):
        ignore_near_zero = testobj.ignore_near_zero_errors.get(varname, False)
        new_ref_data = serializer.read(varname, savepoint_out)
        if hasattr(testobj, "subset_output"):
            new_ref_data = testobj.subset_output(varname, new_ref_data)
        # Keep references per variable so save_netcdf can look them up by
        # name, as in the parallel tests.
        ref_data[varname] = [new_ref_data]
        with subtests.test(varname=varname):
            failing_names.append(varname)
            output_data = gt_utils.asarray(output[varname])
            assert success(
                output_data,
                ref_data[varname][0],
                testobj.max_error,
                ignore_near_zero,
                testobj.near_zero,
            ), sample_wherefail(
                output_data,
                ref_data[varname][0],
                testobj.max_error,
                print_failures,
                failure_stride,
                test_name,
                ignore_near_zero_errors=ignore_near_zero,
                near_zero=testobj.near_zero,
                xy_indices=xy_indices,
            )
            passing_names.append(failing_names.pop())
    if len(failing_names) > 0:
        out_filename = os.path.join(OUTDIR, f"{test_name}.nc")
        try:
            save_netcdf(testobj, [input_data], [output], ref_data,
                        failing_names, out_filename)
        except Exception as error:
            print(f"TestSequential SaveNetCDF Error: {error}")
    assert failing_names == [], f"only the following variables passed: {passing_names}"
    assert len(passing_names) > 0, "No tests passed"
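

# Hedged sketch (assumed semantics, not the fv3core implementation) of the
# `success` predicate asserted above: fields agree when their relative error
# is below max_error, optionally forgiving points where both computed and
# reference values are below the `near_zero` magnitude. On failure, pytest
# evaluates the `sample_wherefail(...)` operand to build the error message.
def _example_success(computed, reference, max_error, ignore_near_zero,
                     near_zero):
    denom = np.where(np.abs(reference) > 0, np.abs(reference), 1.0)
    relative_error = np.abs(computed - reference) / denom
    ok = relative_error < max_error
    if ignore_near_zero:
        both_small = (np.abs(computed) < near_zero) & (
            np.abs(reference) < near_zero
        )
        ok |= both_small
    return bool(np.all(ok))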