def test03_kernel_missing_index_offset():
    ''' A ParseError must be raised for a kernel whose meta-data omits
    the INDEX_OFFSET field. '''
    alg_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "test_files",
        "gocean1p0", "test03_invoke_kernel_missing_offset.f90")
    with pytest.raises(ParseError):
        parse(alg_path, api="gocean1.0")
def test00p1_kernel_wrong_meta_arg_count():
    ''' The parser must reject kernel meta-data in which one of the
    meta-args has the wrong number of arguments. '''
    here = os.path.dirname(os.path.abspath(__file__))
    with pytest.raises(ParseError):
        parse(os.path.join(here, "test_files", "gocean1p0",
                           "test00.1_invoke_kernel_wrong_meta_arg_count.f90"),
              api="gocean1.0")
def test04_kernel_invalid_index_offset():
    ''' A ParseError must be raised for kernel meta-data that contains
    an invalid value for the INDEX_OFFSET field. '''
    fname = "test04_invoke_kernel_invalid_offset.f90"
    with pytest.raises(ParseError):
        parse(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                           "test_files", "gocean1p0", fname),
              api="gocean1.0")
def test13_kernel_invalid_fortran():
    ''' A kernel file that is not valid Fortran must be rejected by the
    parser with a ParseError. '''
    base = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                        "test_files", "gocean1p0")
    with pytest.raises(ParseError):
        parse(os.path.join(base, "test13_invoke_kernel_invalid_fortran.f90"),
              api="gocean1.0")
def test08p1_kernel_without_fld_args():
    ''' A kernel that requests a grid property but has no field object
    amongst its arguments must be rejected with a ParseError. '''
    alg_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "test_files",
        "gocean1p0", "test08.1_invoke_kernel_no_fld_args.f90")
    with pytest.raises(ParseError):
        parse(alg_path, api="gocean1.0")
def test00p2_kernel_invalid_meta_args():
    ''' The parser must reject kernel meta-data in which one of the
    meta-args is not 'arg'. '''
    here = os.path.dirname(os.path.abspath(__file__))
    with pytest.raises(ParseError):
        parse(os.path.join(here, "test_files", "gocean1p0",
                           "test00.2_invoke_kernel_invalid_meta_args.f90"),
              api="gocean1.0")
def test05p1_kernel_invalid_iterates_over():
    ''' Kernel meta-data with an invalid ITERATES_OVER field must be
    rejected with a ParseError. '''
    fname = "test05.1_invoke_kernel_invalid_iterates_over.f90"
    with pytest.raises(ParseError):
        parse(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                           "test_files", "gocean1p0", fname),
              api="gocean1.0")
def test_single_invoke_undeclared():
    ''' An invoke of an undeclared function must raise a ParseError. '''
    alg_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            "test_files", "gunghoproto",
                            "2_undeclared_function.f90")
    with pytest.raises(ParseError):
        parse(alg_path, api="gunghoproto")
def test08_kernel_invalid_grid_property():
    ''' Kernel meta-data naming an unrecognised grid property must be
    rejected by the parser with a ParseError. '''
    base = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                        "test_files", "gocean1p0")
    with pytest.raises(ParseError):
        parse(os.path.join(
            base, "test08_invoke_kernel_invalid_grid_property.f90"),
            api="gocean1.0")
def test07_kernel_wrong_gridpt_type():
    ''' Kernel meta-data giving an unrecognised grid-point type for a
    field argument (i.e. something other than C{U,V,F,T}, I_SCALAR or
    R_SCALAR) must be rejected with a ParseError. '''
    here = os.path.dirname(os.path.abspath(__file__))
    with pytest.raises(ParseError):
        parse(os.path.join(here, "test_files", "gocean1p0",
                           "test07_invoke_kernel_wrong_gridpt_type.f90"),
              api="gocean1.0")
def test06_kernel_invalid_access():
    ''' Kernel meta-data giving an unrecognised access type for a kernel
    argument (i.e. something other than READ, WRITE or READWRITE) must
    be rejected with a ParseError. '''
    fname = "test06_invoke_kernel_wrong_access.f90"
    with pytest.raises(ParseError):
        parse(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                           "test_files", "gocean1p0", fname),
              api="gocean1.0")
def test10_kernel_invalid_stencil_prop():
    ''' The parser must raise an error when the meta-data of a kernel
    contains no stencil specification. '''
    alg_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "test_files",
        "gocean1p0", "test10_invoke_kernel_invalid_stencil.f90")
    with pytest.raises(ParseError):
        parse(alg_path, api="gocean1.0")
def test_omp_explicit_gen():
    ''' Check code generation for a single explicit loop containing a
    kernel. '''
    _, invoke_info = parse(os.path.join(BASE_PATH, "explicit_do.f90"),
                           api=API, line_length=False)
    psy = PSyFactory(API, distributed_memory=False).create(invoke_info)
    schedule = psy.invokes.get('explicit_do').schedule
    omp_trans = TransInfo().get_trans_name('OMPParallelLoopTrans')
    # Parallelise every loop over levels that contains a kernel
    for loop in schedule.loops():
        kernel = loop.kernel
        if kernel and loop.loop_type == "levels":
            schedule, _ = omp_trans.apply(loop)
    # Lower-case the generated code so the comparison is case-insensitive
    gen_code = str(psy.gen).lower()
    expected = (
        "program explicit_do\n"
        " implicit none\n"
        " integer :: ji, jj, jk\n"
        " integer :: jpi, jpj, jpk\n"
        " real, dimension(jpi, jpj, jpk) :: umask\n"
        " !$omp parallel do default(shared), private(jk,jj,ji), "
        "schedule(static)\n"
        " do jk = 1, jpk\n"
        " do jj = 1, jpj\n"
        " do ji = 1, jpi\n"
        " umask(ji, jj, jk) = ji * jj * jk / r\n"
        " end do\n"
        " end do\n"
        " end do\n"
        " !$omp end parallel do\n"
        "end program explicit_do")
    assert expected in gen_code
    # Check that calling gen a second time gives the same code
    gen_code = str(psy.gen).lower()
    assert expected in gen_code
def test_writetoread_dag(tmpdir, have_graphviz):
    ''' Test that the GOSchedule::dag() method works as expected when we
    have two kernels with a write -> read dependency '''
    _, invoke_info = parse(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "test_files",
        "gocean1p0", "single_invoke_write_to_read.f90"), api=API)
    psy = PSyFactory(API).create(invoke_info)
    invoke = psy.invokes.invoke_list[0]
    # Change to the pytest-supplied tmp dir so any dot output lands there
    _ = tmpdir.chdir()
    invoke.schedule.dag()
    if have_graphviz:
        # Only check the file contents when graphviz is available
        dot_file = os.path.join(str(tmpdir), "dag")
        assert os.path.isfile(dot_file)
        with open(dot_file, "r") as dfile:
            dot = dfile.read()
        assert dot.startswith("digraph")
        # write -> read means that the second loop can only begin once the
        # first loop is complete. Check that we have the correct forwards
        # dependence (green) and backwards dependence (red). Both the
        # colour-name and hex-code forms are accepted as graphviz versions
        # differ in what they emit.
        assert ('"loop_[outer]_1_end" -> "loop_[outer]_4_start" [color=red]'
                in dot or
                '"loop_[outer]_1_end" -> "loop_[outer]_4_start" '
                '[color=#ff0000]' in dot)
        assert ('"loop_[outer]_1_end" -> "loop_[outer]_4_start" [color=green]'
                in dot or
                '"loop_[outer]_1_end" -> "loop_[outer]_4_start" '
                '[color=#00ff00]' in dot)
def test_goschedule_view(capsys):
    ''' Test that the GOSchedule::view() method works as expected '''
    _, invoke_info = parse(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "test_files",
        "gocean1p0", "single_invoke_two_kernels.f90"), api=API)
    psy = PSyFactory(API).create(invoke_info)
    invoke = psy.invokes.invoke_list[0]
    invoke.schedule.view()
    # The view method writes to stdout and this is captured by py.test
    # by default. We have to query this captured output.
    out, _ = capsys.readouterr()
    expected_output = (
        "GOSchedule[invoke='invoke_0',Constant loop bounds=True]\n"
        " Loop[type='outer',field_space='cu',it_space='internal_pts']\n"
        " Loop[type='inner',field_space='cu',it_space='internal_pts']\n"
        " KernCall compute_cu_code(cu_fld,p_fld,u_fld) "
        "[module_inline=False]\n"
        " Loop[type='outer',field_space='every',it_space='internal_pts']\n"
        " Loop[type='inner',field_space='every',"
        "it_space='internal_pts']\n"
        " KernCall time_smooth_code(u_fld,unew_fld,uold_fld) "
        "[module_inline=False]")
    assert expected_output in out
def test_implicit_loop_sched2():
    ''' Check that we get the correct schedule when we transform an
    implicit loop over the i-j slab within an explicit loop levels. '''
    _, invoke_info = parse(os.path.join(BASE_PATH,
                                        "explicit_over_implicit.f90"),
                           api=API, line_length=False)
    psy = PSyFactory(API).create(invoke_info)
    exp_trans = TransInfo().get_trans_name('NemoExplicitLoopTrans')
    sched = psy.invokes.invoke_list[0].schedule
    loop_levels = sched.children[0]
    # First application makes the outer (jj) implicit loop explicit
    _, _ = exp_trans.apply(loop_levels.children[0])
    # We should have 3 loops (one from the explicit loop over levels and
    # the other two from the implicit loops over ji and jj).
    loops = sched.walk(sched.children, nemo.NemoLoop)
    assert len(loops) == 3
    assert loop_levels.children[0].loop_type == "lat"
    # No kernel calls should be identified at this point
    kerns = sched.kern_calls()
    assert not kerns
    # Second application makes the inner (ji) implicit loop explicit
    _, _ = exp_trans.apply(loop_levels.children[0].children[0])
    gen_code = str(psy.gen)
    assert ("  INTEGER :: jj\n"
            "  INTEGER :: ji\n"
            "  DO jk = 1, jpk\n"
            "    DO jj = 1, jpj, 1\n"
            "      DO ji = 1, jpi, 1\n"
            "        umask(ji, jj, jk) = vmask(ji, jj, jk) + 1.0\n"
            "      END DO\n"
            "    END DO\n"
            "  END DO\n"
            "END PROGRAM explicit_over_implicit" in gen_code)
    # Check that we haven't got duplicate declarations of the loop vars
    assert gen_code.count("INTEGER :: ji") == 1
def test_dyninvokebasisfns_compute(monkeypatch):
    ''' Check that the DynInvokeBasisFns.compute_basis_fns() method
    raises the expected InternalErrors if an unrecognised type or shape
    of basis function is encountered. '''
    from psyclone.f2pygen import ModuleGen
    _, invoke_info = parse(os.path.join(BASE_PATH,
                                        "1.1.0_single_invoke_xyoz_qr.f90"),
                           api=API)
    psy = PSyFactory(API, distributed_memory=False).create(invoke_info)
    sched = psy.invokes.invoke_list[0].schedule
    dinf = DynInvokeBasisFns(sched)
    mod = ModuleGen(name="testmodule")
    # First supply an invalid shape for one of the basis functions
    dinf._basis_fns[0]["shape"] = "not-a-shape"
    with pytest.raises(InternalError) as err:
        dinf.compute_basis_fns(mod)
    assert ("Unrecognised shape 'not-a-shape' specified for basis function. "
            "Should be one of: ['gh_quadrature_xyoz', 'gh_evaluator']"
            in str(err))
    # Now supply an invalid type for one of the basis functions
    monkeypatch.setattr(dinf, "_basis_fns", [{'type': 'not-a-type'}])
    with pytest.raises(InternalError) as err:
        dinf.compute_basis_fns(mod)
    assert ("Unrecognised type of basis function: 'not-a-type'. Expected "
            "one of 'basis' or 'diff-basis'" in str(err))
def test_loop_fuse_trans():
    ''' test of the loop-fuse transformation '''
    _, info = parse(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 "test_files", "gunghoproto",
                                 "3_two_functions_shared_arguments.f90"),
                    api="gunghoproto")
    psy = PSyFactory("gunghoproto").create(info)
    schedule = psy.invokes.get("invoke_0").schedule
    fuser = LoopFuseTrans()
    _, _ = fuser.apply(schedule.children[0], schedule.children[1])
    gen = str(psy.gen)
    # Record the index of the line on which each marker last appears
    for lineno, text in enumerate(gen.split('\n')):
        if "DO column=1,topology" in text:
            do_idx = lineno
        if "CALL testkern1_code(" in text:
            call1_idx = lineno
        if "CALL testkern2_code(" in text:
            call2_idx = lineno
        if "END DO" in text:
            enddo_idx = lineno
    # 4 lines should be in sequence as calls have been fused into one loop
    assert (enddo_idx - call2_idx == 1 and
            call2_idx - call1_idx == 1 and
            call1_idx - do_idx == 1)
def test_prolong_vector(tmpdir, f90, f90flags):
    ''' Check that we generate correct code when an inter-grid kernel
    takes a field vector as argument '''
    _, invoke_info = parse(os.path.join(BASE_PATH,
                                        "22.4_intergrid_prolong_vec.f90"),
                           api=API)
    psy = PSyFactory(API, distributed_memory=True).create(invoke_info)
    output = str(psy.gen)
    if TEST_COMPILE:
        # Compilation check only runs when enabled in the test configuration
        assert code_compiles(API, psy, tmpdir, f90, f90flags)
    assert "TYPE(field_type), intent(inout) :: field1(3)" in output
    assert "TYPE(field_proxy_type) field1_proxy(3)" in output
    # Make sure we always index into the field arrays
    assert " field1%" not in output
    assert " field2%" not in output
    assert ("ncpc_field1_field2, ncell_field1, field1_proxy(1)%data, "
            "field1_proxy(2)%data, field1_proxy(3)%data, field2_proxy(1)%data,"
            " field2_proxy(2)%data, field2_proxy(3)%data, ndf_w1" in output)
    # Each member of the field vector must be halo-exchanged, proxied and
    # flagged dirty/clean individually
    for idx in [1, 2, 3]:
        assert ("      IF (field2_proxy({0})%is_dirty(depth=1)) THEN\n"
                "        CALL field2_proxy({0})%halo_exchange(depth=1)\n"
                "      END IF \n".format(idx) in output)
        assert ("field1_proxy({0}) = field1({0})%get_proxy()".format(idx)
                in output)
        assert "CALL field1_proxy({0})%set_dirty()".format(idx) in output
        assert "CALL field1_proxy({0})%set_clean(1)".format(idx) in output
def inline():
    ''' function exercising the module-inline transformation '''
    import os
    from psyclone.parse import parse
    from psyclone.psyGen import PSyFactory
    from psyclone.transformations import KernelModuleInlineTrans
    alg_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", "..", "..",
        "src", "psyclone", "tests", "test_files", "dynamo0p1",
        "algorithm", "1_single_function.f90")
    _, info = parse(alg_path, api="dynamo0.1")
    psy = PSyFactory("dynamo0.1").create(info)
    invokes = psy.invokes
    print(psy.invokes.names)
    invoke = invokes.get("invoke_0_testkern_type")
    schedule = invoke.schedule
    schedule.view()
    kern = schedule.children[0].children[0]
    # setting module inline directly
    kern.module_inline = True
    schedule.view()
    # unsetting module inline via a transformation
    trans = KernelModuleInlineTrans()
    schedule, _ = trans.apply(kern, inline=False)
    schedule.view()
    # setting module inline via a transformation
    schedule, _ = trans.apply(kern)
    schedule.view()
    print(str(psy.gen))
def test_omp_do_within_if():
    ''' Check that we can insert an OpenMP parallel do within an if
    block. '''
    from psyclone.transformations import OMPParallelLoopTrans
    otrans = OMPParallelLoopTrans()
    _, invoke_info = parse(os.path.join(BASE_PATH, "imperfect_nest.f90"),
                           api=API, line_length=False)
    psy = PSyFactory(API, distributed_memory=False).create(invoke_info)
    schedule = psy.invokes.get('imperfect_nest').schedule
    # Navigate to the loop nested inside the else clause of the if block
    loop = schedule.children[0].children[1].children[2].children[0]
    assert isinstance(loop, nemo.NemoLoop)
    # Apply the transformation to a loop within an else clause
    schedule, _ = otrans.apply(loop)
    gen = str(psy.gen)
    expected = (
        " ELSE\n"
        " !$omp parallel do default(shared), private(jj,ji), "
        "schedule(static)\n"
        " DO jj = 1, jpj, 1\n"
        " DO ji = 1, jpi, 1\n"
        " zdkt(ji, jj) = (ptb(ji, jj, jk - 1, jn) - "
        "ptb(ji, jj, jk, jn)) * wmask(ji, jj, jk)\n"
        " END DO\n"
        " END DO\n"
        " !$omp end parallel do\n"
        " END IF\n")
    assert expected in gen
def test_script_trans():
    ''' checks that generator.py works correctly when a transformation is
    provided as a script, i.e. it applies the transformations correctly.
    We use loop fusion as an example.'''
    from psyclone.parse import parse
    from psyclone.psyGen import PSyFactory
    from psyclone.transformations import LoopFuseTrans
    base_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             "test_files", "dynamo0p3")
    parse_file = os.path.join(base_path, "4_multikernel_invokes.f90")
    # first loop fuse explicitly (without using generator.py)
    _, invoke_info = parse(parse_file, api="dynamo0.3")
    psy = PSyFactory("dynamo0.3").create(invoke_info)
    invoke = psy.invokes.get("invoke_0")
    schedule = invoke.schedule
    fuser = LoopFuseTrans()
    schedule.view()
    schedule, _ = fuser.apply(schedule.children[3], schedule.children[4])
    invoke.schedule = schedule
    generated_code_1 = psy.gen
    schedule.view()
    # second loop fuse using generator.py and a script
    _, generated_code_2 = generate(
        parse_file, api="dynamo0.3",
        script_name=os.path.join(base_path, "loop_fuse_trans.py"))
    # remove module so we do not affect any following tests
    delete_module("loop_fuse_trans")
    # third - check that the results are the same ...
    assert str(generated_code_1) == str(generated_code_2)
def test_loop_bounds_gen_multiple_loops():
    ''' Test that we only generate one assignment for a loop-bounds
    variable when we have multiple loops '''
    _, info = parse(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 "test_files", "gocean0p1",
                                 "openmp_fuse_test.f90"),
                    api=API)
    psy = PSyFactory(API).create(info)
    gen = str(psy.gen)
    # Use the print *function* here: the Python-2-only 'print gen'
    # statement is a SyntaxError under Python 3 and inconsistent with the
    # rest of this file, which already calls print(...).
    print(gen)
    expected = ("    DO j=1,SIZE(uold, 2)\n"
                "      DO i=1,SIZE(uold, 1)\n"
                "        CALL time_smooth_code(i, j, u, unew, uold)\n"
                "      END DO \n"
                "    END DO \n"
                "    DO j=1,SIZE(vold, 2)\n"
                "      DO i=1,SIZE(vold, 1)\n"
                "        CALL time_smooth_code(i, j, v, vnew, vold)\n"
                "      END DO \n"
                "    END DO \n"
                "    DO j=1,SIZE(pold, 2)\n"
                "      DO i=1,SIZE(pold, 1)\n"
                "        CALL time_smooth_code(i, j, p, pnew, pold)\n"
                "      END DO \n"
                "    END DO ")
    assert expected in gen
def test_dynkern_setup(monkeypatch):
    ''' Check that internal-consistency checks in DynKern._setup() work
    as expected '''
    _, invoke_info = parse(os.path.join(BASE_PATH,
                                        "1.1.0_single_invoke_xyoz_qr.f90"),
                           api=API)
    psy = PSyFactory(API).create(invoke_info)
    # Get hold of a DynKern object
    schedule = psy.invokes.invoke_list[0].schedule
    kern = schedule.children[3].children[0]
    # Monkeypatch a couple of __init__ routines so that we can get past
    # them in the _setup() routine.
    from psyclone.psyGen import Kern
    monkeypatch.setattr(Kern, "__init__",
                        lambda me, ktype, kcall, parent, check: None)
    from psyclone.parse import KernelCall
    monkeypatch.setattr(KernelCall, "__init__",
                        lambda me, mname, ktype, args: None)
    # Break the shape of the quadrature for this kernel
    monkeypatch.setattr(kern, "_eval_shape", value="gh_wrong_shape")
    # Rather than try and mock-up a DynKernMetadata object, it's easier
    # to make one properly by parsing the kernel code.
    ast = fpapi.parse(os.path.join(BASE_PATH, "testkern_qr.F90"),
                      ignore_comments=False)
    name = "testkern_qr_type"
    dkm = DynKernMetadata(ast, name=name)
    # Finally, call the _setup() method
    with pytest.raises(GenerationError) as excinfo:
        kern._setup(dkm, "my module", None, None)
    assert ("Internal error: evaluator shape 'gh_wrong_shape' is not "
            "recognised" in str(excinfo))
def test_dyninvokebasisfns(monkeypatch):
    ''' Check that we raise internal errors as required '''
    _, invoke_info = parse(os.path.join(BASE_PATH,
                                        "1.1.0_single_invoke_xyoz_qr.f90"),
                           api=API)
    psy = PSyFactory(API).create(invoke_info)
    # Get hold of a DynInvokeBasisFns object
    evaluator = psy.invokes.invoke_list[0].evaluators
    # Test the error check in dynamo0p3.qr_basis_alloc_args() by passing in a
    # dictionary containing an invalid shape entry
    basis_dict = {"shape": "gh_wrong_shape"}
    from psyclone import dynamo0p3
    with pytest.raises(GenerationError) as excinfo:
        _ = dynamo0p3.qr_basis_alloc_args("size1", basis_dict)
    assert "unrecognised shape (gh_wrong_shape) specified " in str(excinfo)
    # Monkey-patch it so that it doesn't have any quadrature args
    monkeypatch.setattr(evaluator, "_qr_vars", value=[])
    # Check that calling the various _initialise_... routines does nothing.
    # We pass parent=None so that if any of the routines get beyond the
    # initial check then they will fail.
    evaluator._initialise_xyz_qr(None)
    evaluator._initialise_xyoz_qr(None)
    evaluator._initialise_xoyoz_qr(None)
def test_default_api():
    ''' Check that parse() picks up the default API if none is specified
    by the caller. We do this simply by checking that it returns OK
    having parsed some dynamo0.3 code. '''
    alg = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                       "test_files", "dynamo0p3", "1_single_invoke.f90")
    _, invoke_info = parse(alg)
    assert len(list(invoke_info.calls.keys())) == 1
def test00p3_kern_invalid_meta_arg_type():
    ''' Check that the parser catches the case where the type of one of
    the meta-args in the kernel meta-data is incorrect. '''
    test_file = "test00.3_invoke_kernel_invalid_meta_arg_type.f90"
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                        "test_files", "gocean1p0", test_file)
    with pytest.raises(ParseError):
        _, _ = parse(path, api=API)
def test_continuators_algorithm():
    ''' An algorithm file whose long lines already carry continuators to
    satisfy the line-length limit must parse without error. '''
    alg = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                       "test_files", "dynamo0p3",
                       "13.2_alg_long_line_continuator.f90")
    _, _ = parse(alg, api="dynamo0.3", line_length=True)
def test_continuators_kernel():
    ''' A kernel file whose long lines already carry continuators to
    satisfy the line-length limit must parse without error. '''
    kern = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                        "test_files", "dynamo0p3",
                        "1.1.0_single_invoke_xyoz_qr.f90")
    _, _ = parse(kern, api="dynamo0.3", line_length=True)
def test_duplicate_named_invoke():
    ''' An algorithm file that contains two invokes given the same name
    must be rejected with the expected ParseError. '''
    alg = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "test_files",
        "dynamo0p3", "3.3_multi_functions_multi_invokes_name_clash.f90")
    with pytest.raises(ParseError) as err:
        _, _ = parse(alg, api="dynamo0.3")
    assert "Found multiple named invoke()'s with the same name" in str(err)