Code Example #1
File: gen_kernel_stub.py Project: xyuan/PSyclone
def generate(filename, api=""):
    '''
    Generates an empty kernel subroutine with the required arguments
    and datatypes (which we call a stub) when presented with Kernel
    Metadata. This is useful for Kernel developers to make sure
    they are using the correct arguments in the correct order.  The
    Kernel Metadata must be presented in the standard Kernel
    format.

    :param str filename: the name of the file for which to create a \
                         kernel stub.
    :param str api: the name of the API for which to create a kernel \
                    stub. Must be one of the supported stub APIs.

    :returns: root of fparser1 parse tree for the stub routine.
    :rtype: :py:class:`fparser.one.block_statements.Module`

    :raises GenerationError: if an invalid stub API is specified.
    :raises IOError: if filename does not specify a file.
    :raises ParseError: if the given file could not be parsed.
    :raises GenerationError: if a kernel stub does not have a supported \
                             iteration space (currently only "cells").

    '''
    if api == "":
        api = Config.get().default_stub_api
    if api not in Config.get().supported_stub_apis:
        raise GenerationError(
            "Kernel stub generator: Unsupported API '{0}' specified. "
            "Supported APIs are {1}.".format(api,
                                             Config.get().supported_stub_apis))

    if not os.path.isfile(filename):
        raise IOError(
            "Kernel stub generator: File '{0}' not found.".format(filename))

    # Drop cache
    fparser.one.parsefortran.FortranParser.cache.clear()
    fparser.logging.disable(fparser.logging.CRITICAL)
    try:
        ast = fparser.api.parse(filename, ignore_comments=False)

    except (fparser.common.utils.AnalyzeError, AttributeError) as error:
        raise ParseError("Kernel stub generator: Code appears to be invalid "
                         "Fortran: {0}.".format(str(error)))

    metadata = DynKernMetadata(ast)
    kernel = DynKern()
    kernel.load_meta(metadata)

    # Check kernel iteration space before generating code
    if (api == "dynamo0.3"
            and kernel.iterates_over not in USER_KERNEL_ITERATION_SPACES):
        raise GenerationError(
            "The LFRic API kernel stub generator supports kernels that operate"
            " on one of {0}, but found '{1}' in kernel '{2}'.".format(
                USER_KERNEL_ITERATION_SPACES, kernel.iterates_over,
                kernel.name))

    return kernel.gen_stub
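
For reference, a minimal usage sketch of this stub generator follows; the import path is inferred from the file name above and the metadata file name is a hypothetical example:

# A sketch of calling generate() directly. "my_kernel_mod.f90" is a
# hypothetical kernel-metadata file, not one shipped with PSyclone.
from psyclone.gen_kernel_stub import generate

stub = generate("my_kernel_mod.f90", api="dynamo0.3")
print(str(stub))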
Code Example #2
File: generator_test.py Project: hiker/PSyclone
def test_main_include_path(capsys):
    ''' Test that the main function supplies any INCLUDE paths to
    fparser. '''
    # This algorithm file INCLUDE's a file that defines a variable called
    # "some_fake_mpi_handle"
    alg_file = (os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             "nemo", "test_files", "include_stmt.f90"))
    # First try without specifying where to find the include file. Currently
    # fparser2 just removes any include statement that it cannot resolve
    # (https://github.com/stfc/fparser/issues/138).
    main([alg_file, '-api', 'nemo'])
    stdout, _ = capsys.readouterr()
    assert "some_fake_mpi_handle" not in stdout
    # Now specify two locations to search with only the second containing
    # the necessary header file
    inc_path1 = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             "test_files")
    inc_path2 = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             "nemo", "test_files", "include_files")
    main(
        [alg_file, '-api', 'nemo', '-I',
         str(inc_path1), '-I',
         str(inc_path2)])
    stdout, _ = capsys.readouterr()
    assert "some_fake_mpi_handle" in stdout
    # Check that the Config object contains the provided include paths
    assert str(inc_path1) in Config.get().include_paths
    assert str(inc_path2) in Config.get().include_paths
Code Example #3
File: generator_test.py Project: hiker/PSyclone
def test_main_kern_output_dir(tmpdir):
    ''' Test that we can specify a valid kernel output directory. '''
    alg_filename = (os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 "test_files", "dynamo0p3",
                                 "1_single_invoke.f90"))
    main([alg_filename, '-okern', str(tmpdir)])
    # The specified kernel output directory should have been stored in
    # the configuration object
    assert Config.get().kernel_output_dir == str(tmpdir)

    # If no kernel_output_dir is set, it should default to the
    # current directory
    Config.get().kernel_output_dir = None
    assert Config.get().kernel_output_dir == str(os.getcwd())
Code Example #4
def run():
    ''' Top-level driver for the kernel-stub generator. Handles command-line
    flags, calls generate() and applies line-length limiting to the output (if
    requested). '''
    import argparse
    parser = argparse.ArgumentParser(description="Create Kernel stub code from"
                                     " Kernel metadata")
    parser.add_argument("-o", "--outfile", help="filename of output")
    parser.add_argument(
        "-api",
        default=Config.get().default_stub_api,
        help="choose a particular api from {0}, default {1}".format(
            str(Config.get().supported_stub_apis),
            Config.get().default_stub_api))
    parser.add_argument('filename', help='Kernel metadata')
    parser.add_argument('-l',
                        '--limit',
                        dest='limit',
                        action='store_true',
                        default=False,
                        help='limit the Fortran line length to 132 characters')

    args = parser.parse_args()

    try:
        stub = generate(args.filename, api=args.api)
    except (IOError, ParseError, GenerationError, RuntimeError) as error:
        print("Error:", error)
        sys.exit(1)
    except Exception as error:  # pylint: disable=broad-except
        print("Error, unexpected exception:\n")
        exc_type, exc_value, exc_traceback = sys.exc_info()
        print(exc_type)
        print(exc_value)
        traceback.print_tb(exc_traceback)
        sys.exit(1)

    if args.limit:
        fll = FortLineLength()
        stub_str = fll.process(str(stub))
    else:
        stub_str = str(stub)
    if args.outfile is not None:
        with open(args.outfile, "w") as my_file:
            my_file.write(stub_str)
    else:
        print("Kernel stub code:\n", stub_str)
Code Example #5
def test_correct(func, output, tmpdir):
    '''Check that a valid example produces the expected output when the
    argument to ABS is a simple argument and when it is an
    expression.

    '''
    Config.get().api = "nemo"
    operation = example_psyir(func)
    writer = FortranWriter()
    result = writer(operation.root)
    assert ("subroutine abs_example(arg)\n"
            "  real, intent(inout) :: arg\n"
            "  real :: psyir_tmp\n\n"
            "  psyir_tmp = ABS({0})\n\n"
            "end subroutine abs_example\n".format(output)) in result
    trans = Abs2CodeTrans()
    trans.apply(operation, operation.root.symbol_table)
    result = writer(operation.root)
    assert ("subroutine abs_example(arg)\n"
            "  real, intent(inout) :: arg\n"
            "  real :: psyir_tmp\n"
            "  real :: res_abs\n"
            "  real :: tmp_abs\n\n"
            "  tmp_abs = {0}\n"
            "  if (tmp_abs > 0.0) then\n"
            "    res_abs = tmp_abs\n"
            "  else\n"
            "    res_abs = tmp_abs * -1.0\n"
            "  end if\n"
            "  psyir_tmp = res_abs\n\n"
            "end subroutine abs_example\n".format(output)) in result
    assert Compile(tmpdir).string_compiles(result)
    # Remove the created config instance
    Config._instance = None
Code Example #6
def test_halo_for_discontinuous_2(tmpdir, monkeypatch, annexed):
    '''This test checks the case when our loop iterates over owned cells
    (e.g. it writes to a discontinuous field), we read from a
    continuous field, there are no stencil accesses, and the previous
    writer iterates over ndofs or nannexed.

    When the previous writer iterates over ndofs we have dirty annexed
    dofs so need to add a halo exchange. This is the case when
    api_config.compute_annexed_dofs is False.

    When the previous writer iterates over nannexed we have clean
    annexed dofs so do not need to add a halo exchange. This is the
    case when api_config.compute_annexed_dofs is True.

    '''
    api_config = Config.get().api_conf(TEST_API)
    monkeypatch.setattr(api_config, "_compute_annexed_dofs", annexed)
    _, info = parse(os.path.join(BASE_PATH, "14.7_halo_annexed.f90"),
                    api=TEST_API)
    psy = PSyFactory(TEST_API, distributed_memory=True).create(info)
    result = str(psy.gen)
    if annexed:
        assert "halo_exchange" not in result
    else:
        assert "IF (f1_proxy%is_dirty(depth=1)) THEN" not in result
        assert "CALL f1_proxy%halo_exchange(depth=1)" in result
        assert "IF (f2_proxy%is_dirty(depth=1)) THEN" not in result
        assert "CALL f2_proxy%halo_exchange(depth=1)" in result
        assert "IF (m1_proxy%is_dirty(depth=1)) THEN" in result
        assert "CALL m1_proxy%halo_exchange(depth=1)" in result

    assert LFRicBuild(tmpdir).code_compiles(psy)
Code Example #7
def test_setval_x_then_user(tmpdir, monkeypatch):
    ''' Check that the correct halo exchanges are added if redundant
    computation is enabled for a built-in kernel called before a
    user-supplied kernel. '''
    api_config = Config.get().api_conf(API)
    monkeypatch.setattr(api_config, "_compute_annexed_dofs", True)
    _, invoke_info = parse(os.path.join(
        BASE_PATH, "15.7.3_setval_X_before_user_kern.f90"),
                           api=API)
    psy = PSyFactory(API, distributed_memory=True).create(invoke_info)

    first_invoke = psy.invokes.invoke_list[0]
    # Since (redundant) computation over annexed dofs is enabled, there
    # should be no halo exchange before the first (builtin) kernel call
    assert isinstance(first_invoke.schedule[0], DynLoop)
    # There should be a halo exchange for field f1 before the second
    # kernel call
    assert isinstance(first_invoke.schedule[1], DynHaloExchange)
    assert first_invoke.schedule[1].field.name == "f1"
    # Now transform the first loop to perform redundant computation out to
    # the level-1 halo
    rtrans = Dynamo0p3RedundantComputationTrans()
    _, _ = rtrans.apply(first_invoke.schedule[0], options={"depth": 1})
    # There should now be a halo exchange for f1 before the first
    # (builtin) kernel call
    assert isinstance(first_invoke.schedule[0], DynHaloExchange)
    assert first_invoke.schedule[0].field.name == "f1"
    assert isinstance(first_invoke.schedule[1], DynLoop)
    # There should only be one halo exchange for field f1
    assert len([
        node for node in first_invoke.schedule.walk(DynHaloExchange)
        if node.field.name == "f1"
    ]) == 1
    assert LFRicBuild(tmpdir).code_compiles(psy)
Code Example #8
File: gocean_extract_test.py Project: stfc/PSyclone
def test_change_prefix(tmpdir, monkeypatch):
    '''
    This tests that the prefix of a gocean extract transformation
    can be changed, and that the new prefix is also used in the
    created driver.
    '''
    # Use tmpdir so that the driver is created in tmp
    tmpdir.chdir()

    psy, invoke = get_invoke("single_invoke_scalar_float_arg.f90",
                             GOCEAN_API,
                             idx=0,
                             dist_mem=False)

    # In order to use a different prefix, this prefix needs to be valid.
    # So monkeypatch the valid prefix names in the config object:
    config = Config.get()
    monkeypatch.setattr(config, "_valid_psy_data_prefixes", ["NEW"])

    etrans = GOceanExtractTrans()
    etrans.apply(invoke.schedule.children[0], {
        'create_driver': True,
        'region_name': ("main", "update"),
        'prefix': "NEW"
    })

    # Test that the extraction code contains the new prefix:
    assert 'CALL NEW_psy_data%PreStart("main", "update", 4, 3)' \
        in str(psy.gen)

    # Now test if the created driver has the right prefix:
    driver_name = tmpdir.join("driver-main-update.f90")
    with open(str(driver_name), "r") as driver_file:
        driver_code = driver_file.read()
    assert 'CALL NEW_psy_data%OpenRead("main", "update")' in driver_code
Code Example #9
def test_correct_binary(func, output, tmpdir):
    '''Check that a valid example produces the expected output when the
    first argument to MIN is a simple argument and when it is an
    expression.

    '''
    Config.get().api = "nemo"
    operation = example_psyir_binary(func)
    writer = FortranWriter()
    result = writer(operation.root)
    assert ("subroutine min_example(arg,arg_1)\n"
            "  real, intent(inout) :: arg\n"
            "  real, intent(inout) :: arg_1\n"
            "  real :: psyir_tmp\n\n"
            "  psyir_tmp=MIN({0}, arg_1)\n\n"
            "end subroutine min_example\n".format(output)) in result
    trans = Min2CodeTrans()
    trans.apply(operation, operation.root.symbol_table)
    result = writer(operation.root)
    assert ("subroutine min_example(arg,arg_1)\n"
            "  real, intent(inout) :: arg\n"
            "  real, intent(inout) :: arg_1\n"
            "  real :: psyir_tmp\n"
            "  real :: res_min\n"
            "  real :: tmp_min\n\n"
            "  res_min={0}\n"
            "  tmp_min=arg_1\n"
            "  if (tmp_min < res_min) then\n"
            "    res_min=tmp_min\n"
            "  end if\n"
            "  psyir_tmp=res_min\n\n"
            "end subroutine min_example\n".format(output)) in result
    assert Compile(tmpdir).string_compiles(result)
    # Remove the created config instance
    Config._instance = None
Code Example #10
def test_2kern_trans(tmpdir, monkeypatch):
    ''' Check that we generate correct code when we transform two kernels
    within a single invoke. '''
    # Ensure kernel-output directory is uninitialised
    config = Config.get()
    monkeypatch.setattr(config, "_kernel_output_dir", "")
    # Change to temp dir (so kernel written there)
    old_cwd = tmpdir.chdir()
    psy, invoke = get_invoke("4.5.2_multikernel_invokes.f90", api="dynamo0.3",
                             idx=0)
    sched = invoke.schedule
    kernels = sched.walk(Kern)
    assert len(kernels) == 5
    rtrans = ACCRoutineTrans()
    _, _ = rtrans.apply(kernels[1])
    _, _ = rtrans.apply(kernels[2])
    # Generate the code (this triggers the generation of new kernels)
    code = str(psy.gen).lower()
    # Find the tags added to the kernel/module names
    for match in re.finditer('use testkern_any_space_2(.+?)_mod', code):
        tag = match.group(1)
        assert ("use testkern_any_space_2{0}_mod, only: "
                "testkern_any_space_2{0}_code".format(tag) in code)
        assert "call testkern_any_space_2{0}_code(".format(tag) in code
        assert os.path.isfile(
            os.path.join(str(tmpdir),
                         "testkern_any_space_2{0}_mod.f90".format(tag)))
    assert "use testkern_any_space_2_mod, only" not in code
    assert "call testkern_any_space_2_code(" not in code
    assert Dynamo0p3Build(tmpdir).code_compiles(psy)
    old_cwd.chdir()
Code Example #11
def test_properties():
    '''Test creation of properties.
    '''

    config = Config.get()
    api_config = config.api_conf("gocean1.0")

    all_props = api_config.grid_properties

    assert all_props["go_grid_area_t"].fortran == "{0}%grid%area_t"
    assert all_props["go_grid_area_t"].type == "array"
    assert all_props["go_grid_area_t"].intrinsic_type == "real"

    with pytest.raises(InternalError) as error:
        GOceanConfig.make_property("my_fortran", "my_type", "integer")
    assert "Type must be 'array' or 'scalar' but is 'my_type'" \
        in str(error.value)

    with pytest.raises(InternalError) as error:
        GOceanConfig.make_property("my_fortran", "scalar", "my_intrinsic")
    assert "Intrinsic type must be 'integer' or 'real' but is 'my_intrinsic'" \
        in str(error.value)

    new_prop = GOceanConfig.make_property("my_fortran", "array", "integer")
    assert new_prop.fortran == "my_fortran"
    assert new_prop.type == "array"
    assert new_prop.intrinsic_type == "integer"

    new_prop = GOceanConfig.make_property("my_fortran", "scalar", "real")
    assert new_prop.fortran == "my_fortran"
    assert new_prop.type == "scalar"
    assert new_prop.intrinsic_type == "real"
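
The "fortran" entry of a grid property is a format string in which "{0}" stands for the name of the field through which the grid is accessed. A small sketch of that substitution (the field name is a hypothetical example):

# Sketch of expanding a grid-property format string; "cu_fld" is a
# hypothetical field name.
fmt = "{0}%grid%area_t"
assert fmt.format("cu_fld") == "cu_fld%grid%area_t"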
Code Example #12
def test_new_kern_no_clobber(tmpdir, monkeypatch):
    ''' Check that we create a new kernel with a new name when kernel-naming
    is set to 'multiple' and we would otherwise get a name clash. '''
    # Ensure kernel-output directory is uninitialised
    config = Config.get()
    monkeypatch.setattr(config, "_kernel_output_dir", "")
    monkeypatch.setattr(config, "_kernel_naming", "multiple")
    # Change to temp dir (so kernel written there)
    old_cwd = tmpdir.chdir()
    psy, invoke = get_invoke("1_single_invoke.f90", api="dynamo0.3", idx=0)
    sched = invoke.schedule
    kernels = sched.walk(Kern)
    kern = kernels[0]
    old_mod_name = kern.module_name[:]
    # Create a file with the same name as we would otherwise generate
    with open(os.path.join(str(tmpdir),
                           old_mod_name+"_0_mod.f90"), "w") as ffile:
        ffile.write("some code")
    rtrans = ACCRoutineTrans()
    _, _ = rtrans.apply(kern)
    # Generate the code (this triggers the generation of a new kernel)
    _ = str(psy.gen).lower()
    filename = os.path.join(str(tmpdir), old_mod_name+"_1_mod.f90")
    assert os.path.isfile(filename)
    old_cwd.chdir()
Code Example #13
def test_new_same_kern_single(tmpdir, monkeypatch):
    ''' Check that we do not overwrite an existing, identical kernel if
    there is a name clash and kernel-naming is 'single'. '''
    # Ensure kernel-output directory is uninitialised
    config = Config.get()
    monkeypatch.setattr(config, "_kernel_output_dir", "")
    monkeypatch.setattr(config, "_kernel_naming", "single")
    rtrans = ACCRoutineTrans()
    # Change to temp dir (so kernel written there)
    old_cwd = tmpdir.chdir()
    _, invoke = get_invoke("4_multikernel_invokes.f90", api="dynamo0.3",
                           idx=0)
    sched = invoke.schedule
    # Apply the same transformation to both kernels. This should produce
    # two identical transformed kernels.
    new_kernels = []
    for kern in sched.coded_kernels():
        new_kern, _ = rtrans.apply(kern)
        new_kernels.append(new_kern)

    # Generate the code - we should end up with just one transformed kernel
    new_kernels[0].rename_and_write()
    new_kernels[1].rename_and_write()
    assert new_kernels[1]._name == "testkern_0_code"
    assert new_kernels[1].module_name == "testkern_0_mod"
    out_files = os.listdir(str(tmpdir))
    assert out_files == [new_kernels[1].module_name+".f90"]
    old_cwd.chdir()
Code Example #14
File: nemo.py Project: stfc/PSyclone
    def _create_loop(self, parent, variable):
        '''
        Specialised method to create a NemoLoop instead of a
        generic Loop.

        TODO #1210 replace this with a Transformation.

        :param parent: the parent of the node.
        :type parent: :py:class:`psyclone.psyir.nodes.Node`
        :param variable: the loop variable.
        :type variable: :py:class:`psyclone.psyir.symbols.DataSymbol`

        :return: a new NemoLoop instance.
        :rtype: :py:class:`psyclone.nemo.NemoLoop`

        '''
        loop = NemoLoop(parent=parent, variable=variable)

        loop_type_mapping = Config.get().api_conf("nemo")\
            .get_loop_type_mapping()

        # Identify the type of loop
        if variable.name in loop_type_mapping:
            loop.loop_type = loop_type_mapping[variable.name]
        else:
            loop.loop_type = "unknown"

        return loop
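
A sketch of the classification step above, using an illustrative mapping (the real contents are read from the "nemo" section of the PSyclone configuration file via get_loop_type_mapping()):

# Hypothetical loop-type mapping; real values come from the config file.
loop_type_mapping = {"ji": "lon", "jj": "lat", "jk": "levels"}

def classify(variable_name):
    # Loop variables not listed in the mapping are classed as "unknown".
    return loop_type_mapping.get(variable_name, "unknown")

assert classify("jk") == "levels"
assert classify("idx") == "unknown"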
Code Example #15
def trans(psy):
    ''' PSyclone transformation script for the dynamo0p3 API to apply
    loop fusion and OpenMP for a particular example.'''
    otrans = OMPParallelTrans()
    ltrans = Dynamo0p3OMPLoopTrans()
    ftrans = LFRicLoopFuseTrans()

    invoke = psy.invokes.invoke_list[0]
    schedule = invoke.schedule

    config = Config.get()
    if config.api_conf("dynamo0.3").compute_annexed_dofs and \
       config.distributed_memory:
        # We can't loop fuse as the loop bounds differ so add
        # OpenMP parallel do directives to the loops
        otrans.apply(schedule.children[0])
        otrans.apply(schedule.children[1])
    else:
        # Loop fuse the two built-in kernels. The 'same_space' flag needs to
        # be set as built-ins are over ANY_SPACE.
        ftrans.apply(schedule[0], schedule[1], {"same_space": True})

        # Add an OpenMP do directive to the resultant loop-fused loop,
        # specifying that we want reproducible reductions
        ltrans.apply(schedule.children[0], {"reprod": True})

        # Add an OpenMP parallel directive around the OpenMP do directive
        otrans.apply(schedule.children[0])

    # take a look at what we've done
    schedule.view()
    schedule.dag()

    return psy
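
Scripts like this are not executed directly: PSyclone imports the script and calls trans() on the PSy object it has built. A sketch of the equivalent programmatic flow (module paths and the algorithm file name are assumptions for this illustration):

# Roughly what PSyclone does when given "-s script.py"; the algorithm
# file name here is hypothetical.
from psyclone.parse.algorithm import parse
from psyclone.psyGen import PSyFactory

_, invoke_info = parse("my_alg.f90", api="dynamo0.3")
psy = PSyFactory("dynamo0.3", distributed_memory=True).create(invoke_info)
psy = trans(psy)
print(psy.gen)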
Code Example #16
File: omp_transform.py Project: stfc/PSycloneBench
def trans(psy):
    ''' Transformation entry point '''
    config = Config.get()
    tinfo = TransInfo()
    parallel_loop_trans = tinfo.get_trans_name('GOceanOMPParallelLoopTrans')
    loop_trans = tinfo.get_trans_name('GOceanOMPLoopTrans')
    parallel_trans = tinfo.get_trans_name('OMPParallelTrans')
    module_inline_trans = tinfo.get_trans_name('KernelModuleInline')

    schedule = psy.invokes.get('invoke_0').schedule

    # Inline all kernels in this Schedule
    for kernel in schedule.kernels():
        module_inline_trans.apply(kernel)

    # With distributed memory, apply the OpenMPParallelLoop transformation
    # to every Loop; otherwise apply the OpenMPLoop transformation to
    # every child in the schedule.
    for child in schedule.children:
        if config.distributed_memory:
            if isinstance(child, Loop):
                parallel_loop_trans.apply(child)
        else:
            loop_trans.apply(child)

    if not config.distributed_memory:
        # If it is not distributed memory, enclose all of these loops
        # within a single OpenMP PARALLEL region
        parallel_trans.apply(schedule.children)

    return psy
Code Example #17
def test_add_halo_exchange_code_nreader(monkeypatch):
    '''Check that _add_field_component_halo_exchange() in DynLoop raises
    the expected exception when there is more than one read dependence
    associated with a halo exchange in the read dependence list.

    '''
    api_config = Config.get().api_conf(API)
    monkeypatch.setattr(api_config, "_compute_annexed_dofs", True)
    _, invoke_info = parse(os.path.join(
        BASE_PATH, "15.7.3_setval_X_before_user_kern.f90"),
                           api=API)
    psy = PSyFactory(API, distributed_memory=True).create(invoke_info)

    schedule = psy.invokes.invoke_list[0].schedule
    loop = schedule[0]
    rtrans = Dynamo0p3RedundantComputationTrans()
    rtrans.apply(loop, options={"depth": 1})
    f1_field = schedule[0].field
    del schedule.children[0]
    schedule[1].field._name = "f1"
    schedule[2].field._name = "f1"
    with pytest.raises(InternalError) as info:
        loop._add_field_component_halo_exchange(f1_field)
    assert ("When replacing a halo exchange with another one for field f1, "
            "a subsequent dependent halo exchange was found. This should "
            "never happen." in str(info.value))
Code Example #18
def test_compute_halo_read_info_async(monkeypatch):
    '''Check that _compute_halo_read_info() in DynHaloExchange raises the
    expected exception when there is a read dependence associated with
    an asynchronous halo exchange in the read dependence list.

    '''
    api_config = Config.get().api_conf(API)
    monkeypatch.setattr(api_config, "_compute_annexed_dofs", True)
    _, invoke_info = parse(os.path.join(
        BASE_PATH, "15.7.3_setval_X_before_user_kern.f90"),
                           api=API)
    psy = PSyFactory(API, distributed_memory=True).create(invoke_info)

    schedule = psy.invokes.invoke_list[0].schedule

    hex_f1 = schedule[1]

    schedule[2].field._name = "f1"
    async_hex = Dynamo0p3AsyncHaloExchangeTrans()
    async_hex.apply(schedule[2])
    with pytest.raises(GenerationError) as info:
        hex_f1._compute_halo_read_info(ignore_hex_dep=True)
    assert ("Please perform redundant computation transformations "
            "before asynchronous halo exchange transformations."
            in str(info.value))
Code Example #19
def test_new_kern_single_error(tmpdir, monkeypatch):
    ''' Check that we do not overwrite an existing, different kernel if
    there is a name clash and kernel-naming is 'single'. '''
    # Ensure kernel-output directory is uninitialised
    config = Config.get()
    monkeypatch.setattr(config, "_kernel_output_dir", "")
    monkeypatch.setattr(config, "_kernel_naming", "single")
    # Change to temp dir (so kernel written there)
    old_cwd = tmpdir.chdir()
    _, invoke = get_invoke("1_single_invoke.f90", api="dynamo0.3", idx=0)
    sched = invoke.schedule
    kernels = sched.coded_kernels()
    kern = kernels[0]
    old_mod_name = kern.module_name[:]
    # Create a file with the same name as we would otherwise generate
    with open(os.path.join(str(tmpdir),
                           old_mod_name+"_0_mod.f90"), "w") as ffile:
        ffile.write("some code")
    rtrans = ACCRoutineTrans()
    new_kern, _ = rtrans.apply(kern)
    # Generate the code - this should raise an error as we get a name
    # clash and the content of the existing file is not the same as that
    # which we would generate
    with pytest.raises(GenerationError) as err:
        new_kern.rename_and_write()
    assert ("transformed version of this Kernel 'testkern_0_mod.f90' already "
            "exists in the kernel-output directory ({0}) but is not the same "
            "as the current, transformed kernel and the kernel-renaming "
            "scheme is set to 'single'".format(str(tmpdir)) in str(err))
    old_cwd.chdir()
Code Example #20
File: algorithm.py Project: stfc/PSyclone
    def __init__(self,
                 api="",
                 invoke_name="invoke",
                 kernel_path="",
                 line_length=False):

        self._invoke_name = invoke_name
        self._kernel_path = kernel_path
        self._line_length = line_length

        _config = Config.get()
        if not api:
            api = _config.default_api
        else:
            check_api(api)
        self._api = api

        self._arg_name_to_module_name = {}
        # Dict holding a 2-tuple consisting of type and precision
        # information for each variable declared in the algorithm
        # file, indexed by variable name.
        self._arg_type_defns = {}
        self._unique_invoke_labels = []

        # Use the get_builtin_defs helper function to access
        # information about the builtins supported by this API. The
        # first argument contains the names of the builtins and the
        # second is the file where these names are defined.
        self._builtin_name_map, \
            self._builtin_defs_file = get_builtin_defs(self._api)

        self._alg_filename = None
Code Example #21
def test_1kern_trans(tmpdir, monkeypatch):
    ''' Check that we generate the correct code when an invoke contains
    the same kernel more than once but only one of them is transformed. '''
    # Ensure kernel-output directory is uninitialised
    config = Config.get()
    monkeypatch.setattr(config, "_kernel_output_dir", "")
    # Change to temp dir (so kernel written there)
    old_cwd = tmpdir.chdir()
    psy, invoke = get_invoke("4_multikernel_invokes.f90", api="dynamo0.3",
                             idx=0)
    sched = invoke.schedule
    kernels = sched.coded_kernels()
    # We will transform the second kernel but not the first
    kern = kernels[1]
    rtrans = ACCRoutineTrans()
    _, _ = rtrans.apply(kern)
    # Generate the code (this triggers the generation of a new kernel)
    code = str(psy.gen).lower()
    tag = re.search('use testkern(.+?)_mod', code).group(1)
    # We should have a USE for the original kernel and a USE for the new one
    assert "use testkern{0}_mod, only: testkern{0}_code".format(tag) in code
    assert "use testkern, only: testkern_code" in code
    # Similarly, we should have calls to both the original and new kernels
    assert "call testkern_code(" in code
    assert "call testkern{0}_code(".format(tag) in code
    first = code.find("call testkern_code(")
    second = code.find("call testkern{0}_code(".format(tag))
    assert first < second
    assert Dynamo0p3Build(tmpdir).code_compiles(psy)
    old_cwd.chdir()
Code Example #22
File: kernel.py Project: muhdfirdaus373/PSyclone
    def __init__(self, api=""):
        if not api:
            _config = Config.get()
            self._type = _config.default_api
        else:
            check_api(api)
            self._type = api
Code Example #23
File: parse_test.py Project: drewsilcock/PSyclone
def test_kerneltypefactory_default_api():
    ''' Check that the KernelTypeFactory correctly defaults to using
    the default API '''
    from psyclone.configuration import Config
    _config = Config.get()
    factory = KernelTypeFactory(api="")
    assert factory._type == _config.default_api
Code Example #24
File: algorithm.py Project: xyuan/PSyclone
    def __init__(self,
                 api="",
                 invoke_name="invoke",
                 kernel_path="",
                 line_length=False):

        self._invoke_name = invoke_name
        self._kernel_path = kernel_path
        self._line_length = line_length

        _config = Config.get()
        if not api:
            api = _config.default_api
        else:
            check_api(api)
        self._api = api

        self._arg_name_to_module_name = {}
        self._unique_invoke_labels = []

        # Use the get_builtin_defs helper function to access
        # information about the builtins supported by this API. The
        # first argument contains the names of the builtins and the
        # second is the file where these names are defined.
        self._builtin_name_map, \
            self._builtin_defs_file = get_builtin_defs(self._api)

        self._alg_filename = None
Code Example #25
def parse_fp2(filename):
    '''Parse a Fortran source file contained in the file 'filename' using
    fparser2.

    :param str filename: source file (including path) to read.
    :returns: fparser2 AST for the source file.
    :rtype: :py:class:`fparser.two.Fortran2003.Program`
    :raises ParseError: if the file could not be parsed.

    '''
    parser = ParserFactory().create(std="f2008")
    # We get the directories to search for any Fortran include files from
    # our configuration object.
    config = Config.get()
    try:
        reader = FortranFileReader(filename, include_dirs=config.include_paths)
    except IOError as error:
        raise ParseError(
            "algorithm.py:parse_fp2: Failed to parse file '{0}'. Error "
            "returned was ' {1} '.".format(filename, error))
    try:
        parse_tree = parser(reader)
    except FortranSyntaxError as msg:
        raise ParseError(
            "algorithm.py:parse_fp2: Syntax error in file '{0}':\n"
            "{1}".format(filename, str(msg)))
    return parse_tree
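
A minimal usage sketch (the file name is hypothetical; any include paths must already be stored on the Config object, e.g. from the -I flag shown in Code Example #2):

# Sketch of calling parse_fp2(); "my_alg.f90" is a hypothetical file.
parse_tree = parse_fp2("my_alg.f90")
print(parse_tree)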
Code Example #26
File: constants_test.py Project: stfc/PSyclone
def test_nemo_const():
    '''Tests the Nemo constant object.
    '''

    # This guarantees that the first time we use the constant object,
    # we read it from the config file.
    NemoConstants.HAS_BEEN_INITIALISED = False
    config = Config.get()

    nemo_const = config.api_conf("nemo").get_constants()
    assert nemo_const.VALID_INTRINSIC_TYPES == []
    assert nemo_const.VALID_ARG_TYPE_NAMES == []
    assert nemo_const.VALID_SCALAR_NAMES == ["rscalar", "iscalar"]

    assert NemoConstants.HAS_BEEN_INITIALISED
    # Test that we don't re-evaluate the constants, i.e. if
    # we modify them, the modified value will not be overwritten.
    NemoConstants.VALID_INTRINSIC_TYPES = "INVALID"
    nemo_const = NemoConstants()
    assert nemo_const.VALID_INTRINSIC_TYPES == "INVALID"
    assert nemo_const.VALID_ARG_TYPE_NAMES == []
    assert nemo_const.VALID_SCALAR_NAMES == ["rscalar", "iscalar"]
    # Make sure the 'INVALID' value is reset when the constant
    # object is created again.
    NemoConstants.HAS_BEEN_INITIALISED = False
Code Example #27
def test_apply_reference_literal():
    '''Check that the apply method adds bounds appropriately when the
    config file specifies a lower bound as a reference and an upper
    bound as a literal.

    '''
    _, invoke_info = get_invoke("implicit_many_dims.f90", api=API, idx=0)
    # Create a new config instance and load a test config file with
    # the bounds information set the way we want.
    config = Config.get(do_not_load_file=True)
    config.load(config_file=TEST_CONFIG)
    schedule = invoke_info.schedule
    assignment = schedule[0]
    array_ref = assignment.lhs
    trans = NemoArrayRange2LoopTrans()
    for index in range(4, -1, -1):
        range_node = array_ref.children[index]
        trans.apply(range_node)
    # Remove this config instance so that the default config file will
    # be loaded next time (in case we affect other tests)
    Config._instance = None
    writer = FortranWriter()
    result = writer(schedule)
    assert ("do idx = LBOUND(umask, 5), UBOUND(umask, 5), 1\n"
            "  do jt = 1, UBOUND(umask, 4), 1\n"
            "    do jk = jpk, 1, 1\n"
            "      do jj = 1, jpj, 1\n"
            "        do ji = jpi, 1, 1\n"
            "          umask(ji,jj,jk,jt,idx) = vmask(ji,jj,jk,jt,idx) + 1.0\n"
            "        enddo\n"
            "      enddo\n"
            "    enddo\n"
            "  enddo\n"
            "enddo" in result)
Code Example #28
File: constants_test.py Project: stfc/PSyclone
def test_lfric_const():
    '''Tests the LFRic constant object.
    '''
    # This guarantees that the first time we use the constant object,
    # we read it from the config file.
    LFRicConstants.HAS_BEEN_INITIALISED = False
    config = Config.get()

    lfric_const = config.api_conf("dynamo0.3").get_constants()
    # Don't test intrinsic_types, which comes from the config file
    assert lfric_const.VALID_ARG_TYPE_NAMES == ["gh_field", "gh_operator",
                                                "gh_columnwise_operator",
                                                "gh_scalar"]

    assert lfric_const.VALID_SCALAR_NAMES == ["gh_scalar"]

    assert LFRicConstants.HAS_BEEN_INITIALISED
    # Test that we don't initialise the object again, i.e. that a
    # modified value is not changed.
    LFRicConstants.VALID_INTRINSIC_TYPES = "INVALID"
    lfric_const = LFRicConstants()
    assert lfric_const.VALID_INTRINSIC_TYPES == "INVALID"
    assert lfric_const.VALID_ARG_TYPE_NAMES == ["gh_field", "gh_operator",
                                                "gh_columnwise_operator",
                                                "gh_scalar"]
    assert lfric_const.VALID_SCALAR_NAMES == ["gh_scalar"]
    assert lfric_const.VALID_ARG_DATA_TYPES == ["gh_real", "gh_integer",
                                                "gh_logical"]
    # Make sure the 'INVALID' value is reset when the constant
    # object is created again.
    LFRicConstants.HAS_BEEN_INITIALISED = False
Code Example #29
def trans(psy):
    ''' PSyclone transformation script for the dynamo0p3 API to apply
    loop fusion and OpenMP for a particular example.'''
    otrans = DynamoOMPParallelLoopTrans()
    ftrans = DynamoLoopFuseTrans()

    invoke = psy.invokes.invoke_list[0]
    schedule = invoke.schedule

    from psyclone.configuration import Config
    config = Config.get()
    if config.api_conf("dynamo0.3").compute_annexed_dofs and \
       config.distributed_memory:
        # We can't loop fuse as the loop bounds differ so add
        # OpenMP parallel do directives to the loops
        schedule, _ = otrans.apply(schedule.children[0])
        schedule, _ = otrans.apply(schedule.children[1])
    else:
        # Loop fuse the two built-in kernels. The 'same_space' flag needs to
        # be set as built-ins are over ANY_SPACE.
        ftrans.same_space = True
        schedule, _ = ftrans.apply(schedule[0], schedule[1])

        # Add an OpenMP parallel do directive to the resultant loop-fused loop
        schedule, _ = otrans.apply(schedule.children[0])

    # take a look at what we've done
    schedule.view()
    schedule.dag(file_format="png")

    return psy
Code Example #30
def test_halo_for_discontinuous(tmpdir, monkeypatch, annexed):
    '''This test checks the case when our loop iterates over owned cells
    (e.g. it writes to a discontinuous field), we read from a
    continuous field, there are no stencil accesses, but we do not
    know anything about the previous writer.

    As we don't know anything about the previous writer we have to
    assume that it may have been over dofs. If so, we could have dirty
    annexed dofs so need to add a halo exchange for the three
    continuous fields being read (f1, f2 and m1). This is the case
    when api_config.compute_annexed_dofs is False.

    If we always iterate over annexed dofs by default, our annexed
    dofs will always be clean. Therefore we do not need to add a halo
    exchange. This is the case when
    api_config.compute_annexed_dofs is True.

    '''
    api_config = Config.get().api_conf(TEST_API)
    monkeypatch.setattr(api_config, "_compute_annexed_dofs", annexed)
    _, info = parse(os.path.join(BASE_PATH, "1_single_invoke_w3.f90"),
                    api=TEST_API)
    psy = PSyFactory(TEST_API, distributed_memory=True).create(info)
    result = str(psy.gen)
    if annexed:
        assert "halo_exchange" not in result
    else:
        assert "IF (f1_proxy%is_dirty(depth=1)) THEN" in result
        assert "CALL f1_proxy%halo_exchange(depth=1)" in result
        assert "IF (f2_proxy%is_dirty(depth=1)) THEN" in result
        assert "CALL f2_proxy%halo_exchange(depth=1)" in result
        assert "IF (m1_proxy%is_dirty(depth=1)) THEN" in result
        assert "CALL m1_proxy%halo_exchange(depth=1)" in result

    assert LFRicBuild(tmpdir).code_compiles(psy)