Example 1
    def _init_operator(self, arg_type):
        '''
        Validates metadata descriptors for operator arguments and
        initialises operator argument properties accordingly.

        :param arg_type: LFRic API operator argument type.
        :type arg_type: :py:class:`psyclone.expression.FunctionVar`

        :raises InternalError: if argument type other than an operator is \
                               passed in.
        :raises ParseError: if there are not exactly 5 metadata arguments.
        :raises ParseError: if an operator argument has an invalid data \
                            type (the only permitted data type is 'gh_real').
        :raises ParseError: if the 'to' function space is not one of the \
                            valid function spaces.
        :raises ParseError: if the 'from' function space is not one of the \
                            valid function spaces.
        :raises ParseError: if the operator argument has an invalid access.

        '''
        # Check whether something other than an operator is passed in
        if self._argument_type not in LFRicArgDescriptor.VALID_OPERATOR_NAMES:
            raise InternalError(
                "Expected an operator argument but got an argument of type "
                "'{0}'.".format(self._argument_type))

        # TODO in #874: Remove support for the old-style operator descriptors
        #               throughout this routine.
        # We expect 5 arguments with the 4th and 5th each being a
        # function space
        nargs_operator = 4 + self._offset
        if self._nargs != nargs_operator:
            raise ParseError(
                "In the LFRic API each 'meta_arg' entry must have {0} "
                "arguments if its first argument is an operator (one "
                "of {1}), but found {2} in '{3}'.".format(
                    nargs_operator, LFRicArgDescriptor.VALID_OPERATOR_NAMES,
                    self._nargs, arg_type))

        # Check whether an invalid data type for an operator argument is passed
        # in. The only valid data type for operators in LFRic API is "gh_real".
        # TODO in #874: Remove the support for old-style operator metadata that
        #               prescribes the data type.
        if not self._data_type and self._offset == 0:
            self._data_type = "gh_real"
        if (self._data_type
                not in LFRicArgDescriptor.VALID_OPERATOR_DATA_TYPES):
            raise ParseError(
                "In the LFRic API the permitted data types for operator "
                "arguments are one of {0}, but found '{1}' in '{2}'.".format(
                    LFRicArgDescriptor.VALID_OPERATOR_DATA_TYPES,
                    self._data_type, arg_type))

        # Operator arguments need to have valid to- and from- function spaces
        # Check for a valid to- function space
        prop_ind = 2 + self._offset
        if arg_type.args[prop_ind].name not in \
           FunctionSpace.VALID_FUNCTION_SPACE_NAMES:
            raise ParseError(
                "In the LFRic API argument {0} of a 'meta_arg' operator "
                "entry must be a valid function-space name (one of "
                "{1}), but found '{2}' in '{3}'.".format(
                    prop_ind + 1, FunctionSpace.VALID_FUNCTION_SPACE_NAMES,
                    arg_type.args[prop_ind].name, arg_type))
        self._function_space1 = arg_type.args[prop_ind].name
        # Check for a valid from- function space
        prop_ind = 3 + self._offset
        if arg_type.args[prop_ind].name not in \
           FunctionSpace.VALID_FUNCTION_SPACE_NAMES:
            raise ParseError(
                "In the LFRic API argument {0} of a 'meta_arg' operator "
                "entry must be a valid function-space name (one of "
                "{1}), but found '{2}' in '{3}'.".format(
                    prop_ind + 1, FunctionSpace.VALID_FUNCTION_SPACE_NAMES,
                    arg_type.args[prop_ind].name, arg_type))
        self._function_space2 = arg_type.args[prop_ind].name

        # Test allowed accesses for operators
        operator_accesses = [
            AccessType.READ, AccessType.WRITE, AccessType.READWRITE
        ]
        # Convert generic access types to GH_* names for error messages
        api_config = Config.get().api_conf(API)
        rev_access_mapping = api_config.get_reverse_access_mapping()
        op_acc_msg = [rev_access_mapping[acc] for acc in operator_accesses]
        if self._access_type not in operator_accesses:
            raise ParseError(
                "In the LFRic API, allowed accesses for operators are {0} "
                "because they behave as discontinuous quantities, but found "
                "'{1}' in '{2}'.".format(op_acc_msg,
                                         rev_access_mapping[self._access_type],
                                         arg_type))
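
For context, here is a minimal self-contained sketch of the count-then-type-check pattern used above. This is not the PSyclone API: the function name and the bare ValueError are hypothetical, and the old-style offset handling is omitted.

VALID_OPERATOR_NAMES = ["gh_operator", "gh_columnwise_operator"]
VALID_OPERATOR_DATA_TYPES = ["gh_real"]

def check_operator_descriptor(args):
    '''Validate a list of metadata strings such as
    ["gh_operator", "gh_real", "gh_read", "w0", "w1"].'''
    if args[0] not in VALID_OPERATOR_NAMES:
        raise ValueError("Expected an operator but got '{0}'.".format(args[0]))
    if len(args) != 5:
        raise ValueError("Expected 5 metadata arguments but found "
                         "{0}.".format(len(args)))
    if args[1] not in VALID_OPERATOR_DATA_TYPES:
        raise ValueError("Invalid operator data type '{0}'.".format(args[1]))

check_operator_descriptor(["gh_operator", "gh_real", "gh_read", "w0", "w1"])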
Example 2
def setup():
    '''Make sure that all tests here use dynamo0.3 as API.'''
    Config.get().api = "dynamo0.3"
Example 3
    def validate(self, nodes, options=None):
        '''
        Calls the validate method of the base class and then checks that,
        for the NEMO API, the routine that will contain the instrumented
        region already has a Specification_Part (because we've not yet
        implemented the necessary support if it doesn't).
        TODO: #435

        :param nodes: a node or list of nodes to be instrumented with \
            PSyData API calls.
        :type nodes: (list of) :py:class:`psyclone.psyir.nodes.Loop`

        :param options: a dictionary with options for transformations.
        :type options: dictionary of string:values or None
        :param str options["prefix"]: a prefix to use for the PSyData module \
            name (``PREFIX_psy_data_mod``) and the PSyDataType \
            (``PREFIX_PSYDATATYPE``) - a "_" will be added automatically. \
            It defaults to "".
        :param (str,str) options["region_name"]: an optional name to \
            use for this PSyData area, provided as a 2-tuple containing a \
            location name followed by a local name. The pair of strings \
            should uniquely identify a region unless aggregate information \
            is required (and is supported by the runtime library).

        :raises TransformationError: if we're using the NEMO API and the \
            target routine has no Specification_Part.
        :raises TransformationError: if the PSyData node is inserted \
            between an OpenMP/ACC directive and the loop(s) to which it \
            applies.

        '''
        node_list = self.get_node_list(nodes)

        if not node_list:
            raise TransformationError("Cannot apply transformation to an "
                                      "empty list of nodes.")

        node_parent = node_list[0].parent
        if isinstance(node_parent, Schedule) and \
           isinstance(node_parent.parent, (OMPDoDirective, ACCLoopDirective)):
            raise TransformationError("A PSyData node cannot be inserted "
                                      "between an OpenMP/ACC directive and "
                                      "the loop(s) to which it applies!")

        if node_list[0].ancestor(ACCDirective):
            raise TransformationError("A PSyData node cannot be inserted "
                                      "inside an OpenACC region.")

        if options:
            if "region_name" in options:
                name = options["region_name"]
                # pylint: disable=too-many-boolean-expressions
                if not isinstance(name, tuple) or not len(name) == 2 or \
                   not name[0] or not isinstance(name[0], str) or \
                   not name[1] or not isinstance(name[1], str):
                    raise TransformationError(
                        "Error in {0}. User-supplied region name must be a "
                        "tuple containing two non-empty strings."
                        "".format(self.name))
                # pylint: enable=too-many-boolean-expressions
            if "prefix" in options:
                prefix = options["prefix"]
                if prefix not in Config.get().valid_psy_data_prefixes:
                    raise TransformationError(
                        "Error in 'prefix' parameter: found '{0}', expected "
                        "one of {1} as defined in {2}".format(
                            prefix,
                            Config.get().valid_psy_data_prefixes,
                            Config.get().filename))

        # We have to create an instance of the node that will be inserted in
        # order to find out what module name it will use.
        pdata_node = self._node_class(options=options)
        table = node_list[0].scope.symbol_table
        for name in ([sym.name for sym in pdata_node.imported_symbols] +
                     [pdata_node.fortran_module]):
            try:
                _ = table.lookup_with_tag(name)
            except KeyError:
                # The tag doesn't exist which means that we haven't already
                # added this symbol as part of a PSyData transformation. Check
                # for any clashes with existing symbols.
                try:
                    _ = table.lookup(name)
                    raise TransformationError(
                        "Cannot add PSyData calls because there is already a "
                        "symbol named '{0}' which clashes with one of those "
                        "used by the PSyclone PSyData API. ".format(name))
                except KeyError:
                    pass

        super(PSyDataTrans, self).validate(node_list, options)

        # The checks below are only for the NEMO API and can be removed
        # once #435 is done.
        sched = node_list[0].ancestor(InvokeSchedule)
        if not sched:
            # Some tests construct PSyIR fragments that do not have an
            # InvokeSchedule
            return
        invoke = sched.invoke
        if not isinstance(invoke, NemoInvoke):
            return

        # Get the parse tree of the routine containing this region
        # pylint: disable=protected-access
        ptree = invoke._ast
        # pylint: enable=protected-access
        # Search for the Specification_Part
        if not walk([ptree], Fortran2003.Specification_Part):
            raise TransformationError(
                "For the NEMO API, PSyData can only be added to routines "
                "which contain existing variable declarations (i.e. a "
                "Specification Part) but '{0}' does not have any.".format(
                    invoke.name))
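
The clash check in the loop above follows a two-step idiom: a tagged lookup that succeeds means the symbol was added by a previous PSyData transformation, while a plain lookup that succeeds means a genuine clash. A standalone sketch of the same idiom using sets instead of a PSyIR symbol table (all names hypothetical):

def check_no_clash(tagged_names, all_names, name):
    '''Raise if 'name' exists in the table but carries no PSyData tag.'''
    if name in tagged_names:
        # Already added by a previous PSyData transformation - fine.
        return
    if name in all_names:
        raise ValueError("symbol '{0}' clashes with a PSyData "
                         "symbol".format(name))

check_no_clash({"psy_data_mod"}, {"psy_data_mod", "a_var"}, "psy_data_mod")
check_no_clash(set(), {"a_var"}, "profile_psy_data_mod")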
Example 4
def trans(psy):
    '''Transformation entry point'''
    config = Config.get()

    schedule = psy.invokes.get('invoke_0').schedule

    loop_trans = OMPTaskloopTrans(grainsize=32, nogroup=True)
    wait_trans = OMPTaskwaitTrans()

    module_inline_trans = KernelModuleInlineTrans()

    # Inline all kernels in this Schedule
    for kernel in schedule.kernels():
        module_inline_trans.apply(kernel)

    for child in schedule.children:
        if isinstance(child, Loop):
            loop_trans.apply(child)

    single_trans = OMPSingleTrans()
    parallel_trans = OMPParallelTrans()
    sets = []
    if not config.distributed_memory:
        single_trans.apply(schedule.children)
        parallel_trans.apply(schedule.children)
        wait_trans.apply(schedule.children[0])
        return
    # Find all of the groupings of taskloop and taskwait directives. Each of
    # these groups needs its own parallel+single regions. This makes sure we
    # don't apply OpenMP transformations to the Halo Exchange operations.
    next_start = 0
    next_end = 0
    idx = 0
    for idx, child in enumerate(schedule.children):
        # Loop through the schedule, extending the current grouping
        # until we find a non-OpenMP node.
        if isinstance(child, (OMPTaskloopDirective, OMPTaskwaitDirective)):
            next_end = next_end + 1
        elif not isinstance(child, OMPDirective):
            # If we find a non-OpenMP node while we're currently in a
            # grouping of OpenMP directives, we close the grouping and
            # add it to the list of groupings. Otherwise we just skip
            # over this node.
            if next_start == idx:
                next_end = idx + 1
                next_start = idx + 1
            else:
                sets.append((next_start, next_end))
                next_end = idx + 1
                next_start = idx + 1
        else:
            next_end = next_end + 1
    # If currently in a grouping of directives, add it to the list
    # of groupings
    if next_start <= idx:
        sets.append((next_start, idx + 1))
    # Start from the last grouping to keep indexing correct,
    # so reverse the ordering
    sets.reverse()
    # For each of the groupings of OpenMP directives, surround them
    # with an OpenMP Single and an OpenMP Parallel directive set.
    for next_set in sets:
        single_trans.apply(schedule[next_set[0]:next_set[1]])
        parallel_trans.apply(schedule[next_set[0]])
    # Finally, we loop over the OMPParallelDirectives, and apply the
    # OMPTaskWaitTrans to ensure correctness.
    for child in schedule.children:
        if isinstance(child, OMPParallelDirective):
            wait_trans.apply(child)
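
The index bookkeeping above computes maximal runs of consecutive OpenMP directives, broken by any other node (such as a halo exchange). A simplified, self-contained sketch of the same grouping; the real code above additionally distinguishes taskloop/taskwait directives from other OpenMP directives:

def group_runs(items, is_omp):
    '''Return (start, end) index pairs for maximal runs of OpenMP items.'''
    sets, start = [], 0
    for idx, item in enumerate(items):
        if not is_omp(item):
            if start != idx:
                sets.append((start, idx))
            start = idx + 1
    if start < len(items):
        sets.append((start, len(items)))
    return sets

# Runs of 'T' (taskloop) are grouped; 'H' (halo exchange) breaks them.
print(group_runs("TTHTTT", lambda c: c == "T"))  # [(0, 2), (3, 6)]

As in the script above, the groups would then be processed in reverse so that wrapping one group in new directives does not invalidate the indices of the groups before it.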
Example 5
def test_field_restrict(tmpdir, dist_mem, monkeypatch, annexed):
    ''' Test that we generate correct code for an invoke containing a
    single restriction operation (read from fine, write to
    coarse). Check when annexed is False and True as we produce a
    different number of halo exchanges.

    '''

    config = Config.get()
    dyn_config = config.api_conf("dynamo0.3")
    monkeypatch.setattr(dyn_config, "_compute_annexed_dofs", annexed)

    _, invoke_info = parse(os.path.join(BASE_PATH,
                                        "22.1_intergrid_restrict.f90"),
                           api=API)
    psy = PSyFactory(API, distributed_memory=dist_mem).create(invoke_info)
    output = str(psy.gen)

    assert LFRicBuild(tmpdir).code_compiles(psy)

    defs = ("      USE restrict_test_kernel_mod, "
            "ONLY: restrict_test_kernel_code\n"
            "      USE mesh_map_mod, ONLY: mesh_map_type\n"
            "      USE mesh_mod, ONLY: mesh_type\n"
            "      TYPE(field_type), intent(in) :: field1, field2\n")
    assert defs in output

    defs2 = (
        "      INTEGER(KIND=i_def) nlayers\n"
        "      TYPE(field_proxy_type) field1_proxy, field2_proxy\n"
        "      INTEGER(KIND=i_def), pointer :: "
        "map_aspc1_field1(:,:) => null(), map_aspc2_field2(:,:) => null()\n"
        "      INTEGER(KIND=i_def) ndf_aspc1_field1, undf_aspc1_field1, "
        "ndf_aspc2_field2, undf_aspc2_field2\n"
        "      INTEGER(KIND=i_def) ncell_field2, ncpc_field2_field1\n"
        "      INTEGER(KIND=i_def), pointer :: "
        "cell_map_field1(:,:) => null()\n"
        "      TYPE(mesh_map_type), pointer :: mmap_field2_field1 => "
        "null()\n"
        "      TYPE(mesh_type), pointer :: mesh_field2 => null()\n"
        "      TYPE(mesh_type), pointer :: mesh_field1 => null()\n")
    assert defs2 in output

    inits = (
        "      !\n"
        "      ! Look-up mesh objects and loop limits for inter-grid kernels\n"
        "      !\n"
        "      mesh_field2 => field2_proxy%vspace%get_mesh()\n"
        "      mesh_field1 => field1_proxy%vspace%get_mesh()\n"
        "      mmap_field2_field1 => mesh_field1%get_mesh_map(mesh_field2)\n"
        "      cell_map_field1 => mmap_field2_field1%get_whole_cell_map()\n")
    if dist_mem:
        inits += ("      ncell_field2 = mesh_field2%"
                  "get_last_halo_cell(depth=2)\n")
    else:
        inits += "      ncell_field2 = field2_proxy%vspace%get_ncell()\n"
    inits += (
        "      ncpc_field2_field1 = mmap_field2_field1%"
        "get_ntarget_cells_per_source_cell()\n"
        "      !\n"
        "      ! Look-up dofmaps for each function space\n"
        "      !\n"
        "      map_aspc1_field1 => field1_proxy%vspace%get_whole_dofmap()\n"
        "      map_aspc2_field2 => field2_proxy%vspace%get_whole_dofmap()\n")
    assert inits in output

    if dist_mem:
        # We write out to the L1 halo on the coarse mesh which means
        # we require up-to-date values out to the L2 halo on the fine.
        # Since we are incrementing the coarse field we also need
        # up-to-date values for it in the L1 halo.
        if not annexed:
            halo_exchs = (
                "      ! Call kernels and communication routines\n"
                "      !\n"
                "      IF (field1_proxy%is_dirty(depth=1)) THEN\n"
                "        CALL field1_proxy%halo_exchange(depth=1)\n"
                "      END IF\n"
                "      !\n"
                "      IF (field2_proxy%is_dirty(depth=2)) THEN\n"
                "        CALL field2_proxy%halo_exchange(depth=2)\n"
                "      END IF\n"
                "      !\n"
                "      DO cell=1,mesh_field1%get_last_halo_cell(1)\n")
        else:
            halo_exchs = (
                "      ! Call kernels and communication routines\n"
                "      !\n"
                "      IF (field2_proxy%is_dirty(depth=2)) THEN\n"
                "        CALL field2_proxy%halo_exchange(depth=2)\n"
                "      END IF\n"
                "      !\n"
                "      DO cell=1,mesh_field1%get_last_halo_cell(1)\n")
        assert halo_exchs in output

    # We pass the whole dofmap for the fine mesh (we are reading from).
    # This is associated with the second kernel argument.
    kern_call = (
        "        !\n"
        "        CALL restrict_test_kernel_code(nlayers, "
        "cell_map_field1(:,cell), ncpc_field2_field1, ncell_field2, "
        "field1_proxy%data, field2_proxy%data, undf_aspc1_field1, "
        "map_aspc1_field1(:,cell), ndf_aspc2_field2, undf_aspc2_field2, "
        "map_aspc2_field2)\n"
        "      END DO\n"
        "      !\n")
    assert kern_call in output

    if dist_mem:
        set_dirty = "      CALL field1_proxy%set_dirty()\n"
        assert set_dirty in output
Example 6
    def apply(self, node, options=None):
        '''Apply the NemoArrayRange2Loop transformation if the supplied node
        is the outermost Range node (specifying an access to an array
        index) within an Array Reference that is on the left-hand-side
        of an Assignment node. These constraints are required for
        correctness and an exception will be raised if they are not
        satisfied. If the constraints are satisfied then the outermost
        Range nodes within array references within the Assignment node
        are replaced with references to a loop index. A NemoLoop loop
        (with the same loop index) is also placed around the modified
        assignment statement. If the array reference on the
        left-hand-side of the assignment only had one range node as an
        index (so now has none) then the assignment is also placed
        within a NemoKern.

        The name of the loop index is taken from the PSyclone
        configuration file if a name exists for the particular array
        index, otherwise a new name is generated. The bounds of the
        loop are taken from the Range node if they are provided. If
        not, the loop bounds are taken from the PSyclone configuration
        file if bounds values are supplied. If not, the LBOUND or
        UBOUND intrinsics are used as appropriate. The type of the
        NemoLoop is also taken from the configuration file if it is
        supplied for that index, otherwise it is specified as being
        "unknown".

        :param node: a Range node.
        :type node: :py:class:`psyclone.psyir.nodes.Range`
        :param options: a dictionary with options for \
            transformations. No options are used in this \
            transformation. This is an optional argument that defaults \
            to None.
        :type options: dict of string:values or None

        '''
        self.validate(node)

        array_reference = node.parent
        array_index = node.position
        assignment = array_reference.parent
        parent = assignment.parent
        symbol_table = node.scope.symbol_table

        # See if there is any configuration information for this array index
        loop_type_order = Config.get().api_conf("nemo").get_index_order()
        # TODO: Add tests in get_loop_type_data() to make sure values
        # are strings that represent an integer or a valid variable
        # name, e.g. 1a should not be allowed. See issue #1035
        loop_type_data = Config.get().api_conf("nemo").get_loop_type_data()
        try:
            loop_type = loop_type_order[array_index]
            loop_type_info = loop_type_data[loop_type]
            lower_bound_info = loop_type_info['start']
            upper_bound_info = loop_type_info['stop']
            loop_variable_name = loop_type_info['var']
        except IndexError:
            lower_bound_info = None
            upper_bound_info = None
            loop_variable_name = symbol_table.next_available_name("idx")

        # Lower bound
        if not array_reference.is_lower_bound(array_index):
            # The range specifies a lower bound so use it
            lower_bound = node.start
        elif lower_bound_info:
            # The config metadata specifies a lower bound so use it
            try:
                _ = int(lower_bound_info)
                lower_bound = Literal(lower_bound_info, INTEGER_TYPE)
            except ValueError:
                lower_bound = Reference(symbol_table.lookup(lower_bound_info))
        else:
            # The lower bound is not set or specified so use the
            # LBOUND() intrinsic
            lower_bound = node.start

        # Upper bound
        if not array_reference.is_upper_bound(array_index):
            # The range specifies an upper bound so use it
            upper_bound = node.stop
        elif upper_bound_info:
            # The config metadata specifies an upper bound so use it
            try:
                _ = int(upper_bound_info)
                upper_bound = Literal(upper_bound_info, INTEGER_TYPE)
            except ValueError:
                upper_bound = Reference(symbol_table.lookup(upper_bound_info))
        else:
            # The upper bound is not set or specified so use the
            # UBOUND() intrinsic
            upper_bound = node.stop

        # Just use the specified step value
        step = node.step

        # Look up the loop variable in the symbol table. If it does
        # not exist then create it.
        try:
            loop_variable_symbol = symbol_table.lookup(loop_variable_name)
        except KeyError:
            # Add loop variable as it does not already exist
            loop_variable_symbol = DataSymbol(loop_variable_name, INTEGER_TYPE)
            symbol_table.add(loop_variable_symbol)

        # Replace the loop_idx array dimension with the loop variable.
        n_ranges = None
        for array in assignment.walk(ArrayReference):

            # Ignore the array reference if none of its index accesses
            # are Ranges
            if not any(child for child in array.children
                       if isinstance(child, Range)):
                continue
            # Ignore the array reference if any of its parents up to
            # the Assignment node are not Operations that return
            # scalars.
            ignore = False
            current = array.parent
            while not isinstance(current, Assignment):
                # Ignore if not a scalar valued operation (vector
                # valued operations are excluded in the validate
                # method).
                if not isinstance(current, Operation):
                    ignore = True
                    break
                current = current.parent
            if ignore:
                continue

            current_n_ranges = len([
                child for child in array.children if isinstance(child, Range)
            ])
            if n_ranges is None:
                n_ranges = current_n_ranges
            elif n_ranges != current_n_ranges:
                raise InternalError(
                    "The number of ranges in the arrays within this "
                    "assignment are not equal. This is invalid PSyIR and "
                    "should never happen.")
            idx = get_outer_index(array)
            array.children[idx] = Reference(loop_variable_symbol, parent=array)
        position = assignment.position
        loop = NemoLoop.create(loop_variable_symbol, lower_bound, upper_bound,
                               step, [assignment])
        parent.children[position] = loop
        loop.parent = parent

        try:
            _ = get_outer_index(array_reference)
        except IndexError:
            # All valid array ranges have been replaced with explicit
            # loops. We now need to take the content of the loop and
            # place it within a NemoKern (inlined kernel) node.
            parent = assignment.parent
            # We do not provide the fparser2 AST of the code as we are
            # moving towards using visitors rather than gen_code when
            # outputting NEMO API code.
            inlined_kernel = NemoKern([assignment], None, parent=parent)
            parent.children = [inlined_kernel]
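
Both bound-selection blocks above share one idiom: a configured bound that parses as an integer becomes a Literal, while anything else is treated as a variable name to look up. A standalone sketch using plain tuples instead of PSyIR nodes (names hypothetical):

def make_bound(info, known_variables):
    '''Return a ("literal", value) or ("reference", name) tuple for a
    configured loop-bound entry.'''
    try:
        return ("literal", int(info))
    except ValueError:
        if info not in known_variables:
            raise KeyError("unknown loop-bound variable '{0}'".format(info))
        return ("reference", info)

print(make_bound("1", {"jpi"}))    # ('literal', 1)
print(make_bound("jpi", {"jpi"}))  # ('reference', 'jpi')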
Example 7
def test_debug_mode(tmpdir):
    '''Test creation of GOcean debug_mode configuration.
    '''
    _CONFIG_CONTENT = '''\
    [DEFAULT]
    DEFAULTAPI = dynamo0.3
    DEFAULTSTUBAPI = dynamo0.3
    DISTRIBUTED_MEMORY = true
    REPRODUCIBLE_REDUCTIONS = false
    REPROD_PAD_SIZE = 8
    [gocean1.0]
    '''

    # Test with invalid debug mode
    content = _CONFIG_CONTENT + "DEBUG_MODE = 4"
    config_file = tmpdir.join("config1")
    with config_file.open(mode="w") as new_cfg:
        new_cfg.write(content)
        new_cfg.close()

        config = Config()
        with pytest.raises(ConfigurationError) as err:
            config.load(str(config_file))
        assert ("error while parsing DEBUG_MODE in the [gocean1p0] section "
                "of the config file: Not a boolean") in str(err.value)

    # Test with debug mode True
    content = _CONFIG_CONTENT + "DEBUG_MODE = true"
    config_file = tmpdir.join("config2")
    with config_file.open(mode="w") as new_cfg:
        new_cfg.write(content)
        new_cfg.close()

        config = Config()
        config.load(str(config_file))
        api_config = config.api_conf("gocean1.0")
        assert api_config.debug_mode is True

    # Test with debug mode False
    content = _CONFIG_CONTENT + "DEBUG_MODE = false"
    config_file = tmpdir.join("config3")
    with config_file.open(mode="w") as new_cfg:
        new_cfg.write(content)
        new_cfg.close()

        config = Config()
        config.load(str(config_file))
        api_config = config.api_conf("gocean1.0")
        assert api_config.debug_mode is False

    # Test that if the DEBUG_MODE key doesn't exist it defaults to False
    content = _CONFIG_CONTENT
    config_file = tmpdir.join("config4")
    with config_file.open(mode="w") as new_cfg:
        new_cfg.write(content)
        new_cfg.close()

        config = Config()
        config.load(str(config_file))
        api_config = config.api_conf("gocean1.0")
        assert api_config.debug_mode is False
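
The "Not a boolean" message checked above is standard configparser behaviour: getboolean() accepts only a fixed set of spellings (1/yes/true/on and 0/no/false/off). A self-contained illustration:

import configparser

parser = configparser.ConfigParser()
parser.read_string("[gocean1.0]\nDEBUG_MODE = 4\n")
try:
    parser.getboolean("gocean1.0", "DEBUG_MODE")
except ValueError as err:
    print(err)  # Not a boolean: 4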
Example 8
def trans(psy):
    ''' Transform the schedule for OpenCL generation '''

    # Import transformations
    tinfo = TransInfo()
    globaltrans = tinfo.get_trans_name('KernelImportsToArguments')
    move_boundaries_trans = GOMoveIterationBoundariesInsideKernelTrans()
    cltrans = GOOpenCLTrans()

    # Get the invoke routine
    schedule = psy.invokes.get('invoke_0').schedule

    # Map the kernels by their name to different OpenCL queues. The multiple
    # command queues can be executed concurrently while each command queue
    # executes in-order its kernels. This provides functional parallelism
    # when kernels don't have dependencies between them.
    qmap = {
        'continuity_code': 1,
        'momentum_u_code': 2,
        'momentum_v_code': 3,
        'bc_ssh_code': 1,
        'bc_solid_u_code': 2,
        'bc_solid_v_code': 3,
        'bc_flather_u_code': 2,
        'bc_flather_v_code': 3,
        'field_copy_code': 1,
        'next_sshu_code': 1,
        'next_sshv_code': 1
    }

    # Remove global variables from inside each kernel, pass the boundary
    # values as arguments to the kernel and set the OpenCL work size to 64,
    # which is required for performance (with OpenCL < 1.2 this requires
    # the resulting application to be executed with DL_ESM_ALIGNMENT=64).
    # Technically the OpenCL global_size (which is controlled by
    # DL_ESM_ALIGNMENT) must be divisible by the work_group_size (which
    # is set to 64 in the psyclone script) in OpenCL implementations < 2.0.
    # But from OpenCL 2.0 onwards the standard no longer requires this.
    # In practice it is safest to always enforce it, as many
    # implementations do not fully support the relaxed behaviour.
    # If using a different WORK_GROUP_SIZE, make sure to update the
    # DL_ESM_ALIGNMENT to match.
    for kern in schedule.kernels():
        print(kern.name)
        globaltrans.apply(kern)
        if MOVE_BOUNDARIES:
            move_boundaries_trans.apply(kern)
        if FUNCTIONAL_PARALLELISM:
            kern.set_opencl_options({
                'local_size': WORK_GROUP_SIZE,
                'queue_number': qmap[kern.name]
            })
        else:
            kern.set_opencl_options({'local_size': WORK_GROUP_SIZE})

    # Transform invoke to OpenCL
    cltrans.apply(schedule)

    if XILINX_CONFIG_FILE:
        # Create a Xilinx Compiler Configuration file
        path = Config.get().kernel_output_dir
        with open(os.path.join(path, "xilinx.cfg"), "w") as cfgfile:
            cfgfile.write("# Xilinx FPGA configuration file\n")
            # cfgfile.write("[connectivity]\n")
            # cfgfile.write("# Create 2 CU of the given kernels\n")
            # cfgfile.write("nk=continuity_code:2\n")
            # cfgfile.write("nk=momentum_u_code:2\n")
            # cfgfile.write("nk=momentum_v_code:2\n")
            # cfgfile.write("nk=bc_ssh_code:2\n")

            # cfgfile.write("\n[hls]\n")
            # cfgfile.write("# Assign CUs to different SLRs\n")
            # cfgfile.write("slr=momentum_u_code_1:SLR0\n")
            # cfgfile.write("slr=momentum_u_code_2:SLR0\n")
            # cfgfile.write("slr=momentum_v_code_1:SLR2\n")
            # cfgfile.write("slr=momentum_v_code_2:SLR2\n")

    return psy
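
A quick check of the arithmetic behind the DL_ESM_ALIGNMENT comment above: with OpenCL < 2.0 the global size must be a multiple of the work-group size, so a domain width is padded up to the next multiple of 64 (illustrative numbers):

def padded_global_size(domain_size, alignment=64):
    '''Round a domain size up to the next multiple of the alignment.'''
    return ((domain_size + alignment - 1) // alignment) * alignment

print(padded_global_size(258))  # 320, a multiple of the 64 work-group size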
Example 9
def test_main_api():
    '''Tests the three ways of specifying an API: command line, config file,
    or relying on the default.'''

    # 1) Make sure that, if no parameters are given,
    #    config will give us the default API

    # Make sure we get a default config instance
    Config._instance = None
    Config.get()

    assert Config.get().api == Config.get().default_api

    # 2) Check that a command line option will overwrite the default
    filename = (os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             "test_files", "gocean1p0", "single_invoke.f90"))

    main([filename, "-api", "gocean1.0"])
    assert Config.get().api == "gocean1.0"

    # 3) Check that a config option will overwrite the default
    Config._instance = None
    Config.get()
    # This config file specifies the gocean1.0 api
    config_name = (os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                "test_files", "gocean1p0",
                                "new_iteration_space.psyclone"))
    main([filename, "--config", config_name])
    assert Config.get().api == "gocean1.0"

    # 4) Check that a command line option overwrites what is specified
    #    in the config file (and the default)
    Config._instance = None
    Config.get()

    filename = (os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             "test_files", "dynamo0p1", "1_kg_inline.f90"))

    # This config file specifies the gocean1.0 api, but
    # command line should take precedence
    main([filename, "--config", config_name, "-api", "dynamo0.1"])
    assert Config.get().api == "dynamo0.1"
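
Setting Config._instance to None, as done between the cases above, forces the next Config.get() to build a fresh singleton. A minimal sketch of that idiom with a hypothetical stand-in class:

class Cfg:
    _instance = None

    @classmethod
    def get(cls):
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

first = Cfg.get()
Cfg._instance = None           # what the test does between cases
assert Cfg.get() is not first  # the next get() builds a fresh instance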
Example 10
    def __init__(self, arg, arg_info, call):
        self._arg = arg
        api_config = Config.get().api_conf("dynamo0.1")
        access_mapping = api_config.get_access_mapping()
        Argument.__init__(self, call, arg_info, access_mapping[arg.access])
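
For reference, the access mapping fetched above maps API access strings onto AccessType values; Example 1 uses the reverse mapping to recover the GH_* names for error messages. An illustrative (assumed) shape with a stand-in enum:

from enum import Enum

class AccessType(Enum):
    READ = 1
    WRITE = 2

# Assumed shape of api_config.get_access_mapping() and its reverse.
access_mapping = {"gh_read": AccessType.READ, "gh_write": AccessType.WRITE}
rev_access_mapping = {value: key for key, value in access_mapping.items()}
print(rev_access_mapping[AccessType.READ])  # gh_read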
Example 11
def test_invalid_nemo_config_files(tmpdir):
    ''' Test various error conditions.
    '''

    # Valid configuration file without nemo-specific settings.
    # We add several lines in the tests for various error conditions
    # pylint: disable=invalid-name
    _CONFIG_CONTENT = '''\
    [DEFAULT]
    DEFAULTAPI = nemo
    DEFAULTSTUBAPI = dynamo0.3
    DISTRIBUTED_MEMORY = true
    REPRODUCIBLE_REDUCTIONS = false
    REPROD_PAD_SIZE = 8
    [nemo]
    '''
    # Create config files with a nemo section and a mapping-lon entry,
    # each with one of the required keys missing
    for (key, data) in [("var", "start: 1, stop: jpi"),
                        ("start", "var: ji, stop: jpi"),
                        ("stop", "var: ji, start: 1")]:
        content = _CONFIG_CONTENT + "mapping-lon = " + data
        Config._instance = None
        config_file = tmpdir.join("config1")
        with config_file.open(mode="w") as new_cfg:
            new_cfg.write(content)
            new_cfg.close()

            config = Config()
            with pytest.raises(ConfigurationError) as err:
                config.load(str(config_file))
            assert "does not contain key '{0}".format(key) in str(err)

    # Add an invalid index-order
    content = _CONFIG_CONTENT + \
        '''mapping-lon = var: i, start: 1, stop:2
    mapping-lat = var: j, start: 1, stop:2
    index-order = invalid'''
    config_file = tmpdir.join("config2")
    with config_file.open(mode="w") as new_cfg:
        new_cfg.write(content)
        new_cfg.close()

        config = Config()
        with pytest.raises(ConfigurationError) as err:
            config.load(str(config_file))
        assert "Invalid loop type \"invalid\" found " in str(err)
        assert "Must be one of [\\'lon\\', \\'lat\\']"

    # Add an invalid key:
    content = _CONFIG_CONTENT + "invalid-key=value"
    config_file = tmpdir.join("config3")
    with config_file.open(mode="w") as new_cfg:
        new_cfg.write(content)
        new_cfg.close()

        config = Config()
        with pytest.raises(ConfigurationError) as err:
            config.load(str(config_file))
        assert "Invalid key \"invalid-key\" found in the \"nemo\" section " \
               "of the configuration file \"{0}\".". format(str(config_file)) \
               in str(err)

    # Use a variable name more than once:
    content = _CONFIG_CONTENT + \
        '''mapping-lon = var: i, start: 1, stop:2
    mapping-lat = var: i, start: 1, stop:2
    index-order = lon, lat'''
    config_file = tmpdir.join("config4")
    with config_file.open(mode="w") as new_cfg:
        new_cfg.write(content)
        new_cfg.close()

        config = Config()
        with pytest.raises(ConfigurationError) as err:
            config.load(str(config_file))
        assert "mapping-lat defines variable \"i\" again in the \"nemo\" "\
               "section of the file \"{0}\".".format(str(config_file)) \
               in str(err)
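
The final case above exercises a duplicate-variable check across the index mappings. A standalone sketch of that check (function name hypothetical):

def check_unique_vars(mappings):
    '''Raise if two index mappings introduce the same loop variable.'''
    seen = {}
    for name, var in mappings:
        if var in seen:
            raise ValueError('{0} defines variable "{1}" again'.format(
                name, var))
        seen[var] = name

check_unique_vars([("mapping-lon", "i"), ("mapping-lat", "j")])  # OK
# check_unique_vars([("mapping-lon", "i"), ("mapping-lat", "i")]) raises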
Example 12
    def __init__(self):
        if LFRicConstants.HAS_BEEN_INITIALISED:
            return

        LFRicConstants.HAS_BEEN_INITIALISED = True
        api_config = Config.get().api_conf("dynamo0.3")

        # ---------- Evaluators: quadrature ----------------------------------
        LFRicConstants.VALID_QUADRATURE_SHAPES = \
            ["gh_quadrature_xyoz", "gh_quadrature_face", "gh_quadrature_edge"]
        LFRicConstants.VALID_EVALUATOR_SHAPES = \
            LFRicConstants.VALID_QUADRATURE_SHAPES + ["gh_evaluator"]

        # ---------- LFRicArgDescriptor class constants  ---------------------

        # Supported LFRic API argument types (scalars, fields, operators)
        LFRicConstants.VALID_SCALAR_NAMES = ["gh_scalar"]
        LFRicConstants.VALID_FIELD_NAMES = ["gh_field"]
        LFRicConstants.VALID_OPERATOR_NAMES = ["gh_operator",
                                               "gh_columnwise_operator"]
        LFRicConstants.VALID_ARG_TYPE_NAMES = \
            LFRicConstants.VALID_FIELD_NAMES + \
            LFRicConstants.VALID_OPERATOR_NAMES + \
            LFRicConstants.VALID_SCALAR_NAMES

        # Supported API argument data types ('gh_real', 'gh_integer'
        # and 'gh_logical')
        LFRicConstants.VALID_ARG_DATA_TYPES = \
            ["gh_real", "gh_integer", "gh_logical"]
        LFRicConstants.VALID_SCALAR_DATA_TYPES = \
            LFRicConstants.VALID_ARG_DATA_TYPES
        LFRicConstants.VALID_FIELD_DATA_TYPES = ["gh_real", "gh_integer"]
        LFRicConstants.VALID_OPERATOR_DATA_TYPES = ["gh_real"]

        # pylint: disable=too-many-instance-attributes

        # Supported LFRic API stencil types and directions
        LFRicConstants.VALID_STENCIL_TYPES = ["x1d", "y1d", "xory1d", "cross",
                                              "region", "cross2d"]
        # Note, can't use VALID_STENCIL_DIRECTIONS at all locations in this
        # file as it causes failures with py.test 2.8.7. Therefore some parts
        # of the code do not use the VALID_STENCIL_DIRECTIONS variable.
        LFRicConstants.VALID_STENCIL_DIRECTIONS = ["x_direction",
                                                   "y_direction"]

        # Note, xory1d does not have a direct mapping in STENCIL_MAPPING as it
        # indicates either x1d or y1d.
        LFRicConstants.STENCIL_MAPPING = \
            {"x1d": "STENCIL_1DX", "y1d": "STENCIL_1DY",
             "cross": "STENCIL_CROSS", "cross2d": "STENCIL_2D_CROSS",
             "region": "STENCIL_REGION"}

        # Supported LFRic API mesh types that may be specified for a field
        # using the mesh_arg=... meta-data element (for inter-grid kernels that
        # perform prolongation/restriction).
        LFRicConstants.VALID_MESH_TYPES = ["gh_coarse", "gh_fine"]

        # ---------- Fortran datatypes ---------------------------------------
        # This is only used here, so it is not stored as a class variable:
        supported_fortran_datatypes = api_config.supported_fortran_datatypes

        # psyGen intrinsic types for kernel argument data as defined in LFRic
        # configuration by the supported Fortran datatypes ('real', 'integer'
        # and 'logical').
        LFRicConstants.VALID_INTRINSIC_TYPES = supported_fortran_datatypes

        # Valid intrinsic types for field kernel argument data
        # ('real' and 'integer').
        LFRicConstants.VALID_FIELD_INTRINSIC_TYPES = ["real", "integer"]

        # ---------- Mapping from metadata data_type to Fortran intrinsic type
        LFRicConstants.MAPPING_DATA_TYPES = \
            OrderedDict(zip(LFRicConstants.VALID_ARG_DATA_TYPES,
                            LFRicConstants.VALID_INTRINSIC_TYPES))

        # ---------- Evaluators -----------------------------------------------

        # Dictionary allowing us to look-up the name of the Fortran module,
        # type and proxy-type associated with each quadrature shape
        LFRicConstants.QUADRATURE_TYPE_MAP = {
            "gh_quadrature_xyoz": {"module": "quadrature_xyoz_mod",
                                   "type": "quadrature_xyoz_type",
                                   "proxy_type": "quadrature_xyoz_proxy_type"},
            "gh_quadrature_face": {"module": "quadrature_face_mod",
                                   "type": "quadrature_face_type",
                                   "proxy_type": "quadrature_face_proxy_type"},
            "gh_quadrature_edge": {"module": "quadrature_edge_mod",
                                   "type": "quadrature_edge_type",
                                   "proxy_type": "quadrature_edge_proxy_type"}}

        # ---------- Loops (bounds, types, names) -----------------------------
        # These are loop bound names which identify positions in a field's
        # halo. It is useful to group these together as we often need to
        # determine whether an access to a field or other object includes
        # access to the halo, or not.
        LFRicConstants.HALO_ACCESS_LOOP_BOUNDS = ["cell_halo", "dof_halo",
                                                  "colour_halo"]

        LFRicConstants.VALID_LOOP_BOUNDS_NAMES = \
            (["start",     # the starting
                           # index. Currently this is
                           # always 1
              "inner",     # a placeholder for when we
                           # support loop splitting into
                           # work that does not access
                           # the halo and work that does.
                           # This will be used to help
                           # overlap computation and
                           # communication
              "ncolour",   # the number of cells with
                           # the current colour
              "ncolours",  # the number of colours in a
                           # coloured loop
              "ncells",    # the number of owned cells
              "ndofs",     # the number of owned dofs
              "nannexed"]  # the number of owned dofs
                           # plus the number of annexed
                           # dofs. As the indices of
                           # dofs are arranged that
                           # owned dofs have lower
                           # indices than annexed dofs,
                           # having this value as an
                           # upper bound will compute
                           # both owned and annexed
                           # dofs.
             + LFRicConstants.HALO_ACCESS_LOOP_BOUNDS)

        # Valid LFRic loop types. The default is "" which is over cell columns
        # (in the horizontal plane). A "null" loop doesn't iterate over
        # anything but is required for the halo-exchange logic.
        LFRicConstants.VALID_LOOP_TYPES = ["dof", "colours", "colour", "",
                                           "null"]

        # Valid LFRic iteration spaces for built-in kernels
        LFRicConstants.BUILTIN_ITERATION_SPACES = ["dof"]

        # The types of argument that are valid for built-in kernels in the
        # LFRic API
        LFRicConstants.VALID_BUILTIN_ARG_TYPES = \
            LFRicConstants.VALID_FIELD_NAMES + \
            LFRicConstants.VALID_SCALAR_NAMES

        # The data types of argument that are valid for built-in kernels
        # in the LFRic API ('real' and 'integer')
        LFRicConstants.VALID_BUILTIN_DATA_TYPES = ["gh_real", "gh_integer"]

        # Valid LFRic iteration spaces for user-supplied kernels and
        # built-in kernels
        LFRicConstants.USER_KERNEL_ITERATION_SPACES = ["cell_column", "domain"]
        LFRicConstants.VALID_ITERATION_SPACES = \
            LFRicConstants.USER_KERNEL_ITERATION_SPACES + \
            LFRicConstants.BUILTIN_ITERATION_SPACES

        # ---------- Function spaces (FS) ------------------------------------
        # Discontinuous FS
        LFRicConstants.DISCONTINUOUS_FUNCTION_SPACES = \
            ["w3", "wtheta", "w2v", "w2vtrace", "w2broken"]

        # Continuous FS
        # Note, any_w2 is not a space on its own. any_w2 is used as a common
        # term for any vector "w2*" function space (w2, w2h, w2v, w2broken) but
        # not w2*trace (spaces of scalar functions). As any_w2 stands for all
        # vector "w2*" spaces it needs to a) be treated as continuous and b)
        # have vector basis and scalar differential basis dimensions.
        # TODO #540: resolve what W2* spaces should be included in ANY_W2 list
        # and whether ANY_W2 should be in the continuous function space list.
        LFRicConstants.ANY_W2_FUNCTION_SPACES = \
            ["w2", "w2h", "w2v", "w2broken"]

        LFRicConstants.CONTINUOUS_FUNCTION_SPACES = \
            ["w0", "w1", "w2", "w2trace", "w2h", "w2htrace", "any_w2"]

        # Read-only FS
        LFRicConstants.READ_ONLY_FUNCTION_SPACES = ["wchi"]

        # Valid FS names
        LFRicConstants.VALID_FUNCTION_SPACES = \
            LFRicConstants.DISCONTINUOUS_FUNCTION_SPACES + \
            LFRicConstants.CONTINUOUS_FUNCTION_SPACES + \
            LFRicConstants.READ_ONLY_FUNCTION_SPACES

        # Valid any_space metadata (general FS, could be continuous or
        # discontinuous). The number of 'ANY_SPACE' spaces is set in the
        # PSyclone configuration file.
        LFRicConstants.VALID_ANY_SPACE_NAMES = [
            "any_space_{0}".format(x+1) for x in
            range(api_config.num_any_space)]

        # Valid any_discontinuous_space metadata (general FS known to be
        # discontinuous). The number of 'ANY_DISCONTINUOUS_SPACE' spaces is
        # set in the PSyclone configuration file.
        LFRicConstants.VALID_ANY_DISCONTINUOUS_SPACE_NAMES = [
            "any_discontinuous_space_{0}".format(x+1) for x in
            range(api_config.num_any_discontinuous_space)]

        # Valid discontinuous FS names (for optimisation purposes)
        LFRicConstants.VALID_DISCONTINUOUS_NAMES = \
            LFRicConstants.DISCONTINUOUS_FUNCTION_SPACES +\
            LFRicConstants.VALID_ANY_DISCONTINUOUS_SPACE_NAMES

        # FS names consist of all valid names
        LFRicConstants.VALID_FUNCTION_SPACE_NAMES = \
            LFRicConstants.VALID_FUNCTION_SPACES + \
            LFRicConstants.VALID_ANY_SPACE_NAMES + \
            LFRicConstants.VALID_ANY_DISCONTINUOUS_SPACE_NAMES

        # Lists of function spaces that have
        # a) scalar basis functions;
        LFRicConstants.SCALAR_BASIS_SPACE_NAMES = \
            ["w0", "w2trace", "w2htrace", "w2vtrace", "w3", "wtheta", "wchi"]
        # b) vector basis functions;
        LFRicConstants.VECTOR_BASIS_SPACE_NAMES = ["w1", "w2", "w2h", "w2v",
                                                   "w2broken", "any_w2"]
        # c) scalar differential basis functions;
        LFRicConstants.SCALAR_DIFF_BASIS_SPACE_NAMES = ["w2", "w2h", "w2v",
                                                        "w2broken", "any_w2"]
        # d) vector differential basis functions.
        LFRicConstants.VECTOR_DIFF_BASIS_SPACE_NAMES = \
            ["w0", "w1", "w2trace", "w2htrace", "w2vtrace", "w3", "wtheta",
             "wchi"]

        # Evaluators: basis and differential basis
        LFRicConstants.VALID_EVALUATOR_NAMES = ["gh_basis", "gh_diff_basis"]

        # Meta functions
        LFRicConstants.VALID_METAFUNC_NAMES = \
            LFRicConstants.VALID_EVALUATOR_NAMES
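
The OrderedDict(zip(...)) used for MAPPING_DATA_TYPES above silently relies on the configuration listing the Fortran datatypes in the same order as VALID_ARG_DATA_TYPES. Assuming the documented order ('real', 'integer', 'logical'), the resulting mapping is:

from collections import OrderedDict

valid_arg_data_types = ["gh_real", "gh_integer", "gh_logical"]
supported_fortran_datatypes = ["real", "integer", "logical"]
print(OrderedDict(zip(valid_arg_data_types, supported_fortran_datatypes)))
# OrderedDict([('gh_real', 'real'), ('gh_integer', 'integer'),
#              ('gh_logical', 'logical')])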
Example 13
def main(args):
    '''
    Parses and checks the command line arguments, calls the generate
    function if all is well, catches any errors and outputs the
    results.
    :param list args: the list of command-line arguments that PSyclone has \
                      been invoked with.
    '''
    # pylint: disable=too-many-statements,too-many-branches

    # Make sure we have the supported APIs defined in the Config singleton,
    # but postpone loading the config file until the command line has been
    # parsed, in case the user specifies a different config file.
    Config.get(do_not_load_file=True)

    parser = argparse.ArgumentParser(
        description='Run the PSyclone code generator on a particular file')
    parser.add_argument('-oalg', help='filename of transformed algorithm code')
    parser.add_argument('-opsy', help='filename of generated PSy code')
    parser.add_argument('-okern',
                        help='directory in which to put transformed kernels, '
                        'default is the current working directory.')
    parser.add_argument('-api',
                        help='choose a particular api from {0}, '
                        'default \'{1}\'.'.format(
                            str(Config.get().supported_apis),
                            Config.get().default_api))
    parser.add_argument('filename', help='algorithm-layer source code')
    parser.add_argument('-s',
                        '--script',
                        help='filename of a PSyclone'
                        ' optimisation script')
    parser.add_argument('-d',
                        '--directory',
                        default="",
                        help='path to root of directory '
                        'structure containing kernel source code')
    # Make the default an empty list so that we can check whether the
    # user has supplied a value(s) later
    parser.add_argument('-I',
                        '--include',
                        default=[],
                        action="append",
                        help='path to Fortran INCLUDE or module files')
    parser.add_argument(
        '-l',
        '--limit',
        dest='limit',
        default='off',
        choices=['off', 'all', 'output'],
        help='limit the Fortran line length to 132 characters (default '
        '\'%(default)s\'). Use \'all\' to apply limit to both input and '
        'output Fortran. Use \'output\' to apply line-length limit to output '
        'Fortran only.')
    parser.add_argument('-dm',
                        '--dist_mem',
                        dest='dist_mem',
                        action='store_true',
                        help='generate distributed memory code')
    parser.add_argument('-nodm',
                        '--no_dist_mem',
                        dest='dist_mem',
                        action='store_false',
                        help='do not generate distributed memory code')
    parser.add_argument(
        '--kernel-renaming',
        default="multiple",
        choices=configuration.VALID_KERNEL_NAMING_SCHEMES,
        help="Naming scheme to use when re-naming transformed kernels")
    parser.add_argument(
        '--profile',
        '-p',
        action="append",
        choices=Profiler.SUPPORTED_OPTIONS,
        help="Add profiling hooks for either 'kernels' or 'invokes'")
    parser.set_defaults(dist_mem=Config.get().distributed_memory)

    parser.add_argument("--config",
                        help="Config file with "
                        "PSyclone specific options.")
    parser.add_argument(
        '-v',
        '--version',
        dest='version',
        action="store_true",
        help='Display version information ({0})'.format(__VERSION__))

    args = parser.parse_args(args)

    if args.version:
        print("PSyclone version: {0}".format(__VERSION__))

    if args.profile:
        Profiler.set_options(args.profile)

    # If an output directory has been specified for transformed kernels
    # then check that it is valid
    if args.okern:
        if not os.path.exists(args.okern):
            print("Specified kernel output directory ({0}) does not exist.".
                  format(args.okern),
                  file=sys.stderr)
            sys.exit(1)
        if not os.access(args.okern, os.W_OK):
            print("Cannot write to specified kernel output directory ({0}).".
                  format(args.okern),
                  file=sys.stderr)
            sys.exit(1)
        kern_out_path = args.okern
    else:
        # We write any transformed kernels to the current working directory
        kern_out_path = os.getcwd()

    # If no config file name is specified, args.config is None
    # and Config will load the default config file.
    Config.get().load(args.config)

    # Check API, if none is specified, take the setting from the config file
    if args.api is None:
        # No command line option, use the one specified in Config - which
        # is either based on a parameter in the config file, or otherwise
        # the default:
        api = Config.get().api
    elif args.api not in Config.get().supported_apis:
        print("Unsupported API '{0}' specified. Supported API's are "
              "{1}.".format(args.api,
                            Config.get().supported_apis),
              file=sys.stderr)
        sys.exit(1)
    else:
        # There is a valid API specified on the command line. Set it
        # as API in the config object as well.
        api = args.api
        Config.get().api = api

    # The Configuration manager checks that the supplied path(s) is/are
    # valid so protect with a try
    try:
        if args.include:
            Config.get().include_paths = args.include
        else:
            # Default is to instruct fparser2 to look in the directory
            # containing the file being parsed
            Config.get().include_paths = ["./"]
    except ConfigurationError as err:
        print(str(err), file=sys.stderr)
        sys.exit(1)

    try:
        alg, psy = generate(args.filename,
                            api=api,
                            kernel_path=args.directory,
                            script_name=args.script,
                            line_length=(args.limit == 'all'),
                            distributed_memory=args.dist_mem,
                            kern_out_path=kern_out_path,
                            kern_naming=args.kernel_renaming)
    except NoInvokesError:
        _, exc_value, _ = sys.exc_info()
        print("Warning: {0}".format(exc_value))
        # No invoke calls were found in the algorithm file so we do not
        # need to process it or generate any PSy-layer code. Instead we
        # output the original algorithm file and set the psy file to
        # be empty.
        alg_file = open(args.filename)
        alg = alg_file.read()
        psy = ""
    except (OSError, IOError, ParseError, GenerationError, RuntimeError):
        _, exc_value, _ = sys.exc_info()
        print(exc_value, file=sys.stderr)
        sys.exit(1)
    except Exception:  # pylint: disable=broad-except
        print("Error, unexpected exception, please report to the authors:",
              file=sys.stderr)
        exc_type, exc_value, exc_tb = sys.exc_info()
        print("Description ...", file=sys.stderr)
        print(exc_value, file=sys.stderr)
        print("Type ...", file=sys.stderr)
        print(exc_type, file=sys.stderr)
        print("Stacktrace ...", file=sys.stderr)
        traceback.print_tb(exc_tb, limit=20, file=sys.stderr)
        sys.exit(1)
    if args.limit != 'off':
        # Limit the line length of the output Fortran to ensure it conforms
        # to the 132 characters mandated by the standard.
        fll = FortLineLength()
        psy_str = fll.process(str(psy))
        alg_str = fll.process(str(alg))
    else:
        psy_str = str(psy)
        alg_str = str(alg)
    if args.oalg is not None:
        write_unicode_file(alg_str, args.oalg)
    else:
        print("Transformed algorithm code:\n%s" % alg_str)

    if not psy_str:
        # empty file so do not output anything
        pass
    elif args.opsy is not None:
        write_unicode_file(psy_str, args.opsy)
    else:
        print("Generated psy layer code:\n", psy_str)
Example 14
def generate(filename,
             api="",
             kernel_path="",
             script_name=None,
             line_length=False,
             distributed_memory=None,
             kern_out_path="",
             kern_naming="multiple"):
    # pylint: disable=too-many-arguments
    '''Takes a PSyclone algorithm specification as input and outputs the
    associated generated algorithm and psy codes suitable for
    compiling with the specified kernel(s) and support
    infrastructure. Uses the :func:`parse.algorithm.parse` function to
    parse the algorithm specification, the :class:`psyGen.PSy` class
    to generate the PSy code and the :class:`alg_gen.Alg` class to
    generate the modified algorithm code.

    :param str filename: The file containing the algorithm specification.
    :param str api: The name of the API to use. If unspecified, the \
                    default API from the PSyclone configuration is used.
    :param str kernel_path: The directory from which to recursively \
                            search for the files containing the kernel \
                            source (if different from the location of the \
                            algorithm specification).
    :param str script_name: A script file that can apply optimisations \
                            to the PSy layer (can be a path to a file or \
                            a filename that relies on the PYTHONPATH to \
                            find the module).
    :param bool line_length: A logical flag specifying whether we care \
                             about line lengths being longer than 132 \
                             characters. If so, the input (algorithm \
                             and kernel) code is checked to make sure \
                             that it conforms. The default is False.
    :param bool distributed_memory: A logical flag specifying whether to \
                                    generate distributed memory code. The \
                                    default is set in the config.py file.
    :param str kern_out_path: Directory to which to write transformed \
                              kernel code.
    :param str kern_naming: the scheme to use when re-naming transformed \
                             kernels.
    :return: 2-tuple containing fparser1 ASTs for the algorithm code and \
             the psy code.
    :rtype: (:py:class:`fparser.one.block_statements.BeginSource`, \
             :py:class:`fparser.one.block_statements.Module`)

    :raises IOError: if the filename or search path do not exist.
    :raises GenerationError: if an invalid API is specified.
    :raises GenerationError: if an invalid kernel-renaming scheme is specified.

    For example:

    >>> from psyclone.generator import generate
    >>> alg, psy = generate("algspec.f90")
    >>> alg, psy = generate("algspec.f90", kernel_path="src/kernels")
    >>> alg, psy = generate("algspec.f90", script_name="optimise.py")
    >>> alg, psy = generate("algspec.f90", line_length=True)
    >>> alg, psy = generate("algspec.f90", distributed_memory=False)

    '''

    if distributed_memory is None:
        distributed_memory = Config.get().distributed_memory

    # pylint: disable=too-many-statements, too-many-locals, too-many-branches
    if api == "":
        api = Config.get().default_api
    else:
        if api not in Config.get().supported_apis:
            raise GenerationError(
                "generate: Unsupported API '{0}' specified. Supported "
                "types are {1}.".format(api,
                                        Config.get().supported_apis))

    # Store Kernel-output options in our Configuration object
    Config.get().kernel_output_dir = kern_out_path
    try:
        Config.get().kernel_naming = kern_naming
    except ValueError as verr:
        raise GenerationError(
            "Invalid kernel-renaming scheme supplied: {0}".format(str(verr)))

    if not os.path.isfile(filename):
        raise IOError("file '{0}' not found".format(filename))
    if kernel_path and not os.access(kernel_path, os.R_OK):
        raise IOError("kernel search path '{0}' not found".format(kernel_path))
    try:
        from psyclone.alg_gen import Alg
        ast, invoke_info = parse(filename,
                                 api=api,
                                 invoke_name="invoke",
                                 kernel_path=kernel_path,
                                 line_length=line_length)
        psy = PSyFactory(api, distributed_memory=distributed_memory)\
            .create(invoke_info)
        if script_name is not None:
            handle_script(script_name, psy)

        # Add profiling nodes to schedule if automatic profiling has
        # been requested.
        from psyclone.psyir.nodes import Loop
        for invoke in psy.invokes.invoke_list:
            Profiler.add_profile_nodes(invoke.schedule, Loop)

        if api not in API_WITHOUT_ALGORITHM:
            alg_gen = Alg(ast, psy).gen
        else:
            alg_gen = None
    except Exception:
        raise

    return alg_gen, psy.gen
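
A minimal usage sketch (filenames are illustrative): the two objects
returned by generate() can be converted to Fortran source with str() and
written out, mirroring what main() does above.

from psyclone.generator import generate

alg, psy = generate("algspec.f90", api="dynamo0.3")
with open("alg.f90", "w") as alg_file:
    alg_file.write(str(alg))
with open("psy.f90", "w") as psy_file:
    psy_file.write(str(psy))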
Example n. 15
# NOTE: this appears in the tests as a pytest fixture; the
# @pytest.fixture decorator is likely omitted from this excerpt.
def setup():
    '''Make sure that all tests here use gocean1.0 as API.'''
    Config.get().api = "gocean1.0"
    yield ()
    # pylint: disable=protected-access
    Config._instance = None
Example n. 16
    def __init__(self, access, space, stencil=None, mesh=None):
        api_config = Config.get().api_conf("gocean0.1")
        access_mapping = api_config.get_access_mapping()
        access_type = access_mapping[access]
        super(GODescriptor, self).__init__(access_type, space, stencil, mesh)
Example n. 17
    def validate(self, node, options=None):
        '''Perform various checks to ensure that it is valid to apply the
        NemoArrayRange2LoopTrans transformation to the supplied PSyIR Node.

        :param node: the node that is being checked.
        :type node: :py:class:`psyclone.psyir.nodes.Range`
        :param options: a dictionary with options for \
            transformations. No options are used in this \
            transformation. This is an optional argument that defaults \
            to None.
        :type options: dict of string:values or None

        :raises TransformationError: if the node argument is not a \
            Range, if the Range node is not part of an ArrayReference, \
            if the Range node is not the outermost Range node of the \
            ArrayReference or if that ArrayReference does not \
            constitute the left hand side of an Assignment node.

        '''
        # Am I a Range node?
        if not isinstance(node, Range):
            raise TransformationError(
                "Error in NemoArrayRange2LoopTrans transformation. The "
                "supplied node argument should be a PSyIR Range, but "
                "found '{0}'.".format(type(node).__name__))
        # Am I within an array reference?
        if not node.parent or not isinstance(node.parent, ArrayReference):
            raise TransformationError(
                "Error in NemoArrayRange2LoopTrans transformation. The "
                "supplied node argument should be within an ArrayReference "
                "node, but found '{0}'.".format(type(node.parent).__name__))
        array_ref = node.parent
        # Is the array reference within an assignment?
        if not array_ref.parent or not isinstance(array_ref.parent,
                                                  Assignment):
            raise TransformationError(
                "Error in NemoArrayRange2LoopTrans transformation. The "
                "supplied node argument should be within an ArrayReference "
                "node that is within an Assignment node, but found '{0}'.".
                format(type(array_ref.parent).__name__))
        assignment = array_ref.parent
        # Is the array reference the lhs of the assignment?
        if assignment.lhs is not array_ref:
            raise TransformationError(
                "Error in NemoArrayRange2LoopTrans transformation. The "
                "supplied node argument should be within an ArrayReference "
                "node that is within the left-hand-side of an Assignment "
                "node, but it is on the right-hand-side.")
        # Does the rhs of the assignment contain any operations that
        # return arrays? This is not currently supported.
        for operation in assignment.rhs.walk(Operation):
            # At the moment the only array valued operator is matmul
            if operation.operator == BinaryOperation.Operator.MATMUL:
                raise TransformationError(
                    "Error in NemoArrayRange2LoopTrans transformation. This "
                    "transformation does not support array valued operations "
                    "on the rhs of the associated Assignment node, but found "
                    "'{0}'.".format(operation.operator.name))

        # Is the Range node the outermost Range (as if not, the
        # transformation would be invalid)?
        if any(
                isinstance(child, Range)
                for child in node.parent.children[node.position + 1:]):
            raise TransformationError(
                "Error in NemoArrayRange2LoopTrans transformation. This "
                "transformation can only be applied to the outermost "
                "Range.")

        # If there is a loop variable defined in the config file and
        # this variable is already defined in the code, is it defined
        # as an integer?
        array_index = node.position
        loop_type_order = Config.get().api_conf("nemo").get_index_order()
        loop_type_data = Config.get().api_conf("nemo").get_loop_type_data()
        try:
            loop_type = loop_type_order[array_index]
            loop_type_info = loop_type_data[loop_type]
            loop_variable_name = loop_type_info['var']
            try:
                symbol_table = node.scope.symbol_table
                loop_variable_symbol = symbol_table.lookup(loop_variable_name)
                # Check the existing loop variable name is a scalar integer
                if isinstance(loop_variable_symbol, DeferredType) or \
                   not (loop_variable_symbol.is_scalar and
                        loop_variable_symbol.datatype.intrinsic is
                        ScalarType.Intrinsic.INTEGER):
                    raise TransformationError(
                        "The config file specifies '{0}' as the name of the "
                        "iteration variable but this is already declared in "
                        "the code as something that is not a scalar integer, "
                        "or is a deferred type.".format(loop_variable_name))
            except KeyError:
                # Variable is not defined
                pass
        except IndexError:
            # There is no name for this index in the config file
            pass
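
A hedged sketch of the first check above (the transformation's import
path is an assumption based on the PSyclone source tree): passing
anything other than a Range node causes validate() to raise a
TransformationError.

import pytest
from psyclone.psyir.nodes import Literal
from psyclone.psyir.symbols import INTEGER_TYPE
from psyclone.transformations import TransformationError
from psyclone.domain.nemo.transformations import NemoArrayRange2LoopTrans

trans = NemoArrayRange2LoopTrans()
with pytest.raises(TransformationError) as err:
    # A Literal is not a Range, so the first check rejects it.
    trans.validate(Literal("1", INTEGER_TYPE))
assert "should be a PSyIR Range" in str(err.value)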
Example n. 18
def test_correct_2sign(tmpdir):
    '''Check that a valid example produces the expected output when there
    is more than one SIGN in an expression.

    '''
    Config.get().api = "nemo"
    operation = example_psyir(lambda arg: BinaryOperation.create(
        BinaryOperation.Operator.MUL, arg, Literal("3.14", REAL_TYPE)))
    assignment = operation.parent
    sign_op = BinaryOperation.create(BinaryOperation.Operator.SIGN,
                                     Literal("1.0", REAL_TYPE),
                                     Literal("1.0", REAL_TYPE))
    op1 = BinaryOperation.create(BinaryOperation.Operator.ADD, sign_op,
                                 operation)
    op1.parent = assignment
    assignment.children[1] = op1
    writer = FortranWriter()
    result = writer(operation.root)
    assert ("subroutine sign_example(arg,arg_1)\n"
            "  real, intent(inout) :: arg\n"
            "  real, intent(inout) :: arg_1\n"
            "  real :: psyir_tmp\n\n"
            "  psyir_tmp=SIGN(1.0, 1.0) + SIGN(arg * 3.14, arg_1)\n\n"
            "end subroutine sign_example\n") in result
    trans = Sign2CodeTrans()
    trans.apply(operation, operation.root.symbol_table)
    trans.apply(sign_op, operation.root.symbol_table)
    result = writer(operation.root)
    assert ("subroutine sign_example(arg,arg_1)\n"
            "  real, intent(inout) :: arg\n"
            "  real, intent(inout) :: arg_1\n"
            "  real :: psyir_tmp\n"
            "  real :: res_sign\n"
            "  real :: tmp_sign\n"
            "  real :: res_abs\n"
            "  real :: tmp_abs\n"
            "  real :: res_sign_1\n"
            "  real :: tmp_sign_1\n"
            "  real :: res_abs_1\n"
            "  real :: tmp_abs_1\n\n"
            "  tmp_abs=arg * 3.14\n"
            "  if (tmp_abs > 0.0) then\n"
            "    res_abs=tmp_abs\n"
            "  else\n"
            "    res_abs=tmp_abs * -1.0\n"
            "  end if\n"
            "  res_sign=res_abs\n"
            "  tmp_sign=arg_1\n"
            "  if (tmp_sign < 0.0) then\n"
            "    res_sign=res_sign * -1.0\n"
            "  end if\n"
            "  tmp_abs_1=1.0\n"
            "  if (tmp_abs_1 > 0.0) then\n"
            "    res_abs_1=tmp_abs_1\n"
            "  else\n"
            "    res_abs_1=tmp_abs_1 * -1.0\n"
            "  end if\n"
            "  res_sign_1=res_abs_1\n"
            "  tmp_sign_1=1.0\n"
            "  if (tmp_sign_1 < 0.0) then\n"
            "    res_sign_1=res_sign_1 * -1.0\n"
            "  end if\n"
            "  psyir_tmp=res_sign_1 + res_sign\n\n"
            "end subroutine sign_example\n") in result
    assert Compile(tmpdir).string_compiles(result)
    # Remove the created config instance
    Config._instance = None
Example n. 19
def setup():
    '''Make sure that all tests here use gocean1.0 as API.'''
    Config.get().api = "gocean1.0"
    yield ()
    Config._instance = None
Example n. 20
    def __init__(self, arg_type, operates_on):
        self._arg_type = arg_type
        # Initialise properties
        self._argument_type = None
        self._data_type = None
        self._function_space_to = None
        self._function_space_from = None
        self._function_space = None
        self._function_spaces = []
        # Set vector size to 1 (scalars set it to 0 in their validation)
        self._vector_size = 1
        # Initialise other internal arguments
        self._access_type = None
        self._function_space1 = None
        self._function_space2 = None
        self._stencil = None
        self._mesh = None
        self._nargs = 0
        # Initialise temporary "offset" internal argument required
        # to support the old and the current argument metadata style.
        # TODO in #874: Remove support for the old-style metadata
        #               as well as this temporary argument.
        self._offset = 0

        # Check for correct type descriptor
        if arg_type.name != 'arg_type':
            raise ParseError(
                "In the LFRic API each 'meta_arg' entry must be of type "
                "'arg_type', but found '{0}'.".format(arg_type.name))

        # Check the first argument descriptor. If it is a binary operator
        # then it has to be a field vector with an "*n" appended where "*"
        # is a binary operator and "n > 1" is a vector size. If it is a
        # variable then it can be one of the other allowed argument types.
        argtype = None
        separator = None
        if isinstance(arg_type.args[0], expr.BinaryOperator):
            argtype = arg_type.args[0].toks[0]
            separator = arg_type.args[0].toks[1]
        else:
            argtype = arg_type.args[0]

        # First check for a valid argument type. It has to be a variable
        # (FunctionVar expression) and have a valid LFRic API argument name.
        if isinstance(argtype, expr.FunctionVar) and argtype.name in \
           LFRicArgDescriptor.VALID_ARG_TYPE_NAMES:
            self._argument_type = argtype.name
        else:
            raise ParseError(
                "In the LFRic API the 1st argument of a 'meta_arg' "
                "entry should be a valid argument type (one of {0}), "
                "but found '{1}' in '{2}'.".format(
                    LFRicArgDescriptor.VALID_ARG_TYPE_NAMES, argtype,
                    arg_type))

        # Check for a valid vector size in case of a binary
        # operator expression
        if separator:
            self._validate_vector_size(separator, arg_type)

        # The 2nd arg for scalars (1st for the old-style scalar metadata)
        # is the Fortran primitive type of their data.
        # TODO in issue #817: introduce data type for fields and operators,
        # too, and modify the ParseError accordingly.
        # Note: Here we also set internal "offset" argument required to
        #       support the old and the current argument metadata style.
        # TODO in #874: Remove support for the old-style metadata.
        if self._argument_type == "gh_scalar":
            dtype = arg_type.args[1].name
            if dtype in LFRicArgDescriptor.VALID_ARG_DATA_TYPES:
                self._data_type = dtype
                self._offset = 1
            else:
                raise ParseError(
                    "In the LFRic API the 2nd argument of a 'meta_arg' "
                    "scalar entry should be a valid data type (one of {0}), "
                    "but found '{1}' in '{2}'.".format(
                        LFRicArgDescriptor.VALID_ARG_DATA_TYPES, dtype,
                        self._argument_type))

        # Check number of args (in general and also for scalar arguments).
        # We require at least three (two for old-style metadata).
        # TODO in issue #874: Remove offset and restore this check below
        #                     the first check for the correct 'arg_type'
        #                     descriptor name.
        self._nargs = len(arg_type.args)
        min_nargs = 2 + self._offset
        if self._nargs < min_nargs:
            raise ParseError(
                "In the LFRic API each 'meta_arg' entry must have at least "
                "{0} args, but found {1} in '{2}'.".format(
                    min_nargs, self._nargs, arg_type))

        # The 3rd arg for scalars and 2nd arg for fields and operators is an
        # access descriptor (issue #817 will make the access descriptor a 3rd
        # argument for the fields and operators, too). Permitted accesses for
        # each argument type are dealt with in the related _validate methods.
        # Convert from GH_* names to the generic access type
        api_config = Config.get().api_conf(API)
        access_mapping = api_config.get_access_mapping()
        prop_ind = 1 + self._offset
        try:
            self._access_type = access_mapping[arg_type.args[prop_ind].name]
        except KeyError:
            valid_names = api_config.get_valid_accesses_api()
            raise ParseError(
                "In the LFRic API argument {0} of a 'meta_arg' entry "
                "must be a valid access descriptor (one of {1}), but found "
                "'{2}' in '{3}'.".format(prop_ind + 1, valid_names,
                                         arg_type.args[prop_ind].name,
                                         arg_type))

        # Check for the allowed iteration spaces from the parsed kernel
        # metadata
        from psyclone.dynamo0p3 import VALID_ITERATION_SPACES
        if operates_on not in VALID_ITERATION_SPACES:
            raise InternalError(
                "Expected operates_on in the kernel metadata to be one of "
                "{0} but got '{1}'.".format(VALID_ITERATION_SPACES,
                                            operates_on))

        # FIELD, OPERATOR and SCALAR argument type descriptors and checks
        if self._argument_type in LFRicArgDescriptor.VALID_FIELD_NAMES:
            # Validate field arguments
            self._init_field(arg_type, operates_on)

        elif self._argument_type in LFRicArgDescriptor.VALID_OPERATOR_NAMES:
            # Validate operator arguments
            self._init_operator(arg_type)

        elif self._argument_type in LFRicArgDescriptor.VALID_SCALAR_NAMES:
            # Validate scalar arguments
            self._init_scalar(arg_type)

        else:
            # We should never get to here if the checks are tight enough
            raise InternalError(
                "LFRicArgDescriptor.__init__(): failed argument validation "
                "for the 'meta_arg' entry '{0}', should not get to here.".
                format(arg_type))

        # Initialise the parent class
        super(LFRicArgDescriptor,
              self).__init__(self._access_type,
                             self._function_space1,
                             stencil=self._stencil,
                             mesh=self._mesh,
                             argument_type=self._argument_type)
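
A hedged sketch of where the 'arg_type' argument comes from: PSyclone's
pyparsing-based expression grammar turns a metadata entry into the
FunctionVar/BinaryOperator tree that this constructor validates (the
FORT_EXPRESSION name is an assumption based on psyclone/expression.py).

from psyclone import expression as expr

parsed = expr.FORT_EXPRESSION.parseString(
    "arg_type(gh_field*3, gh_inc, w1)")[0]
print(parsed.name)     # 'arg_type'
print(parsed.args[0])  # the 'gh_field*3' field-vector expression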
Example n. 21
def test_invalid_config_files(tmpdir):
    ''' Test various error conditions.
    '''

    # Valid configuration file without iteration spaces. We add several
    # iteration spaces to it to test for various error conditions
    # pylint: disable=invalid-name
    # pylint: disable=too-many-statements
    _CONFIG_CONTENT = '''\
    [DEFAULT]
    DEFAULTAPI = dynamo0.3
    DEFAULTSTUBAPI = dynamo0.3
    DISTRIBUTED_MEMORY = true
    REPRODUCIBLE_REDUCTIONS = false
    REPROD_PAD_SIZE = 8
    [gocean1.0]
    '''
    # Create a config file with a gocean1.0 section, but an
    # invalid iteration space:
    content = _CONFIG_CONTENT + "iteration-spaces=a:b"
    config_file = tmpdir.join("config1")
    with config_file.open(mode="w") as new_cfg:
        new_cfg.write(content)
        new_cfg.close()

        config = Config()
        with pytest.raises(ConfigurationError) as err:
            config.load(str(config_file))
        assert "An iteration space must be in the form" in str(err.value)
        assert "But got \"a:b\"" in str(err.value)

    # Try a multi-line specification to make sure all lines are tested
    content = _CONFIG_CONTENT + "iteration-spaces=a:b:c:1:2:3:4\n        d:e"
    config_file = tmpdir.join("config2")
    with config_file.open(mode="w") as new_cfg:
        new_cfg.write(content)
        new_cfg.close()

        config = Config()
        with pytest.raises(ConfigurationError) as err:
            config.load(str(config_file))
        assert "An iteration space must be in the form" in str(err.value)
        assert "But got \"d:e\"" in str(err.value)

    # Invalid {} expression in first loop bound
    content = _CONFIG_CONTENT + "iteration-spaces=a:b:c:{X}:2:3:4"
    config_file = tmpdir.join("config3")
    with config_file.open(mode="w") as new_cfg:
        new_cfg.write(content)
        new_cfg.close()

        config = Config()
        with pytest.raises(ConfigurationError) as err:
            config.load(str(config_file))
        assert "Only '{start}' and '{stop}' are allowed as bracketed "\
               "expression in an iteration space." in str(err.value)
        assert "But got {X}" in str(err.value)

    # Invalid {} expression in last loop bound:
    content = _CONFIG_CONTENT + "iteration-spaces=a:b:c:1:2:3:{Y}"
    config_file = tmpdir.join("config4")
    with config_file.open(mode="w") as new_cfg:
        new_cfg.write(content)
        new_cfg.close()

        config = Config()
        with pytest.raises(ConfigurationError) as err:
            config.load(str(config_file))
        assert "Only '{start}' and '{stop}' are allowed as bracketed "\
               "expression in an iteration space." in str(err.value)
        assert "But got {Y}" in str(err.value)

    # Add an invalid key:
    content = _CONFIG_CONTENT + "invalid-key=value"
    config_file = tmpdir.join("config5")
    with config_file.open(mode="w") as new_cfg:
        new_cfg.write(content)
        new_cfg.close()

        config = Config()
        with pytest.raises(ConfigurationError) as err:
            config.load(str(config_file))
        assert "Invalid key \"invalid-key\" found in \"{0}\".".\
            format(str(config_file)) in str(err.value)

        for i in [
                "DEFAULTAPI", "DEFAULTSTUBAPI", "DISTRIBUTED_MEMORY",
                "REPRODUCIBLE_REDUCTIONS"
        ]:
            # The keys are returned in lower case
            assert i.lower() in config.get_default_keys()

    with pytest.raises(InternalError) as err:
        GOLoop.add_bounds(1)
    # Different error message (for type) in python2 vs python3:
    assert "The parameter 'bound_info' must be a string, got '1' "\
           "(type <type 'int'>)" in str(err.value) or \
           "The parameter 'bound_info' must be a string, got '1' "\
           "(type <class 'int'>)" in str(err.value)

    # Test syntactically invalid loop boundaries
    with pytest.raises(ConfigurationError) as err:
        GOLoop.add_bounds("offset:field:space:1(:2:3:4")
    assert "Expression '1(' is not a valid do loop boundary" in str(err.value)
    with pytest.raises(ConfigurationError) as err:
        GOLoop.add_bounds("offset:field:space:1:2:3:4+")
    assert "Expression '4+' is not a valid do loop boundary" in str(err.value)

    # Test invalid field properties - too many fields
    content = _CONFIG_CONTENT + "grid-properties = a: {0}%%b:c:d:e"
    config_file = tmpdir.join("config1")
    with config_file.open(mode="w") as new_cfg:
        new_cfg.write(content)
        new_cfg.close()

        config = Config()
        with pytest.raises(ConfigurationError) as err:
            config.load(str(config_file))
        assert "Invalid property \"a\" found with value \"{0}%b:c:d:e\"" \
               in str(err.value)

    # Test invalid field properties - not enough fields
    content = _CONFIG_CONTENT + "grid-properties = a:b"
    config_file = tmpdir.join("config1")
    with config_file.open(mode="w") as new_cfg:
        new_cfg.write(content)
        new_cfg.close()

        config = Config()
        with pytest.raises(ConfigurationError) as err:
            config.load(str(config_file))
        assert "Invalid property \"a\" found with value \"b\"" \
               in str(err.value)

    # Test missing required values
    content = _CONFIG_CONTENT + "grid-properties = a:b:array:real"
    config_file = tmpdir.join("config1")
    with config_file.open(mode="w") as new_cfg:
        new_cfg.write(content)
        new_cfg.close()

        config = Config()
        with pytest.raises(ConfigurationError) as err:
            config.load(str(config_file))
        # The config file {0} does not contain values for "..."
        assert "does not contain values for the following, mandatory grid " \
            "property: \"go_grid_xstop\"" in str(err.value)
Example n. 22
    def _init_field(self, arg_type, operates_on):
        '''
        Validates metadata descriptors for field arguments and
        initialises field argument properties accordingly.

        :param arg_type: LFRic API field (vector) argument type.
        :type arg_type: :py:class:`psyclone.expression.FunctionVar`
        :param operates_on: value of operates_on from the parsed kernel \
                            metadata (used for validation).
        :type operates_on: str

        :raises InternalError: if argument type other than a field is \
                               passed in.
        :raises ParseError: if there are fewer than 3 metadata arguments.
        :raises ParseError: if there are more than 4 metadata arguments.
        :raises ParseError: if the 3rd argument is not a valid function space.
        :raises ParseError: if the optional 4th argument is not a stencil \
                            specification or a mesh identifier (for \
                            inter-grid kernels).
        :raises ParseError: if a field passed to a kernel that operates on \
                            DoFs does not have a valid access \
                            (one of [READ, WRITE, READWRITE]).
        :raises ParseError: if a field on a discontinuous function space \
                            passed to a kernel that operates on cell-columns \
                            does not have a valid access (one of \
                            [READ, WRITE, READWRITE]).
        :raises ParseError: if a field on a continuous function space \
                            passed to a kernel that operates on cell-columns \
                            does not have a valid access (one of [READ, INC]).
        :raises InternalError: if an invalid value for operates_on is \
                               passed in.
        :raises ParseError: if a field with a stencil access is not read-only.

        '''
        # Check whether something other than a field is passed in
        if self._argument_type not in LFRicArgDescriptor.VALID_FIELD_NAMES:
            raise InternalError(
                "LFRicArgDescriptor._init_field(): expected a field "
                "argument but got an argument of type '{0}'.".format(
                    arg_type.args[0]))

        # There must be at least 3 arguments
        if self._nargs < 3:
            raise ParseError(
                "In the LFRic API each 'meta_arg' entry must have at "
                "least 3 arguments if its first argument is of {0} type, "
                "but found {1} in '{2}'.".format(
                    LFRicArgDescriptor.VALID_FIELD_NAMES, self._nargs,
                    arg_type))
        # There must be at most 4 arguments
        if self._nargs > 4:
            raise ParseError(
                "In the LFRic API each 'meta_arg' entry must have at "
                "most 4 arguments if its first argument is of {0} type, "
                "but found {1} in '{2}'.".format(
                    LFRicArgDescriptor.VALID_FIELD_NAMES, self._nargs,
                    arg_type))

        # Field data_type is "gh_real" for now, but will be determined by
        # metadata descriptor as the second argument in issue #817
        self._data_type = "gh_real"

        # The 3rd argument must be a valid function space name
        if arg_type.args[2].name not in \
           FunctionSpace.VALID_FUNCTION_SPACE_NAMES:
            raise ParseError(
                "In the LFRic API the 3rd argument of a 'meta_arg' "
                "entry must be a valid function space name (one of {0}) if "
                "its first argument is of {1} type, but found '{2}' in "
                "'{3}'.".format(FunctionSpace.VALID_FUNCTION_SPACE_NAMES,
                                LFRicArgDescriptor.VALID_FIELD_NAMES,
                                arg_type.args[2].name, arg_type))
        self._function_space1 = arg_type.args[2].name

        # The optional 4th argument is either a stencil specification
        # or a mesh identifier (for inter-grid kernels)
        if self._nargs == 4:
            try:
                if "stencil" in str(arg_type.args[3]):
                    self._stencil = get_stencil(
                        arg_type.args[3],
                        LFRicArgDescriptor.VALID_STENCIL_TYPES)
                elif "mesh" in str(arg_type.args[3]):
                    self._mesh = get_mesh(arg_type.args[3],
                                          LFRicArgDescriptor.VALID_MESH_TYPES)
                else:
                    raise ParseError("Unrecognised metadata entry")
            except ParseError as err:
                raise ParseError(
                    "In the LFRic API the 4th argument of a 'meta_arg' "
                    "field entry must be either a valid stencil specification"
                    "or a mesh identifier (for inter-grid kernels). However, "
                    "entry '{0}' raised the following error: {1}.".format(
                        arg_type, str(err)))

        # Test allowed accesses for fields
        field_disc_accesses = [
            AccessType.READ, AccessType.WRITE, AccessType.READWRITE
        ]
        field_cont_accesses = [AccessType.READ, AccessType.INC]
        # Convert generic access types to GH_* names for error messages
        api_config = Config.get().api_conf(API)
        rev_access_mapping = api_config.get_reverse_access_mapping()
        # Create a list of allowed accesses for use in error messages
        fld_disc_acc_msg = [
            rev_access_mapping[acc] for acc in field_disc_accesses
        ]
        fld_cont_acc_msg = [
            rev_access_mapping[acc] for acc in field_cont_accesses
        ]
        # Combined list of valid function spaces for continuous fields
        fld_cont_spaces = (FunctionSpace.CONTINUOUS_FUNCTION_SPACES +
                           FunctionSpace.VALID_ANY_SPACE_NAMES)

        # Check accesses for kernels that operate on DoFs
        if operates_on == "dof":
            if self._access_type not in field_disc_accesses:
                raise ParseError(
                    "In the LFRic API, allowed field accesses for a "
                    "kernel that operates on DoFs are {0}, but found "
                    "'{1}' for '{2}' in '{3}'.".format(
                        fld_disc_acc_msg,
                        rev_access_mapping[self._access_type],
                        self._function_space1.lower(), arg_type))
        # Check accesses for kernels that operate on cell-columns
        elif operates_on == "cell_column":
            # Fields on discontinuous function spaces
            if (self._function_space1.lower()
                    in FunctionSpace.VALID_DISCONTINUOUS_NAMES
                    and self._access_type not in field_disc_accesses):
                raise ParseError(
                    "In the LFRic API, allowed accesses for fields on "
                    "discontinuous function spaces that are arguments to "
                    "kernels that operate on cell-columns are {0}, but found "
                    "'{1}' for '{2}' in '{3}'.".format(
                        fld_disc_acc_msg,
                        rev_access_mapping[self._access_type],
                        self._function_space1.lower(), arg_type))
            # Fields on continuous function spaces
            if (self._function_space1.lower() in fld_cont_spaces
                    and self._access_type not in field_cont_accesses):
                raise ParseError(
                    "In the LFRic API, allowed accesses for fields on "
                    "continuous function spaces that are arguments to "
                    "kernels that operate on cell-columns are {0}, but found "
                    "'{1}' for '{2}' in '{3}'.".format(
                        fld_cont_acc_msg,
                        rev_access_mapping[self._access_type],
                        self._function_space1.lower(), arg_type))
        # Raise an InternalError for an invalid value of operates-on
        else:
            from psyclone.dynamo0p3 import VALID_ITERATION_SPACES
            raise InternalError(
                "Invalid operates_on '{0}' in the kernel metadata (expected "
                "one of {1}).".format(operates_on, VALID_ITERATION_SPACES))

        # Test allowed accesses for fields that have stencil specification
        if self._stencil and self._access_type != AccessType.READ:
            raise ParseError(
                "In the LFRic API a field with a stencil access must be "
                "read-only ('{0}'), but found '{1}' in '{2}'.".format(
                    rev_access_mapping[AccessType.READ],
                    rev_access_mapping[self._access_type], arg_type))
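
The access rules enforced above can be summarised with a small,
standalone sketch (simplified: the function-space name is reduced to a
continuous/discontinuous flag):

def allowed_field_accesses(operates_on, continuous):
    '''Return the set of GH_* access names permitted for a field.'''
    disc = {"gh_read", "gh_write", "gh_readwrite"}
    if operates_on == "dof":
        return disc
    if operates_on == "cell_column":
        return {"gh_read", "gh_inc"} if continuous else disc
    raise ValueError("invalid operates_on '%s'" % operates_on)

print(allowed_field_accesses("cell_column", True))  # 'gh_read' and 'gh_inc'
print(allowed_field_accesses("dof", False))         # read/write/readwrite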
Example n. 23
def test_get_repo_config_file():
    '''Check the mechanism by which we ensure that the repository config
    file is picked up by the test suite.
    '''
    config_file = Config.get_repository_config_file()
    assert "config/psyclone.cfg" in config_file
Example n. 24
    def _init_operator(self, arg_type):
        '''
        Validates metadata descriptors for operator arguments and
        initialises operator argument properties accordingly.

        :param arg_type: LFRic API operator argument type.
        :type arg_type: :py:class:`psyclone.expression.FunctionVar`

        :raises InternalError: if argument type other than an operator is \
                               passed in.
        :raises ParseError: if there are not exactly 4 metadata arguments.
        :raises ParseError: if the function space to- is not one of the \
                            valid function spaces.
        :raises ParseError: if the function space from- is not one of the \
                            valid function spaces.
        :raises ParseError: if the operator argument has an invalid access.

        '''
        # Check whether something other than an operator is passed in
        if self._argument_type not in LFRicArgDescriptor.VALID_OPERATOR_NAMES:
            raise InternalError(
                "LFRicArgDescriptor._init_operator(): expected an "
                "operator argument but got an argument of type '{0}'.".format(
                    self._argument_type))

        # We expect 4 arguments with the 3rd and 4th each being a
        # function space
        if self._nargs != 4:
            raise ParseError(
                "In the LFRic API each 'meta_arg' entry must have 4 "
                "arguments if its first argument is an operator (one "
                "of {0}), but found {1} in '{2}'.".format(
                    LFRicArgDescriptor.VALID_OPERATOR_NAMES, self._nargs,
                    arg_type))

        # Operator data_type is "gh_real" for now, but will be determined by
        # metadata descriptor as the second argument in issue #817
        self._data_type = "gh_real"

        # Operator arguments need to have valid to- and from- function spaces
        if arg_type.args[2].name not in \
           FunctionSpace.VALID_FUNCTION_SPACE_NAMES:
            raise ParseError(
                "In the LFRic API the 3rd argument of a 'meta_arg' "
                "operator entry must be a valid function space name (one of "
                "{0}), but found '{1}' in '{2}'.".format(
                    FunctionSpace.VALID_FUNCTION_SPACE_NAMES,
                    arg_type.args[2].name, arg_type))
        self._function_space1 = arg_type.args[2].name
        if arg_type.args[3].name not in \
           FunctionSpace.VALID_FUNCTION_SPACE_NAMES:
            raise ParseError(
                "In the LFRic API the 4th argument of a 'meta_arg' "
                "operator entry must be a valid function space name (one "
                "of {0}), but found '{1}' in '{2}'.".format(
                    FunctionSpace.VALID_FUNCTION_SPACE_NAMES,
                    arg_type.args[3].name, arg_type))
        self._function_space2 = arg_type.args[3].name

        # Test allowed accesses for operators
        operator_accesses = [
            AccessType.READ, AccessType.WRITE, AccessType.READWRITE
        ]
        # Convert generic access types to GH_* names for error messages
        api_config = Config.get().api_conf(API)
        rev_access_mapping = api_config.get_reverse_access_mapping()
        op_acc_msg = [rev_access_mapping[acc] for acc in operator_accesses]
        if self._access_type not in operator_accesses:
            raise ParseError(
                "In the LFRic API, allowed accesses for operators are {0} "
                "because they behave as discontinuous quantities, but found "
                "'{1}' in '{2}'.".format(op_acc_msg,
                                         rev_access_mapping[self._access_type],
                                         arg_type))
Example n. 25
    def create_driver(self, input_list, output_list):
        # pylint: disable=too-many-locals, too-many-statements
        '''This function creates a driver that can read the
        output created by the extraction code. This is a stand-alone
        program that will read the input data, call the kernels /
        instrumented region, and then compare the results with the
        stored results in the file.

        TODO: #644: we need type information here.

        :param input_list: list of variables that are input parameters.
        :type input_list: list of str
        :param output_list: list of variables that are output parameters.
        :type output_list: list of str
        '''

        from psyclone.f2pygen import AllocateGen, AssignGen, CallGen,\
            CommentGen, DeclGen, ModuleGen, SubroutineGen, UseGen, \
            TypeDeclGen
        from psyclone.gocean1p0 import GOSymbolTable
        from psyclone.psyir.symbols import Symbol

        all_vars = list(set(input_list).union(set(output_list)))
        all_vars.sort()

        module_name, region_name = self.region_identifier
        module = ModuleGen(name=module_name)
        prog = SubroutineGen(parent=module, name=module_name+"_code",
                             implicitnone=True)
        module.add(prog)
        use = UseGen(prog, self.add_psydata_class_prefix("psy_data_mod"),
                     only=True,
                     funcnames=[self.add_psydata_class_prefix("PSyDataType")])
        prog.add(use)

        # Use a symbol table to make sure all variable names are unique
        sym_table = GOSymbolTable()
        sym = Symbol("PSyDataType")
        sym_table.add(sym)

        psy_data = sym_table.new_symbol_name(
            self.add_psydata_class_prefix("psy_data"))
        sym_table.add(Symbol(psy_data))
        var_decl = TypeDeclGen(
            prog, datatype=self.add_psydata_class_prefix("PSyDataType"),
            entity_decls=[psy_data])
        prog.add(var_decl)

        call = CallGen(prog,
                       "{0}%OpenRead(\"{1}\", \"{2}\")"
                       .format(psy_data, module_name, region_name))
        prog.add(call)

        post_suffix = self._post_name

        # Variables might need to be renamed in order to guarantee unique
        # variable names in the driver: An example of this would be if the
        # user code contains a variable 'dx', and the kernel takes a
        # property 'dx' as well. In the original code that is no problem,
        # since the property is used via field%grid%dx. But the stand-alone
        # driver renames field%grid%dx to dx, which can cause a name clash.
        # Similar problems can exist with any user defined type, since all
        # user defined types are rewritten to just use the field name.
        # We use a mapping to support renaming of variables: it takes as
        # key the variable as used in the original program (e.g. 'dx' from
        # an expression like field%grid%dx), and maps it to a unique local
        # name (e.g. dx_0).

        rename_variable = {}
        for var_name in all_vars:
            # TODO #644: we need to identify arrays!!
            # Support GOcean properties, which are accessed via a
            # derived type (e.g. 'fld%grid%dx'). In this stand-alone
            # driver we don't have the derived type, instead we create
            # variable based on the field in the derived type ('dx'
            # in the example above), and pass this variable to the
            # instrumented code.
            last_percent = var_name.rfind("%")
            if last_percent > -1:
                # Strip off the derived type, and only leave the last
                # field, which is used as the local variable name.
                local_name = var_name[last_percent+1:]
            else:
                # No derived type, so we can just use the
                # variable name directly in the driver
                local_name = var_name
            unique_local_name = sym_table.new_symbol_name(local_name)
            rename_variable[local_name] = unique_local_name
            sym_table.add(Symbol(unique_local_name))
            local_name = unique_local_name

            # TODO: #644 - we need to identify arrays!!
            # Any variable used needs to be defined. We also need
            # to handle the kind property better and not rely on
            # a hard-coded value.
            decl = DeclGen(prog, "real", [local_name], kind="8",
                           dimension=":,:", allocatable=True)
            prog.add(decl)
            is_input = var_name in input_list
            is_output = var_name in output_list

            if is_input and not is_output:
                # We only need the pre-variable, and we can read
                # it from the file (this call also allocates space for it).
                call = CallGen(prog,
                               "{0}%ReadVariable(\"{1}\", {2})"
                               .format(psy_data, var_name, local_name))
                prog.add(call)
            elif is_input:
                # Now must be input and output:
                # First read the pre-variable (which also allocates it):
                call = CallGen(prog,
                               "{0}%ReadVariable(\"{1}\", {2})"
                               .format(psy_data, var_name, local_name))
                prog.add(call)
                # Then declare the post variable and read its values
                # (ReadVariable will also allocate it)
                sym = Symbol(local_name+post_suffix)
                sym_table.add(sym)
                decl = DeclGen(prog, "real", [local_name+post_suffix],
                               dimension=":,:", kind="8", allocatable=True)
                prog.add(decl)
                call = CallGen(prog,
                               "{0}%ReadVariable(\"{1}{3}\", {2}{3})"
                               .format(psy_data, var_name, local_name,
                                       post_suffix))
                prog.add(call)
            else:
                # Now the variable is output only. We need to read the
                # post variable in, and create and allocate a pre variable
                # with the same size as the post
                sym = Symbol(local_name+post_suffix)
                sym_table.add(sym)
                decl = DeclGen(prog, "real", [local_name+post_suffix],
                               dimension=":,:", kind="8", allocatable=True)
                prog.add(decl)
                call = CallGen(prog,
                               "{0}%ReadVariable(\"{1}{3}\", {2}{3})"
                               .format(psy_data, var_name, local_name,
                                       post_suffix))
                prog.add(call)
                decl = DeclGen(prog, "real", [local_name], kind="8",
                               dimension=":,:", allocatable=True)
                prog.add(decl)
                # Allocate the pre variable using the post variable as
                # a shape template ('mold').
                alloc = AllocateGen(prog, [local_name],
                                    mold=local_name + post_suffix)
                prog.add(alloc)
                # Initialise the variable with 0, since it might contain
                # values that are not set at all (halo regions, or a
                # kernel might not set all values). This way the array
                # comparison with the post value works as expected
                # TODO #644 - create the right "0.0" type here (e.g.
                # 0.0d0, ...)
                assign = AssignGen(prog, local_name, "0.0d0")
                prog.add(assign)

        # Now add the region that was extracted here:
        prog.add(CommentGen(prog, ""))
        prog.add(CommentGen(prog, " RegionStart"))

        # For the driver we have to re-create the code of the
        # instrumented region, but in this stand-alone driver the
        # arguments are not dl_esm_inf fields anymore, but simple arrays.
        # Similarly, for properties we cannot use e.g. 'fld%grid%dx'
        # anymore, we have to use e.g. a local variable 'dx' that has
        # been created. Since we are using the existing way of creating
        # the code for the instrumented region, we need to modify how
        # these variables are created. We do this by temporarily
        # modifying the properties in the config file.
        api_config = Config.get().api_conf("gocean1.0")
        all_props = api_config.grid_properties
        # Keep a copy of the original values, so we can restore
        # them later
        orig_props = dict(all_props)

        # 1) A grid property is defined like "{0}%grid%dx". This is
        #    changed to be just 'dx', i.e. the final component of
        #    the current value (but we also take renaming into account,
        #    so 'dx' might become 'dx_0').
        #    If a property is not used, it doesn't matter if we modify
        #    its definition, so we just change all properties.
        for name, prop in all_props.items():
            last_percent = prop.fortran.rfind("%")
            if last_percent > -1:
                # Get the last field name, which will be the
                # local variable name
                local_name = prop.fortran[last_percent+1:]
                unique_name = rename_variable.get(local_name, local_name)
                all_props[name] = GOceanConfig.make_property(
                    unique_name, prop.type, prop.intrinsic_type)

        # 2) The property 'grid_data' is a reference to the data on the
        #    grid (i.e. the actual field), and it is defined as "{0}%data".
        #    This just becomes {0} ('a_fld%data' in the original program
        #    becomes just 'a_fld', and 'a_fld' is declared to be a plain
        #    Fortran 2d-array)
        all_props["go_grid_data"] = GOceanConfig.make_property(
            "{0}", "array", "real")

        # Each kernel caches the argument code, so we also
        # need to clear this cached data to make sure the new
        # value for "go_grid_data" is actually used.
        from psyclone.psyGen import CodedKern
        for kernel in self.psy_data_body.walk(CodedKern):
            kernel.clear_cached_data()

        # Recreate the instrumented region. Due to the changes in the
        # config files, fields and properties will now become local
        # plain arrays and variables:
        for child in self.psy_data_body:
            child.gen_code(prog)

        # Now reset all properties back to the original values:
        for name in all_props.keys():
            all_props[name] = orig_props[name]

        prog.add(CommentGen(prog, " RegionEnd"))
        prog.add(CommentGen(prog, ""))

        for var_name in output_list:
            prog.add(CommentGen(prog, " Check {0}".format(var_name)))

        code = str(module.root)

        with open("driver-{0}-{1}.f90".
                  format(module_name, region_name), "w") as out:
            out.write(code)
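
The renaming scheme described in the comments above can be illustrated
with a self-contained sketch (independent of the GOSymbolTable used in
the real code): each derived-type access keeps only its final component
and clashes are resolved with a numeric suffix.

names = ["fld%grid%dx", "dx", "p_fld"]
seen = set()
rename = {}
for name in names:
    local = name.rsplit("%", 1)[-1]
    unique, counter = local, 0
    while unique in seen:
        unique = "%s_%d" % (local, counter)
        counter += 1
    seen.add(unique)
    rename[name] = unique
print(rename)  # {'fld%grid%dx': 'dx', 'dx': 'dx_0', 'p_fld': 'p_fld'}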
Example n. 26
    def _init_scalar(self, arg_type):
        '''
        Validates metadata descriptors for scalar arguments and
        initialises scalar argument properties accordingly.

        :param arg_type: LFRic API scalar argument type.
        :type arg_type: :py:class:`psyclone.expression.FunctionVar`

        :raises InternalError: if argument type other than a scalar is \
                               passed in.
        :raises ParseError: if there are not exactly 3 metadata arguments.
        :raises InternalError: if a scalar argument has an invalid data type.
        :raises ParseError: if scalar arguments do not have a read-only or \
                            a reduction access.
        :raises ParseError: if a scalar argument that is not a real \
                            scalar has a reduction access.

        '''
        # Check whether something other than a scalar is passed in
        if self._argument_type not in LFRicArgDescriptor.VALID_SCALAR_NAMES:
            raise InternalError(
                "LFRicArgDescriptor._init_scalar(): expected a scalar "
                "argument but got an argument of type '{0}'.".format(
                    arg_type.args[0]))

        # There must be 3 argument descriptors to describe a scalar.
        # TODO in #874: Remove support for the old-style 2 descriptors.
        min_scalar_nargs = 2 + self._offset
        if self._nargs != min_scalar_nargs:
            raise ParseError(
                "In the LFRic API each 'meta_arg' entry must have {0} "
                "arguments if its first argument is 'gh_{{r,i}}scalar', but "
                "found {1} in '{2}'.".format(min_scalar_nargs, self._nargs,
                                             arg_type))

        # Check whether an invalid data type for a scalar argument is passed
        # in. Valid data types for scalars are valid data types in LFRic API.
        # TODO in #874: Remove the support for old-style scalar metadata that
        #               assigns the data type from the scalar name (the 1st
        #               argument).
        #               Note: The main check for the valid scalar data types
        #               will be ParseError in the class constructor and this
        #               scalar init method only needs to check for
        #               InternalError.
        if not self._data_type and self._offset == 0:
            self._data_type = arg_type.args[0].name
            # Translate the old-style argument type into the current one
            self._argument_type = "gh_scalar"
        if (self._data_type not in LFRicArgDescriptor.VALID_SCALAR_DATA_TYPES):
            raise InternalError(
                "LFRicArgDescriptor._init_scalar(): expected one of {0} "
                "as the data type but got '{1}'.".format(
                    LFRicArgDescriptor.VALID_SCALAR_DATA_TYPES,
                    self._data_type))

        # Test allowed accesses for scalars (read_only or reduction)
        scalar_accesses = [AccessType.READ] + \
            AccessType.get_valid_reduction_modes()
        # Convert generic access types to GH_* names for error messages
        api_config = Config.get().api_conf(API)
        rev_access_mapping = api_config.get_reverse_access_mapping()
        if self._access_type not in scalar_accesses:
            api_specific_name = rev_access_mapping[self._access_type]
            valid_reductions = AccessType.get_valid_reduction_names()
            raise ParseError(
                "In the LFRic API scalar arguments must have read-only "
                "('gh_read') or a reduction {0} access but found '{1}' "
                "in '{2}'.".format(valid_reductions, api_specific_name,
                                   arg_type))
        # Reduction access is currently only valid for real scalar arguments
        if self._data_type != "gh_real" and self._access_type in \
           AccessType.get_valid_reduction_modes():
            raise ParseError(
                "In the LFRic API a reduction access '{0}' is only valid "
                "with a real scalar argument, but a scalar argument with "
                "'{1}' data type was found in '{2}'.".format(
                    self._access_type.api_specific_name(), self._data_type,
                    arg_type))

        # Scalars don't have vector size
        self._vector_size = 0
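
A hedged sketch of the scalar access rule above (the AccessType import
path is an assumption): only read access plus the valid reduction modes
are accepted.

from psyclone.core.access_type import AccessType

allowed = [AccessType.READ] + AccessType.get_valid_reduction_modes()
print(AccessType.SUM in allowed)  # True: 'gh_sum' is a valid reduction
print(AccessType.INC in allowed)  # False: increments are rejected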
Example n. 27
def kernel_outputdir(tmpdir, monkeypatch):
    '''Sets the PSyclone _kernel_output_dir Config parameter to tmpdir.'''
    config = Config.get()
    monkeypatch.setattr(config, "_kernel_output_dir", str(tmpdir))
    return tmpdir
Example n. 28
    def __init__(self, ast=None, children=None, parent=None, options=None):

        super(PSyDataNode, self).__init__(ast=ast, children=children,
                                          parent=parent)
        if not options:
            options = {}

        # _prefix stores a prefix to be used with all external PSyData
        # symbols (i.e. data types and module name), used in the
        # method 'add_psydata_class_prefix'.
        prefix = options.get("prefix", self._default_prefix)
        # Check that the prefix is one of those listed as being supported
        # in the configuration file. If it *is* listed then it is assumed
        # that a matching PSyData wrapper library is available at compile time.
        # See the User Guide for more information:
        # https://psyclone-dev.readthedocs.io/en/latest/psy_data.html#psy-data
        if prefix and prefix not in Config.get().valid_psy_data_prefixes:
            raise InternalError(
                "Invalid 'prefix' parameter: found '{0}', expected one of {1} "
                "as defined in {2}".format(
                    prefix, Config.get().valid_psy_data_prefixes,
                    Config.get().filename))
        if not prefix:
            self._prefix = ""
        else:
            self._prefix = prefix + "_"

        # Create the list of symbol names that will be imported from
        # the PSyData Fortran module. We use a namedtuple to improve
        # readability. Currently there is only one imported symbol (the
        # name of the PSyData derived type) but we keep a list for future
        # extensibility.
        _PSyDataSymbol = namedtuple("_PSyDataSymbol", "name symbol_type")
        self.imported_symbols = [_PSyDataSymbol(self.type_name,
                                                DataTypeSymbol)]

        # Root of the name to use for variables associated with
        # PSyData regions
        self._psy_data_symbol_with_prefix = \
            self.add_psydata_class_prefix("psy_data")

        # The name of the PSyData variable that is used for this PSyDataNode
        self._var_name = ""

        # The region identifier caches the computed module- and region-name
        # as a tuple of strings. This is required so that a derived class can
        # query the actual name of a region (e.g. during generation of a driver
        # for an extract node). If the user does not define a name, i.e.
        # module_name and region_name are empty, a unique name will be
        # computed in gen_code() or lower_to_language_level(). If this name was
        # stored in module_name and region_name, and gen() is called again, the
        # names would not be computed again, since the code detects already
        # defined module and region names. This can then result in duplicated
        # region names: The test 'test_region' in profile_test triggers this.
        # gen() is called first after one profile region is applied, then
        # another profile region is added, and gen() is called again. The
        # second profile region would compute a new name, which then happens
        # to be the same as the name computed for the first region in the
        # first gen_code call (which implies that the name of the first
        # profile region comes out differently the second time it is
        # computed).
        # So in order to guarantee that the computed module and region names
        # are unique when gen_code is called more than once, we
        # cannot store a computed name in module_name and region_name.
        self._region_identifier = ("", "")
        # Names of the module and region (set from the 'region_name'
        # option below).
        self._module_name = None
        self._region_name = None

        name = options.get("region_name", None)

        if name:
            # pylint: disable=too-many-boolean-expressions
            if not isinstance(name, tuple) or not len(name) == 2 or \
               not name[0] or not isinstance(name[0], str) or \
               not name[1] or not isinstance(name[1], str):
                raise InternalError(
                    "Error in PSyDataNode. The name must be a "
                    "tuple containing two non-empty strings.")
            # pylint: enable=too-many-boolean-expressions
            # Valid PSyData names have been provided by the user.
            self._module_name = name[0]
            self._region_name = name[1]
            self.set_region_identifier(self._module_name,
                                       self._region_name)

        # TODO #435. This can be removed when update() is removed.
        self.use_stmt = ""
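A minimal sketch of the options accepted by this constructor. It assumes the node can be built directly (in practice it is usually inserted by a transformation) and that 'profile' appears under the valid PSyData prefixes in the configuration file:

# Sketch only: both values must satisfy the validation above.
options = {
    "prefix": "profile",                     # expands to the 'profile_' symbol prefix
    "region_name": ("my_mod", "my_region"),  # (module, region): two non-empty strings
}
node = PSyDataNode(options=options)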
Example no. 29
from psyclone.configuration import Config


def setup():
    '''Make sure that all tests here use gocean0.1 as the API.'''
    Config.get().api = "gocean0.1"
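A sketch of the same idea as an explicit pytest fixture, so that the API setting is restored after each test; the fixture name is hypothetical and only the public Config.get().api setter used above is relied upon:

import pytest

from psyclone.configuration import Config


@pytest.fixture(autouse=True)
def use_gocean_api():
    '''Switch the API to gocean0.1 for a test and restore it afterwards.'''
    old_api = Config.get().api
    Config.get().api = "gocean0.1"
    yield
    Config.get().api = old_api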
Example no. 30
    def _init_field(self, arg_type, operates_on):
        '''
        Validates metadata descriptors for field arguments and
        initialises field argument properties accordingly.

        :param arg_type: LFRic API field (vector) argument type.
        :type arg_type: :py:class:`psyclone.expression.FunctionVar`
        :param operates_on: value of operates_on from the parsed kernel \
                            metadata (used for validation).
        :type operates_on: str

        :raises InternalError: if argument type other than a field is \
                               passed in.
        :raises ParseError: if there are fewer than 4 metadata arguments.
        :raises ParseError: if there are more than 5 metadata arguments.
        :raises InternalError: if a field argument has an invalid data type.
        :raises ParseError: if the 4th argument is not a valid function space.
        :raises ParseError: if the optional 5th argument is not a stencil \
                            specification or a mesh identifier (for \
                            inter-grid kernels).
        :raises ParseError: if a field passed to a kernel that operates on \
                            DoFs does not have a valid access \
                            (one of [READ, WRITE, READWRITE]).
        :raises ParseError: if a field on a discontinuous function space \
                            passed to a kernel that operates on cell-columns \
                            does not have a valid access (one of \
                            [READ, WRITE, READWRITE]).
        :raises ParseError: if a field on a continuous function space \
                            passed to a kernel that operates on cell-columns \
                            does not have a valid access (one of [READ, INC]).
        :raises ParseError: if the kernel operates on the domain and is \
                            passed a field on a continuous space.
        :raises InternalError: if an invalid value for operates_on is \
                               passed in.
        :raises ParseError: if a field with a stencil access is not read-only.
        :raises ParseError: if a field with a stencil access is passed to a \
                            kernel that operates on the domain.

        '''
        # Check whether something other than a field is passed in
        if self._argument_type not in LFRicArgDescriptor.VALID_FIELD_NAMES:
            raise InternalError(
                "Expected a field argument but got an argument of type "
                "'{0}'.".format(arg_type.args[0]))

        # TODO in #874: Remove support for the old-style field descriptors
        #               throughout this routine.
        # There must be at least 4 arguments
        nargs_field_min = 3 + self._offset
        if self._nargs < nargs_field_min:
            raise ParseError(
                "In the LFRic API each 'meta_arg' entry must have at "
                "least {0} arguments if its first argument is of {1} type, "
                "but found {2} in '{3}'.".format(
                    nargs_field_min, LFRicArgDescriptor.VALID_FIELD_NAMES,
                    self._nargs, arg_type))
        # There must be at most 5 arguments
        nargs_field_max = 4 + self._offset
        if self._nargs > nargs_field_max:
            raise ParseError(
                "In the LFRic API each 'meta_arg' entry must have at "
                "most {0} arguments if its first argument is of {1} type, "
                "but found {2} in '{3}'.".format(
                    nargs_field_max, LFRicArgDescriptor.VALID_FIELD_NAMES,
                    self._nargs, arg_type))

        # Check whether an invalid data type for a field argument is passed in.
        # TODO in #874: Remove the support for old-style field metadata that
        #               prescribes the data type.
        if not self._data_type and self._offset == 0:
            self._data_type = "gh_real"
        if self._data_type not in LFRicArgDescriptor.VALID_FIELD_DATA_TYPES:
            raise InternalError(
                "Expected one of {0} as the field data type but got '{1}'.".
                format(LFRicArgDescriptor.VALID_FIELD_DATA_TYPES,
                       self._data_type))

        # The 4th argument must be a valid function-space name
        prop_ind = 2 + self._offset
        if arg_type.args[prop_ind].name not in \
           FunctionSpace.VALID_FUNCTION_SPACE_NAMES:
            raise ParseError(
                "In the LFRic API argument {0} of a 'meta_arg' field entry "
                "must be a valid function-space name (one of {1}) if its "
                "first argument is of {2} type, but found '{3}' in '{4}'.".
                format(prop_ind + 1, FunctionSpace.VALID_FUNCTION_SPACE_NAMES,
                       LFRicArgDescriptor.VALID_FIELD_NAMES,
                       arg_type.args[prop_ind].name, arg_type))
        self._function_space1 = arg_type.args[prop_ind].name

        # The optional 5th argument is either a stencil specification
        # or a mesh identifier (for inter-grid kernels)
        prop_ind = 3 + self._offset
        if self._nargs == nargs_field_max:
            try:
                if "stencil" in str(arg_type.args[prop_ind]):
                    self._stencil = get_stencil(
                        arg_type.args[prop_ind],
                        LFRicArgDescriptor.VALID_STENCIL_TYPES)
                elif "mesh" in str(arg_type.args[prop_ind]):
                    self._mesh = get_mesh(arg_type.args[prop_ind],
                                          LFRicArgDescriptor.VALID_MESH_TYPES)
                else:
                    raise ParseError("Unrecognised metadata entry")
            except ParseError as err:
                six.raise_from(
                    ParseError(
                        "In the LFRic API argument {0} of a 'meta_arg' field "
                        "entry must be either a valid stencil specification"
                        "or a mesh identifier (for inter-grid kernels). However, "
                        "entry '{1}' raised the following error: {2}.".format(
                            prop_ind + 1, arg_type, str(err))), err)

        # Test allowed accesses for fields
        field_disc_accesses = [
            AccessType.READ, AccessType.WRITE, AccessType.READWRITE
        ]
        field_cont_accesses = [AccessType.READ, AccessType.INC]
        # Convert generic access types to GH_* names for error messages
        api_config = Config.get().api_conf(API)
        rev_access_mapping = api_config.get_reverse_access_mapping()
        # Create a list of allowed accesses for use in error messages
        fld_disc_acc_msg = [
            rev_access_mapping[acc] for acc in field_disc_accesses
        ]
        fld_cont_acc_msg = [
            rev_access_mapping[acc] for acc in field_cont_accesses
        ]
        # Combined list of valid function spaces for continuous fields
        fld_cont_spaces = (FunctionSpace.CONTINUOUS_FUNCTION_SPACES +
                           FunctionSpace.VALID_ANY_SPACE_NAMES)

        # Check accesses for kernels that operate on DoFs
        if operates_on == "dof":
            if self._access_type not in field_disc_accesses:
                raise ParseError(
                    "In the LFRic API, allowed field accesses for a "
                    "kernel that operates on DoFs are {0}, but found "
                    "'{1}' for '{2}' in '{3}'.".format(
                        fld_disc_acc_msg,
                        rev_access_mapping[self._access_type],
                        self._function_space1.lower(), arg_type))
        # Check accesses for kernels that operate on cell-columns or the
        # domain
        elif operates_on in ["cell_column", "domain"]:
            # Fields on discontinuous function spaces
            if (self._function_space1.lower()
                    in FunctionSpace.VALID_DISCONTINUOUS_NAMES
                    and self._access_type not in field_disc_accesses):
                raise ParseError(
                    "In the LFRic API, allowed accesses for fields on "
                    "discontinuous function spaces that are arguments to "
                    "kernels that operate on either cell-columns or the domain"
                    " are {0}, but found '{1}' for '{2}' in '{3}'.".format(
                        fld_disc_acc_msg,
                        rev_access_mapping[self._access_type],
                        self._function_space1.lower(), arg_type))
            # Fields on continuous function spaces
            if self._function_space1.lower() in fld_cont_spaces:
                if operates_on == "domain":
                    raise ParseError(
                        "In the LFRic API, kernels that operate on the domain "
                        "only accept field arguments on discontinuous function"
                        " spaces but found '{0}' in '{1}'".format(
                            self._function_space1.lower(), arg_type))

                if self._access_type not in field_cont_accesses:
                    raise ParseError(
                        "In the LFRic API, allowed accesses for fields on "
                        "continuous function spaces that are arguments to "
                        "kernels that operate on cell-columns are {0}, but "
                        "found '{1}' for '{2}' in '{3}'.".format(
                            fld_cont_acc_msg,
                            rev_access_mapping[self._access_type],
                            self._function_space1.lower(), arg_type))
        # Raise an InternalError for an invalid value of operates-on
        else:
            from psyclone.dynamo0p3 import VALID_ITERATION_SPACES
            raise InternalError(
                "Invalid operates_on '{0}' in the kernel metadata (expected "
                "one of {1}).".format(operates_on, VALID_ITERATION_SPACES))

        # Test allowed accesses for fields that have stencil specification
        if self._stencil:
            if self._access_type != AccessType.READ:
                raise ParseError(
                    "In the LFRic API a field with a stencil access must be "
                    "read-only ('{0}'), but found '{1}' in '{2}'.".format(
                        rev_access_mapping[AccessType.READ],
                        rev_access_mapping[self._access_type], arg_type))
            if operates_on == "domain":
                raise ParseError(
                    "In the LFRic API, kernels that operate on the domain "
                    "are not permitted to have arguments with a stencil "
                    "access but found: '{0}'".format(arg_type))