def test_openmp_loop_trans():
    ''' Test of the OpenMP transformation of an all-points loop. '''
    psy, invoke = get_invoke("openmp_fuse_test.f90", API, name="invoke_0")
    schedule = invoke.schedule
    ompf = GOceanOMPParallelLoopTrans()

    omp1_schedule, _ = ompf.apply(schedule.children[0])

    # Replace the original loop schedule with the transformed one
    psy.invokes.get('invoke_0').schedule = omp1_schedule

    # Store the results of applying this code transformation as
    # a string
    gen = str(psy.gen)

    omp_do_idx = outer_do_idx = inner_do_idx = -1
    # Iterate over the lines of generated code
    for idx, line in enumerate(gen.split('\n')):
        if '!$omp parallel do' in line:
            omp_do_idx = idx
        if 'DO j=' in line:
            outer_do_idx = idx
        if 'DO i=' in line:
            inner_do_idx = idx
            if omp_do_idx > -1:
                break

    # The OpenMP 'parallel do' directive must occur immediately before
    # the DO loop itself
    assert (outer_do_idx - omp_do_idx == 1 and
            inner_do_idx - outer_do_idx == 1)
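
# The line-scanning pattern above recurs throughout these examples. A
# minimal helper sketch (hypothetical, not part of the original tests):
# it assumes `gen` is the generated source returned by str(psy.gen).
def find_line(gen, substring):
    ''' Return the index of the first line of `gen` that contains
    `substring`, or -1 if no line matches. '''
    for idx, line in enumerate(gen.split('\n')):
        if substring in line:
            return idx
    return -1

# Usage sketch: the directive must sit immediately above the outer loop
#   assert find_line(gen, 'DO j=') - find_line(gen, '!$omp parallel do') == 1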
Example #2
def test_single_node_ompparalleldo_gocean1p0():
    ''' Test that applying Extract Transformation on a Node enclosed
    within an OMP Parallel DO Directive produces the correct result
    in GOcean1.0 API. Note that this test only passes due to TODO #969:
    the loop boundaries are actually missing. Once #969 is fixed, this
    test should be removed, since it is covered by the test further down!
    But for now this test is left in place since it tests the omp
    functionality with a passing test.
    '''

    etrans = GOceanExtractTrans()
    otrans = GOceanOMPParallelLoopTrans()

    # Test a Loop nested within the OMP Parallel DO Directive
    psy, invoke = get_invoke("single_invoke_three_kernels.f90",
                             GOCEAN_API,
                             idx=0,
                             dist_mem=False)
    schedule = invoke.schedule
    # This test expects constant loop bounds
    schedule._const_loop_bounds = True

    # Apply GOceanOMPParallelLoopTrans to the second Loop
    otrans.apply(schedule.children[1])
    # Now enclose the parallel region within an ExtractNode (inserted
    # at the previous location of the OMPParallelDoDirective)
    etrans.apply(schedule.children[1])

    code = str(psy.gen)
    output = """      ! ExtractStart
      !
      CALL extract_psy_data%PreStart("psy_single_invoke_three_kernels", """ \
      """"invoke_0:compute_cv_code:r0", 2, 3)
      CALL extract_psy_data%PreDeclareVariable("p_fld", p_fld)
      CALL extract_psy_data%PreDeclareVariable("v_fld", v_fld)
      CALL extract_psy_data%PreDeclareVariable("cv_fld_post", cv_fld)
      CALL extract_psy_data%PreDeclareVariable("i_post", i)
      CALL extract_psy_data%PreDeclareVariable("j_post", j)
      CALL extract_psy_data%PreEndDeclaration
      CALL extract_psy_data%ProvideVariable("p_fld", p_fld)
      CALL extract_psy_data%ProvideVariable("v_fld", v_fld)
      CALL extract_psy_data%PreEnd
      !$omp parallel do default(shared), private(i,j), schedule(static)
      DO j=2,jstop+1
        DO i=2,istop
          CALL compute_cv_code(i, j, cv_fld%data, p_fld%data, v_fld%data)
        END DO
      END DO
      !$omp end parallel do
      CALL extract_psy_data%PostStart
      CALL extract_psy_data%ProvideVariable("cv_fld_post", cv_fld)
      CALL extract_psy_data%ProvideVariable("i_post", i)
      CALL extract_psy_data%ProvideVariable("j_post", j)
      CALL extract_psy_data%PostEnd
      !
      ! ExtractEnd"""

    assert output in code
Example #3
def test_single_node_ompparalleldo_gocean1p0():
    ''' Test that applying Extract Transformation on a Node enclosed
    within an OMP Parallel DO Directive produces the correct result
    in GOcean1.0 API. '''

    etrans = GOceanExtractTrans()
    otrans = GOceanOMPParallelLoopTrans()

    # Test a Loop nested within the OMP Parallel DO Directive
    psy, invoke = get_invoke("single_invoke_three_kernels.f90",
                             GOCEAN_API,
                             idx=0,
                             dist_mem=False)
    schedule = invoke.schedule
    # This test expects constant loop bounds
    schedule._const_loop_bounds = True

    # Apply GOceanOMPParallelLoopTrans to the second Loop
    schedule, _ = otrans.apply(schedule.children[1])
    # Now enclose the parallel region within an ExtractNode (inserted
    # at the previous location of the OMPParallelDoDirective)
    schedule, _ = etrans.apply(schedule.children[1])

    code = str(psy.gen)
    output = """      ! ExtractStart
      !
      CALL extract_psy_data%PreStart("psy_single_invoke_three_kernels", """ \
      """"invoke_0:compute_cv_code:r0", 2, 3)
      CALL extract_psy_data%PreDeclareVariable("p_fld", p_fld)
      CALL extract_psy_data%PreDeclareVariable("v_fld", v_fld)
      CALL extract_psy_data%PreDeclareVariable("cv_fld_post", cv_fld)
      CALL extract_psy_data%PreDeclareVariable("i_post", i)
      CALL extract_psy_data%PreDeclareVariable("j_post", j)
      CALL extract_psy_data%PreEndDeclaration
      CALL extract_psy_data%ProvideVariable("p_fld", p_fld)
      CALL extract_psy_data%ProvideVariable("v_fld", v_fld)
      CALL extract_psy_data%PreEnd
      !$omp parallel do default(shared), private(i,j), schedule(static)
      DO j=2,jstop+1
        DO i=2,istop
          CALL compute_cv_code(i, j, cv_fld%data, p_fld%data, v_fld%data)
        END DO
      END DO
      !$omp end parallel do
      CALL extract_psy_data%PostStart
      CALL extract_psy_data%ProvideVariable("cv_fld_post", cv_fld)
      CALL extract_psy_data%ProvideVariable("i_post", i)
      CALL extract_psy_data%ProvideVariable("j_post", j)
      CALL extract_psy_data%PostEnd
      !
      ! ExtractEnd"""

    assert output in code
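
# The two extract examples above share the same shape. A hypothetical
# convenience wrapper (a sketch under the in-place apply() usage shown in
# Example #2; the name `_extract_omp_loop` is an assumption, not PSyclone API).
def _extract_omp_loop(psy, invoke, loop_index):
    ''' Wrap the Loop at `loop_index` in an OMP PARALLEL DO, enclose the
    resulting directive in an ExtractNode and return the generated code. '''
    otrans = GOceanOMPParallelLoopTrans()
    etrans = GOceanExtractTrans()
    schedule = invoke.schedule
    # The directive replaces the Loop at this position in the Schedule...
    otrans.apply(schedule.children[loop_index])
    # ...so the ExtractNode is applied at the same position
    etrans.apply(schedule.children[loop_index])
    return str(psy.gen)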
Example #4
def test_openmp_loop_fuse_trans():
    ''' Test of the OpenMP transformation of a fused loop. '''
    _, info = parse(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 "test_files", "gocean0p1",
                                 "openmp_fuse_test.f90"),
                    api=API)
    psy = PSyFactory(API).create(info)
    invoke = psy.invokes.get('invoke_0')
    schedule = invoke.schedule
    lftrans = GOceanLoopFuseTrans()
    ompf = GOceanOMPParallelLoopTrans()

    # fuse all outer loops
    lf_schedule, _ = lftrans.apply(schedule.children[0],
                                   schedule.children[1])
    schedule, _ = lftrans.apply(lf_schedule.children[0],
                                lf_schedule.children[1])
    # fuse all inner loops
    lf_schedule, _ = lftrans.apply(schedule.children[0].children[0],
                                   schedule.children[0].children[1])
    schedule, _ = lftrans.apply(lf_schedule.children[0].children[0],
                                lf_schedule.children[0].children[1])

    # Add an OpenMP directive around the fused loop
    lf_schedule, _ = ompf.apply(schedule.children[0])

    # Replace the original loop schedule with the transformed one
    psy.invokes.get('invoke_0').schedule = lf_schedule

    # Store the results of applying this code transformation as
    # a string
    gen = str(psy.gen)

    omp_do_idx = outer_do_idx = inner_do_idx = -1
    # Iterate over the lines of generated code
    for idx, line in enumerate(gen.split('\n')):
        if '!$omp parallel do' in line:
            omp_do_idx = idx
        if 'DO j=' in line:
            outer_do_idx = idx
        if 'DO i=' in line:
            inner_do_idx = idx

    # The OpenMP 'parallel do' directive must occur immediately before
    # the DO loop itself
    assert (outer_do_idx - omp_do_idx == 1 and
            inner_do_idx - outer_do_idx == 1)
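
# A compact sketch (hypothetical helper, not in the original suite) of the
# fuse-then-parallelise recipe used above. It assumes the two-value return
# signature of apply() seen in these examples and a schedule holding just
# two adjacent loop nests.
def _fuse_and_parallelise(schedule):
    lftrans = GOceanLoopFuseTrans()
    ompf = GOceanOMPParallelLoopTrans()
    # Fuse the two outer loops, then the two (now adjacent) inner loops
    schedule, _ = lftrans.apply(schedule.children[0], schedule.children[1])
    schedule, _ = lftrans.apply(schedule.children[0].children[0],
                                schedule.children[0].children[1])
    # Finally, wrap the fused nest in an OMP PARALLEL DO directive
    schedule, _ = ompf.apply(schedule.children[0])
    return schedule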
Example #5
def test_gocean_loop_fuse_with_not_a_loop():
    ''' Test that an appropriate error is raised by the GOceanLoopFuseTrans
    class when we attempt to fuse a loop with something that
    is not a loop '''
    _, invoke = get_invoke("openmp_fuse_test.f90", API, name="invoke_0")
    schedule = invoke.schedule
    # Use GOceanLoopFuseTrans in order to test its error checking
    lftrans = GOceanLoopFuseTrans()
    ompf = GOceanOMPParallelLoopTrans()
    # Enclose the first loop within an OMP parallel do
    ompf.apply(schedule.children[0])
    # Attempt to (erroneously) fuse this OMP parallel do
    # with the next loop in the schedule
    with pytest.raises(TransformationError):
        lftrans.apply(schedule.children[0], schedule.children[1])
Example #6
def test_go_omp_parallel_loop_applied_to_non_loop():
    ''' Test that we raise a TransformationError if we attempt to
    apply a GOcean OMP Parallel DO transformation to something that
    is not a loop '''
    _, invoke = get_invoke("single_invoke_three_kernels.f90", 0)
    schedule = invoke.schedule

    ompl = GOceanOMPParallelLoopTrans()
    omp_schedule, _ = ompl.apply(schedule.children[0])

    # Attempt to (erroneously) apply the OMP Loop transformation
    # to the first node in the schedule (which is now an
    # OMP parallel do directive rather than a Loop)
    with pytest.raises(TransformationError):
        _, _ = ompl.apply(omp_schedule.children[0])
Example #7
def test_omp_parallel_region_inside_parallel_do():
    ''' Test that a generation error is raised if we attempt
    to have an OpenMP parallel region within an OpenMP
    parallel do (with the latter applied first) '''
    _, invoke = get_invoke("single_invoke_three_kernels.f90", 0)
    schedule = invoke.schedule

    ompl = GOceanOMPParallelLoopTrans()
    ompr = OMPParallelTrans()

    # Put an OpenMP parallel do directive around one of the loops
    _, _ = ompl.apply(schedule.children[1])

    # Now attempt to put a parallel region inside that parallel do
    with pytest.raises(TransformationError):
        _, _ = ompr.apply([schedule.children[1].children[0]])
Example #8
def test_go_omp_parallel_loop_applied_to_wrong_loop_type():
    ''' Test that we raise a TransformationError if we attempt to
    apply a GOcean OMP Parallel DO transformation to a loop of
    the wrong type '''
    _, invoke = get_invoke("single_invoke_three_kernels.f90", 0)
    schedule = invoke.schedule

    # Manually break the loop-type of the first loop in order to
    # test that this error is handled. We have to work around
    # the setter method to do this since it has error checking
    # too!
    schedule.children[0]._loop_type = "wrong"

    ompl = GOceanOMPParallelLoopTrans()
    # Attempt to apply the transformation to the loop that has been
    # given an incorrect type
    with pytest.raises(TransformationError):
        _, _ = ompl.apply(schedule.children[0])
Example #9
def test_loop_fuse_with_not_a_loop():
    ''' Test that an appropriate error is raised by the LoopFuseTrans
    base class when we attempt to fuse a loop with something that
    is not a loop '''
    _, invoke = get_invoke("openmp_fuse_test.f90", API, name="invoke_0")
    schedule = invoke.schedule
    # Use the bare LoopFuseTrans in order to test its error checking
    lftrans = LoopFuseTrans()
    ompf = GOceanOMPParallelLoopTrans()
    # Enclose the first loop within an OMP parallel do
    ompf.apply(schedule.children[0])
    # Attempt to (erroneously) fuse this OMP parallel do
    # with the next loop in the schedule
    with pytest.raises(TransformationError) as ex:
        lftrans.apply(schedule.children[0], schedule.children[1])
    # Exercise the __str__ method of TransformationError
    assert ("Target of LoopFuseTrans transformation must be a sub-class of "
            "Loop but got 'OMPParallelDoDirective'" in str(ex.value))
def test_omp_region_nodes_not_children_of_same_parent():
    ''' Test that we raise appropriate error if user attempts
    to put a region around nodes that are not children of
    the same parent '''
    _, invoke = get_invoke("single_invoke_three_kernels.f90", 0)
    schedule = invoke.schedule

    ompl = GOceanOMPParallelLoopTrans()
    ompr = OMPParallelTrans()

    # Put an OpenMP parallel do around the first loop in the schedule
    _, _ = ompl.apply(schedule.children[0])

    # Attempt to put an OpenMP parallel region around that same loop
    # (which is now a child of an OpenMP loop directive) and the
    # second loop in the schedule
    with pytest.raises(TransformationError):
        _, _ = ompr.apply(
            [schedule.children[0].children[0], schedule.children[1]])
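
# For contrast, a minimal sketch of the valid usage (an assumption based on
# the slice form used elsewhere in these tests): the nodes handed to
# OMPParallelTrans must be consecutive children of the same Schedule.
def _valid_region_sketch(schedule):
    ompr = OMPParallelTrans()
    # Both loops are siblings, so no TransformationError is raised
    return ompr.apply(schedule.children[0:2])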
Example #11
def test_gocean_loop_fuse_with_not_a_loop():
    ''' Test that an appropriate error is raised by the GOceanLoopFuseTrans
    class when we attempt to fuse a loop with something that
    is not a loop '''
    _, info = parse(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 "test_files", "gocean0p1",
                                 "openmp_fuse_test.f90"),
                    api=API)
    psy = PSyFactory(API).create(info)
    invoke = psy.invokes.get('invoke_0')
    schedule = invoke.schedule
    # Use GOceanLoopFuseTrans in order to test its error checking
    lftrans = GOceanLoopFuseTrans()
    ompf = GOceanOMPParallelLoopTrans()
    # Enclose the first loop within an OMP parallel do
    new_sched, _ = ompf.apply(schedule.children[0])
    # Attempt to (erroneously) fuse this OMP parallel do
    # with the next loop in the schedule
    with pytest.raises(TransformationError):
        _, _ = lftrans.apply(new_sched.children[0], new_sched.children[1])
Example #12
def test_openmp_loop_fuse_trans():
    ''' Test of the OpenMP transformation of a fused loop. '''
    psy, invoke = get_invoke("openmp_fuse_test.f90", API, name="invoke_0")
    schedule = invoke.schedule
    lftrans = GOceanLoopFuseTrans()
    ompf = GOceanOMPParallelLoopTrans()

    # fuse all outer loops
    lf_schedule, _ = lftrans.apply(schedule.children[0],
                                   schedule.children[1])
    schedule, _ = lftrans.apply(lf_schedule.children[0],
                                lf_schedule.children[1])
    # fuse all inner loops
    lf_schedule, _ = lftrans.apply(schedule.children[0].loop_body[0],
                                   schedule.children[0].loop_body[1])
    schedule, _ = lftrans.apply(lf_schedule.children[0].loop_body[0],
                                lf_schedule.children[0].loop_body[1])

    # Add an OpenMP directive around the fused loop
    lf_schedule, _ = ompf.apply(schedule.children[0])

    # Replace the original loop schedule with the transformed one
    psy.invokes.get('invoke_0').schedule = lf_schedule

    # Store the results of applying this code transformation as
    # a string
    gen = str(psy.gen)

    omp_do_idx = outer_do_idx = inner_do_idx = -1
    # Iterate over the lines of generated code
    for idx, line in enumerate(gen.split('\n')):
        if '!$omp parallel do' in line:
            omp_do_idx = idx
        if 'DO j=' in line:
            outer_do_idx = idx
        if 'DO i=' in line:
            inner_do_idx = idx

    # The OpenMP 'parallel do' directive must occur immediately before
    # the DO loop itself
    assert (outer_do_idx - omp_do_idx == 1 and
            inner_do_idx - outer_do_idx == 1)
Example #13
def test_omp_parallel_loop():
    '''Test that we can generate an OMP PARALLEL DO correctly,
    independent of whether or not we are generating constant loop bounds '''
    psy, invoke = get_invoke("single_invoke_three_kernels.f90", 0)
    schedule = invoke.schedule

    omp = GOceanOMPParallelLoopTrans()
    cbtrans = GOConstLoopBoundsTrans()
    omp_sched, _ = omp.apply(schedule.children[0])

    invoke.schedule = omp_sched
    gen = str(psy.gen)
    gen = gen.lower()
    expected = ("!$omp parallel do default(shared), private(j,i), "
                "schedule(static)\n"
                "      do j=2,jstop\n"
                "        do i=2,istop+1\n"
                "          call compute_cu_code(i, j, cu_fld%data, "
                "p_fld%data, u_fld%data)\n"
                "        end do \n"
                "      end do \n"
                "      !$omp end parallel do")
    assert expected in gen

    newsched, _ = cbtrans.apply(omp_sched, const_bounds=False)
    invoke.schedule = newsched
    gen = str(psy.gen)
    gen = gen.lower()
    expected = (
        "      !$omp parallel do default(shared), private(j,i), "
        "schedule(static)\n"
        "      do j=cu_fld%internal%ystart,cu_fld%internal%ystop\n"
        "        do i=cu_fld%internal%xstart,cu_fld%internal%xstop\n"
        "          call compute_cu_code(i, j, cu_fld%data, p_fld%data, "
        "u_fld%data)\n"
        "        end do \n"
        "      end do \n"
        "      !$omp end parallel do")
    assert expected in gen
Example #14
def test_single_node_ompparalleldo_gocean1p0():
    ''' Test that applying Extract Transformation on a Node enclosed
    within an OMP Parallel DO Directive produces the correct result
    in GOcean1.0 API. '''
    from psyclone.transformations import GOceanOMPParallelLoopTrans

    etrans = GOceanExtractRegionTrans()
    otrans = GOceanOMPParallelLoopTrans()

    # Test a Loop nested within the OMP Parallel DO Directive
    _, invoke_info = parse(os.path.join(GOCEAN_BASE_PATH,
                                        "single_invoke_three_kernels.f90"),
                           api=GOCEAN_API)
    psy = PSyFactory(GOCEAN_API, distributed_memory=False).create(invoke_info)
    invoke = psy.invokes.invoke_list[0]
    schedule = invoke.schedule

    # Apply GOceanOMPParallelLoopTrans to the second Loop
    schedule, _ = otrans.apply(schedule.children[1])
    # Now enclose the parallel region within an ExtractNode (inserted
    # at the previous location of the OMPParallelDoDirective)
    schedule, _ = etrans.apply(schedule.children[1])

    code = str(psy.gen)
    output = ("      ! ExtractStart\n"
              "      ! CALL write_extract_arguments(argument_list)\n"
              "      !\n"
              "      !$omp parallel do default(shared), private(i,j), "
              "schedule(static)\n"
              "      DO j=2,jstop+1\n"
              "        DO i=2,istop\n"
              "          CALL compute_cv_code(i, j, cv_fld%data, "
              "p_fld%data, v_fld%data)\n"
              "        END DO \n"
              "      END DO \n"
              "      !$omp end parallel do\n"
              "      !\n"
              "      ! ExtractEnd\n")
    assert output in code
Example #15
def test_omp_parallel_do_around_parallel_region():
    ''' Test that a generation error is raised if we attempt
    to have an OpenMP parallel region around an OpenMP
    parallel do (with the latter applied second) '''
    psy, invoke = get_invoke("single_invoke_three_kernels.f90", 0)
    schedule = invoke.schedule

    ompl = GOceanOMPParallelLoopTrans()
    ompr = OMPParallelTrans()

    # Put a parallel region around two of the loops
    omp_schedule, _ = ompr.apply(schedule.children[0:2])

    # Put an OpenMP parallel do directive around one of those loops
    # (which is now a child of the region directive)
    schedule, _ = ompl.apply(omp_schedule.children[0].children[0])

    # Replace the original loop schedule with the transformed one
    invoke.schedule = schedule

    # Attempt to generate the transformed code
    with pytest.raises(GenerationError):
        _ = psy.gen
Example #16
def test_omp_parallel_do_inside_parallel_region():
    ''' Test that a generation error is raised if we attempt
    to have an OpenMP parallel do within an OpenMP
    parallel region '''
    psy, invoke = get_invoke("single_invoke_three_kernels.f90", 0)
    schedule = invoke.schedule

    ompl = GOceanOMPParallelLoopTrans()
    ompr = OMPParallelTrans()

    # Put an OpenMP parallel do directive around all of the loops
    for child in schedule.children:
        omp_schedule, _ = ompl.apply(child)

    # Now enclose all of the children within a parallel region
    schedule, _ = ompr.apply(omp_schedule.children)

    # Replace the original loop schedule with the transformed one
    invoke.schedule = schedule

    # Attempt to generate the transformed code
    with pytest.raises(GenerationError):
        _ = psy.gen
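
# A sketch of the valid nesting for comparison (an assumption, not shown in
# the tests above): GOceanOMPLoopTrans generates a plain 'omp do', which may
# legally sit inside a parallel region, unlike the PARALLEL DO used above.
def _valid_nesting_sketch(schedule):
    from psyclone.transformations import GOceanOMPLoopTrans
    ompl = GOceanOMPLoopTrans()
    ompr = OMPParallelTrans()
    # Put a (non-parallel) OpenMP loop directive around each loop...
    for child in schedule.children:
        omp_schedule, _ = ompl.apply(child)
    # ...then enclose them all within a single parallel region
    schedule, _ = ompr.apply(omp_schedule.children)
    return schedule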
Example #17
def test_single_node_ompparalleldo_gocean1p0_failing_const_loop():
    ''' Test that applying Extract Transformation on a Node enclosed
    within an OMP Parallel DO Directive produces the correct result
    in GOcean1.0 API. This test is mostly identical to the previous one,
    but uses const loop bounds. At this stage, the dependency analysis
    still reports `cv_fld%internal%xstart` etc. as loop boundaries, but
    the generated code will use istop and jstop.

    '''
    etrans = GOceanExtractTrans()
    otrans = GOceanOMPParallelLoopTrans()
    ctrans = GOConstLoopBoundsTrans()

    # Test a Loop nested within the OMP Parallel DO Directive
    psy, invoke = get_invoke("single_invoke_three_kernels.f90",
                             GOCEAN_API,
                             idx=0,
                             dist_mem=False)
    schedule = invoke.schedule
    ctrans.apply(schedule)
    # Required for #969
    schedule.view()

    # Apply GOceanOMPParallelLoopTrans to the second Loop
    otrans.apply(schedule.children[1])

    # Now enclose the parallel region within an ExtractNode (inserted
    # at the previous location of the OMPParallelDoDirective)
    etrans.apply(schedule.children[1])

    code = str(psy.gen)
    output = """      ! ExtractStart
      !
      CALL extract_psy_data%PreStart("psy_single_invoke_three_kernels", """ \
      """"invoke_0:compute_cv_code:r0", 6, 3)
      CALL extract_psy_data%PreDeclareVariable("istop", istop)
      CALL extract_psy_data%PreDeclareVariable("jstop", jstop)
      CALL extract_psy_data%PreDeclareVariable("p_fld", p_fld)
      CALL extract_psy_data%PreDeclareVariable("v_fld", v_fld)
      CALL extract_psy_data%PreDeclareVariable("cv_fld_post", cv_fld)
      CALL extract_psy_data%PreDeclareVariable("i_post", i)
      CALL extract_psy_data%PreDeclareVariable("j_post", j)
      CALL extract_psy_data%PreEndDeclaration
      CALL extract_psy_data%ProvideVariable("istop", istop)
      CALL extract_psy_data%ProvideVariable("jstop", jstop)
      CALL extract_psy_data%ProvideVariable("p_fld", p_fld)
      CALL extract_psy_data%ProvideVariable("v_fld", v_fld)
      CALL extract_psy_data%PreEnd
      !$omp parallel do default(shared), private(i,j), schedule(static)
      DO j=2,jstop+1
        DO i=2,istop
          CALL compute_cv_code(i, j, cv_fld%data, p_fld%data, v_fld%data)
        END DO
      END DO
      !$omp end parallel do
      CALL extract_psy_data%PostStart
      CALL extract_psy_data%ProvideVariable("cv_fld_post", cv_fld)
      CALL extract_psy_data%ProvideVariable("i_post", i)
      CALL extract_psy_data%ProvideVariable("j_post", j)
      CALL extract_psy_data%PostEnd
      !
      ! ExtractEnd"""
    assert output in code
Example #18
def test_single_node_ompparalleldo_gocean1p0_failing():
    ''' Test that applying Extract Transformation on a Node enclosed
    within an OMP Parallel DO Directive produces the correct result
    in GOcean1.0 API. This test is mostly identical to the previous one,
    but due to TODO #969 the loop boundaries are not defined and are
    therefore missing. Once #969 is fixed, the previous test can be
    removed.

    '''
    etrans = GOceanExtractTrans()
    otrans = GOceanOMPParallelLoopTrans()

    # Test a Loop nested within the OMP Parallel DO Directive
    psy, invoke = get_invoke("single_invoke_three_kernels.f90",
                             GOCEAN_API,
                             idx=0,
                             dist_mem=False)
    schedule = invoke.schedule

    # Apply GOceanOMPParallelLoopTrans to the second Loop
    otrans.apply(schedule.children[1])

    # Now enclose the parallel region within an ExtractNode (inserted
    # at the previous location of the OMPParallelDoDirective)
    etrans.apply(schedule.children[1])

    code = str(psy.gen)
    output = """      ! ExtractStart
      !
      CALL extract_psy_data%PreStart("psy_single_invoke_three_kernels", """ \
      """"invoke_0:compute_cv_code:r0", 6, 3)
      CALL extract_psy_data%PreDeclareVariable("cv_fld%internal%xstart", """ \
                                               """cv_fld%internal%xstart)
      CALL extract_psy_data%PreDeclareVariable("cv_fld%internal%xstop", """ \
                                               """cv_fld%internal%xstop)
      CALL extract_psy_data%PreDeclareVariable("cv_fld%internal%ystart", """ \
                                               """cv_fld%internal%ystart)
      CALL extract_psy_data%PreDeclareVariable("cv_fld%internal%ystop", """ \
                                               """cv_fld%internal%ystop)
      CALL extract_psy_data%PreDeclareVariable("p_fld", p_fld)
      CALL extract_psy_data%PreDeclareVariable("v_fld", v_fld)
      CALL extract_psy_data%PreDeclareVariable("cv_fld_post", cv_fld)
      CALL extract_psy_data%PreDeclareVariable("i_post", i)
      CALL extract_psy_data%PreDeclareVariable("j_post", j)
      CALL extract_psy_data%PreEndDeclaration
      CALL extract_psy_data%ProvideVariable("cv_fld%internal%xstart", """ \
                                            """cv_fld%internal%xstart)
      CALL extract_psy_data%ProvideVariable("cv_fld%internal%xstop", """ \
                                            """cv_fld%internal%xstop)
      CALL extract_psy_data%ProvideVariable("cv_fld%internal%ystart", """ \
                                            """cv_fld%internal%ystart)
      CALL extract_psy_data%ProvideVariable("cv_fld%internal%ystop", """ \
                                            """cv_fld%internal%ystop)
      CALL extract_psy_data%ProvideVariable("p_fld", p_fld)
      CALL extract_psy_data%ProvideVariable("v_fld", v_fld)
      CALL extract_psy_data%PreEnd
      !$omp parallel do default(shared), private(i,j), schedule(static)
      DO j=cv_fld%internal%ystart,cv_fld%internal%ystop
        DO i=cv_fld%internal%xstart,cv_fld%internal%xstop
          CALL compute_cv_code(i, j, cv_fld%data, p_fld%data, v_fld%data)
        END DO
      END DO
      !$omp end parallel do
      CALL extract_psy_data%PostStart
      CALL extract_psy_data%ProvideVariable("cv_fld_post", cv_fld)
      CALL extract_psy_data%ProvideVariable("i_post", i)
      CALL extract_psy_data%ProvideVariable("j_post", j)
      CALL extract_psy_data%PostEnd
      !
      ! ExtractEnd"""
    assert output in code