def test_bad_simd_specification_in_codegen():
    """A codegen platform that combines deep vectorization (``depth``) with
    ``is_simd`` is contradictory and must be rejected by schema validation."""
    with NamedTemporaryFile('w', suffix='.yaml') as tmp:
        tmp.write(remove_common_indentation("""
        platform:
            name: portable
            lang: opencl
            # deep vectorization
            depth: 4
            is_simd: True
        """))
        # seek(0) flushes the write buffer so the validator sees the content
        tmp.seek(0)
        with assert_raises(ValidationError):
            build_and_validate('codegen_platform.yaml', tmp.name)
def view_page_sandbox(pctx):
    # type: (CoursePageContext) -> http.HttpResponse
    """Render the flow-page sandbox: an instructor-facing editor that
    validates, instantiates and displays a single flow page from YAML.

    Page source, page data and answer data are persisted in the Django
    session under course-specific keys, so the sandbox survives reloads.
    """

    if not pctx.has_permission(pperm.use_page_sandbox):
        raise PermissionDenied()

    from course.validation import ValidationError
    from relate.utils import dict_to_struct, Struct
    import yaml

    # Session keys are per-course so sandboxes in different courses
    # do not clobber each other.
    PAGE_SESSION_KEY = make_sandbox_session_key(  # noqa
            PAGE_SESSION_KEY_PREFIX, pctx.course.identifier)
    ANSWER_DATA_SESSION_KEY = make_sandbox_session_key(  # noqa
            ANSWER_DATA_SESSION_KEY_PREFIX, pctx.course.identifier)
    PAGE_DATA_SESSION_KEY = make_sandbox_session_key(  # noqa
            PAGE_DATA_SESSION_KEY_PREFIX, pctx.course.identifier)

    request = pctx.request
    page_source = pctx.request.session.get(PAGE_SESSION_KEY)

    page_errors = None
    page_warnings = None

    # Which button was pressed on the sandbox form, if any.
    is_clear_post = (request.method == "POST" and "clear" in request.POST)
    is_clear_response_post = (request.method == "POST"
            and "clear_response" in request.POST)
    is_preview_post = (request.method == "POST" and "preview" in request.POST)

    def make_form(data=None):
        # type: (Optional[Text]) -> PageSandboxForm
        # Build the YAML editor form, pre-filled with the last validated source.
        return PageSandboxForm(
                page_source, "yaml", request.user.editor_mode,
                ugettext("Enter YAML markup for a flow page."),
                data)

    if is_preview_post:
        edit_form = make_form(pctx.request.POST)
        new_page_source = None

        if edit_form.is_valid():
            try:
                from pytools.py_codegen import remove_common_indentation
                new_page_source = remove_common_indentation(
                        edit_form.cleaned_data["content"],
                        require_leading_newline=False)
                from course.content import expand_yaml_macros
                new_page_source = expand_yaml_macros(
                        pctx.repo, pctx.course_commit_sha, new_page_source)

                # NOTE(review): yaml.load without an explicit safe Loader on
                # instructor-supplied input -- confirm this is intended.
                yaml_data = yaml.load(new_page_source)  # type: ignore
                page_desc = dict_to_struct(yaml_data)

                if not isinstance(page_desc, Struct):
                    raise ValidationError(
                            "Provided page source code is not "
                            "a dictionary. Do you need to remove a leading "
                            "list marker ('-') or some stray indentation?")

                from course.validation import validate_flow_page, ValidationContext
                vctx = ValidationContext(repo=pctx.repo,
                        commit_sha=pctx.course_commit_sha)

                validate_flow_page(vctx, "sandbox", page_desc)

                page_warnings = vctx.warnings

            except Exception:
                import sys
                tp, e, _ = sys.exc_info()

                # Report any load/validation failure inline instead of 500-ing.
                page_errors = (
                        ugettext("Page failed to load/validate")
                        + ": "
                        + "%(err_type)s: %(err_str)s" % {
                            "err_type": tp.__name__,
                            "err_str": e})  # type: ignore

            else:
                # Yay, it did validate.
                # Persist the source so it is shown again on the next visit.
                request.session[PAGE_SESSION_KEY] = page_source = new_page_source

            del new_page_source

        edit_form = make_form(pctx.request.POST)

    elif is_clear_post:
        # Clear the page source and any cached page/answer state.
        page_source = None
        pctx.request.session[PAGE_DATA_SESSION_KEY] = None
        pctx.request.session[ANSWER_DATA_SESSION_KEY] = None
        del pctx.request.session[PAGE_DATA_SESSION_KEY]
        del pctx.request.session[ANSWER_DATA_SESSION_KEY]
        edit_form = make_form()

    elif is_clear_response_post:
        # Clear only the cached page/answer state, keep showing the editor
        # with the submitted data.
        page_source = None
        pctx.request.session[PAGE_DATA_SESSION_KEY] = None
        pctx.request.session[ANSWER_DATA_SESSION_KEY] = None
        del pctx.request.session[PAGE_DATA_SESSION_KEY]
        del pctx.request.session[ANSWER_DATA_SESSION_KEY]
        edit_form = make_form(pctx.request.POST)

    else:
        edit_form = make_form()

    have_valid_page = page_source is not None
    if have_valid_page:
        # page_source survived validation above (or in a prior request),
        # so it is expected to parse cleanly here.
        yaml_data = yaml.load(page_source)  # type: ignore
        page_desc = cast(FlowPageDesc, dict_to_struct(yaml_data))

        from course.content import instantiate_flow_page
        try:
            page = instantiate_flow_page("sandbox", pctx.repo, page_desc,
                    pctx.course_commit_sha)
        except Exception:
            import sys
            tp, e, _ = sys.exc_info()
            page_errors = (
                    ugettext("Page failed to load/validate")
                    + ": "
                    + "%(err_type)s: %(err_str)s" % {
                        "err_type": tp.__name__,
                        "err_str": e})  # type: ignore
            have_valid_page = False

    if have_valid_page:
        page_desc = cast(FlowPageDesc, page_desc)

        # Try to recover page_data, answer_data
        page_data = get_sandbox_data_for_page(
                pctx, page_desc, PAGE_DATA_SESSION_KEY)

        answer_data = get_sandbox_data_for_page(
                pctx, page_desc, ANSWER_DATA_SESSION_KEY)

        from course.models import FlowSession
        from course.page import PageContext
        page_context = PageContext(
                course=pctx.course,
                repo=pctx.repo,
                commit_sha=pctx.course_commit_sha,

                # This helps code pages retrieve the editor pref.
                flow_session=FlowSession(
                    course=pctx.course,
                    participation=pctx.participation),
                in_sandbox=True)

        if page_data is None:
            page_data = page.initialize_page_data(page_context)
            pctx.request.session[PAGE_DATA_SESSION_KEY] = (
                    page_desc.type, page_desc.id, page_data)

        title = page.title(page_context, page_data)
        body = page.body(page_context, page_data)

        feedback = None
        page_form_html = None

        if page.expects_answer():
            from course.page.base import PageBehavior
            page_behavior = PageBehavior(
                    show_correctness=True,
                    show_answer=True,
                    may_change_answer=True)

            if request.method == "POST" and not is_preview_post:
                # An answer was submitted: grade it and cache the answer data.
                page_form = page.process_form_post(page_context, page_data,
                        request.POST, request.FILES,
                        page_behavior)

                if page_form.is_valid():

                    answer_data = page.answer_data(page_context, page_data,
                            page_form, request.FILES)

                    feedback = page.grade(page_context, page_data, answer_data,
                            grade_data=None)

                    pctx.request.session[ANSWER_DATA_SESSION_KEY] = (
                            page_desc.type, page_desc.id, answer_data)

            else:
                try:
                    page_form = page.make_form(page_context, page_data,
                            answer_data, page_behavior)
                except Exception:
                    import sys
                    tp, e, _ = sys.exc_info()
                    page_errors = (
                            ugettext("Page failed to load/validate "
                                "(change page ID to clear faults)")
                            + ": "
                            + "%(err_type)s: %(err_str)s" % {
                                "err_type": tp.__name__,
                                "err_str": e})  # type: ignore  # noqa: E501
                    have_valid_page = False
                    page_form = None

            if page_form is not None:
                page_form.helper.add_input(
                        Submit("submit", ugettext("Submit answer"),
                            accesskey="g"))
                page_form_html = page.form_to_html(
                        pctx.request, page_context, page_form, answer_data)

        correct_answer = page.correct_answer(
                page_context, page_data, answer_data,
                grade_data=None)

        return render_course_page(pctx, "course/sandbox-page.html", {
            "edit_form": edit_form,
            "page_errors": page_errors,
            "page_warnings": page_warnings,
            "form": edit_form,  # to placate form.media
            "have_valid_page": True,
            "title": title,
            "body": body,
            "page_form_html": page_form_html,
            "feedback": feedback,
            "correct_answer": correct_answer,
            })
    else:
        return render_course_page(pctx, "course/sandbox-page.html", {
            "edit_form": edit_form,
            "form": edit_form,  # to placate form.media
            "have_valid_page": False,
            "page_errors": page_errors,
            "page_warnings": page_warnings,
            })
def view_page_sandbox(pctx):
    """Render the flow-page sandbox: an instructor-facing editor that
    validates, instantiates and displays a single flow page from YAML.

    Page source, page data and answer data are persisted in the Django
    session under course-specific keys, so the sandbox survives reloads.
    """

    if not pctx.has_permission(pperm.use_page_sandbox):
        raise PermissionDenied()

    from course.validation import ValidationError
    from relate.utils import dict_to_struct, Struct
    import yaml

    # Session keys are per-course so sandboxes in different courses
    # do not clobber each other.
    PAGE_SESSION_KEY = (  # noqa
            "cf_validated_sandbox_page:" + pctx.course.identifier)
    ANSWER_DATA_SESSION_KEY = (  # noqa
            "cf_page_sandbox_answer_data:" + pctx.course.identifier)
    PAGE_DATA_SESSION_KEY = (  # noqa
            "cf_page_sandbox_page_data:" + pctx.course.identifier)

    request = pctx.request
    page_source = pctx.request.session.get(PAGE_SESSION_KEY)

    page_errors = None
    page_warnings = None

    # Which button was pressed on the sandbox form, if any.
    is_clear_post = (request.method == "POST" and "clear" in request.POST)
    is_clear_response_post = (request.method == "POST"
            and "clear_response" in request.POST)
    is_preview_post = (request.method == "POST" and "preview" in request.POST)

    def make_form(data=None):
        # Build the YAML editor form, pre-filled with the last validated source.
        return PageSandboxForm(
                page_source, "yaml", request.user.editor_mode,
                ugettext("Enter YAML markup for a flow page."),
                data)

    if is_preview_post:
        edit_form = make_form(pctx.request.POST)
        new_page_source = None

        if edit_form.is_valid():
            try:
                from pytools.py_codegen import remove_common_indentation
                new_page_source = remove_common_indentation(
                        edit_form.cleaned_data["content"],
                        require_leading_newline=False)
                # NOTE(review): yaml.load without an explicit safe Loader on
                # instructor-supplied input -- confirm this is intended.
                page_desc = dict_to_struct(yaml.load(new_page_source))

                if not isinstance(page_desc, Struct):
                    raise ValidationError("Provided page source code is not "
                            "a dictionary. Do you need to remove a leading "
                            "list marker ('-') or some stray indentation?")

                from course.validation import validate_flow_page, ValidationContext
                vctx = ValidationContext(
                        repo=pctx.repo,
                        commit_sha=pctx.course_commit_sha)

                validate_flow_page(vctx, "sandbox", page_desc)

                page_warnings = vctx.warnings

            # FIX: was a bare "except:", which also swallows SystemExit and
            # KeyboardInterrupt; only genuine errors should be reported inline.
            except Exception:
                import sys
                tp, e, _ = sys.exc_info()

                page_errors = (
                        ugettext("Page failed to load/validate")
                        + ": "
                        + "%(err_type)s: %(err_str)s" % {
                            "err_type": tp.__name__, "err_str": e})

            else:
                # Yay, it did validate.
                # Persist the source so it is shown again on the next visit.
                request.session[PAGE_SESSION_KEY] = page_source = new_page_source

            del new_page_source

        edit_form = make_form(pctx.request.POST)

    elif is_clear_post:
        # Clear the page source and any cached page/answer state.
        page_source = None
        pctx.request.session[PAGE_DATA_SESSION_KEY] = None
        pctx.request.session[ANSWER_DATA_SESSION_KEY] = None
        del pctx.request.session[PAGE_DATA_SESSION_KEY]
        del pctx.request.session[ANSWER_DATA_SESSION_KEY]
        edit_form = make_form()

    elif is_clear_response_post:
        # Clear only cached page/answer state; keep the submitted form data.
        page_source = None
        pctx.request.session[PAGE_DATA_SESSION_KEY] = None
        pctx.request.session[ANSWER_DATA_SESSION_KEY] = None
        del pctx.request.session[PAGE_DATA_SESSION_KEY]
        del pctx.request.session[ANSWER_DATA_SESSION_KEY]
        edit_form = make_form(pctx.request.POST)

    else:
        edit_form = make_form()

    have_valid_page = page_source is not None
    if have_valid_page:
        # page_source survived validation above (or in a prior request),
        # so it is expected to parse cleanly here.
        page_desc = dict_to_struct(yaml.load(page_source))

        from course.content import instantiate_flow_page
        try:
            page = instantiate_flow_page("sandbox", pctx.repo, page_desc,
                    pctx.course_commit_sha)
        # FIX: narrowed from bare "except:" (see above).
        except Exception:
            import sys
            tp, e, _ = sys.exc_info()
            page_errors = (
                    ugettext("Page failed to load/validate")
                    + ": "
                    + "%(err_type)s: %(err_str)s" % {
                        "err_type": tp.__name__, "err_str": e})
            have_valid_page = False

    if have_valid_page:
        # Try to recover page_data, answer_data
        page_data = get_sandbox_data_for_page(
                pctx, page_desc, PAGE_DATA_SESSION_KEY)

        answer_data = get_sandbox_data_for_page(
                pctx, page_desc, ANSWER_DATA_SESSION_KEY)

        from course.models import FlowSession
        from course.page import PageContext
        page_context = PageContext(
                course=pctx.course,
                repo=pctx.repo,
                commit_sha=pctx.course_commit_sha,

                # This helps code pages retrieve the editor pref.
                flow_session=FlowSession(
                    course=pctx.course,
                    participation=pctx.participation),
                in_sandbox=True)

        if page_data is None:
            page_data = page.initialize_page_data(page_context)
            pctx.request.session[PAGE_DATA_SESSION_KEY] = (
                    page_desc.type, page_desc.id, page_data)

        title = page.title(page_context, page_data)
        body = page.body(page_context, page_data)

        feedback = None
        page_form_html = None

        if page.expects_answer():
            from course.page.base import PageBehavior
            page_behavior = PageBehavior(
                    show_correctness=True,
                    show_answer=True,
                    may_change_answer=True)

            if request.method == "POST" and not is_preview_post:
                # An answer was submitted: grade it and cache the answer data.
                page_form = page.process_form_post(page_context, page_data,
                        request.POST, request.FILES,
                        page_behavior)

                if page_form.is_valid():

                    answer_data = page.answer_data(page_context, page_data,
                            page_form, request.FILES)

                    feedback = page.grade(page_context, page_data, answer_data,
                            grade_data=None)

                    pctx.request.session[ANSWER_DATA_SESSION_KEY] = (
                            page_desc.type, page_desc.id, answer_data)

            else:
                try:
                    page_form = page.make_form(page_context, page_data,
                            answer_data, page_behavior)
                # FIX: narrowed from bare "except:" (see above).
                except Exception:
                    import sys
                    tp, e, _ = sys.exc_info()
                    page_errors = (
                            ugettext("Page failed to load/validate "
                                "(change page ID to clear faults)")
                            + ": "
                            + "%(err_type)s: %(err_str)s" % {
                                "err_type": tp.__name__, "err_str": e})
                    have_valid_page = False
                    page_form = None

            if page_form is not None:
                page_form.helper.add_input(
                        Submit("submit", ugettext("Submit answer"),
                            accesskey="g"))
                page_form_html = page.form_to_html(
                        pctx.request, page_context, page_form, answer_data)

        correct_answer = page.correct_answer(
                page_context, page_data, answer_data,
                grade_data=None)

        return render_course_page(pctx, "course/sandbox-page.html", {
            "edit_form": edit_form,
            "page_errors": page_errors,
            "page_warnings": page_warnings,
            "form": edit_form,  # to placate form.media
            "have_valid_page": True,
            "title": title,
            "body": body,
            "page_form_html": page_form_html,
            "feedback": feedback,
            "correct_answer": correct_answer,
            })
    else:
        return render_course_page(pctx, "course/sandbox-page.html", {
            "edit_form": edit_form,
            "form": edit_form,  # to placate form.media
            "have_valid_page": False,
            "page_errors": page_errors,
            "page_warnings": page_warnings,
            })
def test_load_memory_limits():
    """Check parsing of memory-limit specifications from example and ad-hoc
    YAML files, including rejection of conflicting per-platform limits."""

    # create dummy loopy opts
    def __dummy_opts(name):
        # Minimal stand-in object exposing the attributes get_limits() reads.
        return type('', (object, ), {'platform_name': name,
                                     'lang': '',
                                     'order': ''})

    # test codegen
    limits = memory_limits.get_limits(
        __dummy_opts('portable'), [],
        __prefixify('codegen_platform.yaml', examples_dir))
    assert limits.limits[memory_type.m_global] == 1e9
    assert limits.limits[memory_type.m_constant] == 1e6
    assert limits.limits[memory_type.m_alloc] == 100e6
    assert len(limits.limits) == 3

    # test test_matrix -- this includes per-platform specification
    limits = memory_limits.get_limits(
        __dummy_opts('intel'), [],
        __prefixify('test_matrix.yaml', examples_dir))
    assert limits.limits[memory_type.m_global] == 5e9
    assert limits.limits[memory_type.m_constant] == 64e3
    assert limits.limits[memory_type.m_local] == 1e6
    assert limits.limits[memory_type.m_alloc] == 1e9
    assert len(limits.limits) == 4

    limits = memory_limits.get_limits(
        __dummy_opts('openmp'), [],
        __prefixify('test_matrix.yaml', examples_dir))
    assert limits.limits[memory_type.m_global] == 5e10
    assert len(limits.limits) == 1

    # and finally, test bad specifications
    # two 'global' limits scoped to the same platform -> conflict
    with NamedTemporaryFile('w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        model-list:
          - name: CH4
            path:
            mech: gri30.cti
        platform-list:
          - name: nvidia
            lang: opencl
            width: [128]
        memory-limits:
          - global: 5 Gb
            platforms: [nvidia]
          - global: 10 Gb
            platforms: [nvidia]
        test-list:
          - test-type: performance
            eval-type: jacobian
        """))
        file.flush()

        with assert_raises(InvalidInputSpecificationException):
            limits = memory_limits.get_limits(
                __dummy_opts('nvidia'), [], file.name)

    # two unscoped 'global' limits -> also a conflict
    with NamedTemporaryFile('w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        model-list:
          - name: CH4
            path:
            mech: gri30.cti
        platform-list:
          - name: nvidia
            lang: opencl
            width: [128]
        memory-limits:
          - global: 5 Gb
          - global: 10 Gb
        test-list:
          - test-type: performance
            eval-type: jacobian
        """))
        file.flush()

        with assert_raises(InvalidInputSpecificationException):
            limits = memory_limits.get_limits(
                __dummy_opts('nvidia'), [], file.name)

    # try with file w/o limits
    with NamedTemporaryFile('w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        model-list:
          - name: CH4
            path:
            mech: gri30.cti
        platform-list:
          - name: nvidia
            lang: opencl
            width: [128]
        test-list:
          - test-type: performance
            eval-type: jacobian
        """))
        file.flush()

        # no memory-limits section -> empty limits dict, no error
        limits = memory_limits.get_limits(
            __dummy_opts('nvidia'), [], file.name)
        assert limits.limits == {}
def test_get_test_matrix():
    # test that the example test matrix is specified correctly
    """Walk the state loops produced by get_test_matrix() for various kernel
    types / files and check that every expected option value is seen.

    The checking scheme: ``want`` maps option keys to either a list of
    expected values (each must appear at least once across the loop) or a
    callable ``f(state, want, seen)`` performing custom per-state asserts.
    """

    def update(want, state, key, seen):
        # Remove state[key] from the expected-values list the first time it
        # is observed; delegate to the callable checkers where provided.
        if state[key] not in seen[key]:
            if six.callable(want[key]):
                want[key](state, want, seen)
            else:
                if is_iterable(state[key]):
                    for k in state[key]:
                        if k not in seen[key]:
                            want[key].remove(k)
                            seen[key].add(k)
                else:
                    want[key].remove(state[key])
                    seen[key].add(state[key])
        return want, seen

    def run(want, loop, final_checks=None):
        # Drive `update` over every state; afterwards every expected value
        # must have been consumed.
        from copy import deepcopy
        seen = defaultdict(lambda: set())
        test = deepcopy(want)
        for state in loop:
            for key in test:
                update(test, state, key, seen)
        # assert we didn't miss anything (that isn't callable -- those handle
        # themselves)
        assert not any(len(v) for v in test.values() if not six.callable(v))
        if final_checks:
            assert final_checks(seen)

    import logging
    logger = logging.getLogger(__name__)
    logger.debug('loading test matrix schema')
    test_matrix = __prefixify('test_matrix.yaml', examples_dir)
    # get the species validation test
    logger.debug('loading test matrix from file')
    _, loop, max_vec_width = get_test_matrix('.', KernelType.species_rates,
                                             test_matrix, True,
                                             langs=current_test_langs,
                                             raise_on_missing=True)
    assert max_vec_width == 8
    from collections import defaultdict

    def width_check(state, want, seen):
        # C targets must be unvectorized; otherwise record the width seen.
        if state['lang'] == 'c':
            assert state['width'] is None
            assert state['depth'] is None
        else:
            seen['width'].add(state['width'])

    def check_final_widths(seen):
        # Only the widths declared in the example matrix may appear.
        return not (set(seen['width']) - set([None, 2, 4, 8]))

    # check we have reasonable values
    base = {'platform': ['intel', 'openmp'],
            'width': width_check,
            'conp': [True, False],
            'order': ['C', 'F'],
            'num_cores': num_cores_default()[0]}
    logger.debug('check 1')
    run(base, loop, final_checks=check_final_widths)

    # repeat for jacobian
    logger.debug('loading test matrix from file [1]')
    _, loop, _ = get_test_matrix('.', KernelType.jacobian, test_matrix, True,
                                 langs=current_test_langs,
                                 raise_on_missing=True)
    jacbase = base.copy()
    jacbase.update({
        'jac_format': [enum_to_string(JacobianFormat.sparse),
                       enum_to_string(JacobianFormat.full)],
        'jac_type': [enum_to_string(JacobianType.exact)],
        'use_atomic_doubles': [True, False]})  # true for OpenMP by default
    logger.debug('check 2')
    run(jacbase, loop, final_checks=check_final_widths)

    # next, do species performance
    logger.debug('loading test matrix from file [2]')
    _, loop, _ = get_test_matrix('.', KernelType.species_rates, test_matrix,
                                 False, langs=current_test_langs,
                                 raise_on_missing=True)
    want = base.copy()
    want.update({'order': ['F']})
    logger.debug('check 3')
    run(want, loop, final_checks=check_final_widths)

    # and finally, the Jacobian performance
    logger.debug('loading test matrix from file [4]')
    _, loop, _ = get_test_matrix('.', KernelType.jacobian, test_matrix, False,
                                 langs=current_test_langs,
                                 raise_on_missing=True)
    want = jacbase.copy()
    # no more openmp
    want.update({'use_atomic_doubles': [False]})

    def update_jactype(state, want, seen):
        # Finite-difference Jacobians are pinned to a fixed configuration.
        if state['jac_type'] == enum_to_string(JacobianType.finite_difference):
            assert state['num_cores'] == 1
            assert state['width'] is None
            assert state['depth'] is None
            assert state['order'] == 'C'
            assert state['conp'] is True
        else:
            assert state['width'] in [4, None]

    want.update({'platform': ['intel'],
                 'jac_type': update_jactype})

    def check_final_widths(seen):
        return len(seen['width'] - set([4, None])) == 0

    logger.debug('check 5')
    run(want, loop, final_checks=check_final_widths)

    # test gpu vs cpu specs
    logger.debug('writing temp file')
    with NamedTemporaryFile('w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        model-list:
          - name: CH4
            path:
            mech: gri30.cti
        platform-list:
          - name: nvidia
            lang: opencl
            width: [128]
          - name: intel
            lang: opencl
            width: [4]
        test-list:
          - test-type: performance
            eval-type: jacobian
            exact:
                sparse:
                    gpuwidth: [64]
                    order: ['F']
                full:
                    width: [2]
                    gpuorder: ['C']
        """))
        file.flush()
        logger.debug('loading test matrix from file [5]')
        _, loop, _ = get_test_matrix('.', KernelType.jacobian, file.name,
                                     False, langs=current_test_langs,
                                     raise_on_missing=True)

        from pyjac.utils import platform_is_gpu

        def sparsetest(state, want, seen):
            # gpu* overrides must apply only to GPU platforms, plain keys
            # only to CPU platforms.
            if state['jac_type'] == enum_to_string(JacobianType.exact):
                if state['jac_format'] == enum_to_string(JacobianFormat.sparse):
                    if platform_is_gpu(state['platform']):
                        assert state['width'] in [64, None]
                    else:
                        assert state['width'] in [4, None]
                        assert state['order'] == 'F'
                else:
                    if platform_is_gpu(state['platform']):
                        assert state['order'] == 'C'
                        assert state['width'] in [128, None]
                    else:
                        assert state['width'] in [2, None]

        want = {'jac_format': sparsetest}
        logger.debug('check 6')
        run(want, loop)

    # test model override
    logger.debug('writing temp file 2')
    with NamedTemporaryFile('w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        model-list:
          - name: CH4
            path:
            mech: gri30.cti
          - name: H2
            path:
            mech: h2o2.cti
        platform-list:
          - name: nvidia
            lang: opencl
            width: [128]
        test-list:
          - test-type: performance
            eval-type: jacobian
            finite_difference:
                both:
                    models: ['H2']
        """))
        file.flush()
        logger.debug('loading test matrix from file [6]')
        _, loop, _ = get_test_matrix('.', KernelType.jacobian, file.name,
                                     False, langs=current_test_langs,
                                     raise_on_missing=True)

        def modeltest(state, want, seen):
            # The override restricts the FD Jacobian to H2 only.
            if state['jac_type'] == enum_to_string(
                    JacobianType.finite_difference):
                assert set(state['models']) == set(['H2'])
            else:
                assert set(state['models']) == set(['H2', 'CH4'])

        want = {'models': modeltest}
        logger.debug('check 7')
        run(want, loop)

    # finally test bad model spec
    logger.debug('writing temp file 3')
    with NamedTemporaryFile('w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        model-list:
          - name: CH4
            path:
            mech: gri30.cti
          - name: H2
            path:
            mech: h2o2.cti
        platform-list:
          - name: nvidia
            lang: opencl
            width: [128]
        test-list:
          - test-type: performance
            eval-type: jacobian
            finite_difference:
                both:
                    models: ['BAD']
        """))
        file.flush()
        logger.debug('loading test matrix from file [7]')
        # 'BAD' is not in the model-list -> must raise
        with assert_raises(InvalidOverrideException):
            get_test_matrix('.', KernelType.jacobian, file.name,
                            False, langs=current_test_langs,
                            raise_on_missing=True)

    # test gpu vectype specification
    logger.debug('writing temp file 4')
    with NamedTemporaryFile('w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        model-list:
          - name: CH4
            path:
            mech: gri30.cti
          - name: H2
            path:
            mech: h2o2.cti
        platform-list:
          - name: nvidia
            lang: opencl
            width: [128]
          - name: openmp
            lang: c
        test-list:
          - test-type: performance
            eval-type: jacobian
            finite_difference:
                both:
                    width: []
                    depth: []
                    gpuwidth: [128]
        """))
        file.flush()
        logger.debug('loading test matrix from file [8]')
        _, loop, _ = get_test_matrix('.', KernelType.jacobian, file.name,
                                     False, langs=current_test_langs,
                                     raise_on_missing=True)

        def modeltest(state, want, seen):
            # width override cleared for CPU, gpuwidth applies to nvidia.
            if state['jac_type'] == enum_to_string(
                    JacobianType.finite_difference):
                if state['platform'] == 'openmp':
                    assert not bool(state['width'])
                else:
                    assert state['width'] == 128

        want = {'models': modeltest}
        logger.debug('check 8')
        run(want, loop)

    # test that source terms evaluations don't inherit exact jacobian overrides
    logger.debug('writing temp file 5')
    with NamedTemporaryFile(mode='w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        model-list:
          - name: CH4
            mech: gri30.cti
            path:
        platform-list:
          - lang: c
            name: openmp
        test-list:
          - test-type: performance
            # limit to intel
            platforms: [openmp]
            eval-type: both
            exact:
                both:
                    num_cores: [1]
                    order: [F]
                    gpuorder: [C]
                    conp: [conp]
                    width: [2, 4]
                    gpuwidth: [128]
                    models: []
        """))
        file.flush()
        logger.debug('loading test matrix from file [9]')
        _, loop, _ = get_test_matrix('.', KernelType.species_rates, file.name,
                                     False, langs=current_test_langs,
                                     raise_on_missing=True)

        # species-rates states must show the defaults, not the exact-Jacobian
        # overrides above
        want = {'platform': ['openmp'],
                'width': [None],
                'conp': [True, False],
                'order': ['C', 'F'],
                'models': ['CH4'],
                'num_cores': num_cores_default()[0]}
        logger.debug('check 9')
        run(want, loop)
def test_override():
    """Exercise the override schema, both stand-alone (common_schema) and
    embedded inside a test-list entry (test_matrix_schema)."""

    # test the base override schema
    base_expected = {
        'num_cores': [1],
        'order': ['F'],
        'gpuorder': ['C'],
        'conp': ['conp'],
        'width': [2, 4],
        'gpuwidth': [128],
        'models': ['C2H4']}

    with NamedTemporaryFile(mode='w', suffix='.yaml') as tmp:
        tmp.write(remove_common_indentation("""
        override:
            num_cores: [1]
            order: ['F']
            gpuorder: ['C']
            conp: ['conp']
            width: [2, 4]
            gpuwidth: [128]
            models: ['C2H4']
        """))
        tmp.flush()
        tmp.seek(0)
        data = build_and_validate('common_schema.yaml', tmp.name)['override']
        for key, expected in base_expected.items():
            assert data[key] == expected

    # now test embedded overrides
    embedded_expected = {
        'num_cores': [1],
        'order': ['F'],
        'gpuorder': ['C'],
        'conp': ['conp'],
        'depth': [2, 4],
        'gpudepth': [128],
        'models': ['C2H4']}

    with NamedTemporaryFile(mode='w', suffix='.yaml') as tmp:
        tmp.write(remove_common_indentation("""
        model-list:
          - name: CH4
            mech: gri30.cti
            path:
        platform-list:
          - lang: c
            name: openmp
        test-list:
          - test-type: performance
            # limit to intel
            platforms: [intel]
            eval-type: jacobian
            exact:
                both:
                    num_cores: [1]
                    order: [F]
                    gpuorder: [C]
                    conp: [conp]
                    depth: [2, 4]
                    gpudepth: [128]
                    models: [C2H4]
        """))
        tmp.flush()
        tmp.seek(0)
        data = build_and_validate('test_matrix_schema.yaml', tmp.name,
                                  update=True)
        data = data['test-list'][0]['exact']['both']
        for key, expected in embedded_expected.items():
            assert data[key] == expected
def test_duplicate_tests_fails():
    """Duplicate tests and colliding overrides must be rejected, while
    distinct sparse/full overrides are accepted."""

    def _validate_and_load(source):
        # Write the matrix to a temp file, then run schema validation
        # followed by test loading.
        with NamedTemporaryFile('w', suffix='.yaml') as tmp:
            tmp.write(remove_common_indentation(source))
            tmp.seek(0)
            tests = build_and_validate('test_matrix_schema.yaml', tmp.name)
            load_tests(tests, tmp.name)

    # two performance tests whose eval-types overlap ('both' includes
    # 'jacobian') -> duplicate
    with assert_raises(DuplicateTestException):
        _validate_and_load("""
        model-list:
          - name: CH4
            path:
            mech: gri30.cti
        platform-list:
          - name: openmp
            lang: c
        test-list:
          - test-type: performance
            eval-type: jacobian
          - test-type: performance
            eval-type: both
        """)

    # separate sparse / full overrides are fine
    _validate_and_load("""
        model-list:
          - name: CH4
            path:
            mech: gri30.cti
        platform-list:
          - name: openmp
            lang: c
        test-list:
          - test-type: performance
            eval-type: jacobian
            exact:
                sparse:
                    num_cores: [1]
                full:
                    num_cores: [1]
        """)

    # 'both' collides with an explicit 'full' override
    with assert_raises(OverrideCollisionException):
        _validate_and_load("""
        model-list:
          - name: CH4
            path:
            mech: gri30.cti
        platform-list:
          - name: openmp
            lang: c
        test-list:
          - test-type: performance
            eval-type: jacobian
            exact:
                both:
                    num_cores: [1]
                full:
                    num_cores: [1]
        """)
def test_unsimdable():
    """Build a kernel with map-based and affine-only index transforms and
    check that _unSIMDable_arrays flags exactly the map-transformed arrays
    for deep vectorizations, while wide/SIMD code still generates."""
    from loopy.kernel.array import (VectorArrayDimTag)
    inds = ('j', 'i')
    test_size = 16
    for opt in opts_loop(is_simd=True,
                         skip_test=lambda state:
                             state['depth'] and state['is_simd']):
        # make a kernel via the mapstore / usual methods
        base = creator('base', dtype=kint_type, shape=(10,), order=opt.order,
                       initializer=np.arange(10, dtype=kint_type))
        mstore = MapStore(opt, base, 8192)

        def __create_var(name, size=(test_size, 10)):
            return creator(name, kint_type, size, opt.order)

        # now create different arrays:
        # one that will cause a map transform
        mapt = creator('map', dtype=kint_type, shape=(10,), order=opt.order,
                       initializer=np.array(
                           list(range(0, 3)) + list(range(4, 11)),
                           kint_type))
        mapv = __create_var('mapv')
        mstore.check_and_add_transform(mapv, mapt)

        # one that is only an affine transform
        affinet = creator('affine', dtype=kint_type, shape=(10,),
                          order=opt.order,
                          initializer=np.arange(2, 12, dtype=kint_type))
        affinev = __create_var('affinev', (test_size, 12))
        mstore.check_and_add_transform(affinev, affinet)

        # and one that is a child of the affine transform
        affinet2 = creator('affine2', dtype=kint_type, shape=(10,),
                           order=opt.order,
                           initializer=np.arange(4, 14, dtype=kint_type))
        mstore.check_and_add_transform(affinet2, affinet)
        # and add a child to it
        affinev2 = __create_var('affinev2', (test_size, 14))
        mstore.check_and_add_transform(affinev2, affinet2)

        # and finally, a child of the map transform
        mapt2 = creator('map2', dtype=kint_type, shape=(10,), order=opt.order,
                        initializer=np.array(
                            list(range(0, 2)) + list(range(3, 11)),
                            kint_type))
        mstore.check_and_add_transform(mapt2, mapt)
        # and a child
        mapv2 = __create_var('mapv2')
        mstore.check_and_add_transform(mapv2, mapt2)

        # now create an kernel info
        affine_lp, affine_str = mstore.apply_maps(affinev, *inds)
        affine2_lp, affine2_str = mstore.apply_maps(affinev2, *inds)
        map_lp, map_str = mstore.apply_maps(mapv, *inds)
        map2_lp, map2_str = mstore.apply_maps(mapv2, *inds)

        instructions = Template(remove_common_indentation("""
            ${affine_str} = 0
            ${affine2_str} = 0
            ${map_str} = 0
            ${map2_str} = 0
        """)).safe_substitute(**locals())

        info = knl_info('test', instructions, mstore, kernel_data=[
            affine_lp, affine2_lp, map_lp, map2_lp])

        # create a dummy kgen
        kgen = make_kernel_generator(
            opt, KernelType.dummy, [info],
            type('namestore', (object,), {'jac': 0}),
            test_size=test_size, name='test')

        # make kernels
        kgen._make_kernels()

        # and call simdable
        cant_simd = _unSIMDable_arrays(kgen.kernels[0], opt, mstore,
                                       warn=False)

        if opt.depth:
            # deep vectorization: the map transform and its descendants
            # cannot be SIMD-ized
            assert sorted(cant_simd) == [mapt2.name, map_lp.name,
                                         map2_lp.name]
        else:
            assert cant_simd == []

        # make sure we can generate code
        lp.generate_code_v2(kgen.kernels[0]).device_code()

        if not kgen.array_split.vector_width:
            continue

        # check that we've vectorized all arrays
        assert all(len(arr.shape) == 3 for arr in kgen.kernels[0].args
                   if isinstance(arr, lp.ArrayArg))
        # get the split axis
        _, _, vec_axis, _ = kgen.array_split.split_shape(affine_lp)
        # every SIMD-able array must carry a vector tag on the split axis
        assert all(isinstance(arr.dim_tags[vec_axis], VectorArrayDimTag)
                   for arr in kgen.kernels[0].args
                   if arr.name not in cant_simd
                   and isinstance(arr, lp.ArrayArg))
def test_get_test_matrix():
    # test that the example test matrix is specified correctly
    """Older (build_type / vecsize / wide-deep) variant of the test-matrix
    walker; see `update`/`run` below for the checking scheme."""

    def update(want, state, key, seen):
        # Remove state[key] from the expected-values list the first time it
        # is observed; delegate to callable checkers where provided.
        if state[key] not in seen[key]:
            if six.callable(want[key]):
                want[key](state, want, seen)
            else:
                if is_iterable(state[key]):
                    for k in state[key]:
                        if k not in seen[key]:
                            want[key].remove(k)
                            seen[key].add(k)
                else:
                    want[key].remove(state[key])
                    seen[key].add(state[key])
        return want, seen

    def run(want, loop, final_checks=None):
        # Drive `update` over every state; afterwards every expected value
        # must have been consumed.
        from copy import deepcopy
        seen = defaultdict(lambda: set())
        test = deepcopy(want)
        for state in loop:
            for key in test:
                update(test, state, key, seen)
        # assert we didn't miss anything (that isn't callable -- those handle
        # themselves)
        assert not any(len(v) for v in test.values() if not six.callable(v))
        if final_checks:
            assert final_checks(seen)

    test_matrix = __prefixify('test_matrix.yaml', examples_dir)
    # get the species validation test
    _, loop, max_vec_width = get_test_matrix('.', build_type.species_rates,
                                             test_matrix, True,
                                             langs=current_test_langs,
                                             raise_on_missing=True)
    assert max_vec_width == 8
    from collections import defaultdict

    def vecsize_check(state, want, seen):
        # C targets must be unvectorized; otherwise record the vecsize seen.
        if state['lang'] == 'c':
            assert state['vecsize'] is None
            assert state['wide'] is False
            assert state['deep'] is False
        else:
            seen['vecsize'].add(state['vecsize'])

    def check_final_vecsizes(seen):
        return sorted(seen['vecsize']) == [2, 4, 8]

    # check we have reasonable values
    base = {'platform': ['intel', 'openmp'],
            'wide': [True, False],
            'vecsize': vecsize_check,
            'conp': [True, False],
            'order': ['C', 'F'],
            'num_cores': num_cores_default()[0]}
    run(base, loop, final_checks=check_final_vecsizes)

    # repeat for jacobian
    _, loop, _ = get_test_matrix('.', build_type.jacobian, test_matrix, True,
                                 langs=current_test_langs,
                                 raise_on_missing=True)
    jacbase = base.copy()
    jacbase.update({
        'sparse': [enum_to_string(JacobianFormat.sparse),
                   enum_to_string(JacobianFormat.full)],
        'jac_type': [enum_to_string(JacobianType.exact)],
        'use_atomics': [True, False]})  # true for OpenMP by default
    run(jacbase, loop, final_checks=check_final_vecsizes)

    # next, do species performance
    _, loop, _ = get_test_matrix('.', build_type.species_rates, test_matrix,
                                 False, langs=current_test_langs,
                                 raise_on_missing=True)
    want = base.copy()
    want.update({'order': ['F']})
    run(want, loop, final_checks=check_final_vecsizes)

    # and finally, the Jacobian performance
    _, loop, _ = get_test_matrix('.', build_type.jacobian, test_matrix, False,
                                 langs=current_test_langs,
                                 raise_on_missing=True)
    want = jacbase.copy()
    # no more openmp
    want.update({'use_atomics': [False]})

    def update_jactype(state, want, seen):
        # Finite-difference Jacobians are pinned to a fixed configuration.
        if state['jac_type'] == enum_to_string(JacobianType.finite_difference):
            assert state['num_cores'] == 1
            assert state['vecsize'] is None
            assert state['wide'] is False
            # NOTE(review): other checks in this function use the 'deep' key
            # (see vecsize_check) -- confirm 'depth' is really present in the
            # state here and not a typo for 'deep'.
            assert state['depth'] is False
            assert state['order'] == 'C'
            assert state['conp'] is True
        else:
            assert state['vecsize'] == 4

    want.update({'platform': ['intel'],
                 'jac_type': update_jactype})

    def check_final_vecsizes(seen):
        return len(seen['vecsize'] - set([4, None])) == 0

    run(want, loop, final_checks=check_final_vecsizes)

    # test gpu vs cpu specs
    # NOTE(review): unlike every other temp file in this test, this one is
    # written without remove_common_indentation -- confirm the raw literal
    # really starts at column zero so the YAML stays valid.
    with NamedTemporaryFile('w', suffix='.yaml') as file:
        file.write("""
model-list:
  - name: CH4
    path:
    mech: gri30.cti
platform-list:
  - name: nvidia
    lang: opencl
    vectype: [wide]
    vecsize: [128]
  - name: intel
    lang: opencl
    vectype: [wide]
    vecsize: [4]
test-list:
  - test-type: performance
    eval-type: jacobian
    exact:
        sparse:
            gpuvecsize: [64]
            order: ['F']
        full:
            vecsize: [2]
            gpuorder: ['C']
""")
        file.flush()
        _, loop, _ = get_test_matrix('.', build_type.jacobian, file.name,
                                     False, langs=current_test_langs,
                                     raise_on_missing=True)

        from pyjac.tests import platform_is_gpu

        def sparsetest(state, want, seen):
            # gpu* overrides must apply only to GPU platforms, plain keys
            # only to CPU platforms.
            if state['jac_type'] == enum_to_string(JacobianType.exact):
                if state['sparse'] == enum_to_string(JacobianFormat.sparse):
                    if platform_is_gpu(state['platform']):
                        assert state['vecsize'] == 64
                    else:
                        assert state['vecsize'] == 4
                        assert state['order'] == 'F'
                else:
                    if platform_is_gpu(state['platform']):
                        assert state['order'] == 'C'
                        assert state['vecsize'] == 128
                    else:
                        assert state['vecsize'] == 2

        want = {'sparse': sparsetest}
        run(want, loop)

    # test model override
    with NamedTemporaryFile('w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        model-list:
          - name: CH4
            path:
            mech: gri30.cti
          - name: H2
            path:
            mech: h2o2.cti
        platform-list:
          - name: nvidia
            lang: opencl
            vectype: [wide]
            vecsize: [128]
        test-list:
          - test-type: performance
            eval-type: jacobian
            finite_difference:
                both:
                    models: ['H2']
        """))
        file.flush()
        _, loop, _ = get_test_matrix('.', build_type.jacobian, file.name,
                                     False, langs=current_test_langs,
                                     raise_on_missing=True)

        def modeltest(state, want, seen):
            # The override restricts the FD Jacobian to H2 only.
            if state['jac_type'] == enum_to_string(
                    JacobianType.finite_difference):
                assert set(state['models']) == set(['H2'])
            else:
                assert set(state['models']) == set(['H2', 'CH4'])

        want = {'models': modeltest}
        run(want, loop)

    # finally test bad model spec
    with NamedTemporaryFile('w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        model-list:
          - name: CH4
            path:
            mech: gri30.cti
          - name: H2
            path:
            mech: h2o2.cti
        platform-list:
          - name: nvidia
            lang: opencl
            vectype: [wide]
            vecsize: [128]
        test-list:
          - test-type: performance
            eval-type: jacobian
            finite_difference:
                both:
                    models: ['BAD']
        """))
        file.flush()
        # 'BAD' is not in the model-list -> must raise
        with assert_raises(InvalidOverrideException):
            get_test_matrix('.', build_type.jacobian, file.name,
                            False, langs=current_test_langs,
                            raise_on_missing=True)

    # test gpu vectype specification
    with NamedTemporaryFile('w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        model-list:
          - name: CH4
            path:
            mech: gri30.cti
          - name: H2
            path:
            mech: h2o2.cti
        platform-list:
          - name: nvidia
            lang: opencl
            vectype: [wide, par]
            vecsize: [128]
          - name: openmp
            lang: c
            vectype: [par]
        test-list:
          - test-type: performance
            eval-type: jacobian
            finite_difference:
                both:
                    vectype: [par]
                    gpuvectype: [wide]
        """))
        file.flush()
        _, loop, _ = get_test_matrix('.', build_type.jacobian, file.name,
                                     False, langs=current_test_langs,
                                     raise_on_missing=True)

        def modeltest(state, want, seen):
            # CPU platform forced to parallel (no vecsize); GPU keeps wide 128.
            if state['jac_type'] == enum_to_string(
                    JacobianType.finite_difference):
                if state['platform'] == 'openmp':
                    assert not bool(state['vecsize'])
                else:
                    assert state['vecsize'] == 128

        want = {'models': modeltest}
        run(want, loop)

    # test that source terms evaluations don't inherit exact jacobian overrides
    with NamedTemporaryFile(mode='w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        model-list:
          - name: CH4
            mech: gri30.cti
            path:
        platform-list:
          - lang: c
            name: openmp
            vectype: ['par']
        test-list:
          - test-type: performance
            # limit to intel
            platforms: [openmp]
            eval-type: both
            exact:
                both:
                    num_cores: [1]
                    order: [F]
                    gpuorder: [C]
                    conp: [conp]
                    vecsize: [2, 4]
                    gpuvecsize: [128]
                    gpuvectype: [wide]
                    vectype: [wide]
                    models: []
        """))
        file.flush()
        _, loop, _ = get_test_matrix('.', build_type.species_rates, file.name,
                                     False, langs=current_test_langs,
                                     raise_on_missing=True)

        # species-rates states must show the defaults, not the exact-Jacobian
        # overrides above
        want = {'platform': ['openmp'],
                'wide': [False],
                'vecsize': [None],
                'conp': [True, False],
                'order': ['C', 'F'],
                'models': ['CH4'],
                'num_cores': num_cores_default()[0]}
        run(want, loop)