def test_bad_simd_specification_in_codegen():
    with NamedTemporaryFile('w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        platform:
            name: portable
            lang: opencl
            # deep vectorization
            depth: 4
            is_simd: True
        """))
        file.seek(0)
        with assert_raises(ValidationError):
            build_and_validate('codegen_platform.yaml', file.name)
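# For contrast, a hedged sketch of a specification the same schema should
# presumably accept -- the assumption (inferred only from the test name above,
# not confirmed against the schema) is that `is_simd` is rejected with deep
# vectorization but allowed with wide vectorization:
def test_good_simd_specification_in_codegen():
    with NamedTemporaryFile('w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        platform:
            name: portable
            lang: opencl
            # wide vectorization (assumed-valid counterpart)
            width: 4
            is_simd: True
        """))
        file.seek(0)
        # should validate without raising a ValidationError
        assert build_and_validate('codegen_platform.yaml', file.name)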
def setUp(self):
    lp.set_caching_enabled(False)
    if not self.is_setup:
        utils.setup_logging()
        # load equations
        self.dirpath = os.path.dirname(os.path.realpath(__file__))
        gasname = os.path.join(self.dirpath, 'test.cti')
        # first check test config
        gasname = get_mechanism_file()
        # load the gas
        gas = ct.Solution(gasname)
        # the mechanism
        elems, specs, reacs = read_mech_ct(gasname)
        # and finally check for a test platform
        platform = get_platform_file()
        try:
            if platform is None:
                platform = ''
                raise OSError
            platform = build_and_validate('test_platform_schema.yaml',
                                          platform)
        except (OSError, IOError):
            logger = logging.getLogger(__name__)
            logger.warning('Test platform file {} was not found, reverting '
                           'to default.'.format(platform))
            platform = None
        self.store = storage(platform, gas, specs, reacs)
        self.is_setup = True
def load_memory_limits(input_file, schema='common_schema.yaml'):
    """
    Convenience method for loading inputs from a memory limits file

    Parameters
    ----------
    input_file: str
        The input file to load
    schema: str
        The schema to validate the input file against
    """
    def __limitfy(limits):
        # note: this is only safe because we've already validated.
        # hence, NO ERRORS!
        if limits:
            return {k: parse_bytestr(object, v) if not k == 'platforms' else v
                    for k, v in six.iteritems(limits)}

    if input_file:
        try:
            memory_limits = build_and_validate(schema, input_file,
                                               allow_unknown=True)
            return [__limitfy(memory_limits['memory-limits'])]
        except ValidationError:
            # TODO: fix this -- need a much better way of specifying /
            # passing in limits
            memory_limits = build_and_validate('test_matrix_schema.yaml',
                                               input_file, allow_unknown=True)
            return [__limitfy(x) for x in memory_limits['memory-limits']]
        except KeyError:
            # no limits specified
            pass

    return {}
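# A minimal usage sketch for load_memory_limits. The 'global' / 'local' keys
# and byte-string values below are illustrative assumptions about what
# common_schema.yaml accepts, not confirmed here:
def _example_load_memory_limits():
    with NamedTemporaryFile('w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        memory-limits:
            global: 1 GB
            local: 16 kB
        """))
        file.flush()
        # returns a one-element list of limit dicts, with the byte strings
        # parsed to integer byte counts by parse_bytestr
        return load_memory_limits(file.name)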
def setUp(self):
    lp.set_caching_enabled(False)
    if not self.is_setup:
        utils.setup_logging()
        # first check test config
        gasname = get_mechanism_file()
        # load the gas
        gas = ct.Solution(gasname)
        # the mechanism
        elems, specs, reacs = read_mech_ct(gasname)
        # get sort type
        sorting = get_rxn_sorting()
        if sorting != reaction_sorting.none:
            # get ordering
            ordering = sort_reactions(reacs, sorting, return_order=True)
            # and apply
            reacs = sort_reactions(reacs, sorting)
            ct_reacs = gas.reactions()
            # and apply to gas
            gas = ct.Solution(thermo='IdealGas', kinetics='GasKinetics',
                              species=gas.species(),
                              reactions=[ct_reacs[i] for i in ordering])
        # and reassign
        utils.reassign_species_lists(reacs, specs)
        # and finally check for a test platform
        platform = get_platform_file()
        try:
            if platform is None:
                platform = ''
                raise OSError
            platform = build_and_validate('test_platform_schema.yaml',
                                          platform)
        except (OSError, IOError):
            logger = logging.getLogger(__name__)
            logger.warning('Test platform file {} was not found, reverting '
                           'to default.'.format(platform))
            platform = None
        self.store = storage(platform, gas, specs, reacs)
        self.is_setup = True
def load_platform(codegen):
    """
    Loads a code-generation platform from a file, and returns the
    corresponding :class:`loopy_options`

    Parameters
    ----------
    codegen: str
        The user-specified code-generation platform yaml file

    Returns
    -------
    :class:`loopy_options`
        The loaded platform

    Raises
    ------
    :class:`cerberus.ValidationError`: A validation error if the supplied
        codegen platform doesn't comply with the
        :doc:`../schemas/codegen_platform.yaml`
    """
    platform = build_and_validate('codegen_platform.yaml', codegen)['platform']
    width = platform['vectype'] == 'wide'
    depth = platform['vectype'] == 'deep'
    if width:
        width = platform['vecsize']
    elif depth:
        depth = platform['vecsize']
    # TODO: implement memory limits loading here
    # optional params get passed as kwargs
    kwargs = {}
    if 'order' in platform and platform['order'] is not None:
        kwargs['order'] = platform['order']
    if 'atomics' in platform:
        kwargs['use_atomics'] = platform['atomics']
    return loopy_options(width=width, depth=depth, lang=platform['lang'],
                         platform=platform['name'], **kwargs)
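# A usage sketch for load_platform, mirroring the fields the function reads
# above (the file contents and the 'intel' platform name are illustrative,
# not taken from a real schema example):
def _example_load_platform():
    with NamedTemporaryFile('w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        platform:
            name: intel
            lang: opencl
            vectype: wide
            vecsize: 8
            order: C
        """))
        file.flush()
        # for this input, yields loopy_options with width=8 and depth=False
        return load_platform(file.name)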
def _internal(source, schema, includes):
    # build & validate the source document against the schema
    built = build_and_validate(schema, source, includes=includes)
    assert built is not None
    return built
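# Usage sketch: the tests drive _internal with a source document, its schema,
# and any schema includes. File names here are assumed from the helpers used
# elsewhere in this file, e.g.:
#
#     built = _internal(__prefixify('test_matrix.yaml', examples_dir),
#                       'test_matrix_schema.yaml',
#                       includes=['common_schema.yaml'])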
def test_override():
    # test the base override schema
    with NamedTemporaryFile(mode='w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        override:
            num_cores: [1]
            order: ['F']
            gpuorder: ['C']
            conp: ['conp']
            width: [2, 4]
            gpuwidth: [128]
            models: ['C2H4']
        """))
        file.flush()
        file.seek(0)
        data = build_and_validate('common_schema.yaml', file.name)['override']
        assert data['num_cores'] == [1]
        assert data['order'] == ['F']
        assert data['gpuorder'] == ['C']
        assert data['conp'] == ['conp']
        assert data['width'] == [2, 4]
        assert data['gpuwidth'] == [128]
        assert data['models'] == ['C2H4']

    # now test embedded overrides
    with NamedTemporaryFile(mode='w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        model-list:
          - name: CH4
            mech: gri30.cti
            path:
        platform-list:
          - lang: c
            name: openmp
        test-list:
          - test-type: performance
            # limit to intel
            platforms: [intel]
            eval-type: jacobian
            exact:
                both:
                    num_cores: [1]
                    order: [F]
                    gpuorder: [C]
                    conp: [conp]
                    depth: [2, 4]
                    gpudepth: [128]
                    models: [C2H4]
        """))
        file.flush()
        file.seek(0)
        data = build_and_validate('test_matrix_schema.yaml', file.name,
                                  update=True)
        data = data['test-list'][0]['exact']['both']
        assert data['num_cores'] == [1]
        assert data['order'] == ['F']
        assert data['gpuorder'] == ['C']
        assert data['conp'] == ['conp']
        assert data['depth'] == [2, 4]
        assert data['gpudepth'] == [128]
        assert data['models'] == ['C2H4']
def test_duplicate_tests_fails():
    with NamedTemporaryFile('w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        model-list:
          - name: CH4
            path:
            mech: gri30.cti
        platform-list:
          - name: openmp
            lang: c
        test-list:
          - test-type: performance
            eval-type: jacobian
          - test-type: performance
            eval-type: both
        """))
        file.seek(0)
        with assert_raises(DuplicateTestException):
            tests = build_and_validate('test_matrix_schema.yaml', file.name)
            load_tests(tests, file.name)

    with NamedTemporaryFile('w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        model-list:
          - name: CH4
            path:
            mech: gri30.cti
        platform-list:
          - name: openmp
            lang: c
        test-list:
          - test-type: performance
            eval-type: jacobian
            exact:
                sparse:
                    num_cores: [1]
                full:
                    num_cores: [1]
        """))
        file.seek(0)
        tests = build_and_validate('test_matrix_schema.yaml', file.name)
        load_tests(tests, file.name)

    with NamedTemporaryFile('w', suffix='.yaml') as file:
        file.write(remove_common_indentation("""
        model-list:
          - name: CH4
            path:
            mech: gri30.cti
        platform-list:
          - name: openmp
            lang: c
        test-list:
          - test-type: performance
            eval-type: jacobian
            exact:
                both:
                    num_cores: [1]
                full:
                    num_cores: [1]
        """))
        file.seek(0)
        with assert_raises(OverrideCollisionException):
            tests = build_and_validate('test_matrix_schema.yaml', file.name)
            load_tests(tests, file.name)
def __get_test_matrix(**kwargs):
    return build_and_validate('test_matrix_schema.yaml',
                              __prefixify('test_matrix.yaml', examples_dir),
                              **kwargs)
def get_test_matrix(work_dir, test_type, test_matrix, for_validation,
                    raise_on_missing=True, langs=get_test_langs()):
    """Returns the set of mechanisms and an ordered dictionary of parameters
    for performance and functional testing

    Parameters
    ----------
    work_dir : str
        Working directory with mechanisms and for data
    test_type: :class:`build_type.jacobian`
        Controls some testing options (e.g., whether to do a sparse matrix
        or not)
    test_matrix: str
        The test matrix file to load
    for_validation: bool
        Determines which test type to load from the test matrix,
        validation or performance
    raise_on_missing: bool
        Raise an exception if the specified :param:`test_matrix` file is
        not found
    langs: list of str
        The allowed languages, modifiable by the :envvar:`TEST_LANGS` or
        test_langs in :file:`test_setup.py`

    Returns
    -------
    mechanisms : dict
        A dictionary indicating which mechanisms are available for testing.
        The structure is as follows:
            mech_name : {'mech': file path to the Cantera mechanism
                         'ns': number of species in the mechanism
                         'limits': {'full': XXX, 'sparse': XXX}}:
                a dictionary of limits on the number of conditions that can
                be evaluated for this mechanism (full & sparse jacobian
                respectively) due to memory constraints
    params : OrderedDict
        The parameters to put in an oploop
    max_vec_width : int
        The maximum vector width to test
    """
    work_dir = abspath(work_dir)

    # validate the test matrix
    matrix_name = test_matrix
    test_matrix = build_and_validate('test_matrix_schema.yaml', test_matrix)

    # check that we have the working directory
    if not exists(work_dir):
        raise Exception('Work directory {} for '.format(work_dir) +
                        'testing not found, exiting...')

    # load the models
    models = load_models(work_dir, test_matrix)
    assert isinstance(test_type, build_type)

    # load tests
    tests = load_tests(test_matrix, matrix_name)
    # filter those that match the test type
    valid_str = 'validation' if for_validation else 'performance'
    tests = [test for test in tests if test['test-type'] == valid_str]
    tests = [test for test in tests
             if test['eval-type'] == enum_to_string(test_type)
             or test['eval-type'] == 'both']
    # and dictify
    tests = [OrderedDict(test) for test in tests]
    if not tests:
        raise Exception('No tests found in matrix {} for {} test of {}, '
                        'exiting...'.format(matrix_name, valid_str,
                                            enum_to_string(test_type)))

    # get defaults we haven't migrated to schema yet
    rate_spec = ['fixed', 'hybrid'] if test_type != build_type.jacobian \
        else ['fixed']
    sparse = ([enum_to_string(JacobianFormat.sparse),
               enum_to_string(JacobianFormat.full)]
              if test_type == build_type.jacobian
              else [enum_to_string(JacobianFormat.full)])
    jac_types = [enum_to_string(JacobianType.exact),
                 enum_to_string(JacobianType.finite_difference)] \
        if (test_type == build_type.jacobian and not for_validation) \
        else [enum_to_string(JacobianType.exact)]
    split_kernels = [False]

    # and default # of cores, this may be overriden
    default_num_cores, can_override_cores = num_cores_default()

    # load platforms
    platforms = load_platforms(test_matrix, langs=langs,
                               raise_on_empty=raise_on_missing)
    platforms = [OrderedDict(platform) for platform in platforms]
    out_params = []
    logger = logging.getLogger(__name__)
    for test in tests:
        # filter platforms
        plats = [p.copy() for p in platforms]
        if 'platforms' in test:
            plats = [plat for plat in plats
                     if plat['platform'] in test['platforms']]
            if len(plats) < len(platforms):
                logger.info('Platforms ({}) filtered out for test type: {}'
                            .format(', '.join([p['platform']
                                               for p in platforms
                                               if p not in plats]),
                                    ' - '.join([test['test-type'],
                                                test['eval-type']])))
        if not len(plats):
            logger.warning('No platforms found for test {}, skipping...'
                           .format(' - '.join([test['test-type'],
                                               test['eval-type']])))
            continue

        for plookup in plats:
            clean = plookup.copy()
            # get default number of cores
            cores = default_num_cores[:]
            # get default vector widths
            widths = plookup['width']
            is_wide = widths is not None
            depths = plookup['depth']
            is_deep = depths is not None
            if is_deep and not is_wide:
                widths = depths[:]
            # sanity check
            if is_wide or is_deep:
                assert widths is not None
            # special gpu handling for cores
            is_gpu = False
            # test platform type
            if platform_is_gpu(plookup['platform']):
                # set cores to 1
                is_gpu = True
                cores = [1]

            def apply_vectypes(lookup, widths, is_wide=is_wide,
                               is_deep=is_deep):
                if is_wide or is_deep:
                    # set vec widths
                    use_par = None in widths or (is_wide and is_deep)
                    lookup['vecsize'] = [x for x in widths[:]
                                         if x is not None]
                    base = [True] if not use_par else [True, False]
                    if is_wide:
                        lookup['wide'] = base[:]
                        base.pop()
                    if is_deep:
                        lookup['deep'] = base[:]
                else:
                    lookup['vecsize'] = [None]
                    lookup['wide'] = [False]
                    lookup['deep'] = [False]
                del lookup['width']
                del lookup['depth']
            apply_vectypes(plookup, widths)

            # default is both conp / conv
            conp = [True, False]
            order = ['C', 'F']

            # loop over possible overrides
            oploop = OptionLoop(OrderedDict(
                [('ttype', [enum_to_string(test_type)]),
                 ('jtype', jac_types),
                 ('stype', sparse)]))
            for i, state in enumerate(oploop):
                ttype = state['ttype']
                jtype = state['jtype']
                stype = state['stype']

                def override_log(key, old, new):
                    logger.info('Replacing {} for test type: {}. Old value:'
                                ' ({}), New value: ({})'.format(
                                    key,
                                    stringify_args([ttype, test['eval-type'],
                                                    jtype, stype],
                                                   joiner='.'),
                                    stringify_args(listify(old)),
                                    stringify_args(listify(new))))
                # copy defaults
                icores = cores[:]
                iorder = order[:]
                iconp = conp[:]
                ivecsizes = widths[:] if widths is not None else [None]
                imodels = tuple(models.keys())
                # load overrides
                overrides = get_overrides(test, ttype, jtype, stype)

                # check that we can apply
                if 'num_cores' in overrides and not can_override_cores:
                    raise InvalidTestEnivironmentException(
                        ttype, 'num_cores', matrix_name, 'num_threads')
                elif 'num_cores' in overrides and is_gpu:
                    logger.info('Discarding unused "num_cores" override for '
                                'GPU platform {}'.format(plookup['platform']))
                    del overrides['num_cores']

                # possible override keys:
                # 'num_cores', 'order', 'conp', 'vecsize', 'vectype'
                # now apply overrides
                outplat = plookup.copy()
                ivectypes_override = None
                for override in overrides:
                    if override == 'num_cores':
                        override_log('num_cores', icores, overrides[override])
                        icores = overrides[override]
                    elif override == 'order' and not is_gpu:
                        override_log('order', iorder, overrides[override])
                        iorder = overrides[override]
                    elif override == 'gpuorder' and is_gpu:
                        override_log('order', iorder, overrides[override])
                        iorder = overrides[override]
                    elif override == 'conp':
                        iconp_save = iconp[:]
                        iconp = []
                        if 'conp' in overrides[override]:
                            iconp.append(True)
                        if 'conv' in overrides[override]:
                            iconp.append(False)
                        override_log('conp', iconp_save, iconp)
                    elif override == 'vecsize' and not is_gpu:
                        override_log('vecsize', ivecsizes,
                                     overrides[override])
                        outplat['vecsize'] = listify(overrides[override])
                    elif override == 'gpuvecsize' and is_gpu:
                        override_log('gpuvecsize', ivecsizes,
                                     overrides[override])
                        outplat['vecsize'] = listify(overrides[override])
                    elif override == 'vectype' and not is_gpu:
                        # we have to do this at the end
                        ivectypes_override = overrides[override]
                    elif override == 'gpuvectype' and is_gpu:
                        ivectypes_override = overrides[override]
                    elif override == 'models':
                        # check that all models are valid
                        for model in overrides[override]:
                            if model not in imodels:
                                raise InvalidOverrideException(
                                    override, model, imodels)
                        # and replace
                        override_log('models', stringify_args(imodels),
                                     stringify_args(overrides[override]))
                        imodels = tuple(overrides[override])

                if ivectypes_override is not None:
                    c = clean.copy()
                    apply_vectypes(c, outplat['vecsize'],
                                   is_wide='wide' in ivectypes_override,
                                   is_deep='deep' in ivectypes_override)
                    # and copy into working
                    outplat['wide'] = c['wide'] if 'wide' in c else [False]
                    outplat['deep'] = c['deep'] if 'deep' in c else [False]
                    outplat['vecsize'] = c['vecsize']
                    old = ['']
                    if is_wide:
                        old += ['wide']
                    if is_deep:
                        old += ['deep']
                    elif not is_wide:
                        old += ['par']
                    override_log('vecsize', old, ivectypes_override)

                # and finally, convert back to an option loop format
                out_params.append(
                    [('num_cores', icores),
                     ('order', iorder),
                     ('rate_spec', rate_spec),
                     ('split_kernels', split_kernels),
                     ('conp', iconp),
                     ('sparse', [stype]),
                     ('jac_type', [jtype]),
                     ('models', [imodels])] +
                    [(key, value) for key, value in six.iteritems(outplat)])

    max_vec_width = 1
    vector_params = [dict(p)['vecsize'] for p in out_params
                     if 'vecsize' in dict(p) and dict(p)['vecsize'] != [None]]
    if vector_params:
        max_vec_width = max(max_vec_width,
                            max([max(x) for x in vector_params]))
    from . import reduce_oploop
    loop = reduce_oploop(out_params)
    return models, loop, max_vec_width
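# A hedged end-to-end sketch: load the performance tests for Jacobian
# evaluation from a matrix file and walk the resulting option loop. The
# working directory and matrix file name are placeholders:
def _example_get_test_matrix():
    models, loop, max_vec_width = get_test_matrix(
        '.', build_type.jacobian, 'test_matrix.yaml', for_validation=False)
    for state in loop:
        # each state is one parameter combination, e.g. state['order'],
        # state['conp'], state['num_cores'], state['models'], ...
        pass
    return max_vec_width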