def parse(self, filename):
    """
    Parse *filename* with the hit parser and walk it for duplicate sections/parameters.

    Returns 0x01 when the file cannot be parsed, otherwise the error code reported by
    self._parseNode. Warnings about ignored parameters are printed at the end.
    """
    error_code = 0x00
    with open(filename, 'r') as f:
        data = f.read()
    try:
        root = hit.parse(os.path.abspath(filename), data)
    except Exception as err:
        # Converted from Python 2 print statements to the print() function.
        print(err)
        return 0x01  # Parse Error

    # Report duplicate sections/parameters found by the walker.
    w = DupWalker(os.path.abspath(filename))
    root.walk(w, hit.NodeType.All)
    for err in w.errors:
        print(err)
        error_code = 1

    # NOTE(review): this assignment overwrites any error_code set by the duplicate
    # walker above — preserved as-is since callers may rely on the current returns.
    error_code = self._parseNode(filename, root)
    del root

    if len(self.params_ignored):
        print('Warning detected when parsing file "' + os.path.join(
            os.getcwd(), filename) + '"')
        print(' Ignored Parameter(s): ', self.params_ignored)
    return error_code
def _createMergedInputFile(name, files, **kwargs):
    """
    Merge the [presentation] sections of several hit input *files* into one file *name*.

    Kwargs:
        style[str]: value written to the root 'style' parameter (default: 'inl')
        title[str]: optional presentation title written to the root block
    """
    style = kwargs.pop('style', 'inl')
    title = kwargs.pop('title', None)

    # Create the moose.i file for containing the entire moose workshop.
    # Use a context manager so the file is closed even if parsing below fails.
    with open(name, 'w') as fid:
        # Add the root block
        fid.write('[presentation]\n')
        fid.write(' style = ' + style + '\n')
        if title is not None:
            fid.write(' title = "' + title + '"\n')

        # Add the blocks from the various input files
        for f in files:
            with open(f, "r") as input_file:
                root = hit.parse(f, input_file.read())
            presentation = root.find("presentation")
            for child in presentation.children(node_type=hit.NodeType.Section):
                fid.write(child.render(1) + '\n')

        # Close the block
        fid.write('[]\n')
def extractContent(self, filename, settings):
    """
    Extract input file content with GetPot parser if 'block' is available. (override)
    """
    if settings['main_comment']:
        # Return everything before the first hit block header as the "main comment".
        with open(filename) as fid:
            text = fid.read()
        return re.search(r'(?P<comment>.*?)\[.*?\]', text,
                         flags=re.MULTILINE | re.DOTALL).group('comment')

    if settings['block']:
        with open(filename) as fid:
            text = fid.read()
        target = hit.parse(filename, text).find(settings['block'])
        if target is not None:
            return target.render()

    # No block requested, or the requested block was not found: defer to the base class.
    return super(ListingInputPattern, self).extractContent(filename, settings)
def parse(content, root=None, filename=''):
    """
    Parse a hit tree from a *content* string and return a `pyhit.Node` object.

    The returned object is the root of the loaded tree. The *root* input can provide a
    node object for the tree to populate; if it is given this same node is returned.
    The *filename*, if provided, will be used for error reporting when manipulating
    the tree.
    """
    tree = hit.parse(filename, content)
    hit.explode(tree)
    wrapped = Node(root, tree)
    return _parse_hit(wrapped, tree, filename)
def readTestRoot(fname):
    """Read a hit 'testroot' file and return (app_name, run_tests args, HitNode tree)."""
    with open(fname, 'r') as fid:
        root = hit.parse(fname, fid.read())

    extra_args = []
    if root.find('run_tests_args'):
        extra_args = shlex.split(root.param('run_tests_args'))

    tree = HitNode(hitnode=root)
    hit_parse(tree, root, '')

    # TODO: add check to see if the binary exists before returning. This can be used to
    # allow users to control fallthrough for e.g. individual module binaries vs. the
    # combined binary.
    return root.param('app_name'), extra_args, tree
def parse(content, root=None, filename=''):
    """
    Parse a hit tree from a string.

    Inputs:
        content[str]: string to process
        root[Node]: (Optional) root node of the tree
        filename[str]: (Optional) filename for error reporting
    """
    hit_node = hit.parse(filename, content)
    hit.explode(hit_node)
    # The original conditional was redundant: when 'root' is None the two branches
    # produced the identical call Node(None, hit_node).
    root = Node(root, hit_node)
    _parse_hit(root, hit_node, filename)
    return root
def readTestRoot(fname):
    """Parse *fname* as a hit 'testroot' file; return (app_name, extra args, HitNode)."""
    with open(fname, 'r') as handle:
        contents = handle.read()
    parsed = hit.parse(fname, contents)

    args = shlex.split(parsed.param('run_tests_args')) if parsed.find('run_tests_args') else []

    node = HitNode(hitnode=parsed)
    hit_parse(node, parsed, '')

    # TODO: add check to see if the binary exists before returning. This can be used to
    # allow users to control fallthrough for e.g. individual module binaries vs. the
    # combined binary.
    return parsed.param('app_name'), args, node
def findTestRoot(start=None, method=None):
    """
    Walk up from *start* looking for a 'testroot' file.

    Inputs:
        start[str]: directory to begin the search (default: current working directory)
        method[str]: build method (default: $METHOD or 'opt'); not used in the search

    Returns (rootdir, app_name, cli args). Raises RuntimeError if no test root is found.
    """
    # Resolve the defaults at call time: default argument expressions are evaluated
    # once at import, so 'start=os.getcwd()' would be frozen to the importer's cwd.
    if start is None:
        start = os.getcwd()
    if method is None:
        method = os.environ.get('METHOD', 'opt')

    rootdir = os.path.abspath(start)
    while os.path.dirname(rootdir) != rootdir:
        fname = os.path.join(rootdir, 'testroot')
        if os.path.exists(fname):
            with open(fname, 'r') as f:
                data = f.read()
            root = hit.parse(fname, data)
            args = []
            if root.find('cli_args'):
                args = shlex.split(root.param('cli_args'))
            # TODO: add check to see if the binary exists before returning. This can be used to
            # allow users to control fallthrough for e.g. individual module binaries vs. the
            # combined binary.
            return rootdir, root.param('app_name'), args
        rootdir = os.path.dirname(rootdir)
    raise RuntimeError('test root directory not found')
def read_benchmarks(benchlist):
    """
    Load benchmark entries from the hit-format file *benchlist*.

    Returns a list of BenchEntry objects. Raises ValueError if the file does not exist
    or an entry is missing a required field.
    """
    if not os.path.exists(benchlist):
        raise ValueError('benchmark list file "{}" does not exist'.format(benchlist))

    benches = []
    with open(benchlist, 'r') as f:
        data = f.read()
    root = hit.parse(benchlist, data)
    for child in root.find('benchmarks').children():
        # Validate required fields up front so a malformed entry fails with a clear
        # message instead of an AttributeError from calling .strip() on None.
        if not child.param('binary'):
            raise ValueError('missing required "binary" field')
        ex = child.param('binary').strip()
        if not child.param('input'):
            raise ValueError('missing required "input" field')
        infile = child.param('input').strip()
        args = ''
        if child.find('cli_args'):
            args = child.param('cli_args')
        benches.append(BenchEntry(child.path(), ex, infile, args))
    return benches
def parse(self, filename, default_values=None):
    """
    Parse *filename* with hit, collect duplicate-field errors, and process the nodes.

    Parse failures are appended to self.errors instead of raising.
    """
    with open(filename, 'r') as fid:
        text = fid.read()
    abs_path = os.path.abspath(filename)
    self.fname = abs_path
    try:
        root = hit.parse(abs_path, text)
    except Exception as err:
        self.errors.append('{}'.format(err))
        return
    self.root = root

    walker = DupWalker(abs_path)
    root.walk(walker, hit.NodeType.Field)
    self.errors.extend(walker.errors)

    self._parseNode(filename, root, default_values)
def readInputData(self, data, filename):
    """
    Parse the input-file text *data* (originating from *filename*) into self.root_node.

    Duplicate-field errors are reported as warnings and raised as a PeacockException,
    which is re-raised to the caller after logging.
    """
    try:
        abs_path = os.path.abspath(filename)
        self.filename = abs_path
        tree = hit.parse(abs_path, data)
        hit.explode(tree)

        walker = DupWalker(abs_path)
        tree.walk(walker, hit.NodeType.Field)
        if walker.errors:
            for err in walker.errors:
                mooseutils.mooseWarning(err)
            raise PeacockException("Parser errors")

        # Only commit state once parsing and validation succeeded.
        self.original_text = data
        self.root_node = tree
        self.changed = False
    except PeacockException as e:
        msg = "Failed to parse input file %s:\n%s\n" % (filename, e)
        mooseutils.mooseWarning(msg)
        raise e
def parse(self, filename, default_values=None):
    """Read *filename*, parse it with hit, record duplicate-field errors, process nodes."""
    with open(filename, 'r') as handle:
        contents = handle.read()
    full_path = os.path.abspath(filename)
    self.fname = full_path
    try:
        tree = hit.parse(full_path, contents)
    except Exception as exc:
        # Record the failure rather than raising; callers inspect self.errors.
        self.errors.append('{}'.format(exc))
        return
    self.root = tree

    dup_walker = DupWalker(full_path)
    tree.walk(dup_walker, hit.NodeType.Field)
    self.errors.extend(dup_walker.errors)

    self._parseNode(filename, tree, default_values)
def hit_load(filename):
    """
    Read and parse a hit file (MOOSE input file format).

    Inputs:
        filename[str]: The filename to open and parse.

    Returns a HitNode object, which is the root of the tree. HitNode objects are custom
    versions of the anytree.Node objects.
    """
    if os.path.exists(filename):
        with open(filename, 'r') as fid:
            content = fid.read()
    else:
        # NOTE(review): if mooseError does not raise, 'content' is undefined below —
        # behavior preserved from the original.
        message.mooseError("Unable to load the hit file ", filename)

    raw = hit.parse(filename, content)
    tree = HitNode(hitnode=raw)
    hit_parse(tree, raw, filename)
    return tree
def hit_load(filename):
    """
    Read and parse a hit file (MOOSE input file format).

    Inputs:
        filename[str]: The filename to open and parse.

    Returns a HitNode object (the root of the tree); HitNode objects are custom
    versions of the anytree.Node objects.
    """
    if not os.path.exists(filename):
        # NOTE(review): if mooseError returns instead of raising, 'text' is undefined
        # below — preserved from the original behavior.
        message.mooseError("Unable to load the hit file ", filename)
    else:
        with open(filename, 'r') as handle:
            text = handle.read()

    parsed = hit.parse(filename, text)
    node = HitNode(hitnode=parsed)
    hit_parse(node, parsed, filename)
    return node
def getInputFileFormat(self, extra=None):
    """
    Does a dump and uses the GetPotParser to parse the output.
    """
    # Avoid a mutable default argument: a literal [] default is evaluated once at
    # definition time and shared across all calls.
    extra = [] if extra is None else extra
    args = ["--disable-refcount-printing", "--dump"] + extra
    output = run_app(args)
    self.assertIn("### START DUMP DATA ###\n", output)
    self.assertIn("### END DUMP DATA ###\n", output)
    output = output.split('### START DUMP DATA ###\n')[1]
    output = output.split('### END DUMP DATA ###')[0]
    self.assertNotEqual(len(output), 0)
    root = hit.parse("dump.i", output)
    hit.explode(root)
    w = DupWalker("dump.i")
    root.walk(w, hit.NodeType.All)
    if w.errors:
        print("\n".join(w.errors))
    self.assertEqual(len(w.errors), 0)
    return root
def findTestRoot(start=None, method=None):
    """
    Walk up from *start* looking for a 'testroot' file.

    Inputs:
        start[str]: directory to begin the search (default: current working directory)
        method[str]: build method (default: $METHOD or 'opt'); not used in the search

    Returns (rootdir, app_name, cli args, HitNode tree). Raises RuntimeError if no
    test root directory is found.
    """
    # Resolve the defaults at call time: default argument expressions are evaluated
    # once at import, so 'start=os.getcwd()' would be frozen to the importer's cwd.
    if start is None:
        start = os.getcwd()
    if method is None:
        method = os.environ.get('METHOD', 'opt')

    rootdir = os.path.abspath(start)
    while os.path.dirname(rootdir) != rootdir:
        fname = os.path.join(rootdir, 'testroot')
        if os.path.exists(fname):
            with open(fname, 'r') as f:
                data = f.read()
            root = hit.parse(fname, data)
            args = []
            if root.find('cli_args'):
                args = shlex.split(root.param('cli_args'))
            hit_node = HitNode(hitnode=root)
            hit_parse(hit_node, root, '')
            # TODO: add check to see if the binary exists before returning. This can be used to
            # allow users to control fallthrough for e.g. individual module binaries vs. the
            # combined binary.
            return rootdir, root.param('app_name'), args, hit_node
        rootdir = os.path.dirname(rootdir)
    raise RuntimeError('test root directory not found')
def getInputFileFormat(self, extra=None):
    """
    Does a dump and uses the GetPotParser to parse the output.
    """
    # Replace the mutable default argument [] (shared across calls) with None.
    if extra is None:
        extra = []
    args = ["--disable-refcount-printing", "--dump"] + extra
    output = run_app(args)
    self.assertIn("### START DUMP DATA ###\n", output)
    self.assertIn("### END DUMP DATA ###\n", output)
    output = output.split('### START DUMP DATA ###\n')[1]
    output = output.split('### END DUMP DATA ###')[0]
    self.assertNotEqual(len(output), 0)
    root = hit.parse("dump.i", output)
    hit.explode(root)
    w = DupWalker("dump.i")
    root.walk(w, hit.NodeType.All)
    if w.errors:
        print("\n".join(w.errors))
    self.assertEqual(len(w.errors), 0)
    return root
def extractContent(self, filename, settings):
    """
    Extract input file content with GetPot parser if 'block' is available. (override)
    """
    if settings['main_comment']:
        with open(filename) as handle:
            data = handle.read()
        # Capture everything up to the first hit section header.
        header = re.search(r'(?P<comment>.*?)\[.*?\]', data, flags=re.MULTILINE | re.DOTALL)
        return header.group('comment')

    if not settings['block']:
        return super(ListingInputPattern, self).extractContent(filename, settings)

    with open(filename) as handle:
        data = handle.read()
    block = hit.parse(filename, data).find(settings['block'])
    if block is not None:
        return block.render()
    return super(ListingInputPattern, self).extractContent(filename, settings)
def read_benchmarks(benchlist):
    """
    Load benchmark entries from the hit-format file *benchlist*.

    Returns a list of BenchEntry objects; raises ValueError when the list file is
    missing or an entry lacks a required 'binary'/'input' field.
    """
    if not os.path.exists(benchlist):
        raise ValueError('benchmark list file "{}" does not exist'.format(benchlist))

    with open(benchlist, 'r') as fid:
        root = hit.parse(benchlist, fid.read())

    entries = []
    for child in root.find('benchmarks').children():
        if not child.param('binary'):
            raise ValueError('missing required "binary" field')
        binary = child.param('binary').strip()
        if not child.param('input'):
            raise ValueError('missing required "input" field')
        input_file = child.param('input').strip()
        cli_args = child.param('cli_args') if child.find('cli_args') else ''
        entries.append(BenchEntry(child.path(), binary, input_file, cli_args))
    return entries
def __init__(self, input_file, **kwargs):
    """
    Build a presentation from the hit input file *input_file*.

    Kwargs:
        format[str]: output format, 'remark' (default) or 'reveal'
    """
    # Determine the format
    self._format = kwargs.pop('format', 'remark')
    self._reveal_dir = None
    if self._format == 'reveal':
        self._reveal_dir = os.getenv(
            'REVEAL_JS_DIR',
            os.path.join(os.getenv('HOME'), 'projects', 'reveal.js'))
        if not os.path.exists(self._reveal_dir):
            # print() form for Python 3; also fixes the env-var name in the message
            # (the code above reads REVEAL_JS_DIR, not "REAVEL_JS_DIR").
            print('ERROR: Attempted to output in Reveal.js format, but Reveal.js '
                  'directory was not found, set the REVEAL_JS_DIR environmental '
                  'variable or clone the repository into your ~/projects directory.')
            sys.exit()

    # Create the Factory and Warehouse
    self.factory = Factory()
    self.warehouse = SlideSetWarehouse(format=self._format)

    # Set the location of PresentationBuilder directory
    self._source_dir = os.path.abspath(
        os.path.join(os.path.split(inspect.getfile(self.__class__))[0], '..'))

    # Extract input/output file names
    f, ext = os.path.splitext(input_file)
    self._input_file = input_file
    self._output_file = f + '.html'

    blaster_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))

    # Register the objects to be created
    self.factory.loadPlugins([blaster_dir], 'slidesets', "IS_PRESENTATION")
    self.factory.loadPlugins([blaster_dir], 'slides', "IS_PRESENTATION")
    self.factory.loadPlugins([blaster_dir], 'images', "IS_PRESENTATION")

    # Build the Parser object
    self.parser = Parser(self.factory, self.warehouse, check_for_type=False)

    # Create SlideSet objects via the Parser object by parsing the input file
    print(colorText('Parsing input...', 'CYAN'))
    default_params = HitNode(hitnode=hit.parse('', ''))
    err = self.parser.parse(input_file, default_params)
    if err:
        sys.exit(err)
    print('')

    # Store the top-level 'presentation' level parameters
    pres = self.parser.root.find('presentation')
    self._params = {}
    if pres is not None:
        for child in pres.children():
            self._params[child.path()] = child.param()

    # Extract CSS style
    self._style()

    # Build the slides
    self.warehouse.execute()
def __init__(self, argv, moose_dir, app_name=None):
    """
    Set up the test harness: locate the test root, load tester plugins, parse the
    command line, and collect the configuration "checks" used to filter tests.
    """
    os.environ['MOOSE_DIR'] = moose_dir
    os.environ['PYTHONPATH'] = os.path.join(moose_dir, 'python') + ':' + os.environ.get('PYTHONPATH', '')

    if app_name:
        rootdir, app_name, args, root_params = '.', app_name, [], HitNode(hitnode=hit.parse('', ''))
    else:
        rootdir, app_name, args, root_params = findTestRoot(start=os.getcwd())

    orig_cwd = os.getcwd()
    os.chdir(rootdir)
    argv = argv[:1] + args + argv[1:]

    self.factory = Factory()
    self.app_name = app_name
    self.root_params = root_params

    # Build a Warehouse to hold the MooseObjects
    self.warehouse = Warehouse()

    # Get dependant applications and load dynamic tester plugins.
    # If applications have new testers, we expect to find them in
    # <app_dir>/scripts/TestHarness/testers
    dirs = [os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))]
    sys.path.append(os.path.join(moose_dir, 'framework', 'scripts'))  # For find_dep_apps.py

    # Use the find_dep_apps script to get the dependant applications for an app
    import find_dep_apps
    depend_app_dirs = find_dep_apps.findDepApps(app_name, use_current_only=True)
    dirs.extend([os.path.join(my_dir, 'scripts', 'TestHarness')
                 for my_dir in depend_app_dirs.split('\n')])

    # Finally load the plugins!
    self.factory.loadPlugins(dirs, 'testers', "IS_TESTER")

    self._infiles = ['tests', 'speedtests']
    self.parse_errors = []
    self.test_table = []
    self.num_passed = 0
    self.num_failed = 0
    self.num_skipped = 0
    self.num_pending = 0
    self.host_name = gethostname()
    self.moose_dir = moose_dir
    self.base_dir = os.getcwd()
    self.run_tests_dir = os.path.abspath('.')
    self.results_storage = '.previous_test_results.json'
    self.code = '2d2d6769726c2d6d6f6465'
    self.error_code = 0x0
    self.keyboard_talk = True

    # Assume libmesh is a peer directory to MOOSE if not defined.
    # (dict.has_key() was removed in Python 3; use the 'in' operator.)
    if 'LIBMESH_DIR' in os.environ:
        self.libmesh_dir = os.environ['LIBMESH_DIR']
    else:
        self.libmesh_dir = os.path.join(self.moose_dir, 'libmesh', 'installed')

    self.file = None  # Failed Tests file object
    self.writeFailedTest = None

    # Parse arguments
    self.parseCLArgs(argv)

    checks = {}
    checks['platform'] = util.getPlatforms()
    checks['submodules'] = util.getInitializedSubmodules(self.run_tests_dir)
    checks['exe_objects'] = None      # This gets calculated on demand
    checks['registered_apps'] = None  # This gets extracted on demand

    # The TestHarness doesn't strictly require the existence of libMesh in order to
    # run. Here we allow the user to select whether they want to probe for libMesh
    # configuration options.
    if self.options.skip_config_checks:
        checks['petsc_version'] = 'N/A'
        checks['slepc_version'] = 'N/A'
        for key in ('compiler', 'petsc_version_release', 'library_mode', 'mesh_mode',
                    'dtk', 'unique_ids', 'vtk', 'tecplot', 'dof_id_bytes',
                    'petsc_debug', 'curl', 'threading', 'superlu', 'parmetis',
                    'chaco', 'party', 'ptscotch', 'slepc', 'unique_id', 'cxx11',
                    'asio', 'boost', 'fparser_jit'):
            checks[key] = set(['ALL'])
    else:
        checks['compiler'] = util.getCompilers(self.libmesh_dir)
        checks['petsc_version'] = util.getPetscVersion(self.libmesh_dir)
        checks['slepc_version'] = util.getSlepcVersion(self.libmesh_dir)
        checks['library_mode'] = util.getSharedOption(self.libmesh_dir)
        checks['threading'] = util.getLibMeshThreadingModel(self.libmesh_dir)
        checks['asio'] = util.getIfAsioExists(self.moose_dir)
        # The remaining entries are all plain libMesh configure options.
        for key in ('petsc_version_release', 'mesh_mode', 'dtk', 'unique_ids', 'vtk',
                    'tecplot', 'dof_id_bytes', 'petsc_debug', 'curl', 'superlu',
                    'parmetis', 'chaco', 'party', 'ptscotch', 'slepc', 'unique_id',
                    'cxx11', 'boost', 'fparser_jit'):
            checks[key] = util.getLibMeshConfigOption(self.libmesh_dir, key)

    # Override the MESH_MODE option if using the '--distributed-mesh'
    # or (deprecated) '--parallel-mesh' option.
    if (self.options.parallel_mesh == True or self.options.distributed_mesh == True) or \
       (self.options.cli_args is not None and
            (self.options.cli_args.find('--parallel-mesh') != -1 or
             self.options.cli_args.find('--distributed-mesh') != -1)):
        checks['mesh_mode'] = set(['ALL', 'DISTRIBUTED'])

    checks['method'] = set(['ALL', self.options.method.upper()])

    # This is so we can easily pass checks around to any scheduler plugin
    self.options._checks = checks

    self.initialize(argv, app_name)
    os.chdir(orig_cwd)
def __init__(self, argv, moose_dir, app_name=None):
    """
    Set up the test harness: locate the test root, load tester plugins, parse the
    command line, and collect the configuration "checks" used to filter tests.
    """
    os.environ['MOOSE_DIR'] = moose_dir
    os.environ['PYTHONPATH'] = os.path.join(moose_dir, 'python') + ':' + os.environ.get('PYTHONPATH', '')

    if app_name:
        rootdir, app_name, args, root_params = '.', app_name, [], HitNode(hitnode=hit.parse('', ''))
    else:
        rootdir, app_name, args, root_params = findTestRoot(start=os.getcwd())

    orig_cwd = os.getcwd()
    os.chdir(rootdir)
    argv = argv[:1] + args + argv[1:]

    self.factory = Factory()
    self.app_name = app_name
    self.root_params = root_params

    # Build a Warehouse to hold the MooseObjects
    self.warehouse = Warehouse()

    # Get dependant applications and load dynamic tester plugins.
    # If applications have new testers, we expect to find them in
    # <app_dir>/scripts/TestHarness/testers
    dirs = [os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))]
    sys.path.append(os.path.join(moose_dir, 'framework', 'scripts'))  # For find_dep_apps.py

    # Use the find_dep_apps script to get the dependant applications for an app
    import find_dep_apps
    depend_app_dirs = find_dep_apps.findDepApps(app_name, use_current_only=True)
    dirs.extend([os.path.join(my_dir, 'scripts', 'TestHarness')
                 for my_dir in depend_app_dirs.split('\n')])

    # Finally load the plugins!
    self.factory.loadPlugins(dirs, 'testers', "IS_TESTER")

    self._infiles = ['tests', 'speedtests']
    self.parse_errors = []
    self.test_table = []
    self.num_passed = 0
    self.num_failed = 0
    self.num_skipped = 0
    self.num_pending = 0
    self.host_name = gethostname()
    self.moose_dir = moose_dir
    self.base_dir = os.getcwd()
    self.run_tests_dir = os.path.abspath('.')
    self.results_storage = '.previous_test_results.json'
    self.code = '2d2d6769726c2d6d6f6465'
    self.error_code = 0x0
    self.keyboard_talk = True

    # Assume libmesh is a peer directory to MOOSE if not defined.
    # (dict.has_key() was removed in Python 3; use the 'in' operator.)
    if 'LIBMESH_DIR' in os.environ:
        self.libmesh_dir = os.environ['LIBMESH_DIR']
    else:
        self.libmesh_dir = os.path.join(self.moose_dir, 'libmesh', 'installed')

    self.file = None  # Failed Tests file object
    self.writeFailedTest = None

    # Parse arguments
    self.parseCLArgs(argv)

    checks = {}
    checks['platform'] = util.getPlatforms()
    checks['submodules'] = util.getInitializedSubmodules(self.run_tests_dir)
    checks['exe_objects'] = None      # This gets calculated on demand
    checks['registered_apps'] = None  # This gets extracted on demand

    # The TestHarness doesn't strictly require the existence of libMesh in order to
    # run. Here we allow the user to select whether they want to probe for libMesh
    # configuration options.
    if self.options.skip_config_checks:
        checks['petsc_version'] = 'N/A'
        checks['slepc_version'] = 'N/A'
        for key in ('compiler', 'petsc_version_release', 'library_mode', 'mesh_mode',
                    'dtk', 'unique_ids', 'vtk', 'tecplot', 'dof_id_bytes',
                    'petsc_debug', 'curl', 'threading', 'superlu', 'parmetis',
                    'chaco', 'party', 'ptscotch', 'slepc', 'unique_id', 'cxx11',
                    'asio', 'boost', 'fparser_jit'):
            checks[key] = set(['ALL'])
    else:
        checks['compiler'] = util.getCompilers(self.libmesh_dir)
        checks['petsc_version'] = util.getPetscVersion(self.libmesh_dir)
        checks['slepc_version'] = util.getSlepcVersion(self.libmesh_dir)
        checks['library_mode'] = util.getSharedOption(self.libmesh_dir)
        checks['threading'] = util.getLibMeshThreadingModel(self.libmesh_dir)
        checks['asio'] = util.getIfAsioExists(self.moose_dir)
        # The remaining entries are all plain libMesh configure options.
        for key in ('petsc_version_release', 'mesh_mode', 'dtk', 'unique_ids', 'vtk',
                    'tecplot', 'dof_id_bytes', 'petsc_debug', 'curl', 'superlu',
                    'parmetis', 'chaco', 'party', 'ptscotch', 'slepc', 'unique_id',
                    'cxx11', 'boost', 'fparser_jit'):
            checks[key] = util.getLibMeshConfigOption(self.libmesh_dir, key)

    # Override the MESH_MODE option if using the '--distributed-mesh'
    # or (deprecated) '--parallel-mesh' option.
    if (self.options.parallel_mesh == True or self.options.distributed_mesh == True) or \
       (self.options.cli_args is not None and
            (self.options.cli_args.find('--parallel-mesh') != -1 or
             self.options.cli_args.find('--distributed-mesh') != -1)):
        checks['mesh_mode'] = set(['ALL', 'DISTRIBUTED'])

    checks['method'] = set(['ALL', self.options.method.upper()])

    # This is so we can easily pass checks around to any scheduler plugin
    self.options._checks = checks

    self.initialize(argv, app_name)
    os.chdir(orig_cwd)