def __init__(self, info: py2dmat.Info): super().__init__(info) self._name = "leed" info_s = info.solver # Check keywords def check_keywords(key, segment, registered_list): if (key in registered_list) is False: msg = "Error: {} in {} is not correct keyword.".format( key, segment) raise RuntimeError(msg) keywords_solver = ["name", "config", "reference"] keywords = {} keywords["config"] = ["path_to_solver"] keywords["reference"] = ["path_to_base_dir"] for key in info_s.keys(): check_keywords(key, "solver", keywords_solver) if key == "name": continue for key_child in info_s[key].keys(): check_keywords(key_child, key, keywords[key]) # Set environment p2solver = info_s["config"].get("path_to_solver", "satl2.exe") if os.path.dirname(p2solver) != "": # ignore ENV[PATH] self.path_to_solver = self.root_dir / Path(p2solver).expanduser() else: for P in itertools.chain([self.root_dir], os.environ["PATH"].split(":")): self.path_to_solver = Path(P) / p2solver if os.access(self.path_to_solver, mode=os.X_OK): break if not os.access(self.path_to_solver, mode=os.X_OK): raise exception.InputError( f"ERROR: solver ({p2solver}) is not found") self.path_to_base_dir = info_s["reference"]["path_to_base_dir"] # check files files = [ "exp.d", "rfac.d", "tleed4.i", "tleed5.i", "tleed.o", "short.t" ] for file in files: if not os.path.exists(os.path.join(self.path_to_base_dir, file)): raise exception.InputError( f"ERROR: input file ({file}) is not found in ({self.path_to_base_dir})" ) self.input = Solver.Input(info)
def _read_param(
    self, info: py2dmat.Info, num_walkers: int = 1
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Generate continuous data from info

    Returns
    =======
    initial_list: np.ndarray
        num_walkers \\times dimension array of initial coordinates
    min_list: np.ndarray
        lower bounds (dimension,)
    max_list: np.ndarray
        upper bounds (dimension,)
    unit_list: np.ndarray
        unit lengths (dimension,)
    """
    if "param" not in info.algorithm:
        raise exception.InputError(
            "ERROR: [algorithm.param] is not defined in the input")
    info_param = info.algorithm["param"]

    if "min_list" not in info_param:
        raise exception.InputError(
            "ERROR: algorithm.param.min_list is not defined in the input")
    min_list = np.array(info_param["min_list"])
    if len(min_list) != self.dimension:
        raise exception.InputError(
            f"ERROR: len(min_list) != dimension ({len(min_list)} != {self.dimension})"
        )

    if "max_list" not in info_param:
        raise exception.InputError(
            "ERROR: algorithm.param.max_list is not defined in the input")
    max_list = np.array(info_param["max_list"])
    if len(max_list) != self.dimension:
        raise exception.InputError(
            f"ERROR: len(max_list) != dimension ({len(max_list)} != {self.dimension})"
        )

    unit_list = np.array(info_param.get("unit_list", [1.0] * self.dimension))
    if len(unit_list) != self.dimension:
        raise exception.InputError(
            f"ERROR: len(unit_list) != dimension ({len(unit_list)} != {self.dimension})"
        )

    initial_list = np.array(info_param.get("initial_list", []))
    if initial_list.ndim == 1:
        initial_list = initial_list.reshape(1, -1)
    if initial_list.size == 0:
        # no initial points given: draw them uniformly from [min, max)
        initial_list = min_list + (max_list - min_list) * self.rng.rand(
            num_walkers, self.dimension)
    if initial_list.shape[0] != num_walkers:
        raise exception.InputError(
            f"ERROR: initial_list.shape[0] != num_walkers ({initial_list.shape[0]} != {num_walkers})"
        )
    if initial_list.shape[1] != self.dimension:
        raise exception.InputError(
            f"ERROR: initial_list.shape[1] != dimension ({initial_list.shape[1]} != {self.dimension})"
        )
    return initial_list, min_list, max_list, unit_list
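
# Illustrative sketch (not part of the original source): how the fallback
# initial points are drawn when "initial_list" is absent.  Uses numpy's
# legacy RandomState to mirror self.rng.rand above; the seed and bounds
# are hypothetical.
import numpy as np

rng = np.random.RandomState(12345)
min_list = np.array([-1.0, 0.0])
max_list = np.array([1.0, 2.0])
num_walkers, dimension = 4, 2
initial_list = min_list + (max_list - min_list) * rng.rand(num_walkers, dimension)
# initial_list has shape (4, 2); each column lies in [min, max) of that axis.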
def __init__(self, info: py2dmat.Info,
             runner: Optional[py2dmat.Runner] = None) -> None:
    self.mpicomm = mpi.comm()
    self.mpisize = mpi.size()
    self.mpirank = mpi.rank()
    self.timer = {"init": {}, "prepare": {}, "run": {}, "post": {}}
    self.timer["init"]["total"] = 0.0
    self.status = AlgorithmStatus.INIT

    if "dimension" not in info.base:
        raise exception.InputError(
            "ERROR: base.dimension is not defined in the input")
    try:
        self.dimension = int(str(info.base["dimension"]))
    except ValueError:
        raise exception.InputError(
            "ERROR: base.dimension should be a positive integer")
    if self.dimension < 1:
        raise exception.InputError(
            "ERROR: base.dimension should be a positive integer")

    if "label_list" in info.algorithm:
        label = info.algorithm["label_list"]
        if len(label) != self.dimension:
            raise exception.InputError(
                f"ERROR: len(label_list) != dimension ({len(label)} != {self.dimension})"
            )
        self.label_list = label
    else:
        self.label_list = [f"x{d+1}" for d in range(self.dimension)]

    self.__init_rng(info)

    self.root_dir = info.base["root_dir"]
    self.output_dir = info.base["output_dir"]
    self.proc_dir = self.output_dir / str(self.mpirank)
    self.proc_dir.mkdir(parents=True, exist_ok=True)
    # The filesystem cache may delay the directory becoming visible,
    # especially when mkdir is called just after removing the old one.
    while not self.proc_dir.is_dir():
        time.sleep(0.1)
    if self.mpisize > 1:
        self.mpicomm.Barrier()

    if runner is not None:
        self.set_runner(runner)
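
# Illustrative sketch (not part of the original source): the defaults
# produced above for a 3-dimensional problem on MPI rank 2.  The output
# directory name is a hypothetical placeholder.
from pathlib import Path

dimension, mpirank = 3, 2
label_list = [f"x{d+1}" for d in range(dimension)]   # ['x1', 'x2', 'x3']
proc_dir = Path("output") / str(mpirank)             # output/2, one working dir per rank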
def __init__(self, info):
    self.root_dir = info.base["root_dir"]
    self.output_dir = info.base["output_dir"]

    if "dimension" in info.solver:
        self.dimension = info.solver["dimension"]
    else:
        self.dimension = info.base["dimension"]

    info_s = info.solver

    info_param = info_s.get("param", {})
    v = info_param.setdefault("string_list", ["value_01", "value_02"])
    if len(v) != self.dimension:
        raise exception.InputError(
            f"ERROR: len(string_list) != dimension ({len(v)} != {self.dimension})"
        )
    self.string_list = v

    info_config = info_s.get("config", {})
    self.surface_input_file = Path(
        info_config.get("surface_input_file", "surf.txt"))

    filename = info_config.get("surface_template_file", "template.txt")
    filename = Path(filename).expanduser().resolve()
    self.surface_template_file = self.root_dir / filename
    if not self.surface_template_file.exists():
        raise exception.InputError(
            f"ERROR: surface_template_file ({self.surface_template_file}) does not exist"
        )
    self._check_template()

    filename = info_config.get("bulk_output_file", "bulkP.b")
    filename = Path(filename).expanduser().resolve()
    self.bulk_output_file = self.root_dir / filename
    if not self.bulk_output_file.exists():
        raise exception.InputError(
            f"ERROR: bulk_output_file ({self.bulk_output_file}) does not exist"
        )
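
# Illustrative sketch (not part of the original source): the pathlib joins
# used for the template/bulk files above.  Because resolve() returns an
# absolute path, `root_dir / filename` keeps the resolved path unchanged;
# root_dir would only matter if filename were still relative.  The paths
# here are hypothetical.
from pathlib import Path

root_dir = Path("/work/run01")
filename = Path("template.txt").expanduser().resolve()
joined = root_dir / filename   # equals `filename`, since it is absolute
assert joined == filename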
def _check_template(self) -> None:
    found = [False] * self.dimension
    with open(self.surface_template_file, "r") as file_input:
        for line in file_input:
            for index, placeholder in enumerate(self.string_list):
                if line.find(placeholder) != -1:
                    found[index] = True
    if not np.all(found):
        msg = "ERROR: the following labels do not appear in the template file:"
        for label, f in zip(self.string_list, found):
            if not f:
                msg += "\n"
                msg += label
        raise exception.InputError(msg)
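
# Illustrative sketch (not part of the original source): a tiny check in
# the same spirit as _check_template.  The placeholder names are the
# string_list defaults above; the template content is hypothetical.
string_list = ["value_01", "value_02"]
template_text = """\
2.0 value_01 0.0
1.5 value_02 0.0
"""
found = [placeholder in template_text for placeholder in string_list]
assert all(found)  # every placeholder must appear at least once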
def __init__(self, info: py2dmat.Info): super().__init__(info) self._name = "sim_trhepd_rheed" p2solver = info.solver["config"].get("surface_exec_file", "surf.exe") if os.path.dirname(p2solver) != "": # ignore ENV[PATH] self.path_to_solver = self.root_dir / Path(p2solver).expanduser() else: for P in itertools.chain([self.root_dir], os.environ["PATH"].split(":")): self.path_to_solver = Path(P) / p2solver if os.access(self.path_to_solver, mode=os.X_OK): break if not os.access(self.path_to_solver, mode=os.X_OK): raise exception.InputError( f"ERROR: solver ({p2solver}) is not found") info_config = info.solver.get("config", {}) self.input = Solver.Input(info) self.output = Solver.Output(info) self.result = None
def __init__(self, info: py2dmat.Info):
    super().__init__(info)
    self._name = "sxrd"
    info_s = info.solver

    # Check keywords
    def check_keywords(key, segment, registered_list):
        if key not in registered_list:
            msg = "Error: {} in {} is not a valid keyword.".format(key, segment)
            raise RuntimeError(msg)

    keywords_solver = ["name", "config", "reference", "param"]
    keywords = {}
    keywords["config"] = ["sxrd_exec_file", "bulk_struc_in_file"]
    keywords["reference"] = ["f_in_file"]
    keywords["param"] = [
        "scale_factor",
        "type_vector",
        "opt_scale_factor",
        "domain",
    ]

    for key in info_s.keys():
        check_keywords(key, "solver", keywords_solver)
        if key == "name":
            continue
        for key_child in info_s[key].keys():
            check_keywords(key_child, key, keywords[key])

    # Check keywords of param.domain list
    keywords_domain = ["domain_occupancy", "atom"]
    keywords_atom = [
        "name",
        "pos_center",
        "DWfactor",
        "occupancy",
        "displace_vector",
        "opt_DW",
        "opt_occupancy",
    ]
    for domain in info_s["param"]["domain"]:
        for key_domain in domain.keys():
            check_keywords(key_domain, "domain", keywords_domain)
        for atom in domain["atom"]:
            for key_atom in atom.keys():
                check_keywords(key_atom, "atom", keywords_atom)

    # Set environment
    p2solver = info_s["config"].get("sxrd_exec_file", "sxrdcalc")
    if os.path.dirname(p2solver) != "":
        # ignore ENV[PATH]
        self.path_to_solver = self.root_dir / Path(p2solver).expanduser()
    else:
        for P in itertools.chain([self.root_dir], os.environ["PATH"].split(":")):
            self.path_to_solver = Path(P) / p2solver
            if os.access(self.path_to_solver, mode=os.X_OK):
                break
    if not os.access(self.path_to_solver, mode=os.X_OK):
        raise exception.InputError(f"ERROR: solver ({p2solver}) is not found")

    self.path_to_f_in = info_s["reference"]["f_in_file"]
    self.path_to_bulk = info_s["config"]["bulk_struc_in_file"]
    self.input = Solver.Input(info)
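
# Illustrative sketch (not part of the original source): a minimal
# param.domain entry that satisfies the keyword checks above.  Only keys
# registered in keywords_domain/keywords_atom appear; the atom name and
# numerical values are hypothetical placeholders.
example_domain = [
    {
        "domain_occupancy": 1.0,
        "atom": [
            {
                "name": "Si",                    # hypothetical element
                "pos_center": [0.0, 0.0, 1.0],   # hypothetical coordinates
                "DWfactor": 0.0,
                "occupancy": 1.0,
            }
        ],
    }
]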
def _meshgrid(self, info: py2dmat.Info,
              split: bool = False) -> Tuple[np.ndarray, np.ndarray]:
    """Generate discrete data from info

    Arguments
    =========
    info:
    split:
        if True, splits data into mpisize parts and returns the
        mpirank-th one (default: False)

    Returns
    =======
    grid:
        Ncandidate x dimension
    id_list:
    """
    if "param" not in info.algorithm:
        raise exception.InputError(
            "ERROR: [algorithm.param] is not defined in the input")
    info_param = info.algorithm["param"]

    if "mesh_path" in info_param:
        # load candidate points from a mesh file
        mesh_path = (
            self.root_dir / pathlib.Path(info_param["mesh_path"]).expanduser())
        comments = info_param.get("comments", "#")
        delimiter = info_param.get("delimiter", None)
        skiprows = info_param.get("skiprows", 0)
        data = np.loadtxt(
            mesh_path,
            comments=comments,
            delimiter=delimiter,
            skiprows=skiprows,
        )
        grid = data
    else:
        # build a regular mesh from min_list, max_list, and num_list
        if "min_list" not in info_param:
            raise exception.InputError(
                "ERROR: algorithm.param.min_list is not defined in the input")
        min_list = np.array(info_param["min_list"], dtype=float)
        if len(min_list) != self.dimension:
            raise exception.InputError(
                f"ERROR: len(min_list) != dimension ({len(min_list)} != {self.dimension})"
            )
        if "max_list" not in info_param:
            raise exception.InputError(
                "ERROR: algorithm.param.max_list is not defined in the input")
        max_list = np.array(info_param["max_list"], dtype=float)
        if len(max_list) != self.dimension:
            raise exception.InputError(
                f"ERROR: len(max_list) != dimension ({len(max_list)} != {self.dimension})"
            )
        if "num_list" not in info_param:
            raise exception.InputError(
                "ERROR: algorithm.param.num_list is not defined in the input")
        num_list = np.array(info_param["num_list"], dtype=int)
        if len(num_list) != self.dimension:
            raise exception.InputError(
                f"ERROR: len(num_list) != dimension ({len(num_list)} != {self.dimension})"
            )
        xs = [
            np.linspace(mn, mx, num=nm)
            for mn, mx, nm in zip(min_list, max_list, num_list)
        ]
        data = np.array([g.flatten() for g in np.meshgrid(*xs)]).transpose()
        grid = np.array([np.hstack([i, d]) for i, d in enumerate(data)])

    ncandidates = grid.shape[0]
    ns_total = np.arange(ncandidates)
    if split:
        id_list = np.array_split(ns_total, self.mpisize)[self.mpirank]
        return grid[id_list, :], id_list
    else:
        return grid, ns_total
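
# Illustrative sketch (not part of the original source): the mesh built in
# the else-branch above for a hypothetical 2-dimensional search space.
# Each row of `grid` is [index, x1, x2].
import numpy as np

min_list, max_list, num_list = [0.0, 0.0], [1.0, 2.0], [3, 5]
xs = [np.linspace(mn, mx, num=nm) for mn, mx, nm in zip(min_list, max_list, num_list)]
data = np.array([g.flatten() for g in np.meshgrid(*xs)]).transpose()
grid = np.array([np.hstack([i, d]) for i, d in enumerate(data)])
print(grid.shape)  # (15, 3): 3 * 5 candidate points, index column + 2 coordinates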
def __init__(self, info): if "dimension" in info.solver: self.dimension = info.solver["dimension"] else: self.dimension = info.base["dimension"] info_s = info.solver # solver.config info_config = info_s.get("config", {}) self.surface_output_file = info_config.get("surface_output_file", "surf-bulkP.s") v = info_config.get("calculated_first_line", 5) if not (isinstance(v, int) and v >= 0): raise exception.InputError( "ERROR: calculated_first_line should be non-negative integer" ) self.calculated_first_line = v v = info_config.get("calculated_last_line", 60) if not (isinstance(v, int) and v >= 0): raise exception.InputError( "ERROR: calculated_last_line should be non-negative integer" ) self.calculated_last_line = v v = info_config.get("row_number", 8) if not (isinstance(v, int) and v >= 0): raise exception.InputError( "ERROR: row_number should be non-negative integer") self.row_number = v # solver.post info_post = info_s.get("post", {}) v = info_post.get("normalization", "TOTAL") if v not in ["TOTAL", "MAX"]: raise exception.InputError( "ERROR: normalization must be TOTAL or MAX") self.normalization = v v = info_post.get("Rfactor_type", "A") if v not in ["A", "B"]: raise exception.InputError( "ERROR: Rfactor_type must be A or B") self.Rfactor_type = v v = info_post.get("omega", 0.5) if v <= 0.0: raise exception.InputError("ERROR: omega should be positive") self.omega = v self.remove_work_dir = info_post.get("remove_work_dir", False) # solver.param info_param = info_s.get("param", {}) v = info_param.setdefault("string_list", ["value_01", "value_02"]) if len(v) != self.dimension: raise exception.InputError( f"ERROR: len(string_list) != dimension ({len(v)} != {self.dimension})" ) self.string_list = v v = info_param.get("degree_max", 6.0) self.degree_max = v # solver.reference info_ref = info_s.get("reference", {}) reference_path = info_ref.get("path", "experiment.txt") v = info_ref.setdefault("first", 1) if not (isinstance(v, int) and v >= 0): raise exception.InputError( "ERROR: reference_first_line should be non-negative integer" ) firstline = v v = info_ref.setdefault("last", 56) if not (isinstance(v, int) and v >= firstline): raise exception.InputError( "ERROR: reference_last_line < reference_first_line") lastline = v # Read experiment-data nline = lastline - firstline + 1 self.degree_list = [] self.reference = [] with open(reference_path, "r") as fp: for _ in range(firstline - 1): fp.readline() for _ in range(nline): line = fp.readline() words = line.split() self.degree_list.append(float(words[0])) self.reference.append(float(words[1])) self.reference_norm = 0.0 if self.normalization == "TOTAL": self.reference_norm = sum(self.reference) else: # self.normalization == "MAX": self.reference_norm = max(self.reference) self.reference_normalized = [ I_exp / self.reference_norm for I_exp in self.reference ]