def _sample_phase_constitution(phase_name, phase_constituents, sublattice_dof, comps,
                               variables, sampler, fixed_grid, pdens):
    """
    Sample the internal degrees of freedom of a phase.

    Parameters
    ----------
    phase_name : str
        Name of the phase being sampled (kept for interface compatibility;
        not used directly in this version).
    phase_constituents : sequence
        Per-sublattice collections of species objects; each species is
        expected to expose a ``number_of_atoms`` attribute.
    sublattice_dof : sequence of int
        Number of active species in each sublattice.
    comps : sequence
        Components to consider; each sublattice's constituents are
        intersected with this set.
    variables : sequence
        Ordered site-fraction variables (unused here; kept for interface
        compatibility with callers).
    sampler : callable
        Function sampling constitution space; must accept
        ``(sublattice_dof, pdof=...)`` like ``point_sample``.
    fixed_grid : bool
        If True, also sample ``pdens`` points along the edge between every
        pair of endmembers.
    pdens : int
        Number of points to sample per degree of freedom.

    Returns
    -------
    ndarray of points
    """
    # Eliminate pure vacancy endmembers from the calculation.
    # Collect, per sublattice, the column indices of zero-atom (vacancy) species.
    vacancy_indices = []
    for subl_idx, _ in enumerate(phase_constituents):
        active_in_subl = sorted(set(phase_constituents[subl_idx]).intersection(comps))
        # A species with no atoms is a vacancy; record its position within
        # the sorted active species of this sublattice.
        subl_va_indices = [spec_idx for spec_idx, spec in enumerate(active_in_subl)
                           if spec.number_of_atoms == 0]
        vacancy_indices.append(subl_va_indices)
    if len(vacancy_indices) != len(phase_constituents):
        # Defensive guard: the loop above appends once per sublattice, so the
        # lengths always match; endmember_matrix treats None as "no vacancy info".
        vacancy_indices = None
    # Add all endmembers to guarantee their presence
    points = endmember_matrix(sublattice_dof, vacancy_indices=vacancy_indices)
    if fixed_grid is True:
        # Sample along the edges of the endmembers
        # These constitution space edges are often the equilibrium points!
        em_pairs = list(itertools.combinations(points, 2))
        lingrid = np.linspace(0, 1, pdens)
        extra_points = [first_em * lingrid[np.newaxis].T +
                        second_em * lingrid[::-1][np.newaxis].T
                        for first_em, second_em in em_pairs]
        points = np.concatenate(list(itertools.chain([points], extra_points)))
    # Sample composition space for more points (only if there is any internal
    # degree of freedom, i.e. some sublattice has more than one species)
    if sum(sublattice_dof) > len(sublattice_dof):
        points = np.concatenate((points, sampler(sublattice_dof, pdof=pdens)))
    # Filter out nan's that may have slipped in if we sampled too high a vacancy concentration
    # Issues with this appear to be platform-dependent
    points = points[~np.isnan(points).any(axis=-1)]
    # Ensure that points has the correct dimensions and dtype.
    # Use the builtin float: the np.float alias was removed in NumPy >= 1.20.
    points = np.atleast_2d(np.asarray(points, dtype=float))
    return points
def calculate(dbf, comps, phases, mode=None, output='GM', fake_points=False, **kwargs):
    """
    Sample the property surface of 'output' containing the specified
    components and phases. Model parameters are taken from 'dbf' and any
    state variables (T, P, etc.) can be specified as keyword arguments.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : str or sequence
        Names of components to consider in the calculation.
    phases : str or sequence
        Names of phases to consider in the calculation.
    mode : string, optional
        See 'make_callable' docstring for details.
    output : string, optional
        Model attribute to sample.
    fake_points : bool, optional (Default: False)
        If True, the first few points of the output surface will be fictitious
        points used to define an equilibrium hyperplane guaranteed to be above
        all the other points. This is used for convex hull computations.
    points : ndarray or a dict of phase names to ndarray, optional
        Columns of ndarrays must be internal degrees of freedom (site fractions), sorted.
        If this is not specified, points will be generated automatically.
    pdens : int, a dict of phase names to int, or a seq of both, optional
        Number of points to sample per degree of freedom.
    model : Model, a dict of phase names to Model, or a seq of both, optional
        Model class to use for each phase.

    Returns
    -------
    xray.Dataset of the sampled attribute as a function of state variables

    Raises
    ------
    AttributeError
        If the Model instance for a phase lacks the requested `output` attribute.
    ValueError
        If fake_points is True and output is not 'GM'.

    Examples
    --------
    None yet.
    """
    # Here we check for any keyword arguments that are special, i.e.,
    # there may be keyword arguments that aren't state variables
    pdens_dict = unpack_kwarg(kwargs.pop('pdens', 2000), default_arg=2000)
    points_dict = unpack_kwarg(kwargs.pop('points', None), default_arg=None)
    model_dict = unpack_kwarg(kwargs.pop('model', Model), default_arg=Model)
    callable_dict = unpack_kwarg(kwargs.pop('callables', None), default_arg=None)
    # Allow single strings as shorthand for one-element sequences
    if isinstance(phases, str):
        phases = [phases]
    if isinstance(comps, str):
        comps = [comps]
    components = [x for x in sorted(comps) if not x.startswith('VA')]
    # Convert keyword strings to proper state variable objects
    # If we don't do this, sympy will get confused during substitution
    statevar_dict = collections.OrderedDict((v.StateVariable(key), unpack_condition(value)) \
                                            for (key, value) in sorted(kwargs.items()))
    str_statevar_dict = collections.OrderedDict((str(key), unpack_condition(value)) \
                                                for (key, value) in statevar_dict.items())
    all_phase_data = []
    comp_sets = {}
    largest_energy = -np.inf
    maximum_internal_dof = 0
    # Consider only the active phases
    active_phases = dict((name.upper(), dbf.phases[name.upper()]) \
                         for name in unpack_phases(phases))
    # First pass: instantiate Models and find the maximum internal degrees of
    # freedom across all phases (needed to shape the output arrays uniformly)
    for phase_name, phase_obj in sorted(active_phases.items()):
        # Build the symbolic representation of the energy
        mod = model_dict[phase_name]
        # if this is an object type, we need to construct it
        if isinstance(mod, type):
            try:
                model_dict[phase_name] = mod = mod(dbf, comps, phase_name)
            except DofError:
                # we can't build the specified phase because the
                # specified components aren't found in every sublattice
                # we'll just skip it
                logger.warning("""Suspending specified phase %s due to some sublattices
containing only unspecified components""", phase_name)
                continue
        if points_dict[phase_name] is None:
            try:
                out = getattr(mod, output)
                maximum_internal_dof = max(maximum_internal_dof,
                                           len(out.atoms(v.SiteFraction)))
            except AttributeError:
                raise AttributeError('Missing Model attribute {0} specified for {1}'
                                     .format(output, mod.__class__))
        else:
            maximum_internal_dof = max(maximum_internal_dof,
                                       np.asarray(points_dict[phase_name]).shape[-1])
    # Second pass: build callables, sample points, and compute phase values
    for phase_name, phase_obj in sorted(active_phases.items()):
        try:
            mod = model_dict[phase_name]
        except KeyError:
            continue
        # Construct an ordered list of the variables
        variables, sublattice_dof = generate_dof(phase_obj, mod.components)
        # Build the "fast" representation of that model
        if callable_dict[phase_name] is None:
            out = getattr(mod, output)
            # As a last resort, treat undefined symbols as zero
            # But warn the user when we do this
            # This is consistent with TC's behavior
            undefs = list(out.atoms(Symbol) - out.atoms(v.StateVariable))
            for undef in undefs:
                out = out.xreplace({undef: float(0)})
                logger.warning('Setting undefined symbol %s for phase %s to zero',
                               undef, phase_name)
            comp_sets[phase_name] = make_callable(out, \
                list(statevar_dict.keys()) + variables, mode=mode)
        else:
            comp_sets[phase_name] = callable_dict[phase_name]
        points = points_dict[phase_name]
        if points is None:
            # Eliminate pure vacancy endmembers from the calculation
            vacancy_indices = list()
            for idx, sublattice in enumerate(phase_obj.constituents):
                active_in_subl = sorted(
                    set(phase_obj.constituents[idx]).intersection(comps))
                if 'VA' in active_in_subl and 'VA' in sorted(comps):
                    vacancy_indices.append(active_in_subl.index('VA'))
            if len(vacancy_indices) != len(phase_obj.constituents):
                vacancy_indices = None
            logger.debug('vacancy_indices: %s', vacancy_indices)
            # Add all endmembers to guarantee their presence
            points = endmember_matrix(sublattice_dof,
                                      vacancy_indices=vacancy_indices)
            # Sample composition space for more points
            if sum(sublattice_dof) > len(sublattice_dof):
                points = np.concatenate(
                    (points, point_sample(sublattice_dof,
                                          pdof=pdens_dict[phase_name])))
            # If there are nontrivial sublattices with vacancies in them,
            # generate a set of points where their fraction is zero and renormalize
            for idx, sublattice in enumerate(phase_obj.constituents):
                if 'VA' in set(sublattice) and len(sublattice) > 1:
                    var_idx = variables.index(
                        v.SiteFraction(phase_name, idx, 'VA'))
                    addtl_pts = np.copy(points)
                    # set vacancy fraction to log-spaced between 1e-10 and 1e-6
                    addtl_pts[:, var_idx] = np.power(
                        10.0, -10.0 * (1.0 - addtl_pts[:, var_idx]))
                    # renormalize site fractions
                    cur_idx = 0
                    for ctx in sublattice_dof:
                        end_idx = cur_idx + ctx
                        addtl_pts[:, cur_idx:end_idx] /= \
                            addtl_pts[:, cur_idx:end_idx].sum(axis=1)[:, None]
                        cur_idx = end_idx
                    # add to points matrix
                    points = np.concatenate((points, addtl_pts), axis=0)
        # Filter out nan's that may have slipped in if we sampled too high a vacancy concentration
        # Issues with this appear to be platform-dependent
        points = points[~np.isnan(points).any(axis=-1)]
        # Ensure that points has the correct dimensions and dtype
        # NOTE(review): np.float is a deprecated alias removed in NumPy >= 1.20;
        # this line will raise AttributeError on modern NumPy — confirm pinned version
        points = np.atleast_2d(np.asarray(points, dtype=np.float))
        phase_ds = _compute_phase_values(phase_obj, components, variables,
                                         str_statevar_dict, points,
                                         comp_sets[phase_name], output,
                                         maximum_internal_dof)
        # largest_energy is really only relevant if fake_points is set
        if fake_points:
            largest_energy = max(phase_ds[output].max(), largest_energy)
        all_phase_data.append(phase_ds)
    if fake_points:
        if output != 'GM':
            raise ValueError(
                'fake_points=True should only be used with output=\'GM\'')
        phase_ds = _generate_fake_points(components, statevar_dict,
                                         largest_energy, output,
                                         maximum_internal_dof)
        final_ds = xray.concat(itertools.chain([phase_ds], all_phase_data),
                               dim='points')
    else:
        # speedup for single-phase case (found by profiling)
        if len(all_phase_data) > 1:
            final_ds = xray.concat(all_phase_data, dim='points')
        else:
            final_ds = all_phase_data[0]
    if (not fake_points) and (len(all_phase_data) == 1):
        pass
    else:
        # Reset the points dimension to use a single global index
        final_ds['points'] = np.arange(len(final_ds.points))
    return final_ds
def _sample_phase_constitution(phase_name, phase_constituents, sublattice_dof, comps,
                               variables, sampler, fixed_grid, pdens):
    """
    Sample the internal degrees of freedom of a phase.

    Parameters
    ----------
    phase_name : str
        Name of the phase; used to build SiteFraction variables for the
        vacancy-renormalization step.
    phase_constituents : sequence
        Per-sublattice collections of constituent names (strings, e.g. 'VA').
    sublattice_dof : sequence of int
        Number of active species in each sublattice.
    comps : sequence of str
        Component names to consider; sublattice constituents are intersected
        with this set.
    variables : sequence
        Ordered site-fraction variables matching the columns of the sampled
        points; used to locate vacancy columns.
    sampler : callable
        Function sampling constitution space; must accept
        ``(sublattice_dof, pdof=...)`` like ``point_sample``.
    fixed_grid : bool
        If True, also sample ``pdens`` points along the edge between every
        pair of endmembers.
    pdens : int
        Number of points to sample per degree of freedom.

    Returns
    -------
    ndarray of points
    """
    # Eliminate pure vacancy endmembers from the calculation
    vacancy_indices = list()
    for idx, sublattice in enumerate(phase_constituents):
        active_in_subl = sorted(set(phase_constituents[idx]).intersection(comps))
        if 'VA' in active_in_subl and 'VA' in sorted(comps):
            vacancy_indices.append(active_in_subl.index('VA'))
    if len(vacancy_indices) != len(phase_constituents):
        # Not every sublattice contains a vacancy; endmember_matrix treats
        # None as "no vacancy information"
        vacancy_indices = None
    # Add all endmembers to guarantee their presence
    points = endmember_matrix(sublattice_dof, vacancy_indices=vacancy_indices)
    if fixed_grid is True:
        # Sample along the edges of the endmembers
        # These constitution space edges are often the equilibrium points!
        em_pairs = list(itertools.combinations(points, 2))
        lingrid = np.linspace(0, 1, pdens)
        extra_points = [first_em * lingrid[np.newaxis].T +
                        second_em * lingrid[::-1][np.newaxis].T
                        for first_em, second_em in em_pairs]
        points = np.concatenate(list(itertools.chain([points], extra_points)))
    # Sample composition space for more points (only when some sublattice has
    # more than one active species)
    if sum(sublattice_dof) > len(sublattice_dof):
        points = np.concatenate((points, sampler(sublattice_dof, pdof=pdens)))
    # If there are nontrivial sublattices with vacancies in them,
    # generate a set of points where their fraction is zero and renormalize
    for idx, sublattice in enumerate(phase_constituents):
        if 'VA' in set(sublattice) and len(sublattice) > 1:
            var_idx = variables.index(v.SiteFraction(phase_name, idx, 'VA'))
            addtl_pts = np.copy(points)
            # set vacancy fraction to log-spaced between 1e-10 and 1e-6
            addtl_pts[:, var_idx] = np.power(
                10.0, -10.0 * (1.0 - addtl_pts[:, var_idx]))
            # renormalize site fractions so each sublattice sums to 1 again
            cur_idx = 0
            for ctx in sublattice_dof:
                end_idx = cur_idx + ctx
                addtl_pts[:, cur_idx:end_idx] /= \
                    addtl_pts[:, cur_idx:end_idx].sum(axis=1)[:, None]
                cur_idx = end_idx
            # add to points matrix
            points = np.concatenate((points, addtl_pts), axis=0)
    # Filter out nan's that may have slipped in if we sampled too high a vacancy concentration
    # Issues with this appear to be platform-dependent
    points = points[~np.isnan(points).any(axis=-1)]
    # Ensure that points has the correct dimensions and dtype.
    # Use the builtin float: the np.float alias was removed in NumPy >= 1.20.
    points = np.atleast_2d(np.asarray(points, dtype=float))
    return points
def calculate(dbf, comps, phases, mode=None, output='GM', fake_points=False,
              broadcast=True, tmpman=None, **kwargs):
    """
    Sample the property surface of 'output' containing the specified
    components and phases. Model parameters are taken from 'dbf' and any
    state variables (T, P, etc.) can be specified as keyword arguments.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : str or sequence
        Names of components to consider in the calculation.
    phases : str or sequence
        Names of phases to consider in the calculation.
    mode : string, optional
        See 'make_callable' docstring for details.
    output : string, optional
        Model attribute to sample.
    fake_points : bool, optional (Default: False)
        If True, the first few points of the output surface will be fictitious
        points used to define an equilibrium hyperplane guaranteed to be above
        all the other points. This is used for convex hull computations.
    broadcast : bool, optional
        If True, broadcast given state variable lists against each other to
        create a grid. If False, assume state variables are given as
        equal-length lists.
    tmpman : TempfileManager, optional
        Context manager for temporary file creation during the calculation.
    points : ndarray or a dict of phase names to ndarray, optional
        Columns of ndarrays must be internal degrees of freedom (site fractions), sorted.
        If this is not specified, points will be generated automatically.
    pdens : int, a dict of phase names to int, or a seq of both, optional
        Number of points to sample per degree of freedom.
    model : Model, a dict of phase names to Model, or a seq of both, optional
        Model class to use for each phase.
    sampler : callable, a dict of phase names to callable, or a seq of both, optional
        Function to sample phase constitution space.
        Must have same signature as 'pycalphad.core.utils.point_sample'
    grid_points : bool, a dict of phase names to bool, or a seq of both, optional (Default: True)
        Whether to add evenly spaced points between end-members.
        The density of points is determined by 'pdens'

    Returns
    -------
    Dataset of the sampled attribute as a function of state variables

    Raises
    ------
    ValueError
        If broadcast=False without 'points', or fake_points=True with
        output != 'GM'.
    AttributeError
        If the Model instance for a phase lacks the requested `output` attribute.

    Examples
    --------
    None yet.
    """
    # Here we check for any keyword arguments that are special, i.e.,
    # there may be keyword arguments that aren't state variables
    pdens_dict = unpack_kwarg(kwargs.pop('pdens', 2000), default_arg=2000)
    points_dict = unpack_kwarg(kwargs.pop('points', None), default_arg=None)
    model_dict = unpack_kwarg(kwargs.pop('model', Model), default_arg=Model)
    callable_dict = unpack_kwarg(kwargs.pop('callables', None), default_arg=None)
    sampler_dict = unpack_kwarg(kwargs.pop('sampler', None), default_arg=None)
    fixedgrid_dict = unpack_kwarg(kwargs.pop('grid_points', True), default_arg=True)
    # Allow single strings as shorthand for one-element sequences
    if isinstance(phases, str):
        phases = [phases]
    if isinstance(comps, str):
        comps = [comps]
    if points_dict is None and broadcast is False:
        raise ValueError('The \'points\' keyword argument must be specified if broadcast=False is also given.')
    components = [x for x in sorted(comps) if not x.startswith('VA')]
    # Convert keyword strings to proper state variable objects
    # If we don't do this, sympy will get confused during substitution
    statevar_dict = collections.OrderedDict((v.StateVariable(key), unpack_condition(value)) \
                                            for (key, value) in sorted(kwargs.items()))
    str_statevar_dict = collections.OrderedDict((str(key), unpack_condition(value)) \
                                                for (key, value) in statevar_dict.items())
    all_phase_data = []
    comp_sets = {}
    largest_energy = -np.inf
    maximum_internal_dof = 0
    # Consider only the active phases
    active_phases = dict((name.upper(), dbf.phases[name.upper()]) \
                         for name in unpack_phases(phases))
    # First pass: instantiate Models and find the maximum internal degrees of
    # freedom across all phases (needed to shape output arrays uniformly)
    for phase_name, phase_obj in sorted(active_phases.items()):
        # Build the symbolic representation of the energy
        mod = model_dict[phase_name]
        # if this is an object type, we need to construct it
        if isinstance(mod, type):
            try:
                model_dict[phase_name] = mod = mod(dbf, comps, phase_name)
            except DofError:
                # we can't build the specified phase because the
                # specified components aren't found in every sublattice
                # we'll just skip it
                logger.warning("""Suspending specified phase %s due to some sublattices
containing only unspecified components""", phase_name)
                continue
        if points_dict[phase_name] is None:
            try:
                out = getattr(mod, output)
                maximum_internal_dof = max(maximum_internal_dof,
                                           len(out.atoms(v.SiteFraction)))
            except AttributeError:
                raise AttributeError('Missing Model attribute {0} specified for {1}'
                                     .format(output, mod.__class__))
        else:
            maximum_internal_dof = max(maximum_internal_dof,
                                       np.asarray(points_dict[phase_name]).shape[-1])
    # Second pass: build callables, sample points, and compute phase values
    for phase_name, phase_obj in sorted(active_phases.items()):
        try:
            mod = model_dict[phase_name]
        except KeyError:
            continue
        # Construct an ordered list of the variables
        variables, sublattice_dof = generate_dof(phase_obj, mod.components)
        # Build the "fast" representation of that model
        if callable_dict[phase_name] is None:
            out = getattr(mod, output)
            # As a last resort, treat undefined symbols as zero
            # But warn the user when we do this
            # This is consistent with TC's behavior
            undefs = list(out.atoms(Symbol) - out.atoms(v.StateVariable))
            for undef in undefs:
                out = out.xreplace({undef: float(0)})
                logger.warning('Setting undefined symbol %s for phase %s to zero',
                               undef, phase_name)
            comp_sets[phase_name] = build_functions(out,
                                                    list(statevar_dict.keys()) + variables,
                                                    tmpman=tmpman, include_obj=True,
                                                    include_grad=False, include_hess=False)
        else:
            comp_sets[phase_name] = callable_dict[phase_name]
        points = points_dict[phase_name]
        if points is None:
            # Eliminate pure vacancy endmembers from the calculation
            vacancy_indices = list()
            for idx, sublattice in enumerate(phase_obj.constituents):
                active_in_subl = sorted(set(phase_obj.constituents[idx]).intersection(comps))
                if 'VA' in active_in_subl and 'VA' in sorted(comps):
                    vacancy_indices.append(active_in_subl.index('VA'))
            if len(vacancy_indices) != len(phase_obj.constituents):
                vacancy_indices = None
            logger.debug('vacancy_indices: %s', vacancy_indices)
            # Add all endmembers to guarantee their presence
            points = endmember_matrix(sublattice_dof,
                                      vacancy_indices=vacancy_indices)
            if fixedgrid_dict[phase_name] is True:
                # Sample along the edges of the endmembers
                # These constitution space edges are often the equilibrium points!
                em_pairs = list(itertools.combinations(points, 2))
                for first_em, second_em in em_pairs:
                    extra_points = first_em * np.linspace(0, 1, pdens_dict[phase_name])[np.newaxis].T + \
                        second_em * np.linspace(0, 1, pdens_dict[phase_name])[::-1][np.newaxis].T
                    points = np.concatenate((points, extra_points))
            # Sample composition space for more points
            if sum(sublattice_dof) > len(sublattice_dof):
                sampler = sampler_dict[phase_name]
                if sampler is None:
                    sampler = point_sample
                points = np.concatenate((points,
                                         sampler(sublattice_dof,
                                                 pdof=pdens_dict[phase_name])))
            # If there are nontrivial sublattices with vacancies in them,
            # generate a set of points where their fraction is zero and renormalize
            for idx, sublattice in enumerate(phase_obj.constituents):
                if 'VA' in set(sublattice) and len(sublattice) > 1:
                    var_idx = variables.index(v.SiteFraction(phase_name, idx, 'VA'))
                    addtl_pts = np.copy(points)
                    # set vacancy fraction to log-spaced between 1e-10 and 1e-6
                    addtl_pts[:, var_idx] = np.power(10.0, -10.0*(1.0 - addtl_pts[:, var_idx]))
                    # renormalize site fractions
                    cur_idx = 0
                    for ctx in sublattice_dof:
                        end_idx = cur_idx + ctx
                        addtl_pts[:, cur_idx:end_idx] /= \
                            addtl_pts[:, cur_idx:end_idx].sum(axis=1)[:, None]
                        cur_idx = end_idx
                    # add to points matrix
                    points = np.concatenate((points, addtl_pts), axis=0)
        # Filter out nan's that may have slipped in if we sampled too high a vacancy concentration
        # Issues with this appear to be platform-dependent
        points = points[~np.isnan(points).any(axis=-1)]
        # Ensure that points has the correct dimensions and dtype
        # NOTE(review): np.float is a deprecated alias removed in NumPy >= 1.20;
        # this line will raise AttributeError on modern NumPy — confirm pinned version
        points = np.atleast_2d(np.asarray(points, dtype=np.float))
        phase_ds = _compute_phase_values(phase_obj, components, variables,
                                         str_statevar_dict, points,
                                         comp_sets[phase_name], output,
                                         maximum_internal_dof, broadcast=broadcast)
        # largest_energy is really only relevant if fake_points is set
        if fake_points:
            largest_energy = max(phase_ds[output].max(), largest_energy)
        all_phase_data.append(phase_ds)
    if fake_points:
        if output != 'GM':
            raise ValueError('fake_points=True should only be used with output=\'GM\'')
        phase_ds = _generate_fake_points(components, statevar_dict, largest_energy,
                                         output, maximum_internal_dof, broadcast)
        final_ds = concat(itertools.chain([phase_ds], all_phase_data), dim='points')
    else:
        # speedup for single-phase case (found by profiling)
        if len(all_phase_data) > 1:
            final_ds = concat(all_phase_data, dim='points')
        else:
            final_ds = all_phase_data[0]
    if (not fake_points) and (len(all_phase_data) == 1):
        pass
    else:
        # Reset the points dimension to use a single global index
        final_ds['points'] = np.arange(len(final_ds.points))
    return final_ds
def energy_surf(dbf, comps, phases, mode=None, output='GM', **kwargs):
    """
    Sample the property surface of 'output' containing the specified
    components and phases. Model parameters are taken from 'dbf' and any
    state variables (T, P, etc.) can be specified as keyword arguments.

    .. deprecated::
        Use :func:`pycalphad.calculate` instead.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : list
        Names of components to consider in the calculation.
    phases : list
        Names of phases to consider in the calculation.
    mode : string, optional
        See 'make_callable' docstring for details.
    output : string, optional
        Model attribute to sample.
    pdens : int, a dict of phase names to int, or a list of both, optional
        Number of points to sample per degree of freedom.
    model : Model, a dict of phase names to Model, or a list of both, optional
        Model class to use for each phase.

    Returns
    -------
    DataFrame of the output as a function of composition, temperature, etc.

    Examples
    --------
    None yet.
    """
    warnings.warn('Use pycalphad.calculate() instead', DeprecationWarning, stacklevel=2)
    # Here we check for any keyword arguments that are special, i.e.,
    # there may be keyword arguments that aren't state variables
    pdens_dict = unpack_kwarg(kwargs.pop('pdens', 2000), default_arg=2000)
    model_dict = unpack_kwarg(kwargs.pop('model', Model), default_arg=Model)
    callable_dict = unpack_kwarg(kwargs.pop('callables', None), default_arg=None)
    # Convert keyword strings to proper state variable objects
    # If we don't do this, sympy will get confused during substitution
    statevar_dict = \
        collections.OrderedDict((v.StateVariable(key), value) \
                                for (key, value) in sorted(kwargs.items()))
    # Generate all combinations of state variables for 'map' calculation
    # Wrap single values of state variables in lists
    # Use 'kwargs' because we want state variable names to be stringified
    statevar_values = [_listify(val) for val in statevar_dict.values()]
    statevars_to_map = np.array(list(itertools.product(*statevar_values)))
    # Consider only the active phases
    active_phases = dict((name.upper(), dbf.phases[name.upper()]) \
                         for name in phases)
    comp_sets = {}
    # Construct a list to hold all the data
    all_phase_data = []
    for phase_name, phase_obj in sorted(active_phases.items()):
        # Build the symbolic representation of the energy
        mod = model_dict[phase_name]
        # if this is an object type, we need to construct it
        if isinstance(mod, type):
            try:
                mod = mod(dbf, comps, phase_name)
            except DofError:
                # we can't build the specified phase because the
                # specified components aren't found in every sublattice
                # we'll just skip it
                logger.warning("""Suspending specified phase %s due to some sublattices
containing only unspecified components""", phase_name)
                continue
        try:
            out = getattr(mod, output)
        except AttributeError:
            raise AttributeError('Missing Model attribute {0} specified for {1}'
                                 .format(output, mod.__class__))
        # Construct an ordered list of the variables
        variables, sublattice_dof = generate_dof(phase_obj, mod.components)
        site_ratios = list(phase_obj.sublattices)
        # Build the "fast" representation of that model
        if callable_dict[phase_name] is None:
            # As a last resort, treat undefined symbols as zero
            # But warn the user when we do this
            # This is consistent with TC's behavior
            undefs = list(out.atoms(Symbol) - out.atoms(v.StateVariable))
            for undef in undefs:
                out = out.xreplace({undef: float(0)})
                logger.warning('Setting undefined symbol %s for phase %s to zero',
                               undef, phase_name)
            comp_sets[phase_name] = make_callable(out, \
                list(statevar_dict.keys()) + variables, mode=mode)
        else:
            comp_sets[phase_name] = callable_dict[phase_name]
        # Eliminate pure vacancy endmembers from the calculation
        vacancy_indices = list()
        for idx, sublattice in enumerate(phase_obj.constituents):
            if 'VA' in sorted(sublattice) and 'VA' in sorted(comps):
                vacancy_indices.append(sorted(sublattice).index('VA'))
        if len(vacancy_indices) != len(phase_obj.constituents):
            vacancy_indices = None
        logger.debug('vacancy_indices: %s', vacancy_indices)
        # Add all endmembers to guarantee their presence
        points = endmember_matrix(sublattice_dof,
                                  vacancy_indices=vacancy_indices)
        # Sample composition space for more points
        if sum(sublattice_dof) > len(sublattice_dof):
            points = np.concatenate((points,
                                     point_sample(sublattice_dof,
                                                  pdof=pdens_dict[phase_name])))
        # If there are nontrivial sublattices with vacancies in them,
        # generate a set of points where their fraction is zero and renormalize
        for idx, sublattice in enumerate(phase_obj.constituents):
            if 'VA' in set(sublattice) and len(sublattice) > 1:
                var_idx = variables.index(v.SiteFraction(phase_name, idx, 'VA'))
                addtl_pts = np.copy(points)
                # set vacancy fraction to log-spaced between 1e-10 and 1e-6
                addtl_pts[:, var_idx] = np.power(10.0, -10.0*(1.0 - addtl_pts[:, var_idx]))
                # renormalize site fractions
                cur_idx = 0
                for ctx in sublattice_dof:
                    end_idx = cur_idx + ctx
                    addtl_pts[:, cur_idx:end_idx] /= \
                        addtl_pts[:, cur_idx:end_idx].sum(axis=1)[:, None]
                    cur_idx = end_idx
                # add to points matrix
                points = np.concatenate((points, addtl_pts), axis=0)
        data_dict = {'Phase': phase_name}
        # Broadcast compositions and state variables along orthogonal axes
        # This lets us eliminate an expensive Python loop
        data_dict[output] = \
            comp_sets[phase_name](*itertools.chain(
                np.transpose(statevars_to_map[:, :, np.newaxis], (1, 2, 0)),
                np.transpose(points[:, :, np.newaxis], (1, 0, 2)))).T.ravel()
        # Save state variables, with values indexed appropriately
        statevar_vals = np.repeat(statevars_to_map, len(points), axis=0).T
        data_dict.update({str(statevar): vals for statevar, vals \
                          in zip(statevar_dict.keys(), statevar_vals)})
        # Map the internal degrees of freedom to global coordinates
        # Normalize site ratios by the sum of site ratios times a factor
        # related to the site fraction of vacancies
        site_ratio_normalization = np.zeros(len(points))
        for idx, sublattice in enumerate(phase_obj.constituents):
            vacancy_column = np.ones(len(points))
            if 'VA' in set(sublattice):
                var_idx = variables.index(v.SiteFraction(phase_name, idx, 'VA'))
                vacancy_column -= points[:, var_idx]
            site_ratio_normalization += site_ratios[idx] * vacancy_column
        for comp in sorted(comps):
            if comp == 'VA':
                continue
            # avector selects the site-fraction columns belonging to 'comp',
            # weighted by each column's sublattice site ratio
            avector = [float(vxx.species == comp) * \
                site_ratios[vxx.sublattice_index] for vxx in variables]
            data_dict['X('+comp+')'] = np.tile(np.divide(np.dot(
                points[:, :], avector), site_ratio_normalization),
                                               statevars_to_map.shape[0])
        # Copy coordinate information into data_dict
        # TODO: Is there a more memory-efficient way to deal with this?
        # Perhaps with hierarchical indexing...
        var_fmt = 'Y({0},{1},{2})'
        data_dict.update({var_fmt.format(vxx.phase_name, vxx.sublattice_index,
                                         vxx.species): \
            np.tile(vals, statevars_to_map.shape[0]) \
            for vxx, vals in zip(variables, points.T)})
        all_phase_data.append(pd.DataFrame(data_dict))
    # all_phases_data now contains energy surface information for the system
    return pd.concat(all_phase_data, axis=0, join='outer', \
        ignore_index=True, verify_integrity=False)
def _sample_phase_constitution(model, sampler, fixed_grid, pdens):
    """
    Sample the internal degrees of freedom of a phase.

    Parameters
    ----------
    model : Model
        Instance of a pycalphad Model
    sampler : Callable
        Callable returning an ArrayLike of points
    fixed_grid : bool
        If True, sample pdens points between each pair of endmembers
    pdens : int
        Number of points to sample in each sampled dimension

    Returns
    -------
    ndarray of points
    """
    # Tolerance for treating an endmember's net charge as zero
    ALLOWED_CHARGE = 1E-10
    # Eliminate pure vacancy endmembers from the calculation:
    # per sublattice, record the column indices of zero-atom (vacancy) species
    vacancy_indices = []
    for sublattice in model.constituents:
        subl_va_indices = [idx for idx, spec in enumerate(sorted(set(sublattice)))
                           if spec.number_of_atoms == 0]
        vacancy_indices.append(subl_va_indices)
    if len(vacancy_indices) != len(model.constituents):
        # Defensive guard: the loop appends once per sublattice, so lengths
        # always match; endmember_matrix treats None as "no vacancy info"
        vacancy_indices = None
    sublattice_dof = [len(subl) for subl in model.constituents]
    # Add all endmembers to guarantee their presence
    points = endmember_matrix(sublattice_dof, vacancy_indices=vacancy_indices)
    site_ratios = model.site_ratios
    constant_site_ratios = True
    # The only implementation with variable site ratios is the two-sublattice ionic liquid.
    # This check is convenient for detecting 2SL ionic liquids without keeping other state.
    for sr in site_ratios:
        try:
            float(sr)
        except (TypeError, RuntimeError):
            constant_site_ratios = False
    # Per-column charge contribution: species charge weighted by site ratio
    species_charge = []
    for sublattice in range(len(model.constituents)):
        for species in sorted(model.constituents[sublattice]):
            species_charge.append(species.charge * site_ratios[sublattice])
    species_charge = np.array(species_charge)
    charge_constrained_space = constant_site_ratios and np.any(species_charge != 0)
    # We differentiate between (specifically) charge balance and general linear constraints for future use
    # This simplifies adding future constraints, such as disordered configuration sampling, or site fraction conditions
    # Note that if a phase only consists of site fraction balance constraints,
    # we do not consider that 'linearly constrained' for the purposes of sampling,
    # since the default sampler handles that case with an efficient method.
    linearly_constrained_space = charge_constrained_space
    if charge_constrained_space:
        endmembers = points
        Q = np.dot(endmembers, species_charge)
        # Sort endmembers by their charge
        charge_neutral_endmember_idxs = []
        charge_positive_endmember_idxs = []
        charge_negative_endmember_idxs = []
        for em_idx in range(endmembers.shape[0]):
            if Q[em_idx] > ALLOWED_CHARGE:
                charge_positive_endmember_idxs.append(em_idx)
            elif Q[em_idx] < -ALLOWED_CHARGE:
                charge_negative_endmember_idxs.append(em_idx)
            else:
                charge_neutral_endmember_idxs.append(em_idx)
        # Find all endmember pairs between the positively and negatively
        # charged endmembers; each pair mixes to a charge-neutral point
        em_pts = [endmembers[em_idx] for em_idx in charge_neutral_endmember_idxs]
        for pos_em_idx, neg_em_idx in itertools.product(
                charge_positive_endmember_idxs, charge_negative_endmember_idxs):
            # Solve equation: Q_{pos}*x + Q_{neg}(1-x) = 0
            x = -Q[neg_em_idx] / (Q[pos_em_idx] - Q[neg_em_idx])
            em_pts.append(endmembers[pos_em_idx] * x +
                          endmembers[neg_em_idx] * (1 - x))
        # Charge neutral endmembers and mixed pseudo-endmembers
        points = np.asarray(em_pts)
    if (fixed_grid is True) and not linearly_constrained_space:
        # Sample along the edges of the endmembers
        # These constitution space edges are often the equilibrium points!
        em_pairs = list(itertools.combinations(points, 2))
        lingrid = np.linspace(0, 1, pdens)
        extra_points = [first_em * lingrid[np.newaxis].T +
                        second_em * lingrid[::-1][np.newaxis].T
                        for first_em, second_em in em_pairs]
        points = np.concatenate(list(itertools.chain([points], extra_points)))
    # Sample composition space for more points
    if sum(sublattice_dof) > len(sublattice_dof):
        if linearly_constrained_space:
            # construct constraint Jacobian for this phase
            # Model technically already does this so it would be better to reuse that functionality
            # number of sublattices, plus charge balance
            num_constraints = len(sublattice_dof) + 1
            constraint_jac = np.zeros((num_constraints, points.shape[-1]))
            constraint_rhs = np.zeros(num_constraints)
            # site fraction balance
            dof_idx = 0
            constraint_idx = 0
            for subl_dof in sublattice_dof:
                constraint_jac[constraint_idx, dof_idx:dof_idx + subl_dof] = 1
                constraint_rhs[constraint_idx] = 1
                constraint_idx += 1
                dof_idx += subl_dof
            # charge balance
            constraint_jac[constraint_idx, :] = species_charge
            constraint_rhs[constraint_idx] = 0
            # Sample additional points which obey the constraints
            # Mean of pseudo-endmembers is feasible by convexity of the space
            initial_point = np.mean(points, axis=0)
            num_points = (pdens**2) * (constraint_jac.shape[1] -
                                       constraint_jac.shape[0])
            extra_points = hr_point_sample(constraint_jac, constraint_rhs,
                                           initial_point, num_points)
            points = np.concatenate((points, extra_points))
            # NOTE: assert is stripped under `python -O`; this is a sanity
            # check that sampled points satisfy the linear constraints
            assert np.max(np.abs(constraint_jac.dot(points.T).T -
                                 constraint_rhs)) < 1e-6
            if points.shape[0] == 0:
                warnings.warn(
                    f'{model.phase_name} has zero feasible configurations under the given conditions'
                )
        else:
            points = np.concatenate((points, sampler(sublattice_dof, pdof=pdens)))
    # Filter out nan's that may have slipped in if we sampled too high a vacancy concentration
    # Issues with this appear to be platform-dependent
    points = points[~np.isnan(points).any(axis=-1)]
    # Ensure that points has the correct dimensions and dtype.
    # Use the builtin float: the np.float_ alias was removed in NumPy 2.0.
    points = np.atleast_2d(np.asarray(points, dtype=float))
    return points