def calculate(dbf, comps, phases, mode=None, output='GM', fake_points=False, **kwargs):
    """
    Sample the property surface of 'output' containing the specified
    components and phases. Model parameters are taken from 'dbf' and any
    state variables (T, P, etc.) can be specified as keyword arguments.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : str or sequence
        Names of components to consider in the calculation.
    phases : str or sequence
        Names of phases to consider in the calculation.
    mode : string, optional
        See 'make_callable' docstring for details.
    output : string, optional
        Model attribute to sample.
    fake_points : bool, optional (Default: False)
        If True, the first few points of the output surface will be fictitious
        points used to define an equilibrium hyperplane guaranteed to be above
        all the other points. This is used for convex hull computations.
    points : ndarray or a dict of phase names to ndarray, optional
        Columns of ndarrays must be internal degrees of freedom (site fractions), sorted.
        If this is not specified, points will be generated automatically.
    pdens : int, a dict of phase names to int, or a seq of both, optional
        Number of points to sample per degree of freedom.
    model : Model, a dict of phase names to Model, or a seq of both, optional
        Model class to use for each phase.

    Returns
    -------
    xray.Dataset of the sampled attribute as a function of state variables

    Examples
    --------
    None yet.
    """
    # Here we check for any keyword arguments that are special, i.e.,
    # there may be keyword arguments that aren't state variables
    pdens_dict = unpack_kwarg(kwargs.pop('pdens', 2000), default_arg=2000)
    points_dict = unpack_kwarg(kwargs.pop('points', None), default_arg=None)
    model_dict = unpack_kwarg(kwargs.pop('model', Model), default_arg=Model)
    callable_dict = unpack_kwarg(kwargs.pop('callables', None), default_arg=None)
    if isinstance(phases, str):
        phases = [phases]
    if isinstance(comps, str):
        comps = [comps]
    components = [x for x in sorted(comps) if not x.startswith('VA')]

    # Convert keyword strings to proper state variable objects
    # If we don't do this, sympy will get confused during substitution
    statevar_dict = collections.OrderedDict((v.StateVariable(key), unpack_condition(value)) \
                                            for (key, value) in sorted(kwargs.items()))
    str_statevar_dict = collections.OrderedDict((str(key), unpack_condition(value)) \
                                                for (key, value) in statevar_dict.items())
    all_phase_data = []
    comp_sets = {}
    largest_energy = -np.inf
    maximum_internal_dof = 0

    # Consider only the active phases
    active_phases = dict((name.upper(), dbf.phases[name.upper()]) \
        for name in unpack_phases(phases))

    for phase_name, phase_obj in sorted(active_phases.items()):
        # Build the symbolic representation of the energy
        mod = model_dict[phase_name]
        # if this is an object type, we need to construct it
        if isinstance(mod, type):
            try:
                model_dict[phase_name] = mod = mod(dbf, comps, phase_name)
            except DofError:
                # we can't build the specified phase because the
                # specified components aren't found in every sublattice
                # we'll just skip it
                logger.warning("""Suspending specified phase %s due to some
                sublattices containing only unspecified components""", phase_name)
                continue
        if points_dict[phase_name] is None:
            try:
                out = getattr(mod, output)
                maximum_internal_dof = max(maximum_internal_dof, len(out.atoms(v.SiteFraction)))
            except AttributeError:
                raise AttributeError('Missing Model attribute {0} specified for {1}'
                                     .format(output, mod.__class__))
        else:
            maximum_internal_dof = max(maximum_internal_dof,
                                       np.asarray(points_dict[phase_name]).shape[-1])

    for phase_name, phase_obj in sorted(active_phases.items()):
        try:
            mod = model_dict[phase_name]
        except KeyError:
            continue
        # Construct an ordered list of the variables
        variables, sublattice_dof = generate_dof(phase_obj, mod.components)

        # Build the "fast" representation of that model
        if callable_dict[phase_name] is None:
            out = getattr(mod, output)
            # As a last resort, treat undefined symbols as zero
            # But warn the user when we do this
            # This is consistent with TC's behavior
            undefs = list(out.atoms(Symbol) - out.atoms(v.StateVariable))
            for undef in undefs:
                out = out.xreplace({undef: float(0)})
                logger.warning('Setting undefined symbol %s for phase %s to zero',
                               undef, phase_name)
            comp_sets[phase_name] = make_callable(out, \
                list(statevar_dict.keys()) + variables, mode=mode)
        else:
            comp_sets[phase_name] = callable_dict[phase_name]

        points = points_dict[phase_name]
        if points is None:
            # Eliminate pure vacancy endmembers from the calculation
            vacancy_indices = list()
            for idx, sublattice in enumerate(phase_obj.constituents):
                active_in_subl = sorted(set(phase_obj.constituents[idx]).intersection(comps))
                if 'VA' in active_in_subl and 'VA' in sorted(comps):
                    vacancy_indices.append(active_in_subl.index('VA'))
            if len(vacancy_indices) != len(phase_obj.constituents):
                vacancy_indices = None
            logger.debug('vacancy_indices: %s', vacancy_indices)
            # Add all endmembers to guarantee their presence
            points = endmember_matrix(sublattice_dof,
                                      vacancy_indices=vacancy_indices)

            # Sample composition space for more points
            if sum(sublattice_dof) > len(sublattice_dof):
                points = np.concatenate((points,
                                         point_sample(sublattice_dof,
                                                      pdof=pdens_dict[phase_name])))

            # If there are nontrivial sublattices with vacancies in them,
            # generate a set of points where their fraction is zero and renormalize
            for idx, sublattice in enumerate(phase_obj.constituents):
                if 'VA' in set(sublattice) and len(sublattice) > 1:
                    var_idx = variables.index(v.SiteFraction(phase_name, idx, 'VA'))
                    addtl_pts = np.copy(points)
                    # set vacancy fraction to log-spaced between 1e-10 and 1e-6
                    addtl_pts[:, var_idx] = np.power(10.0, -10.0 * (1.0 - addtl_pts[:, var_idx]))
                    # renormalize site fractions
                    cur_idx = 0
                    for ctx in sublattice_dof:
                        end_idx = cur_idx + ctx
                        addtl_pts[:, cur_idx:end_idx] /= \
                            addtl_pts[:, cur_idx:end_idx].sum(axis=1)[:, None]
                        cur_idx = end_idx
                    # add to points matrix
                    points = np.concatenate((points, addtl_pts), axis=0)

            # Filter out nan's that may have slipped in if we sampled too high a vacancy concentration
            # Issues with this appear to be platform-dependent
            points = points[~np.isnan(points).any(axis=-1)]
        # Ensure that points has the correct dimensions and dtype
        points = np.atleast_2d(np.asarray(points, dtype=np.float64))

        phase_ds = _compute_phase_values(phase_obj, components, variables, str_statevar_dict,
                                         points, comp_sets[phase_name], output,
                                         maximum_internal_dof)
        # largest_energy is really only relevant if fake_points is set
        if fake_points:
            largest_energy = max(phase_ds[output].max(), largest_energy)
        all_phase_data.append(phase_ds)

    if fake_points:
        if output != 'GM':
            raise ValueError('fake_points=True should only be used with output=\'GM\'')
        phase_ds = _generate_fake_points(components, statevar_dict, largest_energy, output,
                                         maximum_internal_dof)
        final_ds = xray.concat(itertools.chain([phase_ds], all_phase_data),
                               dim='points')
    else:
        # speedup for single-phase case (found by profiling)
        if len(all_phase_data) > 1:
            final_ds = xray.concat(all_phase_data, dim='points')
        else:
            final_ds = all_phase_data[0]

    if (not fake_points) and (len(all_phase_data) == 1):
        pass
    else:
        # Reset the points dimension to use a single global index
        final_ds['points'] = np.arange(len(final_ds.points))
    return final_ds
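
# Usage sketch for the calculate() above (illustrative only, not part of the module:
# the TDB file name and the Al-Zn component/phase names are assumptions):
#
#     from pycalphad import Database, calculate
#     dbf = Database('alzn_mey.tdb')     # hypothetical thermodynamic database file
#     # Sample the molar Gibbs energy surface of the liquid over a temperature range;
#     # T and P are passed as state-variable keyword arguments
#     result = calculate(dbf, ['AL', 'ZN', 'VA'], 'LIQUID', T=(600, 1000, 50), P=101325)
#     print(result.GM)                   # xray.Dataset variable holding the sampled energies
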
def calculate(dbf, comps, phases, mode=None, output='GM', fake_points=False,
              broadcast=True, parameters=None, to_xarray=True, **kwargs):
    """
    Sample the property surface of 'output' containing the specified
    components and phases. Model parameters are taken from 'dbf' and any
    state variables (T, P, etc.) can be specified as keyword arguments.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : str or sequence
        Names of components to consider in the calculation.
    phases : str or sequence
        Names of phases to consider in the calculation.
    mode : string, optional
        See 'make_callable' docstring for details.
    output : string, optional
        Model attribute to sample.
    fake_points : bool, optional (Default: False)
        If True, the first few points of the output surface will be fictitious
        points used to define an equilibrium hyperplane guaranteed to be above
        all the other points. This is used for convex hull computations.
    broadcast : bool, optional
        If True, broadcast given state variable lists against each other to create a grid.
        If False, assume state variables are given as equal-length lists.
    points : ndarray or a dict of phase names to ndarray, optional
        Columns of ndarrays must be internal degrees of freedom (site fractions), sorted.
        If this is not specified, points will be generated automatically.
    pdens : int, a dict of phase names to int, or a seq of both, optional
        Number of points to sample per degree of freedom.
        Default: 2000; Default when called from equilibrium(): 500
    model : Model, a dict of phase names to Model, or a seq of both, optional
        Model class to use for each phase.
    sampler : callable, a dict of phase names to callable, or a seq of both, optional
        Function to sample phase constitution space.
        Must have same signature as 'pycalphad.core.utils.point_sample'
    grid_points : bool, a dict of phase names to bool, or a seq of both, optional (Default: True)
        Whether to add evenly spaced points between end-members.
        The density of points is determined by 'pdens'
    parameters : dict, optional
        Maps SymPy Symbol to numbers, for overriding the values of parameters in the Database.
    callables : dict, optional
        Pre-computed callable functions; passed through to build_phase_records().
    to_xarray : bool, optional (Default: True)
        If True, return an xarray Dataset; otherwise return the internal LightDataset.

    Returns
    -------
    Dataset of the sampled attribute as a function of state variables

    Examples
    --------
    None yet.
    """
    # Here we check for any keyword arguments that are special, i.e.,
    # there may be keyword arguments that aren't state variables
    pdens_dict = unpack_kwarg(kwargs.pop('pdens', 2000), default_arg=2000)
    points_dict = unpack_kwarg(kwargs.pop('points', None), default_arg=None)
    callables = kwargs.pop('callables', {})
    sampler_dict = unpack_kwarg(kwargs.pop('sampler', None), default_arg=None)
    fixedgrid_dict = unpack_kwarg(kwargs.pop('grid_points', True), default_arg=True)
    parameters = parameters or dict()
    if isinstance(parameters, dict):
        parameters = OrderedDict(sorted(parameters.items(), key=str))
    if isinstance(phases, str):
        phases = [phases]
    if isinstance(comps, (str, v.Species)):
        comps = [comps]
    comps = sorted(unpack_components(dbf, comps))
    if points_dict is None and broadcast is False:
        raise ValueError('The \'points\' keyword argument must be specified if broadcast=False is also given.')
    nonvacant_components = [x for x in sorted(comps) if x.number_of_atoms > 0]

    all_phase_data = []
    largest_energy = 1e10

    # Consider only the active phases
    list_of_possible_phases = filter_phases(dbf, comps)
    if len(list_of_possible_phases) == 0:
        raise ConditionError('There are no phases in the Database that can be active with components {0}'
                             .format(comps))
    active_phases = {name: dbf.phases[name] for name in filter_phases(dbf, comps, phases)}
    if len(active_phases) == 0:
        raise ConditionError('None of the passed phases ({0}) are active. List of possible phases: {1}.'
                             .format(phases, list_of_possible_phases))

    models = instantiate_models(dbf, comps, list(active_phases.keys()),
                                model=kwargs.pop('model', None), parameters=parameters)

    if isinstance(output, (list, tuple, set)):
        raise NotImplementedError('Only one property can be specified in calculate() at a time')
    output = output if output is not None else 'GM'

    # Implicitly add 'N' state variable as a string to keyword arguments if it's not passed
    if kwargs.get('N') is None:
        kwargs['N'] = 1
    if np.any(np.array(kwargs['N']) != 1):
        raise ConditionError('N!=1 is not yet supported, got N={}'.format(kwargs['N']))

    # TODO: conditions dict of StateVariable instances should become part of the calculate API
    statevar_strings = [sv for sv in kwargs.keys() if getattr(v, sv, None) is not None]
    # If we don't do this, sympy will get confused during substitution
    statevar_dict = dict((v.StateVariable(key), unpack_condition(value))
                         for key, value in kwargs.items() if key in statevar_strings)
    # Sort after default state variable check to fix gh-116
    statevar_dict = collections.OrderedDict(sorted(statevar_dict.items(), key=lambda x: str(x[0])))
    phase_records = build_phase_records(dbf, comps, active_phases, statevar_dict,
                                        models=models, parameters=parameters,
                                        output=output, callables=callables,
                                        build_gradients=False, build_hessians=False,
                                        verbose=kwargs.pop('verbose', False))
    str_statevar_dict = collections.OrderedDict((str(key), unpack_condition(value)) \
                                                for (key, value) in statevar_dict.items())
    maximum_internal_dof = max(len(models[phase_name].site_fractions) for phase_name in active_phases)
    for phase_name, phase_obj in sorted(active_phases.items()):
        mod = models[phase_name]
        phase_record = phase_records[phase_name]
        points = points_dict[phase_name]
        variables, sublattice_dof = generate_dof(phase_obj, mod.components)
        if points is None:
            points = _sample_phase_constitution(phase_name, phase_obj.constituents,
                                                sublattice_dof, comps, tuple(variables),
                                                sampler_dict[phase_name] or point_sample,
                                                fixedgrid_dict[phase_name],
                                                pdens_dict[phase_name])
        points = np.atleast_2d(points)

        fp = fake_points and (phase_name == sorted(active_phases.keys())[0])
        phase_ds = _compute_phase_values(nonvacant_components, str_statevar_dict,
                                         points, phase_record, output,
                                         maximum_internal_dof, broadcast=broadcast,
                                         largest_energy=float(largest_energy), fake_points=fp)
        all_phase_data.append(phase_ds)

    # speedup for single-phase case (found by profiling)
    if len(all_phase_data) > 1:
        concatenated_coords = all_phase_data[0].coords

        data_vars = all_phase_data[0].data_vars
        concatenated_data_vars = {}
        for var in data_vars.keys():
            data_coords = data_vars[var][0]
            points_idx = data_coords.index('points')  # concatenation axis
            arrs = []
            for phase_data in all_phase_data:
                arrs.append(getattr(phase_data, var))
            concat_data = np.concatenate(arrs, axis=points_idx)
            concatenated_data_vars[var] = (data_coords, concat_data)
        final_ds = LightDataset(data_vars=concatenated_data_vars, coords=concatenated_coords)
    else:
        final_ds = all_phase_data[0]

    if to_xarray:
        return final_ds.get_dataset()
    else:
        return final_ds
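
# Usage sketch for the calculate() above (illustrative only; the TDB file name,
# component names, and the 'VV0001' parameter symbol are assumptions):
#
#     from sympy import Symbol
#     from pycalphad import Database, calculate
#     dbf = Database('cuni.tdb')                              # hypothetical database
#     # Default broadcast=True builds a grid from the T range and scalar P
#     grid = calculate(dbf, ['CU', 'NI', 'VA'], ['FCC_A1', 'LIQUID'],
#                      T=(300, 2000, 100), P=101325, pdens=500)
#     # Override a database parameter symbol without editing the TDB;
#     # to_xarray=False returns the internal LightDataset instead of an xarray Dataset
#     tweaked = calculate(dbf, ['CU', 'NI', 'VA'], 'LIQUID', T=1500, P=101325,
#                         parameters={Symbol('VV0001'): -20000}, to_xarray=False)
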
def calculate(dbf, comps, phases, mode=None, output='GM', fake_points=False,
              broadcast=True, parameters=None, **kwargs):
    """
    Sample the property surface of 'output' containing the specified
    components and phases. Model parameters are taken from 'dbf' and any
    state variables (T, P, etc.) can be specified as keyword arguments.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : str or sequence
        Names of components to consider in the calculation.
    phases : str or sequence
        Names of phases to consider in the calculation.
    mode : string, optional
        See 'make_callable' docstring for details.
    output : string, optional
        Model attribute to sample.
    fake_points : bool, optional (Default: False)
        If True, the first few points of the output surface will be fictitious
        points used to define an equilibrium hyperplane guaranteed to be above
        all the other points. This is used for convex hull computations.
    broadcast : bool, optional
        If True, broadcast given state variable lists against each other to create a grid.
        If False, assume state variables are given as equal-length lists.
    points : ndarray or a dict of phase names to ndarray, optional
        Columns of ndarrays must be internal degrees of freedom (site fractions), sorted.
        If this is not specified, points will be generated automatically.
    pdens : int, a dict of phase names to int, or a seq of both, optional
        Number of points to sample per degree of freedom.
        Default: 2000; Default when called from equilibrium(): 500
    model : Model, a dict of phase names to Model, or a seq of both, optional
        Model class to use for each phase.
    sampler : callable, a dict of phase names to callable, or a seq of both, optional
        Function to sample phase constitution space.
        Must have same signature as 'pycalphad.core.utils.point_sample'
    grid_points : bool, a dict of phase names to bool, or a seq of both, optional (Default: True)
        Whether to add evenly spaced points between end-members.
        The density of points is determined by 'pdens'
    parameters : dict, optional
        Maps SymPy Symbol to numbers, for overriding the values of parameters in the Database.

    Returns
    -------
    Dataset of the sampled attribute as a function of state variables

    Examples
    --------
    None yet.
    """
    # Here we check for any keyword arguments that are special, i.e.,
    # there may be keyword arguments that aren't state variables
    pdens_dict = unpack_kwarg(kwargs.pop('pdens', 2000), default_arg=2000)
    points_dict = unpack_kwarg(kwargs.pop('points', None), default_arg=None)
    model_dict = unpack_kwarg(kwargs.pop('model', Model), default_arg=Model)
    callable_dict = unpack_kwarg(kwargs.pop('callables', None), default_arg=None)
    mass_dict = unpack_kwarg(kwargs.pop('massfuncs', None), default_arg=None)
    sampler_dict = unpack_kwarg(kwargs.pop('sampler', None), default_arg=None)
    fixedgrid_dict = unpack_kwarg(kwargs.pop('grid_points', True), default_arg=True)
    parameters = parameters or dict()
    if isinstance(parameters, dict):
        parameters = OrderedDict(sorted(parameters.items(), key=str))
    param_symbols = tuple(parameters.keys())
    param_values = np.atleast_1d(np.array(list(parameters.values()), dtype=np.float64))
    if isinstance(phases, str):
        phases = [phases]
    if isinstance(comps, (str, v.Species)):
        comps = [comps]
    comps = sorted(unpack_components(dbf, comps))
    if points_dict is None and broadcast is False:
        raise ValueError('The \'points\' keyword argument must be specified if broadcast=False is also given.')
    nonvacant_components = [x for x in sorted(comps) if x.number_of_atoms > 0]

    # Convert keyword strings to proper state variable objects
    # If we don't do this, sympy will get confused during substitution
    statevar_dict = dict((v.StateVariable(key), unpack_condition(value)) for (key, value) in kwargs.items())
    # XXX: CompiledModel assumes P, T are the only state variables
    if statevar_dict.get(v.P, None) is None:
        statevar_dict[v.P] = 101325
    if statevar_dict.get(v.T, None) is None:
        statevar_dict[v.T] = 300
    # Sort after default state variable check to fix gh-116
    statevar_dict = collections.OrderedDict(sorted(statevar_dict.items(), key=lambda x: str(x[0])))
    str_statevar_dict = collections.OrderedDict((str(key), unpack_condition(value)) \
                                                for (key, value) in statevar_dict.items())
    all_phase_data = []
    comp_sets = {}
    largest_energy = 1e30
    maximum_internal_dof = 0

    # Consider only the active phases
    active_phases = dict((name.upper(), dbf.phases[name.upper()]) \
        for name in unpack_phases(phases))

    for phase_name, phase_obj in sorted(active_phases.items()):
        # Build the symbolic representation of the energy
        mod = model_dict[phase_name]
        # if this is an object type, we need to construct it
        if isinstance(mod, type):
            try:
                model_dict[phase_name] = mod = mod(dbf, comps, phase_name, parameters=parameters)
            except DofError:
                # we can't build the specified phase because the
                # specified components aren't found in every sublattice
                # we'll just skip it
                warnings.warn("""Suspending specified phase {} due to some
                sublattices containing only unspecified components""".format(phase_name))
                continue
        if points_dict[phase_name] is None:
            maximum_internal_dof = max(maximum_internal_dof, sum(len(x) for x in mod.constituents))
        else:
            maximum_internal_dof = max(maximum_internal_dof, np.asarray(points_dict[phase_name]).shape[-1])

    for phase_name, phase_obj in sorted(active_phases.items()):
        try:
            mod = model_dict[phase_name]
        except KeyError:
            continue
        # this is a phase model we couldn't construct for whatever reason; skip it
        if isinstance(mod, type):
            continue
        # Construct an ordered list of the variables
        variables, sublattice_dof = generate_dof(phase_obj, mod.components)
        # Build the "fast" representation of that model
        if callable_dict[phase_name] is None:
            try:
                out = getattr(mod, output)
            except AttributeError:
                raise AttributeError('Missing Model attribute {0} specified for {1}'
                                     .format(output, mod.__class__))
            # As a last resort, treat undefined symbols as zero
            # But warn the user when we do this
            # This is consistent with TC's behavior
            undefs = list(out.atoms(Symbol) - out.atoms(v.StateVariable))
            for undef in undefs:
                out = out.xreplace({undef: float(0)})
                warnings.warn('Setting undefined symbol {0} for phase {1} to zero'.format(undef, phase_name))
            comp_sets[phase_name] = build_functions(out, list(statevar_dict.keys()) + variables,
                                                    include_obj=True, include_grad=False,
                                                    parameters=param_symbols)
        else:
            comp_sets[phase_name] = callable_dict[phase_name]
        if mass_dict[phase_name] is None:
            pure_elements = [spec for spec in nonvacant_components
                             if (len(spec.constituents.keys()) == 1 and
                                 list(spec.constituents.keys())[0] == spec.name)]
            # TODO: In principle, we should also check for undefs in mod.moles()
            mass_dict[phase_name] = [build_functions(mod.moles(el), list(statevar_dict.keys()) + variables,
                                                     include_obj=True, include_grad=False,
                                                     parameters=param_symbols)
                                     for el in pure_elements]
        phase_record = PhaseRecord_from_cython(comps, list(statevar_dict.keys()) + variables,
                                               np.array(dbf.phases[phase_name].sublattices, dtype=np.float64),
                                               param_values, comp_sets[phase_name], None, None,
                                               mass_dict[phase_name], None)
        points = points_dict[phase_name]
        if points is None:
            points = _sample_phase_constitution(phase_name, phase_obj.constituents, sublattice_dof, comps,
                                                tuple(variables), sampler_dict[phase_name] or point_sample,
                                                fixedgrid_dict[phase_name], pdens_dict[phase_name])
        points = np.atleast_2d(points)

        fp = fake_points and (phase_name == sorted(active_phases.keys())[0])
        phase_ds = _compute_phase_values(nonvacant_components, str_statevar_dict,
                                         points, phase_record, output,
                                         maximum_internal_dof, broadcast=broadcast,
                                         largest_energy=float(largest_energy), fake_points=fp)
        all_phase_data.append(phase_ds)

    # speedup for single-phase case (found by profiling)
    if len(all_phase_data) > 1:
        final_ds = concat(all_phase_data, dim='points')
        final_ds['points'].values = np.arange(len(final_ds['points']))
        final_ds.coords['points'].values = np.arange(len(final_ds['points']))
    else:
        final_ds = all_phase_data[0]
    return final_ds
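
# Usage sketch: supplying site-fraction points for one phase instead of automatic
# sampling (illustrative only; the database file, phase name, and sublattice layout are
# assumptions; columns must be the phase's sorted internal degrees of freedom):
#
#     import numpy as np
#     from pycalphad import Database, calculate
#     dbf = Database('alni.tdb')                      # hypothetical database
#     # Three candidate constitutions of a single-sublattice LIQUID with species (AL, NI);
#     # each row sums to 1 within the sublattice
#     my_points = np.array([[0.3, 0.7],
#                           [0.5, 0.5],
#                           [0.9, 0.1]])
#     res = calculate(dbf, ['AL', 'NI'], 'LIQUID', T=1800, P=101325,
#                     points={'LIQUID': my_points})
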
def energy_surf(dbf, comps, phases, mode=None, output='GM', **kwargs):
    """
    Sample the property surface of 'output' containing the specified
    components and phases. Model parameters are taken from 'dbf' and any
    state variables (T, P, etc.) can be specified as keyword arguments.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : list
        Names of components to consider in the calculation.
    phases : list
        Names of phases to consider in the calculation.
    mode : string, optional
        See 'make_callable' docstring for details.
    output : string, optional
        Model attribute to sample.
    pdens : int, a dict of phase names to int, or a list of both, optional
        Number of points to sample per degree of freedom.
    model : Model, a dict of phase names to Model, or a list of both, optional
        Model class to use for each phase.

    Returns
    -------
    DataFrame of the output as a function of composition, temperature, etc.

    Examples
    --------
    None yet.
    """
    warnings.warn('Use pycalphad.calculate() instead', DeprecationWarning, stacklevel=2)
    # Here we check for any keyword arguments that are special, i.e.,
    # there may be keyword arguments that aren't state variables
    pdens_dict = unpack_kwarg(kwargs.pop('pdens', 2000), default_arg=2000)
    model_dict = unpack_kwarg(kwargs.pop('model', Model), default_arg=Model)
    callable_dict = unpack_kwarg(kwargs.pop('callables', None), default_arg=None)

    # Convert keyword strings to proper state variable objects
    # If we don't do this, sympy will get confused during substitution
    statevar_dict = \
        collections.OrderedDict((v.StateVariable(key), value) \
                                for (key, value) in sorted(kwargs.items()))

    # Generate all combinations of state variables for 'map' calculation
    # Wrap single values of state variables in lists
    # Use 'kwargs' because we want state variable names to be stringified
    statevar_values = [_listify(val) for val in statevar_dict.values()]
    statevars_to_map = np.array(list(itertools.product(*statevar_values)))

    # Consider only the active phases
    active_phases = dict((name.upper(), dbf.phases[name.upper()]) \
        for name in phases)
    comp_sets = {}
    # Construct a list to hold all the data
    all_phase_data = []
    for phase_name, phase_obj in sorted(active_phases.items()):
        # Build the symbolic representation of the energy
        mod = model_dict[phase_name]
        # if this is an object type, we need to construct it
        if isinstance(mod, type):
            try:
                mod = mod(dbf, comps, phase_name)
            except DofError:
                # we can't build the specified phase because the
                # specified components aren't found in every sublattice
                # we'll just skip it
                logger.warning("""Suspending specified phase %s due to some
                sublattices containing only unspecified components""", phase_name)
                continue
        try:
            out = getattr(mod, output)
        except AttributeError:
            raise AttributeError('Missing Model attribute {0} specified for {1}'
                                 .format(output, mod.__class__))

        # Construct an ordered list of the variables
        variables, sublattice_dof = generate_dof(phase_obj, mod.components)

        site_ratios = list(phase_obj.sublattices)
        # Build the "fast" representation of that model
        if callable_dict[phase_name] is None:
            # As a last resort, treat undefined symbols as zero
            # But warn the user when we do this
            # This is consistent with TC's behavior
            undefs = list(out.atoms(Symbol) - out.atoms(v.StateVariable))
            for undef in undefs:
                out = out.xreplace({undef: float(0)})
                logger.warning('Setting undefined symbol %s for phase %s to zero',
                               undef, phase_name)
            comp_sets[phase_name] = make_callable(out, \
                list(statevar_dict.keys()) + variables, mode=mode)
        else:
            comp_sets[phase_name] = callable_dict[phase_name]

        # Eliminate pure vacancy endmembers from the calculation
        vacancy_indices = list()
        for idx, sublattice in enumerate(phase_obj.constituents):
            if 'VA' in sorted(sublattice) and 'VA' in sorted(comps):
                vacancy_indices.append(sorted(sublattice).index('VA'))
        if len(vacancy_indices) != len(phase_obj.constituents):
            vacancy_indices = None
        logger.debug('vacancy_indices: %s', vacancy_indices)
        # Add all endmembers to guarantee their presence
        points = endmember_matrix(sublattice_dof,
                                  vacancy_indices=vacancy_indices)

        # Sample composition space for more points
        if sum(sublattice_dof) > len(sublattice_dof):
            points = np.concatenate((points,
                                     point_sample(sublattice_dof,
                                                  pdof=pdens_dict[phase_name])))

        # If there are nontrivial sublattices with vacancies in them,
        # generate a set of points where their fraction is zero and renormalize
        for idx, sublattice in enumerate(phase_obj.constituents):
            if 'VA' in set(sublattice) and len(sublattice) > 1:
                var_idx = variables.index(v.SiteFraction(phase_name, idx, 'VA'))
                addtl_pts = np.copy(points)
                # set vacancy fraction to log-spaced between 1e-10 and 1e-6
                addtl_pts[:, var_idx] = np.power(10.0, -10.0 * (1.0 - addtl_pts[:, var_idx]))
                # renormalize site fractions
                cur_idx = 0
                for ctx in sublattice_dof:
                    end_idx = cur_idx + ctx
                    addtl_pts[:, cur_idx:end_idx] /= \
                        addtl_pts[:, cur_idx:end_idx].sum(axis=1)[:, None]
                    cur_idx = end_idx
                # add to points matrix
                points = np.concatenate((points, addtl_pts), axis=0)

        data_dict = {'Phase': phase_name}
        # Broadcast compositions and state variables along orthogonal axes
        # This lets us eliminate an expensive Python loop
        data_dict[output] = \
            comp_sets[phase_name](*itertools.chain(
                np.transpose(statevars_to_map[:, :, np.newaxis], (1, 2, 0)),
                np.transpose(points[:, :, np.newaxis], (1, 0, 2)))).T.ravel()

        # Save state variables, with values indexed appropriately
        statevar_vals = np.repeat(statevars_to_map, len(points), axis=0).T
        data_dict.update({str(statevar): vals for statevar, vals \
                          in zip(statevar_dict.keys(), statevar_vals)})

        # Map the internal degrees of freedom to global coordinates
        # Normalize site ratios by the sum of site ratios times a factor
        # related to the site fraction of vacancies
        site_ratio_normalization = np.zeros(len(points))
        for idx, sublattice in enumerate(phase_obj.constituents):
            vacancy_column = np.ones(len(points))
            if 'VA' in set(sublattice):
                var_idx = variables.index(v.SiteFraction(phase_name, idx, 'VA'))
                vacancy_column -= points[:, var_idx]
            site_ratio_normalization += site_ratios[idx] * vacancy_column

        for comp in sorted(comps):
            if comp == 'VA':
                continue
            avector = [float(vxx.species == comp) * \
                site_ratios[vxx.sublattice_index] for vxx in variables]
            data_dict['X(' + comp + ')'] = np.tile(np.divide(np.dot(
                points[:, :], avector), site_ratio_normalization),
                                                   statevars_to_map.shape[0])

        # Copy coordinate information into data_dict
        # TODO: Is there a more memory-efficient way to deal with this?
        # Perhaps with hierarchical indexing...
        var_fmt = 'Y({0},{1},{2})'
        data_dict.update({var_fmt.format(vxx.phase_name, vxx.sublattice_index, vxx.species): \
                          np.tile(vals, statevars_to_map.shape[0]) \
                          for vxx, vals in zip(variables, points.T)})
        all_phase_data.append(pd.DataFrame(data_dict))

    # all_phase_data now contains energy surface information for the system
    return pd.concat(all_phase_data, axis=0, join='outer', \
                     ignore_index=True, verify_integrity=False)
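
# energy_surf() is deprecated in favor of calculate(); a minimal migration sketch
# (illustrative only; the database object and component/phase names are assumptions):
#
#     # Old API: returns a pandas DataFrame with 'Phase', 'GM', 'X(...)' and 'Y(...)' columns
#     # df = energy_surf(dbf, ['AL', 'ZN'], ['LIQUID'], T=800)
#     # New API: returns a Dataset indexed by state variables and sampled points
#     ds = calculate(dbf, ['AL', 'ZN', 'VA'], ['LIQUID'], T=800, P=101325)
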