Example #1
def test_filter_phases_removes_disordered_phases_from_order_disorder():
    """Databases with order-disorder models should have the disordered phases be filtered if candidate_phases kwarg is not passed to filter_phases.
    If candidate_phases kwarg is passed, disordered phases just are filtered if respective ordered phases are inactive"""
    all_phases = set(ALNIPT_DBF.phases.keys())
    filtered_phases = set(filter_phases(ALNIPT_DBF, unpack_components(ALNIPT_DBF, ['AL', 'NI', 'PT', 'VA'])))
    assert all_phases.difference(filtered_phases) == {'FCC_A1'}
    comps = unpack_components(ALCRNI_DBF, ['NI', 'AL', 'CR', 'VA'])
    filtered_phases = set(filter_phases(ALCRNI_DBF, comps, ['FCC_A1', 'L12_FCC', 'LIQUID', 'BCC_A2']))
    assert filtered_phases == {'L12_FCC', 'LIQUID', 'BCC_A2'}
    filtered_phases = set(filter_phases(ALCRNI_DBF, comps, ['FCC_A1', 'LIQUID', 'BCC_A2']))
    assert filtered_phases == {'FCC_A1', 'LIQUID', 'BCC_A2'}
    filtered_phases = set(filter_phases(ALCRNI_DBF, comps, ['FCC_A1']))
    assert filtered_phases == {'FCC_A1'}
Example #2
def get_prop_samples(dbf, comps, phase_name, desired_data):
    """
    Return the data values and the conditions needed to compute them with
    pycalphad's calculate, extracted from the datasets

    Parameters
    ----------
    dbf : pycalphad.Database
        Database to consider
    comps : list
        List of active component names
    phase_name : str
        Name of the phase to consider from the Database
    desired_data : list
        List of dictionary datasets that contain the values to sample

    Returns
    -------
    dict
        Dictionary of condition kwargs for pycalphad's calculate and the expected values

    """
    # TODO: assumes T, P as conditions
    # sublattice constituents are Species objects, so we need to be doing intersections with those
    species_comps = unpack_components(dbf, comps)
    phase_constituents = dbf.phases[phase_name].constituents
    # phase constituents must be filtered to only active:
    phase_constituents = [[c.name for c in sorted(subl_constituents.intersection(set(species_comps)))] for subl_constituents in phase_constituents]

    # calculate needs points, state variable lists, and values to compare to
    calculate_dict = {
        'P': np.array([]),
        'T': np.array([]),
        'points': np.atleast_2d([[]]).reshape(-1, sum([len(subl) for subl in phase_constituents])),
        'values': np.array([]),
    }

    for datum in desired_data:
        # extract the data we care about
        datum_T = datum['conditions']['T']
        datum_P = datum['conditions']['P']
        configurations = datum['solver']['sublattice_configurations']
        occupancies = datum['solver'].get('sublattice_occupancies')
        values = np.array(datum['values'])

        # broadcast and flatten the conditions arrays
        P, T = ravel_conditions(values, datum_P, datum_T)
        if occupancies is None:
            occupancies = [None] * len(configurations)

        # calculate the points arrays, should be 2d array of points arrays
        points = np.array([calculate_points_array(phase_constituents, config, occup) for config, occup in zip(configurations, occupancies)])

        # add everything to the calculate_dict
        calculate_dict['P'] = np.concatenate([calculate_dict['P'], P])
        calculate_dict['T'] = np.concatenate([calculate_dict['T'], T])
        calculate_dict['points'] = np.concatenate([calculate_dict['points'], np.repeat(points, len(T) // points.shape[0], axis=0)], axis=0)  # np.repeat requires an integer repeat count
        calculate_dict['values'] = np.concatenate([calculate_dict['values'], values.flatten()])

    return calculate_dict
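
As a usage sketch (not taken from any of the projects above), the returned dictionary can be unpacked into pycalphad's calculate with broadcast=False so that the P, T and points arrays are paired element-wise. The database file, phase constituents and dataset contents below are hypothetical placeholders, and the snippet assumes the surrounding ESPEI module (ravel_conditions, calculate_points_array) is available.

from pycalphad import Database, calculate

dbf = Database('Al-Ni.tdb')  # hypothetical TDB; FCC_A1 assumed to be (AL,NI)(VA)
comps = ['AL', 'NI', 'VA']
desired_data = [{  # schematic ESPEI-style dataset; all numbers are made up
    'conditions': {'T': [300.0, 400.0], 'P': 101325.0},
    'solver': {'sublattice_configurations': [[['AL', 'NI'], 'VA']],
               'sublattice_occupancies': [[[0.5, 0.5], 1.0]]},
    'values': [[[-1200.0], [-1350.0]]],
}]

calc_dict = get_prop_samples(dbf, comps, 'FCC_A1', desired_data)
expected = calc_dict.pop('values')
# The remaining keys (P, T, points) map directly onto calculate's kwargs;
# broadcast=False pairs them element-wise instead of building a grid.
res = calculate(dbf, comps, 'FCC_A1', output='HM_MIX',
                broadcast=False, **calc_dict)
residuals = res['HM_MIX'].values.flatten() - expected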
Example #3
def test_filter_phases_removes_disordered_phases_from_order_disorder():
    """Databases with order-disorder models should have the disordered phases be filtered."""
    all_phases = set(ALNIPT_DBF.phases.keys())
    filtered_phases = set(
        filter_phases(ALNIPT_DBF,
                      unpack_components(ALNIPT_DBF, ['AL', 'NI', 'PT', 'VA'])))
    assert all_phases.difference(filtered_phases) == {'FCC_A1'}
Example #4
def get_pure_elements(dbf, comps):
    """
    Return a list of pure elements in the system

    Parameters
    ----------
    dbf : pycalphad.Database
        A Database object
    comps : list
        A list of component names (species and pure elements)

    Returns
    -------
    list
        A list of pure elements in the Database
    """
    comps = sorted(unpack_components(dbf, comps))
    components = [x for x in comps]
    desired_active_pure_elements = [
        list(x.constituents.keys()) for x in components
    ]
    desired_active_pure_elements = [
        el.upper() for constituents in desired_active_pure_elements
        for el in constituents
    ]
    pure_elements = sorted(
        set([x for x in desired_active_pure_elements if x != 'VA']))
    return pure_elements
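
A minimal illustration of the expected behavior, assuming a hypothetical 'Al-Ni.tdb' database that defines AL, NI and VA:

from pycalphad import Database

dbf = Database('Al-Ni.tdb')  # hypothetical TDB file
# Species are decomposed into their constituent pure elements and the
# vacancy pseudo-element 'VA' is dropped from the result.
assert get_pure_elements(dbf, ['AL', 'NI', 'VA']) == ['AL', 'NI']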
Example #5
def test_filter_phases_removes_phases_with_inactive_sublattices():
    """Phases that have no active components in any sublattice should be filtered"""
    all_phases = set(ALNIPT_DBF.phases.keys())
    filtered_phases = set(
        filter_phases(ALNIPT_DBF,
                      unpack_components(ALNIPT_DBF, ['AL', 'NI', 'VA'])))
    assert all_phases.difference(filtered_phases) == {
        'FCC_A1', 'PT8AL21', 'PT5AL21', 'PT2AL', 'PT2AL3', 'PT5AL3', 'ALPT2'
    }
Example #6
def order_disorder_dict(dbf, comps, phases):
    """Return a dictionary with the sublattice degrees of freedom and equivalent
    sublattices for order/disorder phases

    Parameters
    ----------
    dbf : pycalphad.Database
    comps : list[str]
        List of active components to consider
    phases : list[str]
        List of active phases to consider

    Returns
    -------
    dict

    Notes
    -----
    Phases which should be checked for ordered/disordered configurations are
    determined heuristically for this script.

    A phase satisfies the heuristic if:
    1. The phase is the ordered part of an order-disorder model
    2. The equivalent sublattices have all the same number of elements
    """
    species = unpack_components(dbf, comps)
    ord_disord_phases = {}
    for phase_name in phases:
        phase_obj = dbf.phases[phase_name]
        if phase_name == phase_obj.model_hints.get('ordered_phase', ''):
            # This phase is active and modeled with an order/disorder model.
            dof = generate_dof(dbf.phases[phase_name], species)[1]
            # Define the symmetrically equivalent sublattices as any sublattices
            # that have the same site ratio. Create a {site_ratio: [subl idx]} dict
            site_ratio_idxs = defaultdict(lambda: [])
            for subl_idx, site_ratio in enumerate(phase_obj.sublattices):
                site_ratio_idxs[site_ratio].append(subl_idx)
            equiv_sublattices = list(site_ratio_idxs.values())
            ord_disord_phases[phase_name] = {
                'subl_dof': dof,
                'symmetric_subl_idx': equiv_sublattices,
                'disordered_phase': phase_obj.model_hints['disordered_phase']
            }
    return ord_disord_phases
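
A sketch of the expected call and return shape, assuming a hypothetical database whose L12_FCC phase is the ordered part of an FCC order/disorder model; the values shown are illustrative, not computed:

from pycalphad import Database

dbf = Database('Al-Ni.tdb')  # hypothetical TDB with an FCC_A1/L12_FCC order-disorder model
od = order_disorder_dict(dbf, ['AL', 'NI', 'VA'], ['FCC_A1', 'L12_FCC', 'LIQUID'])
# Expected shape of the result (illustrative values):
# {'L12_FCC': {'subl_dof': [2, 2, 2, 2, 1],               # species per sublattice
#              'symmetric_subl_idx': [[0, 1, 2, 3], [4]],  # same site ratio -> equivalent
#              'disordered_phase': 'FCC_A1'}}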
Example #7
def sample_phase_points(dbf, comps, phase_name, conditions, calc_pdens, pdens):
    """Sample new points from a phase around the single phase equilibrium site fractions at the given conditions.

    Parameters
    ----------
    dbf : pycalphad.Database
        Database to consider.
    comps : list
        List of active component names.
    phase_name : str
        Name of the phase to sample.
    conditions : dict
        Conditions dictionary passed to equilibrium.
    calc_pdens : int
        The point density passed to calculate for the nominal points added.
    pdens : int
        The number of points to add in the local sampling at each set of equilibrium site fractions.

    Returns
    -------
    np.ndarray[:,:]

    """
    _, subl_dof = generate_dof(dbf.phases[phase_name],
                               unpack_components(dbf, comps))
    # subl_dof is number of species in each sublattice, e.g. (FE,NI,TI)(FE,NI)(FE,NI,TI) is [3, 2, 3]
    eqgrid = equilibrium(dbf, comps, [phase_name], conditions)
    all_eq_pts = eqgrid.Y.values[eqgrid.Phase.values == phase_name]
    # sample points locally
    additional_points = local_sample(all_eq_pts, subl_dof, pdens)
    # get the grid between endmembers and random point sampling from calculate
    pts_calc = calculate(dbf,
                         comps,
                         phase_name,
                         pdens=calc_pdens,
                         P=101325,
                         T=300,
                         N=1).Y.values.squeeze()
    return np.concatenate([additional_points, pts_calc], axis=0)
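
A hedged usage sketch: the sampled points can seed a {phase_name: points} dictionary that is forwarded to equilibrium through calc_opts (as in the adaptive Scheil example further below). The database file, phase names and conditions are placeholders.

from pycalphad import Database, variables as v

dbf = Database('Al-Ni.tdb')  # hypothetical TDB file
comps = ['AL', 'NI', 'VA']
conds = {v.N: 1, v.P: 101325, v.T: 1600, v.X('NI'): 0.3}  # placeholder conditions

points_dict = {
    ph: sample_phase_points(dbf, comps, ph, conds, calc_pdens=500, pdens=50)
    for ph in ['LIQUID', 'FCC_A1']
}
# points_dict can now be passed as eq_kwargs={'calc_opts': {'points': points_dict}}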
Example #8
def get_zpf_data(dbf: Database, comps: Sequence[str], phases: Sequence[str],
                 datasets: PickleableTinyDB, parameters: Dict[str, float]):
    """
    Return the ZPF data used in the calculation of ZPF error

    Parameters
    ----------
    dbf : pycalphad.Database
        Thermodynamic database containing the relevant parameters
    comps : list
        List of active component names
    phases : list
        List of phases to consider
    datasets : espei.utils.PickleableTinyDB
        Datasets that contain single phase data
    parameters : dict
        Dictionary mapping symbols to optimize to their initial values

    Returns
    -------
    list
        List of data dictionaries with keys ``weight``, ``data_comps``,
        ``phase_regions``, and ``dataset_reference``. ``data_comps`` are the
        components for the data in question. ``phase_regions`` are the ZPF
        phases, state variables and compositions.
    """
    desired_data = datasets.search(
        (tinydb.where('output') == 'ZPF')
        & (tinydb.where('components').test(lambda x: set(x).issubset(comps)))
        & (tinydb.where('phases').test(
            lambda x: len(set(phases).intersection(x)) > 0)))

    zpf_data = []  # 1:1 correspondence with each dataset
    for data in desired_data:
        data_comps = list(set(data['components']).union({'VA'}))
        species = sorted(unpack_components(dbf, data_comps), key=str)
        data_phases = filter_phases(dbf, species, candidate_phases=phases)
        models = instantiate_models(dbf,
                                    species,
                                    data_phases,
                                    parameters=parameters)
        all_regions = data['values']
        conditions = data['conditions']
        phase_regions = []
        # Each phase_region is one set of phases in equilibrium (on a tie-line),
        # e.g. [["ALPHA", ["B"], [0.25]], ["BETA", ["B"], [0.5]]]
        for idx, phase_region in enumerate(all_regions):
            # We need to construct a PhaseRegion by matching up phases/compositions to the conditions
            if len(phase_region) < 2:
                # Skip single-phase regions for fitting purposes
                continue
            # Extract the conditions for entire phase region
            region_potential_conds = extract_conditions(conditions, idx)
            region_potential_conds[v.N] = region_potential_conds.get(
                v.N) or 1.0  # Add v.N condition, if missing
            # Extract all the phases and compositions from the tie-line points
            region_phases, region_comp_conds, phase_flags = extract_phases_comps(
                phase_region)
            region_phase_records = [
                build_phase_records(dbf,
                                    species,
                                    data_phases, {
                                        **region_potential_conds,
                                        **comp_conds
                                    },
                                    models,
                                    parameters=parameters,
                                    build_gradients=True,
                                    build_hessians=True)
                for comp_conds in region_comp_conds
            ]
            phase_regions.append(
                PhaseRegion(region_phases, region_potential_conds,
                            region_comp_conds, phase_flags, dbf, species,
                            data_phases, models, region_phase_records))

        data_dict = {
            'weight': data.get('weight', 1.0),
            'data_comps': data_comps,
            'phase_regions': phase_regions,
            'dataset_reference': data['reference']
        }
        zpf_data.append(data_dict)
    return zpf_data
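
For reference, a schematic ZPF dataset entry of the kind matched by the tinydb query above; the phase names, numbers and reference string are illustrative placeholders, and ESPEI's actual dataset schema may include additional keys.

zpf_dataset = {
    'components': ['AL', 'NI', 'VA'],
    'phases': ['FCC_A1', 'LIQUID'],
    'conditions': {'P': 101325, 'T': [1600, 1620]},
    'output': 'ZPF',
    'values': [
        # one tie-line per condition: [phase name, [components], [mole fractions]]
        [['FCC_A1', ['NI'], [0.25]], ['LIQUID', ['NI'], [0.30]]],
        [['FCC_A1', ['NI'], [0.26]], ['LIQUID', ['NI'], [0.31]]],
    ],
    'reference': 'hypothetical-1989-dataset',
    'weight': 1.0,
}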
Example #9
def test_filter_phases_removes_phases_with_inactive_sublattices():
    """Phases that have no active components in any sublattice should be filtered"""
    all_phases = set(ALNIPT_DBF.phases.keys())
    filtered_phases = set(filter_phases(ALNIPT_DBF, unpack_components(ALNIPT_DBF, ['AL', 'NI', 'VA'])))
    assert all_phases.difference(filtered_phases) == {'FCC_A1', 'PT8AL21', 'PT5AL21', 'PT2AL', 'PT2AL3', 'PT5AL3', 'ALPT2'}
Example #10
def simulate_scheil_solidification(dbf,
                                   comps,
                                   phases,
                                   composition,
                                   start_temperature,
                                   step_temperature=1.0,
                                   liquid_phase_name='LIQUID',
                                   eq_kwargs=None,
                                   stop=0.0001,
                                   verbose=False,
                                   adaptive=True):
    """Perform a Scheil-Gulliver solidification simulation.

    Parameters
    ----------
    dbf : pycalphad.Database
        Database object.
    comps : list
        List of components in the system.
    phases : list
        List of phases in the system.
    composition : Dict[v.X, float]
        Dictionary of independent `v.X` composition variables.
    start_temperature : float
        Starting temperature for simulation. Must be single phase liquid.
    step_temperature : Optional[float]
        Temperature step size. Defaults to 1.0.
    liquid_phase_name : Optional[str]
        Name of the phase treated as liquid (i.e. the phase with infinitely
        fast diffusion). Defaults to 'LIQUID'.
    eq_kwargs : Optional[Dict[str, Any]]
        Keyword arguments passed to equilibrium.
    stop : Optional[float]
        Stop when the phase fraction of liquid is below this amount.
    verbose : Optional[bool]
        Print verbose progress output. Defaults to False.
    adaptive : Optional[bool]
        Whether to add additional points near the equilibrium points at each
        step. Only takes effect if ``points`` is in the ``calc_opts`` of
        ``eq_kwargs``.

    Returns
    -------
    SolidificationResult

    """
    eq_kwargs = eq_kwargs or dict()
    STEP_SCALE_FACTOR = 1.2  # How much to try to adapt the temperature step by
    MAXIMUM_STEP_SIZE_REDUCTION = 5.0
    T_STEP_ORIG = step_temperature
    phases = filter_phases(dbf, unpack_components(dbf, comps), phases)
    models = instantiate_models(dbf, comps, phases)
    if verbose:
        print('building callables... ', end='')
    cbs = build_callables(dbf,
                          comps,
                          phases,
                          models,
                          additional_statevars={v.P, v.T, v.N},
                          build_gradients=True,
                          build_hessians=True)
    if verbose:
        print('done')
    solid_phases = sorted(set(phases) - {liquid_phase_name})
    temp = start_temperature
    independent_comps = sorted([str(comp)[2:] for comp in composition.keys()])
    x_liquid = {comp: [composition[v.X(comp)]] for comp in independent_comps}
    fraction_solid = [0.0]
    temperatures = [temp]
    phase_amounts = {ph: [0.0] for ph in solid_phases}
    ord_disord_dict = order_disorder_dict(dbf, comps, phases)

    if adaptive and ('points' in eq_kwargs.get('calc_opts', {})):
        # Dynamically add points as the simulation runs
        species = unpack_components(dbf, comps)
        dof_dict = {
            ph: generate_dof(dbf.phases[ph], species)[1]
            for ph in phases
        }
    else:
        adaptive = False

    converged = False
    phases_seen = {liquid_phase_name, ''}
    liquid_comp = composition
    while fraction_solid[-1] < 1:
        conds = {v.T: temp, v.P: 101325.0, v.N: 1.0}
        comp_conds = liquid_comp
        fmt_comp_conds = ', '.join(
            [f'{c}={val:0.2f}' for c, val in comp_conds.items()])
        conds.update(comp_conds)
        eq = equilibrium(dbf,
                         comps,
                         phases,
                         conds,
                         callables=cbs,
                         model=models,
                         **eq_kwargs)
        if adaptive:
            # Update the points dictionary with local samples around the equilibrium site fractions
            points_dict = eq_kwargs['calc_opts']['points']
            for vtx in range(eq.vertex.size):
                masked = eq.isel(vertex=vtx)
                ph = str(masked.Phase.values.squeeze())
                pts = points_dict.get(ph)
                if pts is not None:
                    if verbose:
                        print(f'Adding points to {ph}. ', end='')
                    dof = dof_dict[ph]
                    points_dict[ph] = np.concatenate([
                        pts,
                        local_sample(
                            masked.Y.values.squeeze()[:sum(dof)].reshape(
                                1, -1),
                            dof,
                            pdens=20)
                    ],
                                                     axis=0)

        eq_phases = order_disorder_eq_phases(eq, ord_disord_dict)
        num_eq_phases = np.nansum(
            np.array([str(ph) for ph in eq_phases]) != '')
        new_phases_seen = set(eq_phases).difference(phases_seen)
        if len(new_phases_seen) > 0:
            if verbose:
                print(f'New phases seen: {new_phases_seen}. ', end='')
            phases_seen |= new_phases_seen
        if liquid_phase_name not in eq["Phase"].values.squeeze():
            found_ph = set(eq_phases) - {''}
            if verbose:
                print(
                    f'No liquid phase found at T={temp:0.3f}, {fmt_comp_conds}. (Found {found_ph}) ',
                    end='')
            if len(found_ph) == 0:
                # No phases found in equilibrium. Just continue on lowering the temperature without changing anything
                if verbose:
                    print(f'(Convergence failure) ', end='')
            if T_STEP_ORIG / step_temperature > MAXIMUM_STEP_SIZE_REDUCTION:
                # Only found solid phases and the step size has already been reduced. Stop running without converging.
                if verbose:
                    print('Maximum step size reduction exceeded. Stopping.')
                converged = False
                break
            else:
                # Only found solid phases. Try reducing the step size to zero-in on the correct phases
                if verbose:
                    print(f'Stepping back and reducing step size.')
                temp += step_temperature
                step_temperature /= STEP_SCALE_FACTOR
                continue
        # TODO: Will break if there is a liquid miscibility gap
        liquid_vertex = sorted(
            np.nonzero(
                eq["Phase"].values.squeeze().flat == liquid_phase_name))[0]
        liquid_comp = {}
        for comp in independent_comps:
            x = float(eq["X"].isel(vertex=liquid_vertex).squeeze().sel(
                component=comp).values)
            x_liquid[comp].append(x)
            liquid_comp[v.X(comp)] = x
        np_liq = np.nansum(
            eq.where(eq["Phase"] == liquid_phase_name).NP.values)
        current_fraction_solid = float(fraction_solid[-1])
        found_phase_amounts = [(liquid_phase_name, np_liq)
                               ]  # tuples of phase name, amount
        for solid_phase in solid_phases:
            if solid_phase not in eq_phases:
                phase_amounts[solid_phase].append(0.0)
                continue
            np_tieline = np.nansum(
                eq.isel(vertex=eq_phases.index(solid_phase))
                ["NP"].values.squeeze())
            found_phase_amounts.append((solid_phase, np_tieline))
            delta_fraction_solid = (1 - current_fraction_solid) * np_tieline
            current_fraction_solid += delta_fraction_solid
            phase_amounts[solid_phase].append(delta_fraction_solid)
        fraction_solid.append(current_fraction_solid)
        temperatures.append(temp)
        NL = 1 - fraction_solid[-1]
        if verbose:
            phase_amnts = ' '.join(
                [f'NP({ph})={amnt:0.3f}' for ph, amnt in found_phase_amounts])
            if NL < 1.0e-3:
                print(
                    f'T={temp:0.3f}, {fmt_comp_conds}, ΔT={step_temperature:0.3f}, NL: {NL:.2E}, {phase_amnts} ',
                    end='')
            else:
                print(
                    f'T={temp:0.3f}, {fmt_comp_conds}, ΔT={step_temperature:0.3f}, NL: {NL:0.3f}, {phase_amnts} ',
                    end='')
        if NL < stop:
            if verbose:
                print(
                    f'Liquid fraction below criterion {stop} . Stopping at {fmt_comp_conds}'
                )
            converged = True
            break
        if verbose:
            print()  # add line break
        temp -= step_temperature

    if fraction_solid[-1] < 1:
        for comp in independent_comps:
            x_liquid[comp].append(np.nan)
        fraction_solid.append(1.0)
        temperatures.append(temp)
        # set the final phase amount to the phase fractions in the eutectic
        # this method gives the sum total phase amounts of 1.0 by construction
        for solid_phase in solid_phases:
            if solid_phase in eq_phases:
                amount = np.nansum(
                    eq.isel(vertex=eq_phases.index(solid_phase))
                    ["NP"].values.squeeze())
                phase_amounts[solid_phase].append(
                    float(amount) * (1 - current_fraction_solid))
            else:
                phase_amounts[solid_phase].append(0.0)

    return SolidificationResult(x_liquid, fraction_solid, temperatures,
                                phase_amounts, converged, "scheil")
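
A minimal usage sketch under assumed inputs: a hypothetical 'Al-Ni.tdb' database, a placeholder Al-30 at.% Ni composition, and a start temperature assumed to lie in the single-phase liquid region. The result attributes are assumed to mirror the SolidificationResult constructor arguments seen above.

from pycalphad import Database, variables as v

dbf = Database('Al-Ni.tdb')               # hypothetical TDB file
comps = ['AL', 'NI', 'VA']
phases = sorted(dbf.phases.keys())
initial_composition = {v.X('NI'): 0.3}    # placeholder alloy composition

sol_res = simulate_scheil_solidification(
    dbf, comps, phases, initial_composition,
    start_temperature=2000.0,             # must be single-phase liquid (placeholder value)
    step_temperature=1.0, verbose=True)

# Assumed attributes mirroring the constructor arguments:
# sol_res.temperatures, sol_res.fraction_solid, sol_res.x_liquid, sol_res.phase_amounts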
Example #11
def calculate(dbf, comps, phases, mode=None, output='GM', fake_points=False, broadcast=True, parameters=None, **kwargs):
    """
    Sample the property surface of 'output' containing the specified
    components and phases. Model parameters are taken from 'dbf' and any
    state variables (T, P, etc.) can be specified as keyword arguments.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : str or sequence
        Names of components to consider in the calculation.
    phases : str or sequence
        Names of phases to consider in the calculation.
    mode : string, optional
        See 'make_callable' docstring for details.
    output : string, optional
        Model attribute to sample.
    fake_points : bool, optional (Default: False)
        If True, the first few points of the output surface will be fictitious
        points used to define an equilibrium hyperplane guaranteed to be above
        all the other points. This is used for convex hull computations.
    broadcast : bool, optional
        If True, broadcast given state variable lists against each other to create a grid.
        If False, assume state variables are given as equal-length lists.
    points : ndarray or a dict of phase names to ndarray, optional
        Columns of ndarrays must be internal degrees of freedom (site fractions), sorted.
        If this is not specified, points will be generated automatically.
    pdens : int, a dict of phase names to int, or a seq of both, optional
        Number of points to sample per degree of freedom.
        Default: 2000; Default when called from equilibrium(): 500
    model : Model, a dict of phase names to Model, or a seq of both, optional
        Model class to use for each phase.
    sampler : callable, a dict of phase names to callable, or a seq of both, optional
        Function to sample phase constitution space.
        Must have same signature as 'pycalphad.core.utils.point_sample'
    grid_points : bool, a dict of phase names to bool, or a seq of both, optional (Default: True)
        Whether to add evenly spaced points between end-members.
        The density of points is determined by 'pdens'
    parameters : dict, optional
        Maps SymPy Symbol to numbers, for overriding the values of parameters in the Database.

    Returns
    -------
    Dataset of the sampled attribute as a function of state variables

    Examples
    --------
    None yet.
    """
    # Here we check for any keyword arguments that are special, i.e.,
    # there may be keyword arguments that aren't state variables
    pdens_dict = unpack_kwarg(kwargs.pop('pdens', 2000), default_arg=2000)
    points_dict = unpack_kwarg(kwargs.pop('points', None), default_arg=None)
    callables = kwargs.pop('callables', {})
    sampler_dict = unpack_kwarg(kwargs.pop('sampler', None), default_arg=None)
    fixedgrid_dict = unpack_kwarg(kwargs.pop('grid_points', True), default_arg=True)
    parameters = parameters or dict()
    if isinstance(parameters, dict):
        parameters = OrderedDict(sorted(parameters.items(), key=str))
    if isinstance(phases, str):
        phases = [phases]
    if isinstance(comps, (str, v.Species)):
        comps = [comps]
    comps = sorted(unpack_components(dbf, comps))
    if points_dict is None and broadcast is False:
        raise ValueError('The \'points\' keyword argument must be specified if broadcast=False is also given.')
    nonvacant_components = [x for x in sorted(comps) if x.number_of_atoms > 0]

    all_phase_data = []
    largest_energy = 1e10

    # Consider only the active phases
    list_of_possible_phases = filter_phases(dbf, comps)
    active_phases = sorted(set(list_of_possible_phases).intersection(set(phases)))
    active_phases = {name: dbf.phases[name] for name in active_phases}
    if len(list_of_possible_phases) == 0:
        raise ConditionError('There are no phases in the Database that can be active with components {0}'.format(comps))
    if len(active_phases) == 0:
        raise ConditionError('None of the passed phases ({0}) are active. List of possible phases: {1}.'
                             .format(phases, list_of_possible_phases))

    models = instantiate_models(dbf, comps, list(active_phases.keys()), model=kwargs.pop('model', None), parameters=parameters)

    if isinstance(output, (list, tuple, set)):
        raise NotImplementedError('Only one property can be specified in calculate() at a time')
    output = output if output is not None else 'GM'

    # Implicitly add 'N' state variable as a string to keyword arguments if it's not passed
    if kwargs.get('N') is None:
        kwargs['N'] = 1
    if np.any(np.array(kwargs['N']) != 1):
        raise ConditionError('N!=1 is not yet supported, got N={}'.format(kwargs['N']))

    # TODO: conditions dict of StateVariable instances should become part of the calculate API
    statevar_strings = [sv for sv in kwargs.keys() if getattr(v, sv, None) is not None]
    # If we don't do this, sympy will get confused during substitution
    statevar_dict = dict((v.StateVariable(key), unpack_condition(value)) for key, value in kwargs.items() if key in statevar_strings)
    # Sort after default state variable check to fix gh-116
    statevar_dict = collections.OrderedDict(sorted(statevar_dict.items(), key=lambda x: str(x[0])))
    phase_records = build_phase_records(dbf, comps, active_phases, statevar_dict,
                                   models=models, parameters=parameters,
                                   output=output, callables=callables,
                                   verbose=kwargs.pop('verbose', False))
    str_statevar_dict = collections.OrderedDict((str(key), unpack_condition(value)) \
                                                for (key, value) in statevar_dict.items())
    maximum_internal_dof = max(len(models[phase_name].site_fractions) for phase_name in active_phases)
    for phase_name, phase_obj in sorted(active_phases.items()):
        mod = models[phase_name]
        phase_record = phase_records[phase_name]
        points = points_dict[phase_name]
        variables, sublattice_dof = generate_dof(phase_obj, mod.components)
        if points is None:
            points = _sample_phase_constitution(phase_name, phase_obj.constituents, sublattice_dof, comps,
                                                tuple(variables), sampler_dict[phase_name] or point_sample,
                                                fixedgrid_dict[phase_name], pdens_dict[phase_name])
        points = np.atleast_2d(points)

        fp = fake_points and (phase_name == sorted(active_phases.keys())[0])
        phase_ds = _compute_phase_values(nonvacant_components, str_statevar_dict,
                                         points, phase_record, output,
                                         maximum_internal_dof, broadcast=broadcast,
                                         largest_energy=float(largest_energy), fake_points=fp)
        all_phase_data.append(phase_ds)

    # speedup for single-phase case (found by profiling)
    if len(all_phase_data) > 1:
        final_ds = concat(all_phase_data, dim='points')
        final_ds['points'].values = np.arange(len(final_ds['points']))
        final_ds.coords['points'].values = np.arange(len(final_ds['points']))
    else:
        final_ds = all_phase_data[0]
    return final_ds
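
A minimal usage sketch of pycalphad's public calculate with a hypothetical database file:

from pycalphad import Database, calculate

dbf = Database('Al-Ni.tdb')  # hypothetical TDB file
# Sample the molar Gibbs energy surface of LIQUID over a temperature range at fixed P.
res = calculate(dbf, ['AL', 'NI', 'VA'], 'LIQUID',
                P=101325, T=(300, 2000, 50), pdens=500)
print(res.GM.dims, res.GM.shape)  # dimensions include N, P, T and the sampled points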
Example #12
def equilibrium(dbf, comps, phases, conditions, output=None, model=None,
                verbose=False, broadcast=True, calc_opts=None,
                scheduler='sync', parameters=None, solver=None, callables=None,
                **kwargs):
    """
    Calculate the equilibrium state of a system containing the specified
    components and phases, under the specified conditions.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : list
        Names of components to consider in the calculation.
    phases : list or dict
        Names of phases to consider in the calculation.
    conditions : dict or (list of dict)
        StateVariables and their corresponding value.
    output : str or list of str, optional
        Additional equilibrium model properties (e.g., CPM, HM, etc.) to compute.
        These must be defined as attributes in the Model class of each phase.
    model : Model, a dict of phase names to Model, or a seq of both, optional
        Model class to use for each phase.
    verbose : bool, optional
        Print details of calculations. Useful for debugging.
    broadcast : bool
        If True, broadcast conditions against each other. This will compute all combinations.
        If False, each condition should be an equal-length list (or single-valued).
        Disabling broadcasting is useful for calculating equilibrium at selected conditions,
        when those conditions don't comprise a grid.
    calc_opts : dict, optional
        Keyword arguments to pass to `calculate`, the energy/property calculation routine.
    scheduler : Dask scheduler, optional
        Job scheduler for performing the computation.
        If None, return a Dask graph of the computation instead of actually doing it.
    parameters : dict, optional
        Maps SymPy Symbol to numbers, for overriding the values of parameters in the Database.
    solver : pycalphad.core.solver.SolverBase
        Instance of a solver that is used to calculate local equilibria.
        Defaults to a pycalphad.core.solver.InteriorPointSolver.
    callables : dict, optional
        Pre-computed callable functions for equilibrium calculation.

    Returns
    -------
    Structured equilibrium calculation, or Dask graph if scheduler=None.

    Examples
    --------
    None yet.
    """
    if not broadcast:
        raise NotImplementedError('Broadcasting cannot yet be disabled')
    comps = sorted(unpack_components(dbf, comps))
    phases = unpack_phases(phases) or sorted(dbf.phases.keys())
    # remove phases that cannot be active
    list_of_possible_phases = filter_phases(dbf, comps)
    active_phases = sorted(set(list_of_possible_phases).intersection(set(phases)))
    if len(list_of_possible_phases) == 0:
        raise ConditionError('There are no phases in the Database that can be active with components {0}'.format(comps))
    if len(active_phases) == 0:
        raise ConditionError('None of the passed phases ({0}) are active. List of possible phases: {1}.'.format(phases, list_of_possible_phases))
    if isinstance(comps, (str, v.Species)):
        comps = [comps]
    if len(set(comps) - set(dbf.species)) > 0:
        raise EquilibriumError('Components not found in database: {}'
                               .format(','.join([c.name for c in (set(comps) - set(dbf.species))])))
    calc_opts = calc_opts if calc_opts is not None else dict()
    solver = solver if solver is not None else InteriorPointSolver(verbose=verbose)
    parameters = parameters if parameters is not None else dict()
    if isinstance(parameters, dict):
        parameters = OrderedDict(sorted(parameters.items(), key=str))
    models = instantiate_models(dbf, comps, active_phases, model=model, parameters=parameters)
    # Temporary solution until constraint system improves
    if conditions.get(v.N) is None:
        conditions[v.N] = 1
    if np.any(np.array(conditions[v.N]) != 1):
        raise ConditionError('N!=1 is not yet supported, got N={}'.format(conditions[v.N]))
    # Modify conditions values to be within numerical limits, e.g., X(AL)=0
    # Also wrap single-valued conditions with lists
    conds = _adjust_conditions(conditions)

    for cond in conds.keys():
        if isinstance(cond, (v.Composition, v.ChemicalPotential)) and cond.species not in comps:
            raise ConditionError('{} refers to non-existent component'.format(cond))
    state_variables = sorted(get_state_variables(models=models, conds=conds), key=str)
    str_conds = OrderedDict((str(key), value) for key, value in conds.items())
    num_calcs = np.prod([len(i) for i in str_conds.values()])
    components = [x for x in sorted(comps)]
    desired_active_pure_elements = [list(x.constituents.keys()) for x in components]
    desired_active_pure_elements = [el.upper() for constituents in desired_active_pure_elements for el in constituents]
    pure_elements = sorted(set([x for x in desired_active_pure_elements if x != 'VA']))
    if verbose:
        print('Components:', ' '.join([str(x) for x in comps]))
        print('Phases:', end=' ')
    output = output if output is not None else 'GM'
    output = output if isinstance(output, (list, tuple, set)) else [output]
    output = set(output)
    output |= {'GM'}
    output = sorted(output)
    need_hessians = any(type(c) in v.CONDITIONS_REQUIRING_HESSIANS for c in conds.keys())
    phase_records = build_phase_records(dbf, comps, active_phases, conds, models,
                                        output='GM', callables=callables,
                                        parameters=parameters, verbose=verbose,
                                        build_gradients=True, build_hessians=need_hessians)
    if verbose:
        print('[done]', end='\n')

    # 'calculate' accepts conditions through its keyword arguments
    grid_opts = calc_opts.copy()
    statevar_strings = [str(x) for x in state_variables]
    grid_opts.update({key: value for key, value in str_conds.items() if key in statevar_strings})
    if 'pdens' not in grid_opts:
        grid_opts['pdens'] = 500
    grid = delayed(calculate, pure=False)(dbf, comps, active_phases,
                                          model=models, fake_points=True,
                                          callables=callables, output='GM',
                                          parameters=parameters, **grid_opts)
    coord_dict = str_conds.copy()
    coord_dict['vertex'] = np.arange(
        len(pure_elements) + 1)  # +1 is to accommodate the degenerate degree of freedom at the invariant reactions
    coord_dict['component'] = pure_elements
    grid_shape = tuple(len(x) for x in conds.values()) + (len(pure_elements)+1,)
    properties = delayed(starting_point, pure=False)(conds, state_variables, phase_records, grid)
    conditions_per_chunk_per_axis = 2
    if num_calcs > 1:
        # Generate slices of 'properties'
        slices = []
        for val in grid_shape[:-1]:
            idx_arr = list(range(val))
            num_chunks = int(np.floor(val/conditions_per_chunk_per_axis))
            if num_chunks > 0:
                cond_slices = [x for x in np.array_split(np.asarray(idx_arr), num_chunks) if len(x) > 0]
            else:
                cond_slices = [idx_arr]
            slices.append(cond_slices)
        chunk_dims = [len(slc) for slc in slices]
        chunk_grid = np.array(np.unravel_index(np.arange(np.prod(chunk_dims)), chunk_dims)).T
        res = []
        for chunk in chunk_grid:
            prop_slice = properties[OrderedDict(list(zip(str_conds.keys(),
                                                         [np.atleast_1d(sl)[ch] for ch, sl in zip(chunk, slices)])))]
            job = delayed(_solve_eq_at_conditions, pure=False)(comps, prop_slice, phase_records, grid,
                                                               list(str_conds.keys()), state_variables, verbose, solver=solver)
            res.append(job)
        properties = delayed(_merge_property_slices, pure=False)(properties, chunk_grid, slices, list(str_conds.keys()), res)
    else:
        # Single-process job; don't create child processes
        properties = delayed(_solve_eq_at_conditions, pure=False)(comps, properties, phase_records, grid,
                                                                  list(str_conds.keys()), state_variables, verbose, solver=solver)

    # Compute equilibrium values of any additional user-specified properties
    # We already computed these properties so don't recompute them
    output = sorted(set(output) - {'GM', 'MU'})
    for out in output:
        if (out is None) or (len(out) == 0):
            continue
        # TODO: How do we know if a specified property should be per_phase or not?
        # For now, we make a best guess
        if (out == 'degree_of_ordering') or (out == 'DOO'):
            per_phase = True
        else:
            per_phase = False
        eqcal = delayed(_eqcalculate, pure=False)(dbf, comps, active_phases, conditions, out,
                                                  data=properties, per_phase=per_phase,
                                                  callables=callables,
                                                  parameters=parameters,
                                                  model=models, **calc_opts)
        properties = delayed(properties.merge, pure=False)(eqcal, compat='equals')
    if scheduler is not None:
        properties = dask.compute(properties, scheduler=scheduler)[0]
    properties.attrs['created'] = datetime.utcnow().isoformat()
    if len(kwargs) > 0:
        warnings.warn('The following equilibrium keyword arguments were passed, but unused:\n{}'.format(kwargs))
    return properties
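
A minimal usage sketch of pycalphad's public equilibrium with a hypothetical database file and placeholder conditions:

from pycalphad import Database, equilibrium, variables as v

dbf = Database('Al-Ni.tdb')        # hypothetical TDB file
comps = ['AL', 'NI', 'VA']
phases = ['LIQUID', 'FCC_A1']      # assumed to exist in the database
conds = {v.N: 1, v.P: 101325, v.T: (300, 2000, 50), v.X('NI'): 0.3}

eq = equilibrium(dbf, comps, phases, conds)
# Stable phases and their amounts along the temperature grid:
print(eq.Phase.values.squeeze())
print(eq.NP.values.squeeze())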
Example #13
def equilibrium(dbf,
                comps,
                phases,
                conditions,
                output=None,
                model=None,
                verbose=False,
                broadcast=True,
                calc_opts=None,
                scheduler='sync',
                parameters=None,
                solver=None,
                callables=None,
                **kwargs):
    """
    Calculate the equilibrium state of a system containing the specified
    components and phases, under the specified conditions.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : list
        Names of components to consider in the calculation.
    phases : list or dict
        Names of phases to consider in the calculation.
    conditions : dict or (list of dict)
        StateVariables and their corresponding value.
    output : str or list of str, optional
        Additional equilibrium model properties (e.g., CPM, HM, etc.) to compute.
        These must be defined as attributes in the Model class of each phase.
    model : Model, a dict of phase names to Model, or a seq of both, optional
        Model class to use for each phase.
    verbose : bool, optional
        Print details of calculations. Useful for debugging.
    broadcast : bool
        If True, broadcast conditions against each other. This will compute all combinations.
        If False, each condition should be an equal-length list (or single-valued).
        Disabling broadcasting is useful for calculating equilibrium at selected conditions,
        when those conditions don't comprise a grid.
    calc_opts : dict, optional
        Keyword arguments to pass to `calculate`, the energy/property calculation routine.
    scheduler : Dask scheduler, optional
        Job scheduler for performing the computation.
        If None, return a Dask graph of the computation instead of actually doing it.
    parameters : dict, optional
        Maps SymPy Symbol to numbers, for overriding the values of parameters in the Database.
    solver : pycalphad.core.solver.SolverBase
        Instance of a solver that is used to calculate local equilibria.
        Defaults to a pycalphad.core.solver.InteriorPointSolver.
    callables : dict, optional
        Pre-computed callable functions for equilibrium calculation.

    Returns
    -------
    Structured equilibrium calculation, or Dask graph if scheduler=None.

    Examples
    --------
    None yet.
    """
    if not broadcast:
        raise NotImplementedError('Broadcasting cannot yet be disabled')
    from pycalphad import __version__ as pycalphad_version
    comps = sorted(unpack_components(dbf, comps))
    phases = unpack_phases(phases) or sorted(dbf.phases.keys())
    # remove phases that cannot be active
    list_of_possible_phases = filter_phases(dbf, comps)
    active_phases = sorted(
        set(list_of_possible_phases).intersection(set(phases)))
    if len(list_of_possible_phases) == 0:
        raise ConditionError(
            'There are no phases in the Database that can be active with components {0}'
            .format(comps))
    if len(active_phases) == 0:
        raise ConditionError(
            'None of the passed phases ({0}) are active. List of possible phases: {1}.'
            .format(phases, list_of_possible_phases))
    if isinstance(comps, (str, v.Species)):
        comps = [comps]
    if len(set(comps) - set(dbf.species)) > 0:
        raise EquilibriumError('Components not found in database: {}'.format(
            ','.join([c.name for c in (set(comps) - set(dbf.species))])))
    indep_vars = ['T', 'P']
    calc_opts = calc_opts if calc_opts is not None else dict()
    model = model if model is not None else Model
    solver = solver if solver is not None else InteriorPointSolver(
        verbose=verbose)
    parameters = parameters if parameters is not None else dict()
    if isinstance(parameters, dict):
        parameters = OrderedDict(sorted(parameters.items(), key=str))
    # Modify conditions values to be within numerical limits, e.g., X(AL)=0
    # Also wrap single-valued conditions with lists
    conds = _adjust_conditions(conditions)
    for cond in conds.keys():
        if isinstance(cond,
                      (v.Composition,
                       v.ChemicalPotential)) and cond.species not in comps:
            raise ConditionError(
                '{} refers to non-existent component'.format(cond))
    str_conds = OrderedDict((str(key), value) for key, value in conds.items())
    num_calcs = np.prod([len(i) for i in str_conds.values()])
    components = [x for x in sorted(comps)]
    desired_active_pure_elements = [
        list(x.constituents.keys()) for x in components
    ]
    desired_active_pure_elements = [
        el.upper() for constituents in desired_active_pure_elements
        for el in constituents
    ]
    pure_elements = sorted(
        set([x for x in desired_active_pure_elements if x != 'VA']))
    other_output_callables = {}
    if verbose:
        print('Components:', ' '.join([str(x) for x in comps]))
        print('Phases:', end=' ')
    output = output if output is not None else 'GM'
    output = output if isinstance(output, (list, tuple, set)) else [output]
    output = set(output)
    output |= {'GM'}
    output = sorted(output)
    for o in output:
        if o == 'GM':
            eq_callables = build_callables(dbf,
                                           comps,
                                           active_phases,
                                           model=model,
                                           parameters=parameters,
                                           output=o,
                                           build_gradients=True,
                                           callables=callables,
                                           verbose=verbose)
        else:
            other_output_callables[o] = build_callables(dbf,
                                                        comps,
                                                        active_phases,
                                                        model=model,
                                                        parameters=parameters,
                                                        output=o,
                                                        build_gradients=False,
                                                        verbose=False)

    phase_records = eq_callables['phase_records']
    models = eq_callables['model']
    maximum_internal_dof = max(
        len(mod.site_fractions) for mod in models.values())
    if verbose:
        print('[done]', end='\n')

    # 'calculate' accepts conditions through its keyword arguments
    grid_opts = calc_opts.copy()
    grid_opts.update(
        {key: value
         for key, value in str_conds.items() if key in indep_vars})
    if 'pdens' not in grid_opts:
        grid_opts['pdens'] = 500
    coord_dict = str_conds.copy()
    coord_dict['vertex'] = np.arange(
        len(pure_elements) + 1
    )  # +1 is to accommodate the degenerate degree of freedom at the invariant reactions
    grid_shape = np.meshgrid(*coord_dict.values(), indexing='ij',
                             sparse=False)[0].shape
    coord_dict['component'] = pure_elements

    grid = delayed(calculate, pure=False)(dbf,
                                          comps,
                                          active_phases,
                                          output='GM',
                                          model=models,
                                          fake_points=True,
                                          callables=eq_callables,
                                          parameters=parameters,
                                          **grid_opts)

    max_phase_name_len = max(len(name) for name in active_phases)
    # Need to allow for '_FAKE_' pseudo-phase
    max_phase_name_len = max(max_phase_name_len, 6)

    properties = delayed(Dataset, pure=False)(
        {
            'NP': (list(str_conds.keys()) + ['vertex'], np.empty(grid_shape)),
            'GM': (list(str_conds.keys()), np.empty(grid_shape[:-1])),
            'MU': (list(str_conds.keys()) + ['component'],
                   np.empty(grid_shape[:-1] + (len(pure_elements), ))),
            'X': (list(str_conds.keys()) + ['vertex', 'component'],
                  np.empty(grid_shape + (len(pure_elements), ))),
            'Y': (list(str_conds.keys()) + ['vertex', 'internal_dof'],
                  np.empty(grid_shape + (maximum_internal_dof, ))),
            'Phase': (list(str_conds.keys()) + ['vertex'],
                      np.empty(grid_shape, dtype='U%s' % max_phase_name_len)),
            'points': (list(str_conds.keys()) + ['vertex'],
                       np.empty(grid_shape, dtype=np.int32))
        },
        coords=coord_dict,
        attrs={
            'engine': 'pycalphad %s' % pycalphad_version
        },
    )
    # One last call to ensure 'properties' and 'grid' are consistent with one another
    properties = delayed(lower_convex_hull, pure=False)(grid, properties)
    conditions_per_chunk_per_axis = 2
    if num_calcs > 1:
        # Generate slices of 'properties'
        slices = []
        for val in grid_shape[:-1]:
            idx_arr = list(range(val))
            num_chunks = int(np.floor(val / conditions_per_chunk_per_axis))
            if num_chunks > 0:
                cond_slices = [
                    x for x in np.array_split(np.asarray(idx_arr), num_chunks)
                    if len(x) > 0
                ]
            else:
                cond_slices = [idx_arr]
            slices.append(cond_slices)
        chunk_dims = [len(slc) for slc in slices]
        chunk_grid = np.array(
            np.unravel_index(np.arange(np.prod(chunk_dims)), chunk_dims)).T
        res = []
        for chunk in chunk_grid:
            prop_slice = properties[OrderedDict(
                list(
                    zip(str_conds.keys(), [
                        np.atleast_1d(sl)[ch] for ch, sl in zip(chunk, slices)
                    ])))]
            job = delayed(_solve_eq_at_conditions,
                          pure=False)(comps,
                                      prop_slice,
                                      phase_records,
                                      grid,
                                      list(str_conds.keys()),
                                      verbose,
                                      solver=solver)
            res.append(job)
        properties = delayed(_merge_property_slices,
                             pure=False)(properties, chunk_grid, slices,
                                         list(str_conds.keys()), res)
    else:
        # Single-process job; don't create child processes
        properties = delayed(_solve_eq_at_conditions,
                             pure=False)(comps,
                                         properties,
                                         phase_records,
                                         grid,
                                         list(str_conds.keys()),
                                         verbose,
                                         solver=solver)

    # Compute equilibrium values of any additional user-specified properties
    # We already computed these properties so don't recompute them
    output = sorted(set(output) - {'GM', 'MU'})
    for out in output:
        if (out is None) or (len(out) == 0):
            continue
        # TODO: How do we know if a specified property should be per_phase or not?
        # For now, we make a best guess
        if (out == 'degree_of_ordering') or (out == 'DOO'):
            per_phase = True
        else:
            per_phase = False
        eqcal = delayed(_eqcalculate,
                        pure=False)(dbf,
                                    comps,
                                    active_phases,
                                    conditions,
                                    out,
                                    data=properties,
                                    per_phase=per_phase,
                                    callables=other_output_callables[out],
                                    parameters=parameters,
                                    model=models,
                                    **calc_opts)
        properties = delayed(properties.merge, pure=False)(eqcal,
                                                           inplace=True,
                                                           compat='equals')
    if scheduler is not None:
        properties = dask.compute(properties, scheduler=scheduler)[0]
    properties.attrs['created'] = datetime.utcnow().isoformat()
    if len(kwargs) > 0:
        warnings.warn(
            'The following equilibrium keyword arguments were passed, but unused:\n{}'
            .format(kwargs))
    return properties
Example #14
def build_phase_records(dbf,
                        comps,
                        phases,
                        state_variables,
                        models,
                        output='GM',
                        callables=None,
                        parameters=None,
                        verbose=False,
                        build_gradients=True,
                        build_hessians=True):
    """
    Combine compiled callables and callables from conditions into PhaseRecords.

    Parameters
    ----------
    dbf : Database
        A Database object
    comps : List[Union[str, v.Species]]
        List of active pure elements or species.
    phases : list
        List of phase names
    state_variables : Iterable[v.StateVariable]
        State variables used to produce the generated functions.
    models : Mapping[str, Model]
        Mapping of phase names to model instances
    parameters : dict, optional
        Maps SymEngine Symbol to numbers, for overriding the values of parameters in the Database.
    callables : dict, optional
        Pre-computed callables. If None are passed, they will be built.
        Maps {'output' -> {'function' -> {'phase_name' -> AutowrapFunction()}}}
    output : str
        Output property of the particular Model to sample
    verbose : bool, optional
        Print the name of the phase when its callables are built
    build_gradients : bool
        Whether or not to build gradient functions. Defaults to True. Only
        takes effect if callables are not passed.
    build_hessians : bool
        Whether or not to build Hessian functions. Defaults to True. Only
        takes effect if callables are not passed.

    Returns
    -------
    dict
        Dictionary mapping phase names to PhaseRecord instances.

    Notes
    -----
    If callables are passed, don't rebuild them. This means that the callables
    are not checked for incompatibility. Users of build_callables are
    responsible for ensuring that the state variables, parameters and models
    used to construct the callables are compatible with the ones used to
    build the constraints and phase records.

    """
    comps = sorted(unpack_components(dbf, comps))
    parameters = parameters if parameters is not None else {}
    callables = callables if callables is not None else {}
    _constraints = {
        'internal_cons_func': {},
        'internal_cons_jac': {},
        'internal_cons_hess': {},
    }
    phase_records = {}
    state_variables = sorted(get_state_variables(models=models,
                                                 conds=state_variables),
                             key=str)
    param_symbols, param_values = extract_parameters(parameters)

    if callables.get(output) is None:
        callables = build_callables(dbf,
                                    comps,
                                    phases,
                                    models,
                                    parameter_symbols=parameters.keys(),
                                    output=output,
                                    additional_statevars=state_variables,
                                    build_gradients=False,
                                    build_hessians=False)
    # Temporary solution. PhaseRecord needs rework: https://github.com/pycalphad/pycalphad/pull/329#discussion_r634579356
    formulacallables = build_callables(dbf,
                                       comps,
                                       phases,
                                       models,
                                       parameter_symbols=parameters.keys(),
                                       output='G',
                                       additional_statevars=state_variables,
                                       build_gradients=build_gradients,
                                       build_hessians=build_hessians)

    # If a vector of parameters is specified, only pass the first row to the PhaseRecord
    # Future callers of PhaseRecord.obj_parameters_2d() can pass the full param_values array as an argument
    if len(param_values.shape) > 1:
        param_values = param_values[0]

    for name in phases:
        mod = models[name]
        site_fracs = mod.site_fractions
        # build constraint functions
        cfuncs = build_constraints(mod,
                                   state_variables + site_fracs,
                                   parameters=param_symbols)
        _constraints['internal_cons_func'][name] = cfuncs.internal_cons_func
        _constraints['internal_cons_jac'][name] = cfuncs.internal_cons_jac
        _constraints['internal_cons_hess'][name] = cfuncs.internal_cons_hess
        num_internal_cons = cfuncs.num_internal_cons

        phase_records[name.upper()] = PhaseRecord(
            comps, state_variables, site_fracs, param_values,
            callables[output]['callables'][name],
            formulacallables['G']['callables'][name],
            formulacallables['G']['grad_callables'][name],
            formulacallables['G']['hess_callables'][name],
            callables[output]['massfuncs'][name],
            formulacallables['G']['formulamolefuncs'][name],
            formulacallables['G']['formulamolegradfuncs'][name],
            formulacallables['G']['formulamolehessfuncs'][name],
            _constraints['internal_cons_func'][name],
            _constraints['internal_cons_jac'][name],
            _constraints['internal_cons_hess'][name], num_internal_cons)

        if verbose:
            print(name + ' ')
    return phase_records
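A minimal usage sketch for the phase-record builder above (referred to elsewhere on this page as build_phase_records); the TDB file name, components and phases are hypothetical, and the helper imports assume a recent pycalphad layout:

from pycalphad import Database, variables as v
from pycalphad.core.utils import filter_phases, instantiate_models, unpack_components

dbf = Database('AL-NI.tdb')  # hypothetical database file
comps = sorted(unpack_components(dbf, ['AL', 'NI', 'VA']))
phases = filter_phases(dbf, comps, ['LIQUID', 'AL3NI'])
models = instantiate_models(dbf, comps, phases)
# the state-variable conditions determine which state variables the compiled callables accept
phase_records = build_phase_records(dbf, comps, phases, {v.N, v.P, v.T}, models,
                                    build_gradients=True, build_hessians=True)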
Exemplo n.º 15
0
def eq_callables_dict(dbf,
                      comps,
                      phases,
                      model=None,
                      param_symbols=None,
                      output='GM',
                      build_gradients=True):
    """
    Create a dictionary of callable dictionaries for phases in equilibrium

    Parameters
    ----------
    dbf : pycalphad.Database
        A pycalphad Database object
    comps : list
        List of component names
    phases : list
        List of phase names
    model : dict or type
        Dictionary of {phase_name: Model subclass} or a type corresponding to a
        Model subclass. Defaults to ``Model``.
    param_symbols : list
        SymPy Symbol objects that will be preserved in the callable functions.
    output : str
        Output property of the particular Model to sample
    build_gradients : bool
        Whether or not to build gradient functions. Defaults to True.

    Returns
    -------
    dict
        Dictionary of keyword argument callables to pass to equilibrium.

    Notes
    -----
    Based on the pycalphad equilibrium method for building phases as of commit 37ff75ce.

    Examples
    --------
    >>> dbf = Database('AL-NI.tdb')
    >>> comps = ['AL', 'NI', 'VA']
    >>> phases = ['FCC_L12', 'BCC_B2', 'LIQUID', 'AL3NI5', 'AL3NI2', 'AL3NI']
    >>> eq_callables = eq_callables_dict(dbf, comps, phases)
    >>> equilibrium(dbf, comps, phases, conditions, **eq_callables)
    """
    comps = sorted(unpack_components(dbf, comps))
    pure_elements = get_pure_elements(dbf, comps)

    eq_callables = {
        'massfuncs': {},
        'massgradfuncs': {},
        'callables': {},
        'grad_callables': {},
        'hess_callables': {},
    }

    models = unpack_kwarg(model, default_arg=Model)
    param_symbols = param_symbols if param_symbols is not None else []
    # wrap param symbols in Symbols if they are strings
    if all([isinstance(sym, string_types) for sym in param_symbols]):
        param_symbols = [Symbol(sym) for sym in param_symbols]
    param_values = np.zeros_like(param_symbols, dtype=np.float64)

    phase_records = {}
    # create models
    # starting from pycalphad
    for name in phases:
        mod = models[name]
        if isinstance(mod, type):
            models[name] = mod = mod(dbf, comps, name)
        site_fracs = mod.site_fractions
        variables = sorted(site_fracs, key=str)
        try:
            out = getattr(mod, output)
        except AttributeError:
            raise AttributeError(
                'Missing Model attribute {0} specified for {1}'.format(
                    output, mod.__class__))

        # Build the callables of the output
        # Only force undefineds to zero if we're not overriding them
        undefs = list(
            out.atoms(Symbol) - out.atoms(v.StateVariable) -
            set(param_symbols))
        for undef in undefs:
            out = out.xreplace({undef: float(0)})
        build_output = build_functions(out,
                                       tuple([v.P, v.T] + site_fracs),
                                       parameters=param_symbols,
                                       include_grad=build_gradients)
        if build_gradients:
            cf, gf = build_output
        else:
            cf = build_output
            gf = None
        hf = None
        eq_callables['callables'][name] = cf
        eq_callables['grad_callables'][name] = gf
        eq_callables['hess_callables'][name] = hf

        # Build the callables for mass
        # TODO: In principle, we should also check for undefs in mod.moles()

        if build_gradients:
            mcf, mgf = zip(*[
                build_functions(mod.moles(el), [v.P, v.T] + variables,
                                include_obj=True,
                                include_grad=build_gradients,
                                parameters=param_symbols)
                for el in pure_elements
            ])
        else:
            mcf = tuple([
                build_functions(mod.moles(el), [v.P, v.T] + variables,
                                include_obj=True,
                                include_grad=build_gradients,
                                parameters=param_symbols)
                for el in pure_elements
            ])
            mgf = None
        eq_callables['massfuncs'][name] = mcf
        eq_callables['massgradfuncs'][name] = mgf

        # creating the phase records triggers the compile
        phase_records[name.upper()] = PhaseRecord_from_cython(
            comps, variables,
            np.array(dbf.phases[name].sublattices, dtype=np.float64),
            param_values, eq_callables['callables'][name],
            eq_callables['grad_callables'][name],
            eq_callables['hess_callables'][name],
            eq_callables['massfuncs'][name],
            eq_callables['massgradfuncs'][name])

    # finally, add the models to the eq_callables
    eq_callables['model'] = dict(models)
    return eq_callables
Exemplo n.º 16
0
def equilibrium(dbf, comps, phases, conditions, output=None, model=None,
                verbose=False, broadcast=True, calc_opts=None,
                scheduler=dask.local.get_sync,
                parameters=None, **kwargs):
    """
    Calculate the equilibrium state of a system containing the specified
    components and phases, under the specified conditions.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : list
        Names of components to consider in the calculation.
    phases : list or dict
        Names of phases to consider in the calculation.
    conditions : dict or (list of dict)
        StateVariables and their corresponding value.
    output : str or list of str, optional
        Additional equilibrium model properties (e.g., CPM, HM, etc.) to compute.
        These must be defined as attributes in the Model class of each phase.
    model : Model, a dict of phase names to Model, or a seq of both, optional
        Model class to use for each phase.
    verbose : bool, optional
        Print details of calculations. Useful for debugging.
    broadcast : bool
        If True, broadcast conditions against each other. This will compute all combinations.
        If False, each condition should be an equal-length list (or single-valued).
        Disabling broadcasting is useful for calculating equilibrium at selected conditions,
        when those conditions don't comprise a grid.
    calc_opts : dict, optional
        Keyword arguments to pass to `calculate`, the energy/property calculation routine.
    scheduler : Dask scheduler, optional
        Job scheduler for performing the computation.
        If None, return a Dask graph of the computation instead of actually doing it.
    parameters : dict, optional
        Maps SymPy Symbol to numbers, for overriding the values of parameters in the Database.

    Returns
    -------
    Structured equilibrium calculation, or Dask graph if scheduler=None.

    Examples
    --------
    None yet.
    """
    if not broadcast:
        raise NotImplementedError('Broadcasting cannot yet be disabled')
    from pycalphad import __version__ as pycalphad_version
    comps = sorted(unpack_components(dbf, comps))
    phases = unpack_phases(phases) or sorted(dbf.phases.keys())
    # remove phases that cannot be active
    list_of_possible_phases = filter_phases(dbf, comps)
    active_phases = sorted(set(list_of_possible_phases).intersection(set(phases)))
    if len(list_of_possible_phases) == 0:
        raise ConditionError('There are no phases in the Database that can be active with components {0}'.format(comps))
    if len(active_phases) == 0:
        raise ConditionError('None of the passed phases ({0}) are active. List of possible phases: {1}.'.format(phases, list_of_possible_phases))
    if isinstance(comps, (str, v.Species)):
        comps = [comps]
    if len(set(comps) - set(dbf.species)) > 0:
        raise EquilibriumError('Components not found in database: {}'
                               .format(','.join([c.name for c in (set(comps) - set(dbf.species))])))
    indep_vars = ['T', 'P']
    calc_opts = calc_opts if calc_opts is not None else dict()
    model = model if model is not None else Model
    phase_records = dict()
    diagnostic = kwargs.pop('_diagnostic', False)
    callable_dict = kwargs.pop('callables', dict())
    mass_dict = unpack_kwarg(kwargs.pop('massfuncs', None), default_arg=None)
    mass_grad_dict = unpack_kwarg(kwargs.pop('massgradfuncs', None), default_arg=None)
    grad_callable_dict = kwargs.pop('grad_callables', dict())
    hess_callable_dict = kwargs.pop('hess_callables', dict())
    parameters = parameters if parameters is not None else dict()
    if isinstance(parameters, dict):
        parameters = OrderedDict(sorted(parameters.items(), key=str))
    param_symbols = tuple(parameters.keys())
    param_values = np.atleast_1d(np.array(list(parameters.values()), dtype=np.float64))
    maximum_internal_dof = 0
    # Modify conditions values to be within numerical limits, e.g., X(AL)=0
    # Also wrap single-valued conditions with lists
    conds = _adjust_conditions(conditions)
    for cond in conds.keys():
        if isinstance(cond, (v.Composition, v.ChemicalPotential)) and cond.species not in comps:
            raise ConditionError('{} refers to non-existent component'.format(cond))
    str_conds = OrderedDict((str(key), value) for key, value in conds.items())
    num_calcs = np.prod([len(i) for i in str_conds.values()])
    indep_vals = list([float(x) for x in np.atleast_1d(val)]
                      for key, val in str_conds.items() if key in indep_vars)
    components = [x for x in sorted(comps)]
    desired_active_pure_elements = [list(x.constituents.keys()) for x in components]
    desired_active_pure_elements = [el.upper() for constituents in desired_active_pure_elements for el in constituents]
    pure_elements = sorted(set([x for x in desired_active_pure_elements if x != 'VA']))
    # Construct models for each phase; prioritize user models
    models = unpack_kwarg(model, default_arg=Model)
    if verbose:
        print('Components:', ' '.join([str(x) for x in comps]))
        print('Phases:', end=' ')
    max_phase_name_len = max(len(name) for name in active_phases)
    # Need to allow for '_FAKE_' pseudo-phase
    max_phase_name_len = max(max_phase_name_len, 6)
    for name in active_phases:
        mod = models[name]
        if isinstance(mod, type):
            models[name] = mod = mod(dbf, comps, name, parameters=parameters)
        site_fracs = mod.site_fractions
        variables = sorted(site_fracs, key=str)
        maximum_internal_dof = max(maximum_internal_dof, len(site_fracs))
        out = models[name].energy
        if (not callable_dict.get(name, False)) or not (grad_callable_dict.get(name, False)):
            # Only force undefineds to zero if we're not overriding them
            undefs = [x for x in out.free_symbols if (not isinstance(x, v.StateVariable)) and not (x in param_symbols)]
            for undef in undefs:
                out = out.xreplace({undef: float(0)})
            cf, gf = build_functions(out, tuple([v.P, v.T] + site_fracs),
                                     parameters=param_symbols)
            hf = None
            if callable_dict.get(name, None) is None:
                callable_dict[name] = cf
            if grad_callable_dict.get(name, None) is None:
                grad_callable_dict[name] = gf
            if hess_callable_dict.get(name, None) is None:
                hess_callable_dict[name] = hf

        if (mass_dict[name] is None) or (mass_grad_dict[name] is None):
            # TODO: In principle, we should also check for undefs in mod.moles()
            tup1, tup2 = zip(*[build_functions(mod.moles(el), [v.P, v.T] + variables,
                                               include_obj=True, include_grad=True,
                                               parameters=param_symbols)
                               for el in pure_elements])
            if mass_dict[name] is None:
                mass_dict[name] = tup1
            if mass_grad_dict[name] is None:
                mass_grad_dict[name] = tup2

        phase_records[name.upper()] = PhaseRecord_from_cython(comps, variables,
                                                              np.array(dbf.phases[name].sublattices, dtype=np.float64),
                                                              param_values, callable_dict[name],
                                                              grad_callable_dict[name], hess_callable_dict[name],
                                                              mass_dict[name], mass_grad_dict[name])
        if verbose:
            print(name, end=' ')
    if verbose:
        print('[done]', end='\n')

    # 'calculate' accepts conditions through its keyword arguments
    grid_opts = calc_opts.copy()
    grid_opts.update({key: value for key, value in str_conds.items() if key in indep_vars})
    if 'pdens' not in grid_opts:
        grid_opts['pdens'] = 500
    coord_dict = str_conds.copy()
    coord_dict['vertex'] = np.arange(len(pure_elements))
    grid_shape = np.meshgrid(*coord_dict.values(),
                             indexing='ij', sparse=False)[0].shape
    coord_dict['component'] = pure_elements

    grid = delayed(calculate, pure=False)(dbf, comps, active_phases, output='GM',
                                          model=models, callables=callable_dict, massfuncs=mass_dict,
                                          fake_points=True, parameters=parameters, **grid_opts)

    properties = delayed(Dataset, pure=False)({'NP': (list(str_conds.keys()) + ['vertex'],
                                                      np.empty(grid_shape)),
                                               'GM': (list(str_conds.keys()),
                                                      np.empty(grid_shape[:-1])),
                                               'MU': (list(str_conds.keys()) + ['component'],
                                                      np.empty(grid_shape)),
                                               'X': (list(str_conds.keys()) + ['vertex', 'component'],
                                                     np.empty(grid_shape + (grid_shape[-1],))),
                                               'Y': (list(str_conds.keys()) + ['vertex', 'internal_dof'],
                                                     np.empty(grid_shape + (maximum_internal_dof,))),
                                               'Phase': (list(str_conds.keys()) + ['vertex'],
                                                         np.empty(grid_shape, dtype='U%s' % max_phase_name_len)),
                                               'points': (list(str_conds.keys()) + ['vertex'],
                                                          np.empty(grid_shape, dtype=np.int32))
                                               },
                                              coords=coord_dict,
                                              attrs={'engine': 'pycalphad %s' % pycalphad_version},
                                              )
    # One last call to ensure 'properties' and 'grid' are consistent with one another
    properties = delayed(lower_convex_hull, pure=False)(grid, properties)
    conditions_per_chunk_per_axis = 2
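    # e.g. a condition axis with 10 values is split below into 5 chunks of 2 conditions each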
    if num_calcs > 1:
        # Generate slices of 'properties'
        slices = []
        for val in grid_shape[:-1]:
            idx_arr = list(range(val))
            num_chunks = int(np.floor(val/conditions_per_chunk_per_axis))
            if num_chunks > 0:
                cond_slices = [x for x in np.array_split(np.asarray(idx_arr), num_chunks) if len(x) > 0]
            else:
                cond_slices = [idx_arr]
            slices.append(cond_slices)
        chunk_dims = [len(slc) for slc in slices]
        chunk_grid = np.array(np.unravel_index(np.arange(np.prod(chunk_dims)), chunk_dims)).T
        res = []
        for chunk in chunk_grid:
            prop_slice = properties[OrderedDict(list(zip(str_conds.keys(),
                                                         [np.atleast_1d(sl)[ch] for ch, sl in zip(chunk, slices)])))]
            job = delayed(_solve_eq_at_conditions, pure=False)(comps, prop_slice, phase_records, grid,
                                                              list(str_conds.keys()), verbose)
            res.append(job)
        properties = delayed(_merge_property_slices, pure=False)(properties, chunk_grid, slices, list(str_conds.keys()), res)
    else:
        # Single-process job; don't create child processes
        properties = delayed(_solve_eq_at_conditions, pure=False)(comps, properties, phase_records, grid,
                                                                 list(str_conds.keys()), verbose)

    # Compute equilibrium values of any additional user-specified properties
    output = output if isinstance(output, (list, tuple, set)) else [output]
    # We already computed these properties so don't recompute them
    output = sorted(set(output) - {'GM', 'MU'})
    for out in output:
        if (out is None) or (len(out) == 0):
            continue
        # TODO: How do we know if a specified property should be per_phase or not?
        # For now, we make a best guess
        if (out == 'degree_of_ordering') or (out == 'DOO'):
            per_phase = True
        else:
            per_phase = False
        eqcal = delayed(_eqcalculate, pure=False)(dbf, comps, active_phases, conditions, out,
                                                  data=properties, per_phase=per_phase, model=models, **calc_opts)
        properties = delayed(properties.merge, pure=False)(eqcal, inplace=True, compat='equals')
    if scheduler is not None:
        properties = dask.compute(properties, get=scheduler)[0]
    properties.attrs['created'] = datetime.utcnow().isoformat()
    if len(kwargs) > 0:
        warnings.warn('The following equilibrium keyword arguments were passed, but unused:\n{}'.format(kwargs))
    return properties
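The Examples section above is still empty; a minimal call of this older dask-backed equilibrium API might look like the following (database file and conditions are hypothetical):

from pycalphad import Database, variables as v

dbf = Database('AL-NI.tdb')  # hypothetical database file
comps = ['AL', 'NI', 'VA']
phases = ['LIQUID', 'AL3NI']
conditions = {v.P: 101325, v.T: (300, 2000, 100), v.X('AL'): 0.25}
eq = equilibrium(dbf, comps, phases, conditions, output='HM')
print(eq.GM.values.shape)  # Gibbs energy on the broadcast (P, T, X_AL) condition grid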
Exemplo n.º 17
0
def build_callables(dbf,
                    comps,
                    phases,
                    model=None,
                    parameters=None,
                    callables=None,
                    output='GM',
                    build_gradients=True,
                    verbose=False):
    """
    Create dictionaries of callable dictionaries and PhaseRecords.

    Parameters
    ----------
    dbf : Database
        A Database object
    comps : list
        List of component names
    phases : list
        List of phase names
    model : dict or type
        Dictionary of {phase_name: Model subclass} or a type corresponding to a
        Model subclass. Defaults to ``Model``.
    parameters : dict, optional
        Maps SymPy Symbol to numbers, for overriding the values of parameters in the Database.
    callables : dict, optional
        Pre-computed callables
    output : str
        Output property of the particular Model to sample
    build_gradients : bool
        Whether or not to build gradient functions. Defaults to True.
    verbose : bool
        Print the name of the phase when its callables are built

    Returns
    -------
    callables : dict
        Dictionary of keyword argument callables to pass to equilibrium.

    Examples
    --------
    >>> dbf = Database('AL-NI.tdb')
    >>> comps = ['AL', 'NI', 'VA']
    >>> phases = ['FCC_L12', 'BCC_B2', 'LIQUID', 'AL3NI5', 'AL3NI2', 'AL3NI']
    >>> callables = build_callables(dbf, comps, phases)
    >>> equilibrium(dbf, comps, phases, conditions, **callables)
    """
    parameters = parameters if parameters is not None else {}
    if len(parameters) > 0:
        param_symbols, param_values = zip(*[(key, val) for key, val in sorted(
            parameters.items(), key=operator.itemgetter(0))])
        param_values = np.asarray(param_values, dtype=np.float64)
    else:
        param_symbols = []
        param_values = np.empty(0)
    comps = sorted(unpack_components(dbf, comps))
    pure_elements = get_pure_elements(dbf, comps)

    callables = callables if callables is not None else {}
    _callables = {
        'massfuncs': {},
        'massgradfuncs': {},
        'callables': {},
        'grad_callables': {}
    }

    models = unpack_kwarg(model, default_arg=Model)
    param_symbols = [wrap_symbol(sym) for sym in param_symbols]
    phase_records = {}
    # create models
    for name in phases:
        mod = models[name]
        if isinstance(mod, type):
            models[name] = mod = mod(dbf,
                                     comps,
                                     name,
                                     parameters=param_symbols)
        site_fracs = mod.site_fractions
        variables = sorted(site_fracs, key=str)
        try:
            out = getattr(mod, output)
        except AttributeError:
            raise AttributeError(
                'Missing Model attribute {0} specified for {1}'.format(
                    output, mod.__class__))

        if callables.get('callables', {}).get(name, False) and \
                ((not build_gradients) or callables.get('grad_callables',{}).get(name, False)):
            _callables['callables'][name] = callables['callables'][name]
            if build_gradients:
                _callables['grad_callables'][name] = callables[
                    'grad_callables'][name]
            else:
                _callables['grad_callables'][name] = None
        else:
            # Build the callables of the output
            # Only force undefineds to zero if we're not overriding them
            undefs = {
                x
                for x in out.free_symbols
                if not isinstance(x, v.StateVariable)
            } - set(param_symbols)
            undef_vals = repeat(0., len(undefs))
            out = out.xreplace(dict(zip(undefs, undef_vals)))
            build_output = build_functions(out,
                                           tuple([v.P, v.T] + site_fracs),
                                           parameters=param_symbols,
                                           include_grad=build_gradients)
            if build_gradients:
                cf, gf = build_output
            else:
                cf = build_output
                gf = None
            _callables['callables'][name] = cf
            _callables['grad_callables'][name] = gf

        if callables.get('massfuncs', {}).get(name, False) and \
                ((not build_gradients) or callables.get('massgradfuncs', {}).get(name, False)):
            _callables['massfuncs'][name] = callables['massfuncs'][name]
            if build_gradients:
                _callables['massgradfuncs'][name] = callables['massgradfuncs'][
                    name]
            else:
                _callables['massgradfuncs'][name] = None
        else:
            # Build the callables for mass
            # TODO: In principle, we should also check for undefs in mod.moles()

            if build_gradients:
                mcf, mgf = zip(*[
                    build_functions(mod.moles(el), [v.P, v.T] + variables,
                                    include_obj=True,
                                    include_grad=build_gradients,
                                    parameters=param_symbols)
                    for el in pure_elements
                ])
            else:
                mcf = tuple([
                    build_functions(mod.moles(el), [v.P, v.T] + variables,
                                    include_obj=True,
                                    include_grad=build_gradients,
                                    parameters=param_symbols)
                    for el in pure_elements
                ])
                mgf = None
            _callables['massfuncs'][name] = mcf
            _callables['massgradfuncs'][name] = mgf
        if not callables.get('phase_records', {}).get(name, False):
            pv = param_values
        else:
            # Copy parameter values from old PhaseRecord, if it exists
            pv = callables['phase_records'][name].parameters
        phase_records[name.upper()] = PhaseRecord_from_cython(
            comps, variables,
            np.array(dbf.phases[name].sublattices, dtype=np.float64), pv,
            _callables['callables'][name], _callables['grad_callables'][name],
            _callables['massfuncs'][name], _callables['massgradfuncs'][name])
        if verbose:
            print(name + ' ')

    # Update PhaseRecords with any user-specified parameter values, in case we skipped the build phase
    # We assume here that users know what they are doing, and pass compatible combinations of callables and parameters
    # See discussion in gh-192 for details
    if len(param_values) > 0:
        for prx_name in phase_records:
            if len(phase_records[prx_name].parameters) != len(param_values):
                raise ValueError(
                    'User-specified callables and parameters are incompatible')
            phase_records[prx_name].parameters = param_values
    # finally, add the models to the callables
    _callables['model'] = dict(models)
    _callables['phase_records'] = phase_records
    return _callables
Exemplo n.º 18
0
def build_callables(dbf, comps, phases, models, parameter_symbols=None,
                    output='GM', build_gradients=True, build_hessians=False,
                    additional_statevars=None):
    """
    Create a compiled callables dictionary.

    Parameters
    ----------
    dbf : Database
        A Database object
    comps : list
        List of component names
    phases : list
        List of phase names
    models : dict
        Dictionary of {phase_name: Model subclass}
    parameter_symbols : list, optional
        List of string or SymPy Symbols that will be overridden in the callables.
    output : str, optional
        Output property of the particular Model to sample. Defaults to 'GM'
    build_gradients : bool, optional
        Whether or not to build gradient functions. Defaults to True.
    build_hessians : bool, optional
        Whether or not to build Hessian functions. Defaults to False.
    additional_statevars : set, optional
        State variables to include in the callables that may not be in the models (e.g. from conditions)

    Returns
    -------
    callables : dict
        Dictionary of keyword argument callables to pass to equilibrium.
        Maps {'output' -> {'function' -> {'phase_name' -> AutowrapFunction()}}}.

    Notes
    -----
    *All* the state variables used in calculations must be specified.
    If these are not specified as state variables of the models (e.g. often the
    case for v.N), then it must be supplied by the additional_statevars keyword
    argument.

    Examples
    --------
    >>> from pycalphad import Database, equilibrium, variables as v
    >>> from pycalphad.codegen.callables import build_callables
    >>> from pycalphad.core.utils import instantiate_models
    >>> dbf = Database('AL-NI.tdb')
    >>> comps = ['AL', 'NI', 'VA']
    >>> phases = ['LIQUID', 'AL3NI5', 'AL3NI2', 'AL3NI']
    >>> models = instantiate_models(dbf, comps, phases)
    >>> callables = build_callables(dbf, comps, phases, models, additional_statevars={v.P, v.T, v.N})
    >>> 'GM' in callables.keys()
    True
    >>> 'massfuncs' in callables['GM']
    True
    >>> conditions = {v.P: 101325, v.T: 2500, v.X('AL'): 0.2}
    >>> equilibrium(dbf, comps, phases, conditions, callables=callables)
    """
    additional_statevars = set(additional_statevars) if additional_statevars is not None else set()
    parameter_symbols = parameter_symbols if parameter_symbols is not None else []
    parameter_symbols = sorted([wrap_symbol(x) for x in parameter_symbols], key=str)
    comps = sorted(unpack_components(dbf, comps))
    pure_elements = get_pure_elements(dbf, comps)

    _callables = {
        'massfuncs': {},
        'massgradfuncs': {},
        'masshessfuncs': {},
        'callables': {},
        'grad_callables': {},
        'hess_callables': {},
        'internal_cons': {},
        'internal_jac': {},
        'internal_cons_hess': {},
        'mp_cons': {},
        'mp_jac': {},
    }

    state_variables = get_state_variables(models=models)
    state_variables |= additional_statevars
    if state_variables != {v.T, v.P, v.N}:
        warnings.warn("State variables in `build_callables` are not {{N, P, T}}, "
                      "but {}. Be sure you know what you are doing. "
                      "State variables can be added with the `additional_statevars` "
                      "argument.".format(state_variables))
    state_variables = sorted(state_variables, key=str)

    for name in phases:
        mod = models[name]
        site_fracs = mod.site_fractions
        try:
            out = getattr(mod, output)
        except AttributeError:
            raise AttributeError('Missing Model attribute {0} specified for {1}'
                                 .format(output, mod.__class__))

        # Build the callables of the output
        # Only force undefineds to zero if we're not overriding them
        undefs = {x for x in out.free_symbols if not isinstance(x, v.StateVariable)} - set(parameter_symbols)
        undef_vals = repeat(0., len(undefs))
        out = out.xreplace(dict(zip(undefs, undef_vals)))
        build_output = build_functions(out, tuple(state_variables + site_fracs), parameters=parameter_symbols,
                                       include_grad=build_gradients, include_hess=build_hessians)
        cf, gf, hf = build_output.func, build_output.grad, build_output.hess
        _callables['callables'][name] = cf
        _callables['grad_callables'][name] = gf
        _callables['hess_callables'][name] = hf

        # Build the callables for mass
        # TODO: In principle, we should also check for undefs in mod.moles()
        mcf, mgf, mhf = zip(*[build_functions(mod.moles(el), state_variables + site_fracs,
                                              include_obj=True,
                                              include_grad=build_gradients,
                                              include_hess=build_hessians,
                                              parameters=parameter_symbols)
                              for el in pure_elements])

        _callables['massfuncs'][name] = mcf
        _callables['massgradfuncs'][name] = mgf
        _callables['masshessfuncs'][name] = mhf
    return {output: _callables}
Exemplo n.º 19
0
def build_eqpropdata(
        data: tinydb.database.Document,
        dbf: Database,
        parameters: Optional[Dict[str, float]] = None,
        data_weight_dict: Optional[Dict[str, float]] = None) -> EqPropData:
    """
    Build EqPropData for the calculations corresponding to a single dataset.

    Parameters
    ----------
    data : tinydb.database.Document
        Document corresponding to a single ESPEI dataset.
    dbf : Database
        Database that should be used to construct the `Model` and `PhaseRecord` objects.
    parameters : Optional[Dict[str, float]]
        Mapping of parameter symbols to values.
    data_weight_dict : Optional[Dict[str, float]]
        Mapping of a data type (e.g. `HM` or `SM`) to a weight.

    Returns
    -------
    EqPropData
    """
    parameters = parameters if parameters is not None else {}
    data_weight_dict = data_weight_dict if data_weight_dict is not None else {}
    property_std_deviation = {
        'HM': 500.0,  # J/mol
        'SM': 0.2,  # J/K-mol
        'CPM': 0.2,  # J/K-mol
    }

    params_keys, _ = extract_parameters(parameters)

    data_comps = list(set(data['components']).union({'VA'}))
    species = sorted(unpack_components(dbf, data_comps), key=str)
    data_phases = filter_phases(dbf, species, candidate_phases=data['phases'])
    models = instantiate_models(dbf,
                                species,
                                data_phases,
                                parameters=parameters)
    output = data['output']
    property_output = output.split('_')[0]  # property without _FORM, _MIX, etc.
    samples = np.array(data['values']).flatten()
    reference = data.get('reference', '')

    # Models are now modified in response to the data from this data
    if 'reference_states' in data:
        # unreferenced model property so we can tell shift_reference_state what to build
        property_output = output[:-1] if output.endswith('R') else output
        reference_states = []
        for el, vals in data['reference_states'].items():
            reference_states.append(
                ReferenceState(
                    v.Species(el),
                    vals['phase'],
                    fixed_statevars=vals.get('fixed_state_variables')))
        for mod in models.values():
            mod.shift_reference_state(reference_states,
                                      dbf,
                                      output=(property_output, ))

    # Add default for N. Nothing else is supported in pycalphad anyway.
    data['conditions'].setdefault('N', 1.0)
    pot_conds = OrderedDict([(getattr(v, key),
                              unpack_condition(data['conditions'][key]))
                             for key in sorted(data['conditions'].keys())
                             if not key.startswith('X_')])
    comp_conds = OrderedDict([(v.X(key[2:]),
                               unpack_condition(data['conditions'][key]))
                              for key in sorted(data['conditions'].keys())
                              if key.startswith('X_')])

    phase_records = build_phase_records(dbf,
                                        species,
                                        data_phases, {
                                            **pot_conds,
                                            **comp_conds
                                        },
                                        models,
                                        parameters=parameters,
                                        build_gradients=True,
                                        build_hessians=True)

    # Now we need to unravel the composition conditions
    # (from Dict[v.X, Sequence[float]] to Sequence[Dict[v.X, float]]), since the
    # composition conditions are only broadcast against the potentials, not
    # each other. Each individual composition needs to be computed
    # independently, since broadcasting over composition cannot be turned off
    # in pycalphad.
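    # e.g. {v.X('AL'): [0.25, 0.33]} becomes [{v.X('AL'): 0.25}, {v.X('AL'): 0.33}]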
    rav_comp_conds = [
        OrderedDict(zip(comp_conds.keys(), pt_comps))
        for pt_comps in zip(*comp_conds.values())
    ]

    # Build weights, should be the same size as the values
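    # each weight is property_std_deviation / data_type_weight / dataset_weight, so larger user weights mean a smaller effective standard deviation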
    total_num_calculations = len(rav_comp_conds) * np.prod(
        [len(vals) for vals in pot_conds.values()])
    dataset_weights = np.array(data.get('weight',
                                        1.0)) * np.ones(total_num_calculations)
    weights = (property_std_deviation.get(property_output, 1.0) /
               data_weight_dict.get(property_output, 1.0) /
               dataset_weights).flatten()

    return EqPropData(dbf, species, data_phases, pot_conds, rav_comp_conds,
                      models, params_keys, phase_records, output, samples,
                      weights, reference)
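For reference, a rough sketch of the kind of single-dataset document build_eqpropdata expects; every field value below is illustrative only, and the ESPEI dataset documentation is the authoritative schema:

data = {
    'components': ['AL', 'NI'],
    'phases': ['LIQUID'],
    'conditions': {'P': 101325, 'T': [1800, 1900], 'X_AL': 0.25},
    'output': 'HM_MIX',
    'values': [[[-2000.0], [-2100.0]]],
    'reference': 'hypothetical reference',
}
# eq_prop_data = build_eqpropdata(data, dbf)  # dbf: a pycalphad Database containing AL and NI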
Exemplo n.º 20
0
    def __init__(self, dbe, comps, phase_name, parameters=None, build_reference=True):
        self.components = set()
        self.constituents = []
        self.phase_name = phase_name.upper()
        phase = dbe.phases[self.phase_name]
        self.site_ratios = list(phase.sublattices)
        for idx, sublattice in enumerate(phase.constituents):
            subl_comps = set(sublattice).intersection(unpack_components(dbe, comps))
            self.components |= subl_comps
            # Support for variable site ratios in ionic liquid model
            if phase.model_hints.get('ionic_liquid_2SL', False):
                if idx == 0:
                    subl_idx = 1
                elif idx == 1:
                    subl_idx = 0
                else:
                    raise ValueError('Two-sublattice ionic liquid specified with more than two sublattices')
                self.site_ratios[subl_idx] = Add(*[v.SiteFraction(self.phase_name, idx, spec) * abs(spec.charge) for spec in subl_comps])
        if phase.model_hints.get('ionic_liquid_2SL', False):
            # Special treatment of "neutral" vacancies in 2SL ionic liquid
            # These are treated as having variable valence
            for idx, sublattice in enumerate(phase.constituents):
                subl_comps = set(sublattice).intersection(unpack_components(dbe, comps))
                if v.Species('VA') in subl_comps:
                    if idx == 0:
                        subl_idx = 1
                    elif idx == 1:
                        subl_idx = 0
                    else:
                        raise ValueError('Two-sublattice ionic liquid specified with more than two sublattices')
                    self.site_ratios[subl_idx] += self.site_ratios[idx] * v.SiteFraction(self.phase_name, idx, v.Species('VA'))
        self.site_ratios = tuple(self.site_ratios)

        # Verify that this phase is still possible to build
        for sublattice in phase.constituents:
            if len(set(sublattice).intersection(self.components)) == 0:
                # None of the components in a sublattice are active
                # We cannot build a model of this phase
                raise DofError(
                    '{0}: Sublattice {1} of {2} has no components in {3}' \
                    .format(self.phase_name, sublattice,
                            phase.constituents,
                            self.components))
            self.constituents.append(set(sublattice).intersection(self.components))
        self.components = sorted(self.components)
        desired_active_pure_elements = [list(x.constituents.keys()) for x in self.components]
        desired_active_pure_elements = [el.upper() for constituents in desired_active_pure_elements
                                        for el in constituents]
        self.pure_elements = sorted(set(desired_active_pure_elements))
        self.nonvacant_elements = [x for x in self.pure_elements if x != 'VA']

        # Convert string symbol names to sympy Symbol objects
        # This makes xreplace work with the symbols dict
        symbols = {Symbol(s): val for s, val in dbe.symbols.items()}

        if parameters is not None:
            self._parameters_arg = parameters
            if isinstance(parameters, dict):
                symbols.update([(wrap_symbol(s), val) for s, val in parameters.items()])
            else:
                # Lists of symbols that should remain symbolic
                for s in parameters:
                    symbols.pop(wrap_symbol(s))
        else:
            self._parameters_arg = None

        self._symbols = {wrap_symbol(key): value for key, value in symbols.items()}

        self.models = OrderedDict()
        self.build_phase(dbe)
        # build reference model, this needs to be behind a flag to avoid recursion
        if build_reference:
            self.build_reference_model(dbe)

        for name, value in self.models.items():
            self.models[name] = self.symbol_replace(value, symbols)

        self.site_fractions = sorted([x for x in self.variables if isinstance(x, v.SiteFraction)], key=str)
        self.state_variables = sorted([x for x in self.variables if not isinstance(x, v.SiteFraction)], key=str)
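A short sketch of how this constructor is typically exercised (the TDB file name is hypothetical); the two attributes set on the last lines above are the ones most downstream code relies on:

from pycalphad import Database, Model

dbf = Database('AL-NI.tdb')  # hypothetical database file
mod = Model(dbf, ['AL', 'NI', 'VA'], 'LIQUID')
print(mod.constituents)     # active species in each sublattice
print(mod.site_fractions)   # sorted v.SiteFraction degrees of freedom
print(mod.state_variables)  # non-site-fraction variables, e.g. [v.N, v.P, v.T]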
Exemplo n.º 21
0
def get_zpf_data(dbf: Database,
                 comps: Sequence[str],
                 phases: Sequence[str],
                 datasets: PickleableTinyDB,
                 parameters: Dict[str, float],
                 model: Optional[Dict[str, Type[Model]]] = None):
    """
    Return the ZPF data used in the calculation of ZPF error

    Parameters
    ----------
    dbf : Database
        Database used to construct the Model and PhaseRecord objects
    comps : list
        List of active component names
    phases : list
        List of phases to consider
    datasets : espei.utils.PickleableTinyDB
        Datasets that contain single phase data
    parameters : dict
        Dictionary mapping symbols to optimize to their initial values
    model : Optional[Dict[str, Type[Model]]]
        Dictionary phase names to pycalphad Model classes.

    Returns
    -------
    list
        List of data dictionaries with keys ``weight``, ``phase_regions`` and ``dataset_reference``.
    """
    desired_data = datasets.search(
        (tinydb.where('output') == 'ZPF')
        & (tinydb.where('components').test(lambda x: set(x).issubset(comps)))
        & (tinydb.where('phases').test(
            lambda x: len(set(phases).intersection(x)) > 0)))

    zpf_data = []  # 1:1 correspondence with each dataset
    for data in desired_data:
        data_comps = list(set(data['components']).union({'VA'}))
        species = sorted(unpack_components(dbf, data_comps), key=str)
        data_phases = filter_phases(dbf, species, candidate_phases=phases)
        models = instantiate_models(dbf,
                                    species,
                                    data_phases,
                                    model=model,
                                    parameters=parameters)
        # assumed N, P, T state variables
        phase_recs = build_phase_records(dbf,
                                         species,
                                         data_phases, {v.N, v.P, v.T},
                                         models,
                                         parameters=parameters,
                                         build_gradients=True,
                                         build_hessians=True)
        all_phase_points = {
            phase_name: _sample_phase_constitution(models[phase_name],
                                                   point_sample, True, 50)
            for phase_name in data_phases
        }
        all_regions = data['values']
        conditions = data['conditions']
        phase_regions = []
        # Each phase_region is one set of phases in equilibrium (on a tie-line),
        # e.g. [["ALPHA", ["B"], [0.25]], ["BETA", ["B"], [0.5]]]
        for idx, phase_region in enumerate(all_regions):
            # Extract the conditions for entire phase region
            pot_conds = _extract_pot_conds(conditions, idx)
            pot_conds.setdefault(v.N, 1.0)  # Add v.N condition, if missing
            # Extract all the phases and compositions from the tie-line points
            vertices = []
            for vertex in phase_region:
                phase_name, comp_conds, disordered_flag = _extract_phases_comps(
                    vertex)
                # Construct single-phase points satisfying the conditions for each phase in the region
                mod = models[phase_name]
                composition = _compute_vertex_composition(
                    data_comps, comp_conds)
                if np.any(np.isnan(composition)):
                    # We can't construct points because we don't have a known composition
                    has_missing_comp_cond = True
                    phase_points = None
                elif _phase_is_stoichiometric(mod):
                    has_missing_comp_cond = False
                    phase_points = None
                else:
                    has_missing_comp_cond = False
                    # Only sample points that have an average mass residual within tol
                    tol = 0.02
                    phase_points = _subsample_phase_points(
                        phase_recs[phase_name], all_phase_points[phase_name],
                        composition, tol)
                    assert phase_points.shape[0] > 0, f"phase {phase_name} must have at least one set of points within the target tolerance {pot_conds} {comp_conds}"
                vtx = RegionVertex(phase_name, composition, comp_conds,
                                   phase_points, phase_recs, disordered_flag,
                                   has_missing_comp_cond)
                vertices.append(vtx)
            region = PhaseRegion(vertices, pot_conds, species, data_phases,
                                 models)
            phase_regions.append(region)

        data_dict = {
            'weight': data.get('weight', 1.0),
            'phase_regions': phase_regions,
            'dataset_reference': data['reference']
        }
        zpf_data.append(data_dict)
    return zpf_data
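For orientation, an illustrative shape of a ZPF dataset entry that the tinydb query above would match; the numbers are made up and the real schema is defined by the ESPEI dataset documentation:

zpf_dataset = {
    'output': 'ZPF',
    'components': ['AL', 'NI', 'VA'],
    'phases': ['LIQUID', 'AL3NI'],
    'conditions': {'P': 101325, 'T': [1400.0]},
    'values': [
        # one phase region (tie-line); each vertex is [phase_name, [components], [mole fractions]]
        [['LIQUID', ['NI'], [0.30]], ['AL3NI', ['NI'], [0.25]]],
    ],
    'reference': 'hypothetical reference',
}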
Exemplo n.º 22
0
def calculate(dbf,
              comps,
              phases,
              mode=None,
              output='GM',
              fake_points=False,
              broadcast=True,
              parameters=None,
              to_xarray=True,
              **kwargs):
    """
    Sample the property surface of 'output' containing the specified
    components and phases. Model parameters are taken from 'dbf' and any
    state variables (T, P, etc.) can be specified as keyword arguments.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : str or sequence
        Names of components to consider in the calculation.
    phases : str or sequence
        Names of phases to consider in the calculation.
    mode : string, optional
        See 'make_callable' docstring for details.
    output : string, optional
        Model attribute to sample.
    fake_points : bool, optional (Default: False)
        If True, the first few points of the output surface will be fictitious
        points used to define an equilibrium hyperplane guaranteed to be above
        all the other points. This is used for convex hull computations.
    broadcast : bool, optional
        If True, broadcast given state variable lists against each other to create a grid.
        If False, assume state variables are given as equal-length lists.
    points : ndarray or a dict of phase names to ndarray, optional
        Columns of ndarrays must be internal degrees of freedom (site fractions), sorted.
        If this is not specified, points will be generated automatically.
    pdens : int, a dict of phase names to int, or a seq of both, optional
        Number of points to sample per degree of freedom.
        Default: 2000; Default when called from equilibrium(): 500
    model : Model, a dict of phase names to Model, or a seq of both, optional
        Model class to use for each phase.
    sampler : callable, a dict of phase names to callable, or a seq of both, optional
        Function to sample phase constitution space.
        Must have same signature as 'pycalphad.core.utils.point_sample'
    grid_points : bool, a dict of phase names to bool, or a seq of both, optional (Default: True)
        Whether to add evenly spaced points between end-members.
        The density of points is determined by 'pdens'
    parameters : dict, optional
        Maps SymPy Symbol to numbers, for overriding the values of parameters in the Database.

    Returns
    -------
    Dataset of the sampled attribute as a function of state variables

    Examples
    --------
    None yet.
    """
    # Here we check for any keyword arguments that are special, i.e.,
    # there may be keyword arguments that aren't state variables
    pdens_dict = unpack_kwarg(kwargs.pop('pdens', 2000), default_arg=2000)
    points_dict = unpack_kwarg(kwargs.pop('points', None), default_arg=None)
    callables = kwargs.pop('callables', {})
    sampler_dict = unpack_kwarg(kwargs.pop('sampler', None), default_arg=None)
    fixedgrid_dict = unpack_kwarg(kwargs.pop('grid_points', True),
                                  default_arg=True)
    parameters = parameters or dict()
    if isinstance(parameters, dict):
        parameters = OrderedDict(sorted(parameters.items(), key=str))
    if isinstance(phases, str):
        phases = [phases]
    if isinstance(comps, (str, v.Species)):
        comps = [comps]
    comps = sorted(unpack_components(dbf, comps))
    if points_dict is None and broadcast is False:
        raise ValueError(
            'The \'points\' keyword argument must be specified if broadcast=False is also given.'
        )
    nonvacant_components = [x for x in sorted(comps) if x.number_of_atoms > 0]

    all_phase_data = []
    largest_energy = 1e10

    # Consider only the active phases
    list_of_possible_phases = filter_phases(dbf, comps)
    if len(list_of_possible_phases) == 0:
        raise ConditionError(
            'There are no phases in the Database that can be active with components {0}'
            .format(comps))
    active_phases = {
        name: dbf.phases[name]
        for name in filter_phases(dbf, comps, phases)
    }
    if len(active_phases) == 0:
        raise ConditionError(
            'None of the passed phases ({0}) are active. List of possible phases: {1}.'
            .format(phases, list_of_possible_phases))

    models = instantiate_models(dbf,
                                comps,
                                list(active_phases.keys()),
                                model=kwargs.pop('model', None),
                                parameters=parameters)

    if isinstance(output, (list, tuple, set)):
        raise NotImplementedError(
            'Only one property can be specified in calculate() at a time')
    output = output if output is not None else 'GM'

    # Implicitly add 'N' state variable as a string to keyword arguments if it's not passed
    if kwargs.get('N') is None:
        kwargs['N'] = 1
    if np.any(np.array(kwargs['N']) != 1):
        raise ConditionError('N!=1 is not yet supported, got N={}'.format(
            kwargs['N']))

    # TODO: conditions dict of StateVariable instances should become part of the calculate API
    statevar_strings = [
        sv for sv in kwargs.keys() if getattr(v, sv, None) is not None
    ]
    # If we don't do this, sympy will get confused during substitution
    statevar_dict = dict((v.StateVariable(key), unpack_condition(value))
                         for key, value in kwargs.items()
                         if key in statevar_strings)
    # Sort after default state variable check to fix gh-116
    statevar_dict = collections.OrderedDict(
        sorted(statevar_dict.items(), key=lambda x: str(x[0])))
    phase_records = build_phase_records(dbf,
                                        comps,
                                        active_phases,
                                        statevar_dict,
                                        models=models,
                                        parameters=parameters,
                                        output=output,
                                        callables=callables,
                                        build_gradients=False,
                                        build_hessians=False,
                                        verbose=kwargs.pop('verbose', False))
    str_statevar_dict = collections.OrderedDict((str(key), unpack_condition(value)) \
                                                for (key, value) in statevar_dict.items())
    maximum_internal_dof = max(
        len(models[phase_name].site_fractions) for phase_name in active_phases)
    for phase_name, phase_obj in sorted(active_phases.items()):
        mod = models[phase_name]
        phase_record = phase_records[phase_name]
        points = points_dict[phase_name]
        variables, sublattice_dof = generate_dof(phase_obj, mod.components)
        if points is None:
            points = _sample_phase_constitution(
                phase_name, phase_obj.constituents, sublattice_dof, comps,
                tuple(variables), sampler_dict[phase_name] or point_sample,
                fixedgrid_dict[phase_name], pdens_dict[phase_name])
        points = np.atleast_2d(points)

        fp = fake_points and (phase_name == sorted(active_phases.keys())[0])
        phase_ds = _compute_phase_values(nonvacant_components,
                                         str_statevar_dict,
                                         points,
                                         phase_record,
                                         output,
                                         maximum_internal_dof,
                                         broadcast=broadcast,
                                         largest_energy=float(largest_energy),
                                         fake_points=fp)
        all_phase_data.append(phase_ds)

    # speedup for single-phase case (found by profiling)
    if len(all_phase_data) > 1:
        concatenated_coords = all_phase_data[0].coords

        data_vars = all_phase_data[0].data_vars
        concatenated_data_vars = {}
        for var in data_vars.keys():
            data_coords = data_vars[var][0]
            points_idx = data_coords.index('points')  # concatenation axis
            arrs = []
            for phase_data in all_phase_data:
                arrs.append(getattr(phase_data, var))
            concat_data = np.concatenate(arrs, axis=points_idx)
            concatenated_data_vars[var] = (data_coords, concat_data)
        final_ds = LightDataset(data_vars=concatenated_data_vars,
                                coords=concatenated_coords)
    else:
        final_ds = all_phase_data[0]
    if to_xarray:
        return final_ds.get_dataset()
    else:
        return final_ds
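A minimal call of the calculate function above (the database file is hypothetical); state variables such as P and T are passed as keyword arguments, and N is implicitly set to 1 as noted in the body:

from pycalphad import Database

dbf = Database('AL-NI.tdb')  # hypothetical database file
res = calculate(dbf, ['AL', 'NI', 'VA'], 'LIQUID', output='GM',
                P=101325, T=(300.0, 2000.0, 50.0), pdens=500)
print(res.GM.dims)  # e.g. ('N', 'P', 'T', 'points')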
Exemplo n.º 23
0
def get_thermochemical_data(dbf,
                            comps,
                            phases,
                            datasets,
                            weight_dict=None,
                            symbols_to_fit=None):
    """

    Parameters
    ----------
    dbf : pycalphad.Database
        Database to consider
    comps : list
        List of active component names
    phases : list
        List of phases to consider
    datasets : espei.utils.PickleableTinyDB
        Datasets that contain single phase data
    weight_dict : dict
        Dictionary of weights for each data type, e.g. {'HM': 200, 'SM': 2}
    symbols_to_fit : list
        Parameters to fit. Used to build the models and PhaseRecords.

    Returns
    -------
    list
        List of data dictionaries to iterate over
    """
    # phase by phase, then property by property, then by model exclusions
    if weight_dict is None:
        weight_dict = {}

    if symbols_to_fit is not None:
        symbols_to_fit = sorted(symbols_to_fit)
    else:
        symbols_to_fit = database_symbols_to_fit(dbf)

    # estimated from NIST TRC uncertainties
    property_std_deviation = {
        'HM': 500.0 / weight_dict.get('HM', 1.0),  # J/mol
        'SM': 0.2 / weight_dict.get('SM', 1.0),  # J/K-mol
        'CPM': 0.2 / weight_dict.get('CPM', 1.0),  # J/K-mol
    }
    properties = [
        'HM_FORM', 'SM_FORM', 'CPM_FORM', 'HM_MIX', 'SM_MIX', 'CPM_MIX'
    ]

    ref_states = []
    for el in get_pure_elements(dbf, comps):
        ref_state = ReferenceState(el, dbf.refstates[el]['phase'])
        ref_states.append(ref_state)
    all_data_dicts = []
    for phase_name in phases:
        for prop in properties:
            desired_data = get_prop_data(
                comps,
                phase_name,
                prop,
                datasets,
                additional_query=(where('solver').exists()))
            if len(desired_data) == 0:
                continue
            unique_exclusions = set([
                tuple(sorted(d.get('excluded_model_contributions', [])))
                for d in desired_data
            ])
            for exclusion in unique_exclusions:
                data_dict = {
                    'phase_name': phase_name,
                    'prop': prop,
                    # needs the following keys to be added:
                    # species, calculate_dict, phase_records, model, output, weights
                }
                # get all the data with these model exclusions
                if exclusion == tuple([]):
                    exc_search = (
                        ~where('excluded_model_contributions').exists()) & (
                            where('solver').exists())
                else:
                    exc_search = (where('excluded_model_contributions').test(
                        lambda x: tuple(sorted(x)) == exclusion)) & (
                            where('solver').exists())
                curr_data = get_prop_data(comps,
                                          phase_name,
                                          prop,
                                          datasets,
                                          additional_query=exc_search)
                calculate_dict = get_prop_samples(dbf, comps, phase_name,
                                                  curr_data)
                mod = Model(dbf, comps, phase_name, parameters=symbols_to_fit)
                if prop.endswith('_FORM'):
                    output = ''.join(prop.split('_')[:-1]) + 'R'
                    mod.shift_reference_state(
                        ref_states,
                        dbf,
                        contrib_mods={e: sympy.S.Zero
                                      for e in exclusion})
                else:
                    output = prop
                for contrib in exclusion:
                    mod.models[contrib] = sympy.S.Zero
                    mod.reference_model.models[contrib] = sympy.S.Zero
                species = sorted(unpack_components(dbf, comps), key=str)
                data_dict['species'] = species
                model = {phase_name: mod}
                statevar_dict = {
                    getattr(v, c, None): vals
                    for c, vals in calculate_dict.items()
                    if isinstance(getattr(v, c, None), v.StateVariable)
                }
                statevar_dict = OrderedDict(
                    sorted(statevar_dict.items(), key=lambda x: str(x[0])))
                str_statevar_dict = OrderedDict(
                    (str(k), vals) for k, vals in statevar_dict.items())
                phase_records = build_phase_records(
                    dbf,
                    species, [phase_name],
                    statevar_dict,
                    model,
                    output=output,
                    parameters={s: 0
                                for s in symbols_to_fit},
                    build_gradients=False,
                    build_hessians=False)
                data_dict['str_statevar_dict'] = str_statevar_dict
                data_dict['phase_records'] = phase_records
                data_dict['calculate_dict'] = calculate_dict
                data_dict['model'] = model
                data_dict['output'] = output
                data_dict['weights'] = np.array(
                    property_std_deviation[prop.split('_')[0]]) / np.array(
                        calculate_dict.pop('weights'))
                all_data_dicts.append(data_dict)
    return all_data_dicts
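
# --- Usage sketch (not part of the original source) ---
# Assumes `get_thermochemical_data` above is in scope; the TDB file name and the
# dataset directory are hypothetical placeholders for a real ESPEI setup.
from pycalphad import Database
from espei.datasets import load_datasets, recursive_glob

dbf = Database('CU-MG.tdb')  # placeholder database containing VV####-style symbols
datasets = load_datasets(sorted(recursive_glob('input-data', '*.json')))
thermochemical_data = get_thermochemical_data(dbf, ['CU', 'MG', 'VA'],
                                              ['LIQUID', 'FCC_A1'], datasets)
for d in thermochemical_data:
    print(d['phase_name'], d['prop'], d['output'])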
Exemplo n.º 24
0
def plot_interaction(dbf, comps, phase_name, configuration, output, datasets=None, symmetry=None, ax=None, plot_kwargs=None, dataplot_kwargs=None) -> plt.Axes:
    """
    Return one set of plotted Axes with data compared to calculated parameters

    Parameters
    ----------
    dbf : Database
        pycalphad thermodynamic database containing the relevant parameters.
    comps : Sequence[str]
        Names of components to consider in the calculation.
    phase_name : str
        Name of the phase to consider.
    configuration : Tuple[Tuple[str]]
        ESPEI-style configuration
    output : str
        Model property to plot on the y-axis e.g. ``'HM_MIX'``, or ``'SM_MIX'``.
        Must be a ``'_MIX'`` property.
    datasets : tinydb.TinyDB
        Datasets containing the observed values to plot.
    symmetry : list
        List of lists containing indices of symmetric sublattices e.g. [[0, 1], [2, 3]]
    ax : plt.Axes
        Axes to plot on. If not given, a new set of axes is created.
    plot_kwargs : Optional[Dict[str, Any]]
        Keyword arguments to ``ax.plot`` for the predicted data.
    dataplot_kwargs : Optional[Dict[str, Any]]
        Keyword arguments to ``ax.plot`` for the observed data.

    Returns
    -------
    plt.Axes

    """
    if not output.endswith('_MIX'):
        raise ValueError("`plot_interaction` only supports HM_MIX, SM_MIX, or CPM_MIX outputs.")
    if not plot_kwargs:
        plot_kwargs = {}
    if not dataplot_kwargs:
        dataplot_kwargs = {}

    if not ax:
        ax = plt.subplot()

    # Plot predicted values from the database
    grid, predicted_values = _get_interaction_predicted_values(dbf, comps, phase_name, configuration, output)
    plot_kwargs.setdefault('label', 'This work')
    plot_kwargs.setdefault('color', 'k')
    ax.plot(grid, predicted_values, **plot_kwargs)

    # Plot the observed values from the datasets
    # TODO: model exclusions handling
    # TODO: better reference state handling
    mod_srf = Model(dbf, comps, phase_name, parameters={'GHSER'+c.upper(): 0 for c in comps})
    mod_srf.models = {'ref': mod_srf.models['ref']}

    # _MIX assumption
    prop = output.split('_MIX')[0]
    desired_props = (f"{prop}_MIX", f"{prop}_FORM")
    if datasets is not None:
        solver_qry = (tinydb.where('solver').test(symmetry_filter, configuration, recursive_tuplify(symmetry) if symmetry else symmetry))
        desired_data = get_prop_data(comps, phase_name, desired_props, datasets, additional_query=solver_qry)
        desired_data = filter_configurations(desired_data, configuration, symmetry)
        desired_data = filter_temperatures(desired_data)
    else:
        desired_data = []

    species = unpack_components(dbf, comps)
    # phase constituents are Species objects, so we need to be doing intersections with those
    phase_constituents = dbf.phases[phase_name].constituents
    # phase constituents must be filtered to only active
    constituents = [[sp.name for sp in sorted(subl_constituents.intersection(species))] for subl_constituents in phase_constituents]
    subl_dof = list(map(len, constituents))
    calculate_dict = get_prop_samples(desired_data, constituents)
    sample_condition_dicts = _get_sample_condition_dicts(calculate_dict, subl_dof)
    endpoints = endmembers_from_interaction(configuration)
    interacting_subls = [c for c in recursive_tuplify(configuration) if isinstance(c, tuple)]
    if (len(set(interacting_subls)) == 1) and (len(interacting_subls[0]) == 2):
        # This configuration describes all sublattices with the same two elements interacting
        # In general this is a high-dimensional space; just plot the diagonal to see the disordered mixing
        # Keep only the first and last endmembers, which define the disordered mixing axis
        endpoints = [endpoints[0], endpoints[-1]]
        disordered_config = True
    else:
        disordered_config = False
    bib_reference_keys = sorted({entry.get('reference', '') for entry in desired_data})
    symbol_map = bib_marker_map(bib_reference_keys)
    for data in desired_data:
        indep_var_data = None
        response_data = np.zeros_like(data['values'], dtype=np.float_)
        if disordered_config:
            # Take the second element of the first interacting sublattice as the coordinate
            # Because it's disordered all sublattices should be equivalent
            # TODO: Fix this to filter because we need to guarantee the plot points are disordered
            occ = data['solver']['sublattice_occupancies']
            subl_idx = np.nonzero([isinstance(c, (list, tuple)) for c in occ[0]])[0]
            if len(subl_idx) > 1:
                subl_idx = int(subl_idx[0])
            else:
                subl_idx = int(subl_idx)
            indep_var_data = [c[subl_idx][1] for c in occ]
        else:
            interactions = np.array([cond_dict[Symbol('YS')] for cond_dict in sample_condition_dicts])
            indep_var_data = 1 - (interactions+1)/2
        if data['output'].endswith('_FORM'):
            # All the _FORM data we have still has the lattice stability contribution
            # Need to zero it out to shift formation data to mixing
            temps = data['conditions'].get('T', 298.15)
            pressures = data['conditions'].get('P', 101325)
            points = build_sitefractions(phase_name, data['solver']['sublattice_configurations'],
                                            data['solver']['sublattice_occupancies'])
            for point_idx in range(len(points)):
                missing_variables = mod_srf.ast.atoms(v.SiteFraction) - set(points[point_idx].keys())
                # Set unoccupied values to zero
                points[point_idx].update({key: 0 for key in missing_variables})
                # Change entry to a sorted array of site fractions
                points[point_idx] = list(OrderedDict(sorted(points[point_idx].items(), key=str)).values())
            points = np.array(points, dtype=np.float_)
            # TODO: Real temperature support
            points = points[None, None]
            stability = calculate(dbf, comps, [phase_name], output=data['output'][:-5],
                                    T=temps, P=pressures, points=points,
                                    model=mod_srf)
            response_data -= stability[data['output'][:-5]].values.squeeze()

        response_data += np.array(data['values'], dtype=np.float_)
        response_data = response_data.flatten()
        ref = data.get('reference', '')
        dataplot_kwargs.setdefault('markersize', 8)
        dataplot_kwargs.setdefault('linestyle', 'none')
        dataplot_kwargs.setdefault('clip_on', False)
        # Cannot use setdefault because it won't overwrite previous iterations
        dataplot_kwargs['label'] = symbol_map[ref]['formatted']
        dataplot_kwargs['marker'] = symbol_map[ref]['markers']['marker']
        dataplot_kwargs['fillstyle'] = symbol_map[ref]['markers']['fillstyle']
        ax.plot(indep_var_data, response_data, **dataplot_kwargs)
    ax.set_xlim((0, 1))
    ax.set_xlabel(str(':'.join(endpoints[0])) + ' to ' + str(':'.join(endpoints[1])))
    ax.set_ylabel(plot_mapping.get(output, output))
    leg = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))  # legend outside
    leg.get_frame().set_edgecolor('black')
    return ax
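
# --- Usage sketch (assumed, not from the original source) ---
# Compare the calculated HM_MIX curve for a binary liquid interaction against any
# tagged datasets; 'CU-MG.tdb' and 'input-data' are placeholder names.
import matplotlib.pyplot as plt
from pycalphad import Database
from espei.datasets import load_datasets, recursive_glob

dbf = Database('CU-MG.tdb')
datasets = load_datasets(sorted(recursive_glob('input-data', '*.json')))
ax = plot_interaction(dbf, ['CU', 'MG', 'VA'], 'LIQUID', (('CU', 'MG'),),
                      'HM_MIX', datasets=datasets)
plt.show()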
Exemplo n.º 25
0
def simulate_equilibrium_solidification(dbf,
                                        comps,
                                        phases,
                                        composition,
                                        start_temperature,
                                        step_temperature=1.0,
                                        liquid_phase_name='LIQUID',
                                        adaptive=True,
                                        eq_kwargs=None,
                                        binary_search_tol=0.1,
                                        verbose=False):
    """
    Compute the equilibrium solidification path.

    Decreases the temperature until no liquid is found, performing a binary search to find the solidus temperature.

    Parameters
    ----------
    dbf : pycalphad.Database
        Database object.
    comps : list
        List of components in the system.
    phases : list
        List of phases in the system.
    composition : Dict[v.X, float]
        Dictionary of independent `v.X` composition variables.
    start_temperature : float
        Starting temperature for simulation. Should be single phase liquid.
    step_temperature : Optional[float]
        Temperature step size. Defaults to 1.0.
    liquid_phase_name : Optional[str]
        Name of the phase treated as liquid (i.e. the phase with infinitely
        fast diffusion). Defaults to 'LIQUID'.
    eq_kwargs: Optional[Dict[str, Any]]
        Keyword arguments for equilibrium
    binary_search_tol : float
        Stop the binary search when the difference between temperatures is less than this amount.
    adaptive: Optional[bool]
        Whether to add additional points near the equilibrium points at each
        step. Only takes effect if ``points`` is in ``eq_kwargs['calc_opts']``.
    verbose : Optional[bool]
        Print the temperature at each step. Defaults to False.

    Returns
    -------
    SolidificationResult

    """
    eq_kwargs = eq_kwargs or dict()
    phases = filter_phases(dbf, unpack_components(dbf, comps), phases)
    ord_disord_dict = order_disorder_dict(dbf, comps, phases)
    solid_phases = sorted(set(phases) - {liquid_phase_name})
    independent_comps = sorted([str(comp)[2:] for comp in composition.keys()])
    models = instantiate_models(dbf, comps, phases)
    if verbose:
        print('building callables... ', end='')
    cbs = build_callables(dbf,
                          comps,
                          phases,
                          models,
                          additional_statevars={v.P, v.T, v.N},
                          build_gradients=True,
                          build_hessians=True)
    if verbose:
        print('done')
    conds = {v.P: 101325, v.N: 1.0}
    conds.update(composition)

    if adaptive and ('points' in eq_kwargs.get('calc_opts', {})):
        # Dynamically add points as the simulation runs
        species = unpack_components(dbf, comps)
        dof_dict = {
            ph: generate_dof(dbf.phases[ph], species)[1]
            for ph in phases
        }
    else:
        adaptive = False

    temperatures = []
    x_liquid = {comp: [] for comp in independent_comps}
    fraction_solid = []
    phase_amounts = {ph: []
                     for ph in solid_phases}  # instantaneous phase amounts
    cum_phase_amounts = {ph: [] for ph in solid_phases}
    converged = False
    current_T = start_temperature
    if verbose:
        print('T=')
    while len(fraction_solid) == 0 or fraction_solid[-1] < 1:
        sys.stdout.flush()
        conds[v.T] = current_T
        if verbose:
            print(f'{current_T} ', end='')
        eq = equilibrium(dbf,
                         comps,
                         phases,
                         conds,
                         callables=cbs,
                         model=models,
                         to_xarray=False,
                         **eq_kwargs)
        if not is_converged(eq):
            if verbose:
                comp_conds = {
                    cond: val
                    for cond, val in conds.items() if isinstance(cond, v.X)
                }
                print(f"Convergence failure at T={conds[v.T]} X={comp_conds} ")
        if adaptive:
            # Update the points dictionary with local samples around the equilibrium site fractions
            update_points(eq, eq_kwargs['calc_opts']['points'], dof_dict)
        if liquid_phase_name in eq.Phase:
            # Add the liquid phase composition
            # TODO: will break in a liquid miscibility gap
            liquid_vertex = np.nonzero(eq.Phase == liquid_phase_name)[-1][0]
            for comp in independent_comps:
                x_liquid[comp].append(
                    float(eq.X[..., liquid_vertex,
                               eq.component.index(comp)]))
            temperatures.append(current_T)
            current_T -= step_temperature
        else:
            # binary search to find the solidus
            T_high = current_T + step_temperature  # High temperature, liquid
            T_low = current_T  # Low temperature, solids only
            found_ph = set(eq.Phase[eq.Phase != ''].tolist())
            if verbose:
                print(
                    f'Found phases {found_ph}. Starting binary search between T={(T_low, T_high)} ',
                    end='')
            while (T_high - T_low) > binary_search_tol:
                bin_search_T = (T_high - T_low) * 0.5 + T_low
                conds[v.T] = bin_search_T
                eq = equilibrium(dbf,
                                 comps,
                                 phases,
                                 conds,
                                 callables=cbs,
                                 model=models,
                                 to_xarray=False,
                                 **eq_kwargs)
                if adaptive:
                    # Update the points dictionary with local samples around the equilibrium site fractions
                    update_points(eq, eq_kwargs['calc_opts']['points'],
                                  dof_dict)
                if not is_converged(eq):
                    if verbose:
                        comp_conds = {
                            cond: val
                            for cond, val in conds.items()
                            if isinstance(cond, v.X)
                        }
                        print(
                            f"Convergence failure at T={conds[v.T]} X={comp_conds} "
                        )
                if liquid_phase_name in eq.Phase:
                    T_high = bin_search_T
                else:
                    T_low = bin_search_T
            conds[v.T] = T_low
            temperatures.append(T_low)
            eq = equilibrium(dbf,
                             comps,
                             phases,
                             conds,
                             callables=cbs,
                             model=models,
                             to_xarray=False,
                             **eq_kwargs)
            if not is_converged(eq):
                if verbose:
                    comp_conds = {
                        cond: val
                        for cond, val in conds.items()
                        if isinstance(cond, v.X)
                    }
                    print(
                        f"Convergence failure at T={conds[v.T]} X={comp_conds} "
                    )
            if verbose:
                found_phases = set(eq.Phase[eq.Phase != ''].tolist())
                print(
                    f"Finshed binary search at T={conds[v.T]} with phases={found_phases} and NP={eq.NP.squeeze()[:len(found_phases)]}"
                )
            if adaptive:
                # Update the points dictionary with local samples around the equilibrium site fractions
                update_points(eq, eq_kwargs['calc_opts']['points'], dof_dict)
            # Set the liquid phase composition to NaN
            for comp in independent_comps:
                x_liquid[comp].append(float(np.nan))

        # Calculate fraction of solid and solid phase amounts
        current_fraction_solid = 0.0
        current_cum_phase_amnts = get_phase_amounts(
            order_disorder_eq_phases(eq.get_dataset(), ord_disord_dict),
            eq.NP.squeeze(), solid_phases)
        for solid_phase, amount in current_cum_phase_amnts.items():
            # Since the equilibrium calculations always give the "cumulative" phase amount,
            # we need to take the difference to get the instantaneous.
            cum_phase_amounts[solid_phase].append(amount)
            if len(phase_amounts[solid_phase]) == 0:
                phase_amounts[solid_phase].append(amount)
            else:
                phase_amounts[solid_phase].append(
                    amount - cum_phase_amounts[solid_phase][-2])
            current_fraction_solid += amount
        fraction_solid.append(current_fraction_solid)

    converged = bool(np.isclose(fraction_solid[-1], 1.0))
    return SolidificationResult(x_liquid, fraction_solid, temperatures,
                                phase_amounts, converged, "equilibrium")
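
# --- Usage sketch (assumed) ---
# Run the equilibrium solidification simulation above for a placeholder Al-Zn
# alloy; the database file, phases, and composition are illustrative only.
from pycalphad import Database, variables as v

dbf = Database('AL-ZN.tdb')
sol_res = simulate_equilibrium_solidification(
    dbf, ['AL', 'ZN', 'VA'], ['LIQUID', 'FCC_A1', 'HCP_A3'],
    {v.X('ZN'): 0.3}, start_temperature=950.0, step_temperature=5.0)
# Attribute names assumed to mirror the SolidificationResult constructor arguments above.
print(sol_res.temperatures[-1], sol_res.fraction_solid[-1])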
Exemplo n.º 26
0
def map_binary(
    dbf,
    comps,
    phases,
    conds,
    eq_kwargs=None,
    calc_kwargs=None,
    boundary_sets=None,
    verbose=False,
    summary=False,
):
    """
    Map a binary T-X phase diagram

    Parameters
    ----------
    dbf : Database
    comps : list of str
    phases : list of str
        List of phases to consider in mapping
    conds : dict
        Dictionary of conditions
    eq_kwargs : dict
        Dictionary of keyword arguments to pass to equilibrium
    calc_kwargs : dict
        Dictionary of keyword arguments to pass to calculate
    boundary_sets : ZPFBoundarySets
        Existing ZPFBoundarySets to add the mapped boundaries to
    verbose : bool
        Print verbose output while mapping
    summary : bool
        Print a summary of the mapping statistics when finished

    Returns
    -------
    ZPFBoundarySets

    Notes
    -----
    Assumes conditions in T and X.

    Simple algorithm to map a binary phase diagram in T-X. More or less follows
    the algorithm described in Figure 2 by Snider et al. [1] with the small
    algorithmic improvement of constructing a convex hull to find the next
    potential two phase region.

    For each temperature, proceed along increasing composition, skipping over
    two phase regions once they have been calculated.

    [1] J. Snider, I. Griva, X. Sun, M. Emelianenko, Set based framework for
        Gibbs energy minimization, Calphad. 48 (2015) 18-26.
        doi: 10.1016/j.calphad.2014.09.005

    """

    eq_kwargs = eq_kwargs or {}
    calc_kwargs = calc_kwargs or {}
    # implicitly add v.N to conditions
    if v.N not in conds:
        conds[v.N] = [1.0]
    if 'pdens' not in calc_kwargs:
        calc_kwargs['pdens'] = 2000

    species = unpack_components(dbf, comps)
    phases = filter_phases(dbf, species, phases)
    parameters = eq_kwargs.get('parameters', {})
    models = eq_kwargs.get('model')
    statevars = get_state_variables(models=models, conds=conds)
    if models is None:
        models = instantiate_models(dbf,
                                    comps,
                                    phases,
                                    model=eq_kwargs.get('model'),
                                    parameters=parameters,
                                    symbols_only=True)
    prxs = build_phase_records(dbf,
                               species,
                               phases,
                               conds,
                               models,
                               output='GM',
                               parameters=parameters,
                               build_gradients=True,
                               build_hessians=True)

    indep_comp = [
        key for key, value in conds.items()
        if isinstance(key, v.MoleFraction) and len(np.atleast_1d(value)) > 1
    ]
    indep_pot = [
        key for key, value in conds.items()
        if (type(key) is v.StateVariable) and len(np.atleast_1d(value)) > 1
    ]
    if (len(indep_comp) != 1) or (len(indep_pot) != 1):
        raise ValueError(
            'Binary map requires exactly one composition and one potential coordinate'
        )
    if indep_pot[0] != v.T:
        raise ValueError(
            'Binary map requires that a temperature grid must be defined')

    # binary assumption, only one composition specified.
    comp_cond = [k for k in conds.keys() if isinstance(k, v.X)][0]
    indep_comp = comp_cond.name[2:]
    indep_comp_idx = sorted(get_pure_elements(dbf, comps)).index(indep_comp)
    composition_grid = unpack_condition(conds[comp_cond])
    dX = composition_grid[1] - composition_grid[0]
    Xmax = composition_grid.max()
    temperature_grid = unpack_condition(conds[v.T])
    dT = temperature_grid[1] - temperature_grid[0]

    boundary_sets = boundary_sets or ZPFBoundarySets(comps, comp_cond)

    equilibria_calculated = 0
    equilibrium_time = 0
    convex_hulls_calculated = 0
    convex_hull_time = 0
    curr_conds = {key: unpack_condition(val) for key, val in conds.items()}
    str_conds = sorted([str(k) for k in curr_conds.keys()])
    grid_conds = _adjust_conditions(curr_conds)
    for T_idx in range(temperature_grid.size):
        T = temperature_grid[T_idx]
        iter_equilibria = 0
        if verbose:
            print("=== T = {} ===".format(float(T)))
        curr_conds[v.T] = [float(T)]
        eq_conds = deepcopy(curr_conds)
        Xmax_visited = 0.0
        hull_time = time.time()
        grid = calculate(dbf,
                         comps,
                         phases,
                         fake_points=True,
                         output='GM',
                         T=T,
                         P=grid_conds[v.P],
                         N=1,
                         model=models,
                         parameters=parameters,
                         to_xarray=False,
                         **calc_kwargs)
        hull = starting_point(eq_conds, statevars, prxs, grid)
        convex_hull_time += time.time() - hull_time
        convex_hulls_calculated += 1
        while Xmax_visited < Xmax:
            hull_compsets = find_two_phase_region_compsets(
                hull,
                T,
                indep_comp,
                indep_comp_idx,
                minimum_composition=Xmax_visited,
                misc_gap_tol=2 * dX)
            if hull_compsets is None:
                if verbose:
                    print(
                        "== Convex hull: max visited = {} - no multiphase compsets found =="
                        .format(Xmax_visited))
                break
            Xeq = hull_compsets.mean_composition
            eq_conds[comp_cond] = [float(Xeq)]
            eq_time = time.time()
            start_point = starting_point(eq_conds, statevars, prxs, grid)
            eq_ds = _solve_eq_at_conditions(species, start_point, prxs, grid,
                                            str_conds, statevars, False)
            equilibrium_time += time.time() - eq_time
            equilibria_calculated += 1
            iter_equilibria += 1
            # composition sets in the plane of the calculation:
            # even for isopleths, this should always be two.
            compsets = get_compsets(eq_ds, indep_comp, indep_comp_idx)
            if verbose:
                print(
                    "== Convex hull: max visited = {:0.4f} - hull compsets: {} equilibrium compsets: {} =="
                    .format(Xmax_visited, hull_compsets, compsets))
            if compsets is None:
                # equilibrium calculation, didn't find a valid multiphase composition set
                # we need to find the next feasible one from the convex hull.
                Xmax_visited += dX
                continue
            else:
                boundary_sets.add_compsets(compsets, Xtol=0.10, Ttol=2 * dT)
                if compsets.max_composition > Xmax_visited:
                    Xmax_visited = compsets.max_composition
            # this seems kind of sloppy, but captures the effect that we want to
            # keep doing equilibrium calculations, if possible.
            while Xmax_visited < Xmax and compsets is not None:
                eq_conds[comp_cond] = [float(Xmax_visited + dX)]
                eq_time = time.time()
                # TODO: starting point could be improved by basing it off the previous calculation
                start_point = starting_point(eq_conds, statevars, prxs, grid)
                eq_ds = _solve_eq_at_conditions(species, start_point, prxs,
                                                grid, str_conds, statevars,
                                                False)
                equilibrium_time += time.time() - eq_time
                equilibria_calculated += 1
                compsets = get_compsets(eq_ds, indep_comp, indep_comp_idx)
                if compsets is not None:
                    Xmax_visited = compsets.max_composition
                    boundary_sets.add_compsets(compsets,
                                               Xtol=0.10,
                                               Ttol=2 * dT)
                else:
                    Xmax_visited += dX
                if verbose:
                    print("Equilibrium: at X = {:0.4f}, found compsets {}".
                          format(Xmax_visited, compsets))
        if verbose:
            print(iter_equilibria, 'equilibria calculated in this iteration.')
    if verbose or summary:
        print("{} Convex hulls calculated ({:0.1f}s)".format(
            convex_hulls_calculated, convex_hull_time))
        print("{} Equilbria calculated ({:0.1f}s)".format(
            equilibria_calculated, equilibrium_time))
        print("{:0.0f}% of brute force calculations skipped".format(
            100 * (1 - equilibria_calculated /
                   (composition_grid.size * temperature_grid.size))))
    return boundary_sets
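
# --- Usage sketch (assumed) ---
# Map a placeholder Cu-Mg T-X diagram with the function above; the database,
# phase list, and condition grids are illustrative only.
from pycalphad import Database, variables as v

dbf = Database('CU-MG.tdb')
conds = {v.P: 101325, v.T: (600, 1400, 10), v.X('MG'): (0.0, 1.0, 0.02)}
zpf_boundary_sets = map_binary(dbf, ['CU', 'MG', 'VA'],
                               ['LIQUID', 'FCC_A1', 'HCP_A3', 'CUMG2', 'LAVES_C15'],
                               conds, summary=True)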
Exemplo n.º 27
0
def plot_endmember(dbf, comps, phase_name, configuration, output, datasets=None, symmetry=None, x='T', ax=None, plot_kwargs=None, dataplot_kwargs=None) -> plt.Axes:
    """
    Return one set of plotted Axes with data compared to calculated parameters

    Parameters
    ----------
    dbf : Database
        pycalphad thermodynamic database containing the relevant parameters.
    comps : Sequence[str]
        Names of components to consider in the calculation.
    phase_name : str
        Name of the phase to consider.
    configuration : Tuple[Tuple[str]]
        ESPEI-style configuration
    output : str
        Model property to plot on the y-axis e.g. ``'HM'``, ``'HM_FORM'``, or ``'SM'``.
        Must not be a ``'_MIX'`` property.
    datasets : tinydb.TinyDB
        Datasets containing the observed values to plot.
    symmetry : list
        List of lists containing indices of symmetric sublattices e.g. [[0, 1], [2, 3]]
    x : str
        Independent variable to plot against. Only ``'T'`` (temperature) is supported.
    ax : plt.Axes
        Axes to plot on. If not given, a new set of axes is created.
    plot_kwargs : Optional[Dict[str, Any]]
        Keyword arguments to ``ax.plot`` for the predicted data.
    dataplot_kwargs : Optional[Dict[str, Any]]
        Keyword arguments to ``ax.plot`` for the observed data.

    Returns
    -------
    plt.Axes

    """
    if output.endswith('_MIX'):
        raise ValueError("`plot_interaction` only supports HM, HM_FORM, SM, SM_FORM or CPM, CPM_FORM outputs.")
    if x not in ('T',):
        raise ValueError(f'`x` passed to `plot_endmember` must be "T", got {x}')
    if not plot_kwargs:
        plot_kwargs = {}
    if not dataplot_kwargs:
        dataplot_kwargs = {}

    if not ax:
        ax = plt.subplot()

    if datasets is not None:
        solver_qry = (tinydb.where('solver').test(symmetry_filter, configuration, recursive_tuplify(symmetry) if symmetry else symmetry))
        desired_data = get_prop_data(comps, phase_name, output, datasets, additional_query=solver_qry)
        desired_data = filter_configurations(desired_data, configuration, symmetry)
        desired_data = filter_temperatures(desired_data)
    else:
        desired_data = []

    # Plot predicted values from the database
    endpoints = endmembers_from_interaction(configuration)
    if len(endpoints) != 1:
        raise ValueError(f"The configuration passed to `plot_endmember` must be an endmebmer configuration. Got {configuration}")
    if output.endswith('_FORM'):
        # TODO: better reference state handling
        mod = Model(dbf, comps, phase_name, parameters={'GHSER'+(c.upper()*2)[:2]: 0 for c in comps})
        prop = output[:-5]
    else:
        mod = Model(dbf, comps, phase_name)
        prop = output
    endmember = _translate_endmember_to_array(endpoints[0], mod.ast.atoms(v.SiteFraction))[None, None]
    # Set up the domain of the calculation
    species = unpack_components(dbf, comps)
    # phase constituents are Species objects, so we need to be doing intersections with those
    phase_constituents = dbf.phases[phase_name].constituents
    # phase constituents must be filtered to only active
    constituents = [[sp.name for sp in sorted(subl_constituents.intersection(species))] for subl_constituents in phase_constituents]
    calculate_dict = get_prop_samples(desired_data, constituents)
    potential_values = np.asarray(calculate_dict[x] if len(calculate_dict[x]) > 0 else 298.15)
    potential_grid = np.linspace(max(potential_values.min()-1, 0), potential_values.max()+1, num=100)
    predicted_values = calculate(dbf, comps, [phase_name], output=prop, T=potential_grid, P=101325, points=endmember, model=mod)[prop].values.flatten()
    ax.plot(potential_grid, predicted_values, **plot_kwargs)

    # Plot observed values
    # TODO: model exclusions handling
    bib_reference_keys = sorted({entry.get('reference', '') for entry in desired_data})
    symbol_map = bib_marker_map(bib_reference_keys)
    for data in desired_data:
        indep_var_data = None
        response_data = np.zeros_like(data['values'], dtype=np.float_)
        indep_var_data = np.array(data['conditions'][x], dtype=np.float_).flatten()

        response_data += np.array(data['values'], dtype=np.float_)
        response_data = response_data.flatten()
        ref = data.get('reference', '')
        dataplot_kwargs.setdefault('markersize', 8)
        dataplot_kwargs.setdefault('linestyle', 'none')
        dataplot_kwargs.setdefault('clip_on', False)
        # Cannot use setdefault because it won't overwrite previous iterations
        dataplot_kwargs['label'] = symbol_map[ref]['formatted']
        dataplot_kwargs['marker'] = symbol_map[ref]['markers']['marker']
        dataplot_kwargs['fillstyle'] = symbol_map[ref]['markers']['fillstyle']
        ax.plot(indep_var_data, response_data, **dataplot_kwargs)

    ax.set_xlabel(plot_mapping.get(x, x))
    ax.set_ylabel(plot_mapping.get(output, output))
    leg = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))  # legend outside
    leg.get_frame().set_edgecolor('black')
    return ax
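
# --- Usage sketch (assumed) ---
# Plot a calculated endmember formation enthalpy against temperature; the
# database and phase names are placeholders, and a datasets argument may be
# passed to overlay observed values.
import matplotlib.pyplot as plt
from pycalphad import Database

dbf = Database('CU-MG.tdb')
ax = plot_endmember(dbf, ['CU', 'MG', 'VA'], 'CUMG2', ('CU', 'MG'), 'HM_FORM')
plt.show()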
Exemplo n.º 28
0
def setup_context(dbf,
                  datasets,
                  symbols_to_fit=None,
                  data_weights=None,
                  phase_models=None,
                  make_callables=True):
    """
    Set up a context dictionary for calculating error.

    Parameters
    ----------
    dbf : Database
        A pycalphad Database that will be fit
    datasets : PickleableTinyDB
        A database of single- and multi-phase data to fit
    symbols_to_fit : list of str
        List of symbols in the Database that will be fit. If None (default) is
        passed, then all parameters prefixed with `VV` followed by a number,
        e.g. VV0001, will be fit.
    data_weights : dict
        Dictionary of weights for each data type, e.g. {'ZPF': 20, 'HM': 2}
    phase_models : dict
        ESPEI phase models dictionary, used to set the active components and
        custom model classes. If None, all elements in the Database are used.
    make_callables : bool
        Whether to pre-build the pycalphad callables used in the equilibrium
        calculations. Defaults to True.

    Returns
    -------
    dict
        Context dictionary of keyword arguments for the error (log-probability) functions.

    Notes
    -----
    A copy of the Database is made and used in the context. To commit changes
    back to the original database, the dbf.symbols.update method should be used.
    """
    dbf = copy.deepcopy(dbf)
    if phase_models is not None:
        comps = sorted(phase_models['components'])
    else:
        comps = sorted([sp for sp in dbf.elements])
    if symbols_to_fit is None:
        symbols_to_fit = database_symbols_to_fit(dbf)
    else:
        symbols_to_fit = sorted(symbols_to_fit)
    data_weights = data_weights if data_weights is not None else {}

    if len(symbols_to_fit) == 0:
        raise ValueError(
            'No degrees of freedom. Database must contain symbols starting with \'V\' or \'VV\', followed by a number.'
        )
    else:
        _log.info('Fitting %s degrees of freedom.', len(symbols_to_fit))

    for x in symbols_to_fit:
        if isinstance(dbf.symbols[x], symengine.Piecewise):
            _log.debug('Replacing %s in database', x)
            dbf.symbols[x] = dbf.symbols[x].args[0]

    # construct the models for each phase, substituting in the SymEngine symbol to fit.
    if phase_models is not None:
        model_dict = get_model_dict(phase_models)
    else:
        model_dict = {}
    _log.trace('Building phase models (this may take some time)')
    import time
    t1 = time.time()
    phases = sorted(
        filter_phases(dbf, unpack_components(dbf, comps), dbf.phases.keys()))
    parameters = dict(zip(symbols_to_fit, [0] * len(symbols_to_fit)))
    models = instantiate_models(dbf,
                                comps,
                                phases,
                                model=model_dict,
                                parameters=parameters)
    if make_callables:
        eq_callables = build_callables(dbf,
                                       comps,
                                       phases,
                                       models,
                                       parameter_symbols=symbols_to_fit,
                                       output='GM',
                                       build_gradients=True,
                                       build_hessians=True,
                                       additional_statevars={v.N, v.P, v.T})
    else:
        eq_callables = None
    t2 = time.time()
    _log.trace('Finished building phase models (%0.2fs)', t2 - t1)
    _log.trace(
        'Getting non-equilibrium thermochemical data (this may take some time)'
    )
    t1 = time.time()
    thermochemical_data = get_thermochemical_data(
        dbf,
        comps,
        phases,
        datasets,
        model=model_dict,
        weight_dict=data_weights,
        symbols_to_fit=symbols_to_fit)
    t2 = time.time()
    _log.trace('Finished getting non-equilibrium thermochemical data (%0.2fs)',
               t2 - t1)
    _log.trace(
        'Getting equilibrium thermochemical data (this may take some time)')
    t1 = time.time()
    eq_thermochemical_data = get_equilibrium_thermochemical_data(
        dbf,
        comps,
        phases,
        datasets,
        model=model_dict,
        parameters=parameters,
        data_weight_dict=data_weights)
    t2 = time.time()
    _log.trace('Finished getting equilibrium thermochemical data (%0.2fs)',
               t2 - t1)
    _log.trace('Getting ZPF data (this may take some time)')
    t1 = time.time()
    zpf_data = get_zpf_data(dbf,
                            comps,
                            phases,
                            datasets,
                            model=model_dict,
                            parameters=parameters)
    t2 = time.time()
    _log.trace('Finished getting ZPF data (%0.2fs)', t2 - t1)

    # context for the log probability function
    # for all cases, parameters argument addressed in MCMC loop
    error_context = {
        'symbols_to_fit': symbols_to_fit,
        'zpf_kwargs': {
            'zpf_data': zpf_data,
            'data_weight': data_weights.get('ZPF', 1.0),
        },
        'equilibrium_thermochemical_kwargs': {
            'eq_thermochemical_data': eq_thermochemical_data,
        },
        'thermochemical_kwargs': {
            'thermochemical_data': thermochemical_data,
        },
        'activity_kwargs': {
            'dbf': dbf,
            'comps': comps,
            'phases': phases,
            'datasets': datasets,
            'phase_models': models,
            'callables': eq_callables,
            'data_weight': data_weights.get('ACR', 1.0),
        },
    }
    return error_context
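
# --- Usage sketch (assumed) ---
# Build the MCMC error context from a parameterized database and a dataset
# directory; file and directory names are placeholders.
from pycalphad import Database
from espei.datasets import load_datasets, recursive_glob

dbf = Database('CU-MG-generated.tdb')  # must contain VV#### symbols to fit
datasets = load_datasets(sorted(recursive_glob('input-data', '*.json')))
ctx = setup_context(dbf, datasets, make_callables=False)
print(sorted(ctx.keys()))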
Exemplo n.º 29
0
def _compare_data_to_parameters(dbf, comps, phase_name, desired_data, mod, configuration, x, y, ax=None):
    """
    Return one set of plotted Axes with data compared to calculated parameters

    Parameters
    ----------
    dbf : Database
        pycalphad thermodynamic database containing the relevant parameters.
    comps : list
        Names of components to consider in the calculation.
    phase_name : str
        Name of the phase to consider.
    desired_data : list
        List of dictionary datasets containing the observed values to plot.
    mod : Model
        A pycalphad Model. The Model may or may not have the reference state zeroed out for formation properties.
    configuration : tuple
        ESPEI-style configuration (endmember or interaction) to plot.
    x : str
        Model property to plot on the x-axis e.g. 'T', 'HM_MIX', 'SM_FORM'
    y : str
        Model property to plot on the y-axis e.g. 'T', 'HM_MIX', 'SM_FORM'
    ax : matplotlib.Axes
        Axes to plot on. If not given, a new set of axes is created.

    Returns
    -------
    matplotlib.Axes

    """
    species = unpack_components(dbf, comps)
    # phase constituents are Species objects, so we need to be doing intersections with those
    phase_constituents = dbf.phases[phase_name].constituents
    # phase constituents must be filtered to only active:
    constituents = [[sp.name for sp in sorted(subl_constituents.intersection(species))] for subl_constituents in phase_constituents]
    subl_dof = list(map(len, constituents))
    calculate_dict = get_prop_samples(desired_data, constituents)
    sample_condition_dicts = _get_sample_condition_dicts(calculate_dict, subl_dof)
    endpoints = endmembers_from_interaction(configuration)
    interacting_subls = [c for c in recursive_tuplify(configuration) if isinstance(c, tuple)]
    disordered_config = False
    if (len(set(interacting_subls)) == 1) and (len(interacting_subls[0]) == 2):
        # This configuration describes all sublattices with the same two elements interacting
        # In general this is a high-dimensional space; just plot the diagonal to see the disordered mixing
        endpoints = [endpoints[0], endpoints[-1]]
        disordered_config = True
    if not ax:
        ax = plt.subplot()
    bar_chart = False
    bar_labels = []
    bar_data = []
    if y.endswith('_FORM'):
        # We were passed a Model object with zeroed out reference states
        yattr = y[:-5]
    else:
        yattr = y
    if len(endpoints) == 1:
        # This is an endmember so we can just compute T-dependent stuff
        Ts = calculate_dict['T']
        temperatures = np.asarray(Ts if len(Ts) > 0 else 298.15)
        if temperatures.min() != temperatures.max():
            temperatures = np.linspace(temperatures.min(), temperatures.max(), num=100)
        else:
            # We only have one temperature: let's do a bar chart instead
            bar_chart = True
            temperatures = temperatures.min()
        endmember = _translate_endmember_to_array(endpoints[0], mod.ast.atoms(v.SiteFraction))[None, None]
        predicted_quantities = calculate(dbf, comps, [phase_name], output=yattr,
                                         T=temperatures, P=101325, points=endmember, model=mod, mode='numpy')
        if y == 'HM' and x == 'T':
            # Shift enthalpy data so that value at minimum T is zero
            predicted_quantities[yattr] -= predicted_quantities[yattr].sel(T=temperatures[0]).values.flatten()
        response_data = predicted_quantities[yattr].values.flatten()
        if not bar_chart:
            extra_kwargs = {}
            if len(response_data) < 10:
                extra_kwargs['markersize'] = 20
                extra_kwargs['marker'] = '.'
                extra_kwargs['linestyle'] = 'none'
                extra_kwargs['clip_on'] = False
            ax.plot(temperatures, response_data,
                           label='This work', color='k', **extra_kwargs)
            ax.set_xlabel(plot_mapping.get(x, x))
            ax.set_ylabel(plot_mapping.get(y, y))
        else:
            bar_labels.append('This work')
            bar_data.append(response_data[0])
    elif len(endpoints) == 2:
        # Binary interaction parameter
        first_endpoint = _translate_endmember_to_array(endpoints[0], mod.ast.atoms(v.SiteFraction))
        second_endpoint = _translate_endmember_to_array(endpoints[1], mod.ast.atoms(v.SiteFraction))
        point_matrix = np.linspace(0, 1, num=100)[None].T * second_endpoint + \
            (1 - np.linspace(0, 1, num=100))[None].T * first_endpoint
        # TODO: Real temperature support
        point_matrix = point_matrix[None, None]
        predicted_quantities = calculate(dbf, comps, [phase_name], output=yattr,
                                         T=300, P=101325, points=point_matrix, model=mod, mode='numpy')
        response_data = predicted_quantities[yattr].values.flatten()
        if not bar_chart:
            extra_kwargs = {}
            if len(response_data) < 10:
                extra_kwargs['markersize'] = 20
                extra_kwargs['marker'] = '.'
                extra_kwargs['linestyle'] = 'none'
                extra_kwargs['clip_on'] = False
            ax.plot(np.linspace(0, 1, num=100), response_data, label='This work', color='k', **extra_kwargs)
            ax.set_xlim((0, 1))
            ax.set_xlabel(str(':'.join(endpoints[0])) + ' to ' + str(':'.join(endpoints[1])))
            ax.set_ylabel(plot_mapping.get(y, y))
        else:
            bar_labels.append('This work')
            bar_data.append(response_data[0])
    else:
        raise NotImplementedError('No support for plotting configuration {}'.format(configuration))

    bib_reference_keys = sorted({entry.get('reference', '') for entry in desired_data})
    symbol_map = bib_marker_map(bib_reference_keys)

    for data in desired_data:
        indep_var_data = None
        response_data = np.zeros_like(data['values'], dtype=np.float_)
        if x == 'T' or x == 'P':
            indep_var_data = np.array(data['conditions'][x], dtype=np.float_).flatten()
        elif x == 'Z':
            if disordered_config:
                # Take the second element of the first interacting sublattice as the coordinate
                # Because it's disordered all sublattices should be equivalent
                # TODO: Fix this to filter because we need to guarantee the plot points are disordered
                occ = data['solver']['sublattice_occupancies']
                subl_idx = np.nonzero([isinstance(c, (list, tuple)) for c in occ[0]])[0]
                if len(subl_idx) > 1:
                    subl_idx = int(subl_idx[0])
                else:
                    subl_idx = int(subl_idx)
                indep_var_data = [c[subl_idx][1] for c in occ]
            else:
                interactions = np.array([cond_dict[Symbol('YS')] for cond_dict in sample_condition_dicts])
                indep_var_data = 1 - (interactions+1)/2
            if y.endswith('_MIX') and data['output'].endswith('_FORM'):
                # All the _FORM data we have still has the lattice stability contribution
                # Need to zero it out to shift formation data to mixing
                mod_latticeonly = Model(dbf, comps, phase_name, parameters={'GHSER'+c.upper(): 0 for c in comps})
                mod_latticeonly.models = {key: value for key, value in mod_latticeonly.models.items()
                                          if key == 'ref'}
                temps = data['conditions'].get('T', 300)
                pressures = data['conditions'].get('P', 101325)
                points = build_sitefractions(phase_name, data['solver']['sublattice_configurations'],
                                             data['solver']['sublattice_occupancies'])
                for point_idx in range(len(points)):
                    missing_variables = mod_latticeonly.ast.atoms(v.SiteFraction) - set(points[point_idx].keys())
                    # Set unoccupied values to zero
                    points[point_idx].update({key: 0 for key in missing_variables})
                    # Change entry to a sorted array of site fractions
                    points[point_idx] = list(OrderedDict(sorted(points[point_idx].items(), key=str)).values())
                points = np.array(points, dtype=np.float_)
                # TODO: Real temperature support
                points = points[None, None]
                stability = calculate(dbf, comps, [phase_name], output=data['output'][:-5],
                                      T=temps, P=pressures, points=points,
                                      model=mod_latticeonly, mode='numpy')
                response_data -= stability[data['output'][:-5]].values.squeeze()

        response_data += np.array(data['values'], dtype=np.float_)
        response_data = response_data.flatten()
        if not bar_chart:
            extra_kwargs = {}
            extra_kwargs['markersize'] = 8
            extra_kwargs['linestyle'] = 'none'
            extra_kwargs['clip_on'] = False
            ref = data.get('reference', '')
            mark = symbol_map[ref]['markers']
            ax.plot(indep_var_data, response_data,
                    label=symbol_map[ref]['formatted'],
                    marker=mark['marker'],
                    fillstyle=mark['fillstyle'],
                    **extra_kwargs)
        else:
            bar_labels.append(data.get('reference', None))
            bar_data.append(response_data[0])
    if bar_chart:
        ax.barh(0.02 * np.arange(len(bar_data)), bar_data,
                       color='k', height=0.01)
        endmember_title = ' to '.join([':'.join(i) for i in endpoints])
        ax.get_figure().suptitle('{} (T = {} K)'.format(endmember_title, temperatures), fontsize=20)
        ax.set_yticks(0.02 * np.arange(len(bar_data)))
        ax.set_yticklabels(bar_labels, fontsize=20)
        # This bar chart is rotated 90 degrees, so "y" is now x
        ax.set_xlabel(plot_mapping.get(y, y))
    else:
        ax.set_frame_on(False)
        leg = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))  # legend outside
        leg.get_frame().set_edgecolor('black')
    return ax
Exemplo n.º 30
0
def build_callables(dbf,
                    comps,
                    phases,
                    models,
                    parameter_symbols=None,
                    output='GM',
                    build_gradients=True,
                    build_hessians=False,
                    additional_statevars=None):
    """
    Create a compiled callables dictionary.

    Parameters
    ----------
    dbf : Database
        A Database object
    comps : list
        List of component names
    phases : list
        List of phase names
    models : dict
        Dictionary of {phase_name: Model subclass}
    parameter_symbols : list, optional
        List of string or SymPy Symbols that will be overridden in the callables.
    output : str, optional
        Output property of the particular Model to sample. Defaults to 'GM'
    build_gradients : bool, optional
        Whether or not to build gradient functions. Defaults to True.
    build_hessians : bool, optional
        Whether or not to build Hessian functions. Defaults to False.
    additional_statevars : set, optional
        State variables to include in the callables that may not be in the models (e.g. from conditions)

    Returns
    -------
    callables : dict
        Dictionary of keyword argument callables to pass to equilibrium.
        Maps {'output' -> {'function' -> {'phase_name' -> AutowrapFunction()}}}.

    Notes
    -----
    *All* the state variables used in calculations must be specified.
    If these are not specified as state variables of the models (e.g. often the
    case for v.N), then it must be supplied by the additional_statevars keyword
    argument.

    Examples
    --------
    >>> from pycalphad import Database, equilibrium, variables as v
    >>> from pycalphad.codegen.callables import build_callables
    >>> from pycalphad.core.utils import instantiate_models
    >>> dbf = Database('AL-NI.tdb')
    >>> comps = ['AL', 'NI', 'VA']
    >>> phases = ['LIQUID', 'AL3NI5', 'AL3NI2', 'AL3NI']
    >>> models = instantiate_models(dbf, comps, phases)
    >>> callables = build_callables(dbf, comps, phases, models, additional_statevars={v.P, v.T, v.N})
    >>> 'GM' in callables.keys()
    True
    >>> 'massfuncs' in callables['GM']
    True
    >>> conditions = {v.P: 101325, v.T: 2500, v.X('AL'): 0.2}
    >>> equilibrium(dbf, comps, phases, conditions, callables=callables)
    """
    additional_statevars = set(
        additional_statevars) if additional_statevars is not None else set()
    parameter_symbols = parameter_symbols if parameter_symbols is not None else []
    parameter_symbols = sorted([wrap_symbol(x) for x in parameter_symbols],
                               key=str)
    comps = sorted(unpack_components(dbf, comps))
    pure_elements = get_pure_elements(dbf, comps)

    _callables = {
        'massfuncs': {},
        'massgradfuncs': {},
        'masshessfuncs': {},
        'callables': {},
        'grad_callables': {},
        'hess_callables': {},
        'internal_cons_func': {},
        'internal_cons_jac': {},
        'internal_cons_hess': {},
        'multiphase_cons_func': {},
        'multiphase_cons_jac': {},
        'multiphase_cons_hess': {}
    }

    state_variables = get_state_variables(models=models)
    state_variables |= additional_statevars
    if state_variables != {v.T, v.P, v.N}:
        warnings.warn(
            "State variables in `build_callables` are not {{N, P, T}}, but {}. This can lead to incorrectly "
            "calculated values if the state variables used to call the generated functions do not match the "
            "state variables used to create them. State variables can be added with the "
            "`additional_statevars` argument.".format(state_variables))
    state_variables = sorted(state_variables, key=str)

    for name in phases:
        mod = models[name]
        site_fracs = mod.site_fractions
        try:
            out = getattr(mod, output)
        except AttributeError:
            raise AttributeError(
                'Missing Model attribute {0} specified for {1}'.format(
                    output, mod.__class__))

        # Build the callables of the output
        # Only force undefineds to zero if we're not overriding them
        undefs = {
            x
            for x in out.free_symbols if not isinstance(x, v.StateVariable)
        } - set(parameter_symbols)
        undef_vals = repeat(0., len(undefs))
        out = out.xreplace(dict(zip(undefs, undef_vals)))
        build_output = build_functions(out,
                                       tuple(state_variables + site_fracs),
                                       parameters=parameter_symbols,
                                       include_grad=build_gradients,
                                       include_hess=build_hessians)
        cf, gf, hf = build_output.func, build_output.grad, build_output.hess
        _callables['callables'][name] = cf
        _callables['grad_callables'][name] = gf
        _callables['hess_callables'][name] = hf

        # Build the callables for mass
        # TODO: In principle, we should also check for undefs in mod.moles()
        mcf, mgf, mhf = zip(*[
            build_functions(mod.moles(el),
                            state_variables + site_fracs,
                            include_obj=True,
                            include_grad=build_gradients,
                            include_hess=build_hessians,
                            parameters=parameter_symbols)
            for el in pure_elements
        ])

        _callables['massfuncs'][name] = mcf
        _callables['massgradfuncs'][name] = mgf
        _callables['masshessfuncs'][name] = mhf
    return {output: _callables}
Exemplo n.º 31
0
    def __init__(self, dbe, comps, phase_name, parameters=None):
        self.components = set()
        self.constituents = []
        self.phase_name = phase_name.upper()
        phase = dbe.phases[self.phase_name]
        self.site_ratios = list(phase.sublattices)
        for idx, sublattice in enumerate(phase.constituents):
            subl_comps = set(sublattice).intersection(unpack_components(dbe, comps))
            self.components |= subl_comps
            # Support for variable site ratios in ionic liquid model
            if phase.model_hints.get('ionic_liquid_2SL', False):
                if idx == 0:
                    subl_idx = 1
                elif idx == 1:
                    subl_idx = 0
                else:
                    raise ValueError('Two-sublattice ionic liquid specified with more than two sublattices')
                self.site_ratios[subl_idx] = Add(*[v.SiteFraction(self.phase_name, idx, spec) * abs(spec.charge) for spec in subl_comps])
        if phase.model_hints.get('ionic_liquid_2SL', False):
            # Special treatment of "neutral" vacancies in 2SL ionic liquid
            # These are treated as having variable valence
            for idx, sublattice in enumerate(phase.constituents):
                subl_comps = set(sublattice).intersection(unpack_components(dbe, comps))
                if v.Species('VA') in subl_comps:
                    if idx == 0:
                        subl_idx = 1
                    elif idx == 1:
                        subl_idx = 0
                    else:
                        raise ValueError('Two-sublattice ionic liquid specified with more than two sublattices')
                    self.site_ratios[subl_idx] += self.site_ratios[idx] * v.SiteFraction(self.phase_name, idx, v.Species('VA'))
        self.site_ratios = tuple(self.site_ratios)

        # Verify that this phase is still possible to build
        for sublattice in phase.constituents:
            if len(set(sublattice).intersection(self.components)) == 0:
                # None of the components in a sublattice are active
                # We cannot build a model of this phase
                raise DofError(
                    '{0}: Sublattice {1} of {2} has no components in {3}' \
                    .format(self.phase_name, sublattice,
                            phase.constituents,
                            self.components))
            self.constituents.append(set(sublattice).intersection(self.components))
        self.components = sorted(self.components)

        # Convert string symbol names to sympy Symbol objects
        # This makes xreplace work with the symbols dict
        symbols = {Symbol(s): val for s, val in dbe.symbols.items()}

        def wrap_symbol(obj):
            if isinstance(obj, Symbol):
                return obj
            else:
                return Symbol(obj)
        if parameters is not None:
            symbols.update([(wrap_symbol(s), val) for s, val in parameters.items()])
        self._symbols = {wrap_symbol(key): value for key, value in symbols.items()}

        self.models = OrderedDict()
        self.build_phase(dbe)
        self.site_fractions = sorted([x for x in self.ast.free_symbols if isinstance(x, v.SiteFraction)], key=str)

        for name, value in self.models.items():
            self.models[name] = self.symbol_replace(value, symbols)
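For context, the constructor above runs whenever a Model is instantiated directly. A minimal usage sketch, assuming a TDB file named alni.tdb that defines a LIQUID phase (the file name is illustrative):

from pycalphad import Database, Model

dbf = Database('alni.tdb')                      # hypothetical TDB file
mod = Model(dbf, ['AL', 'NI', 'VA'], 'LIQUID')

# The constructor resolved the active constituents and site ratios,
# built the energy model, and collected the site-fraction variables:
print(mod.constituents)
print(mod.site_ratios)
print(mod.site_fractions)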
Example No. 32
def test_filter_phases_removes_disordered_phases_from_order_disorder():
    """Databases with order-disorder models should have the disordered phases be filtered."""
    all_phases = set(ALNIPT_DBF.phases.keys())
    filtered_phases = set(filter_phases(ALNIPT_DBF, unpack_components(ALNIPT_DBF, ['AL', 'NI', 'PT', 'VA'])))
    assert all_phases.difference(filtered_phases) == {'FCC_A1'}
Example No. 33
def equilibrium(dbf,
                comps,
                phases,
                conditions,
                output=None,
                model=None,
                verbose=False,
                broadcast=True,
                calc_opts=None,
                to_xarray=True,
                scheduler='sync',
                parameters=None,
                solver=None,
                callables=None,
                **kwargs):
    """
    Calculate the equilibrium state of a system containing the specified
    components and phases, under the specified conditions.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : list
        Names of components to consider in the calculation.
    phases : list or dict
        Names of phases to consider in the calculation.
    conditions : dict or (list of dict)
        StateVariables and their corresponding value.
    output : str or list of str, optional
        Additional equilibrium model properties (e.g., CPM, HM, etc.) to compute.
        These must be defined as attributes in the Model class of each phase.
    model : Model, a dict of phase names to Model, or a seq of both, optional
        Model class to use for each phase.
    verbose : bool, optional
        Print details of calculations. Useful for debugging.
    broadcast : bool
        If True, broadcast conditions against each other. This will compute all combinations.
        If False, each condition should be an equal-length list (or single-valued).
        Disabling broadcasting is useful for calculating equilibrium at selected conditions,
        when those conditions don't comprise a grid.
    calc_opts : dict, optional
        Keyword arguments to pass to `calculate`, the energy/property calculation routine.
    to_xarray : bool
        Whether to return an xarray Dataset (True, default) or an EquilibriumResult.
    scheduler : Dask scheduler, optional
        Job scheduler for performing the computation.
        If None, return a Dask graph of the computation instead of actually doing it.
    parameters : dict, optional
        Maps SymPy Symbol to numbers, for overriding the values of parameters in the Database.
    solver : pycalphad.core.solver.SolverBase
        Instance of a solver that is used to calculate local equilibria.
        Defaults to a pycalphad.core.solver.InteriorPointSolver.
    callables : dict, optional
        Pre-computed callable functions for equilibrium calculation.

    Returns
    -------
    Structured equilibrium calculation, or Dask graph if scheduler=None.

    Examples
    --------
    None yet.
    """
    if not broadcast:
        raise NotImplementedError('Broadcasting cannot yet be disabled')
    comps = sorted(unpack_components(dbf, comps))
    phases = unpack_phases(phases) or sorted(dbf.phases.keys())
    list_of_possible_phases = filter_phases(dbf, comps)
    if len(list_of_possible_phases) == 0:
        raise ConditionError(
            'There are no phases in the Database that can be active with components {0}'
            .format(comps))
    active_phases = {
        name: dbf.phases[name]
        for name in filter_phases(dbf, comps, phases)
    }
    if len(active_phases) == 0:
        raise ConditionError(
            'None of the passed phases ({0}) are active. List of possible phases: {1}.'
            .format(phases, list_of_possible_phases))
    if isinstance(comps, (str, v.Species)):
        comps = [comps]
    if len(set(comps) - set(dbf.species)) > 0:
        raise EquilibriumError('Components not found in database: {}'.format(
            ','.join([c.name for c in (set(comps) - set(dbf.species))])))
    calc_opts = calc_opts if calc_opts is not None else dict()
    solver = solver if solver is not None else InteriorPointSolver(
        verbose=verbose)
    parameters = parameters if parameters is not None else dict()
    if isinstance(parameters, dict):
        parameters = OrderedDict(sorted(parameters.items(), key=str))
    models = instantiate_models(dbf,
                                comps,
                                active_phases,
                                model=model,
                                parameters=parameters)
    # Temporary solution until constraint system improves
    if conditions.get(v.N) is None:
        conditions[v.N] = 1
    if np.any(np.array(conditions[v.N]) != 1):
        raise ConditionError('N!=1 is not yet supported, got N={}'.format(
            conditions[v.N]))
    # Modify condition values to stay within numerical limits (e.g., X(AL)=0 is nudged to a small positive value)
    # Also wrap single-valued conditions in lists
    conds = _adjust_conditions(conditions)

    for cond in conds.keys():
        if isinstance(cond,
                      (v.Composition,
                       v.ChemicalPotential)) and cond.species not in comps:
            raise ConditionError(
                '{} refers to non-existent component'.format(cond))
    state_variables = sorted(get_state_variables(models=models, conds=conds),
                             key=str)
    str_conds = OrderedDict((str(key), value) for key, value in conds.items())
    components = [x for x in sorted(comps)]
    desired_active_pure_elements = [
        list(x.constituents.keys()) for x in components
    ]
    desired_active_pure_elements = [
        el.upper() for constituents in desired_active_pure_elements
        for el in constituents
    ]
    pure_elements = sorted(
        set([x for x in desired_active_pure_elements if x != 'VA']))
    if verbose:
        print('Components:', ' '.join([str(x) for x in comps]))
        print('Phases:', end=' ')
    output = output if output is not None else 'GM'
    output = output if isinstance(output, (list, tuple, set)) else [output]
    output = set(output)
    output |= {'GM'}
    output = sorted(output)
    phase_records = build_phase_records(dbf,
                                        comps,
                                        active_phases,
                                        conds,
                                        models,
                                        output='GM',
                                        callables=callables,
                                        parameters=parameters,
                                        verbose=verbose,
                                        build_gradients=True,
                                        build_hessians=True)
    if verbose:
        print('[done]', end='\n')

    # 'calculate' accepts conditions through its keyword arguments
    grid_opts = calc_opts.copy()
    statevar_strings = [str(x) for x in state_variables]
    grid_opts.update({
        key: value
        for key, value in str_conds.items() if key in statevar_strings
    })
    if 'pdens' not in grid_opts:
        grid_opts['pdens'] = 500
    grid = calculate(dbf,
                     comps,
                     active_phases,
                     model=models,
                     fake_points=True,
                     callables=callables,
                     output='GM',
                     parameters=parameters,
                     to_xarray=False,
                     **grid_opts)
    coord_dict = str_conds.copy()
    # +1 accommodates the degenerate degree of freedom at invariant reactions
    coord_dict['vertex'] = np.arange(len(pure_elements) + 1)
    coord_dict['component'] = pure_elements
    properties = starting_point(conds, state_variables, phase_records, grid)
    properties = _solve_eq_at_conditions(comps,
                                         properties,
                                         phase_records,
                                         grid,
                                         list(str_conds.keys()),
                                         state_variables,
                                         verbose,
                                         solver=solver)

    # Compute equilibrium values of any additional user-specified properties
    # We already computed these properties so don't recompute them
    output = sorted(set(output) - {'GM', 'MU'})
    for out in output:
        if (out is None) or (len(out) == 0):
            continue
        # TODO: How do we know if a specified property should be per_phase or not?
        # For now, we make a best guess
        if (out == 'degree_of_ordering') or (out == 'DOO'):
            per_phase = True
        else:
            per_phase = False
        eqcal = _eqcalculate(dbf,
                             comps,
                             active_phases,
                             conditions,
                             out,
                             data=properties,
                             per_phase=per_phase,
                             model=models,
                             callables=callables,
                             parameters=parameters,
                             **calc_opts)
        properties = properties.merge(eqcal, inplace=True, compat='equals')
    if to_xarray:
        properties = properties.get_dataset()
    properties.attrs['created'] = datetime.utcnow().isoformat()
    if len(kwargs) > 0:
        warnings.warn(
            'The following equilibrium keyword arguments were passed, but unused:\n{}'
            .format(kwargs))
    return properties
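A minimal usage sketch for the routine above, assuming a database file alni.tdb that defines FCC_A1 and LIQUID (the file name and phase list are illustrative):

from pycalphad import Database, equilibrium
import pycalphad.variables as v

dbf = Database('alni.tdb')               # hypothetical TDB file
comps = ['AL', 'NI', 'VA']
phases = ['FCC_A1', 'LIQUID']
conditions = {v.X('NI'): 0.3,
              v.T: (600, 2000, 100),     # (start, stop, step) temperature range, broadcast against the others
              v.P: 101325,
              v.N: 1}                    # N defaults to 1 if omitted (see above)
eq = equilibrium(dbf, comps, phases, conditions, output='HM')
print(eq.GM)   # molar Gibbs energy at each condition
print(eq.MU)   # chemical potentials
print(eq.HM)   # the extra property requested via output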
Example No. 34
def calculate(dbf, comps, phases, mode=None, output='GM', fake_points=False, broadcast=True, parameters=None, **kwargs):
    """
    Sample the property surface of 'output' containing the specified
    components and phases. Model parameters are taken from 'dbf' and any
    state variables (T, P, etc.) can be specified as keyword arguments.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : str or sequence
        Names of components to consider in the calculation.
    phases : str or sequence
        Names of phases to consider in the calculation.
    mode : string, optional
        See 'make_callable' docstring for details.
    output : string, optional
        Model attribute to sample.
    fake_points : bool, optional (Default: False)
        If True, the first few points of the output surface will be fictitious
        points used to define an equilibrium hyperplane guaranteed to be above
        all the other points. This is used for convex hull computations.
    broadcast : bool, optional
        If True, broadcast given state variable lists against each other to create a grid.
        If False, assume state variables are given as equal-length lists.
    points : ndarray or a dict of phase names to ndarray, optional
        Columns of ndarrays must be internal degrees of freedom (site fractions), sorted.
        If this is not specified, points will be generated automatically.
    pdens : int, a dict of phase names to int, or a seq of both, optional
        Number of points to sample per degree of freedom.
        Default: 2000; Default when called from equilibrium(): 500
    model : Model, a dict of phase names to Model, or a seq of both, optional
        Model class to use for each phase.
    sampler : callable, a dict of phase names to callable, or a seq of both, optional
        Function to sample phase constitution space.
        Must have same signature as 'pycalphad.core.utils.point_sample'
    grid_points : bool, a dict of phase names to bool, or a seq of both, optional (Default: True)
        Whether to add evenly spaced points between end-members.
        The density of points is determined by 'pdens'
    parameters : dict, optional
        Maps SymPy Symbol to numbers, for overriding the values of parameters in the Database.

    Returns
    -------
    Dataset of the sampled attribute as a function of state variables

    Examples
    --------
    None yet.
    """
    # Here we check for any keyword arguments that are special, i.e.,
    # there may be keyword arguments that aren't state variables
    pdens_dict = unpack_kwarg(kwargs.pop('pdens', 2000), default_arg=2000)
    points_dict = unpack_kwarg(kwargs.pop('points', None), default_arg=None)
    model_dict = unpack_kwarg(kwargs.pop('model', Model), default_arg=Model)
    callable_dict = unpack_kwarg(kwargs.pop('callables', None), default_arg=None)
    mass_dict = unpack_kwarg(kwargs.pop('massfuncs', None), default_arg=None)
    sampler_dict = unpack_kwarg(kwargs.pop('sampler', None), default_arg=None)
    fixedgrid_dict = unpack_kwarg(kwargs.pop('grid_points', True), default_arg=True)
    parameters = parameters or dict()
    if isinstance(parameters, dict):
        parameters = OrderedDict(sorted(parameters.items(), key=str))
    param_symbols = tuple(parameters.keys())
    param_values = np.atleast_1d(np.array(list(parameters.values()), dtype=np.float64))
    if isinstance(phases, str):
        phases = [phases]
    if isinstance(comps, (str, v.Species)):
        comps = [comps]
    comps = sorted(unpack_components(dbf, comps))
    if points_dict is None and broadcast is False:
        raise ValueError('The \'points\' keyword argument must be specified if broadcast=False is also given.')
    nonvacant_components = [x for x in sorted(comps) if x.number_of_atoms > 0]

    # Convert keyword strings to proper state variable objects
    # If we don't do this, sympy will get confused during substitution
    statevar_dict = dict((v.StateVariable(key), unpack_condition(value)) for (key, value) in kwargs.items())
    # XXX: CompiledModel assumes P, T are the only state variables
    if statevar_dict.get(v.P, None) is None:
        statevar_dict[v.P] = 101325
    if statevar_dict.get(v.T, None) is None:
        statevar_dict[v.T] = 300
    # Sort after default state variable check to fix gh-116
    statevar_dict = collections.OrderedDict(sorted(statevar_dict.items(), key=lambda x: str(x[0])))
    str_statevar_dict = collections.OrderedDict((str(key), unpack_condition(value)) \
                                                for (key, value) in statevar_dict.items())
    all_phase_data = []
    comp_sets = {}
    largest_energy = 1e30
    maximum_internal_dof = 0

    # Consider only the active phases
    active_phases = dict((name.upper(), dbf.phases[name.upper()]) \
        for name in unpack_phases(phases))

    for phase_name, phase_obj in sorted(active_phases.items()):
        # Build the symbolic representation of the energy
        mod = model_dict[phase_name]
        # if this is an object type, we need to construct it
        if isinstance(mod, type):
            try:
                model_dict[phase_name] = mod = mod(dbf, comps, phase_name, parameters=parameters)
            except DofError:
                # we can't build the specified phase because the
                # specified components aren't found in every sublattice
                # we'll just skip it
                warnings.warn("""Suspending specified phase {} due to
                some sublattices containing only unspecified components""".format(phase_name))
                continue
        if points_dict[phase_name] is None:
            maximum_internal_dof = max(maximum_internal_dof, sum(len(x) for x in mod.constituents))
        else:
            maximum_internal_dof = max(maximum_internal_dof, np.asarray(points_dict[phase_name]).shape[-1])

    for phase_name, phase_obj in sorted(active_phases.items()):
        try:
            mod = model_dict[phase_name]
        except KeyError:
            continue
        # this is a phase model we couldn't construct for whatever reason; skip it
        if isinstance(mod, type):
            continue
        # Construct an ordered list of the variables
        variables, sublattice_dof = generate_dof(phase_obj, mod.components)
        # Build the "fast" representation of that model
        if callable_dict[phase_name] is None:
            try:
                out = getattr(mod, output)
            except AttributeError:
                raise AttributeError('Missing Model attribute {0} specified for {1}'
                                     .format(output, mod.__class__))
            # As a last resort, treat undefined symbols as zero
            # But warn the user when we do this
            # This is consistent with TC's behavior
            undefs = list(out.atoms(Symbol) - out.atoms(v.StateVariable))
            for undef in undefs:
                out = out.xreplace({undef: float(0)})
                warnings.warn('Setting undefined symbol {0} for phase {1} to zero'.format(undef, phase_name))
            comp_sets[phase_name] = build_functions(out, list(statevar_dict.keys()) + variables,
                                                    include_obj=True, include_grad=False,
                                                    parameters=param_symbols)
        else:
            comp_sets[phase_name] = callable_dict[phase_name]
        if mass_dict[phase_name] is None:
            pure_elements = [spec for spec in nonvacant_components
                             if (len(spec.constituents.keys()) == 1 and
                                 list(spec.constituents.keys())[0] == spec.name)
                             ]
            # TODO: In principle, we should also check for undefs in mod.moles()
            mass_dict[phase_name] = [build_functions(mod.moles(el), list(statevar_dict.keys()) + variables,
                                                     include_obj=True, include_grad=False,
                                                     parameters=param_symbols)
                                     for el in pure_elements]
        phase_record = PhaseRecord_from_cython(comps, list(statevar_dict.keys()) + variables,
                                    np.array(dbf.phases[phase_name].sublattices, dtype=np.float64),
                                    param_values, comp_sets[phase_name], None, None, mass_dict[phase_name], None)
        points = points_dict[phase_name]
        if points is None:
            points = _sample_phase_constitution(phase_name, phase_obj.constituents, sublattice_dof, comps,
                                                tuple(variables), sampler_dict[phase_name] or point_sample,
                                                fixedgrid_dict[phase_name], pdens_dict[phase_name])
        points = np.atleast_2d(points)

        fp = fake_points and (phase_name == sorted(active_phases.keys())[0])
        phase_ds = _compute_phase_values(nonvacant_components, str_statevar_dict,
                                         points, phase_record, output,
                                         maximum_internal_dof, broadcast=broadcast,
                                         largest_energy=float(largest_energy), fake_points=fp)
        all_phase_data.append(phase_ds)

    # speedup for single-phase case (found by profiling)
    if len(all_phase_data) > 1:
        final_ds = concat(all_phase_data, dim='points')
        final_ds['points'].values = np.arange(len(final_ds['points']))
        final_ds.coords['points'].values = np.arange(len(final_ds['points']))
    else:
        final_ds = all_phase_data[0]
    return final_ds
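A short usage sketch for the sampling routine above; as the docstring notes, state variables are passed as keyword arguments (the file name is illustrative):

from pycalphad import Database, calculate

dbf = Database('alni.tdb')              # hypothetical TDB file
res = calculate(dbf, ['AL', 'NI', 'VA'], 'LIQUID',
                T=(300, 2000, 50),      # (start, stop, step) range of temperatures
                P=101325,
                output='GM',
                pdens=500)              # points sampled per internal degree of freedom
print(res.GM)                           # sampled Gibbs energies over the state variables plus 'points'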
Example No. 35
def calculate_activity_error(dbf,
                             comps,
                             phases,
                             datasets,
                             parameters=None,
                             phase_models=None,
                             callables=None,
                             data_weight=1.0):
    """
    Return the sum of square error from activity data

    Parameters
    ----------
    dbf : pycalphad.Database
        Database to consider
    comps : list
        List of active component names
    phases : list
        List of phases to consider
    datasets : espei.utils.PickleableTinyDB
        Datasets that contain single phase data
    parameters : dict
        Dictionary of symbols that will be overridden in pycalphad.equilibrium
    phase_models : dict
        Phase models to pass to pycalphad calculations
    callables : dict
        Callables to pass to pycalphad
    data_weight : float
        Dimensionless weight applied to activity data. The base standard
        deviation of the chemical potential differences, 500 J/mol, is divided
        by this weight (and by any per-dataset weight) when computing the error.

    Returns
    -------
    float
        A single float of the sum of square errors

    Notes
    -----
    General procedure:
    1. Get the datasets
    2. For each dataset

        a. Calculate reference state equilibrium
        b. Calculate current chemical potentials
        c. Find the target chemical potentials
        d. Calculate error due to chemical potentials

    """
    std_dev = 500  # J/mol

    if parameters is None:
        parameters = {}

    activity_datasets = datasets.search(
        (tinydb.where('output').test(lambda x: 'ACR' in x))
        & (tinydb.where('components').test(lambda x: set(x).issubset(comps))))

    error = 0
    if len(activity_datasets) == 0:
        return error

    for ds in activity_datasets:
        acr_component = ds['output'].split('_')[1]  # the component of interest
        # calculate the reference state equilibrium
        ref = ds['reference_state']
        # data_comps and data_phases ensures that we only do calculations on
        # the subsystem of the system defining the data.
        data_comps = ds['components']
        data_phases = filter_phases(dbf,
                                    unpack_components(dbf, data_comps),
                                    candidate_phases=phases)
        ref_conditions = {
            _map_coord_to_variable(coord): val
            for coord, val in ref['conditions'].items()
        }
        ref_result = equilibrium(dbf,
                                 data_comps,
                                 ref['phases'],
                                 ref_conditions,
                                 model=phase_models,
                                 parameters=parameters,
                                 callables=callables)

        # calculate current chemical potentials
        # get the conditions
        conditions = {}
        # first make sure the conditions are paired
        # only get the compositions, P and T are special cased
        conds_list = [(cond, value)
                      for cond, value in ds['conditions'].items()
                      if cond not in ('P', 'T')]
        # ravel the conditions
        # we will ravel each composition individually, since they all must have the same shape
        for comp_name, comp_x in conds_list:
            P, T, X = ravel_conditions(ds['values'], ds['conditions']['P'],
                                       ds['conditions']['T'], comp_x)
            conditions[v.P] = P
            conditions[v.T] = T
            conditions[_map_coord_to_variable(comp_name)] = X
        # do the calculations
        # we cannot currently turn broadcasting off, so we have to do equilibrium one by one
        # invert the conditions dicts to make a list of condition dicts rather than a condition dict of lists
        # assume now that the ravelled conditions all have the same size
        conditions_list = [{c: conditions[c][i]
                            for c in conditions.keys()}
                           for i in range(len(conditions[v.T]))]
        current_chempots = []
        for conds in conditions_list:
            sample_eq_res = equilibrium(dbf,
                                        data_comps,
                                        data_phases,
                                        conds,
                                        model=phase_models,
                                        parameters=parameters,
                                        callables=callables)
            current_chempots.append(
                sample_eq_res.MU.sel(
                    component=acr_component).values.flatten()[0])
        current_chempots = np.array(current_chempots)

        # calculate target chempots
        samples = np.array(ds['values']).flatten()
        target_chempots = target_chempots_from_activity(
            acr_component, samples, conditions[v.T], ref_result)
        # calculate the error
        weight = ds.get('weight', 1.0)
        pe = chempot_error(current_chempots,
                           target_chempots,
                           std_dev=std_dev / data_weight / weight)
        error += np.sum(pe)
        _log.debug(
            'Data: %s, chemical potential difference: %s, probability: %s, reference: %s',
            samples, current_chempots - target_chempots, pe, ds["reference"])

    # TODO: write a test for this
    # must coerce sympy.core.numbers.Float to float64
    if np.any(np.isnan(np.array([error], dtype=np.float64))):
        return -np.inf
    return error
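The target chemical potentials come from the standard relation mu_i = mu_i_ref + R*T*ln(a_i). The sketch below only illustrates that arithmetic; the actual conversion is done by target_chempots_from_activity, which is defined elsewhere in ESPEI:

import numpy as np

R = 8.314462618  # gas constant, J/(mol K)

def chempots_from_activity(activities, temperatures, ref_chempot):
    """Convert measured activities to target chemical potentials: mu = mu_ref + R*T*ln(a)."""
    activities = np.asarray(activities, dtype=float)
    temperatures = np.asarray(temperatures, dtype=float)
    return ref_chempot + R * temperatures * np.log(activities)

# activity of 0.5 at 1000 K, relative to a reference chemical potential of -50 kJ/mol
print(chempots_from_activity([0.5], [1000.0], -50_000.0))  # roughly -55763 J/mol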