Example #1
def test_instantiate_models_only_returns_desired_phases():
    """instantiate_models should only return phases passed"""
    comps = ['AL', 'NI', 'VA']
    phases = ['FCC_A1', 'LIQUID']

    # models are overspecified w.r.t. phases
    too_many_phases = ['FCC_A1', 'LIQUID', 'AL3NI1']
    too_many_models = {
        phase: Model(ALNIPT_DBF, comps, phase)
        for phase in too_many_phases
    }
    inst_mods = instantiate_models(ALNIPT_DBF,
                                   comps,
                                   phases,
                                   model=too_many_models)
    assert len(inst_mods) == len(phases)

    # models are underspecified w.r.t. phases
    too_few_phases = ['FCC_A1']
    too_few_models = {
        phase: Model(ALNIPT_DBF, comps, phase)
        for phase in too_few_phases
    }
    inst_mods = instantiate_models(ALNIPT_DBF,
                                   comps,
                                   phases,
                                   model=too_few_models)
    assert len(inst_mods) == len(phases)
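The test above relies on instantiate_models returning models only for the requested phases, building any that are missing. A minimal standalone sketch of the plain call, assuming a hypothetical al-ni.tdb database file:

# Usage sketch (hypothetical TDB file name).
from pycalphad import Database
from pycalphad.core.utils import instantiate_models

dbf = Database('al-ni.tdb')  # hypothetical database file
comps = ['AL', 'NI', 'VA']
phases = ['FCC_A1', 'LIQUID']
models = instantiate_models(dbf, comps, phases)
# The returned dict maps exactly the requested phases to Model instances.
assert set(models.keys()) == set(phases)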
Example #2
def test_lnprob_calculates_multi_phase_probability_for_success(datasets_db):
    """lnprob() successfully calculates the probability for equilibrium """
    dbf = Database.from_string(CU_MG_TDB, fmt='tdb')
    datasets_db.insert(CU_MG_DATASET_ZPF_WORKING)
    comps = ['CU', 'MG', 'VA']
    phases = ['LIQUID', 'FCC_A1', 'HCP_A3', 'LAVES_C15', 'CUMG2']
    param = 'VV0001'
    orig_val = dbf.symbols[param].args[0].expr
    models = instantiate_models(dbf, comps, phases, parameters={param: orig_val})
    eq_callables = build_callables(dbf, comps, phases, models, parameter_symbols=[param],
                        output='GM', build_gradients=True, build_hessians=False,
                        additional_statevars={v.N, v.P, v.T})

    zpf_kwargs = {
        'dbf': dbf, 'phases': phases, 'zpf_data': get_zpf_data(comps, phases, datasets_db),
        'phase_models': models, 'callables': eq_callables,
        'data_weight': 1.0,
    }
    opt = EmceeOptimizer(dbf)
    res = opt.predict([10], prior_rvs=[rv_zero()], symbols_to_fit=[param], zpf_kwargs=zpf_kwargs)

    assert np.isreal(res)
    assert np.isclose(res, -31.309645520830344, rtol=1e-6)

    res_2 = opt.predict([10000000], prior_rvs=[rv_zero()], symbols_to_fit=[param], zpf_kwargs=zpf_kwargs)

    assert not np.isclose(res_2, -31.309645520830344, rtol=1e-6)
Example #3
def get_eq_callables_(dbf, comps, phases, symbols_to_fit):

    for x in symbols_to_fit:
        if isinstance(dbf.symbols[x], sympy.Piecewise):
            dbf.symbols[x] = dbf.symbols[x].args[0].expr

    models = instantiate_models(dbf,
                                comps,
                                phases,
                                parameters=dict(
                                    zip(symbols_to_fit,
                                        [0] * len(symbols_to_fit))))

    eq_callables = build_callables(dbf,
                                   comps,
                                   phases,
                                   models,
                                   parameter_symbols=symbols_to_fit,
                                   build_gradients=True,
                                   build_hessians=False,
                                   additional_statevars={v.N, v.P, v.T})

    logging.info('callables have been created')

    return eq_callables
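A hedged usage sketch for the helper above; the database file and the VV-prefixed fitting symbol are placeholders:

# Usage sketch (hypothetical database file and fitting symbol).
from pycalphad import Database

dbf = Database('cu-mg.tdb')  # hypothetical database file
eq_callables = get_eq_callables_(dbf, ['CU', 'MG', 'VA'],
                                 ['LIQUID', 'FCC_A1'], ['VV0001'])
# The callables can then be reused, e.g. equilibrium(..., callables=eq_callables).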
Example #4
def test_instantiate_models_only_returns_desired_phases():
    """instantiate_models should only return phases passed"""
    comps = ['AL', 'NI', 'VA']
    phases = ['FCC_A1', 'LIQUID']

    # models are overspecified w.r.t. phases
    too_many_phases = ['FCC_A1', 'LIQUID', 'AL3NI1']
    too_many_models = {phase: Model(ALNIPT_DBF, comps, phase) for phase in too_many_phases}
    inst_mods = instantiate_models(ALNIPT_DBF, comps, phases, model=too_many_models)
    assert len(inst_mods) == len(phases)

    # models are underspecified w.r.t. phases
    too_few_phases = ['FCC_A1']
    too_few_models = {phase: Model(ALNIPT_DBF, comps, phase) for phase in too_few_phases}
    inst_mods = instantiate_models(ALNIPT_DBF, comps, phases, model=too_few_models)
    assert len(inst_mods) == len(phases)
Example #5
def test_missing_phase_records_passed_to_equilibrium_raises():
    "equilibrium should raise an error if all the active phases are not included in the phase_records"
    my_phases = ['LIQUID', 'FCC_A1']
    subset_phases = ['FCC_A1']
    comps = ['AL', 'FE', 'VA']
    conds = {v.T: 1400, v.P: 101325, v.N: 1.0, v.X('AL'): 0.55}

    models = instantiate_models(ALFE_DBF, comps, my_phases)
    phase_records = build_phase_records(ALFE_DBF, comps, my_phases, conds, models)

    models_subset = instantiate_models(ALFE_DBF, comps, subset_phases)
    phase_records_subset = build_phase_records(ALFE_DBF, comps, subset_phases, conds, models_subset)

    # Under-specified models
    with pytest.raises(ValueError):
        equilibrium(ALFE_DBF, comps, my_phases, conds, verbose=True, model=models_subset, phase_records=phase_records)

    # Under-specified phase_records
    with pytest.raises(ValueError):
        equilibrium(ALFE_DBF, comps, my_phases, conds, verbose=True, model=models, phase_records=phase_records_subset)
Example #6
def test_missing_models_with_phase_records_passed_to_equilibrium_raises():
    "equilibrium should raise an error if all the active phases are not included in the phase_records"
    my_phases = ['LIQUID', 'FCC_A1', 'HCP_A3', 'AL5FE2', 'AL2FE', 'AL13FE4', 'AL5FE4']
    comps = ['AL', 'FE', 'VA']
    conds = {v.T: 1400, v.P: 101325, v.N: 1.0, v.X('AL'): 0.55}

    models = instantiate_models(ALFE_DBF, comps, my_phases)
    phase_records = build_phase_records(ALFE_DBF, comps, my_phases, conds, models)

    with pytest.raises(ValueError):
        # model=models NOT passed
        equilibrium(ALFE_DBF, comps, my_phases, conds, verbose=True, phase_records=phase_records)
Example #7
def test_phase_records_passed_to_equilibrium():
    "Pre-built phase records can be passed to equilibrium."
    my_phases = ['LIQUID', 'FCC_A1', 'HCP_A3', 'AL5FE2', 'AL2FE', 'AL13FE4', 'AL5FE4']
    comps = ['AL', 'FE', 'VA']
    conds = {v.T: 1400, v.P: 101325, v.N: 1.0, v.X('AL'): 0.55}

    models = instantiate_models(ALFE_DBF, comps, my_phases)
    phase_records = build_phase_records(ALFE_DBF, comps, my_phases, conds, models)

    # With models passed
    eqx = equilibrium(ALFE_DBF, comps, my_phases, conds, verbose=True, model=models, phase_records=phase_records)
    assert_allclose(eqx.GM.values.flat[0], -9.608807e4)
Example #8
def get_zpf_data(dbf: Database, comps: Sequence[str], phases: Sequence[str],
                 datasets: PickleableTinyDB, parameters: Dict[str, float]):
    """
    Return the ZPF data used in the calculation of ZPF error

    Parameters
    ----------
    dbf : Database
        Database used to construct the Model and PhaseRecord objects
    comps : list
        List of active component names
    phases : list
        List of phases to consider
    datasets : espei.utils.PickleableTinyDB
        Datasets that contain ZPF (phase boundary) data
    parameters : dict
        Dictionary mapping symbols to optimize to their initial values

    Returns
    -------
    list
        List of data dictionaries with keys ``weight``, ``data_comps``,
        ``phase_regions`` and ``dataset_reference``. ``data_comps`` are the
        components for the data in question. ``phase_regions`` are the ZPF
        phases, state variables and compositions.
    """
    desired_data = datasets.search(
        (tinydb.where('output') == 'ZPF')
        & (tinydb.where('components').test(lambda x: set(x).issubset(comps)))
        & (tinydb.where('phases').test(
            lambda x: len(set(phases).intersection(x)) > 0)))

    zpf_data = []  # 1:1 correspondence with each dataset
    for data in desired_data:
        data_comps = list(set(data['components']).union({'VA'}))
        species = sorted(unpack_components(dbf, data_comps), key=str)
        data_phases = filter_phases(dbf, species, candidate_phases=phases)
        models = instantiate_models(dbf,
                                    species,
                                    data_phases,
                                    parameters=parameters)
        all_regions = data['values']
        conditions = data['conditions']
        phase_regions = []
        # Each phase_region is one set of phases in equilibrium (on a tie-line),
        # e.g. [["ALPHA", ["B"], [0.25]], ["BETA", ["B"], [0.5]]]
        for idx, phase_region in enumerate(all_regions):
            # We need to construct a PhaseRegion by matching up phases/compositions to the conditions
            if len(phase_region) < 2:
                # Skip single-phase regions for fitting purposes
                continue
            # Extract the conditions for entire phase region
            region_potential_conds = extract_conditions(conditions, idx)
            region_potential_conds[v.N] = region_potential_conds.get(
                v.N) or 1.0  # Add v.N condition, if missing
            # Extract all the phases and compositions from the tie-line points
            region_phases, region_comp_conds, phase_flags = extract_phases_comps(
                phase_region)
            region_phase_records = [
                build_phase_records(dbf,
                                    species,
                                    data_phases, {
                                        **region_potential_conds,
                                        **comp_conds
                                    },
                                    models,
                                    parameters=parameters,
                                    build_gradients=True,
                                    build_hessians=True)
                for comp_conds in region_comp_conds
            ]
            phase_regions.append(
                PhaseRegion(region_phases, region_potential_conds,
                            region_comp_conds, phase_flags, dbf, species,
                            data_phases, models, region_phase_records))

        data_dict = {
            'weight': data.get('weight', 1.0),
            'data_comps': data_comps,
            'phase_regions': phase_regions,
            'dataset_reference': data['reference']
        }
        zpf_data.append(data_dict)
    return zpf_data
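A short, orientation-only sketch of walking the structure assembled above; the keys come directly from data_dict, and no PhaseRegion internals are assumed:

# Illustrative traversal of the list returned by get_zpf_data (sketch only;
# dbf, comps, phases and datasets as in the surrounding code).
zpf_data = get_zpf_data(dbf, comps, phases, datasets, parameters={})
for data_dict in zpf_data:
    weight = data_dict['weight']
    reference = data_dict['dataset_reference']
    for phase_region in data_dict['phase_regions']:
        # Each PhaseRegion bundles the phases, conditions, models and phase
        # records for one tie-line used in the ZPF error calculation.
        print(weight, reference, phase_region)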
Example #9
def simulate_scheil_solidification(dbf,
                                   comps,
                                   phases,
                                   composition,
                                   start_temperature,
                                   step_temperature=1.0,
                                   liquid_phase_name='LIQUID',
                                   eq_kwargs=None,
                                   stop=0.0001,
                                   verbose=False,
                                   adaptive=True):
    """Perform a Scheil-Gulliver solidification simulation.

    Parameters
    ----------
    dbf : pycalphad.Database
        Database object.
    comps : list
        List of components in the system.
    phases : list
        List of phases in the system.
    composition : Dict[v.X, float]
        Dictionary of independent `v.X` composition variables.
    start_temperature : float
        Starting temperature for simulation. Must be single phase liquid.
    step_temperature : Optional[float]
        Temperature step size. Defaults to 1.0.
    liquid_phase_name : Optional[str]
        Name of the phase treated as liquid (i.e. the phase with infinitely
        fast diffusion). Defaults to 'LIQUID'.
    eq_kwargs: Optional[Dict[str, Any]]
        Keyword arguments for equilibrium
    stop: Optional[float]
        Stop when the phase fraction of liquid is below this amount.
    verbose: Optional[bool]
        Print verbose progress output. Defaults to False.
    adaptive: Optional[bool]
        Whether to add additional points near the equilibrium points at each
        step. Only takes effect if ``points`` is in ``eq_kwargs['calc_opts']``.

    Returns
    -------
    SolidificationResult

    """
    eq_kwargs = eq_kwargs or dict()
    STEP_SCALE_FACTOR = 1.2  # How much to try to adapt the temperature step by
    MAXIMUM_STEP_SIZE_REDUCTION = 5.0
    T_STEP_ORIG = step_temperature
    phases = filter_phases(dbf, unpack_components(dbf, comps), phases)
    models = instantiate_models(dbf, comps, phases)
    if verbose:
        print('building callables... ', end='')
    cbs = build_callables(dbf,
                          comps,
                          phases,
                          models,
                          additional_statevars={v.P, v.T, v.N},
                          build_gradients=True,
                          build_hessians=True)
    if verbose:
        print('done')
    solid_phases = sorted(set(phases) - {liquid_phase_name})
    temp = start_temperature
    independent_comps = sorted([str(comp)[2:] for comp in composition.keys()])
    x_liquid = {comp: [composition[v.X(comp)]] for comp in independent_comps}
    fraction_solid = [0.0]
    temperatures = [temp]
    phase_amounts = {ph: [0.0] for ph in solid_phases}
    ord_disord_dict = order_disorder_dict(dbf, comps, phases)

    if adaptive and ('points' in eq_kwargs.get('calc_opts', {})):
        # Dynamically add points as the simulation runs
        species = unpack_components(dbf, comps)
        dof_dict = {
            ph: generate_dof(dbf.phases[ph], species)[1]
            for ph in phases
        }
    else:
        adaptive = False

    converged = False
    phases_seen = {liquid_phase_name, ''}
    liquid_comp = composition
    while fraction_solid[-1] < 1:
        conds = {v.T: temp, v.P: 101325.0, v.N: 1.0}
        comp_conds = liquid_comp
        fmt_comp_conds = ', '.join(
            [f'{c}={val:0.2f}' for c, val in comp_conds.items()])
        conds.update(comp_conds)
        eq = equilibrium(dbf,
                         comps,
                         phases,
                         conds,
                         callables=cbs,
                         model=models,
                         **eq_kwargs)
        if adaptive:
            # Update the points dictionary with local samples around the equilibrium site fractions
            points_dict = eq_kwargs['calc_opts']['points']
            for vtx in range(eq.vertex.size):
                masked = eq.isel(vertex=vtx)
                ph = str(masked.Phase.values.squeeze())
                pts = points_dict.get(ph)
                if pts is not None:
                    if verbose:
                        print(f'Adding points to {ph}. ', end='')
                    dof = dof_dict[ph]
                    points_dict[ph] = np.concatenate([
                        pts,
                        local_sample(
                            masked.Y.values.squeeze()[:sum(dof)].reshape(
                                1, -1),
                            dof,
                            pdens=20)
                    ],
                                                     axis=0)

        eq_phases = order_disorder_eq_phases(eq, ord_disord_dict)
        num_eq_phases = np.nansum(
            np.array([str(ph) for ph in eq_phases]) != '')
        new_phases_seen = set(eq_phases).difference(phases_seen)
        if len(new_phases_seen) > 0:
            if verbose:
                print(f'New phases seen: {new_phases_seen}. ', end='')
            phases_seen |= new_phases_seen
        if liquid_phase_name not in eq["Phase"].values.squeeze():
            found_ph = set(eq_phases) - {''}
            if verbose:
                print(
                    f'No liquid phase found at T={temp:0.3f}, {fmt_comp_conds}. (Found {found_ph}) ',
                    end='')
            if len(found_ph) == 0:
                # No phases found in equilibrium. Just continue on lowering the temperature without changing anything
                if verbose:
                    print(f'(Convergence failure) ', end='')
            if T_STEP_ORIG / step_temperature > MAXIMUM_STEP_SIZE_REDUCTION:
                # Only found solid phases and the step size has already been reduced. Stop running without converging.
                if verbose:
                    print('Maximum step size reduction exceeded. Stopping.')
                converged = False
                break
            else:
                # Only found solid phases. Try reducing the step size to zero-in on the correct phases
                if verbose:
                    print(f'Stepping back and reducing step size.')
                temp += step_temperature
                step_temperature /= STEP_SCALE_FACTOR
                continue
        # TODO: Will break if there is a liquid miscibility gap
        liquid_vertex = sorted(
            np.nonzero(
                eq["Phase"].values.squeeze().flat == liquid_phase_name))[0]
        liquid_comp = {}
        for comp in independent_comps:
            x = float(eq["X"].isel(vertex=liquid_vertex).squeeze().sel(
                component=comp).values)
            x_liquid[comp].append(x)
            liquid_comp[v.X(comp)] = x
        np_liq = np.nansum(
            eq.where(eq["Phase"] == liquid_phase_name).NP.values)
        current_fraction_solid = float(fraction_solid[-1])
        found_phase_amounts = [(liquid_phase_name, np_liq)
                               ]  # tuples of phase name, amount
        for solid_phase in solid_phases:
            if solid_phase not in eq_phases:
                phase_amounts[solid_phase].append(0.0)
                continue
            np_tieline = np.nansum(
                eq.isel(vertex=eq_phases.index(solid_phase))
                ["NP"].values.squeeze())
            found_phase_amounts.append((solid_phase, np_tieline))
            delta_fraction_solid = (1 - current_fraction_solid) * np_tieline
            current_fraction_solid += delta_fraction_solid
            phase_amounts[solid_phase].append(delta_fraction_solid)
        fraction_solid.append(current_fraction_solid)
        temperatures.append(temp)
        NL = 1 - fraction_solid[-1]
        if verbose:
            phase_amnts = ' '.join(
                [f'NP({ph})={amnt:0.3f}' for ph, amnt in found_phase_amounts])
            if NL < 1.0e-3:
                print(
                    f'T={temp:0.3f}, {fmt_comp_conds}, ΔT={step_temperature:0.3f}, NL: {NL:.2E}, {phase_amnts} ',
                    end='')
            else:
                print(
                    f'T={temp:0.3f}, {fmt_comp_conds}, ΔT={step_temperature:0.3f}, NL: {NL:0.3f}, {phase_amnts} ',
                    end='')
        if NL < stop:
            if verbose:
                print(
                    f'Liquid fraction below criterion {stop} . Stopping at {fmt_comp_conds}'
                )
            converged = True
            break
        if verbose:
            print()  # add line break
        temp -= step_temperature

    if fraction_solid[-1] < 1:
        for comp in independent_comps:
            x_liquid[comp].append(np.nan)
        fraction_solid.append(1.0)
        temperatures.append(temp)
        # set the final phase amount to the phase fractions in the eutectic
        # this method gives the sum total phase amounts of 1.0 by construction
        for solid_phase in solid_phases:
            if solid_phase in eq_phases:
                amount = np.nansum(
                    eq.isel(vertex=eq_phases.index(solid_phase))
                    ["NP"].values.squeeze())
                phase_amounts[solid_phase].append(
                    float(amount) * (1 - current_fraction_solid))
            else:
                phase_amounts[solid_phase].append(0.0)

    return SolidificationResult(x_liquid, fraction_solid, temperatures,
                                phase_amounts, converged, "scheil")
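A hedged usage sketch for the solidification driver above; the database file, components and starting composition are placeholders, and the result attribute names are assumed from the SolidificationResult constructor call:

# Usage sketch (hypothetical database file and composition).
from pycalphad import Database, variables as v

dbf = Database('al-zn.tdb')  # hypothetical database file
comps = ['AL', 'ZN', 'VA']
phases = list(dbf.phases.keys())
composition = {v.X('ZN'): 0.3}  # independent composition variables
sol_res = simulate_scheil_solidification(dbf, comps, phases, composition,
                                         start_temperature=850.0,
                                         step_temperature=1.0, verbose=True)
# sol_res.temperatures and sol_res.fraction_solid (attribute names assumed
# from the constructor arguments) trace the solidification path.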
Example #10
def setup_context(dbf,
                  datasets,
                  symbols_to_fit=None,
                  data_weights=None,
                  phase_models=None,
                  make_callables=True):
    """
    Set up a context dictionary for calculating error.

    Parameters
    ----------
    dbf : Database
        A pycalphad Database that will be fit
    datasets : PickleableTinyDB
        A database of single- and multi-phase data to fit
    symbols_to_fit : list of str
        List of symbols in the Database that will be fit. If None (default) is
        passed, then all parameters prefixed with `VV` followed by a number,
        e.g. VV0001, will be fit.
    data_weights : dict
        Dictionary mapping data types (e.g. 'ZPF', 'ACR') to weights.
    phase_models : dict
        ESPEI phase models specification, used to determine the active
        components and any custom Model classes.
    make_callables : bool
        Whether to pre-build the equilibrium callables. Defaults to True.

    Returns
    -------
    dict
        Context dictionary of keyword arguments for the error functions.

    Notes
    -----
    A copy of the Database is made and used in the context. To commit changes
    back to the original database, the dbf.symbols.update method should be used.
    """
    dbf = copy.deepcopy(dbf)
    if phase_models is not None:
        comps = sorted(phase_models['components'])
    else:
        comps = sorted([sp for sp in dbf.elements])
    if symbols_to_fit is None:
        symbols_to_fit = database_symbols_to_fit(dbf)
    else:
        symbols_to_fit = sorted(symbols_to_fit)
    data_weights = data_weights if data_weights is not None else {}

    if len(symbols_to_fit) == 0:
        raise ValueError(
            'No degrees of freedom. Database must contain symbols starting with \'V\' or \'VV\', followed by a number.'
        )
    else:
        _log.info('Fitting %s degrees of freedom.', len(symbols_to_fit))

    for x in symbols_to_fit:
        if isinstance(dbf.symbols[x], symengine.Piecewise):
            _log.debug('Replacing %s in database', x)
            dbf.symbols[x] = dbf.symbols[x].args[0]

    # construct the models for each phase, substituting in the SymEngine symbol to fit.
    if phase_models is not None:
        model_dict = get_model_dict(phase_models)
    else:
        model_dict = {}
    _log.trace('Building phase models (this may take some time)')
    import time
    t1 = time.time()
    phases = sorted(
        filter_phases(dbf, unpack_components(dbf, comps), dbf.phases.keys()))
    parameters = dict(zip(symbols_to_fit, [0] * len(symbols_to_fit)))
    models = instantiate_models(dbf,
                                comps,
                                phases,
                                model=model_dict,
                                parameters=parameters)
    if make_callables:
        eq_callables = build_callables(dbf,
                                       comps,
                                       phases,
                                       models,
                                       parameter_symbols=symbols_to_fit,
                                       output='GM',
                                       build_gradients=True,
                                       build_hessians=True,
                                       additional_statevars={v.N, v.P, v.T})
    else:
        eq_callables = None
    t2 = time.time()
    _log.trace('Finished building phase models (%0.2fs)', t2 - t1)
    _log.trace(
        'Getting non-equilibrium thermochemical data (this may take some time)'
    )
    t1 = time.time()
    thermochemical_data = get_thermochemical_data(
        dbf,
        comps,
        phases,
        datasets,
        model=model_dict,
        weight_dict=data_weights,
        symbols_to_fit=symbols_to_fit)
    t2 = time.time()
    _log.trace('Finished getting non-equilibrium thermochemical data (%0.2fs)',
               t2 - t1)
    _log.trace(
        'Getting equilibrium thermochemical data (this may take some time)')
    t1 = time.time()
    eq_thermochemical_data = get_equilibrium_thermochemical_data(
        dbf,
        comps,
        phases,
        datasets,
        model=model_dict,
        parameters=parameters,
        data_weight_dict=data_weights)
    t2 = time.time()
    _log.trace('Finished getting equilibrium thermochemical data (%0.2fs)',
               t2 - t1)
    _log.trace('Getting ZPF data (this may take some time)')
    t1 = time.time()
    zpf_data = get_zpf_data(dbf,
                            comps,
                            phases,
                            datasets,
                            model=model_dict,
                            parameters=parameters)
    t2 = time.time()
    _log.trace('Finished getting ZPF data (%0.2fs)', t2 - t1)

    # context for the log probability function
    # for all cases, parameters argument addressed in MCMC loop
    error_context = {
        'symbols_to_fit': symbols_to_fit,
        'zpf_kwargs': {
            'zpf_data': zpf_data,
            'data_weight': data_weights.get('ZPF', 1.0),
        },
        'equilibrium_thermochemical_kwargs': {
            'eq_thermochemical_data': eq_thermochemical_data,
        },
        'thermochemical_kwargs': {
            'thermochemical_data': thermochemical_data,
        },
        'activity_kwargs': {
            'dbf': dbf,
            'comps': comps,
            'phases': phases,
            'datasets': datasets,
            'phase_models': models,
            'callables': eq_callables,
            'data_weight': data_weights.get('ACR', 1.0),
        },
    }
    return error_context
Example #11
def get_zpf_data(dbf: Database,
                 comps: Sequence[str],
                 phases: Sequence[str],
                 datasets: PickleableTinyDB,
                 parameters: Dict[str, float],
                 model: Optional[Dict[str, Type[Model]]] = None):
    """
    Return the ZPF data used in the calculation of ZPF error

    Parameters
    ----------
    dbf : Database
        Database used to construct the Model and PhaseRecord objects
    comps : list
        List of active component names
    phases : list
        List of phases to consider
    datasets : espei.utils.PickleableTinyDB
        Datasets that contain ZPF (phase boundary) data
    parameters : dict
        Dictionary mapping symbols to optimize to their initial values
    model : Optional[Dict[str, Type[Model]]]
        Dictionary mapping phase names to pycalphad Model classes.

    Returns
    -------
    list
        List of data dictionaries with keys ``weight``, ``phase_regions`` and ``dataset_reference``.
    """
    desired_data = datasets.search(
        (tinydb.where('output') == 'ZPF')
        & (tinydb.where('components').test(lambda x: set(x).issubset(comps)))
        & (tinydb.where('phases').test(
            lambda x: len(set(phases).intersection(x)) > 0)))

    zpf_data = []  # 1:1 correspondence with each dataset
    for data in desired_data:
        data_comps = list(set(data['components']).union({'VA'}))
        species = sorted(unpack_components(dbf, data_comps), key=str)
        data_phases = filter_phases(dbf, species, candidate_phases=phases)
        models = instantiate_models(dbf,
                                    species,
                                    data_phases,
                                    model=model,
                                    parameters=parameters)
        # assumed N, P, T state variables
        phase_recs = build_phase_records(dbf,
                                         species,
                                         data_phases, {v.N, v.P, v.T},
                                         models,
                                         parameters=parameters,
                                         build_gradients=True,
                                         build_hessians=True)
        all_phase_points = {
            phase_name: _sample_phase_constitution(models[phase_name],
                                                   point_sample, True, 50)
            for phase_name in data_phases
        }
        all_regions = data['values']
        conditions = data['conditions']
        phase_regions = []
        # Each phase_region is one set of phases in equilibrium (on a tie-line),
        # e.g. [["ALPHA", ["B"], [0.25]], ["BETA", ["B"], [0.5]]]
        for idx, phase_region in enumerate(all_regions):
            # Extract the conditions for entire phase region
            pot_conds = _extract_pot_conds(conditions, idx)
            pot_conds.setdefault(v.N, 1.0)  # Add v.N condition, if missing
            # Extract all the phases and compositions from the tie-line points
            vertices = []
            for vertex in phase_region:
                phase_name, comp_conds, disordered_flag = _extract_phases_comps(
                    vertex)
                # Construct single-phase points satisfying the conditions for each phase in the region
                mod = models[phase_name]
                composition = _compute_vertex_composition(
                    data_comps, comp_conds)
                if np.any(np.isnan(composition)):
                    # We can't construct points because we don't have a known composition
                    has_missing_comp_cond = True
                    phase_points = None
                elif _phase_is_stoichiometric(mod):
                    has_missing_comp_cond = False
                    phase_points = None
                else:
                    has_missing_comp_cond = False
                    # Only sample points that have an average mass residual within tol
                    tol = 0.02
                    phase_points = _subsample_phase_points(
                        phase_recs[phase_name], all_phase_points[phase_name],
                        composition, tol)
                    assert phase_points.shape[
                        0] > 0, f"phase {phase_name} must have at least one set of points within the target tolerance {pot_conds} {comp_conds}"
                vtx = RegionVertex(phase_name, composition, comp_conds,
                                   phase_points, phase_recs, disordered_flag,
                                   has_missing_comp_cond)
                vertices.append(vtx)
            region = PhaseRegion(vertices, pot_conds, species, data_phases,
                                 models)
            phase_regions.append(region)

        data_dict = {
            'weight': data.get('weight', 1.0),
            'phase_regions': phase_regions,
            'dataset_reference': data['reference']
        }
        zpf_data.append(data_dict)
    return zpf_data
Example #12
def map_binary(
    dbf,
    comps,
    phases,
    conds,
    eq_kwargs=None,
    calc_kwargs=None,
    boundary_sets=None,
    verbose=False,
    summary=False,
):
    """
    Map a binary T-X phase diagram

    Parameters
    ----------
    dbf : Database
    comps : list of str
    phases : list of str
        List of phases to consider in mapping
    conds : dict
        Dictionary of conditions
    eq_kwargs : dict
        Dictionary of keyword arguments to pass to equilibrium
    calc_kwargs : dict
        Dictionary of keyword arguments to pass to calculate
    boundary_sets : ZPFBoundarySets
        Existing ZPFBoundarySets to add the mapped boundaries to
    verbose : bool
        Print verbose output for mapping
    summary : bool
        Print a summary of the calculations performed when finished

    Returns
    -------
    ZPFBoundarySets

    Notes
    -----
    Assumes conditions in T and X.

    Simple algorithm to map a binary phase diagram in T-X. More or less follows
    the algorithm described in Figure 2 by Snider et al. [1] with the small
    algorithmic improvement of constructing a convex hull to find the next
    potential two phase region.

    For each temperature, proceed along increasing composition, skipping over
    two-phase regions once they have been calculated.

    [1] J. Snider, I. Griva, X. Sun, M. Emelianenko, Set based framework for
        Gibbs energy minimization, Calphad. 48 (2015) 18-26.
        doi: 10.1016/j.calphad.2014.09.005

    """

    eq_kwargs = eq_kwargs or {}
    calc_kwargs = calc_kwargs or {}
    # implicitly add v.N to conditions
    if v.N not in conds:
        conds[v.N] = [1.0]
    if 'pdens' not in calc_kwargs:
        calc_kwargs['pdens'] = 2000

    species = unpack_components(dbf, comps)
    phases = filter_phases(dbf, species, phases)
    parameters = eq_kwargs.get('parameters', {})
    models = eq_kwargs.get('model')
    statevars = get_state_variables(models=models, conds=conds)
    if models is None:
        models = instantiate_models(dbf,
                                    comps,
                                    phases,
                                    model=eq_kwargs.get('model'),
                                    parameters=parameters,
                                    symbols_only=True)
    prxs = build_phase_records(dbf,
                               species,
                               phases,
                               conds,
                               models,
                               output='GM',
                               parameters=parameters,
                               build_gradients=True,
                               build_hessians=True)

    indep_comp = [
        key for key, value in conds.items()
        if isinstance(key, v.MoleFraction) and len(np.atleast_1d(value)) > 1
    ]
    indep_pot = [
        key for key, value in conds.items()
        if (type(key) is v.StateVariable) and len(np.atleast_1d(value)) > 1
    ]
    if (len(indep_comp) != 1) or (len(indep_pot) != 1):
        raise ValueError(
            'Binary map requires exactly one composition and one potential coordinate'
        )
    if indep_pot[0] != v.T:
        raise ValueError(
            'Binary map requires that a temperature grid must be defined')

    # binary assumption, only one composition specified.
    comp_cond = [k for k in conds.keys() if isinstance(k, v.X)][0]
    indep_comp = comp_cond.name[2:]
    indep_comp_idx = sorted(get_pure_elements(dbf, comps)).index(indep_comp)
    composition_grid = unpack_condition(conds[comp_cond])
    dX = composition_grid[1] - composition_grid[0]
    Xmax = composition_grid.max()
    temperature_grid = unpack_condition(conds[v.T])
    dT = temperature_grid[1] - temperature_grid[0]

    boundary_sets = boundary_sets or ZPFBoundarySets(comps, comp_cond)

    equilibria_calculated = 0
    equilibrium_time = 0
    convex_hulls_calculated = 0
    convex_hull_time = 0
    curr_conds = {key: unpack_condition(val) for key, val in conds.items()}
    str_conds = sorted([str(k) for k in curr_conds.keys()])
    grid_conds = _adjust_conditions(curr_conds)
    for T_idx in range(temperature_grid.size):
        T = temperature_grid[T_idx]
        iter_equilibria = 0
        if verbose:
            print("=== T = {} ===".format(float(T)))
        curr_conds[v.T] = [float(T)]
        eq_conds = deepcopy(curr_conds)
        Xmax_visited = 0.0
        hull_time = time.time()
        grid = calculate(dbf,
                         comps,
                         phases,
                         fake_points=True,
                         output='GM',
                         T=T,
                         P=grid_conds[v.P],
                         N=1,
                         model=models,
                         parameters=parameters,
                         to_xarray=False,
                         **calc_kwargs)
        hull = starting_point(eq_conds, statevars, prxs, grid)
        convex_hull_time += time.time() - hull_time
        convex_hulls_calculated += 1
        while Xmax_visited < Xmax:
            hull_compsets = find_two_phase_region_compsets(
                hull,
                T,
                indep_comp,
                indep_comp_idx,
                minimum_composition=Xmax_visited,
                misc_gap_tol=2 * dX)
            if hull_compsets is None:
                if verbose:
                    print(
                        "== Convex hull: max visited = {} - no multiphase compsets found =="
                        .format(Xmax_visited))
                break
            Xeq = hull_compsets.mean_composition
            eq_conds[comp_cond] = [float(Xeq)]
            eq_time = time.time()
            start_point = starting_point(eq_conds, statevars, prxs, grid)
            eq_ds = _solve_eq_at_conditions(species, start_point, prxs, grid,
                                            str_conds, statevars, False)
            equilibrium_time += time.time() - eq_time
            equilibria_calculated += 1
            iter_equilibria += 1
            # composition sets in the plane of the calculation:
            # even for isopleths, this should always be two.
            compsets = get_compsets(eq_ds, indep_comp, indep_comp_idx)
            if verbose:
                print(
                    "== Convex hull: max visited = {:0.4f} - hull compsets: {} equilibrium compsets: {} =="
                    .format(Xmax_visited, hull_compsets, compsets))
            if compsets is None:
                # equilibrium calculation, didn't find a valid multiphase composition set
                # we need to find the next feasible one from the convex hull.
                Xmax_visited += dX
                continue
            else:
                boundary_sets.add_compsets(compsets, Xtol=0.10, Ttol=2 * dT)
                if compsets.max_composition > Xmax_visited:
                    Xmax_visited = compsets.max_composition
            # this seems kind of sloppy, but captures the effect that we want to
            # keep doing equilibrium calculations, if possible.
            while Xmax_visited < Xmax and compsets is not None:
                eq_conds[comp_cond] = [float(Xmax_visited + dX)]
                eq_time = time.time()
                # TODO: starting point could be improved by basing it off the previous calculation
                start_point = starting_point(eq_conds, statevars, prxs, grid)
                eq_ds = _solve_eq_at_conditions(species, start_point, prxs,
                                                grid, str_conds, statevars,
                                                False)
                equilibrium_time += time.time() - eq_time
                equilibria_calculated += 1
                compsets = get_compsets(eq_ds, indep_comp, indep_comp_idx)
                if compsets is not None:
                    Xmax_visited = compsets.max_composition
                    boundary_sets.add_compsets(compsets,
                                               Xtol=0.10,
                                               Ttol=2 * dT)
                else:
                    Xmax_visited += dX
                if verbose:
                    print("Equilibrium: at X = {:0.4f}, found compsets {}".
                          format(Xmax_visited, compsets))
        if verbose:
            print(iter_equilibria, 'equilibria calculated in this iteration.')
    if verbose or summary:
        print("{} Convex hulls calculated ({:0.1f}s)".format(
            convex_hulls_calculated, convex_hull_time))
        print("{} Equilbria calculated ({:0.1f}s)".format(
            equilibria_calculated, equilibrium_time))
        print("{:0.0f}% of brute force calculations skipped".format(
            100 * (1 - equilibria_calculated /
                   (composition_grid.size * temperature_grid.size))))
    return boundary_sets
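A usage sketch for the mapping routine above, with a placeholder database and a T-X condition grid consistent with the docstring's assumptions:

# Usage sketch (hypothetical database file).
from pycalphad import Database, variables as v

dbf = Database('cu-mg.tdb')  # hypothetical database file
comps = ['CU', 'MG', 'VA']
phases = list(dbf.phases.keys())
conds = {v.P: 101325, v.T: (600, 1400, 10), v.X('MG'): (0, 1, 0.01)}
zpf_boundaries = map_binary(dbf, comps, phases, conds, verbose=True)
# zpf_boundaries is a ZPFBoundarySets instance; it can be passed back in via
# boundary_sets= to continue mapping with refined conditions.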
Example #13
def build_eqpropdata(
        data: tinydb.database.Document,
        dbf: Database,
        parameters: Optional[Dict[str, float]] = None,
        data_weight_dict: Optional[Dict[str, float]] = None) -> EqPropData:
    """
    Build EqPropData for the calculations corresponding to a single dataset.

    Parameters
    ----------
    data : tinydb.database.Document
        Document corresponding to a single ESPEI dataset.
    dbf : Database
        Database that should be used to construct the `Model` and `PhaseRecord` objects.
    parameters : Optional[Dict[str, float]]
        Mapping of parameter symbols to values.
    data_weight_dict : Optional[Dict[str, float]]
        Mapping of a data type (e.g. `HM` or `SM`) to a weight.

    Returns
    -------
    EqPropData
    """
    parameters = parameters if parameters is not None else {}
    data_weight_dict = data_weight_dict if data_weight_dict is not None else {}
    property_std_deviation = {
        'HM': 500.0,  # J/mol
        'SM': 0.2,  # J/K-mol
        'CPM': 0.2,  # J/K-mol
    }

    params_keys, _ = extract_parameters(parameters)

    data_comps = list(set(data['components']).union({'VA'}))
    species = sorted(unpack_components(dbf, data_comps), key=str)
    data_phases = filter_phases(dbf, species, candidate_phases=data['phases'])
    models = instantiate_models(dbf,
                                species,
                                data_phases,
                                parameters=parameters)
    output = data['output']
    property_output = output.split('_')[
        0]  # property without _FORM, _MIX, etc.
    samples = np.array(data['values']).flatten()
    reference = data.get('reference', '')

    # Models are now modified in response to the data from this data
    if 'reference_states' in data:
        property_output = output[:-1] if output.endswith(
            'R'
        ) else output  # unreferenced model property so we can tell shift_reference_state what to build.
        reference_states = []
        for el, vals in data['reference_states'].items():
            reference_states.append(
                ReferenceState(
                    v.Species(el),
                    vals['phase'],
                    fixed_statevars=vals.get('fixed_state_variables')))
        for mod in models.values():
            mod.shift_reference_state(reference_states,
                                      dbf,
                                      output=(property_output, ))

    data['conditions'].setdefault(
        'N', 1.0
    )  # Add default for N. Nothing else is supported in pycalphad anyway.
    pot_conds = OrderedDict([(getattr(v, key),
                              unpack_condition(data['conditions'][key]))
                             for key in sorted(data['conditions'].keys())
                             if not key.startswith('X_')])
    comp_conds = OrderedDict([(v.X(key[2:]),
                               unpack_condition(data['conditions'][key]))
                              for key in sorted(data['conditions'].keys())
                              if key.startswith('X_')])

    phase_records = build_phase_records(dbf,
                                        species,
                                        data_phases, {
                                            **pot_conds,
                                            **comp_conds
                                        },
                                        models,
                                        parameters=parameters,
                                        build_gradients=True,
                                        build_hessians=True)

    # Now we need to unravel the composition conditions
    # (from Dict[v.X, Sequence[float]] to Sequence[Dict[v.X, float]]), since the
    # composition conditions are only broadcast against the potentials, not
    # each other. Each individual composition needs to be computed
    # independently, since broadcasting over composition cannot be turned off
    # in pycalphad.
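    # For example (hypothetical values):
    #   {v.X('AL'): [0.1, 0.2], v.X('CR'): [0.3, 0.4]}
    # becomes
    #   [{v.X('AL'): 0.1, v.X('CR'): 0.3}, {v.X('AL'): 0.2, v.X('CR'): 0.4}]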
    rav_comp_conds = [
        OrderedDict(zip(comp_conds.keys(), pt_comps))
        for pt_comps in zip(*comp_conds.values())
    ]

    # Build weights, should be the same size as the values
    total_num_calculations = len(rav_comp_conds) * np.prod(
        [len(vals) for vals in pot_conds.values()])
    dataset_weights = np.array(data.get('weight',
                                        1.0)) * np.ones(total_num_calculations)
    weights = (property_std_deviation.get(property_output, 1.0) /
               data_weight_dict.get(property_output, 1.0) /
               dataset_weights).flatten()

    return EqPropData(dbf, species, data_phases, pot_conds, rav_comp_conds,
                      models, params_keys, phase_records, output, samples,
                      weights, reference)
Example #14
def calculate(dbf, comps, phases, mode=None, output='GM', fake_points=False, broadcast=True, parameters=None, **kwargs):
    """
    Sample the property surface of 'output' containing the specified
    components and phases. Model parameters are taken from 'dbf' and any
    state variables (T, P, etc.) can be specified as keyword arguments.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : str or sequence
        Names of components to consider in the calculation.
    phases : str or sequence
        Names of phases to consider in the calculation.
    mode : string, optional
        See 'make_callable' docstring for details.
    output : string, optional
        Model attribute to sample.
    fake_points : bool, optional (Default: False)
        If True, the first few points of the output surface will be fictitious
        points used to define an equilibrium hyperplane guaranteed to be above
        all the other points. This is used for convex hull computations.
    broadcast : bool, optional
        If True, broadcast given state variable lists against each other to create a grid.
        If False, assume state variables are given as equal-length lists.
    points : ndarray or a dict of phase names to ndarray, optional
        Columns of ndarrays must be internal degrees of freedom (site fractions), sorted.
        If this is not specified, points will be generated automatically.
    pdens : int, a dict of phase names to int, or a seq of both, optional
        Number of points to sample per degree of freedom.
        Default: 2000; Default when called from equilibrium(): 500
    model : Model, a dict of phase names to Model, or a seq of both, optional
        Model class to use for each phase.
    sampler : callable, a dict of phase names to callable, or a seq of both, optional
        Function to sample phase constitution space.
        Must have same signature as 'pycalphad.core.utils.point_sample'
    grid_points : bool, a dict of phase names to bool, or a seq of both, optional (Default: True)
        Whether to add evenly spaced points between end-members.
        The density of points is determined by 'pdens'
    parameters : dict, optional
        Maps SymPy Symbol to numbers, for overriding the values of parameters in the Database.

    Returns
    -------
    Dataset of the sampled attribute as a function of state variables

    Examples
    --------
    None yet.
    """
    # Here we check for any keyword arguments that are special, i.e.,
    # there may be keyword arguments that aren't state variables
    pdens_dict = unpack_kwarg(kwargs.pop('pdens', 2000), default_arg=2000)
    points_dict = unpack_kwarg(kwargs.pop('points', None), default_arg=None)
    callables = kwargs.pop('callables', {})
    sampler_dict = unpack_kwarg(kwargs.pop('sampler', None), default_arg=None)
    fixedgrid_dict = unpack_kwarg(kwargs.pop('grid_points', True), default_arg=True)
    parameters = parameters or dict()
    if isinstance(parameters, dict):
        parameters = OrderedDict(sorted(parameters.items(), key=str))
    if isinstance(phases, str):
        phases = [phases]
    if isinstance(comps, (str, v.Species)):
        comps = [comps]
    comps = sorted(unpack_components(dbf, comps))
    if points_dict is None and broadcast is False:
        raise ValueError('The \'points\' keyword argument must be specified if broadcast=False is also given.')
    nonvacant_components = [x for x in sorted(comps) if x.number_of_atoms > 0]

    all_phase_data = []
    largest_energy = 1e10

    # Consider only the active phases
    list_of_possible_phases = filter_phases(dbf, comps)
    active_phases = sorted(set(list_of_possible_phases).intersection(set(phases)))
    active_phases = {name: dbf.phases[name] for name in active_phases}
    if len(list_of_possible_phases) == 0:
        raise ConditionError('There are no phases in the Database that can be active with components {0}'.format(comps))
    if len(active_phases) == 0:
        raise ConditionError('None of the passed phases ({0}) are active. List of possible phases: {1}.'
                             .format(phases, list_of_possible_phases))

    models = instantiate_models(dbf, comps, list(active_phases.keys()), model=kwargs.pop('model', None), parameters=parameters)

    if isinstance(output, (list, tuple, set)):
        raise NotImplementedError('Only one property can be specified in calculate() at a time')
    output = output if output is not None else 'GM'

    # Implicitly add 'N' state variable as a string to keyword arguments if it's not passed
    if kwargs.get('N') is None:
        kwargs['N'] = 1
    if np.any(np.array(kwargs['N']) != 1):
        raise ConditionError('N!=1 is not yet supported, got N={}'.format(kwargs['N']))

    # TODO: conditions dict of StateVariable instances should become part of the calculate API
    statevar_strings = [sv for sv in kwargs.keys() if getattr(v, sv) is not None]
    # If we don't do this, sympy will get confused during substitution
    statevar_dict = dict((v.StateVariable(key), unpack_condition(value)) for key, value in kwargs.items() if key in statevar_strings)
    # Sort after default state variable check to fix gh-116
    statevar_dict = collections.OrderedDict(sorted(statevar_dict.items(), key=lambda x: str(x[0])))
    phase_records = build_phase_records(dbf, comps, active_phases, statevar_dict,
                                   models=models, parameters=parameters,
                                   output=output, callables=callables,
                                   verbose=kwargs.pop('verbose', False))
    str_statevar_dict = collections.OrderedDict((str(key), unpack_condition(value)) \
                                                for (key, value) in statevar_dict.items())
    maximum_internal_dof = max(len(models[phase_name].site_fractions) for phase_name in active_phases)
    for phase_name, phase_obj in sorted(active_phases.items()):
        mod = models[phase_name]
        phase_record = phase_records[phase_name]
        points = points_dict[phase_name]
        variables, sublattice_dof = generate_dof(phase_obj, mod.components)
        if points is None:
            points = _sample_phase_constitution(phase_name, phase_obj.constituents, sublattice_dof, comps,
                                                tuple(variables), sampler_dict[phase_name] or point_sample,
                                                fixedgrid_dict[phase_name], pdens_dict[phase_name])
        points = np.atleast_2d(points)

        fp = fake_points and (phase_name == sorted(active_phases.keys())[0])
        phase_ds = _compute_phase_values(nonvacant_components, str_statevar_dict,
                                         points, phase_record, output,
                                         maximum_internal_dof, broadcast=broadcast,
                                         largest_energy=float(largest_energy), fake_points=fp)
        all_phase_data.append(phase_ds)

    # speedup for single-phase case (found by profiling)
    if len(all_phase_data) > 1:
        final_ds = concat(all_phase_data, dim='points')
        final_ds['points'].values = np.arange(len(final_ds['points']))
        final_ds.coords['points'].values = np.arange(len(final_ds['points']))
    else:
        final_ds = all_phase_data[0]
    return final_ds
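A usage sketch for calculate(); the database file is a placeholder and the state variables follow the keyword-argument convention described in the docstring:

# Usage sketch (hypothetical database file).
from pycalphad import Database, calculate

dbf = Database('al-fe.tdb')  # hypothetical database file
res = calculate(dbf, ['AL', 'FE', 'VA'], 'FCC_A1',
                output='GM', T=(300.0, 2000.0, 50.0), P=101325, pdens=500)
# res.GM holds the sampled Gibbs energy over the temperature grid and the
# automatically sampled site fractions.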
Example #15
def calculate(dbf,
              comps,
              phases,
              mode=None,
              output='GM',
              fake_points=False,
              broadcast=True,
              parameters=None,
              to_xarray=True,
              **kwargs):
    """
    Sample the property surface of 'output' containing the specified
    components and phases. Model parameters are taken from 'dbf' and any
    state variables (T, P, etc.) can be specified as keyword arguments.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : str or sequence
        Names of components to consider in the calculation.
    phases : str or sequence
        Names of phases to consider in the calculation.
    mode : string, optional
        See 'make_callable' docstring for details.
    output : string, optional
        Model attribute to sample.
    fake_points : bool, optional (Default: False)
        If True, the first few points of the output surface will be fictitious
        points used to define an equilibrium hyperplane guaranteed to be above
        all the other points. This is used for convex hull computations.
    broadcast : bool, optional
        If True, broadcast given state variable lists against each other to create a grid.
        If False, assume state variables are given as equal-length lists.
    points : ndarray or a dict of phase names to ndarray, optional
        Columns of ndarrays must be internal degrees of freedom (site fractions), sorted.
        If this is not specified, points will be generated automatically.
    pdens : int, a dict of phase names to int, or a seq of both, optional
        Number of points to sample per degree of freedom.
        Default: 2000; Default when called from equilibrium(): 500
    model : Model, a dict of phase names to Model, or a seq of both, optional
        Model class to use for each phase.
    sampler : callable, a dict of phase names to callable, or a seq of both, optional
        Function to sample phase constitution space.
        Must have same signature as 'pycalphad.core.utils.point_sample'
    grid_points : bool, a dict of phase names to bool, or a seq of both, optional (Default: True)
        Whether to add evenly spaced points between end-members.
        The density of points is determined by 'pdens'
    parameters : dict, optional
        Maps SymPy Symbol to numbers, for overriding the values of parameters in the Database.

    Returns
    -------
    Dataset of the sampled attribute as a function of state variables

    Examples
    --------
    None yet.
    """
    # Here we check for any keyword arguments that are special, i.e.,
    # there may be keyword arguments that aren't state variables
    pdens_dict = unpack_kwarg(kwargs.pop('pdens', 2000), default_arg=2000)
    points_dict = unpack_kwarg(kwargs.pop('points', None), default_arg=None)
    callables = kwargs.pop('callables', {})
    sampler_dict = unpack_kwarg(kwargs.pop('sampler', None), default_arg=None)
    fixedgrid_dict = unpack_kwarg(kwargs.pop('grid_points', True),
                                  default_arg=True)
    parameters = parameters or dict()
    if isinstance(parameters, dict):
        parameters = OrderedDict(sorted(parameters.items(), key=str))
    if isinstance(phases, str):
        phases = [phases]
    if isinstance(comps, (str, v.Species)):
        comps = [comps]
    comps = sorted(unpack_components(dbf, comps))
    if points_dict is None and broadcast is False:
        raise ValueError(
            'The \'points\' keyword argument must be specified if broadcast=False is also given.'
        )
    nonvacant_components = [x for x in sorted(comps) if x.number_of_atoms > 0]

    all_phase_data = []
    largest_energy = 1e10

    # Consider only the active phases
    list_of_possible_phases = filter_phases(dbf, comps)
    if len(list_of_possible_phases) == 0:
        raise ConditionError(
            'There are no phases in the Database that can be active with components {0}'
            .format(comps))
    active_phases = {
        name: dbf.phases[name]
        for name in filter_phases(dbf, comps, phases)
    }
    if len(active_phases) == 0:
        raise ConditionError(
            'None of the passed phases ({0}) are active. List of possible phases: {1}.'
            .format(phases, list_of_possible_phases))

    models = instantiate_models(dbf,
                                comps,
                                list(active_phases.keys()),
                                model=kwargs.pop('model', None),
                                parameters=parameters)

    if isinstance(output, (list, tuple, set)):
        raise NotImplementedError(
            'Only one property can be specified in calculate() at a time')
    output = output if output is not None else 'GM'

    # Implicitly add 'N' state variable as a string to keyword arguments if it's not passed
    if kwargs.get('N') is None:
        kwargs['N'] = 1
    if np.any(np.array(kwargs['N']) != 1):
        raise ConditionError('N!=1 is not yet supported, got N={}'.format(
            kwargs['N']))

    # TODO: conditions dict of StateVariable instances should become part of the calculate API
    statevar_strings = [
        sv for sv in kwargs.keys() if getattr(v, sv) is not None
    ]
    # If we don't do this, sympy will get confused during substitution
    statevar_dict = dict((v.StateVariable(key), unpack_condition(value))
                         for key, value in kwargs.items()
                         if key in statevar_strings)
    # Sort after default state variable check to fix gh-116
    statevar_dict = collections.OrderedDict(
        sorted(statevar_dict.items(), key=lambda x: str(x[0])))
    phase_records = build_phase_records(dbf,
                                        comps,
                                        active_phases,
                                        statevar_dict,
                                        models=models,
                                        parameters=parameters,
                                        output=output,
                                        callables=callables,
                                        build_gradients=False,
                                        build_hessians=False,
                                        verbose=kwargs.pop('verbose', False))
    str_statevar_dict = collections.OrderedDict((str(key), unpack_condition(value)) \
                                                for (key, value) in statevar_dict.items())
    maximum_internal_dof = max(
        len(models[phase_name].site_fractions) for phase_name in active_phases)
    for phase_name, phase_obj in sorted(active_phases.items()):
        mod = models[phase_name]
        phase_record = phase_records[phase_name]
        points = points_dict[phase_name]
        variables, sublattice_dof = generate_dof(phase_obj, mod.components)
        if points is None:
            points = _sample_phase_constitution(
                phase_name, phase_obj.constituents, sublattice_dof, comps,
                tuple(variables), sampler_dict[phase_name] or point_sample,
                fixedgrid_dict[phase_name], pdens_dict[phase_name])
        points = np.atleast_2d(points)

        fp = fake_points and (phase_name == sorted(active_phases.keys())[0])
        phase_ds = _compute_phase_values(nonvacant_components,
                                         str_statevar_dict,
                                         points,
                                         phase_record,
                                         output,
                                         maximum_internal_dof,
                                         broadcast=broadcast,
                                         largest_energy=float(largest_energy),
                                         fake_points=fp)
        all_phase_data.append(phase_ds)

    # speedup for single-phase case (found by profiling)
    if len(all_phase_data) > 1:
        concatenated_coords = all_phase_data[0].coords

        data_vars = all_phase_data[0].data_vars
        concatenated_data_vars = {}
        for var in data_vars.keys():
            data_coords = data_vars[var][0]
            points_idx = data_coords.index('points')  # concatenation axis
            arrs = []
            for phase_data in all_phase_data:
                arrs.append(getattr(phase_data, var))
            concat_data = np.concatenate(arrs, axis=points_idx)
            concatenated_data_vars[var] = (data_coords, concat_data)
        final_ds = LightDataset(data_vars=concatenated_data_vars,
                                coords=concatenated_coords)
    else:
        final_ds = all_phase_data[0]
    if to_xarray:
        return final_ds.get_dataset()
    else:
        return final_ds
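
A minimal usage sketch for the calculate() routine above, assuming a hypothetical 'Al-Ni.tdb' database file that defines the FCC_A1 and LIQUID phases; state variables such as P and T are passed as keyword arguments and end up in statevar_dict exactly as in the body above.

from pycalphad import Database, calculate

dbf = Database('Al-Ni.tdb')  # hypothetical TDB file
comps = ['AL', 'NI', 'VA']
phases = ['FCC_A1', 'LIQUID']

# Sample the molar Gibbs energy ('GM') of each phase over its internal
# degrees of freedom at fixed pressure and a few temperatures.
res = calculate(dbf, comps, phases, P=101325, T=[800, 1000, 1200],
                output='GM', pdens=500)
print(res.GM.dims, res.GM.shape)
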
Example #16
0
def simulate_scheil_solidification(dbf,
                                   comps,
                                   phases,
                                   composition,
                                   start_temperature,
                                   step_temperature=1.0,
                                   liquid_phase_name='LIQUID',
                                   stop=0.0001,
                                   verbose=False):
    """Perform a Scheil-Gulliver solidification simulation.

    Parameters
    ----------
    dbf : pycalphad.Database
        Database object.
    comps : list
        List of components in the system.
    phases : list
        List of phases in the system.
    composition : dict
        Dictionary of independent `v.X` composition variables.
    start_temperature : float
        Starting temperature for simulation. Must be single phase liquid.
    step_temperature : float, optional
        Temperature step size. Defaults to 1.0.
    liquid_phase_name : str
        Name of the phase treated as liquid.
    stop : float, optional
        Stop when the fraction of remaining liquid falls below this value. Defaults to 0.0001.
    verbose : bool, optional
        Print verbose progress information. Defaults to False.

    Returns
    -------
    scheil.solidification_result.SolidifcationResult

    """
    STEP_SCALE_FACTOR = 1.1  # How much to try to adapt the temperature step by
    MAXIMUM_STEP_SIZE_REDUCTION = 10.0
    T_STEP_ORIG = step_temperature
    models = instantiate_models(dbf, comps, phases)
    cbs = build_callables(dbf,
                          comps,
                          phases,
                          models,
                          additional_statevars={v.P, v.T, v.N},
                          build_gradients=True,
                          build_hessians=True)
    solid_phases = sorted(set(phases) - {'GAS', liquid_phase_name})
    temp = start_temperature
    independent_comps = sorted(composition.keys(), key=str)
    x_liquid = [composition]
    fraction_solid = [0.0]
    temperatures = [temp]
    phase_amounts = {ph: [0.0] for ph in solid_phases}

    phases_seen = {liquid_phase_name, ''}
    while fraction_solid[-1] < 1:
        conds = {v.T: temp, v.P: 101325}
        comp_conds = x_liquid[-1]
        fmt_comp_conds = ', '.join(
            ['{}={:0.2f}'.format(c, val) for c, val in comp_conds.items()])
        conds.update(comp_conds)
        eq = equilibrium(dbf,
                         comps,
                         phases,
                         conds,
                         callables=cbs,
                         model=models)
        eq_phases = eq["Phase"].values.squeeze()
        num_eq_phases = np.nansum(eq_phases != '')
        new_phases_seen = set(eq_phases).difference(phases_seen)
        if len(new_phases_seen) > 0:
            if verbose:
                print('New phases seen: {}. Resetting step size'.format(
                    new_phases_seen))
            phases_seen |= new_phases_seen
            temp += step_temperature
            step_temperature = T_STEP_ORIG
            continue
        if liquid_phase_name not in eq["Phase"].values.squeeze():
            if num_eq_phases == 0:
                print('Convergence failure: T={} and {}'.format(
                    temp, fmt_comp_conds))
            if T_STEP_ORIG / step_temperature > MAXIMUM_STEP_SIZE_REDUCTION:
                if verbose:
                    print(
                        'No liquid phase found at T={}, {} (Found {}). Maximum step size reduction exceeded. Stopping.'
                        .format(temp, fmt_comp_conds, eq_phases))
                break
            else:
                if verbose:
                    print(
                        'No liquid phase found at T={}, {} (Found {}). Stepping back and reducing step size.'
                        .format(temp, fmt_comp_conds, eq_phases))
                temp += step_temperature
                step_temperature /= STEP_SCALE_FACTOR
                continue
        # TODO: Will break if there is a liquid miscibility gap
        liquid_vertex = sorted(
            np.nonzero(
                eq["Phase"].values.squeeze().flat == liquid_phase_name))[0]
        liquid_comp = {
            comp: float(eq["X"].isel(vertex=liquid_vertex).squeeze().sel(
                component=str(comp)[2:]).values)
            for comp in independent_comps
        }
        x_liquid.append(liquid_comp)
        np_liq = np.nansum(
            eq.where(eq["Phase"] == liquid_phase_name).NP.values)
        current_fraction_solid = float(fraction_solid[-1])
        found_phase_amounts = [(liquid_phase_name, np_liq)
                               ]  # tuples of phase name, amount
        for solid_phase in solid_phases:
            if solid_phase not in eq["Phase"].values.squeeze():
                phase_amounts[solid_phase].append(0.0)
                continue
            np_tieline = np.nansum(
                eq.where(eq["Phase"] == solid_phase)["NP"].values.squeeze())
            found_phase_amounts.append((solid_phase, np_tieline))
            delta_fraction_solid = (1 - current_fraction_solid) * np_tieline
            current_fraction_solid += delta_fraction_solid
            phase_amounts[solid_phase].append(delta_fraction_solid)
        fraction_solid.append(current_fraction_solid)
        temperatures.append(temp)
        NL = 1 - fraction_solid[-1]
        if verbose:
            phase_amnts = ' '.join([
                'NP({})={:0.3f}'.format(ph, amnt)
                for ph, amnt in found_phase_amounts
            ])
            if NL < 1.0e-3:
                print('T={:0.3f}, {}, ΔT={:0.3f}, NL: {:.2E}, {}'.format(
                    temp, fmt_comp_conds, step_temperature, NL, phase_amnts))
            else:
                print('T={:0.3f}, {}, ΔT={:0.3f}, NL: {:0.3f}, {}'.format(
                    temp, fmt_comp_conds, step_temperature, NL, phase_amnts))
        if NL < stop:
            if verbose:
                print(
                    'Liquid fraction below criterion {}. Stopping at {}'
                    .format(stop, fmt_comp_conds))
            break

        temp -= step_temperature

    if fraction_solid[-1] < 1:
        x_liquid.append(NAN_DICT)
        fraction_solid.append(1.0)
        temperatures.append(temp)
        # set the final phase amount to the phase fractions in the eutectic
        # this method gives the sum total phase amounts of 1.0 by construction
        for solid_phase in solid_phases:
            amount = np.nansum(eq["NP"].isel(
                T=0, P=0).where(eq["Phase"] == solid_phase).values)
            phase_amounts[solid_phase].append(
                float(amount) * (1 - current_fraction_solid))

    # take the instantaneous phase amounts and make them cumulative
    phase_amounts = {
        ph: np.cumsum(amnts).tolist()
        for ph, amnts in phase_amounts.items()
    }

    return SolidifcationResult(x_liquid, fraction_solid, temperatures,
                               phase_amounts)
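
A hedged usage sketch for the Scheil simulation above, assuming the function is in scope. The 'alzn_mey.tdb' database file and the alloy composition are hypothetical, and the attributes read from the result simply mirror the constructor arguments used in the function (liquid compositions, fraction solid, temperatures, phase amounts).

from pycalphad import Database, variables as v

dbf = Database('alzn_mey.tdb')  # hypothetical Al-Zn database file
comps = ['AL', 'ZN', 'VA']
phases = sorted(dbf.phases.keys())
initial_composition = {v.X('ZN'): 0.3}
start_temperature = 850.0  # K; must be single-phase liquid

sol_res = simulate_scheil_solidification(
    dbf, comps, phases, initial_composition,
    start_temperature, step_temperature=1.0, verbose=True)

# Print the solidification path
for T, fs in zip(sol_res.temperatures, sol_res.fraction_solid):
    print(f'T={T:0.1f} K  fraction solid={fs:0.3f}')
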
Example #17
0
def simulate_equilibrium_solidification(dbf,
                                        comps,
                                        phases,
                                        composition,
                                        start_temperature,
                                        step_temperature=1.0,
                                        liquid_phase_name='LIQUID',
                                        adaptive=True,
                                        eq_kwargs=None,
                                        binary_search_tol=0.1,
                                        verbose=False):
    """
    Compute the equilibrium solidification path.

    Decreases temperature until no liquid is found, performing a binary search to find the solidus temperature.

    Parameters
    ----------
    dbf : pycalphad.Database
        Database object.
    comps : list
        List of components in the system.
    phases : list
        List of phases in the system.
    composition : Dict[v.X, float]
        Dictionary of independent `v.X` composition variables.
    start_temperature : float
        Starting temperature for simulation. Should be single phase liquid.
    step_temperature : Optional[float]
        Temperature step size. Defaults to 1.0.
    liquid_phase_name : Optional[str]
        Name of the phase treated as liquid (i.e. the phase with infinitely
        fast diffusion). Defaults to 'LIQUID'.
    eq_kwargs: Optional[Dict[str, Any]]
        Keyword arguments for equilibrium
    binary_search_tol : float
        Stop the binary search when the difference between temperatures is less than this amount.
    adaptive : Optional[bool]
        Whether to add additional points near the equilibrium points at each
        step. Only takes effect if ``points`` is present in ``eq_kwargs['calc_opts']``.

    Returns
    -------
    scheil.solidification_result.SolidificationResult

    """
    eq_kwargs = eq_kwargs or dict()
    phases = filter_phases(dbf, unpack_components(dbf, comps), phases)
    ord_disord_dict = order_disorder_dict(dbf, comps, phases)
    solid_phases = sorted(set(phases) - {liquid_phase_name})
    independent_comps = sorted([str(comp)[2:] for comp in composition.keys()])
    models = instantiate_models(dbf, comps, phases)
    if verbose:
        print('building callables... ', end='')
    cbs = build_callables(dbf,
                          comps,
                          phases,
                          models,
                          additional_statevars={v.P, v.T, v.N},
                          build_gradients=True,
                          build_hessians=True)
    if verbose:
        print('done')
    conds = {v.P: 101325, v.N: 1.0}
    conds.update(composition)

    if adaptive and ('points' in eq_kwargs.get('calc_opts', {})):
        # Dynamically add points as the simulation runs
        species = unpack_components(dbf, comps)
        dof_dict = {
            ph: generate_dof(dbf.phases[ph], species)[1]
            for ph in phases
        }
    else:
        adaptive = False

    temperatures = []
    x_liquid = {comp: [] for comp in independent_comps}
    fraction_solid = []
    phase_amounts = {ph: []
                     for ph in solid_phases}  # instantaneous phase amounts
    cum_phase_amounts = {ph: [] for ph in solid_phases}
    converged = False
    current_T = start_temperature
    if verbose:
        print('T=')
    while len(fraction_solid) == 0 or fraction_solid[-1] < 1:
        sys.stdout.flush()
        conds[v.T] = current_T
        if verbose:
            print(f'{current_T} ', end='')
        eq = equilibrium(dbf,
                         comps,
                         phases,
                         conds,
                         callables=cbs,
                         model=models,
                         to_xarray=False,
                         **eq_kwargs)
        if not is_converged(eq):
            if verbose:
                comp_conds = {
                    cond: val
                    for cond, val in conds.items() if isinstance(cond, v.X)
                }
                print(f"Convergence failure at T={conds[v.T]} X={comp_conds} ")
        if adaptive:
            # Update the points dictionary with local samples around the equilibrium site fractions
            update_points(eq, eq_kwargs['calc_opts']['points'], dof_dict)
        if liquid_phase_name in eq.Phase:
            # Add the liquid phase composition
            # TODO: will break in a liquid miscibility gap
            liquid_vertex = np.nonzero(eq.Phase == liquid_phase_name)[-1][0]
            for comp in independent_comps:
                x_liquid[comp].append(
                    float(eq.X[..., liquid_vertex,
                               eq.component.index(comp)]))
            temperatures.append(current_T)
            current_T -= step_temperature
        else:
            # binary search to find the solidus
            T_high = current_T + step_temperature  # High temperature, liquid
            T_low = current_T  # Low temperature, solids only
            found_ph = set(eq.Phase[eq.Phase != ''].tolist())
            if verbose:
                print(
                    f'Found phases {found_ph}. Starting binary search between T={(T_low, T_high)} ',
                    end='')
            while (T_high - T_low) > binary_search_tol:
                bin_search_T = (T_high - T_low) * 0.5 + T_low
                conds[v.T] = bin_search_T
                eq = equilibrium(dbf,
                                 comps,
                                 phases,
                                 conds,
                                 callables=cbs,
                                 model=models,
                                 to_xarray=False,
                                 **eq_kwargs)
                if adaptive:
                    # Update the points dictionary with local samples around the equilibrium site fractions
                    update_points(eq, eq_kwargs['calc_opts']['points'],
                                  dof_dict)
                if not is_converged(eq):
                    if verbose:
                        comp_conds = {
                            cond: val
                            for cond, val in conds.items()
                            if isinstance(cond, v.X)
                        }
                        print(
                            f"Convergence failure at T={conds[v.T]} X={comp_conds} "
                        )
                if liquid_phase_name in eq.Phase:
                    T_high = bin_search_T
                else:
                    T_low = bin_search_T
            conds[v.T] = T_low
            temperatures.append(T_low)
            eq = equilibrium(dbf,
                             comps,
                             phases,
                             conds,
                             callables=cbs,
                             model=models,
                             to_xarray=False,
                             **eq_kwargs)
            if not is_converged(eq):
                if verbose:
                    comp_conds = {
                        cond: val
                        for cond, val in conds.items()
                        if isinstance(cond, v.X)
                    }
                    print(
                        f"Convergence failure at T={conds[v.T]} X={comp_conds} "
                    )
            if verbose:
                found_phases = set(eq.Phase[eq.Phase != ''].tolist())
                print(
                    f"Finshed binary search at T={conds[v.T]} with phases={found_phases} and NP={eq.NP.squeeze()[:len(found_phases)]}"
                )
            if adaptive:
                # Update the points dictionary with local samples around the equilibrium site fractions
                update_points(eq, eq_kwargs['calc_opts']['points'], dof_dict)
            # Set the liquid phase composition to NaN
            for comp in independent_comps:
                x_liquid[comp].append(float(np.nan))

        # Calculate fraction of solid and solid phase amounts
        current_fraction_solid = 0.0
        current_cum_phase_amnts = get_phase_amounts(
            order_disorder_eq_phases(eq.get_dataset(), ord_disord_dict),
            eq.NP.squeeze(), solid_phases)
        for solid_phase, amount in current_cum_phase_amnts.items():
            # Since the equilibrium calculations always give the "cumulative" phase amount,
            # we need to take the difference to get the instantaneous.
            cum_phase_amounts[solid_phase].append(amount)
            if len(phase_amounts[solid_phase]) == 0:
                phase_amounts[solid_phase].append(amount)
            else:
                phase_amounts[solid_phase].append(
                    amount - cum_phase_amounts[solid_phase][-2])
            current_fraction_solid += amount
        fraction_solid.append(current_fraction_solid)

    converged = bool(np.isclose(fraction_solid[-1], 1.0))
    return SolidificationResult(x_liquid, fraction_solid, temperatures,
                                phase_amounts, converged, "equilibrium")
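
A similarly hedged sketch for the equilibrium solidification path above, reusing the same hypothetical Al-Zn database; attribute names on the returned SolidificationResult follow the constructor arguments seen in the function, and x_liquid here is a dict of per-component lists rather than a list of dicts.

from pycalphad import Database, variables as v

dbf = Database('alzn_mey.tdb')  # hypothetical Al-Zn database file
comps = ['AL', 'ZN', 'VA']
phases = sorted(dbf.phases.keys())

sol_res = simulate_equilibrium_solidification(
    dbf, comps, phases, {v.X('ZN'): 0.3},
    start_temperature=850.0, step_temperature=5.0,
    binary_search_tol=0.1, verbose=True)

print('Solidus located near T =', sol_res.temperatures[-1], 'K')
print('Converged:', sol_res.converged)
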
Example #18
0
def equilibrium(dbf, comps, phases, conditions, output=None, model=None,
                verbose=False, broadcast=True, calc_opts=None,
                scheduler='sync', parameters=None, solver=None, callables=None,
                **kwargs):
    """
    Calculate the equilibrium state of a system containing the specified
    components and phases, under the specified conditions.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : list
        Names of components to consider in the calculation.
    phases : list or dict
        Names of phases to consider in the calculation.
    conditions : dict or (list of dict)
        StateVariables and their corresponding value.
    output : str or list of str, optional
        Additional equilibrium model properties (e.g., CPM, HM, etc.) to compute.
        These must be defined as attributes in the Model class of each phase.
    model : Model, a dict of phase names to Model, or a seq of both, optional
        Model class to use for each phase.
    verbose : bool, optional
        Print details of calculations. Useful for debugging.
    broadcast : bool
        If True, broadcast conditions against each other. This will compute all combinations.
        If False, each condition should be an equal-length list (or single-valued).
        Disabling broadcasting is useful for calculating equilibrium at selected conditions,
        when those conditions don't comprise a grid.
    calc_opts : dict, optional
        Keyword arguments to pass to `calculate`, the energy/property calculation routine.
    scheduler : Dask scheduler, optional
        Job scheduler for performing the computation.
        If None, return a Dask graph of the computation instead of actually doing it.
    parameters : dict, optional
        Maps SymPy Symbol to numbers, for overriding the values of parameters in the Database.
    solver : pycalphad.core.solver.SolverBase
        Instance of a solver that is used to calculate local equilibria.
        Defaults to a pycalphad.core.solver.InteriorPointSolver.
    callables : dict, optional
        Pre-computed callable functions for equilibrium calculation.

    Returns
    -------
    Structured equilibrium calculation, or Dask graph if scheduler=None.

    Examples
    --------
    None yet.
    """
    if not broadcast:
        raise NotImplementedError('Broadcasting cannot yet be disabled')
    comps = sorted(unpack_components(dbf, comps))
    phases = unpack_phases(phases) or sorted(dbf.phases.keys())
    # remove phases that cannot be active
    list_of_possible_phases = filter_phases(dbf, comps)
    active_phases = sorted(set(list_of_possible_phases).intersection(set(phases)))
    if len(list_of_possible_phases) == 0:
        raise ConditionError('There are no phases in the Database that can be active with components {0}'.format(comps))
    if len(active_phases) == 0:
        raise ConditionError('None of the passed phases ({0}) are active. List of possible phases: {1}.'.format(phases, list_of_possible_phases))
    if isinstance(comps, (str, v.Species)):
        comps = [comps]
    if len(set(comps) - set(dbf.species)) > 0:
        raise EquilibriumError('Components not found in database: {}'
                               .format(','.join([c.name for c in (set(comps) - set(dbf.species))])))
    calc_opts = calc_opts if calc_opts is not None else dict()
    solver = solver if solver is not None else InteriorPointSolver(verbose=verbose)
    parameters = parameters if parameters is not None else dict()
    if isinstance(parameters, dict):
        parameters = OrderedDict(sorted(parameters.items(), key=str))
    models = instantiate_models(dbf, comps, active_phases, model=model, parameters=parameters)
    # Temporary solution until constraint system improves
    if conditions.get(v.N) is None:
        conditions[v.N] = 1
    if np.any(np.array(conditions[v.N]) != 1):
        raise ConditionError('N!=1 is not yet supported, got N={}'.format(conditions[v.N]))
    # Modify conditions values to be within numerical limits, e.g., X(AL)=0
    # Also wrap single-valued conditions with lists
    conds = _adjust_conditions(conditions)

    for cond in conds.keys():
        if isinstance(cond, (v.Composition, v.ChemicalPotential)) and cond.species not in comps:
            raise ConditionError('{} refers to non-existent component'.format(cond))
    state_variables = sorted(get_state_variables(models=models, conds=conds), key=str)
    str_conds = OrderedDict((str(key), value) for key, value in conds.items())
    num_calcs = np.prod([len(i) for i in str_conds.values()])
    components = [x for x in sorted(comps)]
    desired_active_pure_elements = [list(x.constituents.keys()) for x in components]
    desired_active_pure_elements = [el.upper() for constituents in desired_active_pure_elements for el in constituents]
    pure_elements = sorted(set([x for x in desired_active_pure_elements if x != 'VA']))
    if verbose:
        print('Components:', ' '.join([str(x) for x in comps]))
        print('Phases:', end=' ')
    output = output if output is not None else 'GM'
    output = output if isinstance(output, (list, tuple, set)) else [output]
    output = set(output)
    output |= {'GM'}
    output = sorted(output)
    need_hessians = any(type(c) in v.CONDITIONS_REQUIRING_HESSIANS for c in conds.keys())
    phase_records = build_phase_records(dbf, comps, active_phases, conds, models,
                                        output='GM', callables=callables,
                                        parameters=parameters, verbose=verbose,
                                        build_gradients=True, build_hessians=need_hessians)
    if verbose:
        print('[done]', end='\n')

    # 'calculate' accepts conditions through its keyword arguments
    grid_opts = calc_opts.copy()
    statevar_strings = [str(x) for x in state_variables]
    grid_opts.update({key: value for key, value in str_conds.items() if key in statevar_strings})
    if 'pdens' not in grid_opts:
        grid_opts['pdens'] = 500
    grid = delayed(calculate, pure=False)(dbf, comps, active_phases,
                                          model=models, fake_points=True,
                                          callables=callables, output='GM',
                                          parameters=parameters, **grid_opts)
    coord_dict = str_conds.copy()
    coord_dict['vertex'] = np.arange(
        len(pure_elements) + 1)  # +1 is to accommodate the degenerate degree of freedom at the invariant reactions
    coord_dict['component'] = pure_elements
    grid_shape = tuple(len(x) for x in conds.values()) + (len(pure_elements)+1,)
    properties = delayed(starting_point, pure=False)(conds, state_variables, phase_records, grid)
    conditions_per_chunk_per_axis = 2
    if num_calcs > 1:
        # Generate slices of 'properties'
        slices = []
        for val in grid_shape[:-1]:
            idx_arr = list(range(val))
            num_chunks = int(np.floor(val/conditions_per_chunk_per_axis))
            if num_chunks > 0:
                cond_slices = [x for x in np.array_split(np.asarray(idx_arr), num_chunks) if len(x) > 0]
            else:
                cond_slices = [idx_arr]
            slices.append(cond_slices)
        chunk_dims = [len(slc) for slc in slices]
        chunk_grid = np.array(np.unravel_index(np.arange(np.prod(chunk_dims)), chunk_dims)).T
        res = []
        for chunk in chunk_grid:
            prop_slice = properties[OrderedDict(list(zip(str_conds.keys(),
                                                         [np.atleast_1d(sl)[ch] for ch, sl in zip(chunk, slices)])))]
            job = delayed(_solve_eq_at_conditions, pure=False)(comps, prop_slice, phase_records, grid,
                                                               list(str_conds.keys()), state_variables, verbose, solver=solver)
            res.append(job)
        properties = delayed(_merge_property_slices, pure=False)(properties, chunk_grid, slices, list(str_conds.keys()), res)
    else:
        # Single-process job; don't create child processes
        properties = delayed(_solve_eq_at_conditions, pure=False)(comps, properties, phase_records, grid,
                                                                  list(str_conds.keys()), state_variables, verbose, solver=solver)

    # Compute equilibrium values of any additional user-specified properties
    # We already computed these properties so don't recompute them
    output = sorted(set(output) - {'GM', 'MU'})
    for out in output:
        if (out is None) or (len(out) == 0):
            continue
        # TODO: How do we know if a specified property should be per_phase or not?
        # For now, we make a best guess
        if (out == 'degree_of_ordering') or (out == 'DOO'):
            per_phase = True
        else:
            per_phase = False
        eqcal = delayed(_eqcalculate, pure=False)(dbf, comps, active_phases, conditions, out,
                                                  data=properties, per_phase=per_phase,
                                                  callables=callables,
                                                  parameters=parameters,
                                                  model=models, **calc_opts)
        properties = delayed(properties.merge, pure=False)(eqcal, compat='equals')
    if scheduler is not None:
        properties = dask.compute(properties, scheduler=scheduler)[0]
    properties.attrs['created'] = datetime.utcnow().isoformat()
    if len(kwargs) > 0:
        warnings.warn('The following equilibrium keyword arguments were passed, but unused:\n{}'.format(kwargs))
    return properties
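
A hedged usage sketch for equilibrium() as defined above, again assuming a hypothetical 'Al-Ni.tdb' database; conditions map state variables and compositions to fixed values or (start, stop, step) ranges.

from pycalphad import Database, equilibrium, variables as v

dbf = Database('Al-Ni.tdb')  # hypothetical TDB file
comps = ['AL', 'NI', 'VA']
phases = ['FCC_A1', 'LIQUID']

conds = {v.X('NI'): 0.25, v.T: (600, 2000, 50), v.P: 101325, v.N: 1}
eq = equilibrium(dbf, comps, phases, conds, verbose=False)

# Names of the phases stable at each condition ('' marks empty vertices)
print(eq.Phase.squeeze().values)
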
Example #19
0
def equilibrium(dbf,
                comps,
                phases,
                conditions,
                output=None,
                model=None,
                verbose=False,
                broadcast=True,
                calc_opts=None,
                to_xarray=True,
                scheduler='sync',
                parameters=None,
                solver=None,
                callables=None,
                **kwargs):
    """
    Calculate the equilibrium state of a system containing the specified
    components and phases, under the specified conditions.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : list
        Names of components to consider in the calculation.
    phases : list or dict
        Names of phases to consider in the calculation.
    conditions : dict or (list of dict)
        StateVariables and their corresponding value.
    output : str or list of str, optional
        Additional equilibrium model properties (e.g., CPM, HM, etc.) to compute.
        These must be defined as attributes in the Model class of each phase.
    model : Model, a dict of phase names to Model, or a seq of both, optional
        Model class to use for each phase.
    verbose : bool, optional
        Print details of calculations. Useful for debugging.
    broadcast : bool
        If True, broadcast conditions against each other. This will compute all combinations.
        If False, each condition should be an equal-length list (or single-valued).
        Disabling broadcasting is useful for calculating equilibrium at selected conditions,
        when those conditions don't comprise a grid.
    calc_opts : dict, optional
        Keyword arguments to pass to `calculate`, the energy/property calculation routine.
    to_xarray : bool
        Whether to return an xarray Dataset (True, default) or an EquilibriumResult.
    scheduler : Dask scheduler, optional
        Job scheduler for performing the computation.
        If None, return a Dask graph of the computation instead of actually doing it.
    parameters : dict, optional
        Maps SymPy Symbol to numbers, for overriding the values of parameters in the Database.
    solver : pycalphad.core.solver.SolverBase
        Instance of a solver that is used to calculate local equilibria.
        Defaults to a pycalphad.core.solver.InteriorPointSolver.
    callables : dict, optional
        Pre-computed callable functions for equilibrium calculation.

    Returns
    -------
    Structured equilibrium calculation, or Dask graph if scheduler=None.

    Examples
    --------
    None yet.
    """
    if not broadcast:
        raise NotImplementedError('Broadcasting cannot yet be disabled')
    comps = sorted(unpack_components(dbf, comps))
    phases = unpack_phases(phases) or sorted(dbf.phases.keys())
    list_of_possible_phases = filter_phases(dbf, comps)
    if len(list_of_possible_phases) == 0:
        raise ConditionError(
            'There are no phases in the Database that can be active with components {0}'
            .format(comps))
    active_phases = {
        name: dbf.phases[name]
        for name in filter_phases(dbf, comps, phases)
    }
    if len(active_phases) == 0:
        raise ConditionError(
            'None of the passed phases ({0}) are active. List of possible phases: {1}.'
            .format(phases, list_of_possible_phases))
    if isinstance(comps, (str, v.Species)):
        comps = [comps]
    if len(set(comps) - set(dbf.species)) > 0:
        raise EquilibriumError('Components not found in database: {}'.format(
            ','.join([c.name for c in (set(comps) - set(dbf.species))])))
    calc_opts = calc_opts if calc_opts is not None else dict()
    solver = solver if solver is not None else InteriorPointSolver(
        verbose=verbose)
    parameters = parameters if parameters is not None else dict()
    if isinstance(parameters, dict):
        parameters = OrderedDict(sorted(parameters.items(), key=str))
    models = instantiate_models(dbf,
                                comps,
                                active_phases,
                                model=model,
                                parameters=parameters)
    # Temporary solution until constraint system improves
    if conditions.get(v.N) is None:
        conditions[v.N] = 1
    if np.any(np.array(conditions[v.N]) != 1):
        raise ConditionError('N!=1 is not yet supported, got N={}'.format(
            conditions[v.N]))
    # Modify conditions values to be within numerical limits, e.g., X(AL)=0
    # Also wrap single-valued conditions with lists
    conds = _adjust_conditions(conditions)

    for cond in conds.keys():
        if isinstance(cond,
                      (v.Composition,
                       v.ChemicalPotential)) and cond.species not in comps:
            raise ConditionError(
                '{} refers to non-existent component'.format(cond))
    state_variables = sorted(get_state_variables(models=models, conds=conds),
                             key=str)
    str_conds = OrderedDict((str(key), value) for key, value in conds.items())
    components = [x for x in sorted(comps)]
    desired_active_pure_elements = [
        list(x.constituents.keys()) for x in components
    ]
    desired_active_pure_elements = [
        el.upper() for constituents in desired_active_pure_elements
        for el in constituents
    ]
    pure_elements = sorted(
        set([x for x in desired_active_pure_elements if x != 'VA']))
    if verbose:
        print('Components:', ' '.join([str(x) for x in comps]))
        print('Phases:', end=' ')
    output = output if output is not None else 'GM'
    output = output if isinstance(output, (list, tuple, set)) else [output]
    output = set(output)
    output |= {'GM'}
    output = sorted(output)
    phase_records = build_phase_records(dbf,
                                        comps,
                                        active_phases,
                                        conds,
                                        models,
                                        output='GM',
                                        callables=callables,
                                        parameters=parameters,
                                        verbose=verbose,
                                        build_gradients=True,
                                        build_hessians=True)
    if verbose:
        print('[done]', end='\n')

    # 'calculate' accepts conditions through its keyword arguments
    grid_opts = calc_opts.copy()
    statevar_strings = [str(x) for x in state_variables]
    grid_opts.update({
        key: value
        for key, value in str_conds.items() if key in statevar_strings
    })
    if 'pdens' not in grid_opts:
        grid_opts['pdens'] = 500
    grid = calculate(dbf,
                     comps,
                     active_phases,
                     model=models,
                     fake_points=True,
                     callables=callables,
                     output='GM',
                     parameters=parameters,
                     to_xarray=False,
                     **grid_opts)
    coord_dict = str_conds.copy()
    coord_dict['vertex'] = np.arange(
        len(pure_elements) + 1
    )  # +1 is to accommodate the degenerate degree of freedom at the invariant reactions
    coord_dict['component'] = pure_elements
    properties = starting_point(conds, state_variables, phase_records, grid)
    properties = _solve_eq_at_conditions(comps,
                                         properties,
                                         phase_records,
                                         grid,
                                         list(str_conds.keys()),
                                         state_variables,
                                         verbose,
                                         solver=solver)

    # Compute equilibrium values of any additional user-specified properties
    # We already computed these properties so don't recompute them
    output = sorted(set(output) - {'GM', 'MU'})
    for out in output:
        if (out is None) or (len(out) == 0):
            continue
        # TODO: How do we know if a specified property should be per_phase or not?
        # For now, we make a best guess
        if (out == 'degree_of_ordering') or (out == 'DOO'):
            per_phase = True
        else:
            per_phase = False
        eqcal = _eqcalculate(dbf,
                             comps,
                             active_phases,
                             conditions,
                             out,
                             data=properties,
                             per_phase=per_phase,
                             model=models,
                             callables=callables,
                             parameters=parameters,
                             **calc_opts)
        properties = properties.merge(eqcal, inplace=True, compat='equals')
    if to_xarray:
        properties = properties.get_dataset()
    properties.attrs['created'] = datetime.utcnow().isoformat()
    if len(kwargs) > 0:
        warnings.warn(
            'The following equilibrium keyword arguments were passed, but unused:\n{}'
            .format(kwargs))
    return properties
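
Compared with the previous example, this variant adds a to_xarray flag and can return the lighter-weight result object directly. A hedged sketch, assuming the equilibrium() defined above is in scope and that the database exposes an adjustable symbol named VV0001 (both hypothetical):

from pycalphad import Database, variables as v

dbf = Database('Al-Ni.tdb')  # hypothetical TDB file
comps = ['AL', 'NI', 'VA']
phases = ['FCC_A1', 'LIQUID']
conds = {v.X('NI'): 0.25, v.T: 1400, v.P: 101325, v.N: 1}

# Override a database symbol and skip the xarray conversion.
eq = equilibrium(dbf, comps, phases, conds,
                 parameters={'VV0001': -10000.0}, to_xarray=False)
print(eq.GM.squeeze())
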
Example #20
0
def calculate_driving_force(dbf,
                            data_comps,
                            phases,
                            current_statevars,
                            ph_cond_dict,
                            phase_models,
                            phase_dict,
                            parameters,
                            callables,
                            tol=0.001,
                            max_it=50):
    """
    Calculates driving force for a single data point.

    Parameters
    ----------
    dbf : pycalphad.Database
        Database to consider
    data_comps : list
        List of active component names
    phases : list
        List of phases to consider
    current_statevars : dict
        Dictionary of state variables, e.g. v.P and v.T, no compositions.
    ph_cond_dict : dict
        Dictionary mapping phases to the conditions at which they occurred in experiment.
    phase_models : dict
        Phase models to pass to pycalphad calculations
    phase_dict : dict
        Per-phase cache of sampling points, phase records, and energies; updated in place.
    parameters : dict
        Dictionary of symbols that will be overridden in pycalphad.equilibrium
    callables : dict
        Callables to pass to pycalphad
    tol : float
        The tolerance allowed for optimization over hyperplanes.
    max_it : int
        The maximum number of iterations allowed for optimization over hyperplanes.

    Notes
    -----
    Calculates the driving force by optimizing the driving force over the chemical potential.
    Allows calculation of the driving force even when both tie points are missing.
    """
    # TODO Refactor absurd unpacking which represents a significant overhead.
    species = list(map(v.Species, data_comps))
    conditions = current_statevars
    if conditions.get(v.N) is None:
        conditions[v.N] = 1.0
    if np.any(np.array(conditions[v.N]) != 1):
        raise ConditionError('N!=1 is not yet supported, got N={}'.format(
            conditions[v.N]))
    conds = conditions
    str_conds = OrderedDict([(str(key), conds[key])
                             for key in sorted(conds.keys(), key=str)])
    models = instantiate_models(dbf,
                                data_comps,
                                phases,
                                model=phase_models,
                                parameters=parameters)
    prxs = build_phase_records(dbf,
                               species,
                               phases,
                               conds,
                               models,
                               build_gradients=True,
                               build_hessians=True,
                               callables=callables,
                               parameters=parameters)
    # Collect data information in phase_dict.
    for phase in phases:
        phase_dict[phase]['data'] = False
    for ph, cond in ph_cond_dict:
        has_nones = False
        ph_conds = cond[0]
        phase_dict[ph]['data'] = True
        for key in ph_conds:
            if ph_conds[key] is None:
                has_nones = True
                phase_dict[ph]['phase_record'] = None
                phase_dict[ph]['str_conds'] = None
        if not has_nones:
            ph_conds.update(conditions)
            phase_records = build_phase_records(dbf,
                                                species, [ph],
                                                ph_conds,
                                                models,
                                                build_gradients=True,
                                                build_hessians=True,
                                                callables=callables,
                                                parameters=parameters)
            phase_dict[ph]['phase_record'] = phase_records[ph]
            phase_dict[ph]['str_conds'] = OrderedDict([
                (str(key), ph_conds[key])
                for key in sorted(ph_conds.keys(), key=str)
            ])
            phase_dict[ph]['min_energy'] = None
    # Collect sampling and equilibrium information in phase_dict.
    for phase in phases:
        # If sample points have not yet been calculated for this phase, calculate them.
        if 'sample_points' not in phase_dict[phase]:
            phase_obj = dbf.phases[phase]
            components = models[phase].components
            variables, sublattice_dof = generate_dof(phase_obj, components)
            sample_points = _sample_phase_constitution(
                phase, phase_obj.constituents, sublattice_dof, data_comps,
                tuple(variables), point_sample, True, 2000)
            phase_dict[phase]['sample_points'] = sample_points
        # If composition values have not yet been calculated for this phase, calculate them.
        if 'composition_values' not in phase_dict[phase]:
            # Use the cached sample points so this branch also works when they
            # were computed on an earlier call for this phase.
            sample_points = phase_dict[phase]['sample_points']
            composition_values = np.zeros(
                (sample_points.shape[0],
                 len([sp for sp in species if sp.__str__() != 'VA'])))
            temp_comp_set = CompositionSet(prxs[phase])
            current_state_variables = np.array(
                [str_conds[key] for key in sorted(str_conds.keys(), key=str)])
            for i in range(sample_points.shape[0]):
                temp_comp_set.py_update(sample_points[i, :], np.array([1.0]),
                                        current_state_variables, False)
                composition_values[i, :] = temp_comp_set.X
            phase_dict[phase]['composition_values'] = composition_values
        energies = calculate(dbf,
                             data_comps, [phase],
                             points=phase_dict[phase]['sample_points'],
                             to_xarray=False,
                             **str_conds)
        phase_dict[phase]['energy_values'] = np.array(energies['GM'][0][0][0])
    hyperplane = generate_random_hyperplane(species)
    result = calculate_driving_force_at_chem_potential(dbf,
                                                       hyperplane,
                                                       species,
                                                       phase_dict,
                                                       prxs,
                                                       str_conds,
                                                       approx=True)
    # Ignore entire data point if pointsolver fails to converge.
    if result is None:
        return 0
    # Optimize over the hyperplane.
    it = 0
    current_driving_force = result['driving_force']
    new_plane = result['new_plane']
    last_plane = new_plane
    while np.linalg.norm(hyperplane - last_plane) > tol and it < max_it:
        it += 1
        last_plane = hyperplane
        result = calculate_driving_force_at_chem_potential(dbf,
                                                           new_plane,
                                                           species,
                                                           phase_dict,
                                                           prxs,
                                                           str_conds,
                                                           approx=True)
        # If step results in objective decrease, accept the step.
        if result['driving_force'] < current_driving_force:
            current_driving_force = result['driving_force']
            hyperplane = new_plane
            new_plane = result['new_plane']
        else:
            step = 0.5
            temp_hyperplane = new_plane
            while (result['driving_force'] > current_driving_force
                   and np.linalg.norm(hyperplane - temp_hyperplane) > tol):
                temp_hyperplane = (1.0 - step) * hyperplane + step * new_plane
                result = calculate_driving_force_at_chem_potential(
                    dbf,
                    temp_hyperplane,
                    species,
                    phase_dict,
                    prxs,
                    str_conds,
                    approx=True)
                step /= 2
            hyperplane = temp_hyperplane
            result = calculate_driving_force_at_chem_potential(dbf,
                                                               hyperplane,
                                                               species,
                                                               phase_dict,
                                                               prxs,
                                                               str_conds,
                                                               approx=True)
            new_plane = result['new_plane']
            current_driving_force = result['driving_force']
    final_result = calculate_driving_force_at_chem_potential(dbf,
                                                             hyperplane,
                                                             species,
                                                             phase_dict,
                                                             prxs,
                                                             str_conds,
                                                             approx=True)
    final_driving_force = final_result['driving_force']
    print(it, final_driving_force, hyperplane)
    return final_driving_force
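
The inner loop above implements a simple accept-or-backtrack update on the hyperplane: a proposed plane is accepted if it lowers the driving force, otherwise the step toward it is repeatedly halved. A standalone sketch of that pattern, with a hypothetical scalar objective f standing in for calculate_driving_force_at_chem_potential:

import numpy as np

def backtracking_update(x_current, x_proposed, f, tol=1e-3):
    # Accept the proposal if it decreases the objective; otherwise damp
    # the step toward it until it does or the step becomes negligible.
    f_current = f(x_current)
    if f(x_proposed) < f_current:
        return x_proposed
    step = 0.5
    trial = x_proposed
    while f(trial) > f_current and np.linalg.norm(x_current - trial) > tol:
        trial = (1.0 - step) * x_current + step * x_proposed
        step /= 2
    return trial

# Example with a quadratic objective (hypothetical)
f = lambda x: float(np.sum(x ** 2))
print(backtracking_update(np.array([1.0, 1.0]), np.array([2.0, 0.0]), f))
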