Code example #1
File: cost.py  Project: jmmshn/pymatgen
    def get_lowest_decomposition(self, composition):
        """
        Get the decomposition leading to the lowest cost.

        Args:
            composition:
                Composition as a pymatgen.core.composition.Composition
        Returns:
            Decomposition as a dict of {Entry: amount}
        """

        entries_list = []
        elements = [e.symbol for e in composition.elements]
        for i in range(len(elements)):
            for combi in itertools.combinations(elements, i + 1):
                chemsys = [Element(e) for e in combi]
                x = self.costdb.get_entries(chemsys)
                entries_list.extend(x)
        try:
            pd = PhaseDiagram(entries_list)
            return pd.get_decomposition(composition)
        except IndexError:
            raise ValueError(
                "Error during PD building; most likely, cost data does not exist!"
            )
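For context, here is a minimal usage sketch for this method. It is hedged: the import path below (pymatgen.analysis.cost) has moved between pymatgen versions, and the cost-database CSV file name is hypothetical.

# Minimal usage sketch (assumed setup); the CSV path is hypothetical.
from pymatgen.core.composition import Composition
from pymatgen.analysis.cost import CostAnalyzer, CostDBCSV

costdb = CostDBCSV("costdb_elements.csv")  # hypothetical cost data file
analyzer = CostAnalyzer(costdb)
decomp = analyzer.get_lowest_decomposition(Composition("LiFePO4"))
for entry, amount in decomp.items():
    print(entry.composition.reduced_formula, amount)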
Code example #2
from pymatgen.core.composition import Composition
from pymatgen.analysis.phase_diagram import PDEntry, PhaseDiagram


def get_decomp_product_ids(structures, competing_phases):
    # Create phase diagrams for each of the new structure chemical systems
    phase_diagrams = []
    for competing in competing_phases:
        # Add competing phases
        entries = [
            PDEntry(Composition(i['full_formula']),
                    i['final_energy'],
                    name=i['task_id']) for i in competing
        ]
        pd = PhaseDiagram(entries)
        phase_diagrams.append(pd)

    # Put new structures on phase diagram to get the set of decomp products
    all_decomp_prods = []
    for new_struc, pd in zip(structures, phase_diagrams):
        comp = new_struc['structure'].composition.element_composition
        decomp_prods = pd.get_decomposition(comp=comp)
        all_decomp_prods.extend([i.name for i in decomp_prods])

    # Reduce decomposition products to unique set
    all_decomp_prods = list(set(all_decomp_prods))
    print('{} unique competing phases to calculate'.format(
        len(all_decomp_prods)))
    return all_decomp_prods
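To clarify the data layout this function expects, here is a hedged sketch with entirely fabricated inputs (the structure, formulas, energies, and task IDs are made up): structures holds pymatgen Structure objects under the 'structure' key, and competing_phases is a parallel list of entry-dict lists.

# Hypothetical inputs, only to illustrate the expected data layout.
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure

new_struc = Structure(Lattice.cubic(3.0), ["Li", "O"], [[0, 0, 0], [0.5, 0.5, 0.5]])
structures = [{'structure': new_struc}]
competing_phases = [[
    {'full_formula': 'Li2 O1', 'final_energy': -14.3, 'task_id': 'mp-0001'},
    {'full_formula': 'Li1', 'final_energy': -1.9, 'task_id': 'mp-0002'},
    {'full_formula': 'O2', 'final_energy': -9.9, 'task_id': 'mp-0003'},
]]

task_ids = get_decomp_product_ids(structures, competing_phases)
print(task_ids)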
Code example #3
File: cost.py  Project: blondegeek/pymatgen
    def get_lowest_decomposition(self, composition):
        """
        Get the decomposition leading to the lowest cost.

        Args:
            composition:
                Composition as a pymatgen.core.composition.Composition
        Returns:
            Decomposition as a dict of {Entry: amount}
        """

        entries_list = []
        elements = [e.symbol for e in composition.elements]
        for i in range(len(elements)):
            for combi in itertools.combinations(elements, i + 1):
                chemsys = [Element(e) for e in combi]
                x = self.costdb.get_entries(chemsys)
                entries_list.extend(x)
        try:
            pd = PhaseDiagram(entries_list)
            return pd.get_decomposition(composition)
        except IndexError:
            raise ValueError("Error during PD building; most likely, "
                             "cost data does not exist!")
Code example #4
class PhaseDiagramTest(unittest.TestCase):
    def setUp(self):
        self.entries = EntrySet.from_csv(str(module_dir /
                                             "pdentries_test.csv"))
        self.pd = PhaseDiagram(self.entries)
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_init(self):
        # Ensure that a bad set of entries raises a PD error. Remove all Li
        # from self.entries.
        entries = filter(
            lambda e: (not e.composition.is_element) or e.composition.elements[
                0] != Element("Li"),
            self.entries,
        )
        self.assertRaises(PhaseDiagramError, PhaseDiagram, entries)

    def test_dim1(self):
        # Ensure that dim 1 PDs can be generated.
        for el in ["Li", "Fe", "O2"]:
            entries = [
                e for e in self.entries if e.composition.reduced_formula == el
            ]
            pd = PhaseDiagram(entries)
            self.assertEqual(len(pd.stable_entries), 1)

            for e in entries:
                decomp, ehull = pd.get_decomp_and_e_above_hull(e)
                self.assertGreaterEqual(ehull, 0)
            plotter = PDPlotter(pd)
            lines, stable_entries, unstable_entries = plotter.pd_plot_data
            self.assertEqual(lines[0][1], [0, 0])

    def test_ordering(self):
        # Test sorting of elements
        entries = [
            ComputedEntry(Composition(formula), 0)
            for formula in ["O", "N", "Fe"]
        ]
        pd = PhaseDiagram(entries)
        sorted_elements = (Element("Fe"), Element("N"), Element("O"))
        self.assertEqual(tuple(pd.elements), sorted_elements)

        entries.reverse()
        pd = PhaseDiagram(entries)
        self.assertEqual(tuple(pd.elements), sorted_elements)

        # Test manual specification of order
        ordering = [Element(elt_string) for elt_string in ["O", "N", "Fe"]]
        pd = PhaseDiagram(entries, elements=ordering)
        self.assertEqual(tuple(pd.elements), tuple(ordering))

    def test_stable_entries(self):
        stable_formulas = [
            ent.composition.reduced_formula for ent in self.pd.stable_entries
        ]
        expected_stable = [
            "Fe2O3",
            "Li5FeO4",
            "LiFeO2",
            "Fe3O4",
            "Li",
            "Fe",
            "Li2O",
            "O2",
            "FeO",
        ]
        for formula in expected_stable:
            self.assertTrue(formula in stable_formulas,
                            formula + " not in stable entries!")

    def test_get_formation_energy(self):
        stable_formation_energies = {
            ent.composition.reduced_formula: self.pd.get_form_energy(ent)
            for ent in self.pd.stable_entries
        }
        expected_formation_energies = {
            "Li5FeO4": -164.8117344866667,
            "Li2O2": -14.119232793333332,
            "Fe2O3": -16.574164339999996,
            "FeO": -5.7141519966666685,
            "Li": 0.0,
            "LiFeO2": -7.732752316666666,
            "Li2O": -6.229303868333332,
            "Fe": 0.0,
            "Fe3O4": -22.565714456666683,
            "Li2FeO3": -45.67166036000002,
            "O2": 0.0,
        }
        for formula, energy in expected_formation_energies.items():
            self.assertAlmostEqual(energy, stable_formation_energies[formula],
                                   7)

    def test_all_entries_hulldata(self):
        self.assertEqual(len(self.pd.all_entries_hulldata), 492)

    def test_planar_inputs(self):
        e1 = PDEntry("H", 0)
        e2 = PDEntry("He", 0)
        e3 = PDEntry("Li", 0)
        e4 = PDEntry("Be", 0)
        e5 = PDEntry("B", 0)
        e6 = PDEntry("Rb", 0)

        pd = PhaseDiagram([e1, e2, e3, e4, e5, e6],
                          map(Element, ["Rb", "He", "B", "Be", "Li", "H"]))

        self.assertEqual(len(pd.facets), 1)

    def test_str(self):
        self.assertIsNotNone(str(self.pd))

    def test_get_e_above_hull(self):
        for entry in self.pd.stable_entries:
            self.assertLess(
                self.pd.get_e_above_hull(entry),
                1e-11,
                "Stable entries should have e above hull of zero!",
            )

        for entry in self.pd.all_entries:
            if entry not in self.pd.stable_entries:
                e_ah = self.pd.get_e_above_hull(entry)
                self.assertTrue(isinstance(e_ah, Number))
                self.assertGreaterEqual(e_ah, 0)

    def test_get_equilibrium_reaction_energy(self):
        for entry in self.pd.stable_entries:
            self.assertLessEqual(
                self.pd.get_equilibrium_reaction_energy(entry),
                0,
                "Stable entries should have negative equilibrium reaction energy!",
            )

    def test_get_quasi_e_to_hull(self):
        for entry in self.pd.unstable_entries:
            # catch duplicated stable entries
            if entry.normalize(
                    inplace=False) in self.pd.get_stable_entries_normed():
                self.assertLessEqual(
                    self.pd.get_quasi_e_to_hull(entry),
                    0,
                    "Duplicated stable entries should have negative decomposition energy!",
                )
            else:
                self.assertGreaterEqual(
                    self.pd.get_quasi_e_to_hull(entry),
                    0,
                    "Unstable entries should have positive decomposition energy!",
                )

        for entry in self.pd.stable_entries:
            if entry.composition.is_element:
                self.assertEqual(
                    self.pd.get_quasi_e_to_hull(entry),
                    0,
                    "Stable elemental entries should have decomposition energy of zero!",
                )
            else:
                self.assertLessEqual(
                    self.pd.get_quasi_e_to_hull(entry),
                    0,
                    "Stable entries should have negative decomposition energy!",
                )

        novel_stable_entry = PDEntry("Li5FeO4", -999)
        self.assertLess(
            self.pd.get_quasi_e_to_hull(novel_stable_entry),
            0,
            "Novel stable entries should have negative decomposition energy!",
        )

        novel_unstable_entry = PDEntry("Li5FeO4", 999)
        self.assertGreater(
            self.pd.get_quasi_e_to_hull(novel_unstable_entry),
            0,
            "Novel unstable entries should have positive decomposition energy!",
        )

        duplicate_entry = PDEntry("Li2O", -14.31361175)
        scaled_dup_entry = PDEntry("Li4O2", -14.31361175 * 2)
        stable_entry = [e for e in self.pd.stable_entries
                        if e.name == "Li2O"][0]

        self.assertEqual(
            self.pd.get_quasi_e_to_hull(duplicate_entry),
            self.pd.get_quasi_e_to_hull(stable_entry),
            "Novel duplicates of stable entries should have same decomposition energy!",
        )

        self.assertEqual(
            self.pd.get_quasi_e_to_hull(scaled_dup_entry),
            self.pd.get_quasi_e_to_hull(stable_entry),
            "Novel scaled duplicates of stable entries should have same decomposition energy!",
        )

    def test_get_decomposition(self):
        for entry in self.pd.stable_entries:
            self.assertEqual(
                len(self.pd.get_decomposition(entry.composition)),
                1,
                "Stable composition should have only 1 decomposition!",
            )
        dim = len(self.pd.elements)
        for entry in self.pd.all_entries:
            ndecomp = len(self.pd.get_decomposition(entry.composition))
            self.assertTrue(
                ndecomp > 0 and ndecomp <= dim,
                "The number of decomposition phases can at most be equal to the number of components.",
            )

        # Just to test decomp for a fictitious composition
        ansdict = {
            entry.composition.formula: amt
            for entry, amt in self.pd.get_decomposition(
                Composition("Li3Fe7O11")).items()
        }
        expected_ans = {
            "Fe2 O2": 0.0952380952380949,
            "Li1 Fe1 O2": 0.5714285714285714,
            "Fe6 O8": 0.33333333333333393,
        }
        for k, v in expected_ans.items():
            self.assertAlmostEqual(ansdict[k], v)

    def test_get_transition_chempots(self):
        for el in self.pd.elements:
            self.assertLessEqual(len(self.pd.get_transition_chempots(el)),
                                 len(self.pd.facets))

    def test_get_element_profile(self):
        for el in self.pd.elements:
            for entry in self.pd.stable_entries:
                if not (entry.composition.is_element):
                    self.assertLessEqual(
                        len(self.pd.get_element_profile(el,
                                                        entry.composition)),
                        len(self.pd.facets),
                    )

        expected = [
            {
                "evolution": 1.0,
                "chempot": -4.2582781416666666,
                "reaction": "Li2O + 0.5 O2 -> Li2O2",
            },
            {
                "evolution": 0,
                "chempot": -5.0885906699999968,
                "reaction": "Li2O -> Li2O",
            },
            {
                "evolution": -1.0,
                "chempot": -10.487582010000001,
                "reaction": "Li2O -> 2 Li + 0.5 O2",
            },
        ]
        result = self.pd.get_element_profile(Element("O"), Composition("Li2O"))
        for d1, d2 in zip(expected, result):
            self.assertAlmostEqual(d1["evolution"], d2["evolution"])
            self.assertAlmostEqual(d1["chempot"], d2["chempot"])
            self.assertEqual(d1["reaction"], str(d2["reaction"]))

    def test_get_get_chempot_range_map(self):
        elements = [el for el in self.pd.elements if el.symbol != "Fe"]
        self.assertEqual(len(self.pd.get_chempot_range_map(elements)), 10)

    def test_getmu_vertices_stability_phase(self):
        results = self.pd.getmu_vertices_stability_phase(
            Composition("LiFeO2"), Element("O"))
        self.assertAlmostEqual(len(results), 6)
        test_equality = False
        for c in results:
            if (abs(c[Element("O")] + 7.115) < 1e-2
                    and abs(c[Element("Fe")] + 6.596) < 1e-2
                    and abs(c[Element("Li")] + 3.931) < 1e-2):
                test_equality = True
        self.assertTrue(test_equality,
                        "there is an expected vertex missing in the list")

    def test_getmu_range_stability_phase(self):
        results = self.pd.get_chempot_range_stability_phase(
            Composition("LiFeO2"), Element("O"))
        self.assertAlmostEqual(results[Element("O")][1], -4.4501812249999997)
        self.assertAlmostEqual(results[Element("Fe")][0], -6.5961470999999996)
        self.assertAlmostEqual(results[Element("Li")][0], -3.6250022625000007)

    def test_get_hull_energy(self):
        for entry in self.pd.stable_entries:
            h_e = self.pd.get_hull_energy(entry.composition)
            self.assertAlmostEqual(h_e, entry.energy)
            n_h_e = self.pd.get_hull_energy(
                entry.composition.fractional_composition)
            self.assertAlmostEqual(n_h_e, entry.energy_per_atom)

    def test_1d_pd(self):
        entry = PDEntry("H", 0)
        pd = PhaseDiagram([entry])
        decomp, e = pd.get_decomp_and_e_above_hull(PDEntry("H", 1))
        self.assertAlmostEqual(e, 1)
        self.assertAlmostEqual(decomp[entry], 1.0)

    def test_get_critical_compositions_fractional(self):
        c1 = Composition("Fe2O3").fractional_composition
        c2 = Composition("Li3FeO4").fractional_composition
        c3 = Composition("Li2O").fractional_composition

        comps = self.pd.get_critical_compositions(c1, c2)
        expected = [
            Composition("Fe2O3").fractional_composition,
            Composition("Li0.3243244Fe0.1621621O0.51351349"),
            Composition("Li3FeO4").fractional_composition,
        ]
        for crit, exp in zip(comps, expected):
            self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))

        comps = self.pd.get_critical_compositions(c1, c3)
        expected = [
            Composition("Fe0.4O0.6"),
            Composition("LiFeO2").fractional_composition,
            Composition("Li5FeO4").fractional_composition,
            Composition("Li2O").fractional_composition,
        ]
        for crit, exp in zip(comps, expected):
            self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))

    def test_get_critical_compositions(self):
        c1 = Composition("Fe2O3")
        c2 = Composition("Li3FeO4")
        c3 = Composition("Li2O")

        comps = self.pd.get_critical_compositions(c1, c2)
        expected = [
            Composition("Fe2O3"),
            Composition("Li0.3243244Fe0.1621621O0.51351349") * 7.4,
            Composition("Li3FeO4"),
        ]
        for crit, exp in zip(comps, expected):
            self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))

        comps = self.pd.get_critical_compositions(c1, c3)
        expected = [
            Composition("Fe2O3"),
            Composition("LiFeO2"),
            Composition("Li5FeO4") / 3,
            Composition("Li2O"),
        ]
        for crit, exp in zip(comps, expected):
            self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))

        # Don't fail silently if input compositions aren't in phase diagram
        # Can be very confusing if you're working with a GrandPotentialPD
        self.assertRaises(
            ValueError,
            self.pd.get_critical_compositions,
            Composition("Xe"),
            Composition("Mn"),
        )

        # For the moment, should also fail even if compositions are in the gppd
        # because it isn't handled properly
        gppd = GrandPotentialPhaseDiagram(self.pd.all_entries, {"Xe": 1},
                                          self.pd.elements + [Element("Xe")])
        self.assertRaises(
            ValueError,
            gppd.get_critical_compositions,
            Composition("Fe2O3"),
            Composition("Li3FeO4Xe"),
        )

        # check that the function still works though
        comps = gppd.get_critical_compositions(c1, c2)
        expected = [
            Composition("Fe2O3"),
            Composition("Li0.3243244Fe0.1621621O0.51351349") * 7.4,
            Composition("Li3FeO4"),
        ]
        for crit, exp in zip(comps, expected):
            self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))

        # case where the endpoints are identical
        self.assertEqual(self.pd.get_critical_compositions(c1, c1 * 2),
                         [c1, c1 * 2])

    def test_get_composition_chempots(self):
        c1 = Composition("Fe3.1O4")
        c2 = Composition("Fe3.2O4.1Li0.01")

        e1 = self.pd.get_hull_energy(c1)
        e2 = self.pd.get_hull_energy(c2)

        cp = self.pd.get_composition_chempots(c1)
        calc_e2 = e1 + sum(cp[k] * v for k, v in (c2 - c1).items())
        self.assertAlmostEqual(e2, calc_e2)

    def test_get_all_chempots(self):
        c1 = Composition("Fe3.1O4")
        c2 = Composition("FeO")

        cp1 = self.pd.get_all_chempots(c1)
        cpresult = {
            Element("Li"): -4.077061954999998,
            Element("Fe"): -6.741593864999999,
            Element("O"): -6.969907375000003,
        }

        for elem, energy in cpresult.items():
            self.assertAlmostEqual(cp1["Fe3O4-FeO-LiFeO2"][elem], energy)

        cp2 = self.pd.get_all_chempots(c2)
        cpresult = {
            Element("O"): -7.115354140000001,
            Element("Fe"): -6.5961471,
            Element("Li"): -3.9316151899999987,
        }

        for elem, energy in cpresult.items():
            self.assertAlmostEqual(cp2["FeO-LiFeO2-Fe"][elem], energy)

    def test_to_from_dict(self):

        # test round-trip for other entry types such as ComputedEntry
        entry = ComputedEntry("H", 0.0, 0.0, entry_id="test")
        pd = PhaseDiagram([entry])
        d = pd.as_dict()
        pd_roundtrip = PhaseDiagram.from_dict(d)
        self.assertEqual(pd.all_entries[0].entry_id,
                         pd_roundtrip.all_entries[0].entry_id)
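The tests above exercise the PhaseDiagram API piece by piece; as a compact companion, here is a hedged, self-contained sketch with toy entries (the energies are made up, not real data) of the calls the tests lean on most: stable_entries, get_decomp_and_e_above_hull, and get_decomposition.

# Toy sketch of the PhaseDiagram calls exercised by the tests; energies are fabricated.
from pymatgen.core.composition import Composition
from pymatgen.analysis.phase_diagram import PDEntry, PhaseDiagram

entries = [
    PDEntry("Li", 0.0),
    PDEntry("O2", 0.0),
    PDEntry("Li2O", -6.2),   # fictitious total energy
    PDEntry("Li2O2", -7.0),  # fictitious total energy
]
pd = PhaseDiagram(entries)

print([e.composition.reduced_formula for e in pd.stable_entries])
decomp, e_hull = pd.get_decomp_and_e_above_hull(PDEntry("Li4O2", -11.0))
print({e.name: amt for e, amt in decomp.items()}, e_hull)
print(pd.get_decomposition(Composition("Li3O2")))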
Code example #5
    def _solve_gs_preserve(self, A, f, mu, subsample_mapping, skip_gs=False):
        """
        Code notes from Daniil Kitchaev ([email protected]) - 2018-09-10

        This is a WORK IN PROGRESS based on Wenxuan's ground-state preservation fitting code.
        A, f, and mu are as in the other routines.
        subsample_mapping deals with the fact that weights change when fitting on a partial set (when figuring out mu).
        skip_gs gives the option of skipping the constrained fitting part, which is helpful when figuring out mu.

        In general, this code is really not production ready - the algorithm has serious numerical issues, and getting
        around them involved lots of fiddling with eigenvalue roundoffs, etc., as is commented below.

        There are also issues with the fact that the constraints can be very difficult to satisfy, causing the solver to
        diverge (or just quit silently, giving absurd results) - the solution here appears to be to use MOSEK instead
        of cvxopt, and to iteratively remove constraints when they cause problems. Usually, though, after cleaning up
        the data, everything can be fit without removing constraints.

        At the end of the day, this algorithm seems to be useful only for niche applications, because enforcing
        ground-state preservation causes a giant bias in the fit and makes the error in E-above-hull highly correlated
        with the value of E-above-hull. The result is that the entropies come out completely wrong, and entropies are
        usually what you want out of a cluster expansion.

        So, use the code at your own risk. AFAIK, it works as described in Wenxuan's paper, with various additions from
        me for numerical stability. It has not been extensively tested or used in real projects, though, due to the
        bias issue I described above. I think that unless the bias problem is resolved, this fitting scheme will not be
        of much practical use.
        """
        if not subsample_mapping:
            assert A.shape[0] == self.feature_matrix.shape[0]
            subsample_mapping = {}
            for i in range(self.feature_matrix.shape[0]):
                subsample_mapping[i] = i

        from cvxopt import matrix
        from cvxopt import solvers
        from pymatgen.core.periodic_table import get_el_sp
        try:
            import mosek
        except ImportError:
            raise ValueError("GS preservation fitting is finicky and MOSEK solvers are typically required for numerical stability.")
        solvers.options['show_progress'] = False
        solvers.options['MOSEK'] = {mosek.dparam.check_convexity_rel_tol: 1e-6}

        ehull = list(self.e_above_hull_input)
        structure_index_at_hull = [i for (i,e) in enumerate(ehull) if e < 1e-5]

        reduce_composition_at_hull = [
            self.structures[i].composition.element_composition.reduced_composition.element_composition for
            i in structure_index_at_hull]

        all_corr_in = np.array(self.feature_matrix)
        all_engr_in = np.array(self.normalized_energies)

        # Some structures can be degenerate in correlation space, even if they are distinct in reality. We can't
        # constrain their energies since as far as the CE is concerned, same correlation = same structure
        duplicated_correlation_set = []
        for i in range(len(all_corr_in)):
            if i not in structure_index_at_hull:
                for j in structure_index_at_hull:
                    if np.max(np.abs(all_corr_in[i] - all_corr_in[j])) < 1e-6:
                        logging.info("Structure {} ({} - {}) has the same correlation as hull structure {} ({} {})".format(i,
                                                                    self.structures[i].composition.element_composition.reduced_formula,
                                                                    self.spacegroups[i],
                                                                    j,
                                                                    self.structures[j].composition.element_composition.reduced_formula,
                                                                    self.spacegroups[j]))
                        duplicated_correlation_set.append(i)

        all_engr_in.shape = (len(all_engr_in), 1)
        f.shape = (f.shape[0], 1)

        # Adjust weights if the subsample changed what's included and what's not
        weights_tmp = []
        for i in range(A.shape[0]):
            weights_tmp.append(self.weights[subsample_mapping[i]])

        subsample_mapping_inv = {}
        for i, j in subsample_mapping.items():
            subsample_mapping_inv[j] = i
        for i in duplicated_correlation_set:
            if i in subsample_mapping_inv.keys():
                weights_tmp[subsample_mapping_inv[i]] = 0


        weight_vec = np.array(weights_tmp)

        weight_matrix = np.diag(weight_vec.transpose())

        N_corr = A.shape[1]

        # Deal with roundoff error making P not positive semidefinite by using the SVD of A
        # At = USV*
        # At A = U S St Ut -> any negatives in S get squared
        # Unfortunately, this is usually not enough, so the next step is to explicitly add something small (1e-10)
        # to all eigenvalues so that eigenvalues close to zero are instead very slightly positive.
        # Otherwise, random numerical error makes the matrix not positive semidefinite, and the convex optimization
        # gets confused
        Aw = weight_matrix.dot(A)
        u, s, v = la.svd(Aw.transpose())
        Ss = np.pad(np.diag(s), ((0, u.shape[0] - len(s)),(0,0)), mode='constant', constant_values=0)
        P_corr_part = 2 * u.dot((Ss.dot(Ss.transpose()))).dot(u.transpose())
        P = np.lib.pad(P_corr_part, ((0, N_corr), (0, N_corr)), mode='constant', constant_values=0)
        P = 0.5 * (P + P.transpose())
        ev, Q = la.eigh(P)
        Qi = la.inv(Q)
        P = Q.dot(np.diag(np.abs(ev)+1e-10)).dot(Qi)

        q_corr_part = -2 * ((weight_matrix.dot(A)).transpose()).dot(f)
        q_z_part = np.ones((N_corr, 1)) / mu
        q = np.concatenate((q_corr_part, q_z_part), axis=0)

        G_1 = np.concatenate((np.identity(N_corr), -np.identity(N_corr)), axis=1)
        G_2 = np.concatenate((-np.identity(N_corr), -np.identity(N_corr)), axis=1)
        G_3 = np.concatenate((G_1, G_2), axis=0)
        h_3 = np.zeros((2 * N_corr, 1))

        # formulation is min 1/2 x'Px+ q'x s.t.: Gx<=h, Ax=b

        # P = 2 * A^T A
        # q = -2 * E^T A = q^T -> q = -2 * A^T E

        # See Wenxuan npjCompMat paper for derivation. All of the above mess is implementing this formula, plus dealing
        # with numerical issues with zero eigenvalues getting rounded off to something slightly negative

        init_vals = matrix(np.linalg.lstsq(self.feature_matrix, self.normalized_energies)[0])

        input_entries = []
        for s, e in zip(self.structures, self.energies):
            input_entries.append(PDEntry(s.composition.element_composition, e))
        max_e = max(input_entries, key=lambda e: e.energy_per_atom).energy_per_atom + 1000
        for el in self.ce.structure.composition.keys():
            input_entries.append(PDEntry(Composition({el: 1}).element_composition, max_e))
        pd_input = PhaseDiagram(input_entries)

        constraint_strings = []

        # Uncomment to save various matrices for debugging purposes
        #np.save("A.npy", A)
        #np.save("f.npy", f)
        #np.save("w.npy", weight_vec)
        #np.save("P.npy", P)
        #np.save("q.npy", q)
        #np.save("G_noC.npy", G_3)
        #np.save("h_noC.npy", h_3)

        # The next part deals with adding constraints based on on-hull/off-hull compositions
        # Once again, there are numerical errors that arise when some structures are very close in correlation space
        # or in energy, such that the solver runs into either numerical issues or something else. The solution seems
        # to be to add constraints in batches, and try the increasingly constrained fit every once in a while.
        # When the fitting fails, roll back to find the problematic constraint and remove it. Usually there isn't more
        # than one or two bad constraints, and looking at them by hand is enough to figure out why they are causing
        # problems.
        BATCH_SIZE = int(np.sqrt(len(all_corr_in)))
        tot_constraints = 0
        removed_constraints = 0
        if not skip_gs:
            for i in range(len(all_corr_in)):
                if i not in structure_index_at_hull and i not in duplicated_correlation_set:

                    reduced_comp = self.structures[i].composition.element_composition.reduced_composition.element_composition
                    if reduced_comp in reduce_composition_at_hull:  ## in hull composition

                        hull_idx = reduce_composition_at_hull.index(reduced_comp)
                        global_index = structure_index_at_hull[hull_idx]

                        G_3_new_line = np.concatenate((all_corr_in[global_index] - all_corr_in[i], np.zeros((N_corr))))

                        G_3_new_line.shape = (1, 2 * N_corr)
                        G_3 = np.concatenate((G_3, G_3_new_line), axis=0)
                        small_error = np.array(-1e-3) # TODO: This tolerance is actually quite big, but it can be reduced as needed
                        small_error.shape = (1, 1)
                        h_3 = np.concatenate((h_3, small_error), axis=0)
                        tot_constraints += 1
                        string = "{}|Added constraint from {}({} - {}) structure at hull comp".format(h_3.shape[0], reduced_comp, self.spacegroups[i], i)
                        print(string)
                        constraint_strings.append(string)

                    else:  # out of hull composition

                        comp_now = self.structures[i].composition.element_composition.reduced_composition.element_composition
                        decomposition_now = pd_input.get_decomposition(comp_now)
                        new_vector = -1.0 * all_corr_in[i]
                        for decompo_keys, decompo_values in decomposition_now.items():
                            reduced_decompo_keys = decompo_keys.composition.element_composition.reduced_composition.element_composition
                            index_1 = reduce_composition_at_hull.index(reduced_decompo_keys)
                            vertex_index_global = structure_index_at_hull[index_1]
                            new_vector = new_vector + decompo_values * all_corr_in[vertex_index_global]

                        G_3_new_line = np.concatenate((new_vector, np.zeros(N_corr)))

                        G_3_new_line.shape = (1, 2 * N_corr)
                        G_3 = np.concatenate((G_3, G_3_new_line), axis=0)

                        small_error = np.array(-1e-3)
                        small_error.shape = (1, 1)
                        h_3 = np.concatenate((h_3, small_error), axis=0)
                        tot_constraints += 1
                        string = "{}|Added constraint from {}({}) structure not at hull comp".format(h_3.shape[0], reduced_comp, i)
                        print(string)
                        constraint_strings.append(string)

                elif i in structure_index_at_hull:
                    if self.structures[i].composition.element_composition.is_element:
                        continue

                    entries_new = []
                    for j in structure_index_at_hull:
                        if not j == i:
                            entries_new.append(
                                PDEntry(self.structures[j].composition.element_composition, self.energies[j]))

                    for el in self.ce.structure.composition.keys():
                        entries_new.append(PDEntry(Composition({el: 1}).element_composition,
                                                   max(self.normalized_energies) + 1000))

                    pd_new = PhaseDiagram(entries_new)

                    comp_now = self.structures[i].composition.element_composition.reduced_composition.element_composition
                    decomposition_now = pd_new.get_decomposition(comp_now)

                    new_vector = all_corr_in[i]

                    abandon = False
                    print("Constraining gs of {}({})".format(self.structures[i].composition, self.structures[i].composition))
                    for decompo_keys, decompo_values in decomposition_now.items():
                        reduced_decompo_keys = decompo_keys.composition.element_composition.reduced_composition.element_composition
                        if not reduced_decompo_keys in reduce_composition_at_hull:
                            abandon = True
                            break

                        index = reduce_composition_at_hull.index(reduced_decompo_keys)
                        vertex_index_global = structure_index_at_hull[index]
                        new_vector = new_vector - decompo_values * all_corr_in[vertex_index_global]
                    if abandon:
                        continue

                    G_3_new_line = np.concatenate((new_vector, np.zeros(N_corr)))

                    G_3_new_line.shape = (1, 2 * N_corr)
                    G_3 = np.concatenate((G_3, G_3_new_line), axis=0)
                    small_error = np.array(-1e-3) # TODO: Same tolerance as above
                    small_error.shape = (1, 1)
                    h_3 = np.concatenate((h_3, small_error), axis=0)
                    tot_constraints += 1
                    string = "{}|Added constraint from {}({}) structure on hull, decomp".format(h_3.shape[0], comp_now, i)
                    print(string)
                    constraint_strings.append(string)

                if i % BATCH_SIZE == 0 or i == len(all_corr_in)-1:
                    valid = False
                    const_remove = 0
                    G_t = deepcopy(G_3)
                    h_t = deepcopy(h_3)
                    # Remove constraints until fit works
                    while not valid:
                        sol = solvers.qp(matrix(P), matrix(q), matrix(G_3), matrix(h_3), initvals=init_vals, solver='mosek')
                        if sol['status'] == 'optimal':
                            valid = True
                        else:
                            const_remove += 1
                            G_3 = G_t[:-1 * (const_remove),:]
                            h_3 = h_t[:-1 * (const_remove)]
                            removed_constraints += 1

                    if const_remove > 0:
                        constraint_strings.append("{}|Removed".format(G_t.shape[0] - const_remove + 1))

                    # Add constraints back in one by one and remove if they cause problems
                    for num_new in range(1, const_remove):
                        G_new_line = G_t[-1 * (const_remove - num_new),:]
                        h_new_line = h_t[-1 * (const_remove - num_new)]
                        G_new_line.shape = (1, 2 * N_corr)
                        h_new_line.shape = (1,1)
                        G_3 = np.concatenate((G_3, G_new_line), axis=0)
                        h_3 = np.concatenate((h_3, h_new_line), axis=0)
                        sol = solvers.qp(matrix(P), matrix(q), matrix(G_3), matrix(h_3), initvals=init_vals, solver='mosek')
                        removed_constraints -= 1
                        if sol['status'] != 'optimal':
                            G_3 = G_3[:-1, :]
                            h_3 = h_3[:-1]
                            removed_constraints += 1
                            constraint_strings.append("{}|Removed".format(G_t.shape[0] - const_remove + num_new + 1))
            # Uncomment for iteratively saving matrices
            #np.save("G.npy", G_3)
            #np.save("h.npy", h_3)



        # Uncomment for debugging
        #np.save("G.npy", G_3)
        #np.save("h.npy", h_3)

        sol = solvers.qp(matrix(P), matrix(q), matrix(G_3), matrix(h_3), initvals=init_vals, solver='mosek')
        print("Final status: {}".format(sol['status']))
        print("Mu: {}".format(mu))
        print("Constraints: {}/{}".format(tot_constraints - removed_constraints, tot_constraints))
        ecis = np.array(sol['x'])[:N_corr, 0]

        # Uncomment for some debugging info
        #print(ecis)
        #for string in constraint_strings:
        #    print(string)
        return ecis
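To make the QP formulation sketched in the comments above (min 1/2 x'Px + q'x subject to Gx <= h, with P built from 2 A^T A and q from -2 A^T f plus a 1/mu penalty on auxiliary variables z bounding |x|) more concrete, here is a small self-contained illustration on toy data. It uses random numbers and cvxopt's default QP solver rather than MOSEK, and it omits the weights and the ground-state constraint rows, so it only demonstrates the construction, not the fitting scheme itself.

# Toy illustration of the QP setup: minimize ||A x - f||^2 + (1/mu) * sum(z)
# with -z <= x <= z, written as min 1/2 y'P y + q'y s.t. G y <= h for y = [x, z].
import numpy as np
from cvxopt import matrix, solvers

rng = np.random.default_rng(0)
A = rng.standard_normal((20, 5))
f = A @ np.array([1.0, 0.0, -2.0, 0.0, 0.5]) + 0.01 * rng.standard_normal(20)
mu = 10.0
n = A.shape[1]

P = np.zeros((2 * n, 2 * n))
P[:n, :n] = 2 * A.T @ A                                             # P = 2 A^T A on the x-block
q = np.concatenate((-2 * A.T @ f, np.ones(n) / mu)).reshape(-1, 1)  # q = [-2 A^T f, 1/mu]

G = np.block([[np.eye(n), -np.eye(n)],                              #  x - z <= 0
              [-np.eye(n), -np.eye(n)]])                            # -x - z <= 0
h = np.zeros((2 * n, 1))

solvers.options['show_progress'] = False
sol = solvers.qp(matrix(P), matrix(q), matrix(G), matrix(h))
x = np.array(sol['x'])[:n, 0]
print(sol['status'], x.round(3))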