Example #1
File: 3.1run_MGGS.py Project: boliqq07/BGP
        estimator_all.append(GridSearchCV(me1, cv=cv1, scoring=scoring1, param_grid=param_grid1, n_jobs=1))

    """union"""
    index_all = [tuple(index[0]) for _ in index_all for index in _[:10]]
    index_all = list(set(index_all))

    """get x_name and abbr"""
    index_all_name = name_to_name(X_frame.columns.values, search=[i for i in index_all],
                                  search_which=0, return_which=(1,), two_layer=True)

    index_all_name = [list(set([re.sub(r"_\d", "", j) for j in i])) for i in index_all_name]
    for i in index_all_name:
        i.sort()
    index_all_abbr = name_to_name(name_init, abbr_init, search=index_all_name, search_which=1, return_which=2,
                                  two_layer=True)

    store.to_pkl_pd(index_all, "index_all")
    store.to_csv(index_all_name, "index_all_name")
    store.to_csv(index_all_abbr, "index_all_abbr")

    ugs = UGS(estimator_all, index_all, estimator_n=[2, 3], n_jobs=3)
    ugs.fit(X, y)
    # re = gs.cv_score_all(index_all)
    binary_distance = ugs.cal_binary_distance_all(index_all, estimator_i=3)
    # slice_k  = gs._cv_predict_all(estimator_i=3)
    groups = ugs.cal_group(estimator_i=3, printing=True, print_noise=0.2, pre_binary_distance_all=binary_distance)
    ugs.cluster_print(binary_distance, highlight=[1, 2, 3])

    # groups = ugs.cal_t_group(printing=False, pre_group=None)
    # ss=ugs.select_ugs(alpha=0.01)
    # results = gs.select_gs(alpha=0.01)
    # gs.cal_group(eps=0.10, estimator_i=1, printing=True, pre_binary_distance_all=slice_g, print_noise=0.1,
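For reference, a minimal sketch of the GridSearchCV construction that the loop at the top of this example appends to estimator_all; the estimator, splitter, scorer, and grid below are illustrative stand-ins for me1, cv1, scoring1, and param_grid1 from the snippet.

from sklearn.model_selection import GridSearchCV, KFold
from sklearn.svm import SVR

# Illustrative stand-ins for me1 / cv1 / scoring1 / param_grid1 above.
me1 = SVR()
cv1 = KFold(n_splits=5, shuffle=True, random_state=0)
scoring1 = "r2"
param_grid1 = {"C": [0.1, 1, 10], "gamma": ["scale", 0.01]}

gs = GridSearchCV(me1, cv=cv1, scoring=scoring1, param_grid=param_grid1, n_jobs=1)
# gs.fit(X, y) then exposes gs.best_estimator_ and gs.best_score_.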
Example #2
        # Rct ** (-1) - beta * F ** 2 / (R * T)*(k1p * (1 - Thetah) - k_1p * Thetah + k2p * Thetah),
        # taup ** (-1) - F / q * (4 * k3 * Thetah + k1p + k_1p + k2p),
        Thetah - ((k1p + k_1p + k2p) + sympy.sqrt(
            (k1p + k_1p + k2p)**2 + 8 * k1p * k3)),
        k1p - k1 * sympy.exp(-beta * F * E / (R * T)),
        k_1p - k_1 * sympy.exp((1 - beta) * F * E / (R * T)),
        k2p - k2 * sympy.exp(-beta * F * E / (R * T)),
    ],
    [Thetah, k1p, k_1p, k2p])

print(result)

from mgetool.exports import Store

store = Store(r'C:\Users\Administrator\Desktop\cl')
store.to_pkl_pd(result, "result")
"""fitting"""
exps1 = (beta * F**2 / (R * T) *
         (k1p * (1 - Thetah) - k_1p * Thetah + k2p * Thetah))**(-1)
exps2 = (F / q * (4 * k3 * Thetah + k1p + k_1p + k2p))**(-1)
exps3 = (beta * F**2 / (R * T) * (k2p - k1p - k_1p) *
         (k1p * (1 - Thetah) - k_1p * Thetah + k2p * Thetah) /
         (4 * k3 * Thetah + k2p + k1p + k_1p))**(-1)

subbb1 = {
    Thetah: result[0][0],
}
subbb2 = {
    k1p: result[0][1],
    k_1p: result[0][2],
    k2p: result[0][3],
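The block above follows SymPy's standard solve-then-substitute workflow; a self-contained sketch with a toy two-equation system (the symbols are illustrative, not the electrochemical quantities used above):

import sympy

x, y, a = sympy.symbols("x y a", positive=True)

# Solve a small system symbolically, analogous to solving for Thetah, k1p, k_1p, k2p above.
result = sympy.solve([x + y - a, x - 2 * y], [x, y])

# Substitute the solution back into a derived expression, as done with subbb1/subbb2.
expr = (x * y) ** -1
print(expr.subs(result))  # 9/(2*a**2)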
Example #3
File: 3.0 SL_unit.py Project: boliqq07/BGP
    x, y = X, Y

    # y_unit
    from sympy.physics.units import eV, elementary_charge, m, pm

    y_u = eV
    # c_unit
    c = [1, 5.290 * 10**-11, 1.74, 2, 3, 4, 1 / 2, 1 / 3, 1 / 4]
    c_u = [
        elementary_charge, m, dless, dless, dless, dless, dless, dless, dless
    ]
    """preprocessing"""
    dims = [
        Dim.convert_to_Dim(i, target_units=None, unit_system="SI") for i in x_u
    ]

    x, x_dim = Dim.convert_x(x, x_u, target_units=None, unit_system="SI")
    y, y_dim = Dim.convert_xi(y, y_u)
    c, c_dim = Dim.convert_x(c, c_u)

    scal = MagnitudeTransformer(tolerate=1)

    group_size = 2
    n = X.shape[1]
    indexes = list(range(n))
    # group the features pairwise, starting from the third column
    group = [indexes[i:i + group_size] for i in range(2, len(indexes), group_size)]
    x, y = scal.fit_transform_all(x, y, group=group)
    c = scal.fit_transform_constant(c)
    store.to_pkl_pd(scal, "si_transformer")
    store.to_pkl_pd((x, x_dim, y, y_dim, c, c_dim, X, Y), "SL_data")
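Dim.convert_to_Dim and Dim.convert_x are BGP helpers not shown here; as an assumption, they build on SymPy's unit machinery, and the underlying conversion can be sketched directly with sympy.physics.units.convert_to:

from sympy.physics.units import convert_to, eV, joule, kilogram, meter, second

# Express 1 eV in SI, roughly what converting y_u = eV to the "SI" unit system entails.
print(convert_to(eV, joule))                      # ~1.602e-19*joule
print(convert_to(eV, [kilogram, meter, second]))  # ~1.602e-19*kilogram*meter**2/second**2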
Example #4
    clf = Exhaustion(estimator,
                     n_select=n_select,
                     muti_grade=2,
                     muti_index=[2, X.shape[1]],
                     must_index=None,
                     n_jobs=1,
                     refit=True).fit(X, y)

    name_ = name_to_name(X_frame.columns.values,
                         search=[i[0] for i in clf.score_ex[:10]],
                         search_which=0,
                         return_which=(1, ),
                         two_layer=True)
    sc = np.array(clf.scatter)

    for i in clf.score_ex[:]:
        print(i[1])
    for i in name_:
        print(i)

    t = clf.predict(X)
    p = BasePlot()
    p.scatter(y, t, strx='True $E_{gap}$', stry='Calculated $E_{gap}$')
    plt.show()
    p.scatter(sc[:, 0], sc[:, 1], strx='Number', stry='Score')
    plt.show()

    store.to_csv(sc, method_name + "".join([str(i) for i in n_select]))
    store.to_pkl_pd(clf.score_ex,
                    method_name + "".join([str(i) for i in n_select]))
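Exhaustion is a BGP class; assuming it scores every feature subset of the requested sizes by cross-validation, the idea can be sketched with plain scikit-learn, yielding the kind of (subset, score) pairs read out of clf.score_ex above (all names below are illustrative):

from itertools import combinations

from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score

X, y = make_regression(n_samples=100, n_features=6, noise=0.1, random_state=0)

# Score every 2-feature subset by 5-fold CV, analogous to a small n_select.
scores = []
for idx in combinations(range(X.shape[1]), 2):
    s = cross_val_score(Ridge(), X[:, list(idx)], y, cv=5, scoring="r2").mean()
    scores.append((idx, s))

scores.sort(key=lambda t: t[1], reverse=True)
print(scores[:3])  # the best few subsets and their CV scores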
Example #5
    all_import_title = com_data.join(ele_ratio)
    all_import_title = all_import_title.join(depart_elements_table)
    """sub density to e density"""
    select2 = ['electron number_0', 'electron number_1', 'cell volume']
    x_frame = (all_import_title['electron number_0'] +
               all_import_title['electron number_1']
               ) / all_import_title['cell volume']
    all_import_title['cell density'] = x_frame
    all_import_title.rename(columns={'cell density': "electron density"},
                            inplace=True)

    name = [
        "electron density" if i == "cell density" else i
        for i in name_and_abbr[0]
    ]
    abbr = [r"$\rho_e$" if i == r"$\rho_c$" else i for i in name_and_abbr[1]]
    name_and_abbr = [name, abbr]
    dims[-3] = np.array([0, -3, 0, 0, 0, 0, 0])

    store.to_csv(all_import_title, "all_import_title")
    all_import = all_import_title.drop([
        'name_number', "name", "structure", "structure_type",
        "space_group", "reference", 'material_id', 'composition', "com_0",
        "com_1"
    ],
                                       axis=1)

    store.to_pkl_pd(dims, "dims")
    store.to_pkl_pd(name_and_abbr, "name_and_abbr")
    store.to_csv(all_import, "all_import")
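The derived-column step in this example is plain pandas; a toy, self-contained version of the same compute-then-rename pattern (the DataFrame below is made up for illustration):

import pandas as pd

df = pd.DataFrame({"electron number_0": [10, 12],
                   "electron number_1": [8, 9],
                   "cell volume": [50.0, 60.0]})

# Add the derived column, then rename it, mirroring the snippet above.
df["cell density"] = (df["electron number_0"] + df["electron number_1"]) / df["cell volume"]
df.rename(columns={"cell density": "electron density"}, inplace=True)
print(df)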
Example #6
File: wrtem.py Project: boliqq07/BGP
# # # Preprocessing
# minmax = MinMaxScaler()
# x = minmax.fit_transform(x)
x_, y_ = shuffle(x, y, random_state=2)

# # # Modeling
method_all = ['SVR-set', "GPR-set", "RFR-em", "AdaBR-em", "DTR-em", "LASSO-L1", "BRR-L1"]
methods = method_pack(method_all=method_all,
                      me="reg", gd=True)
pre_y = []
ests = []
for name, methodi in zip(method_all, methods):
    methodi.cv = 5
    methodi.scoring = "neg_root_mean_squared_error"
    gd = methodi.fit(X=x_, y=y_)
    score = gd.best_score_
    est = gd.best_estimator_
    print(name, "neg_root_mean_squared_error", score)
    score = cross_val_score(est, X=x_, y=y_, scoring="r2", ).mean()
    print(name, "r2", score)
    pre_yi = est.predict(x)
    pre_y.append(pre_yi)
    ests.append(est)
    store.to_pkl_pd(est, name)

pre_y.append(y)
pre_y = np.array(pre_y).T
pre_y = pd.DataFrame(pre_y)
pre_y.columns = method_all + ["real_y"]
store.to_csv(pre_y, "wrtem_result")
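method_pack is a BGP helper; assuming it simply bundles grid-searched regressors, the comparison loop above can be reproduced with plain scikit-learn as sketched below (the data and grids are illustrative):

from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.svm import SVR

x_, y_ = make_regression(n_samples=120, n_features=5, noise=0.2, random_state=2)

# Hand-rolled stand-in for method_pack: (name, grid-searched estimator) pairs.
methods = [
    ("SVR-set", GridSearchCV(SVR(), {"C": [1, 10]}, cv=5,
                             scoring="neg_root_mean_squared_error")),
    ("RFR-em", GridSearchCV(RandomForestRegressor(random_state=0),
                            {"n_estimators": [100, 300]}, cv=5,
                            scoring="neg_root_mean_squared_error")),
]

for name, gd in methods:
    gd.fit(x_, y_)
    print(name, "neg_root_mean_squared_error", gd.best_score_)
    print(name, "r2", cross_val_score(gd.best_estimator_, x_, y_, scoring="r2").mean())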