Example #1
# Imports shared by the examples below.
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold, train_test_split
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from classifip.models.qda import LinearDiscriminant
from classifip.dataset.uci_data_set import export_data_set


def computing_cv_accuracy_imprecise(in_path=None, ell_optimal=0.1, cv_n_fold=10):
    # Discounted utilities for set-valued predictions: a correct singleton
    # scores 1.0, a correct two-class prediction scores 0.65 (u65) or 0.8 (u80).
    def u65(mod_Y):
        return 1.6 / mod_Y - 0.6 / mod_Y ** 2

    def u80(mod_Y):
        return 2.2 / mod_Y - 1.2 / mod_Y ** 2

    data = export_data_set('iris.data') if in_path is None else pd.read_csv(in_path)
    print("-----DATA SET TRAINING---", in_path)
    X = data.iloc[:, :-1].values
    y = np.array(data.iloc[:, -1].tolist())
    mean_u65, mean_u80 = 0, 0
    lqa = LinearDiscriminant(init_matlab=True)
    kf = KFold(n_splits=cv_n_fold, random_state=None, shuffle=True)
    for idx_train, idx_test in kf.split(y):
        X_cv_train, y_cv_train = X[idx_train], y[idx_train]
        X_cv_test, y_cv_test = X[idx_test], y[idx_test]
        lqa.learn(X_cv_train, y_cv_train, ell=ell_optimal)
        sum_u65, sum_u80 = 0, 0
        n_test, _ = X_cv_test.shape
        for i, test in enumerate(X_cv_test):
            print("--TESTING-----", i, ell_optimal)
            evaluate, _ = lqa.evaluate(test)
            print(evaluate, "-----", y_cv_test[i])
            if y_cv_test[i] in evaluate:
                sum_u65 += u65(len(evaluate))
                sum_u80 += u80(len(evaluate))
        mean_u65 += sum_u65 / n_test
        mean_u80 += sum_u80 / n_test
    mean_u65 = mean_u65 / cv_n_fold
    mean_u80 = mean_u80 / cv_n_fold
    print("--ell-->", ell_optimal, "--->", mean_u65, mean_u80)
Example #2
def computing_best_imprecise_mean(in_path=None,
                                  seed=0,
                                  cv_n_fold=10,
                                  from_ell=0.1,
                                  to_ell=1.0,
                                  by_ell=0.1):
    def u65(mod_Y):
        return 1.6 / mod_Y - 0.6 / mod_Y**2

    def u80(mod_Y):
        return 2.2 / mod_Y - 1.2 / mod_Y**2

    data = export_data_set('iris.data') if in_path is None else pd.read_csv(
        in_path)
    print("-----DATA SET TRAINING---", in_path)
    X = data.iloc[:, :-1].values
    y = np.array(data.iloc[:, -1].tolist())
    ell_u65, ell_u80 = dict(), dict()
    lqa = LinearDiscriminant(init_matlab=True)
    # Hold out 40% of the data; the grid search below uses only the training part.
    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=0.4, random_state=seed)

    kf = KFold(n_splits=cv_n_fold, random_state=None, shuffle=True)
    # Materialise the folds once so every value of ell is scored on the same splits.
    splits = list(kf.split(y_train))
    print("Splits --->", splits)

    # Note: np.arange excludes to_ell itself, so ell sweeps [from_ell, to_ell).
    for ell_current in np.arange(from_ell, to_ell, by_ell):
        ell_u65[ell_current], ell_u80[ell_current] = 0, 0
        for idx_train, idx_test in splits:
            print("---k-FOLD-new-executing--")
            X_cv_train, y_cv_train = X_train[idx_train], y_train[idx_train]
            X_cv_test, y_cv_test = X_train[idx_test], y_train[idx_test]
            lqa.learn(X_cv_train, y_cv_train, ell=ell_current)
            sum_u65, sum_u80 = 0, 0
            n_test = len(idx_test)
            for i, test in enumerate(X_cv_test):
                evaluate, _ = lqa.evaluate(test)
                print("----TESTING-----", i, ell_current, "|---|", evaluate,
                      "-----", y_cv_test[i])
                if y_cv_test[i] in evaluate:
                    sum_u65 += u65(len(evaluate))
                    sum_u80 += u80(len(evaluate))
            ell_u65[ell_current] += sum_u65 / n_test
            ell_u80[ell_current] += sum_u80 / n_test
        print("-------ELL_CURRENT-----", ell_current)
        ell_u65[ell_current] = ell_u65[ell_current] / cv_n_fold
        ell_u80[ell_current] = ell_u80[ell_current] / cv_n_fold
        print("u65-->", ell_u65[ell_current])
        print("u80-->", ell_u80[ell_current])
    print("--->", ell_u65, ell_u80)
Example #3
def output_paper_result(in_path=None):
    data = export_data_set(
        'bin_normal_rnd.data') if in_path is None else pd.read_csv(in_path)
    X = data.loc[:, ['x1', 'x2']].values
    y = data.y.tolist()
    lqa = LinearDiscriminant(init_matlab=True)
    lqa.learn(X, y, ell=2)
    lqa.plot2D_classification(np.array([2, 2]), colors={0: 'red', 1: 'blue'})
    lqa.plot2D_decision_boundary()
Example #4
def computing_time_prediction(in_path=None):
    import time
    data = export_data_set('iris.data') if in_path is None else pd.read_csv(in_path)
    print("-----DATA SET TRAINING---", in_path)
    X = data.iloc[:, :-1].values
    y = data.iloc[:, -1].tolist()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
    lqa = LinearDiscriminant(init_matlab=True)
    lqa.learn(X_train, y_train, ell=0.01)
    sum_time = 0
    n, _ = X_test.shape
    for i, test in enumerate(X_test):
        start = time.time()
        evaluate, _ = lqa.evaluate(test)
        end = time.time()
        print(evaluate, "-----", y_test[i], '--time--', (end - start))
        sum_time += (end - start)
    print("--->", sum_time, '---n---', n)
Example #5
def computing_precise_vs_imprecise(in_path=None, ell_optimal=0.1, seeds=[0]):
    # seeds: sequence of random states, one train/test split per seed.
    def u65(mod_Y):
        return 1.6 / mod_Y - 0.6 / mod_Y**2

    def u80(mod_Y):
        return 2.2 / mod_Y - 1.2 / mod_Y**2

    data = export_data_set('iris.data') if in_path is None else pd.read_csv(
        in_path)
    print("-----DATA SET TRAINING---", in_path)
    X = data.iloc[:, :-1].values
    y = data.iloc[:, -1].tolist()
    n_time = len(seeds)
    lda_imp = LinearDiscriminant(init_matlab=True)
    lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
    mean_u65_imp, mean_u80_imp, u_mean = 0, 0, 0
    for k in range(0, n_time):
        X_train, X_test, y_train, y_test = \
            train_test_split(X, y, test_size=0.4, random_state=seeds[k])
        lda_imp.learn(X_train, y_train, ell=ell_optimal)
        lda.fit(X_train, y_train)
        sum_u65, sum_u80 = 0, 0
        u_precise, n_real_test = 0, 0
        for i, test in enumerate(X_test):
            print("--TESTING-----", i)
            evaluate_imp, _ = lda_imp.evaluate(test)
            if len(evaluate_imp) > 1:
                n_real_test += 1
                if y_test[i] in evaluate_imp:
                    sum_u65 += u65(len(evaluate_imp))
                    sum_u80 += u80(len(evaluate_imp))
                evaluate = lda.predict([test])
                if y_test[i] in evaluate:
                    u_precise += u80(len(evaluate))
        mean_u65_imp += sum_u65 / n_real_test
        mean_u80_imp += sum_u80 / n_real_test
        u_mean += u_precise / n_real_test
        print("--time_k--u65-->", k, sum_u65 / n_real_test)
        print("--time_k--u80-->", k, sum_u80 / n_real_test)
        print("--time_k--precise-->", k, u_precise / n_real_test)
    print("--global--u65-->", mean_u65_imp / n_time)
    print("--global--u80-->", mean_u80_imp / n_time)
    print("--global--precise-->", u_mean / n_time)
Example #6
def output_paper_zone_imprecise():
    data = export_data_set('iris.data')
    X = data.iloc[:, 0:2].values
    y = data.iloc[:, -1].tolist()
    lqa = LinearDiscriminant(init_matlab=True)
    lqa.learn(X, y, ell=5)
    query = np.array([5.0, 2])
    answer, _ = lqa.evaluate(query)
    print("Imprecise prediction at", query, "--->", answer)
    # lqa.plot2D_classification(query)
    lqa.plot2D_decision_boundary()
Example #7
def computing_accuracy_imprecise(in_path=None, seeds=[0], ell_optimal=0.1):
    def u65(mod_Y):
        return 1.6 / mod_Y - 0.6 / mod_Y**2

    def u80(mod_Y):
        return 2.2 / mod_Y - 1.2 / mod_Y**2

    data = export_data_set('iris.data') if in_path is None else pd.read_csv(
        in_path)
    print("-----DATA SET TRAINING---", in_path)
    X = data.iloc[:, :-1].values
    y = data.iloc[:, -1].tolist()
    n_time = len(seeds)
    mean_u65 = 0
    mean_u80 = 0
    lqa = LinearDiscriminant(init_matlab=True)
    for k in range(0, n_time):
        X_train, X_test, y_train, y_test = \
            train_test_split(X, y, test_size=0.4, random_state=seeds[k])
        lqa.learn(X_train, y_train, ell=ell_optimal)
        sum_u65 = 0
        sum_u80 = 0
        n_test, _ = X_test.shape
        for i, test in enumerate(X_test):
            print("--TESTING-----", i, ell_optimal)
            evaluate, _ = lqa.evaluate(test)
            print(evaluate, "-----", y_test[i])
            if y_test[i] in evaluate:
                sum_u65 += u65(len(evaluate))
                sum_u80 += u80(len(evaluate))
        print("--ell_65_k_time---", k, sum_u65 / n_test)
        print("--ell_u80_k_time---", k, sum_u80 / n_test)
        mean_u65 += sum_u65 / n_test
        mean_u80 += sum_u80 / n_test
    mean_u65 = mean_u65 / n_time
    mean_u80 = mean_u80 / n_time
    print("--ell-->", ell_optimal, "--->", mean_u65, mean_u80)
Example #8
def _test_ILDA(in_train=None, features=None):
    ilda = LinearDiscriminant(DEBUG=True)
    data = export_data_set('iris.data') if in_train is None else pd.read_csv(
        in_train)
    # __test_imprecise_model is a shared test helper defined elsewhere (not shown here).
    __test_imprecise_model(ilda, data, features, hgrid=0.1)
Example #9
'''
@author: Yonatan-Carlos Carranza-Alarcon

Imprecise Gaussian Discriminant

multi-class classification
'''

from classifip.models.qda import LinearDiscriminant
from classifip.dataset.uci_data_set import export_data_set

# We start by creating an instance of the base classifier we want to use
print(
    "Example of Imprecise Linear Discriminant Analysis for multi-class classification - Data set IRIS \n"
)
model = LinearDiscriminant(solver_matlab=False, DEBUG=False)
data = export_data_set('iris.data')

# Learning
X = data.iloc[:, :-1].values
y = data.iloc[:, -1].tolist()
model.learn(X=X, y=y, ell=5)

# Evaluation: we can choose the method used to minimise the underlying convex problem (here, quadratic)
test = model.evaluate(query=X[2], method="quadratic")

# The output is a list of probability intervals; we can print it:
print(
    "\nPrediction using the interval dominance criterion with 0/1 costs + quadratic method\n"
)
print(test)
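In the earlier examples evaluate() is unpacked as a pair whose first element is the set of non-dominated classes. Assuming the same contract holds here (an assumption, not confirmed by this snippet), the prediction can be inspected directly:

# Assuming evaluate() returns (prediction, auxiliary) as in the examples above.
prediction, _ = model.evaluate(query=X[2], method="quadratic")
print("Non-dominated classes for instance 2:", prediction)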