Example No. 1
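# Loads a dataset with ToFormNumpy, min-max scales the features in place, and
# prints the first three values returned by compactness.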
def main():
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\gasterlogy1394.txt")
    X, types, y = ToFormNumpy("D:\\tanlanmalar\\spame.txt")
    # X, types, y = ToFormNumpy("D:\\tanlanmalar\\MATBIO_MY.txt")
    minmax_scale(X, copy=False)

    res = compactness(X, y, types=types, metric=1)

    print(res[0], res[1], res[2])
Example No. 2
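# Computes Lagranj_nd feature weights, then grows the selected subset from the
# smallest weights upward (to 2000 columns), filtering objects flagged by
# find_noisy before re-measuring compactness at each step.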
def main():
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\gasterlogy1394.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\spame.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\MATBIO_MY.txt")
    X, types, y = ToFormNumpy(
        r"D:\Nuu\AI\Selections\Amazon_initial_50_30_10000\data.txt")

    metric = 1

    minmax_scale(X, copy=False)

    w = Lagranj_nd(X, y)

    value = w.min()
    X_Test = np.array(X[:, w == value])
    types_Test = np.array(types[w == value])

    i = 0

    while X_Test.shape[1] < 2000:

        value = np.min(w[w > value])

        X_Test = X[:, w <= value]
        types_Test = types[w <= value]

        noisy = find_noisy(X_Test, y, types=types_Test, metric=metric)

        cond = np.logical_not(noisy)

        print("\nnoisy = ", len(noisy[noisy == True]))

        compactness(X_Test[cond], y[cond], types=types_Test, metric=metric)

        i += 1
Example No. 3
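# Times and compares NearestNeighborClassifier against TemplateClassifier under
# 10-fold cross-validation (CVS appears to be sklearn's cross_val_score).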
def main():

    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\giper_my.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\IT_BORI_42_6.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\gasterlogy1394.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\spame.txt")
    X, types, y = ToFormNumpy("D:\\tanlanmalar\\Asian Religion.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\arcene_train.txt")

    minmax_scale(X, copy=False)
    #Normalizing_Estmation(X, y, types=types)

    k = 10
    k_fold = KFold(n_splits=k, shuffle=True, random_state=None)

    # Neighbors
    nnc = NearestNeighborClassifier()

    knc = TemplateClassifier()

    begin = time.time()
    max_mean1 = CVS(nnc, X, y, cv=k_fold, n_jobs=4, scoring='accuracy').mean()
    end = time.time()
    print("Time: ", (end - begin) * 1000)

    print(max_mean1)

    begin = time.time()
    max_mean2 = CVS(knc, X, y, cv=k_fold, n_jobs=4, scoring='accuracy').mean()
    end = time.time()
    print("Time: ", (end - begin) * 1000)

    print(max_mean1, max_mean2)
Example No. 4
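# Selects a high-weight feature subset (at least 661 columns) via Lagranj
# weights, then cross-validates an MLP on the reduced data.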
def main():
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\gasterlogy1394.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\spame.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\MATBIO_MY.txt")
    X, types, y = ToFormNumpy(r"D:\Nuu\AI\Selections\Amazon_initial_50_30_10000\data.txt")

    metric = 1

    minmax_scale(X, copy=False)

    #w = Lagranj_nd(X, y)
    w = Lagranj(X)
    value = w.max()
    cond = w == value
    while np.count_nonzero(cond) < 661:
        value = np.max(w[w < value])
        cond = w >= value

    X_Test = X[:, w >= value]

    k = 10
    k_fold = KFold(n_splits=k, shuffle=True, random_state=None)

    svm = SVC(kernel="linear")

    #svm.fit(X_Test, y)

    nn = MLPClassifier()
    nn.fit(X_Test, y)  # not needed for the cross-validation below, which refits on each fold

    max_mean = CVS(nn, X_Test, y, cv=k_fold, n_jobs=4, scoring='accuracy').mean()
    print(max_mean)
Example No. 5
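# Fits KnnOptimalRegression on the whole dataset and prints its in-sample predictions.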
def main():
    X, types, y = ToFormNumpy("D:\\tanlanmalar\\gasterlogy1394.txt")

    reg = KnnOptimalRegression()

    reg.fit(X, y)

    print(reg.predict(X))
Example No. 6
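# Prunes features down to 103 by repeatedly dropping the smallest Lagranj1
# weight, then averages five classifiers' accuracies over 10 random 55/45
# train/test splits.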
def main():
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\gasterlogy1394.txt")
    #X, types, y = ToFormNumpy(r"D:\Nuu\AI\Selections\Amazon_initial_50_30_10000\data.txt")
    X, types, y = ToFormNumpy(
        r"D:\Nuu\AI\Selections\LSVT_voice_rehabilitation\data.txt")

    minmax_scale(X, copy=False)

    metric = 1

    w = Lagranj1(X, y)

    #minmax_scale(X, copy=False)  # redundant: X was already scaled above
    #Normalizing_Estmation(X, y)

    while X.shape[1] > 103:
        cond = w != w.min()
        X = X[:, cond]
        w = w[cond]

    nnc1 = NearestNeighborClassifier_(noisy=True)
    nnc2 = NearestNeighborClassifier()
    nnc3 = TemplateClassifier(noisy=True)
    nn = MLPClassifier()
    svm = SVC()

    k = 10
    mean1 = 0
    mean2 = 0
    mean3 = 0
    mean4 = 0
    mean5 = 0
    for i in range(k):
        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            y,
                                                            test_size=0.45,
                                                            random_state=None,
                                                            shuffle=True)

        nnc1.fit(X_train, y_train)
        nnc2.fit(X_train, y_train)
        nnc3.fit(X_train, y_train)
        svm.fit(X_train, y_train)
        nn.fit(X_train, y_train)

        mean1 += nnc1.score(X_test, y_test)
        mean2 += nnc2.score(X_test, y_test)
        mean3 += nnc3.score(X_test, y_test)
        mean4 += svm.score(X_test, y_test)
        mean5 += nn.score(X_test, y_test)

    mean1 /= k
    mean2 /= k
    mean3 /= k
    mean4 /= k
    mean5 /= k

    print(mean1, mean2, mean3, mean4, mean5)
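A side note, not from the original code: the manual split-and-average loop above follows the same protocol as scikit-learn's ShuffleSplit. A minimal, self-contained sketch (synthetic data and names are illustrative only):

from sklearn.datasets import make_classification
from sklearn.model_selection import ShuffleSplit, cross_val_score
from sklearn.svm import SVC

# Synthetic stand-in for the X, y loaded in the examples above
X_demo, y_demo = make_classification(n_samples=200, n_features=20, random_state=0)

# 10 random 55/45 train/test splits, scores averaged: the same protocol as the loop
cv = ShuffleSplit(n_splits=10, test_size=0.45, random_state=0)
print(cross_val_score(SVC(), X_demo, y_demo, cv=cv, scoring='accuracy').mean())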
Example No. 7
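# Relaxes a Lagranj_nd weight threshold step by step and reports compactness for
# every subset wider than 154 features; the classifier comparison after the
# early return is left disabled.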
def main():
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\gasterlogy1394.txt")
    X, types, y = ToFormNumpy(r"D:\Nuu\AI\Selections\Amazon_initial_50_30_10000\data.txt")

    #y[y == 2] = 1

    minmax_scale(X, copy=False)
    #minmax_scale(X, copy=False)

    w = Lagranj_nd(X, y)

    #return 0

    value = w.max()
    cond = w == value
    while np.count_nonzero(cond) < 5000:
        value = np.max(w[w < value])
        cond = w >= value
        if np.count_nonzero(cond) > 154:
            compactness(X[:, cond], y, types)

    print(np.count_nonzero(cond))

    return 0  # the rest of this example is intentionally disabled

    X = X[:, cond]
    types = types[cond]

    metric = 1

    #nnc = NearestNeighborClassifier_(noisy=True)
    nnc = NearestNeighborClassifier()
    # nnc = TemplateClassifier(noisy=True)
    nn = MLPClassifier()
    svm = SVC()

    k = 10
    mean1 = 0
    mean2 = 0
    mean3 = 0
    for i in range(k):
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.5, random_state=None, shuffle=True)

        nnc.fit(X_train, y_train)
        svm.fit(X_train, y_train)
        nn.fit(X_train, y_train)

        mean1 += nnc.score(X_test, y_test)
        mean2 += svm.score(X_test, y_test)
        mean3 += nn.score(X_test, y_test)

    mean1 /= k
    mean2 /= k
    mean3 /= k

    print(mean1, mean2, mean3)
Example No. 8
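# Maps labels {1, 2} to {0, 1}, scales, and evaluates compactness on IT_BORI_42_6.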
def main():
    X, types, y = ToFormNumpy("D:\\tanlanmalar\\IT_BORI_42_6.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\giper_my.txt")

    y -= 1

    minmax_scale(X, copy=False)
    #Normalizing_Estmation(X, y)

    compactness(X, y, types=types, metric=1)
Example No. 9
def main():

    X, types, y = ToFormNumpy("D:\\tanlanmalar\\giper_my.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\MATBIO_MY.txt")

    y -= 1

    file = open("test.txt", "w")
    print(Fris(X, y, types=types, file=file))
    file.close()
Example No. 10
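# Prints compactness, then lists the 1-based indices (and total count) of
# objects in class 1 flagged as noisy; the classifier comparison after the
# early return is left disabled.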
def main():
    path = r"D:\tanlanmalar\GIPER_MY.txt"

    X, types, y = ToFormNumpy(path)

    y -= 1

    minmax_scale(X, copy=False)
    # Normalizing_Estmation(X, y)

    print(compactness(X, y, types))
    #res = find_standard(X, y, types)
    res = find_noisy(X, y, types)

    s = 0
    for i in range(res.shape[0]):
        if res[i] and y[i] == 1:
            print(i + 1)
            s += 1

    print(s)

    return 0  # the evaluation block below is disabled; uncomment one nnc line to run it

    #nnc = NearestNeighborClassifier_(noisy=True)
    #nnc = NearestNeighborClassifier()
    #nnc = TemplateClassifier(noisy=True)
    nn = MLPClassifier()
    svm = SVC()

    k = 10
    mean1 = 0
    mean2 = 0
    mean3 = 0
    for i in range(k):
        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            y,
                                                            test_size=0.2,
                                                            random_state=None,
                                                            shuffle=True)

        nnc.fit(X_train, y_train)
        svm.fit(X_train, y_train)
        nn.fit(X_train, y_train)

        mean1 += nnc.score(X_test, y_test)
        mean2 += svm.score(X_test, y_test)
        mean3 += nn.score(X_test, y_test)

    mean1 /= k
    mean2 /= k
    mean3 /= k

    print(mean1, mean2, mean3)
Example No. 11
def main():

    X, types, y = ToFormNumpy("D:\\tanlanmalar\\IT_BORI_42_6.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\giper_my.txt")

    y -= 1

    minmax_scale(X, copy=False)
    #Normalizing_Estmation(X, y)

    group, comp1 = Fris(X, y, types=types, file=None)
Example No. 12
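# Averages NearestNeighborClassifier_ (noise filtering off) accuracy over 10
# random 80/20 splits of spame; the SVM and MLP branches are commented out, so
# mean2 and mean3 stay 0.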
def main():
    #path = r"D:\Nuu\AI\Selections\leukemia\leukemia_small.csv"

    #X, types, y = ReadFromCSVWithHeaderClass(path)
    X, types, y = ToFormNumpy(r"D:\tanlanmalar\spame.txt")

    minmax_scale(X, copy=False)

    #res = find_shell(X, y, types)
    #res = find_standard(X, y, types)
    #res = find_noisy(X, y, types)

    #count(res, y)

    #X = X[res == False]
    #y = y[res == False]

    #print(X.shape)

    #print(compactness(X, y, types))

    #return 0

    nnc = NearestNeighborClassifier_(noisy=False)
    #nnc = NearestNeighborClassifier()
    #nnc = TemplateClassifier(noisy=True)
    nn = MLPClassifier()
    svm = SVC()

    k = 10
    mean1 = 0
    mean2 = 0
    mean3 = 0
    for i in range(k):
        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            y,
                                                            test_size=0.2,
                                                            random_state=None,
                                                            shuffle=True)

        nnc.fit(X_train, y_train)
        #svm.fit(X_train, y_train)
        #nn.fit(X_train, y_train)

        mean1 += nnc.score(X_test, y_test)
        #mean2 += svm.score(X_test, y_test)
        #mean3 += nn.score(X_test, y_test)

    mean1 /= k
    mean2 /= k
    mean3 /= k

    print(mean1, mean2, mean3)
Example No. 13
def main():
    path = r"D:\Tanlanmalar\MATBIO_MY.txt"

    X, types, y = ToFormNumpy(path)

    y -= 1

    print(X.shape)

    #minmax_scale(X, copy=False)
    #Normalizing_Estmation(X, y)

    count(find_shell(X, y), y)
Example No. 14
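# Runs ReductionOptimal on every pair of features of spame, presumably searching
# for informative 2-D reductions.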
def main():

    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\IT_BORI_42_6.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\giper_my.txt")
    X, types, y = ToFormNumpy("D:\\tanlanmalar\\spame.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\gasterlogy1394.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\MATBIO_MY.txt")

    #y -= 1
    #y[y == 2] = 1

    for i in range(X.shape[1]):
        for j in range(i + 1,  X.shape[1]):
            ReductionOptimal(X[:, i], X[:, j], y)
Example No. 15
def main():

    X, types, y = ToFormNumpy("D:\\tanlanmalar\\IT_BORI_42_6.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\giper_my.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\spame.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\gasteralogy.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\MATBIO_MY.txt")

    y = y - 1

    #minmax_scale(X, copy=False)
    #Normalizing_Estmation(X, y)

    drawobjects(X[:, [3, 5]], classes=y, isVisibleLabel=True)
Example No. 16
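# Compares an MLP, 1-NN, and SVM with 10-fold cross-validation on variants of
# the german data; the inline comments record accuracies measured for each file.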
def main():

    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\IT_BORI_42_6.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\giper_my.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\spame.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\gasterlogy1394.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\MATBIO_MY.txt")
    #X, types, y = ToFormNumpy("D:\\german.txt") #71.2
    #X, types, y = ToFormNumpy("D:\\german1.txt") #91.7
    #X, types, y = ToFormNumpy("D:\\german2.txt") #91.7
    #X, types, y = ToFormNumpy("D:\\german3.txt") #94.4
    #X, types, y = ToFormNumpy("D:\\german4.txt") #95.4
    #X, types, y = ToFormNumpy("D:\\german5.txt") #97.7
    X, types, y = ToFormNumpy("D:\\german6.txt")  #98.1
    #X, types, y = ToFormNumpy("D:\\german7.txt") #97.3
    #X, types, y = ToFormNumpy("D:\\german8.txt") #94.5
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\german.txt")

    #y[y == 2] = 1

    _, ln = np.unique(y, return_counts=True)

    #minmax_scale(X, copy=False)
    #Normalizing_Estmation(X, y)

    # Cross Validation
    k = 10
    k_fold = KFold(n_splits=k, shuffle=True, random_state=None)

    # Neural network
    mlp = MLPClassifier(hidden_layer_sizes=(100, 200))

    # Knn
    n_neighbors = 2 * min(ln) - 3  # computed but unused: the k=1 line below is the active one
    # metric: Euclidean
    #knc = KNeighborsClassifier(n_neighbors=n_neighbors)
    knc = KNeighborsClassifier(n_neighbors=1)

    #SVM
    svc = SVC()

    #print("MLP")
    max_mean1 = CVS(mlp, X, y, cv=k_fold, n_jobs=4, scoring='accuracy').mean()
    #print("KNN")
    max_mean2 = CVS(knc, X, y, cv=k_fold, n_jobs=4, scoring='accuracy').mean()
    #print("SVM")
    max_mean3 = CVS(svc, X, y, cv=k_fold, n_jobs=4, scoring='accuracy').mean()

    print(max_mean1, max_mean2, max_mean3)
Example No. 17
def main():
    X, types, y = ToFormNumpy("D:\\tanlanmalar\\gasterlogy1394.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\spame.txt")
    # X, types, y = ToFormNumpy("D:\\tanlanmalar\\MATBIO_MY.txt")

    metric = 1

    minmax_scale(X, copy=False)

    noisy = find_noisy(X, y, types=types, metric=metric)

    #for item in noisy:
    #    print(item)

    print(np.count_nonzero(noisy))  # count of flagged objects; len(noisy) would be the sample count
Example No. 18
def main():

    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\IT_BORI_42_6.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\giper_my.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\spame.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\gasterlogy1394.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\MATBIO_MY.txt")
    #X, types, y = ToFormNumpy(r"D:\Nuu\Data mining\Articles\PCA operator\Computing\Gastown1.txt")
    X, types, y = ToFormNumpy(r"D:\Nuu\AI\Selections\Amazon_initial_50_30_10000\data.txt")

    #minmax_scale(X, copy=False)

    w = Lagranj_nd(X, y)

    cond = w > 0

    print(np.count_nonzero(cond))
Example No. 19
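# Keeps only the features whose Lagranj_nd weight equals the minimum and
# evaluates the compactness of that reduced description.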
def main():
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\gasterlogy1394.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\spame.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\MATBIO_MY.txt")
    X, types, y = ToFormNumpy("D:\\tanlanmalar\\arcene_train.txt")

    minmax_scale(X, copy=False)

    w = Lagranj_nd(X, y)

    print(w.shape)

    X_Test = np.array(X[:, w == w.min()])
    types_Test = np.array(types[w == w.min()])

    print(X_Test)

    res = compactness(X_Test, y, types=types_Test, metric=1)
Example No. 20
def main():
    path = r"D:\Tanlanmalar\german.txt"

    X, types, y = ToFormNumpy(path)

    minmax_scale(X, copy=False)
    #Normalizing_Estmation(X, y)

    nnc1 = NearestNeighborClassifier_(noisy=True)
    nnc2 = NearestNeighborClassifier()
    nnc3 = TemplateClassifier(noisy=True)
    nn = MLPClassifier()
    svm = SVC()

    k = 10
    mean1 = 0
    mean2 = 0
    mean3 = 0
    mean4 = 0
    mean5 = 0
    for i in range(k):
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.45, random_state=None, shuffle=True)

        nnc1.fit(X_train, y_train)
        nnc2.fit(X_train, y_train)
        nnc3.fit(X_train, y_train)
        svm.fit(X_train, y_train)
        nn.fit(X_train, y_train)

        mean1 += nnc1.score(X_test, y_test)
        mean2 += nnc2.score(X_test, y_test)
        mean3 += nnc3.score(X_test, y_test)
        mean4 += svm.score(X_test, y_test)
        mean5 += nn.score(X_test, y_test)

    mean1 /= k
    mean2 /= k
    mean3 /= k
    mean4 /= k
    mean5 /= k

    print(mean1, mean2, mean3, mean4, mean5)
Example No. 21
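# Removes the lowest-weight feature (Lagranj1) one at a time, re-evaluating
# compactness after every removal until two features remain.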
def main():
    path = r"D:\Nuu\AI\Selections\Amazon_initial_50_30_10000\data.txt"

    X, types, y = ToFormNumpy(path)

    minmax_scale(X, copy=False)

    _, ln = np.unique(y, return_counts=True)

    w = Lagranj1(X, y)

    compactness(X, y, types=types, metric=1)

    while X.shape[1] > 2:
        cond = w != w.min()
        X = X[:, cond]
        w = w[cond]

        compactness(X, y, types=types, metric=1)
Example No. 22
def main():
    path = r"D:\Nuu\AI\Selections\LSVT_voice_rehabilitation\data.txt"

    X, types, y = ToFormNumpy(path)

    #minmax_scale(X, copy=False)
    Normalizing_Estmation(X, y)

    _, ln = np.unique(y, return_counts=True)

    w = Lagranj1(X, y)

    compactness(X, y, types=types, metric=1)

    while X.shape[1] > 2:
        cond = w != w.min()
        X = X[:, cond]
        w = w[cond]

        compactness(X, y, types=types, metric=1)
Example No. 23
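# Trains the nearest-neighbor classifier on arcene_train and predicts a separate
# test file, timing the prediction in milliseconds. Note: the test matrix is
# min-max scaled independently of the training matrix, which can put the two on
# slightly different scales.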
def main():

    X, types, y = ToFormNumpy("D:\\tanlanmalar\\arcene_train.txt")

    X_Test = np.loadtxt(
        r"D:\Nuu\AI\Selections\Arcena Data Set\arcene_test.data")

    minmax_scale(X, copy=False)
    minmax_scale(X_Test, copy=False)
    #Normalizing_Estmation(X, y, types=types)

    nnc = NearestNeighborClassifier()

    begin = time.time()

    nnc.fit(X, y)

    print(nnc.predict(X_Test))

    end = time.time()
    print("Time: ", (end - begin) * 1000)
Example No. 24
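# Keeps the 2000 best features by the chi-squared criterion, then averages SVM
# and MLP accuracy over 10 random 50/50 splits; the nearest-neighbor branch is
# commented out, so mean1 stays 0.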
def main():
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\gasterlogy1394.txt")
    X, types, y = ToFormNumpy(r"D:\Nuu\AI\Selections\Amazon_initial_50_30_10000\data.txt")

    minmax_scale(X, copy=False)
    #minmax_scale(X, copy=False)

    X = SelectKBest(chi2, k=2000).fit_transform(X, y)


    metric = 1

    # nnc = NearestNeighborClassifier_(noisy=True)
    nnc = NearestNeighborClassifier()
    # nnc = TemplateClassifier(noisy=True)
    nn = MLPClassifier()
    svm = SVC()

    k = 10
    mean1 = 0
    mean2 = 0
    mean3 = 0
    for i in range(k):
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.5, random_state=None, shuffle=True)

        #nnc.fit(X_train, y_train)
        svm.fit(X_train, y_train)
        nn.fit(X_train, y_train)

        #mean1 += nnc.score(X_test, y_test)
        mean2 += svm.score(X_test, y_test)
        mean3 += nn.score(X_test, y_test)

    mean1 /= k
    mean2 /= k
    mean3 /= k

    print(mean1, mean2, mean3)
Example No. 25
def main():
    X, types, y = ToFormNumpy("D:\\tanlanmalar\\spame.txt")
    minmax_scale(X, copy=False)

    #X, types, y = ToFormNumpy(r"D:\Nuu\Data mining\Articles\PCA operator\Computing\Lagranj\Spame\data\own\(4595, 57).txt")

    k = 5

    k_fold = KFold(n_splits=k, shuffle=True, random_state=42)

    mlp = MLPClassifier(hidden_layer_sizes=(50, 200),
                        activation='relu',
                        max_iter=1000,
                        alpha=1e-5,
                        solver='adam',
                        verbose=False,
                        tol=1e-4,
                        random_state=1,
                        learning_rate_init=.1)

    max_mean = sum(CVS(mlp, X, y, cv=k_fold, n_jobs=4, scoring='accuracy')) / k

    print('Score = ', max_mean)
Example No. 26
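# Re-encodes each nominal feature (types[j] == 0) by its class-conditional value
# frequencies and prints the transformed dataset with 1-based labels.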
def main():

    X, types, y = ToFormNumpy("D:\\tanlanmalar\\german.txt")
    _, ln = np.unique(y, return_counts=True)
    for j in range(X.shape[1]):
        if types[j] == 0:
            # Snapshot the column first: counting frequencies on X itself while
            # its values are being replaced would corrupt the statistics.
            col = X[:, j].copy()
            gradation = {}
            for i in range(X.shape[0]):
                if col[i] not in gradation:
                    nyu1 = np.count_nonzero(col[y == 0] == col[i]) / ln[0]
                    nyu2 = np.count_nonzero(col[y == 1] == col[i]) / ln[1]

                    gradation[col[i]] = nyu1 / (nyu1 + nyu2)

                if y[i] == 0:
                    X[i, j] = gradation[col[i]]
                else:
                    X[i, j] = 1 - gradation[col[i]]


    for i in range(X.shape[0]):
        for j in range(X.shape[1]):
            print(X[i, j], end=' ')
        print(y[i] + 1)
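A hedged, self-contained sketch of the same encoding idea (function and variable names here are illustrative, not from the original):

import numpy as np

def class_conditional_encode(col, y):
    # For each value v: g(v) = freq(v | class 0) / (freq(v | class 0) + freq(v | class 1)).
    # Class-0 objects receive g(v); class-1 objects receive 1 - g(v).
    out = np.empty(len(col), dtype=float)
    for v in np.unique(col):
        nyu1 = np.mean(col[y == 0] == v)
        nyu2 = np.mean(col[y == 1] == v)
        g = nyu1 / (nyu1 + nyu2)
        mask = col == v
        out[mask & (y == 0)] = g
        out[mask & (y == 1)] = 1.0 - g
    return out

col = np.array([1, 1, 2, 2, 2, 3])
y = np.array([0, 0, 0, 1, 1, 1])
print(class_conditional_encode(col, y))  # [1.  1.  0.333...  0.667...  0.667...  1. ]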
Example No. 27
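# Centers the features of giper_my and forms the scatter (Gram) matrix X.T @ X.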
import numpy as np

from Test.read_data import ToFormNumpy
from uz.nuu.datamining.graphic.drawing import mscatter
from ai.own.fris import Fris

X, types, y = ToFormNumpy("D:\\tanlanmalar\\giper_my.txt")

#X = np.array([[4, 4], [3, 6], [5, 2]], dtype=float)

#X = np.array([[4, 3, 5], [4, 6, 2]])

y -= 1
X -= X.mean(axis=0)

covmat = np.dot(X.T, X)

# The covmat computed above was then overwritten by a hard-coded 29-column
# covariance matrix literal; the literal is truncated in the source, so only
# a stub is kept here.
#covmat = np.array([[...]])
Example No. 28
def main():

    X, types, y = ToFormNumpy(r"D:/test.txt")

    mscatter1(X, y, marker=['v', 's'], size=6, colors=['black', 'black'])
Example No. 29
def main():
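    # Pipeline: run Fris grouping on the full data and on a 2-D (Kernel)PCA
    # projection, then repeatedly drop the lowest Lagranj weight feature,
    # re-running both and logging compactness, decomposition similarity,
    # scatter plots, and the intermediate datasets.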

    X, types, y = ToFormNumpy("D:\\tanlanmalar\\IT_BORI_42_6.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\giper_my.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\spame.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\gasterlogy1394.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\german.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\MATBIO_MY.txt")
    #X, types, y = ToFormNumpy(r"D:\Nuu\Data mining\Articles\PCA operator\Computing\Gastown1.txt")

    y -= 1
    #y[y == 2] = 1

    _, ln = np.unique(y, return_counts=True)

    #minmax_scale(X, copy=False)
    Normalizing_Estmation(X, y, types=types)

    #return None

    root = r"D:\Nuu\Data mining\Articles\PCA operator\Computing\Lagranj"
    selection_Name = r'\It_bori'
    preproccesing_name = r'own'

    img_path = root + selection_Name + \
               "\images " + preproccesing_name

    save_name = img_path + r"\img"
    save_name += str(X.shape) + ".png"

    path = root + selection_Name + \
           "/res " + preproccesing_name + ".txt"
    path1 = root + selection_Name + \
           "/res1 " + preproccesing_name + ".txt"

    p_res = root + selection_Name + \
           "/data/" + preproccesing_name + "/"
    p_res_PCA = root + selection_Name + \
            "/data/" + preproccesing_name + "/"

    file = open(path, 'w')
    file1 = open(path1, 'w')

    shape = X.shape

    #Computing for X
    print("Computing for shape " + str(X.shape))
    file.write("Computing for shape " + str(X.shape) + "\n")
    group, comp1, noisy1 = Fris(X, y, types=types, file=file)
    similarity0 = DecomposionEstimation(group, group, X.shape[0])

    print("Similarity between shape  " + str(shape) + " and " + str(X.shape) +
          " are " + str(similarity0))
    file.write("Similarity between shape  " + str(shape) + " and " +
               str(X.shape) + " are " + str(similarity0) + "\n")

    writeNP(p_res + str(X.shape) + ".txt", X, y, types=types)

    #PCA
    print("Computing for PCA")
    file.write("Computing for PCA\n")

    #pca = PCA(n_components=2)
    pca = KernelPCA(n_components=2, kernel='poly')
    pca.fit(X, y=y)
    transform = pca.transform(X)
    writeNP(p_res_PCA + str(transform.shape) + str(X.shape) + ".txt",
            transform,
            y,
            types=[1, 1])
    mscatter(transform, y=y, save_name=save_name)

    group_b, comp2, noisy2 = Fris(transform, y, types, file=file)
    similarity = DecomposionEstimation(group, group_b, X.shape[0])
    print("Similarity between shape  " + str(shape) + " and " +
          str(transform.shape) + " are " + str(similarity))
    file.write("Similarity between shape  " + str(shape) + " and " +
               str(transform.shape) + " are " + str(similarity) + "\n")

    file1.write(
        str(X.shape[1]) + "\t" + str(comp1) + "\t" + str(similarity0) + "\t" +
        str(comp2) + "\t" + str(similarity) + "\t" + str(similarity) + "\t" +
        str(noisy1) + "\t" + str(noisy2) + "\n")

    # 25
    w = Lagranj(X, y, types, ln=ln)

    while X.shape[1] > 2:
        cond = w != w.min()
        X = X[:, cond]
        types = types[cond]
        w = w[cond]

        print("\n" + "*" * 50)
        file.write("\n" + "*" * 50 + "\n")
        print("Computing for shape " + str(X.shape))
        file.write("Computing for shape " + str(X.shape) + "\n")

        # For X
        group_b, comp1, noisy1 = Fris(X, y, types=types, file=file)
        similarity0 = DecomposionEstimation(group,
                                            group_b,
                                            obj_count=X.shape[0])
        print("Similarity between shape  " + str(shape) + " and " +
              str(X.shape) + " are " + str(similarity0))
        file.write("Similarity between shape  " + str(shape) + " and " +
                   str(X.shape) + " are " + str(similarity0) + "\n")

        #PCA
        print("Computing for PCA")
        file.write("Computing for PCA\n")

        pca = PCA(n_components=2)
        pca.fit(X, y=y)
        transform = pca.transform(X)

        #Save images
        save_name = img_path + r"\img"
        save_name += str(X.shape) + ".png"
        mscatter(transform, y=y, save_name=save_name)

        group_c, comp2, noisy2 = Fris(transform, y, types, file=file)
        similarity = DecomposionEstimation(group, group_c, X.shape[0])
        similarity1 = DecomposionEstimation(group_b, group_c, X.shape[0])

        print("Similarity between shape  " + str(shape) + " and " +
              str(transform.shape) + " are " + str(similarity))
        file.write("Similarity between shape  " + str(shape) + " and " +
                   str(transform.shape) + " are " + str(similarity) + "\n")

        file1.write(
            str(X.shape[1]) + "\t" + str(comp1) + "\t" + str(similarity0) +
            "\t" + str(comp2) + "\t" + str(similarity) + "\t" +
            str(similarity1) + "\t" + str(noisy1) + "\t" + str(noisy2) + "\n")

        writeNP(p_res + str(X.shape) + ".txt", X, y, types=types)
        writeNP(p_res_PCA + str(transform.shape) + str(X.shape) + ".txt",
                transform,
                y,
                types=[1, 1])

    file.close()
    file1.close()
Example No. 30
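# Merges class 2 into class 1, removes objects flagged by clearNoisy, then
# repeatedly drops the lowest Lagranj weight feature while logging the
# cross-validated accuracies of an MLP, kNN, and SVM to a results file.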
def main():

    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\IT_BORI_42_6.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\giper_my.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\spame.txt")
    X, types, y = ToFormNumpy("D:\\tanlanmalar\\gasterlogy1394.txt")
    #X, types, y = ToFormNumpy("D:\\tanlanmalar\\MATBIO_MY.txt")

    y[y == 2] = 1

    _, ln = np.unique(y, return_counts=True)

    #print(ln)

    #minmax_scale(X, copy=False)
    Normalizing_Estmation(X, y)

    indx = clearNoisy(X, y)

    X = X[indx]
    y = y[indx]

    #print(X.shape)
    #print(y.shape)

    #return None

    selection_Name = r'\Gasterology2'
    preproccesing_name = r'own'

    path = r"D:\Nuu\Data mining\Articles\Cross Validation\Computing" + selection_Name + \
           r"\res " + preproccesing_name + ".txt"

    file = open(path, 'w')

    # Cross Validation
    k = 10
    k_fold = KFold(n_splits=k, shuffle=True, random_state=None)

    # Neural network
    mlp = MLPClassifier(hidden_layer_sizes=(100, 200), activation='logistic')

    # Knn
    n_neighbors = 2 * min(ln) - 3
    # metric: Euclidean (p=2)
    knc = KNeighborsClassifier(n_neighbors=n_neighbors, p=2)

    #SVM
    svc = SVC(kernel="linear", degree=5)  # degree is ignored for a linear kernel

    # RDF
    rdf = RandomForestClassifier(max_depth=1000)  # defined but never evaluated below

    #print("MLP")
    max_mean1 = CVS(mlp, X, y, cv=k_fold, n_jobs=4, scoring='accuracy').mean()
    #print("KNN")
    max_mean2 = CVS(knc, X, y, cv=k_fold, n_jobs=4, scoring='accuracy').mean()
    #print("SVM")
    max_mean3 = CVS(svc, X, y, cv=k_fold, n_jobs=4, scoring='accuracy').mean()

    print(X.shape[1], max_mean1, max_mean2, max_mean3)
    # 25
    w = Lagranj(X, y, types)

    while X.shape[1] > 2:
        # Cross Validation
        k = 5
        k_fold = KFold(n_splits=k, shuffle=True, random_state=42)

        # Neural network
        mlp = MLPClassifier(hidden_layer_sizes=(50, 200),
                            activation='relu',
                            max_iter=1000,
                            alpha=1e-5,
                            solver='adam',
                            verbose=False,
                            tol=1e-8,
                            random_state=1,
                            learning_rate_init=.1)

        # Knn
        n_neighbors = 2 * min(ln) - 3
        # metric: Euclidean (p=2)
        knc = KNeighborsClassifier(n_neighbors=n_neighbors, p=2)

        # SVM
        svc = SVC(gamma='scale')

        max_mean1 = sum(CVS(mlp, X, y, cv=k_fold, n_jobs=4,
                            scoring='accuracy')) / k
        max_mean2 = sum(CVS(knc, X, y, cv=k_fold, n_jobs=4,
                            scoring='accuracy')) / k
        max_mean3 = sum(CVS(svc, X, y, cv=k_fold, n_jobs=4,
                            scoring='accuracy')) / k

        print(X.shape[1], max_mean1, max_mean2, max_mean3)
        file.write(
            str(X.shape[1]) + "\t" + str(max_mean1) + "\t" + str(max_mean2) +
            "\t" + str(max_mean3) + "\n")

        cond = w != w.min()
        X = X[:, cond]
        w = w[cond]

    file.close()