Example #1
from matplotlib import pyplot
#from mlxtend.plotting import plot_decision_regions
from sklearn import svm
from sklearn.manifold import MDS, LocallyLinearEmbedding

from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA

from data.preprocess import features_preprocess, features_test_preprocess, labels_preprocess

labels_all = labels_preprocess()

labels_train = labels_all[:38]
labels_test = labels_all[38:]

features_train = features_preprocess()
features_test = features_test_preprocess()

print("Dimensionality = ", len(features_train[1]))
"""
scaler = MinMaxScaler()
features_train = scaler.fit_transform(features_train)
features_test = scaler.transform(features_test)  # transform only; the scaler is already fitted on the training data
"""

embedding = LocallyLinearEmbedding(n_components=10, n_neighbors=5)

features_train = embedding.fit_transform(features_train)  # LLE is unsupervised; labels are not needed here
features_test = embedding.transform(features_test)

clf = svm.SVC(kernel='linear')
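
The snippet stops right after the classifier is created. A minimal continuation sketch (not part of the original), assuming the LLE-reduced features and the 38/remainder train-test split above; accuracy_score is an added import:

from sklearn.metrics import accuracy_score

# Train the linear SVM on the LLE-reduced training set and score the held-out samples
clf.fit(features_train, labels_train)
pred = clf.predict(features_test)
print("Accuracy = ", accuracy_score(labels_test, pred))
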
Example #2

import numpy
import tensorflow as tf

from sklearn.decomposition import PCA

from data.preprocess import features_preprocess, features_test_preprocess, labels_preprocess
#input_data = [[1,1,0],[1,1,1],[0,1,0],[-1,1,0],[-1,0,0],[-1,0,1],[0,0,1],[1,1,0],[1,0,0],[-1,0,0],[1,0,1],[0,1,1],[0,0,0],[-1,1,1]]
#label_data = [[0],[0],[1],[1],[1],[0],[1],[0],[1],[1],[1],[1],[1],[0]]

labels = labels_preprocess()
k = []

for lbl in labels:
    if lbl == 'AML':
        k.append([0])
    if lbl == 'ALL':
        k.append([1])

lbls = numpy.asarray(k)
lbls = lbls.astype(numpy.float32)

ftrs = numpy.concatenate((features_preprocess(), features_test_preprocess()))
pca = PCA(n_components=15)
ftrs = pca.fit_transform(ftrs)

inp = numpy.asarray(ftrs)
inp = inp.astype(numpy.float32)

val = numpy.asarray(lbls)
val = val.astype(numpy.float32)

x = tf.Variable(inp)
y = tf.Variable(val)

# The compiled Keras model is assumed to be defined earlier; its definition is not part of this excerpt
model.fit(x, y, epochs=500, steps_per_epoch=15)
results = model.predict(inp, verbose=0, steps=1)
prediction = []
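
The excerpt ends with the empty prediction list. A minimal sketch (not from the original) of how the network outputs could be thresholded and scored, assuming the model emits a single sigmoid value per sample:

# Threshold each output at 0.5 to obtain a binary class per sample
for r in results:
    prediction.append([1.0] if r[0] >= 0.5 else [0.0])

prediction = numpy.asarray(prediction, dtype=numpy.float32)
print("Training accuracy = ", numpy.mean(prediction == lbls))
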
Example #3
import numpy

from sklearn import svm
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
from matplotlib import pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.decomposition import PCA

from data.preprocess import features_preprocess, features_test_preprocess, labels_preprocess, labels_preprocess_num
from data.preprocess_2nd import preprocess_ft_lbls_num

labels = labels_preprocess()
features = numpy.concatenate(
    (features_preprocess(), features_test_preprocess()))

#(features , labels) = preprocess_ft_lbls_num()

K = 5
cv = KFold(n_splits=K, shuffle=True)

pca5 = PCA(n_components=5)
pca10 = PCA(n_components=10)
pca15 = PCA(n_components=15)
pca20 = PCA(n_components=20)
pca25 = PCA(n_components=25)

clf = svm.LinearSVC()

average_scores = []
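
The cross-validation loop that fills average_scores is not included in the excerpt. A minimal sketch (an assumption, not the original code) that scores each PCA size with K-fold accuracy and plots the result; labels_arr is a helper name introduced here:

labels_arr = numpy.asarray(labels)  # array form so the fold indices can be applied

for pca in (pca5, pca10, pca15, pca20, pca25):
    reduced = pca.fit_transform(features)
    fold_scores = []
    for train_idx, test_idx in cv.split(reduced):
        clf.fit(reduced[train_idx], labels_arr[train_idx])
        fold_scores.append(accuracy_score(labels_arr[test_idx], clf.predict(reduced[test_idx])))
    average_scores.append(numpy.mean(fold_scores))

# Plot mean fold accuracy against the number of retained components
plt.plot([5, 10, 15, 20, 25], average_scores)
plt.xlabel("PCA components")
plt.ylabel("Mean cross-validation accuracy")
plt.show()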