import numpy as np

from lib.Allread import allread
from lib.train_test_split import train_test_split
from lib.machine_learning.classification import svm, kNN, pCA
from lib.visualization import colorcode

y_all = []
flag = 0
# Read the main data.
for i in range(1, 4):
    for j in range(1, 6):
        try:
            x = allread('reflectance').Frequency_trans_reflect_TDS(
                '/Users/ryoya/kawaseken/20190123/2019_0123_{0}mm_{1}.txt'.format(i, j),
                '/Users/ryoya/kawaseken/20190123/2019_0123_ref_1.txt',
                1.40, 1.60)
            if flag == 0:
                x_all = x          # first spectrum initializes the feature matrix
                flag += 1
            else:
                x_all = np.append(x_all, x, axis=0)  # stack later spectra row-wise
            y_all.append(i)        # class label = sample thickness in mm
        except FileNotFoundError as e:
            print(e)

# train_test_split(features, labels, number of training samples per thickness)
train_x, train_y, test_x, test_y = train_test_split(x_all, y_all, 1)
#print(train_x)
#print(train_y)
#print(test_x)
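# --- Hedged sketch (not part of the original script) ---
# The svm/kNN/pCA helpers imported from lib.machine_learning.classification are
# project-specific and their signatures are not shown here, so this sketch uses
# scikit-learn equivalents to illustrate what a classification step on the
# (train_x, train_y, test_x, test_y) arrays produced above could look like.
# Every estimator and parameter below is an assumption, not the project's API.
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier

clf = SVC(kernel='linear')                 # stand-in for lib's svm helper
clf.fit(train_x, train_y)
print('SVM test accuracy:', clf.score(test_x, test_y))

knn = KNeighborsClassifier(n_neighbors=3)  # stand-in for lib's kNN helper
knn.fit(train_x, train_y)
print('kNN test accuracy:', knn.score(test_x, test_y))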
import glob
import os

import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing

from lib.Allread import allread

plt.close()
l = 1                      # class label, one per reagent
y_all = []
flag = 0
med = ['グルコース', 'ラクトース']   # reagent file-name prefixes (glucose, lactose)
# Loop once per reagent.
# Read the main data.
# The script changes the working directory here; set it to the folder that
# holds the files you want to open.
os.chdir('/Users/toshinari/Downloads/暫定')
for w in med:
    for j in glob.glob("{0}*.txt".format(w)):
        try:
            x = allread('Intencity').Frequency_Intencity_is_TPG(
                "/Users/toshinari/Downloads/暫定/{0}".format(j), 1.05, 1.8)
            print(j)
            if flag == 0:
                x_all = x
                flag += 1
            else:
                x_all = np.append(x_all, x, axis=0)
            y_all.append(l)
        except FileNotFoundError as e:
            print(e)
    l = l + 1              # next reagent gets the next label

# Rescale every feature column to [0, 1] before classification.
mm = preprocessing.MinMaxScaler()
x_all_2 = mm.fit_transform(x_all)
print(x_all_2)
print(x_all)
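# --- Hedged sketch (not part of the original script) ---
# MinMaxScaler rescales each feature (column) of x_all independently to [0, 1]:
# x_scaled = (x - column_min) / (column_max - column_min).  The toy array below
# only illustrates that behaviour; its values are made up.
import numpy as np
from sklearn import preprocessing

toy = np.array([[1.0, 10.0],
                [2.0, 30.0],
                [3.0, 20.0]])
print(preprocessing.MinMaxScaler().fit_transform(toy))
# -> [[0.  0. ]
#     [0.5 1. ]
#     [1.  0.5]]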
import numpy as np

from lib.Allread import allread
from lib.train_test_split import train_test_split
from lib.machine_learning.classification import svm, kNN, pCA
from lib.visualization import colorcode

y_all = []
flag = 0
# Read the main data.
for i in range(1, 5):
    i = i * 0.5            # thickness in mm: 0.5, 1.0, 1.5, 2.0
    for j in range(1, 5):
        try:
            x = allread('reflectance').Frequency_trans_reflect_is_TPG(
                '/Users/ryoya/kawaseken/20190201_fix/PE_{0}mm_{1}.txt'.format(i, j),
                '/Users/ryoya/kawaseken/20190201/ref.txt',
                1.4, 1.5)
            if flag == 0:
                x_all = x
                flag += 1
            else:
                x_all = np.append(x_all, x, axis=0)
            # The svm/pca visualization only works with integer labels, so the
            # 0.5 mm thickness steps are multiplied by 2 (and cast to int) to
            # turn them back into 1, 2, 3, 4.
            y_all.append(int(i * 2))
        except FileNotFoundError as e:
            print(e)

# train_test_split(features, labels, number of training samples per thickness)
train_x, train_y, test_x, test_y = train_test_split(x_all, y_all, 1)
print(train_x)
print(train_y)
#print(test_x)
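# --- Hedged sketch (not part of the original script) ---
# An equivalent way to walk the 0.5 mm steps without rebinding the loop
# variable i: iterate over the thickness values directly and take the integer
# class label from enumerate.  Shown for j = 1 only; same files, same labels.
for label, thickness in enumerate([0.5, 1.0, 1.5, 2.0], start=1):
    path = '/Users/ryoya/kawaseken/20190201_fix/PE_{0}mm_{1}.txt'.format(thickness, 1)
    print(label, path)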
# This fragment assumes that os, glob, np (numpy), allread, path_1, flag,
# x_all, y_all, l and k have already been set up earlier in the script.
num = 5            # number of training-data folders (change as needed)
#med = ['lac_2', 'lac_3', 'lac_4', 'lac_5', 'lac_6']   # explicit target list
med = ['lac']      # target file-name prefix
os.chdir(path_1)
for i in range(1, num + 1):
    os.chdir('/Users/toshinari/Downloads/SVM_train/SVM_train_{0}'.format(i))
    for w in med:
        print(w)
        for j in sorted(glob.glob("{0}*.txt".format(w))):
            try:
                x, ref = allread('Trans', 'a', 'b', 'c', 'd', 'e').Frequency_trans_reflect_is_TPG(
                    "{0}".format(j), "ref_s.txt", 1.05, 1.8)
                print(j)
                if flag == 0:
                    x_all = x
                    flag += 1
                else:
                    x_all = np.append(x_all, x, axis=0)
                y_all.append(l)
            except FileNotFoundError as e:
                print(e)
        l = l + 1      # next class label for the next target/folder
    i = i + 1          # no effect: the for loop rebinds i on each iteration
    k = k + 1
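# --- Hedged sketch (not part of the original script) ---
# np.append(x_all, x, axis=0) copies the accumulated array on every iteration.
# An equivalent pattern is to collect the per-file spectra in a list and stack
# them once after the loops; the toy arrays below stand in for the real data.
import numpy as np

chunks = []
for spectrum in (np.ones((1, 4)), 2 * np.ones((1, 4))):  # stand-ins for x
    chunks.append(spectrum)
x_all_stacked = np.vstack(chunks)   # same result as the repeated np.append
print(x_all_stacked.shape)          # (2, 4)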
import numpy as np

from lib.Allread import allread
from lib.train_test_split import train_test_split
from lib.machine_learning.classification import svm, kNN, pCA
from lib.visualization import colorcode

y_all = []
flag = 0
# Read the main data.
for i in range(2, 5):
    #i = i * 0.5
    for j in range(1, 4):
        try:
            x = allread(
                'transmittance', '{}mm'.format(i)
            ).Frequency_trans_reflect_TDS(
                r'C:\Users\tera\PycharmProjects\20190509\Si_touka\{}\{}.txt'.format(i, j),
                r'C:\Users\tera\PycharmProjects\20190509\Si_touka\ref.txt',
                1.0, 2.0)
            if flag == 0:
                x_all = x
                flag += 1
            else:
                x_all = np.append(x_all, x, axis=0)
            y_all.append(i * 2)    # class label (here i is already an integer)
        except FileNotFoundError as e:
            print(e)

# train_test_split(features, labels, number of training samples per thickness)
train_x, train_y, test_x, test_y = train_test_split(x_all, y_all, 1)
#print(train_x)
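# --- Hedged sketch (not part of the original script) ---
# The raw strings (r'...') above keep the Windows backslashes from being read
# as escape sequences.  On Windows, os.path.join builds the same paths without
# hard-coding the separator; the base directory below is copied from the script.
import os

base = r'C:\Users\tera\PycharmProjects\20190509\Si_touka'
sample_path = os.path.join(base, '3', '2.txt')
ref_path = os.path.join(base, 'ref.txt')
print(sample_path, ref_path)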
import numpy as np

from lib.Allread import allread
from lib.train_test_split import train_test_split
from lib.machine_learning.classification import svm, kNN, pCA, iCA
from lib.visualization import colorcode

y_all = []
flag = 0
# Read the main data.
for i in range(2, 5):
    i = i * 0.5            # thickness in mm: 1.0, 1.5, 2.0
    for j in range(1, 5):
        try:
            x = allread('reflectance', '{}mm'.format(i)).Time_intensity(
                '/Users/ryoya/kawaseken/20190123_fix/2019_0123_{0}mm_{1}.txt'.format(i, j))
            if flag == 0:
                x_all = x
                flag += 1
            else:
                x_all = np.append(x_all, x, axis=0)
            # Integer labels are needed downstream, so undo the 0.5 mm scaling.
            y_all.append(int(i * 2))
        except FileNotFoundError as e:
            print(e)

# train_test_split(features, labels, number of training samples per thickness)
train_x, train_y, test_x, test_y = train_test_split(x_all, y_all, 1)
#print(train_x)
#print(train_y)
#print(test_x)
#print(test_y)
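# --- Hedged sketch (not part of the original script) ---
# The pCA/colorcode helpers are project-specific, so this sketch uses
# scikit-learn's PCA as a stand-in to show a typical 2-component projection of
# the pooled time-domain traces in x_all from the script above.  Every call
# below is an assumption about intent, not the project's actual API.
from sklearn.decomposition import PCA

pca = PCA(n_components=2)
scores = pca.fit_transform(x_all)               # one 2-D point per waveform
print(scores.shape, pca.explained_variance_ratio_)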