예제 #1
0
    def get_data(self,
                 balanced=1,
                 batch_size=20,
                 tra_val_split=0.8,
                 use_validation=True):
        """Fetch the OASIS data and expose it as batched tf.data pipelines.

        Stores the raw test arrays on ``self.X_test`` / ``self.y_test`` and
        builds ``self.train_ds`` / ``self.val_ds`` from the training split.

        Parameters
        ----------
        balanced : forwarded to ``DATA.Fetch_OASIS`` (class-balancing flag).
        batch_size : int, batch size for the training (and validation) dataset.
        tra_val_split : float, fraction of the training subjects used for
            actual training; the remainder becomes the validation set.
        use_validation : bool, when False all but one sample go to training
            and the single leftover sample forms a stub validation set
            batched with batch size 1.
        """
        self.use_validation = use_validation

        data = DATA()
        data.Fetch_OASIS(balanced=balanced)
        data.Train_Test(TRAIN_TEST_SPLIT, random=RANDOM_SEED)
        data.Add_MRI(self.ROI_nums)
        data.Split_Data()

        # Training split: binarize labels in place (positive -> 1, else 0)
        # and reshape them into a column vector.
        feats = data.features_train
        labels = data.labels_train
        labels[labels > 0] = 1
        labels[labels <= 0] = 0
        labels.shape = (len(labels), 1)

        # Test split: same label mapping, kept as plain arrays on self.
        feats_test = data.features_test
        labels_test = data.labels_test
        labels_test[labels_test > 0] = 1
        labels_test[labels_test <= 0] = 0
        labels_test.shape = (len(labels_test), 1)

        self.X_test = feats_test
        self.y_test = labels_test

        # Carve the training split into training/validation partitions.
        n_total = labels.shape[0]
        n_train = floor(tra_val_split * n_total) if use_validation else n_total - 1

        y_tra, y_val = labels[:n_train], labels[n_train:]
        X_tra, X_val = feats[:n_train, ...], feats[n_train:, ...]

        # CREATE TENSORFLOW DATASETS
        self.train_ds = tf.data.Dataset.from_tensor_slices(
            (X_tra, y_tra)).shuffle(3000).batch(batch_size)
        val_batch = batch_size if use_validation else 1
        self.val_ds = tf.data.Dataset.from_tensor_slices(
            (X_val, y_val)).batch(val_batch)
예제 #2
0
from sklearn.metrics import accuracy_score
from sklearn import svm
from sklearn.model_selection import cross_validate

from DataPrep import DATA
from ATLAS import ATLAS

# Per-ROI linear-SVM sweep: for each atlas region, cross-validate a linear
# SVC over a grid of C values and collect mean train/test scores.
# NOTE(review): this excerpt appears truncated — `file_object`,
# `train_score`, and `test_score` are presumably consumed (logged/plotted)
# after the visible end; `file_object` is never closed here.
Atlas = ATLAS()

# Append-mode log file under ./SVM_Outputs/ (directory assumed to exist).
FileDir = os.getcwd()+'/SVM_Outputs/'
file_object = open(FileDir+'SVM_Log.txt', "a")
for i in range(3,46):
    # Human-readable label for ROI `i`, used to name the output figure.
    ROI = Atlas.dataset_files.labels[i]
    FigName = "SVM_"+str(i)+"_"+ROI+".png"

    Data = DATA()
    Data.Train_Test(0.8)
    selectors = [i] # single-ROI selector; e.g. [34, 35] is the hippocampus mask, "brain" selects the whole brain
    Data.Add_MRI(selectors)
    Data.Split_Data()

    # 50 log-spaced regularization strengths in [1e-4, 1e1].
    C_space = np.logspace(-4,1,50)
    train_score = []
    test_score = []
    print("Feature Size = "+str(Data.features_train.shape[1]))
    print("Started Training for "+FigName+"....")
    for C in C_space:
        # 4-fold CV per C; mean fold scores are collected for this ROI.
        SVM = svm.SVC(kernel='linear', C=C)
        cvs = cross_validate(SVM,Data.features_train,Data.labels_train, cv=4,return_train_score=True)
        train_score.append(np.mean(cvs["train_score"]))
        test_score.append(np.mean(cvs["test_score"]))
예제 #3
0

# Setup for a PCA-vs-ICA component sweep comparing SVM / AdaBoost / NN
# classifiers on whole-brain features.
# NOTE(review): this excerpt is truncated — the accumulator lists are
# presumably filled by a loop over `Comp_space` past the visible end, and
# none of the opened log files are closed here.
Atlas = ATLAS()

# One append-mode log file per (classifier, decomposition) pair under
# ./PCA_vs_ICA/ (directory assumed to exist).
FileDir = os.getcwd() + '/PCA_vs_ICA/'
FigName = "Brain_PCA"
file_SVM_p = open(FileDir + 'SVM_PCA_Log.txt', "a")
file_SVM_i = open(FileDir + 'SVM_ICA_Log.txt', "a")

file_ADA_p = open(FileDir + 'ADA_PCA_Log.txt', "a")
file_ADA_i = open(FileDir + 'ADA_ICA_Log.txt', "a")

file_NN_p = open(FileDir + 'NN_PCA_Log.txt', "a")
file_NN_i = open(FileDir + 'NN_ICA_Log.txt', "a")

# Whole-brain features, fixed 80/20 split with seed 12345 for repeatability.
Data = DATA()
Data.Train_Test(0.8, 12345)
Data.Add_MRI("brain")
Data.Split_Data()

# Candidate numbers of PCA/ICA components to sweep: 1..59.
Comp_space = np.arange(1, 60)

# Accumulators: validation mean/std and test score per component count.
SVM_PCA_Valid = []
SVM_PCA_Valid_STD = []
SVM_PCA_Test = []

SVM_ICA_Valid = []
SVM_ICA_Valid_STD = []
SVM_ICA_Test = []

ADA_PCA_Valid = []
예제 #4
0
    def get_data(self, balanced=1, tra_val_split=0.8, use_validation=True):
        """Fetch OASIS tabular features plus whole-brain 3D GM/WM volumes.

        Populates ``self.X_tra`` / ``self.X_val`` / ``self.X_test`` as
        3-element lists ``[gm_volumes, wm_volumes, tabular_features]`` and
        the matching binary label column vectors ``self.y_tra`` /
        ``self.y_val`` / ``self.y_test``.

        Parameters
        ----------
        balanced : forwarded to ``DATA.Fetch_OASIS`` (class-balancing flag).
        tra_val_split : float
            Fraction of the training subjects used for actual training; the
            remainder becomes the validation set.
        use_validation : bool
            When False, all but one training sample go to training and the
            single leftover sample forms a stub validation set.
        """
        self.use_validation = use_validation
        Data = DATA()
        Data.Fetch_OASIS(balanced=balanced)
        Data.Train_Test(TRAIN_TEST_SPLIT, random=RANDOM_SEED)
        Data.Split_Data()

        # GET TRAINING AND TEST SETS
        # Binarize labels in place: positive -> 1, non-positive -> 0.
        # Consistency fix: was `y[y < 0] = 0`; use `<= 0` to match the
        # test-set mapping below and the sibling get_data variants (values
        # equal to 0 already hold 0, so numeric results are unchanged).
        features_train = Data.features_train
        y = Data.labels_train
        y[y > 0] = 1
        y[y <= 0] = 0
        y.shape = (len(y), 1)

        features_test = Data.features_test
        y_test = Data.labels_test
        y_test[y_test > 0] = 1
        y_test[y_test <= 0] = 0
        y_test.shape = (len(y_test), 1)

        # Load the 3D image volumes and append a trailing channel axis
        # (presumably yielding (subject, x, y, z, 1) — confirm upstream).
        Data.load_images()
        gm_imgs_3D = Data.gm_imgs_3D[..., np.newaxis]
        wm_imgs_3D = Data.wm_imgs_3D[..., np.newaxis]

        # Partition the volumes with the same train/test indices used for
        # the tabular features.
        idx_train = Data.idx_train
        idx_test = Data.idx_test

        gm_imgs_3D_train = gm_imgs_3D[idx_train, ...]
        wm_imgs_3D_train = wm_imgs_3D[idx_train, ...]

        gm_imgs_3D_test = gm_imgs_3D[idx_test, ...]
        wm_imgs_3D_test = wm_imgs_3D[idx_test, ...]

        # SPLIT TRAINING INTO TRAINING/VALIDATION
        len_yt = y.shape[0]
        if use_validation:
            train_size = floor(tra_val_split * len_yt)
        else:
            # Keep one sample aside so the validation arrays are non-empty.
            train_size = len_yt - 1

        y_tra = y[:train_size]
        features_tra = features_train[:train_size, ...]

        y_val = y[train_size:]
        features_val = features_train[train_size:, ...]

        gm_imgs_3D_tra = gm_imgs_3D_train[:train_size, ...]
        wm_imgs_3D_tra = wm_imgs_3D_train[:train_size, ...]

        gm_imgs_3D_val = gm_imgs_3D_train[train_size:, ...]
        wm_imgs_3D_val = wm_imgs_3D_train[train_size:, ...]

        # Multi-input format: [GM volumes, WM volumes, tabular features].
        X_tra = [gm_imgs_3D_tra, wm_imgs_3D_tra, features_tra]
        X_val = [gm_imgs_3D_val, wm_imgs_3D_val, features_val]
        X_test = [gm_imgs_3D_test, wm_imgs_3D_test, features_test]

        self.y_test = y_test
        self.X_test = X_test

        self.y_tra = y_tra
        self.X_tra = X_tra

        self.y_val = y_val
        self.X_val = X_val
예제 #5
0
    def get_data(self, balanced=1, tra_val_split=0.8, use_validation=True):
        """Fetch OASIS tabular features plus per-ROI 3D GM/WM volumes.

        Populates ``self.X_tra`` / ``self.X_val`` / ``self.X_test`` as lists
        of the form ``gm_ROIs + wm_ROIs + [tabular_features]`` (one array
        per selected ROI) and the matching binary label column vectors
        ``self.y_tra`` / ``self.y_val`` / ``self.y_test``.

        Parameters
        ----------
        balanced : forwarded to ``DATA.Fetch_OASIS`` (class-balancing flag).
        tra_val_split : float
            Fraction of the training subjects used for actual training; the
            remainder becomes the validation set.
        use_validation : bool
            When False, all but one training sample go to training and the
            single leftover sample forms a stub validation set.
        """
        self.use_validation = use_validation
        Data = DATA()
        Data.Fetch_OASIS(balanced=balanced)
        Data.Train_Test(TRAIN_TEST_SPLIT, random=RANDOM_SEED)
        Data.Split_Data()

        # GET TRAINING AND TEST SETS
        # Binarize labels in place: positive -> 1, non-positive -> 0.
        # Consistency fix: was `y[y < 0] = 0`; use `<= 0` to match the
        # test-set mapping below and the sibling get_data variants (values
        # equal to 0 already hold 0, so numeric results are unchanged).
        features_train = Data.features_train
        y = Data.labels_train
        y[y > 0] = 1
        y[y <= 0] = 0
        y.shape = (len(y), 1)

        features_test = Data.features_test
        y_test = Data.labels_test
        y_test[y_test > 0] = 1
        y_test[y_test <= 0] = 0
        y_test.shape = (len(y_test), 1)

        # Extract the 3D ROI volumes selected by self.ROI_nums.
        Data.load_images()
        Data.get_3D_ROI(self.ROI_nums)
        ROIs_3D_gm = Data.ROIs_3D_gm
        ROIs_3D_wm = Data.ROIs_3D_wm

        idx_train = Data.idx_train
        idx_test = Data.idx_test

        # Partition each ROI's volumes with the same train/test indices
        # used for the tabular features.
        ROIs_3D_gm_train = []
        ROIs_3D_gm_test = []
        ROIs_3D_wm_train = []
        ROIs_3D_wm_test = []
        for k in range(self.len_ROI):
            ROIs_3D_gm_train.append(ROIs_3D_gm[k][idx_train, ...])
            ROIs_3D_gm_test.append(ROIs_3D_gm[k][idx_test, ...])
            ROIs_3D_wm_train.append(ROIs_3D_wm[k][idx_train, ...])
            ROIs_3D_wm_test.append(ROIs_3D_wm[k][idx_test, ...])

        # SPLIT TRAINING INTO TRAINING/VALIDATION
        len_yt = y.shape[0]
        if use_validation:
            train_size = floor(tra_val_split * len_yt)
        else:
            # Keep one sample aside so the validation arrays are non-empty.
            train_size = len_yt - 1

        y_tra = y[:train_size]
        features_tra = features_train[:train_size, ...]

        y_val = y[train_size:]
        features_val = features_train[train_size:, ...]

        # Split each ROI's training volumes into training/validation parts.
        train_ROIs_3D_gm = []
        val_ROIs_3D_gm = []
        train_ROIs_3D_wm = []
        val_ROIs_3D_wm = []
        for k in range(self.len_ROI):
            train_ROIs_3D_gm.append(ROIs_3D_gm_train[k][:train_size, ...])
            val_ROIs_3D_gm.append(ROIs_3D_gm_train[k][train_size:, ...])

            train_ROIs_3D_wm.append(ROIs_3D_wm_train[k][:train_size, ...])
            val_ROIs_3D_wm.append(ROIs_3D_wm_train[k][train_size:, ...])

        # Multi-input format: GM ROIs, then WM ROIs, then tabular features.
        X_tra = train_ROIs_3D_gm + train_ROIs_3D_wm + [features_tra]
        X_val = val_ROIs_3D_gm + val_ROIs_3D_wm + [features_val]
        X_test = ROIs_3D_gm_test + ROIs_3D_wm_test + [features_test]

        self.y_test = y_test
        self.X_test = X_test

        self.y_tra = y_tra
        self.X_tra = X_tra

        self.y_val = y_val
        self.X_val = X_val