# Example 1
    def get_data(self,
                 balanced=1,
                 batch_size=20,
                 tra_val_split=0.8,
                 use_validation=True):
        """Fetch the OASIS data and build train/validation tf.data pipelines.

        Loads the dataset, binarizes the labels (positive -> 1, else 0),
        stores the held-out test arrays on ``self``, splits the training
        portion into train/validation, and creates batched
        ``tf.data.Dataset`` objects in ``self.train_ds`` / ``self.val_ds``.

        Args:
            balanced: forwarded to ``DATA.Fetch_OASIS`` (class balancing flag).
            batch_size: batch size for the training (and validation) dataset.
            tra_val_split: fraction of the training data kept for training
                when ``use_validation`` is True.
            use_validation: if False, nearly all samples go to training and
                the (single-sample) validation set is batched with size 1.
        """
        self.use_validation = use_validation

        # Load, split and assemble the dataset via the project DATA helper.
        data = DATA()
        data.Fetch_OASIS(balanced=balanced)
        data.Train_Test(TRAIN_TEST_SPLIT, random=RANDOM_SEED)
        data.Add_MRI(self.ROI_nums)
        data.Split_Data()

        def _binarize(labels):
            # In-place: positive labels -> 1, the rest -> 0, then reshape to
            # a column vector via shape assignment (keeps the same array).
            labels[labels > 0] = 1
            labels[labels <= 0] = 0
            labels.shape = (len(labels), 1)
            return labels

        features = data.features_train
        labels = _binarize(data.labels_train)

        # Keep the held-out test set on the instance.
        self.y_test = _binarize(data.labels_test)
        self.X_test = data.features_test

        # Split the training portion into training/validation. Without a
        # validation set, keep all but one sample for training.
        if use_validation:
            cut = floor(tra_val_split * len(labels))
        else:
            cut = len(labels) - 1

        y_tra, y_val = labels[:cut], labels[cut:]
        X_tra, X_val = features[:cut, ...], features[cut:, ...]

        # Build the TensorFlow input pipelines.
        self.train_ds = (tf.data.Dataset
                         .from_tensor_slices((X_tra, y_tra))
                         .shuffle(3000)
                         .batch(batch_size))
        val_batch = batch_size if use_validation else 1
        self.val_ds = (tf.data.Dataset
                       .from_tensor_slices((X_val, y_val))
                       .batch(val_batch))
# Example 2
from DataPrep import DATA
from ATLAS import ATLAS

# Per-ROI linear-SVM sweep: for each atlas ROI, cross-validate an SVM over a
# log-spaced grid of C values and select the best-scoring C.
Atlas = ATLAS()

# Append results to a running log under ./SVM_Outputs/.
# NOTE(review): file is opened here but not closed in this excerpt — confirm
# it is closed later in the script (or use a `with` block).
FileDir = os.getcwd()+'/SVM_Outputs/'
file_object = open(FileDir+'SVM_Log.txt', "a")
for i in range(3,46):  # ROI indices 3..45 in the atlas label table
    ROI = Atlas.dataset_files.labels[i]
    FigName = "SVM_"+str(i)+"_"+ROI+".png"

    # Build a fresh 80/20 train/test split using only this ROI's features.
    Data = DATA()
    Data.Train_Test(0.8)
    selectors = [i]  # ROI selector; e.g. [34, 35] = hippocampus mask, "brain" = whole brain
    Data.Add_MRI(selectors)
    Data.Split_Data()

    # Sweep the SVM regularization strength C over a log grid (1e-4 .. 1e1).
    C_space = np.logspace(-4,1,50)
    train_score = []
    test_score = []
    print("Feature Size = "+str(Data.features_train.shape[1]))
    print("Started Training for "+FigName+"....")
    for C in C_space:
        SVM = svm.SVC(kernel='linear', C=C)
        # 4-fold cross-validation; record mean train and validation scores.
        cvs = cross_validate(SVM,Data.features_train,Data.labels_train, cv=4,return_train_score=True)
        train_score.append(np.mean(cvs["train_score"]))
        test_score.append(np.mean(cvs["test_score"]))
        # print("C="+str(C)+"Train"+str(np.mean(cvs["train_score"]))+"Valid"+str(np.mean(cvs["test_score"])))

    # Best C = the grid point with the highest mean cross-validation score.
    maxC = C_space[test_score.index(max(test_score))]
# Example 3
# PCA-vs-ICA comparison setup: open per-model log files, load the whole-brain
# dataset once, and prepare result accumulators for each classifier/reduction
# combination. (Excerpt — the sweep over Comp_space continues below.)
Atlas = ATLAS()

# One append-mode log file per (classifier, dimensionality-reduction) pair.
# NOTE(review): none of these handles are closed in this excerpt — confirm
# they are closed at the end of the script.
FileDir = os.getcwd() + '/PCA_vs_ICA/'
FigName = "Brain_PCA"
file_SVM_p = open(FileDir + 'SVM_PCA_Log.txt', "a")
file_SVM_i = open(FileDir + 'SVM_ICA_Log.txt', "a")

file_ADA_p = open(FileDir + 'ADA_PCA_Log.txt', "a")
file_ADA_i = open(FileDir + 'ADA_ICA_Log.txt', "a")

file_NN_p = open(FileDir + 'NN_PCA_Log.txt', "a")
file_NN_i = open(FileDir + 'NN_ICA_Log.txt', "a")

# Whole-brain features, fixed 80/20 split with seed 12345 for reproducibility.
Data = DATA()
Data.Train_Test(0.8, 12345)
Data.Add_MRI("brain")
Data.Split_Data()

# Number-of-components grid to sweep (1..59).
Comp_space = np.arange(1, 60)

# Accumulators: mean validation score, its std, and test score per component
# count, for each classifier (SVM/ADA/...) x reduction (PCA/ICA) combination.
SVM_PCA_Valid = []
SVM_PCA_Valid_STD = []
SVM_PCA_Test = []

SVM_ICA_Valid = []
SVM_ICA_Valid_STD = []
SVM_ICA_Test = []

ADA_PCA_Valid = []
ADA_PCA_Valid_STD = []
ADA_PCA_Test = []