Example #1
0
File: CNN.py  Project: mikbuch/pymri
    def train_and_test(self, training_data, test_data):
        """Train the wrapped network with SGD and evaluate on the test set.

        Both datasets are first converted to Theano shared variables via
        share_data before being handed to the network's SGD routine.
        """
        from pymri.model.ann.theano_script import share_data

        shared_train, shared_test = share_data(training_data, test_data)
        self.net.SGD(shared_train,
                     self.epochs,
                     self.mini_batch_size,
                     self.learning_rate,
                     shared_test,
                     verbose=self.verbose)
Example #2
0
    def train_and_test(self, training_data, test_data):
        """Train the network via SGD and evaluate it on the test set.

        A 'simple' network type consumes the datasets as-is; any other
        type requires Theano shared variables, produced by share_data.
        """
        if 'simple' not in self.type:
            # Theano-backed networks need the data as shared variables.
            from pymri.model.ann.theano_script import share_data
            training_data, test_data = share_data(training_data, test_data)
            self.net.SGD(
                training_data,
                self.epochs,
                self.mini_batch_size,
                self.learning_rate,
                test_data,
                verbose=self.verbose,
            )
        else:
            self.net.SGD(
                training_data,
                self.epochs,
                self.mini_batch_size,
                self.learning_rate,
                test_data=test_data,
                verbose=self.verbose,
            )
Example #3
0
File: FNN.py  Project: mikbuch/pymri
    def train_and_test(self, training_data, test_data):
        """Run SGD training on the underlying network, then score on test data.

        Networks whose type contains 'simple' take the raw datasets directly;
        all other types get Theano shared variables built by share_data.
        """
        is_simple = 'simple' in self.type
        if is_simple:
            self.net.SGD(
                training_data, self.epochs,
                self.mini_batch_size, self.learning_rate,
                test_data=test_data,
                verbose=self.verbose,
            )
            return
        from pymri.model.ann.theano_script import share_data
        training_data, test_data = share_data(training_data, test_data)
        self.net.SGD(
            training_data, self.epochs,
            self.mini_batch_size, self.learning_rate,
            test_data,
            verbose=self.verbose,
        )
Example #4
0
], mini_batch_size)

# ### Dimensionality reduction ################################################

from sklearn.feature_selection import SelectKBest, f_classif

# ### Define the dimension reduction to be used.
# Here we use a classical univariate feature selection based on F-test,
# namely ANOVA. We set the number of features to be selected to 300 (k=300).
feature_selection = SelectKBest(f_classif, k=300)

# We fit the selector to our training dataset.
# NOTE(review): X, y, X_t, y_t are assumed to be defined earlier in the
# file (train/test features and labels) — confirm against the full script.
feature_selection.fit(X, y)
# Transform training dataset
X = feature_selection.transform(X)
# Transform testing dataset
X_t = feature_selection.transform(X_t)

# ### Train and test classifier ###############################################

# prior chance level: fraction of positive labels in the test set
# (meaningful as a chance baseline only for binary 0/1 labels — TODO confirm)
print('Prior chance: %0.2f' % (y_t.sum() / float(y_t.shape[0])))

# prepare data in the format acceptable by theano scripts
from pymri.model.ann.theano_script import share_data
training_data, test_data = share_data((X, y), (X_t, y_t))

# Train for 500 epochs at learning rate 3.0; `net` and `mini_batch_size`
# are assumed to be defined earlier in the file — TODO confirm.
net.SGD(training_data, 500, mini_batch_size, 3., test_data, verbose=1)

# from sklearn.metrics import confusion_matrix
Example #5
0
# ### Dimensionality reduction ################################################

from sklearn.feature_selection import SelectKBest, f_classif

# ### Define the dimension reduction to be used.
# Here we use a classical univariate feature selection based on F-test,
# namely ANOVA. We set the number of features to be selected to 300 (k=300).
feature_selection = SelectKBest(f_classif, k=300)

# We fit the selector to our training dataset.
# NOTE(review): X, y, X_t, y_t are assumed to be defined earlier in the
# file (train/test features and labels) — confirm against the full script.
feature_selection.fit(X, y)
# Transform training dataset
X = feature_selection.transform(X)
# Transform testing dataset
X_t = feature_selection.transform(X_t)


# ### Train and test classifier ###############################################

# prior chance level: fraction of positive labels in the test set
# (meaningful as a chance baseline only for binary 0/1 labels — TODO confirm)
print('Prior chance: %0.2f' % (y_t.sum()/float(y_t.shape[0])))

# prepare data in the format acceptable by theano scripts
from pymri.model.ann.theano_script import share_data
training_data, test_data = share_data((X, y), (X_t, y_t))

# Train for 500 epochs at learning rate 3.0; `net` and `mini_batch_size`
# are assumed to be defined earlier in the file — TODO confirm.
net.SGD(training_data, 500, mini_batch_size, 3., test_data, verbose=1)


# from sklearn.metrics import confusion_matrix