def train_and_test(self, training_data, test_data):
    """Train ``self.net`` with SGD on Theano shared data and evaluate it.

    The raw ``(X, y)`` tuples are first converted to Theano shared
    variables via ``share_data``, which is the input format the
    Theano-backed ``self.net.SGD`` expects.

    Parameters
    ----------
    training_data : tuple
        ``(X, y)`` training samples and labels.
    test_data : tuple
        ``(X_t, y_t)`` held-out samples and labels used for scoring.
    """
    # Local import: keeps Theano out of the import path for callers
    # that never train this network type.
    from pymri.model.ann.theano_script import share_data
    training_data, test_data = share_data(training_data, test_data)
    self.net.SGD(
        training_data, self.epochs, self.mini_batch_size,
        self.learning_rate, test_data, verbose=self.verbose,
    )
def train_and_test(self, training_data, test_data):
    """Fit ``self.net`` on *training_data* and score it on *test_data*.

    'simple' network types consume the plain ``(X, y)`` tuples directly;
    any other type is Theano-backed and needs the data wrapped in shared
    variables first.
    """
    needs_shared = 'simple' not in self.type
    if needs_shared:
        # Theano-backed networks require shared-variable datasets.
        from pymri.model.ann.theano_script import share_data
        training_data, test_data = share_data(training_data, test_data)
        self.net.SGD(
            training_data, self.epochs, self.mini_batch_size,
            self.learning_rate, test_data, verbose=self.verbose,
        )
    else:
        self.net.SGD(
            training_data, self.epochs, self.mini_batch_size,
            self.learning_rate, test_data=test_data, verbose=self.verbose,
        )
def train_and_test(self, training_data, test_data):
    """Run SGD training on ``self.net`` and evaluate against the test set.

    Dispatches on ``self.type``: 'simple' networks take raw tuples,
    everything else gets Theano shared variables.
    """
    sgd_args = (self.epochs, self.mini_batch_size, self.learning_rate)
    if 'simple' in self.type:
        # Plain networks accept the (X, y) tuples as-is.
        self.net.SGD(
            training_data, *sgd_args,
            test_data=test_data, verbose=self.verbose,
        )
    else:
        # Theano networks need the data promoted to shared variables.
        from pymri.model.ann.theano_script import share_data
        training_data, test_data = share_data(training_data, test_data)
        self.net.SGD(
            training_data, *sgd_args, test_data,
            verbose=self.verbose,
        )
], mini_batch_size) # ### Dimensionality reduction ################################################ from sklearn.feature_selection import SelectKBest, f_classif # ### Define the dimension reduction to be used. # Here we use a classical univariate feature selection based on F-test, # namely Anova. We set the number of features to be selected to 784 feature_selection = SelectKBest(f_classif, k=300) # We fit the selector to our tr6ining dataset feature_selection.fit(X, y) # Transform training dataset X = feature_selection.transform(X) # Transform testing dataset X_t = feature_selection.transform(X_t) # ### Train and test classifier ############################################### # prior chance level print('Prior chance: %0.2f' % (y_t.sum() / float(y_t.shape[0]))) # prepare data in the format acceptable by theano scripts from pymri.model.ann.theano_script import share_data training_data, test_data = share_data((X, y), (X_t, y_t)) net.SGD(training_data, 500, mini_batch_size, 3., test_data, verbose=1) # from sklearn.metrics import confusion_matrix
# ### Dimensionality reduction ################################################
from sklearn.feature_selection import SelectKBest, f_classif

# ### Define the dimension reduction to be used.
# Here we use a classical univariate feature selection based on F-test,
# namely ANOVA. We set the number of features to be selected to 300.
feature_selection = SelectKBest(f_classif, k=300)

# We fit the selector to our training dataset only (no test-set leakage).
feature_selection.fit(X, y)

# Transform training dataset
X = feature_selection.transform(X)
# Transform testing dataset
X_t = feature_selection.transform(X_t)

# ### Train and test classifier ###############################################
# Prior chance level: fraction of positive labels in the test set.
print('Prior chance: %0.2f' % (y_t.sum() / float(y_t.shape[0])))

# Prepare data in the format acceptable by the theano scripts.
from pymri.model.ann.theano_script import share_data
training_data, test_data = share_data((X, y), (X_t, y_t))

net.SGD(training_data, 500, mini_batch_size, 3., test_data, verbose=1)