Code Example #1
 def fit(self, X, y):  # check_estimator wants the variable names X, y
     # learn the mSDA mappings and per-layer representations on the training data
     mappings, representations = msda.mSDA(X, self.prob_corruption,
                                           self.num_layers,
                                           self.use_nonlinearity)
     self.mappings = mappings
     # train the downstream classifier on the last layer's representation
     msda_train_data = representations[-1]
     self.clf.fit(msda_train_data, y)
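This fit method reads self.prob_corruption, self.num_layers, self.use_nonlinearity and self.clf, which are not defined in the excerpt. A minimal sketch of a constructor that could supply them is shown below; the class name MSDAClassifier and the default values are hypothetical, not taken from the project, and the linear SVM is only used because a later example classifies with one.

import msda
from sklearn.svm import LinearSVC

class MSDAClassifier(object):  # hypothetical wrapper name
    def __init__(self, prob_corruption=0.5, num_layers=3, use_nonlinearity=True):
        # mSDA hyperparameters consumed by fit/predict
        self.prob_corruption = prob_corruption
        self.num_layers = num_layers
        self.use_nonlinearity = use_nonlinearity
        self.mappings = None       # learned in fit, reused in predict
        self.clf = LinearSVC()     # downstream classifier on the mSDA representation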
Code Example #2
 def predict(self, X):
     # transform the test data with the mappings learned during fit
     maps, test_reps = msda.mSDA(X, self.prob_corruption, self.num_layers,
                                 self.use_nonlinearity, self.mappings)
     # predict from the last layer's representation
     msda_test_data = test_reps[-1]
     test_predictions = self.clf.predict(msda_test_data)
     return test_predictions
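Together, fit and predict give an estimator that is trained and applied like any other classifier. A hedged usage sketch follows; MSDAClassifier and the data variables are assumptions for illustration, not names from the project.

estimator = MSDAClassifier(prob_corruption=0.5, num_layers=3, use_nonlinearity=True)
estimator.fit(train_data, train_labels)      # learns the mappings and trains the inner classifier
predictions = estimator.predict(test_data)   # reuses the stored mappings on the test data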
Code Example #3
File: text_analysis.py  Project: markheimann/mSDA
num_layers = 3
subproblem_size = 400

#need to transpose the data into the format mSDA expects (#features x #data);
#the deep representation is the output of the last layer
#variant with the low-dimensional approximation described in the paper (disabled below)
'''
subproblem_mappings, subseq_mappings, representations = msda.mSDA_lowDimApprox(
    train_data, prob_corruption, num_layers, subproblem_size)
train_deepRep = representations[:,:,-1]
#use same weights as on training features to transform test data
test_deepRep = msda.mSDA_lowDimApprox(
    test_data, prob_corruption, num_layers, subproblem_size,
    subproblem_mappings, subseq_mappings)[2][:, :, -1]
'''
#without low dimensional approximation
train_mappings, train_reps = msda.mSDA(train_data, prob_corruption, num_layers)
train_deepRep = train_reps[:,:,-1]
#use same weights as on training features to transform test data
test_deepRep = msda.mSDA(test_data, prob_corruption, num_layers,
                         train_mappings)[1][:, :, -1]
#'''

#sklearn requires (#data x #features) so transpose back
train_deepRep = train_deepRep.transpose()
test_deepRep = test_deepRep.transpose()

after_msda = time.time()
print("used msda in %s seconds" % (after_msda - before_msda))
print "Shape of msda train rep: ", train_deepRep.shape
print "Shape of msda test rep: ", test_deepRep.shape

#...and classify with linear SVM
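The excerpt stops at the classification step. A minimal sketch of that step with scikit-learn's LinearSVC follows; train_labels and test_labels are assumed names that do not appear in the excerpt.

from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score

svm = LinearSVC()
svm.fit(train_deepRep, train_labels)           # (#data x #features) after the transpose above
test_predictions = svm.predict(test_deepRep)
print("SVM accuracy: %s" % accuracy_score(test_labels, test_predictions))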
Code Example #4
File: text_analysis.py  Project: wzell/mSDA
num_layers = 3
subproblem_size = 400

#need to transpose the data into the format mSDA expects (#features x #data);
#the deep representation is the output of the last layer
#variant with the low-dimensional approximation described in the paper (disabled below)
'''
subproblem_mappings, subseq_mappings, representations = msda.mSDA_lowDimApprox(
    train_data, prob_corruption, num_layers, subproblem_size)
train_deepRep = representations[:,:,-1]
#use same weights as on training features to transform test data
test_deepRep = msda.mSDA_lowDimApprox(
    test_data, prob_corruption, num_layers, subproblem_size,
    subproblem_mappings, subseq_mappings)[2][:, :, -1]
'''
#without low dimensional approximation
train_mappings, train_reps = msda.mSDA(train_data, prob_corruption, num_layers)
train_deepRep = train_reps[:, :, -1]
#use same weights as on training features to transform test data
test_deepRep = msda.mSDA(test_data, prob_corruption, num_layers,
                         train_mappings)[1][:, :, -1]
#'''

#sklearn requires (#data x #features) so transpose back
train_deepRep = train_deepRep.transpose()
test_deepRep = test_deepRep.transpose()

after_msda = time.time()
print("used msda in %s seconds" % (after_msda - before_msda))
print "Shape of msda train rep: ", train_deepRep.shape
print "Shape of msda test rep: ", test_deepRep.shape
Code Example #5
File: md.py  Project: franciscoprimero/memoria
 def predict(self, X):
     maps, test_reps = msda.mSDA(X, self.prob_corruption, self.num_layers,
                                 self.use_nonlinearity, self.mappings)
     msda_test_data = test_reps[-1]
     test_predictions = self.clf.predict(msda_test_data)
     return test_predictions
Code Example #6
File: md.py  Project: franciscoprimero/memoria
 def fit(self, X, y):  # check_estimator wants the variable names X, y
     mappings, representations = msda.mSDA(X, self.prob_corruption,
                                           self.num_layers, self.use_nonlinearity)
     self.mappings = mappings
     msda_train_data = representations[-1]
     self.clf.fit(msda_train_data, y)