예제 #1
0
def train_test(ds_path):
    data = SparseDataSet(ds_path)
    g, c, fold = 0.25, 128, 2
    ##################################################
    #### This part of the code does Does statistical
    #### feature selection ...
    #labels = np.array([int(n) for n in data.labels.L])
    #ranks = rank_feat(data.getMatrix().T, labels)
    #ranks = [(abs(r),i) for i, r in enumerate(ranks)]
    #ranks.sort()
    #ranks.reverse()
    #feats = [f[1] for f in ranks]
    #data.keepFeatures(feats[:2662])

    data.attachKernel('gaussian', gamma=g)
    s = SVM(C=c)
    r = s.cv(data, numFolds=fold)
    o = open(ds_path + '.pkl', 'wb')
    pickle.dump(r, o)
    o.close()
    print ds_path
예제 #2
0
def train_test(ds_path):
	data = SparseDataSet(ds_path)
	g, c, fold = 0.25, 128, 2
	##################################################
	#### This part of the code does Does statistical 
	#### feature selection ...
	#labels = np.array([int(n) for n in data.labels.L])
	#ranks = rank_feat(data.getMatrix().T, labels)
	#ranks = [(abs(r),i) for i, r in enumerate(ranks)]
	#ranks.sort()
	#ranks.reverse()
	#feats = [f[1] for f in ranks]
	#data.keepFeatures(feats[:2662])

	data.attachKernel('gaussian', gamma = g)
	s=SVM(C=c)
	r = s.cv(data, numFolds=fold)
	o = open(ds_path+'.pkl', 'wb')
	pickle.dump(r, o)
	o.close();
	print ds_path
예제 #3
0
 def train(self, datalist, labelslist):    
     # Build a PyML sparse dataset from the raw data with its labels attached.
     data = SparseDataSet(datalist, L = labelslist)
     # Soft-margin penalty for the SVM held on this object.
     self.svminstance.C = 20
     # NOTE(review): 'degree' is a polynomial-kernel parameter; a 'gaussian'
     # kernel is normally configured with 'gamma' — confirm this argument is
     # actually honored by attachKernel and not silently ignored.
     data.attachKernel('gaussian', degree = 5)
     # Fit the SVM in place on the freshly built dataset.
     self.svminstance.train(data)
예제 #4
0
 def train(self, datalist, labelslist):
     """Fit this object's SVM on a sparse dataset built from the inputs.

     `datalist` supplies the samples and `labelslist` their labels; both
     are handed straight to PyML's SparseDataSet constructor.
     """
     dataset = SparseDataSet(datalist, L=labelslist)
     dataset.attachKernel('gaussian', degree=5)
     # Configure the soft-margin penalty just before fitting.
     self.svminstance.C = 20
     self.svminstance.train(dataset)