Example #1
def trainSVM(features, Cparam):
    '''
    Train a multi-class probabilistic SVM classifier.
    Note:     This function is simply a wrapper to the mlpy-LibSVM functionality for SVM training.
              See function trainSVM_feature() to use a wrapper on both the feature extraction and the SVM training (and parameter tuning) processes.
    ARGUMENTS:
        - features:         a list ([numOfClasses x 1]) whose elements contain numpy matrices of features;
                            each matrix features[i] of class i is [numOfSamples x numOfDimensions]
        - Cparam:           SVM parameter C (cost of constraint violations)
    RETURNS:
        - svm:              the trained SVM variable

    NOTE:
        This function trains a linear-kernel SVM for a given C value. For a different kernel, other parameters must be provided:
        for example, gamma for a polynomial, RBF or sigmoid kernel. Furthermore, nu must be provided for a nu-SVM classifier.
        See the mlpy documentation for more details (http://mlpy.sourceforge.net/docs/3.4/svm.html)
    '''

    [X, Y] = listOfFeatures2Matrix(features)
    svm = mlpy.LibSvm(svm_type='c_svc',
                      kernel_type='linear',
                      eps=0.0000001,
                      C=Cparam,
                      probability=True)
    svm.learn(X, Y)
    return svm
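The NOTE above points out that non-linear kernels need extra parameters. A minimal sketch of an RBF variant under that assumption (trainSVM_rbf and its gamma argument are hypothetical; the listOfFeatures2Matrix() helper is reused from this example):

def trainSVM_rbf(features, Cparam, gamma):
    # Same wrapper as trainSVM, but with an RBF kernel and its gamma parameter
    [X, Y] = listOfFeatures2Matrix(features)
    svm = mlpy.LibSvm(svm_type='c_svc',
                      kernel_type='rbf',
                      gamma=gamma,
                      eps=0.0000001,
                      C=Cparam,
                      probability=True)
    svm.learn(X, Y)
    return svm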
Example #2
def create_and_teach_svm(vectors, labels, param_gamma, param_c):
    """ Create and teach SVM """
    svm_type = "c_svc"
    kernel_type = "rbf"

    svm = mlpy.LibSvm(kernel_type=kernel_type, svm_type=svm_type, gamma=param_gamma, C=param_c)
    svm.learn(vectors, labels)

    return svm
Example #3
def trainSVMregression(Features, Y, C):
    # Linear epsilon-SVR; the mean absolute training error is returned alongside the model.
    # c_svc classification variant, kept for reference:
    # svm = mlpy.LibSvm(svm_type='c_svc', kernel_type='linear', eps=0.0000001, C=C, probability=True)
    svm = mlpy.LibSvm(svm_type='epsilon_svr',
                      kernel_type='linear',
                      eps=0.001,
                      C=C,
                      probability=False)
    svm.learn(Features, Y)
    trainError = numpy.mean(numpy.abs(svm.pred(Features) - Y))
    return svm, trainError
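A hedged usage sketch for trainSVMregression() on made-up 1-D data (illustrative only):

import numpy
Features = numpy.arange(10.0).reshape(-1, 1)   # ten 1-D samples
Y = 2.0 * Features.ravel()                     # a trivially linear target
svm, trainError = trainSVMregression(Features, Y, C=1.0)
print(trainError)  # mean absolute training error; small for this linear target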
Example #4
def fitLinearSVM(data):
    '''
        Build the linear SVM classifier
    '''
    # create the classifier object
    svm = ml.LibSvm(svm_type='c_svc', kernel_type='linear', C=20.0)

    # fit the data
    svm.learn(data[0], data[1])

    # return the classifier
    return svm
Example #5
def train_classifier(fmat, labels, method, c_param, nu_param, learn_rate,
                     n_estimators):
    classifiers = {
        'c_svc':
        mlpy.LibSvm(C=c_param),
        'nu_svc_linear':
        mlpy.LibSvm('nu_svc', 'linear', nu=nu_param),
        'nu_svc_sigmoid':
        mlpy.LibSvm('nu_svc', 'sigmoid', nu=nu_param),
        'c_svc_prob':
        mlpy.LibSvm(probability=True, C=c_param),
        'nu_svc_linear_prob':
        mlpy.LibSvm('nu_svc', 'linear', nu=nu_param, probability=True),
        'nu_svc_sigmoid_prob':
        mlpy.LibSvm('nu_svc', 'sigmoid', nu=nu_param, probability=True),
        'lr':
        mlpy.LibLinear(),
        'gboost':
        GradientBoostingClassifier(learning_rate=learn_rate,
                                   n_estimators=n_estimators)
    }
    clas = classifiers[method]
    if hasattr(clas, 'learn'):
        clas.learn(fmat, labels)
    else:
        clas.fit(fmat, labels)
    return clas
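A hedged usage sketch for train_classifier(); note that the dict builds every classifier eagerly, so all tuning arguments must be supplied even when only one model is used (the toy data is made up):

import numpy as np
fmat = np.random.rand(20, 5)        # 20 samples, 5 features
labels = np.array([1, 2] * 10)      # two balanced classes
clf = train_classifier(fmat, labels, 'c_svc_prob',
                       c_param=1.0, nu_param=0.5,
                       learn_rate=0.1, n_estimators=100)
print(clf.pred(fmat[:3]))           # mlpy models predict via pred()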
Example #6
def get_classifiers(n):
    with open('train_data.json', 'rb') as fp:
        dictionary = json.load(fp)
        dictionary = dict([(k.encode('utf-8'), v)
                           for k, v in dictionary.items()])
    print "LEN", len(dictionary)
    with open('chinese_sents.json', 'rb') as fp:
        chinese_sents = json.load(fp)
    with open('test_ref_dict.json', 'rb') as fp:
        ref_dict = json.load(fp)
        ref_dict = dict([(k.encode('utf-8'), v) for k, v in ref_dict.items()])
    with open('cooccurence_data.json', 'rb') as fp:
        cooccur_data = json.load(fp)
        cooccur_data = dict([(k.encode('utf-8'), v)
                             for k, v in cooccur_data.items()])
    #print dictionary.keys()
    classifiers = {}
    words_train = dictionary.keys()
    words_ref = ref_dict.keys()
    can_eval_words = list(set(words_train) & set(words_ref))
    for word in can_eval_words:
        #print "word", word
        #word = '\xe5\x8f\x83\xe5\x8a\xa0'
        #xs, y is last element
        obs = []
        labels = []
        #Collect all sentences containing this word into a feature vector
        info = dictionary[word]
        sentences = []
        for s in info:
            sentences.append(chinese_sents[s[0]])
        words_vec = get_top_features(n, word, cooccur_data[word])
        for s in info:
            #Create observation vector
            observation_vec = build_observation_vector(words_vec,
                                                       chinese_sents[s[0]],
                                                       cooccur_data[word])
            obs.append(observation_vec)
            #obs[len(obs)-1].append(s[1])
            labels.append(s[1])
        # obs: (observations x attributes) matrix, labels: sense label per observation
        linear_svm = mlpy.LibSvm(kernel_type='linear')
        linear_svm.learn(obs, labels)
        classifiers[word] = (linear_svm, words_vec)
    #print 'x:', obs
    #print 'y:', labels
    return classifiers
Example #7
def trainSVM(self, features, Cparam):
    X = np.array([])
    Y = np.array([])
    for i, f in enumerate(features):
        if i == 0:
            X = f
            Y = i * np.ones((len(f), 1))
        else:
            X = np.vstack((X, f))
            Y = np.append(Y, i * np.ones((len(f), 1)))
    svm = mlpy.LibSvm(svm_type='c_svc', kernel_type='linear', eps=0.0000001, C=Cparam, probability=True)
    svm.learn(X, Y)
    return svm
Example #8
File: SVM.py Project: wypstudy/ML
def f(TrainIn, TrainOut, TestIn):
    print "init......"
    x = numpy.array(TrainIn)
    y = numpy.array(TrainOut)
    t = numpy.array(TestIn)

    print "learn......"
    svm = mlpy.LibSvm(svm_type='c_svc', kernel_type='rbf', gamma=100)
    svm.learn(x, y)

    print "out......"
    re = svm.pred(t)
    return re
Example #9
def def_model(self):
    """ Define a new model using the kernel, type and parameters. """
    if self.model is not None:
        del self.model  # remove an old model
    if self.type is None or self.kernel is None:
        return None
    self.model = mlpy.LibSvm(svm_type=self.type,
                             kernel_type=self.kernel,
                             degree=self.degree,
                             gamma=self.gamma,
                             coef0=self.coef,
                             nu=self.nu,
                             eps=self.eps,
                             p=self.p,
                             probability=self.prop,
                             weight=self.weight)
    return True
Example #10
File: test.py Project: deepxkn/FEAR
def train_SVMs(gabors, points):
    svm = mlpy.LibSvm(svm_type='c_svc', kernel_type='rbf')

    (h, w) = gabors[0].shape

    x = pack_gabor_images(gabors)

    # One label per pixel: mark the point of interest, leave the rest zero
    y = numpy.zeros((h, w))
    y[points[0][0]][points[0][1]] = 10.0
    y = y.ravel()  # flatten to a 1-D label vector matching the rows of x

    print "Training SVM..."

    # Train the SVM and persist it for later sessions
    svm.learn(x, y)
    svm.save_model('svm.model')

    return svm
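Because train_SVMs() persists the model with save_model(), a later process can reload it without retraining; a minimal sketch using the same filename (the reuse line is hypothetical):

import mlpy
svm = mlpy.LibSvm.load_model('svm.model')  # restore the trained classifier
# predictions = svm.pred(pack_gabor_images(new_gabors))  # hypothetical reuse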
Example #11
def pcaSvm():
    wine = np.loadtxt(r'F:\PY\data\wine.txt', delimiter=',')
    x, y = wine[:, 1:4], wine[:, 0].astype(np.int)
    print x.shape, y.shape

    pca = mlpy.PCA()
    pca.learn(x)

    z = pca.transform(x, k=2)
    print z.shape

    fig1 = plt.figure(1)
    title = plt.title('PCA on wine dataset')
    plot = plt.scatter(z[:, 0], z[:, 1], c=y, s=90, cmap=cm.Reds)
    labx = plt.xlabel('First component')
    laby = plt.ylabel('Second component')
    plt.show()

    svm = mlpy.LibSvm(kernel_type='rbf', gamma=20)
    svm.learn(z, y)

    xmin, xmax = z[:, 0].min() - 0.1, z[:, 0].max() + 0.1
    ymin, ymax = z[:, 1].min() - 0.1, z[:, 1].max() + 0.1
    xx, yy = np.meshgrid(np.arange(xmin, xmax, 0.01), np.arange(ymin, ymax, 0.01))
    grid = np.c_[xx.ravel(), yy.ravel()]

    result = svm.pred(grid)

    fig2 = plt.figure(2)
    title = plt.title("SVM (rbf kernel) on PCA")
    plot1 = plt.pcolormesh(xx, yy, result.reshape(xx.shape), cmap=cm.Greys_r)
    plot2 = plt.scatter(z[:, 0], z[:, 1], c=y, s=90, cmap=cm.Reds)
    labx = plt.xlabel("First component")
    laby = plt.ylabel("Second component")
    limx = plt.xlim(xmin, xmax)
    limy = plt.ylim(ymin, ymax)
    plt.show()
Example #12
def train_classifier(fmat, labels, method):
    classifiers = {
        'c_svc': mlpy.LibSvm(),
        'nu_svc_linear': mlpy.LibSvm('nu_svc', 'linear'),
        'nu_svc_sigmoid': mlpy.LibSvm('nu_svc', 'sigmoid'),
        'c_svc_prob': mlpy.LibSvm(probability=True),
        'nu_svc_linear_prob': mlpy.LibSvm('nu_svc', 'linear',
                                          probability=True),
        'nu_svc_sigmoid_prob': mlpy.LibSvm('nu_svc', 'sigmoid',
                                           probability=True),
        'lr': mlpy.LibLinear(),
        'gboost': GradientBoostingClassifier()
    }
    clas = classifiers[method]
    if hasattr(clas, 'learn'):
        clas.learn(fmat, labels)
    else:
        clas.fit(fmat, labels)
    return clas
Example #13
#!/usr/bin/env python

import numpy as np
import matplotlib.pyplot as plt
import mlpy
import sklearn.preprocessing

# Heart-disease data: 13 feature columns, label in column 13
data = np.loadtxt('./heart-disease.data', delimiter=',', dtype='str')
x, y = data[:, :13], data[:, 13].astype(np.int)

le = sklearn.preprocessing.LabelEncoder()

# Encode each row's string values as integers (note: the encoder is
# refit for every row, so the same value can get different codes in
# different rows)
x_data = []
for item in x:
    le.fit(item)
    new_item = le.transform(item)
    x_data.append(new_item)

svm = mlpy.LibSvm()
svm.learn(np.array(x_data), np.array(y))
print svm.pred(x_data[len(x_data) - 3])  # predict a single training sample
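The script above predicts on a sample it was trained on. A hedged sketch of a simple hold-out split instead, reusing x_data and y from above:

X = np.array(x_data)
Y = np.array(y)
n_train = int(0.8 * len(X))
svm = mlpy.LibSvm()
svm.learn(X[:n_train], Y[:n_train])
preds = svm.pred(X[n_train:])            # pred() accepts a 2-D test matrix
print np.mean(preds == Y[n_train:])      # hold-out accuracy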
Example #14
# Read the images and extract a feature vector for each image class
for ii in xrange(1, picflag + 1):
    smp_x = []

    mytz = np.zeros((3, w_fg * h_fg))
    for jj in xrange(1, 4):
        fn = 'p' + str(ii) + '-' + str(jj) + '.png'
        tmptz = readpic(fn)
        train_x.append(tmptz.tolist())
        d.append(ii)

x = np.array(train_x)
y = np.array(d)
svm = mlpy.LibSvm(svm_type='c_svc', kernel_type='poly', gamma=50)
svm.learn(x, y)

print svm.pred(x)

fn = 'ptest3.png'
testtz = np.array(readpic(fn))
nowi = svm.pred(testtz)
print u'%s belongs to class %d' % (fn, nowi)

fn = 'ptest1.png'
testtz = np.array(readpic(fn))
nowi = svm.pred(testtz)
print u'%s belongs to class %d' % (fn, nowi)

fn = 'ptest2.png'
Example #15
import mlpy

BEST = {
    'knn': mlpy.KNN(1),
    'tree': mlpy.ClassTree(stumps=0, minsize=0),
    'svm': mlpy.LibSvm(svm_type='c_svc',
                       kernel=mlpy.KernelGaussian(10),
                       C=10000)
}
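A hedged sketch of consuming the BEST table (x_train, y_train and x_test are hypothetical numpy arrays; mlpy's KNN, ClassTree and LibSvm all share the learn()/pred() interface):

clf = BEST['svm']
clf.learn(x_train, y_train)   # fit the selected model
y_pred = clf.pred(x_test)     # predict on held-out data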
Example #16
print(x.shape)
print(y.shape)

pca = mlpy.PCA()
pca.learn(x)
z = pca.transform(x, k=2)
print(z.shape)

fig1 = plt.figure(1)
title = plt.title("PCA on wine dataset")
plot = plt.scatter(z[:, 0], z[:, 1], c=y, s=90, cmap=cm.Reds)
labx = plt.xlabel("First component")
laby = plt.ylabel("Second component")
plt.show()

svm = mlpy.LibSvm(kernel_type='rbf', gamma=20)
svm.learn(z, y)

xmin, xmax = z[:, 0].min() - 0.1, z[:, 0].max() + 0.1
ymin, ymax = z[:, 1].min() - 0.1, z[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.arange(xmin, xmax, 0.01), np.arange(ymin, ymax, 0.01))
grid = np.c_[xx.ravel(), yy.ravel()]

result = svm.pred(grid)

fig2 = plt.figure(2)
title = plt.title("SVM (linear kernel) on PCA")
plot1 = plt.pcolormesh(xx, yy, result.reshape(xx.shape), cmap=cm.Greys_r)
plot2 = plt.scatter(z[:, 0], z[:, 1], c=y, s=90, cmap=cm.Reds)
labx = plt.xlabel("First component")
laby = plt.ylabel("Second component")
Example #17
File: 8-16.py Project: lxldfzr/p3
#-*- coding: utf-8 -*-
# 8-16.py
import numpy as np
import matplotlib.pyplot as plt
import mlpy

f = np.loadtxt("spiral.data")
x, y = f[:, :2], f[:, 2]
svm = mlpy.LibSvm(svm_type='c_svc', kernel_type='rbf', gamma=100)
svm.learn(x, y)
xmin, xmax = x[:, 0].min() - 0.1, x[:, 0].max() + 0.1
ymin, ymax = x[:, 1].min() - 0.1, x[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.arange(xmin, xmax, 0.01), np.arange(ymin, ymax, 0.01))
xnew = np.c_[xx.ravel(), yy.ravel()]
ynew = svm.pred(xnew).reshape(xx.shape)
fig = plt.figure(1)
plt.pcolormesh(xx, yy, ynew)
plt.scatter(x[:, 0], x[:, 1], c=y)
plt.show()
Example #18
####
# Important note: Error will show up if the plt.set_cmap(plt.cm.Paired)
# is placed before the plot = plt.scatter(z[:, 0], z[:, 1], c=y)
# http://mlpy.sourceforge.net/docs/3.5/tutorial.html#tutorial-1-iris-dataset
##
fig1 = plt.figure(1)
title = plt.title("PCA on iris dataset")
title = plt.title("PCA on iris dataset")
plot = plt.scatter(z[:, 0], z[:, 1], c=y)
plt.set_cmap(plt.cm.Paired)
labx = plt.xlabel("First component")
laby = plt.ylabel("Second component")
plt.show()

#Learning by Kernel Support Vector Machines (SVMs) on principal components:
linear_svm = mlpy.LibSvm(kernel_type='sigmoid') # new SVM instance with a sigmoid kernel
linear_svm.learn(z, y) # learn from principal components

#For plotting purposes, we build the grid where we will compute the predictions (zgrid):
xmin, xmax = z[:,0].min()-0.1, z[:,0].max()+0.1
ymin, ymax = z[:,1].min()-0.1, z[:,1].max()+0.1
xx, yy = np.meshgrid(np.arange(xmin, xmax, 0.01), np.arange(ymin, ymax, 0.01))
zgrid = np.c_[xx.ravel(), yy.ravel()]

#Now we perform the predictions on the grid. The pred() method returns the prediction for each point in zgrid:
yp = linear_svm.pred(zgrid)

#Plot the predictions:
####
# Important note: Error will show up if the plt.set_cmap(plt.cm.Paired)
# is placed before the plt.pcolormesh(xx, yy, yp.reshape(xx.shape))
Example #19
iris = np.loadtxt('iris.csv', delimiter=',')
x, y = iris[:, :4], iris[:, 4].astype(np.int)
x.shape
pca = mlpy.PCA()
pca.learn(x)
z = pca.transform(x, k=2)

#plt.set_cmap(plt.cm.Paired)
fig1 = plt.figure(1)
title = plt.title("PCA on iris dataset")
plot = plt.scatter(z[:, 0], z[:, 1], c=y)
labx = plt.xlabel("First component")
laby = plt.ylabel("Second component")


linear_svm = mlpy.LibSvm(svm_type='nu_svc', kernel_type='rbf', probability=True) # new nu-SVC instance with an RBF kernel
linear_svm.learn(z, y) # learn from principal components

xmin, xmax = z[:,0].min()-0.1, z[:,0].max()+0.1
ymin, ymax = z[:,1].min()-0.1, z[:,1].max()+0.1


xx, yy = np.meshgrid(np.arange(xmin, xmax, 0.01), np.arange(ymin, ymax, 0.01))
zgrid = np.c_[xx.ravel(), yy.ravel()]
yp = linear_svm.pred(zgrid)
data_p = linear_svm.pred_probability(zgrid)

linear_svm.save_model('svm_for_iris')
svm_iris = mlpy.LibSvm.load_model('svm_for_iris')
yp_iris = svm_iris.pred_probability(zgrid)
Example #20
z = pca.transform(x, k=2) # embed x into the k=2 dimensional subspace
z.shape


#Plot the principal components:
plt.set_cmap(plt.cm.Paired)
fig1 = plt.figure(1)
title = plt.title("PCA on iris dataset")
plot = plt.scatter(z[:, 0], z[:, 1], c=y)
labx = plt.xlabel("First component")
laby = plt.ylabel("Second component")
plt.show()


#Learning by Kernel Support Vector Machines (SVMs) on principal components:
linear_svm = mlpy.LibSvm(kernel_type='linear') # new linear SVM instance
linear_svm.learn(z, y) # learn from principal components

#For plotting purposes, we build the grid where we will compute the predictions (zgrid):
xmin, xmax = z[:,0].min()-0.1, z[:,0].max()+0.1
ymin, ymax = z[:,1].min()-0.1, z[:,1].max()+0.1
xx, yy = np.meshgrid(np.arange(xmin, xmax, 0.01), np.arange(ymin, ymax, 0.01))
zgrid = np.c_[xx.ravel(), yy.ravel()]

#Now we perform the predictions on the grid. The pred() method returns the prediction for each point in zgrid:
yp = linear_svm.pred(zgrid)

#Plot the predictions:
plt.set_cmap(plt.cm.Paired)
fig2 = plt.figure(2)
title = plt.title("SVM (linear kernel) on principal components")