Example No. 1
def create_svm(param, data, lab):
    """
    create SVM object with standard settings

    @param param: parameter object
    @param data: kernel or feature object (for kernelized/linear svm)
    @param lab: label object

    @return: svm object
    """

    # imports assumed from the old shogun modular interface;
    # set_svm_parameters is a helper defined elsewhere in the same module
    from shogun.Classifier import LibLinear, SVMLight, L2R_L2LOSS_SVC_DUAL, L2R_LR

    # create SVM
    if param.flags.get("svm_type") == "liblineardual":
        print "creating LibLinear object"
        svm = LibLinear(param.cost, data, lab)
        svm.set_liblinear_solver_type(L2R_L2LOSS_SVC_DUAL)

        # set solver type
        if param.flags.get("solver_type") == "L2R_LR":
            print "setting linear solver type to: L2R_LR"
            svm.set_liblinear_solver_type(L2R_LR)

    else:
        print "creating SVMLight object"
        svm = SVMLight(param.cost, data, lab)

    return set_svm_parameters(svm, param)
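A minimal usage sketch, assuming the old shogun modular API, a hypothetical Param class standing in for the parameter object (anything exposing .cost and a .flags dict would do), and that set_svm_parameters is defined elsewhere in the module:

import numpy
from shogun.Features import RealFeatures, BinaryLabels

class Param(object):
    # hypothetical stand-in for the parameter object create_svm expects
    cost = 1.0
    flags = {"svm_type": "liblineardual", "solver_type": "L2R_LR"}

X = numpy.random.randn(2, 20)                       # 2 features, 20 examples
y = numpy.where(numpy.random.randn(20) > 0, 1.0, -1.0)
svm = create_svm(Param(), RealFeatures(X), BinaryLabels(y))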
Example No. 3
def train_svm(feats_train, labels, C=1):
	from shogun.Classifier import LibLinear, L2R_L2LOSS_SVC, L2R_L2LOSS_SVC_DUAL

	epsilon = 1e-3
	svm = LibLinear(C, feats_train, labels)
	svm.set_liblinear_solver_type(L2R_L2LOSS_SVC)
	svm.set_epsilon(epsilon)
	svm.set_bias_enabled(False)
	svm.train()

	return svm
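For reference, a hedged usage sketch under the same old modular interface (BinaryLabels in the snapshots these examples target; older ones used Labels):

import numpy
from shogun.Features import RealFeatures, BinaryLabels

X = numpy.random.randn(2, 100)            # 2-dimensional features, 100 examples
y = numpy.where(X[0] > 0, 1.0, -1.0)      # labels in {-1, +1}
svm = train_svm(RealFeatures(X), BinaryLabels(y), C=0.5)
print svm.get_w()                         # learned weight vector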
Example No. 7
def solver_dcd_shogun_debug(C, all_xt, all_lt, task_indicator, M, L):
    """
    use standard LibLinear for debugging purposes
    """

    import numpy
    # imports assumed from the old shogun modular interface;
    # create_hashed_features_wdk is a helper defined elsewhere in the module
    from shogun.Features import RealFeatures, Labels
    from shogun.Classifier import LibLinear, L2R_L1LOSS_SVC_DUAL
    from modshogun import MSG_DEBUG

    xt = numpy.array(all_xt)
    lt = numpy.array(all_lt)
    tt = numpy.array(task_indicator, dtype=numpy.int32)
    tsm = numpy.array(M)
    num_tasks = L.shape[0]

    # sanity checks
    assert len(xt) == len(lt) == len(tt)
    assert M.shape == L.shape
    assert num_tasks == len(set(tt))

    # set up shogun objects
    if type(xt[0]) == str:
        feat = create_hashed_features_wdk(xt, 8)
    else:
        feat = RealFeatures(xt.T)

    lab = Labels(lt)

    # set up machinery
    svm = LibLinear()
    svm.set_liblinear_solver_type(L2R_L1LOSS_SVC_DUAL)
    svm.io.set_loglevel(MSG_DEBUG)

    svm.set_C(C, C)
    svm.set_bias_enabled(False)

    # invoke training
    svm.set_labels(lab)
    svm.train(feat)

    # get model parameters (the two 42s are placeholder return values
    # in this debug path)
    W = [svm.get_w()]

    return W, 42, 42
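A sketch of how this debug solver might be invoked, assuming two tasks and trivial task matrices (the task structure is ignored by the plain LibLinear baseline, which is the point of the debug path; the variable names below are illustrative):

import numpy
num_tasks = 2
M = numpy.eye(num_tasks)                       # task-similarity matrix
L = numpy.eye(num_tasks)                       # same shape as M
all_xt = list(numpy.random.randn(20, 3))       # 20 examples, 3 features
all_lt = list(numpy.where(numpy.random.randn(20) > 0, 1.0, -1.0))
task_indicator = [0] * 10 + [1] * 10
W, obj, dual_obj = solver_dcd_shogun_debug(1.0, all_xt, all_lt, task_indicator, M, L)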
def classifier_liblinear_modular(fm_train_real, fm_test_real, label_train_twoclass, C, epsilon):

    from shogun.Features import RealFeatures, SparseRealFeatures, Labels
    from shogun.Classifier import LibLinear, L2R_L2LOSS_SVC_DUAL
    from shogun.Mathematics import Math_init_random

    Math_init_random(17)

    feats_train = RealFeatures(fm_train_real)
    feats_test = RealFeatures(fm_test_real)
    labels = Labels(label_train_twoclass)

    svm = LibLinear(C, feats_train, labels)
    svm.set_liblinear_solver_type(L2R_L2LOSS_SVC_DUAL)
    svm.set_epsilon(epsilon)
    svm.set_bias_enabled(True)
    svm.train()

    svm.set_features(feats_test)
    svm.apply().get_labels()
    predictions = svm.apply()
    return predictions, svm, predictions.get_labels()
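Called on random data, this might look like the following sketch (old modular API assumed; any two-class data in the same layout works):

import numpy
numpy.random.seed(17)
fm_train = numpy.random.randn(2, 100)              # 2 x n training matrix
fm_test = numpy.random.randn(2, 40)                # 2 x m test matrix
labels = numpy.where(fm_train[0] > 0, 1.0, -1.0)   # labels in {-1, +1}
predictions, svm, labs = classifier_liblinear_modular(fm_train, fm_test, labels, C=0.9, epsilon=1e-5)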
def classifier_perceptron_graphical(n=100, distance=5, learn_rate=1., max_iter=1000, num_threads=1, seed=None, nperceptrons=5):
	import numpy as np
	import matplotlib.pyplot as plt
	from shogun.Features import RealFeatures, BinaryLabels
	from shogun.Classifier import Perceptron, LibLinear, L2R_L2LOSS_SVC
	from modshogun import MSG_INFO

	# NOTE: min_distance(w, b, feats) is a helper defined elsewhere in the
	# original example script (minimum distance from the training points
	# to the hyperplane w^T x + b = 0)

	# 2D data
	_DIM = 2

	# To get the nice message that the perceptron has converged
	dummy = BinaryLabels()
#	dummy.io.set_loglevel(MSG_INFO)

	np.random.seed(seed)

	# Produce some (probably) linearly separable training data by hand
	# Two Gaussians at a far enough distance
	X = np.array(np.random.randn(_DIM,n))+distance
	Y = np.array(np.random.randn(_DIM,n))
	label_train_twoclass = np.hstack((np.ones(n), -np.ones(n)))

	fm_train_real = np.hstack((X,Y))
	feats_train = RealFeatures(fm_train_real)
	labels = BinaryLabels(label_train_twoclass)

	perceptron = Perceptron(feats_train, labels)
	perceptron.set_learn_rate(learn_rate)
	perceptron.set_max_iter(max_iter)
	perceptron.set_initialize_hyperplane(False)

	# Find limits for visualization
	x_min = min(np.min(X[0,:]), np.min(Y[0,:]))
	x_max = max(np.max(X[0,:]), np.max(Y[0,:]))

	y_min = min(np.min(X[1,:]), np.min(Y[1,:]))
	y_max = max(np.max(X[1,:]), np.max(Y[1,:]))

	fig1, axes1 = plt.subplots(1,1)
	fig2, axes2 = plt.subplots(1,1)

	for i in xrange(nperceptrons):
		# Initialize randomly weight vector and bias
		perceptron.set_w(np.random.random(2))
		perceptron.set_bias(np.random.random())

		# Run the perceptron algorithm
		perceptron.train()

		# Construct the hyperplane for visualization
		# Equation of the decision boundary is w^T x + b = 0
		b = perceptron.get_bias()
		w = perceptron.get_w()

		hx = np.linspace(x_min-1,x_max+1)

		axes1.plot(hx, -1/w[1]*(w[0]*hx+b))
		axes2.plot(hx, -1/w[1]*(w[0]*hx+b), alpha=0.5)

		print('minimum distance with perceptron is %f' % min_distance(w, b, feats_train))

	C = 1
	epsilon = 1e-3
	svm = LibLinear(C, feats_train, labels)
	svm.set_liblinear_solver_type(L2R_L2LOSS_SVC)
	svm.set_epsilon(epsilon)
	svm.set_bias_enabled(True)
	svm.train()

	b = svm.get_bias()
	w = svm.get_w()

	print('minimum distance with svm is        %f' % min_distance(w, b, feats_train))

	hx = np.linspace(x_min-1,x_max+1)

	axes2.plot(hx, -1/w[1]*(w[0]*hx+b), 'k', linewidth=2.0)

	# Plot the two-class data
	axes1.scatter(X[0,:], X[1,:], s=40, marker='o', facecolors='none', edgecolors='b')
	axes1.scatter(Y[0,:], Y[1,:], s=40, marker='s', facecolors='none', edgecolors='r')

	axes2.scatter(X[0,:], X[1,:], s=40, marker='o', facecolors='none', edgecolors='b')
	axes2.scatter(Y[0,:], Y[1,:], s=40, marker='s', facecolors='none', edgecolors='r')

	# Customize the plot
	axes1.axis([x_min-1, x_max+1, y_min-1, y_max+1])
	axes1.set_title('Rosenblatt\'s Perceptron Algorithm')
	axes1.set_xlabel('x')
	axes1.set_ylabel('y')

	axes2.axis([x_min-1, x_max+1, y_min-1, y_max+1])
	axes2.set_title('Support Vector Machine')
	axes2.set_xlabel('x')
	axes2.set_ylabel('y')

	plt.show()

	return perceptron
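The demo is self-contained apart from matplotlib and the min_distance helper noted above; a typical invocation would be:

perceptron = classifier_perceptron_graphical(n=50, distance=5, seed=42, nperceptrons=3)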
Example No. 11
def SVMLinear(traindata, trainlabs, testdata, C=1.0, eps=1e-5, threads=1, getw=False, useLibLinear=False, useL1R=False):
    """
    Does efficient linear SVM using the OCAS subgradient solver (as interfaced
    by shogun), or optionally LibLinear.  Handles multiclass problems using a
    one-versus-all approach.

    NOTE: the training and testing data should both be scaled such that each
    dimension ranges from 0 to 1
    traindata = n by d training data array
    trainlabs = n-length training data label vector (should be normalized
        so labels range from 0 to c-1, where c is the number of classes)
    testdata = m by d array of data to test
    C = SVM regularization constant
    eps = precision parameter used by OCAS
    threads = number of threads to use
    getw = whether or not to return the learned weight vector from the SVM (note:
        only works for 2-class problems)
    useLibLinear = use LibLinear instead of SVMOcas in the multiclass case
    useL1R = use L1-regularized logistic regression (L1R_LR) as the LibLinear
        solver type instead of L2R_L2LOSS_SVC

    returns:
    m-length vector containing the predicted labels of the instances
         in testdata
    if problem is 2-class and getw == True, then a d-length weight vector is also returned
    """
    import numpy as np
    # imports assumed from the old shogun modular interface; wrapFeatures is
    # a helper from the surrounding module that wraps a numpy array in shogun
    # feature objects
    from shogun.Features import BinaryLabels, Labels
    from shogun.Classifier import LibLinear, SVMOcas, L1R_LR, L2R_L2LOSS_SVC

    numc = trainlabs.max() + 1
    #
    # when using an L1 solver, we need the data transposed
    #
    if not useL1R:
        ### traindata directly here for L2R_L2LOSS_SVC
        trainfeats = wrapFeatures(traindata, sparse=False)
    else:
        ### traindata.T here for L1R_LR
        trainfeats = wrapFeatures(traindata.T, sparse=False)
    testfeats = wrapFeatures(testdata, sparse=False)
    if numc > 2:
        preds = np.zeros(testdata.shape[0], dtype=np.int32)
        predprobs = np.zeros(testdata.shape[0])
        predprobs[:] = -np.inf
        for i in xrange(numc):
            # set up svm
            tlabs = np.int32(trainlabs == i)
            tlabs[tlabs == 0] = -1
            # print tlabs
            # print i, ' ', np.sum(tlabs==-1), ' ', np.sum(tlabs==1)
            labels = BinaryLabels(np.float64(tlabs))
            if useLibLinear:
                # Use LibLinear and set the solver type
                svm = LibLinear(C, trainfeats, labels)
                if useL1R:
                    # this is L1 regularization on logistic loss
                    svm.set_liblinear_solver_type(L1R_LR)
                else:
                    # most of my results were computed with this (ucf50)
                    svm.set_liblinear_solver_type(L2R_L2LOSS_SVC)
            else:
                # Or Use SVMOcas
                svm = SVMOcas(C, trainfeats, labels)
            svm.set_epsilon(eps)
            svm.parallel.set_num_threads(threads)
            svm.set_bias_enabled(True)
            # train
            svm.train()
            # test
            res = svm.apply(testfeats).get_labels()
            thisclass = res > predprobs
            preds[thisclass] = i
            predprobs[thisclass] = res[thisclass]
        return preds
    else:
        tlabs = trainlabs.copy()
        tlabs[tlabs == 0] = -1
        labels = Labels(np.float64(tlabs))
        svm = SVMOcas(C, trainfeats, labels)
        svm.set_epsilon(eps)
        svm.parallel.set_num_threads(threads)
        svm.set_bias_enabled(True)
        # train
        svm.train()
        # test (classify() was the pre-rename spelling of apply() in older
        # shogun versions; apply() is used here for consistency)
        res = svm.apply(testfeats).get_labels()
        res[res > 0] = 1
        res[res <= 0] = 0
        if getw == True:
            return res, svm.get_w()
        else:
            return res
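A usage sketch under the assumption that wrapFeatures behaves as described above; labels are expected in 0..c-1 and the data scaled to [0, 1]:

import numpy as np
traindata = np.random.rand(200, 10)          # n x d, scaled to [0, 1]
trainlabs = np.random.randint(0, 3, 200)     # 3 classes, labels in 0..2
testdata = np.random.rand(50, 10)
preds = SVMLinear(traindata, trainlabs, testdata, C=1.0, useLibLinear=True)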
def features_director_dot_modular(fm_train_real, fm_test_real,
		label_train_twoclass, C, epsilon):
	import numpy
	try:
		from shogun.Features import DirectorDotFeatures
		from shogun.Library import RealVector
	except ImportError:
		print "recompile shogun with --enable-swig-directors"
		return

	class NumpyFeatures(DirectorDotFeatures):

		# variables
		data=numpy.empty((1,1))
		
		# constructor
		def __init__(self, d):
			DirectorDotFeatures.__init__(self)
			self.data = d
		
		# overloaded methods
		def add_to_dense_sgvec(self, alpha, vec_idx1, vec2, abs):
			if abs:
				vec2+=alpha*numpy.abs(self.data[:,vec_idx1])
			else:
				vec2+=alpha*self.data[:,vec_idx1]

		def dot(self, vec_idx1, df, vec_idx2):
			return numpy.dot(self.data[:,vec_idx1], df.get_computed_dot_feature_vector(vec_idx2))

		def dense_dot_sgvec(self, vec_idx1, vec2):
			return numpy.dot(self.data[:,vec_idx1], vec2[0:vec2.vlen])

		def get_num_vectors(self):
			return self.data.shape[1]

		def get_dim_feature_space(self):
			return self.data.shape[0]

		# operators
	#	def __add__(self, other):
	#		return NumpyFeatures(self.data+other.data)

	#	def __sub__(self, other):
	#		return NumpyFeatures(self.data-other.data)

	#	def __iadd__(self, other):
	#		return NumpyFeatures(self.data+other.data)

	#	def __isub__(self, other):
	#		return NumpyFeatures(self.data-other.data)


	from shogun.Features import RealFeatures, SparseRealFeatures, BinaryLabels
	from shogun.Classifier import LibLinear, L2R_L2LOSS_SVC_DUAL
	from shogun.Mathematics import Math_init_random
	Math_init_random(17)

	feats_train=RealFeatures(fm_train_real)
	feats_test=RealFeatures(fm_test_real)
	labels=BinaryLabels(label_train_twoclass)

	dfeats_train=NumpyFeatures(fm_train_real)
	dfeats_test=NumpyFeatures(fm_test_real)
	dlabels=BinaryLabels(label_train_twoclass)

	print feats_train.get_computed_dot_feature_matrix()
	print dfeats_train.get_computed_dot_feature_matrix()

	svm=LibLinear(C, feats_train, labels)
	svm.set_liblinear_solver_type(L2R_L2LOSS_SVC_DUAL)
	svm.set_epsilon(epsilon)
	svm.set_bias_enabled(True)
	svm.train()

	svm.set_features(feats_test)
	svm.apply().get_labels()
	predictions = svm.apply()

	dfeats_train.__disown__()
	dfeats_train.parallel.set_num_threads(1)
	dsvm=LibLinear(C, dfeats_train, dlabels)
	dsvm.set_liblinear_solver_type(L2R_L2LOSS_SVC_DUAL)
	dsvm.set_epsilon(epsilon)
	dsvm.set_bias_enabled(True)
	dsvm.train()

	dfeats_test.__disown__()
	dfeats_test.parallel.set_num_threads(1)
	dsvm.set_features(dfeats_test)
	dsvm.apply().get_labels()
	dpredictions = dsvm.apply()

	return predictions, svm, predictions.get_labels()
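Assuming a shogun build with SWIG directors enabled, the example can be driven with random two-class data, e.g.:

import numpy
fm_train = numpy.random.randn(2, 30)
fm_test = numpy.random.randn(2, 10)
labels = numpy.where(fm_train[0] > 0, 1.0, -1.0)
out = features_director_dot_modular(fm_train, fm_test, labels, 0.9, 1e-5)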
Example No. 13
def classifier_non_separable_svm(n=100, m=10, distance=5, seed=None):
    '''
    n is the number of examples per class and m is the number of examples per
    class that gets its label swapped to force non-linear separability
    '''
    import numpy as np
    import matplotlib.pyplot as plt
    from shogun.Features import RealFeatures, BinaryLabels
    from shogun.Classifier import LibLinear, L2R_L2LOSS_SVC

    # 2D data
    _DIM = 2

    # Unused dummy labels object, kept from the perceptron example this
    # demo is based on
    dummy = BinaryLabels()

    np.random.seed(seed)

    # Produce some (probably) linearly separable training data by hand
    # Two Gaussians at a far enough distance
    X = np.array(np.random.randn(_DIM, n)) + distance
    Y = np.array(np.random.randn(_DIM, n))
    # The last m points of each class get their labels swapped to force
    # non-linearly separable data
    label_train_twoclass = np.hstack(
        (np.ones(n - m), -np.ones(m), -np.ones(n - m), np.ones(m)))

    fm_train_real = np.hstack((X, Y))
    feats_train = RealFeatures(fm_train_real)
    labels = BinaryLabels(label_train_twoclass)

    # Train linear SVM
    C = 1
    epsilon = 1e-3
    svm = LibLinear(C, feats_train, labels)
    svm.set_liblinear_solver_type(L2R_L2LOSS_SVC)
    svm.set_epsilon(epsilon)
    svm.set_bias_enabled(True)
    svm.train()

    # Get hyperplane parameters
    b = svm.get_bias()
    w = svm.get_w()

    # Find limits for visualization
    x_min = min(np.min(X[0, :]), np.min(Y[0, :]))
    x_max = max(np.max(X[0, :]), np.max(Y[0, :]))

    y_min = min(np.min(X[1, :]), np.min(Y[1, :]))
    y_max = max(np.max(X[1, :]), np.max(Y[1, :]))

    hx = np.linspace(x_min - 1, x_max + 1)

    plt.plot(hx, -1 / w[1] * (w[0] * hx + b), 'k', linewidth=2.0)

    # Plot the two-class data
    pos_idxs = label_train_twoclass == +1
    plt.scatter(fm_train_real[0, pos_idxs],
                fm_train_real[1, pos_idxs],
                s=40,
                marker='o',
                facecolors='none',
                edgecolors='b')

    neg_idxs = label_train_twoclass == -1
    plt.scatter(fm_train_real[0, neg_idxs],
                fm_train_real[1, neg_idxs],
                s=40,
                marker='s',
                facecolors='none',
                edgecolors='r')

    # Customize the plot
    plt.axis([x_min - 1, x_max + 1, y_min - 1, y_max + 1])
    plt.title('SVM with non-linearly separable data')
    plt.xlabel('x')
    plt.ylabel('y')

    plt.show()

    return svm
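As with the perceptron demo, only matplotlib is needed beyond shogun; a typical invocation would be:

svm = classifier_non_separable_svm(n=100, m=10, distance=5, seed=21)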