import numpy

# old-style shogun Python API imports, matching the other examples below
from shogun.Features import RealFeatures, Labels
from shogun.Classifier import LibLinear, L2R_L1LOSS_SVC_DUAL
from modshogun import MSG_DEBUG


def solver_dcd_shogun_debug(C, all_xt, all_lt, task_indicator, M, L):
    """
    Train a standard LibLinear SVM for debugging purposes, ignoring the
    multitask structure encoded in M and L.
    """

    xt = numpy.array(all_xt)
    lt = numpy.array(all_lt)
    tt = numpy.array(task_indicator, dtype=numpy.int32)
    tsm = numpy.array(M)  # task-similarity matrix; unused in this debug path
    num_tasks = L.shape[0]

    # sanity checks
    assert len(xt) == len(lt) == len(tt)
    assert M.shape == L.shape
    assert num_tasks == len(set(tt))

    # set up shogun objects
    if isinstance(xt[0], str):
        # create_hashed_features_wdk is a project-local helper, not part of shogun
        feat = create_hashed_features_wdk(xt, 8)
    else:
        feat = RealFeatures(xt.T)

    lab = Labels(lt)

    # set up machinery
    svm = LibLinear()
    svm.set_liblinear_solver_type(L2R_L1LOSS_SVC_DUAL)
    svm.io.set_loglevel(MSG_DEBUG)

    svm.set_C(C, C)
    svm.set_bias_enabled(False)

    # invoke training
    svm.set_labels(lab)
    svm.train(feat)

    # get model parameters; the two 42s are dummy placeholder return values
    W = [svm.get_w()]

    return W, 42, 42
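
# Hypothetical smoke test for the debug solver above: two tasks, toy dense
# data, and identity matrices standing in for the task matrices M and L.
# All names below are made up for illustration.
if __name__ == "__main__":
    toy_xt = list(numpy.random.randn(6, 2))
    toy_lt = [1.0, 1.0, 1.0, -1.0, -1.0, -1.0]
    toy_tasks = [0, 0, 1, 1, 0, 1]
    eye = numpy.eye(2)
    W, _, _ = solver_dcd_shogun_debug(1.0, toy_xt, toy_lt, toy_tasks, eye, eye)
    print(W[0])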
Example #2
# This fragment assumes feat_presvm and labels_presvm (features and labels
# for the pre-trained SVM) were built earlier in the original script.
lab_presvm = Labels(numpy.array(labels_presvm))
wdk_presvm = LinearKernel(feat_presvm, feat_presvm)

presvm_liblinear = LibLinear(1, feat_presvm, lab_presvm)
presvm_liblinear.set_max_iterations(10000)
presvm_liblinear.set_bias_enabled(False)
presvm_liblinear.train()

# reference solution: the same problem solved with a kernelized SVM
presvm_libsvm = LibSVM(1, wdk_presvm, lab_presvm)
#presvm_libsvm = SVMLight(1, wdk_presvm, lab_presvm)

#presvm_libsvm.io.set_loglevel(MSG_DEBUG)
presvm_libsvm.set_bias_enabled(False)
presvm_libsvm.train()

# re-create the LibLinear machine and seed it with the learned weights
my_w = presvm_liblinear.get_w()
presvm_liblinear = LibLinear(1, feat_presvm, lab_presvm)
presvm_liblinear.set_w(my_w)

#############################################
#    compute linear term manually
#############################################

examples = numpy.array(examples, dtype=numpy.float64)
# RealFeatures expects one example per column, hence the transpose
examples = numpy.transpose(examples)

feat = RealFeatures(examples)
lab = Labels(numpy.array(labels))

wdk = LinearKernel(feat, feat)
Example #3
def classifier_perceptron_graphical(n=100, distance=5, learn_rate=1., max_iter=1000, num_threads=1, seed=None, nperceptrons=5):
	from shogun.Features import RealFeatures, BinaryLabels
	from shogun.Classifier import Perceptron, LibLinear, L2R_L2LOSS_SVC
	from modshogun import MSG_INFO
	# np and plt are assumed at module level: numpy as np, matplotlib.pyplot as plt

	# 2D data
	_DIM = 2

	# To get the nice message that the perceptron has converged
	dummy = BinaryLabels()
#	dummy.io.set_loglevel(MSG_INFO)

	np.random.seed(seed)

	# Produce some (probably) linearly separable training data by hand
	# Two Gaussians at a far enough distance
	X = np.array(np.random.randn(_DIM,n))+distance
	Y = np.array(np.random.randn(_DIM,n))
	label_train_twoclass = np.hstack((np.ones(n), -np.ones(n)))

	fm_train_real = np.hstack((X,Y))
	feats_train = RealFeatures(fm_train_real)
	labels = BinaryLabels(label_train_twoclass)

	perceptron = Perceptron(feats_train, labels)
	perceptron.set_learn_rate(learn_rate)
	perceptron.set_max_iter(max_iter)
	perceptron.set_initialize_hyperplane(False)

	# Find limits for visualization
	x_min = min(np.min(X[0,:]), np.min(Y[0,:]))
	x_max = max(np.max(X[0,:]), np.max(Y[0,:]))

	y_min = min(np.min(X[1,:]), np.min(Y[1,:]))
	y_max = max(np.max(X[1,:]), np.max(Y[1,:]))

	fig1, axes1 = plt.subplots(1,1)
	fig2, axes2 = plt.subplots(1,1)

	for i in xrange(nperceptrons):
		# Initialize randomly weight vector and bias
		perceptron.set_w(np.random.random(2))
		perceptron.set_bias(np.random.random())

		# Run the perceptron algorithm
		perceptron.train()

		# Construct the hyperplane for visualization
		# Equation of the decision boundary is w^T x + b = 0
		b = perceptron.get_bias()
		w = perceptron.get_w()

		hx = np.linspace(x_min-1,x_max+1)

		axes1.plot(hx, -1/w[1]*(w[0]*hx+b))
		axes2.plot(hx, -1/w[1]*(w[0]*hx+b), alpha=0.5)

		print('minimum distance with perceptron is %f' % min_distance(w, b, feats_train))

	C = 1
	epsilon = 1e-3
	svm = LibLinear(C, feats_train, labels)
	svm.set_liblinear_solver_type(L2R_L2LOSS_SVC)
	svm.set_epsilon(epsilon)
	svm.set_bias_enabled(True)
	svm.train()

	b = svm.get_bias()
	w = svm.get_w()

	print('minimum distance with svm is        %f' % min_distance(w, b, feats_train))

	hx = np.linspace(x_min-1,x_max+1)

	axes2.plot(hx, -1/w[1]*(w[0]*hx+b), 'k', linewidth=2.0)

	# Plot the two-class data
	axes1.scatter(X[0,:], X[1,:], s=40, marker='o', facecolors='none', edgecolors='b')
	axes1.scatter(Y[0,:], Y[1,:], s=40, marker='s', facecolors='none', edgecolors='r')

	axes2.scatter(X[0,:], X[1,:], s=40, marker='o', facecolors='none', edgecolors='b')
	axes2.scatter(Y[0,:], Y[1,:], s=40, marker='s', facecolors='none', edgecolors='r')

	# Customize the plot
	axes1.axis([x_min-1, x_max+1, y_min-1, y_max+1])
	axes1.set_title('Rosenblatt\'s Perceptron Algorithm')
	axes1.set_xlabel('x')
	axes1.set_ylabel('y')

	axes2.axis([x_min-1, x_max+1, y_min-1, y_max+1])
	axes2.set_title('Support Vector Machine')
	axes2.set_xlabel('x')
	axes2.set_ylabel('y')

	plt.show()

	return perceptron
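
# The example above calls a min_distance helper that is not shown in these
# snippets. A minimal sketch of what it plausibly computes: the smallest
# geometric distance |w^T x + b| / ||w|| from any training point to the
# hyperplane, assuming feats is a shogun RealFeatures object.
def min_distance(w, b, feats):
    # get_feature_matrix() returns the examples as columns of a d-by-n array
    X = feats.get_feature_matrix()
    return np.min(np.abs(np.dot(w, X) + b)) / np.linalg.norm(w)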
Example #4
# assumes old-style shogun imports at module level, e.g.:
#   from shogun.Features import RealFeatures, BinaryLabels, Labels
#   from shogun.Classifier import LibLinear, SVMOcas, L1R_LR, L2R_L2LOSS_SVC
def SVMLinear(traindata, trainlabs, testdata, C=1.0, eps=1e-5, threads=1, getw=False, useLibLinear=False, useL1R=False):
    """
    Does efficient linear SVM using the OCAS subgradient solver (as interfaced
    by shogun).  Handles multiclass problems using a one-versus-all approach.

    NOTE: the training and testing data should both be scaled such that each
    dimension ranges from 0 to 1
    traindata = n by d training data array
    trainlabs = n-length training data label vector (should be normalized
        so labels range from 0 to c-1, where c is the number of classes)
    testdata = m by d array of data to test
    C = SVM regularization constant
    eps = precision parameter used by OCAS
    threads = number of threads to use
    getw = whether or not to return the learned weight vector from the SVM (note:
        only works for 2-class problems)
    useLibLinear = if True, use LibLinear instead of the SVMOcas solver
    useL1R = if True (together with useLibLinear), use L1-regularized logistic
        regression (L1R_LR); the training data is then passed transposed

    returns:
    m-length vector containing the predicted labels of the instances
         in testdata
    if problem is 2-class and getw == True, then a d-length weight vector is also returned
    """
    numc = trainlabs.max() + 1
    #
    # when using an L1 solver, we need the data transposed
    #
    # trainfeats = wrapFeatures(traindata, sparse=True)
    # testfeats = wrapFeatures(testdata, sparse=True)
    if not useL1R:
        ### traindata directly here for L2R_L2LOSS_SVC
        trainfeats = wrapFeatures(traindata, sparse=False)
    else:
        ### traindata.T here for L1R_LR
        trainfeats = wrapFeatures(traindata.T, sparse=False)
    testfeats = wrapFeatures(testdata, sparse=False)
    if numc > 2:
        preds = np.zeros(testdata.shape[0], dtype=np.int32)
        predprobs = np.zeros(testdata.shape[0])
        predprobs[:] = -np.inf
        for i in xrange(numc):
            # set up svm
            tlabs = np.int32(trainlabs == i)
            tlabs[tlabs == 0] = -1
            # print tlabs
            # print i, ' ', np.sum(tlabs==-1), ' ', np.sum(tlabs==1)
            labels = BinaryLabels(np.float64(tlabs))
            if useLibLinear:
                # Use LibLinear and set the solver type
                svm = LibLinear(C, trainfeats, labels)
                if useL1R:
                    # this is L1 regularization on logistic loss
                    svm.set_liblinear_solver_type(L1R_LR)
                else:
                    # most of my results were computed with this (ucf50)
                    svm.set_liblinear_solver_type(L2R_L2LOSS_SVC)
            else:
                # Or Use SVMOcas
                svm = SVMOcas(C, trainfeats, labels)
            svm.set_epsilon(eps)
            svm.parallel.set_num_threads(threads)
            svm.set_bias_enabled(True)
            # train
            svm.train()
            # test
            res = svm.apply(testfeats).get_labels()
            thisclass = res > predprobs
            preds[thisclass] = i
            predprobs[thisclass] = res[thisclass]
        return preds
    else:
        tlabs = trainlabs.copy()
        tlabs[tlabs == 0] = -1
        labels = BinaryLabels(np.float64(tlabs))
        svm = SVMOcas(C, trainfeats, labels)
        svm.set_epsilon(eps)
        svm.parallel.set_num_threads(threads)
        svm.set_bias_enabled(True)
        # train
        svm.train()
        # test
        res = svm.apply(testfeats).get_labels()
        res[res > 0] = 1
        res[res <= 0] = 0
        if getw:
            return res, svm.get_w()
        else:
            return res
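
# wrapFeatures is not defined in this snippet. A minimal sketch of a plausible
# implementation, assuming dense data goes into RealFeatures (shogun wants one
# example per column) and the sparse path uses the old-API dense-to-sparse
# conversion; both feature classes live in shogun.Features.
from shogun.Features import RealFeatures, SparseRealFeatures

def wrapFeatures(data, sparse=False):
    dense = RealFeatures(np.float64(data.T))
    if not sparse:
        return dense
    # assumption: obtain_from_simple converts dense features to sparse ones
    sparse_feats = SparseRealFeatures()
    sparse_feats.obtain_from_simple(dense)
    return sparse_feats

# Hypothetical call, with labels in 0..c-1 and data scaled to [0, 1] as the
# docstring requires:
# preds = SVMLinear(Xtr, ytr, Xte, C=1.0, useLibLinear=True)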

Example #6
def classifier_non_separable_svm(n=100, m=10, distance=5, seed=None):
    '''
    n is the number of examples per class; m is the number of examples per
    class whose label gets swapped to force non-linear separability
    '''
    from shogun.Features import RealFeatures, BinaryLabels
    from shogun.Classifier import LibLinear, L2R_L2LOSS_SVC
    # np and plt are assumed at module level: numpy as np, matplotlib.pyplot as plt

    # 2D data
    _DIM = 2

    # unused; carried over from the perceptron example above
    dummy = BinaryLabels()

    np.random.seed(seed)

    # Produce some (probably) linearly separable training data by hand
    # Two Gaussians at a far enough distance
    X = np.array(np.random.randn(_DIM, n)) + distance
    Y = np.array(np.random.randn(_DIM, n))
    # The last m points of each class get their labels swapped to force
    # non-linearly separable data
    label_train_twoclass = np.hstack(
        (np.ones(n - m), -np.ones(m), -np.ones(n - m), np.ones(m)))

    fm_train_real = np.hstack((X, Y))
    feats_train = RealFeatures(fm_train_real)
    labels = BinaryLabels(label_train_twoclass)

    # Train linear SVM
    C = 1
    epsilon = 1e-3
    svm = LibLinear(C, feats_train, labels)
    svm.set_liblinear_solver_type(L2R_L2LOSS_SVC)
    svm.set_epsilon(epsilon)
    svm.set_bias_enabled(True)
    svm.train()

    # Get hyperplane parameters
    b = svm.get_bias()
    w = svm.get_w()

    # Find limits for visualization
    x_min = min(np.min(X[0, :]), np.min(Y[0, :]))
    x_max = max(np.max(X[0, :]), np.max(Y[0, :]))

    y_min = min(np.min(X[1, :]), np.min(Y[1, :]))
    y_max = max(np.max(X[1, :]), np.max(Y[1, :]))

    hx = np.linspace(x_min - 1, x_max + 1)

    plt.plot(hx, -1 / w[1] * (w[0] * hx + b), 'k', linewidth=2.0)

    # Plot the two-class data
    pos_idxs = label_train_twoclass == +1
    plt.scatter(fm_train_real[0, pos_idxs],
                fm_train_real[1, pos_idxs],
                s=40,
                marker='o',
                facecolors='none',
                edgecolors='b')

    neg_idxs = label_train_twoclass == -1
    plt.scatter(fm_train_real[0, neg_idxs],
                fm_train_real[1, neg_idxs],
                s=40,
                marker='s',
                facecolors='none',
                edgecolors='r')

    # Customize the plot
    plt.axis([x_min - 1, x_max + 1, y_min - 1, y_max + 1])
    plt.title('SVM with non-linearly separable data')
    plt.xlabel('x')
    plt.ylabel('y')

    plt.show()

    return svm
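
# Hypothetical driver for the example above; module-level numpy/matplotlib
# imports (np, plt) are assumed, and the seed is fixed for reproducibility.
if __name__ == '__main__':
    classifier_non_separable_svm(n=100, m=10, distance=5, seed=42)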