Пример #1
0
    def test_classification_three_classes(self):
        """RVC should exceed 0.95 training accuracy on the 3-class iris set."""
        iris = load_iris()
        features, labels = iris.data, iris.target

        classifier = RVC()
        classifier.fit(features, labels)

        # Training-set accuracy must be above 0.95 across all three classes.
        self.assertGreater(classifier.score(features, labels), 0.95)
Пример #2
0
    def test_classification_two_classes(self):
        """RVC binary classification: accuracy and predicted probabilities."""
        iris = load_iris()

        features = iris.data[:, 1:]
        labels = iris.target

        # Keep only classes 1 and 2 to make the problem binary.
        keep = labels != 0
        features = features[keep]
        labels = labels[keep]

        classifier = RVC()
        classifier.fit(features, labels)

        self.assertGreater(classifier.score(features, labels), 0.95)

        # The first remaining sample should be classified near-certainly.
        prob = classifier.predict_proba(features[0, :])
        expected = np.array([[0.999, 5.538e-4]])
        np.testing.assert_allclose(prob, expected, rtol=1e-2, atol=1e-2)
Пример #3
0
# Standardize features: fit the scaler on the training set only, then apply
# the same transform to the test set (avoids test-set leakage).
feature_scaler = StandardScaler()
X_train = feature_scaler.fit_transform(X_train)
X_test = feature_scaler.transform(X_test)
####################################################################################################################
# CROSS-VALIDATION SPLIT IN K FOLDS
######################################################################################################################
kf = KFold(n_splits=5)
# NOTE(review): the original called kf.get_n_splits(X_train, X_test), passing
# the test matrix as the ignored `y` argument and discarding the result — the
# dead call is removed.
print(kf)
# CREATE WIDTHS FOR GRID SEARCH: 10 candidate RBF widths, log-spaced 1e-5..1e4.
width1 = np.linspace(-5, 4, 10)
width = 10 ** width1
# Column 0: score summed over the 5 folds; column 1: the width that produced it.
# (zeros, not ones — every cell is overwritten below, but zeros is the honest init.)
score_width = np.zeros([len(width), 2])
##################
# OUTER loop sweeps candidate widths; INNER loop runs 5-fold cross-validation.
for i in range(len(width)):
    score = 0
    for train_index, test_index in kf.split(X_train):
        print("TRAIN:", train_index, "TEST:", test_index)
        X_train1, X_test1 = X_train[train_index], X_train[test_index]
        y_train1, y_test1 = y_train[train_index], y_train[test_index]
        clf1 = RVC(kernel='rbf', coef1=width[i])
        clf1.fit(X_train1, y_train1)
        # Accumulate fold scores; argmax below only needs the sum, not the mean.
        score = score + clf1.score(X_test1, y_test1)
    score_width[i, 0] = score
    score_width[i, 1] = width[i]
#########################################################################################################
# Select the width whose summed cross-validation score is highest.
idx = np.argmax(score_width[:, 0])
best_width = score_width[idx, 1]
Пример #4
0
"""
Authors: Mrunmayee Deshpande, Lu Gan, Bruce Huang, Abhishek Venkataraman 

"""
import timeit
from skrvm import RVC
import numpy as np
import os.path
import scipy.io

from import_data import import_data

## Set data path
parsed_data_path = 'parsed_data/'
[X, Y, valX, valY, testX, testY] = import_data(parsed_data_path)

scipy.io.savemat('train.mat', dict(X=X, Y=Y))
scipy.io.savemat('val.mat', dict(valX=valX, valY=valY))
scipy.io.savemat('test.mat', dict(testX=testX, testY=testY))

## Train a RVM
clf = RVC(verbose=True)
print(clf)
clf.fit(valX, valY)
clf.score(testX, testY)
    New[i, :] = n.params
#mdl = smt.AR(d_tot[100,:]).fit(maxlag=30, ic='aic', trend='nc')
#est_order=smt.AR(d_tot[1,:]).select_order(maxlag=30, ic='aic', trend='nc')
#print(est_order)
#p_orders=np.zeros([1380,1])
#for i in range(1380):
#   X=AR(d_tot[i,:])
#  n=X.fit(maxlag=4,ic='aic')
# p_orders[i,0]=len(n.params)

#np.mean(p_orders)

#plt.scatter(New[:,1],New[:,3])

# Label every sample, then shuffle so the train/test split is random.
# (Animal_label and the feature matrix `New` are defined earlier in the file.)
An_lab = Animal_label(tot)

# Random permutation of all 10350 sample indices.
indx = random.sample(range(10350), 10350)
a = New[indx, :]
b = An_lab[indx]

# First 8000 shuffled samples form the training set; the remaining 2350 are
# held out for testing.
# FIX: the original used a[8001:] / b[8001:], which silently dropped sample
# 8000 from BOTH sets — the split is now exhaustive.
x_train = a[0:8000, :]
y_train = b[0:8000]
x_test = a[8000:, :]
y_test = b[8000:]

# Fit an RVM classifier with an RBF kernel and report held-out accuracy.
from skrvm import RVC
clf1 = RVC(kernel='rbf')
clf1.fit(x_train, y_train)
clf1.score(x_test, y_test)
Пример #6
0
# Hyper-parameter selection via 5-fold cross-validation.
params = rvc_param_selection(X, Y, 5)
#
#params = rvc_param_selection2(X,Y,5)

#################### TO TEST
# Candidate RBF widths (coef1), scaled into the 1e-6 .. 1e-3 range.
coef = np.multiply(
    [1000, 500, 200, 150, 100, 70, 60, 50, 40, 30, 20, 10, 5, 2, 1], 1e-6)
# Train/test accuracy per candidate width. Sized from len(coef) rather than a
# hard-coded 15 so the candidate list can be edited without touching the loop.
test_err = np.zeros(len(coef))
train_err = np.zeros(len(coef))
for i in range(len(coef)):
    clf = RVC(kernel="rbf", coef1=coef[i])  # coef1:  1=46 0.1same
    clf.fit(full_normPCA123_array[train_indicies],
            full_subClass_array[train_indicies])
    train_err[i] = clf.score(full_normPCA123_array[train_indicies],
                             full_subClass_array[train_indicies])
    test_err[i] = clf.score(full_normPCA123_array[test_indicies],
                            full_subClass_array[test_indicies])

    print(coef[i])
    print(train_err[i])
    print(test_err[i])
    print("\n\n")

#####################################################################################################################
# Reload previously saved sweep results for comparison/plotting.
rvm_tested_coef1 = np.load("rvm_tested_coef1.npy")
rvm_tested_coef1_again = np.load("rvm_tested_coef1_again.npy")
train1 = np.load("rvm_tested_coef1_trainerr.npy")
train2 = np.load("rvm_tested_coef1_trainerr_again.npy")
test1 = np.load("rvm_tested_coef1_testerr.npy")
test2 = np.load("rvm_tested_coef1_testerr_again.npy")
Пример #7
0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 20 21:26:07 2018

@author: pan
"""

from skrvm import RVC
from sklearn.datasets import load_iris

# Fit a relevance vector classifier on the full iris dataset and score it
# on the same data.
iris = load_iris()
clf = RVC()
clf.fit(iris.data, iris.target)
# Default hyper-parameters, for reference:
# RVC(alpha=1e-06, beta=1e-06, beta_fixed=False, bias_used=True, coef0=0.0,
#     coef1=None, degree=3, kernel='rbf', n_iter=3000, n_iter_posterior=50,
#     threshold_alpha=1000000000.0, tol=0.001, verbose=False)
clf.score(iris.data, iris.target)
Пример #8
0
pd.set_option('display.notebook_repr_html', True)
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("poster")

# RVM classifier
from skrvm import RVC
clf = RVC()

### Read tab-separated data: column 0 = features, column 1 = labels.
# (renamed `dir` -> `data_dir`: the original shadowed the `dir` builtin)
data_dir = '/Users/weng/Downloads/'
# raw = pd.read_csv(os.path.join(data_dir, 'test.txt'), header=0, delimiter=';')
raw_list = pd.read_csv(os.path.join(data_dir, 'test.txt'), sep="\t", header=None)
# array format is needed for further processing (df -> list -> matrix(array) )
raw_X = raw_list[0].values.tolist()
raw_X = np.asarray(raw_X)

raw_y = raw_list[1].values.tolist()
raw_y = np.asarray(raw_y)

# FIX: the original used a Python-2 print statement (a syntax error under
# Python 3) and referenced the undefined name `raw`; report on the frame
# that was actually loaded.
print("Read %d rows of data\n" % len(raw_list))

clf.fit(raw_X, raw_y)

# Sanity-check the classifier on the iris dataset as well.
from sklearn.datasets import load_iris
iris = load_iris()
clf.fit(iris.data, iris.target)
clf.score(iris.data, iris.target)