import numpy as np
from datetime import datetime


def bench_pybrain(X, y, T, valid):
#
#       .. PyBrain ..
#
#   local import: PyBrain's SVM wrappers require libsvm < 2.81
    from pybrain.supervised.trainers.svmtrainer import SVMTrainer
    from pybrain.structure.modules.svmunit import SVMUnit
    from pybrain.datasets import SupervisedDataSet

    tstart = datetime.now()
    ds = SupervisedDataSet(X.shape[1], 1)
    for i in range(X.shape[0]):
        ds.addSample(X[i], y[i])
    clf = SVMTrainer(SVMUnit(), ds)
    clf.train()
    pred = np.empty(T.shape[0], dtype=np.int32)
    for i in range(T.shape[0]):
        pred[i] = clf.svm.model.predict(T[i])
    score = np.mean(pred == valid)
    return score, datetime.now() - tstart
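
# A minimal usage sketch (not part of the original benchmark): the synthetic
# arrays below are illustrative assumptions, only meant to show the expected
# inputs (training features/labels, test features, test labels).
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.randn(200, 5)
    y = (X[:, 0] > 0).astype(np.int32)
    T = rng.randn(50, 5)
    valid = (T[:, 0] > 0).astype(np.int32)
    score, duration = bench_pybrain(X, y, T, valid)
    print('PyBrain SVM accuracy: %.3f, elapsed: %s' % (score, duration))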
Example #2
import logging
from os.path import join

from pybrain.structure.modules.svmunit import SVMUnit
from pybrain.supervised.trainers.svmtrainer import SVMTrainer
from pybrain.utilities import percentError

# import some local stuff
from datasets import generateClassificationData, plotData, generateGridData

logging.basicConfig(level=logging.INFO,
                    filename=join('.', 'testrun.log'),
                    format='%(asctime)s %(levelname)s %(message)s')
logging.getLogger('').addHandler(logging.StreamHandler())

# load the training and test data sets
trndata = generateClassificationData(20, nClasses=2)
tstdata = generateClassificationData(100, nClasses=2)

# initialize the SVM module and a corresponding trainer
svm = SVMUnit()
trainer = SVMTrainer(svm, trndata)

# train the SVM with fixed meta-parameters
log2C = 0.  # degree of slack
log2g = 1.1  # width of RBF kernels
trainer.train(log2C=log2C, log2g=log2g)
# alternatively, could train the SVM using design-of-experiments grid search
##trainer.train( search="GridSearchDOE" )

# pass data sets through the SVM to get performance
trnresult = percentError(svm.activateOnDataset(trndata), trndata['target'])
tstresult = percentError(svm.activateOnDataset(tstdata), tstdata['target'])
print "sigma: %7g,  C: %7g,  train error: %5.2f%%,  test error: %5.2f%%" % (
    2.0**log2g, 2.0**log2C, trnresult, tstresult)

# generate a grid dataset