Example #1
def generate_and_test_nn():
    d = load_training_set()
    n = buildNetwork(d.indim, 13, d.outdim, hiddenclass=LSTMLayer, outclass=SoftmaxLayer, outputbias=False, recurrent=True)
    t = BackpropTrainer(n, learningrate=0.01, momentum=0.99, verbose=True)
    t.trainOnDataset(d, 1000)
    t.testOnData(verbose=True)
    return (n, d)
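The load_training_set helper is elided above; a minimal stand-in, assuming a three-class sequence task so that the LSTM/softmax shapes line up (names and data here are purely illustrative):

from pybrain.datasets import SequenceClassificationDataSet

def load_training_set():
    # hypothetical sketch: 2 input features, 3 classes, one short sequence per class
    d = SequenceClassificationDataSet(2, 1, nb_classes=3)
    for features, label in (([0.1, 0.2], 0), ([0.9, 0.8], 1), ([0.4, 0.6], 2)):
        d.newSequence()
        d.appendLinked(features, [label])
    d._convertToOneOfMany()  # expand targets to one-of-3 so that d.outdim == 3
    return d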
Example #2
def testOldTraining(hidden=15, n=None):
    d = XORDataSet()
    if n is None:
        n = buildNetwork(d.indim, hidden, d.outdim, recurrent=False)
    t = BackpropTrainer(n, learningrate=0.01, momentum=0., verbose=False)
    t.trainOnDataset(d, 250)
    t.testOnData(verbose=True)
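XORDataSet is not defined in this snippet; a minimal definition consistent with its use here (two inputs, one output) would be:

from pybrain.datasets import SupervisedDataSet

class XORDataSet(SupervisedDataSet):
    """The XOR truth table as a supervised dataset."""
    def __init__(self):
        SupervisedDataSet.__init__(self, 2, 1)
        self.addSample([0, 0], [0])
        self.addSample([0, 1], [1])
        self.addSample([1, 0], [1])
        self.addSample([1, 1], [0])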
Example #3
    def initializeNetwork(self):
        can1 = NNTrainData.NNTrainData(cv2.imread('NNTrain/can1.png'), self.encodingDict["can"])
        can2 = NNTrainData.NNTrainData(cv2.imread('NNTrain/can2.png'), self.encodingDict["can"])
        can3 = NNTrainData.NNTrainData(cv2.imread('NNTrain/can3.png'), self.encodingDict["can"])
        stain1 = NNTrainData.NNTrainData(cv2.imread('NNTrain/stain1.png'), self.encodingDict["stain"])
        stain2 = NNTrainData.NNTrainData(cv2.imread('NNTrain/stain2.png'), self.encodingDict["stain"])
        stain3 = NNTrainData.NNTrainData(cv2.imread('NNTrain/stain3.png'), self.encodingDict["stain"])
        dirt1 = NNTrainData.NNTrainData(cv2.imread('NNTrain/dirt1.png'), self.encodingDict["dirt"])
        dirt2 = NNTrainData.NNTrainData(cv2.imread('NNTrain/dirt2.png'), self.encodingDict["dirt"])
        dirt3 = NNTrainData.NNTrainData(cv2.imread('NNTrain/dirt3.png'), self.encodingDict["dirt"])

        self.trainData.append(can1)
        self.trainData.append(can2)
        self.trainData.append(can3)
        self.trainData.append(stain1)
        self.trainData.append(stain2)
        self.trainData.append(stain3)
        self.trainData.append(dirt1)
        self.trainData.append(dirt2)
        self.trainData.append(dirt3)

        for x in self.trainData:
            x.prepareTrainData()

        self.net = buildNetwork(4, 3, 3, hiddenclass=TanhLayer, outclass=SoftmaxLayer)
        ds = SupervisedDataSet(4, 3)

        for x in self.trainData:
            ds.addSample((x.contours/100.0, x.color[0]/1000.0, x.color[1]/1000.0, x.color[2]/1000.0), x.output)

        trainer = BackpropTrainer(self.net, momentum=0.1, verbose=True, weightdecay=0.01)
        trainer.trainOnDataset(ds, 1000)
        trainer.testOnData(verbose=True)
        print "\nSiec nauczona\n"
Example #4
def trainedANN():
    n = FeedForwardNetwork()

    n.addInputModule(LinearLayer(4, name='in'))
    n.addModule(SigmoidLayer(6, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))

    n.sortModules()

    draw_connections(n)
    # d = generateTrainingData()
    d = getDatasetFromFile(root.path()+"/res/dataSet")
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    t.trainOnDataset(d)
    # NOTE: convergence is not guaranteed, so the loop below keeps training
    # until the global error drops below 0.01, restarting from scratch
    # if it has not converged after 20 epochs

    count = 0
    while True:
        globErr = t.train()
        print globErr
        if globErr < 0.01:
            break
        count += 1
        if count == 20:
            return trainedANN()

    exportANN(n)
    draw_connections(n)

    return n
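draw_connections, getDatasetFromFile, and exportANN are helpers from the surrounding project and are not shown. Once trainedANN() returns, the network can be queried directly; a quick illustrative check for the 4-input/2-output shape built above (input values are arbitrary):

net = trainedANN()
print net.activate([0.5, 0.1, 0.3, 0.9])  # two output activations for one 4-feature sample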
Example #5
def trainedRNN():
    n = RecurrentNetwork()

    n.addInputModule(LinearLayer(4, name='in'))
    n.addModule(SigmoidLayer(6, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))

    n.addRecurrentConnection(NMConnection(n['out'], n['out'], name='nmc'))
    # n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], inSliceFrom = 0, inSliceTo = 1, outSliceFrom = 0, outSliceTo = 3))
    n.sortModules()

    draw_connections(n)
    d = getDatasetFromFile(root.path() + "/res/dataSet")
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    t.trainOnDataset(d)

    count = 0
    while True:
        globErr = t.train()
        print globErr
        if globErr < 0.01:
            break
        count += 1
        if count == 50:
            return trainedRNN()
    # exportRNN(n)
    draw_connections(n)

    return n
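NMConnection appears to be a neuromodulated connection type from the surrounding project or a PyBrain extension rather than a core class; with only standard PyBrain classes, the output-to-output loop could be sketched with a FullConnection instead (different learning behaviour, purely illustrative):

n.addRecurrentConnection(FullConnection(n['out'], n['out'], name='rc'))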
Example #9
def main():
    print '----- loading train/test datasets -----'
    train_ds, test_ds = create_datasets()
    print '----- building the network -----'
    net = ann_network()
    trainer = BackpropTrainer(net, learningrate=0.1, momentum=0.1, verbose=True)
    print '----- training the model -----'
    trainer.trainOnDataset(train_ds)
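create_datasets and ann_network are defined elsewhere in the project; a minimal stand-in pair, assuming a 2-input/1-output problem (shapes and data are illustrative):

from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork

def create_datasets():
    ds = SupervisedDataSet(2, 1)
    for inp, out in (((0, 0), 0), ((0, 1), 1), ((1, 0), 1), ((1, 1), 0)):
        ds.addSample(inp, [out])
    return ds.splitWithProportion(0.8)  # (train_ds, test_ds)

def ann_network():
    return buildNetwork(2, 4, 1)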
Example #10
    def execute(self):
        network = self.networkFactoryMethod()
        trainer = BackpropTrainer(network, learningrate=self.learningrate, momentum=self.momentum)
        trainer.trainOnDataset(self.datasetForTraining, self.epochs)
        averageError = trainer.testOnData(self.datasetForTest)
        self.collectedErrors.append(averageError)

        return averageError
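The enclosing class is not shown. The attribute names below all appear in execute(); the class name Experiment, its construction, and the toy datasets are assumptions:

from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork

train_ds = SupervisedDataSet(2, 1)
train_ds.addSample((0, 1), [1])
test_ds = SupervisedDataSet(2, 1)
test_ds.addSample((1, 1), [0])

exp = Experiment()  # hypothetical class name
exp.networkFactoryMethod = lambda: buildNetwork(2, 4, 1)
exp.learningrate, exp.momentum, exp.epochs = 0.01, 0.9, 100
exp.datasetForTraining, exp.datasetForTest = train_ds, test_ds
exp.collectedErrors = []
print exp.execute()  # average error on the test set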
Example #11
def testTraining():
    d = PrimesDataSet()
    d._convertToOneOfMany()
    n = buildNetwork(d.indim, 8, d.outdim, recurrent=True)
    t = BackpropTrainer(n, learningrate=0.01, momentum=0.99, verbose=True)
    t.trainOnDataset(d, 1000)
    t.testOnData(verbose=True)
    for i in range(15):
        print "Guess: %s || Real: %s" % (str(n.activate(i)), str(i in d.generatePrimes(10)))
    print d
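PrimesDataSet is not shown; a hypothetical reconstruction consistent with its use above (_convertToOneOfMany implies a ClassificationDataSet, and generatePrimes(10) yields the first ten primes):

from pybrain.datasets import ClassificationDataSet

class PrimesDataSet(ClassificationDataSet):
    """Hypothetical sketch: classify small integers as prime / not prime."""
    def __init__(self):
        ClassificationDataSet.__init__(self, 1, 1, nb_classes=2)
        primes = self.generatePrimes(10)
        for i in range(15):
            self.addSample([i], [int(i in primes)])

    def generatePrimes(self, n):
        # first n primes by trial division
        found = []
        candidate = 2
        while len(found) < n:
            if all(candidate % p for p in found):
                found.append(candidate)
            candidate += 1
        return found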
Example #12
def testTraining():
    ds = WebsiteFeaturesDataSet()
    net = buildNetwork(ds.indim, 4, ds.outdim, recurrent=True)
    trainer = BackpropTrainer(net,
                              learningrate=0.001,
                              momentum=0.99,
                              verbose=True)
    trainer.trainOnDataset(ds, 1000)
    trainer.testOnData(verbose=True)
    import pdb
    pdb.set_trace()
Example #13
def testTraining():
    print "Reading data"
    d = XORDataSet()
    traind, testd = d.splitWithProportion(0.8)
    print "Building network"
    n = buildNetwork(traind.indim, 4, traind.outdim, recurrent=True)
    print "Training"
    t = BackpropTrainer(n, learningrate=0.01, momentum=0.99, verbose=True)
    t.trainOnDataset(traind, 100)
    testd = XORDataSet(begin=60000, end=80000)
    print t.module.params
    t.testOnData(testd, verbose=True)
Example #15
    def __init__(self, stock_to_predict, days_of_prediction=10, days_of_training=450):

        self.number_of_days_before = 8
        self.days_of_prediction = days_of_prediction

        self.downloader = StockDownloader()

        stock_training_data = self.downloader.download_stock(stock_to_predict, days_of_training, days_of_prediction)
        self.stock_prediction_data = self.downloader.download_stock(stock_to_predict, days_of_prediction)

        self.starting_price = self.stock_prediction_data[0]

        self.dataset = StockSupervisedDataSet(self.number_of_days_before, stock_training_data)
        self.network = buildNetwork(self.dataset.indim, 10, self.dataset.outdim, recurrent=True)
        t = BackpropTrainer(self.network, learningrate=0.00005, momentum=0., verbose=True)
        t.trainOnDataset(self.dataset, 200)
        t.testOnData(verbose=True)

        self.starting_prices = self.dataset['input'][-1]
Example #16
    def __init__(self):
        self.code = {
            'cat': [1, 0, 0],
            'dust': [0, 1, 0],
            'water': [0, 0, 1]
        }

        pack = 'media.images_train'
        train_data = [
            (Neuron(load(file_path(pack, 'cat1.png'))), self.code['cat']),
            (Neuron(load(file_path(pack, 'cat2.png'))), self.code['cat']),
            (Neuron(load(file_path(pack, 'cat3.png'))), self.code['cat']),
            (Neuron(load(file_path(pack, 'dust1.png'))), self.code['dust']),
            (Neuron(load(file_path(pack, 'dust2.png'))), self.code['dust']),
            (Neuron(load(file_path(pack, 'dust3.png'))), self.code['dust']),
            (Neuron(load(file_path(pack, 'water1.png'))), self.code['water']),
            (Neuron(load(file_path(pack, 'water2.png'))), self.code['water']),
            (Neuron(load(file_path(pack, 'water3.png'))), self.code['water']),
        ]

        for x, output in train_data:
            x.prepare()

        self.net = buildNetwork(
            4, 3, 3, hiddenclass=TanhLayer, outclass=SoftmaxLayer
        )
        data = SupervisedDataSet(4, 3)

        for x, output in train_data:
            data.addSample(
                (
                    x.contours / 100.0, x.color[0] / 1000.0,
                    x.color[1] / 1000.0, x.color[2] / 1000.0,
                ),
                output
            )

        trainer = BackpropTrainer(
            self.net, momentum=0.1, verbose=True, weightdecay=0.01
        )
        trainer.trainOnDataset(data, 1000)  # 1000 iterations
        trainer.testOnData(verbose=True)
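A companion method for classifying a new image would reuse the same four-feature scaling; a hypothetical sketch (Neuron, load, and self.net as above):

    def classify(self, path):
        # hypothetical helper: apply the training-time scaling to a fresh image
        x = Neuron(load(path))
        x.prepare()
        # returns the 3 softmax class scores (cat, dust, water)
        return self.net.activate((x.contours / 100.0, x.color[0] / 1000.0,
                                  x.color[1] / 1000.0, x.color[2] / 1000.0))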
Example #17
def testTraining():
    # the AnBnCn dataset (sequential)
    d = AnBnCnDataSet()
    
    # build a recurrent network to be trained
    hsize = 2
    n = RecurrentNetwork()
    n.addModule(TanhLayer(hsize, name='h'))
    n.addModule(BiasUnit(name='bias'))
    n.addOutputModule(LinearLayer(1, name='out'))
    n.addConnection(FullConnection(n['bias'], n['h']))
    n.addConnection(FullConnection(n['h'], n['out']))
    n.addRecurrentConnection(FullConnection(n['h'], n['h']))
    n.sortModules()

    # initialize the backprop trainer and train
    t = BackpropTrainer(n, learningrate=0.1, momentum=0.0, verbose=True)
    t.trainOnDataset(d, 200)
    
    # the resulting weights are in the network:
    print 'Final weights:', n.params
Example #20
    cpi.append(data[1])

    # third feature is unemployment
    une.append(data[2])
    
    fund.append(data[3])

    indata = tuple(data[:features])
    outdata = tuple(data[features:])
    ds.addSample(indata, outdata)

# this builds a network with the number of features as input,
# TWO hidden layers of `hidden` units each, and ds.outdim output neurons.
n = buildNetwork(ds.indim, hidden, hidden, ds.outdim)
t = BackpropTrainer(n, learningrate=0.01, momentum=0.8, verbose=True)
t.trainOnDataset(ds, steps)
t.testOnData(verbose=True)

# let's plot what we have
import matplotlib.pyplot as plt

# let's ask for a prediction: GDP, CPI, Unemployment
#print n.activate([.02,.02,-.002])

x = []
y = []
#print range(len(time))
for i in range(len(time)):
    #print n.activate([gdp(i),cpi(i),une(i)])
    x.append(.25*time[i]+1954.5)
    y.append(n.activate([gdp[i],cpi[i],une[i]]))
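The snippet ends before anything is drawn; with pyplot already imported, the collected series would typically be rendered along these lines (the labels are assumptions):

plt.plot(x, y)
plt.xlabel('year')
plt.ylabel('network output')
plt.show()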
Example #21
ENTRY_DIMENSION = 2
RESULT_DIMENSION = 1

trainingData = (
    ((0.1, 0.8), 1.0),
    ((0.3, 1.0), 1.0),
    ((1.0, 0.6), 0.0),
    ((0.7, 0.6), 0.0),
    ((0.7, 0.1), 1.0),
)

testData = (
    ((0.8, 0.0), 1.0),
    ((0.9, 0.7), 0.0),
    ((0.1, 0.1), 0.0),
    ((0.2, 0.8), 1.0),
    ((0.6, 0.6), 0.0),
    ((0.6, 1.0), 0.0),
    ((1.0, 0.3), 1.0),
    ((0.1, 0.1), 0.0),
)

datasetForTraining = SupervisedDataSet(ENTRY_DIMENSION, RESULT_DIMENSION)
for entry, expectedResult in trainingData:
    datasetForTraining.addSample(entry, [expectedResult])

datasetForTest = SupervisedDataSet(ENTRY_DIMENSION, RESULT_DIMENSION)
for entry, expectedResult in testData:
    datasetForTest.addSample(entry, [expectedResult])

HIDDEN_LAYER_DIMENSION = 4
network = buildNetwork(ENTRY_DIMENSION, HIDDEN_LAYER_DIMENSION, RESULT_DIMENSION, recurrent=True)
trainer = BackpropTrainer(network, learningrate=0.01, momentum=0.99, verbose=True)
trainer.trainOnDataset(datasetForTraining, 1)
trainer.testOnData(datasetForTest, verbose=True)
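Even after the single training epoch used here, the network can already be queried; an illustrative probe on an unseen point:

print network.activate((0.2, 0.9))  # raw output; compare against the nearby testData labels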
Example #22
        trainIn = []
        for x in row[:numberOfInputs]:
            trainIn.append(x)

        trainOut = []
        for x in row[numberOfInputs:]:
            trainOut.append(x)

        d.appendLinked(trainIn, trainOut)

    # build a neural network; the second parameter is the number of hidden units
    n = buildNetwork(d.indim, 3, d.outdim, recurrent=True)

    # configure the trainer
    t = BackpropTrainer(n, learningrate=0.01, momentum=0.99, verbose=True)

    # split the data randomly into 75% training - 25% testing
    train, test = d.splitWithProportion(0.75)
    print "{} - {}".format(len(train), len(test))

    # train the data with n number of epochs
    t.trainOnDataset(train, 10)

    # test the data with the remaining data
    t.testOnData(test, verbose=True)

    # try the same test but with a different method
    net = buildNetwork(d.indim, 3, d.outdim, bias=True, hiddenclass=TanhLayer)
    trainer = BackpropTrainer(net, d)
    trainer.trainUntilConvergence(verbose=True)
Example #23
def testTraining():
    d = SequentialXORDataSet()
    n = buildNetwork(d.indim, 4, d.outdim, recurrent=True)
    t = BackpropTrainer(n, learningrate=0.01, momentum=0.99, verbose=True)
    t.trainOnDataset(d, 1000)
    t.testOnData(verbose=True)
Example #24
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised import BackpropTrainer

from pybrain.datasets import SupervisedDataSet, ImportanceDataSet


class SequentialXORDataSet(ImportanceDataSet):
    """ same thing, but sequential, and having no importance on a second output"""
    def __init__(self):
        ImportanceDataSet.__init__(self, 2, 2)
        self.addSample([0, 0], [0, 1], [1, 0])
        self.addSample([0, 1], [1, 10], [1, 0])
        self.addSample([1, 0], [1, -1], [1, 0])
        self.addSample([1, 1], [0, 0], [1, 0])


d = SequentialXORDataSet()
n = buildNetwork(d.indim, 4, d.outdim, recurrent=True)
t = BackpropTrainer(n, learningrate=0.01, momentum=0.99, verbose=True)
t.trainOnDataset(d, 1000)
t.testOnData(verbose=True)
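Because every sample carries the importance vector [1, 0], the error on the second target column is ignored and only the first (XOR) column is actually learned. A quick illustrative probe:

for inp in ([0, 0], [0, 1], [1, 0], [1, 1]):
    n.reset()  # clear the recurrent state between independent inputs
    print inp, n.activate(inp)  # first output should approach XOR; the second is unconstrained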
Example #25
class Classifier:
    def __init__(self, Xtrain, Ytrain):
        self._Xtrain = Xtrain
        self._Ytrain = Ytrain
        self.features = Xtrain.keys()
        
    def Train(self,feat_list=None,type='logreg',gamma=0.0,domeanstd=True,special_bias=None,add_bias=True, weight=None, class_instance=None, method='sigmoid',factor=10.0,arch=[10],
              cv_feats=None, cv_special_bias=None,cv_class_instance=None):
        if feat_list is None:
            feat_list=self.features
        self.feat_list = feat_list
        self._gamma = gamma
        self._type = type
        self._special_bias = special_bias
        self._add_bias = add_bias
        Xtrain_feats = np.ascontiguousarray(np.hstack([self._Xtrain[feat] for feat in feat_list]))
        self.m, self.std = classifier.feature_meanstd(Xtrain_feats)
        if not domeanstd:  # hacky: overwrite the mean/std we just computed
            self.m[:] = 0
            self.std[:] = 1
        Xtrain_feats -= self.m
        Xtrain_feats /= self.std
        if special_bias is not None:
            Xtrain_feats = np.ascontiguousarray(np.hstack((Xtrain_feats, special_bias)))
        #CV
        if cv_feats is not None:
            cv_feats = np.ascontiguousarray(np.hstack([cv_feats[feat] for feat in feat_list]))
            cv_feats -= self.m
            cv_feats /= self.std
            if special_bias is not None:
                cv_feats = np.ascontiguousarray(np.hstack((cv_feats, cv_special_bias)))
        '''Classifier stage'''
        if type=='linsvm':
            self.w, self.b = classifier.svm_onevsall(Xtrain_feats, self._Ytrain, self._gamma, weight = weight, special_bias=special_bias, add_bias=add_bias)
            return (self.w,self.b)
        elif type=='logreg':
            self.w, self.b = l2logreg_onevsall(Xtrain_feats, self._Ytrain, self._gamma, weight = weight, special_bias=special_bias, add_bias=add_bias)
            return (self.w,self.b)
        elif type=='logreg_atwv':
            self.w, self.b = Train_atwv(Xtrain_feats,class_instance=class_instance,weight=weight,special_bias=special_bias, add_bias=add_bias, method=method, 
                                        factor=factor, gamma=self._gamma, cv_class_instance=cv_class_instance, cv_feats=cv_feats)
        elif type=='nn_atwv':
            self._arch = arch
            self._weights_nn = Train_atwv_nn(Xtrain_feats,class_instance=class_instance,weight=weight,special_bias=special_bias, add_bias=add_bias, 
                                             arch=self._arch, method=method, factor=factor, gamma=self._gamma, cv_class_instance=cv_class_instance, cv_feats=cv_feats)
            #self._weights_nn = Train_atwv_nn(Xtrain_feats,class_instance=class_instance,weight=self._weights_nn,special_bias=special_bias, add_bias=add_bias, 
            #                                 arch=self._arch, method=method, factor=factor*10.0)
        elif type=='nn_debug':
            if mpi.COMM.Get_size() > 1:
                print 'Warning: running NN training under MPI with more than one node!'
                #FIXME: Collect X and Y at root to avoid this
#                 prob = mpi.COMM.gather(prob)
#                 if mpi.is_root():
#                     np.vstack(prob)
#                     #Train
#                     mpi.COMM.Bcast(self._nn)
#                 mpi.distribute(prob)                
            DS = ClassificationDataSet( Xtrain_feats.shape[1], 1, nb_classes=2 )
            #for i in range(Xtrain_feats.shape[0]):
            #    DS.addSample( Xtrain_feats[i,:], [self._Ytrain[i]] )
            DS.setField('input', Xtrain_feats)
            DS.setField('target', self._Ytrain[:,np.newaxis])
            DS._convertToOneOfMany()
            self._nn = buildNetwork(DS.indim, 10, DS.outdim, outclass=SoftmaxLayer, fast=True)
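            # the three trainers below anneal the learning rate: 0.01 -> 0.001 -> 0.0001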
            self._nn_trainer = BackpropTrainer( self._nn, dataset=DS, momentum=0.1, verbose=True, weightdecay=gamma, learningrate=0.01, lrdecay=1.0)
            self._nn_trainer.trainOnDataset(DS,epochs=8)
            self._nn_trainer = BackpropTrainer( self._nn, dataset=DS, momentum=0.1, verbose=True, weightdecay=gamma, learningrate=0.001, lrdecay=1.0)
            self._nn_trainer.trainOnDataset(DS,epochs=8)
            self._nn_trainer = BackpropTrainer( self._nn, dataset=DS, momentum=0.1, verbose=True, weightdecay=gamma, learningrate=0.0001, lrdecay=1.0)
            self._nn_trainer.trainOnDataset(DS,epochs=5)
            return self._nn
    
    def Accuracy(self, X, Y, special_bias = None):
        X_feats = np.ascontiguousarray(np.hstack([X[feat] for feat in self.feat_list]))
        X_feats -= self.m
        X_feats /= self.std
        if special_bias is not None:
            X_feats = np.ascontiguousarray(np.hstack((X_feats, special_bias)))
        if self._type in ('linsvm', 'logreg', 'logreg_atwv'):
            self.test_accu = classifier.Evaluator.accuracy(Y, np.dot(X_feats,self.w)+self.b)
        elif self._type=='nn_atwv':
            pred = get_predictions_nn(X_feats, self._weights_nn, arch=[10])[0]
            pred[:,0] = 0.5
            self.test_accu = classifier.Evaluator.accuracy(Y, pred)
        else:
            DS = ClassificationDataSet( X_feats.shape[1], 1, nb_classes=2 )
            #for i in range(X_feats.shape[0]):
            #    DS.addSample( X_feats[i,:], [Y[i]] )
            DS.setField('input', X_feats)
            DS.setField('target', Y[:,np.newaxis])
            DS._convertToOneOfMany()
            predict,targts = self._nn_trainer.testOnClassData(DS, verbose=True,return_targets=True)
            self.test_accu = np.sum(np.array(predict)==np.array(targts))/float(len(targts))
        return self.test_accu
    
    def loss_multiclass_logreg(self, X, Y, special_bias=None):
        X_feats = np.hstack([X[feat] for feat in self.feat_list])
        X_feats -= self.m
        X_feats /= self.std
        if special_bias is not None:
            X_feats = np.ascontiguousarray(np.hstack((X_feats, special_bias)))
        return loss_multiclass_logreg(Y, X_feats, (self.w,self.b))
    
    def loss_multiclass_nn(self, X, Y, special_bias=None):
        X_feats = np.ascontiguousarray(np.hstack([X[feat] for feat in self.feat_list]))
        X_feats -= self.m
        X_feats /= self.std
        if special_bias is not None:
            X_feats = np.ascontiguousarray(np.hstack((X_feats, special_bias)))
        return loss_multiclass_nn(X_feats, Y, self._weights_nn, self._arch)

    def get_predictions_logreg(self, X, special_bias=None):
        X_feats = np.hstack([X[feat] for feat in self.feat_list])
        X_feats -= self.m
        X_feats /= self.std
        if special_bias is not None:
            X_feats = np.ascontiguousarray(np.hstack((X_feats, special_bias)))
        return get_predictions_logreg(X_feats, (self.w,self.b))
    
    def get_predictions_nn_old(self, X, special_bias=None):
        X_feats = np.ascontiguousarray(np.hstack([X[feat] for feat in self.feat_list]))
        X_feats -= self.m
        X_feats /= self.std
        if special_bias is not None:
            X_feats = np.ascontiguousarray(np.hstack((X_feats, special_bias)))
        DS = ClassificationDataSet( X_feats.shape[1], 1, nb_classes=2 )
        #for i in range(X_feats.shape[0]):
        #    DS.addSample( X_feats[i,:], [0.0] )
        DS.setField('input', X_feats)
        DS.setField('target', np.zeros((X_feats.shape[0],1)))
        DS._convertToOneOfMany()
        prob = self._nn.activateOnDataset(DS)
        prob = mpi.COMM.gather(prob)
        if mpi.is_root():
            return np.vstack(prob)
        else:
            return np.zeros((0))
    
    def get_predictions_nn(self, X, special_bias=None):
        X_feats = np.ascontiguousarray(np.hstack([X[feat] for feat in self.feat_list]))
        X_feats -= self.m
        X_feats /= self.std
        if special_bias is not None:
            X_feats = np.ascontiguousarray(np.hstack((X_feats, special_bias)))
        prob = get_predictions_nn(X_feats, self._weights_nn, self._arch)[0]
        prob = mpi.COMM.gather(prob)
        if mpi.is_root():
            return np.vstack(prob)
        else:
            return np.zeros((0))
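An illustrative driver for the simplest path through Train() (the data here is synthetic, and the feature-dict layout is inferred from the hstack calls above):

import numpy as np

# two named feature blocks for 100 samples, binary labels
Xtrain = {'feat_a': np.random.randn(100, 5), 'feat_b': np.random.randn(100, 3)}
Ytrain = np.random.randint(0, 2, 100)
clf = Classifier(Xtrain, Ytrain)
clf.Train(type='logreg', gamma=0.01)
print clf.Accuracy(Xtrain, Ytrain)  # training-set accuracy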