Example #1
def create_network():
    # Create the network itself
    network = FeedForwardNetwork()
    # Create layers
    NUMBER_OF_INPUT_BYTES = 1600  # because at input we have picture 40x40 size
    NUMBER_OF_HIDDEN_LAYERS = 10  # number of hidden layers
    NUMBER_OF_OUTPUT_CLASSES = 8  # because in output we have 8 classes
    inLayer = LinearLayer(NUMBER_OF_INPUT_BYTES)
    hiddenLayer = SigmoidLayer(NUMBER_OF_HIDDEN_LAYERS)
    outLayer = LinearLayer(NUMBER_OF_OUTPUT_CLASSES)
    # Create connections between layers
    # We create FullConnection - each neuron of one layer is connected to each neuron of other layer
    in_to_hidden = FullConnection(inLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outLayer)
    # Add layers to our network
    network.addInputModule(inLayer)
    network.addModule(hiddenLayer)
    network.addOutputModule(outLayer)
    # Add connections to network
    network.addConnection(in_to_hidden)
    network.addConnection(hidden_to_out)
    # Sort modules to make multilayer perceptron usable
    network.sortModules()
    # prepare array to activate network
    d_letter_array = read_array("d")
    # activate network
    network.activate(d_letter_array)
    return network
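The function above only builds and activates the network; it is never trained. A minimal training sketch (assuming, hypothetically, that read_array("d") returns a flat 1600-pixel vector and that one-hot class labels are available) could look like this:

from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

network = create_network()
ds = SupervisedDataSet(1600, 8)  # 1600 input pixels, 8 output classes
# hypothetical sample: the letter "d" paired with its one-hot class vector
ds.addSample(read_array("d"), [0, 0, 0, 1, 0, 0, 0, 0])
trainer = BackpropTrainer(network, ds)
trainer.trainEpochs(10)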
Example #2
class BMBPTester:
    __fnn = None
    fnnname = 'buildBMTrainer.xml'
    srctestname = 'tester.xlsx'

    def __init__(self, psrctestname='tester.xlsx', pfnnname='buildBMTrainer.xml'):
        self.fnnname = pfnnname
        self.srctestname = psrctestname
        self.__fnn = FeedForwardNetwork()

    def test(self):
        self.__fnn = NetworkReader.readFrom(self.fnnname)
        workbook = xlrd.open_workbook(self.srctestname)
        sheet1 = workbook.sheet_by_index(0)
        x = np.zeros((sheet1.nrows, sheet1.ncols), dtype=float)  # np.float was removed in recent NumPy
        for i in range(sheet1.nrows):
            for j in range(sheet1.ncols):
                x[i][j] = sheet1.cell(i, j).value
        stestx = MinMaxScaler()
        xtest = stestx.fit_transform(x)  # NOTE: fits the scaler on the test data itself
        sy = joblib.load('sy.pkl')
        print(sy)
        values = []
        for x1 in xtest:
            out = self.__fnn.activate(x1)
            print(out)
            values.append(sy.inverse_transform(out.reshape(-1, 1)))
        print(values)
def main():
    n = FeedForwardNetwork()

    in_layer = LinearLayer(2)
    hidden_layer = SigmoidLayer(3)
    out_layer = LinearLayer(1)

    n.addInputModule(in_layer)
    n.addModule(hidden_layer)
    n.addOutputModule(out_layer)

    in_to_hidden = FullConnection(in_layer, hidden_layer)
    hidden_to_out = FullConnection(hidden_layer, out_layer)

    n.addConnection(in_to_hidden)
    n.addConnection(hidden_to_out)

    n.sortModules()

    print(">>> print n")
    print(n)

    print(">>> n.activate([1, 2])")
    print(n.activate([1, 2]))

    print(">>> in_to_hidden.params")
    print(in_to_hidden.params)

    print(">>> hidden_to_out.params")
    print(hidden_to_out.params)

    print(">>> n.params")
    print(n.params)
Example #5
class ANNApproximator(object):
    def __init__(self, alpha):
        self.name = "ANNApprox"
        self.network = FeedForwardNetwork()
        inLayer = LinearLayer(4)
        hiddenLayer = SigmoidLayer(12)
        outLayer = LinearLayer(1)
        self.network.addInputModule(inLayer)
        self.network.addModule(hiddenLayer)
        self.network.addOutputModule(outLayer)
        in_to_hidden = FullConnection(inLayer, hiddenLayer)
        hidden_to_out = FullConnection(hiddenLayer, outLayer)
        self.network.addConnection(in_to_hidden)
        self.network.addConnection(hidden_to_out)

        # Last step to make sure everything works in the connections
        self.network.sortModules()

        self.dataset = SupervisedDataSet(4, 1)
        self.trainer = BackpropTrainer(self.network,
                                       self.dataset,
                                       learningrate=alpha,
                                       momentum=0.0,
                                       verbose=True)

    def computeOutput(self, state_features):
        return self.network.activate(state_features)[0]

    def updateWeights(self, features, desired_output):
        print("updateWeights: features: {0}".format(features))
        print("updateWeights: value: {0}".format(desired_output))
        self.dataset.addSample(features, desired_output)
        # self.trainer.train()
        self.trainer.trainEpochs(10)
        self.dataset.clear()
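Typical usage of this approximator is a loop of updates and evaluations; a minimal sketch with made-up numbers:

approximator = ANNApproximator(alpha=0.01)
features = [0.1, 0.5, -0.3, 0.9]           # hypothetical 4-element state
approximator.updateWeights(features, 1.5)  # trains 10 epochs towards 1.5
print(approximator.computeOutput(features))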
class NNet(FunctionApproximator):
	def __init__(self, num_features, num_hidden_neurons):
		super(NNet,self).__init__(num_features)

		self.ds = SupervisedDataSet(num_features, 1)

		self.net = FeedForwardNetwork()
		self.net.addInputModule(LinearLayer(num_features, name='in'))
		self.net.addModule(LinearLayer(num_hidden_neurons, name='hidden'))
		self.net.addOutputModule(LinearLayer(1, name='out'))
		self.net.addConnection(FullConnection(self.net['in'], self.net['hidden'], name='c1'))
		self.net.addConnection(FullConnection(self.net['hidden'], self.net['out'], name='c2'))
		self.net.sortModules()

	def getY(self, inpt):
		# NOTE: reported to return NaN in the original
		return self.net.activate(inpt)

	def update(self, inpt, target):
		# The original recomputed the TD target here from names that are
		# undefined in this scope (state, action, new_state, reward, ...):
		#   target = q_old + self.alpha * (reward + self.gamma * q_new - q_old)
		# We train on the `target` argument that is actually passed in.
		self.ds.addSample(inpt, target)
		trainer = BackpropTrainer(self.net, self.ds)
		# trainer.trainUntilConvergence() was commented out in favour of:
		trainer.train()
Example #7
class NeuralNetwork(BaseEstimator, RegressorMixin):
    def __init__(
        self,
        inp_neu=4,
        hid_neu=3,
        out_neu=1,
        learn_rate=0.1,
        momentum=0.5,
        weight_dec=0.0001,
        epochs=100,
        split_prop=0.25,
    ):
        self.inp_neu = inp_neu
        self.hid_neu = hid_neu
        self.out_neu = out_neu
        self.learn_rate = learn_rate
        self.momentum = momentum
        self.weight_dec = weight_dec
        self.epochs = epochs
        self.split_prop = split_prop

    def data(self, X, y=None):
        DS = SupervisedDataSet(self.inp_neu, self.out_neu)
        for i in range(0, len(X)):
            DS.addSample((X[i][0], X[i][1], X[i][2], X[i][3]), y[i])  # NOTE: hard-coded for 4 features, not generalised to other sizes
        return DS

    def fit(self, X, y):
        self.n = FeedForwardNetwork()

        self.n.addInputModule(SigmoidLayer(self.inp_neu, name="in"))
        self.n.addModule(SigmoidLayer(self.hid_neu, name="hidden"))
        self.n.addOutputModule(LinearLayer(self.out_neu, name="out"))
        self.n.addConnection(FullConnection(self.n["in"], self.n["hidden"], name="c1"))
        self.n.addConnection(FullConnection(self.n["hidden"], self.n["out"], name="c2"))

        self.n.sortModules()  # initialisation

        self.tstdata, trndata = self.data(X, y).splitWithProportion(self.split_prop)

        trainer = BackpropTrainer(
            self.n, trndata, learningrate=self.learn_rate, momentum=self.momentum, weightdecay=self.weight_dec
        )
        trainer.trainUntilConvergence(verbose=True, maxEpochs=self.epochs)

        return self

    def predict(self, X):
        self.yhat = []
        for i in X:
            self.yhat.append(float(self.n.activate(i)))
        self.yhat = np.array(self.yhat)
        return self.yhat

    def score(self, y):
        vect_se = (self.yhat - y) ** 2
        mse = float(np.sum(vect_se)) / float(len(vect_se))
        return mse
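Because the class implements the scikit-learn estimator interface, it can be used like any other regressor; a minimal sketch on random data (purely illustrative):

import numpy as np

X = np.random.rand(100, 4)  # 4 features, matching inp_neu
y = np.random.rand(100)
model = NeuralNetwork(epochs=10).fit(X, y)
yhat = model.predict(X)
print(model.score(y))  # note: this score() returns the MSE, not R^2 as in scikit-learn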
def fit_predict(xTrain, yTrain, xTest, epochs, neurons):

    # Check edge cases
    if (not len(xTrain) == len(yTrain) or len(xTrain) == 0 or len(xTest) == 0
            or epochs <= 0):
        return

    # Randomize the training data (probably not necessary but pybrain might
    # not shuffle the data itself, so perform as safety check)
    indices = np.arange(len(xTrain))
    np.random.shuffle(indices)

    trainSwapX = [xTrain[x] for x in indices]
    trainSwapY = [yTrain[x] for x in indices]

    supTrain = SupervisedDataSet(len(xTrain[0]), 1)
    for x in range(len(trainSwapX)):
        supTrain.addSample(trainSwapX[x], trainSwapY[x])

    # Construct the feed-forward neural network

    n = FeedForwardNetwork()

    inLayer = LinearLayer(len(xTrain[0]))
    hiddenLayer1 = SigmoidLayer(neurons)
    outLayer = LinearLayer(1)

    n.addInputModule(inLayer)
    n.addModule(hiddenLayer1)
    n.addOutputModule(outLayer)

    in_to_hidden = FullConnection(inLayer, hiddenLayer1)
    hidden_to_out = FullConnection(hiddenLayer1, outLayer)

    n.addConnection(in_to_hidden)
    n.addConnection(hidden_to_out)

    n.sortModules()

    # Train the neural network on the training partition, validating
    # the training progress on the validation partition

    trainer = BackpropTrainer(n,
                              dataset=supTrain,
                              momentum=0.1,
                              learningrate=0.01,
                              verbose=False,
                              weightdecay=0.01)

    trainer.trainUntilConvergence(dataset=supTrain,
                                  maxEpochs=epochs,
                                  validationProportion=0.30)

    outputs = []
    for x in xTest:
        outputs.append(n.activate(x))

    return outputs
Example #9
def logicTest():
	inLayer = LinearLayer(2)
	hiddenLayer = SigmoidLayer(6)
	outLayer = LinearLayer(4) # OR, AND, NOT, XOR
	
	n = FeedForwardNetwork()
	n.addInputModule(inLayer)
	n.addModule(hiddenLayer)
	n.addOutputModule(outLayer)

	inToHidden = FullConnection(inLayer, hiddenLayer)
	hiddenToOut = FullConnection(hiddenLayer, outLayer)

	n.addConnection(inToHidden)
	n.addConnection(hiddenToOut)

	n.sortModules()
	
	print(n.activate([0, 1]))
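As written, the network is activated without ever being trained, so the four outputs are meaningless. A sketch of training it on the four labelled truth tables (one plausible labelling: OR, AND, NOT of the first input, XOR) could be placed before the final print:

from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

ds = SupervisedDataSet(2, 4)
for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ds.addSample((a, b), (a or b, a and b, 1 - a, a ^ b))
trainer = BackpropTrainer(n, ds)
trainer.trainEpochs(100)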
def main(T=10, load_brain=False, save_brain=False):
    singles = [room for room in rooms.allRooms if room.capacity == "Single"]
    preprocessed = preprocess_rooms(singles)
    all_vectors = [room_to_feature_vector(room, preprocessed) for room in singles]
    
    training_sequences = getLabeledRoomsFeaturesAndLabels(getRoomsMap(singles, all_vectors))
    
    input_units = len(all_vectors[0])

    if load_brain and "net" in brain_shelf:
        net = brain_shelf["net"]
        net.sorted = False
        net.sortModules()
    else:
        net = FeedForwardNetwork()
        layer_in = LinearLayer(input_units)
        layer_hidden = SigmoidLayer(1000)
        layer_hidden2 = SigmoidLayer(100)
        layer_out = LinearLayer(1)
        net.addInputModule(layer_in)
        net.addModule(layer_hidden)
        net.addModule(layer_hidden2)
        net.addOutputModule(layer_out)

        in_to_hidden = FullConnection(layer_in, layer_hidden)
        hidden_to_hidden = FullConnection(layer_hidden, layer_hidden2)
        hidden_to_out = FullConnection(layer_hidden2, layer_out)
        net.addConnection(in_to_hidden)
        net.addConnection(hidden_to_hidden)
        net.addConnection(hidden_to_out)

        net.sortModules()

        training_data = SupervisedDataSet(len(all_vectors[0]), 1)
        for training_seq in training_sequences: 
            training_data.appendLinked(training_seq[1], training_seq[2])
        trainer = BackpropTrainer(net, training_data)
        for i in range(T):
            error = trainer.train()
            print("Training iteration %d.  Error: %f" % (i + 1, error))

        if save_brain:
            brain_shelf["net"] = net
    
    labeled_rooms = []
    for i, vector in enumerate(all_vectors):
        labeled_rooms.append((singles[i], net.activate(vector)))
    
    available_rooms = available.get_available_rooms()

    labeled_rooms.sort(key=lambda x: -x[1])
    for room, label in labeled_rooms:
        if room.num in available_rooms:
            print "%16.12f: %s" % (label, room)
class UnmannedNet:
    def __init__(self, n_in, n_hidden, n_out):
        self.net = FeedForwardNetwork()
        inLayer = LinearLayer(n_in)
        hiddenLayer1 = SigmoidLayer(n_hidden)
        hiddenLayer2 = SigmoidLayer(n_hidden)
        outLayer = LinearLayer(n_out)
        self.net.addInputModule(inLayer)
        self.net.addModule(hiddenLayer1)
        self.net.addModule(hiddenLayer2)
        self.net.addOutputModule(outLayer)
        in_to_hidden = FullConnection(inLayer, hiddenLayer1)
        hidden_to_out = FullConnection(hiddenLayer2, outLayer)
        hidden_to_hidden = FullConnection(hiddenLayer1, hiddenLayer2)
        self.net.addConnection(in_to_hidden)
        self.net.addConnection(hidden_to_hidden)
        self.net.addConnection(hidden_to_out)
        self.net.sortModules()
        #self.net.params
        self.ds = SupervisedDataSet(n_in, n_out)

    def load_network(self, fName='./data/mynetwork.xml'):
        self.net = NetworkReader.readFrom(fName)

    def save_network(self, fName='./data/mynetwork.xml'):
        NetworkWriter.writeToFile(self.net, fName)

    def train(self, number):
        self.trainer = BackpropTrainer(self.net, self.ds)
        self.trainer.trainEpochs(number)

    def add_data(self, image, control):
        self.ds.addSample(image, control)

    def save_data(self, fName="./data/mydata"):
        SupervisedDataSet.saveToFile(self.ds, fName)

    def read_data(self, fName="./data/mydata"):
        self.ds = SupervisedDataSet.loadFromFile(fName)

    def prediction(self, image):
        return self.net.activate(image)

    def evaluate(self, valueFaultTolerant):
        target = self.ds.data.get('target')
        inputvalue = self.ds.data.get('input')
        numberOfSample = target.shape[0]
        numberOfCorrect = 0
        for i in range(0, numberOfSample):
            if (abs(target[i] - self.prediction(inputvalue[i])) <=
                    valueFaultTolerant):
                numberOfCorrect += 1
        print "Correct rate is" + str(
            float(numberOfCorrect) / float(numberOfSample))
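A typical round trip with this class (paths and shapes are illustrative; image_vector and control_vector are hypothetical training data):

net = UnmannedNet(n_in=1600, n_hidden=100, n_out=3)
net.add_data(image_vector, control_vector)
net.train(50)
net.save_network('./data/mynetwork.xml')
print(net.prediction(image_vector))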
Example #12
def neuralNet(info, test_data):
    ann = FeedForwardNetwork()
    
    ''' 
        Initiate the input nodes, hidden layer nodes,
        and the output layer nodes.
    '''
    inputLayer = LinearLayer(5)
    hiddenLayer = SigmoidLayer(20) 
    outputLayer = LinearLayer(1)
    
    '''
        Add the nodes to the corresponding layer
    '''
    ann.addInputModule(inputLayer)
    ann.addModule(hiddenLayer)
    ann.addOutputModule(outputLayer)
    
    '''
        Connect the input layer to hidden layer,
        then connect hidden layer to output layer
    '''
    in_to_hidden = FullConnection(inputLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outputLayer)
    
    ann.addConnection(in_to_hidden)
    ann.addConnection(hidden_to_out)
    
    ann.sortModules()
    
    data_set = SupervisedDataSet(5, 1)
    for data in info:
        data_set.addSample(data[:-1], data[-1])
    trainer = BackpropTrainer(ann, data_set, verbose=False)
    
    #test_data, train_data = data_set.splitWithProportion(0.2)
    train_data = data_set
    '''
        Using 10 epochs for testing purposes; trainUntilConvergence
        stops earlier if the validation error converges.
    '''
    train = trainer.trainUntilConvergence(dataset=train_data, maxEpochs=10)
    NetworkWriter.writeToFile(ann, 'filename5.xml')
    
    for d in test_data:
        out = ann.activate(d)
        #print(train)
        print(out)
        
Example #15
def test():
    ds = SupervisedDataSet(2, 1)

    net = FeedForwardNetwork()
    inLayer = LinearLayer(2)
    hiddenLayer = SigmoidLayer(5)
    outLayer = SigmoidLayer(1)
    bias = BiasUnit('bias')

    net.addInputModule(inLayer)
    net.addModule(hiddenLayer)
    net.addOutputModule(outLayer)
    net.addModule(bias)

    in_to_hidd = FullConnection(inLayer, hiddenLayer)
    hi_to_out = FullConnection(hiddenLayer, outLayer)
    bias_to_hidd = FullConnection(bias, hiddenLayer)
    bias_to_out = FullConnection(bias, outLayer)

    net.addConnection(in_to_hidd)
    net.addConnection(hi_to_out)
    net.addConnection(bias_to_hidd)
    net.addConnection(bias_to_out)

    net.sortModules()
    pairs = [(0, 0, 0), (1, 1, 1), (0, 1, 0), (1, 0, 0)]
    for x in range(40):
        for i in range(4):
            p = pairs[i]
            ds.addSample((p[0], p[1]), p[2])

    trainer = BackpropTrainer(net, ds)

    for i in range(400):
        trainer.train()
        print("Epoch : %4d" % i)

    print(net.activate((0, 0)))
    return net
Example #16
    def train(self):
        # We will build up a network piecewise in order to create a new dataset
        # for each layer.
        dataset = self.dataset
        piecenet = FeedForwardNetwork()
        piecenet.addInputModule(copy.deepcopy(self.net.inmodules[0]))
        # Add a bias
        bias = BiasUnit()
        piecenet.addModule(bias)
        # Add the first visible layer
        firstRbm = next(self.iterRbms())
        visible = copy.deepcopy(firstRbm.visible)
        piecenet.addModule(visible)
        # For saving the rbms and their inverses
        self.invRbms = []
        self.rbms = []
        for rbm in self.iterRbms():
            self.net.sortModules()
            # Train the first layer with an rbm trainer for `epoch` epochs.
            trainer = self.trainerKlass(rbm, dataset, self.cfg)
            for _ in range(self.epochs):
                trainer.train()  # the original was missing the call parentheses
            self.invRbms.append(trainer.invRbm)
            self.rbms.append(rbm)
            # Add the connections and the hidden layer of the rbm to the net.
            hidden = copy.deepcopy(rbm.hidden)
            biascon = FullConnection(bias, hidden)
            biascon.params[:] = rbm.biasWeights
            con = FullConnection(visible, hidden)
            con.params[:] = rbm.weights

            piecenet.addConnection(biascon)
            piecenet.addConnection(con)
            piecenet.addModule(hidden)
            # Overwrite old outputs
            piecenet.outmodules = [hidden]
            piecenet.outdim = rbm.hiddenDim
            piecenet.sortModules()

            dataset = UnsupervisedDataSet(rbm.hiddenDim)
            for sample, in self.dataset:
                new_sample = piecenet.activate(sample)
                dataset.addSample(new_sample)
            visible = hidden
Example #18
 def transform(self, X):
     #self.representationNet.sortModules()
     an = FeedForwardNetwork()
     an.addInputModule(self.inLayer)
     an.addOutputModule(self.hiddenLayer)
     an.addModule(self.b)
     an.addConnection(self.in_to_hidden)
     an.addConnection(self.b_to_hidden)
     an.sortModules()
     an.owner = self.supervisedNet
     transformed = []
     for x in X:
         #res = self.representationNet.activate(x)
         res = an.activate(x)
         transformed.append(res)
     return np.array(transformed)
class NeuralNetworkPlayer(BasePlayer):
    """ NN-backed player"""
    def __init__(self):
        super(NeuralNetworkPlayer, self).__init__()

        # Create the network
        self.net = FeedForwardNetwork()

        # Internal Layers
        inLayer = LinearLayer(5)
        hiddenLayer1 = SigmoidLayer(6)
        hiddenLayer2 = SigmoidLayer(6)
        outLayer = LinearLayer(7)

        self.net.addInputModule(inLayer)
        self.net.addModule(hiddenLayer1)
        self.net.addModule(hiddenLayer2)
        self.net.addOutputModule(outLayer)

        self.net.addConnection(FullConnection(inLayer, hiddenLayer1))
        self.net.addConnection(FullConnection(hiddenLayer1, hiddenLayer2))
        self.net.addConnection(FullConnection(hiddenLayer2, outLayer))

        self.net.sortModules()

    def get_move(self, in_vals):
        results = self.net.activate(in_vals)
        return np.argmax(results)

    def set_params(self, params):
        self.net._params = params

    def get_params(self):
        return self.net._params

    def param_dim(self):
        return self.net.paramdim

    def reward(self, amount): pass
    def reset(self): pass
    def learn(self): pass
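There is no trainer here: the flat weight vector is meant to be set from outside, e.g. by an evolutionary or random search over param_dim() values. A minimal sketch:

import numpy as np

player = NeuralNetworkPlayer()
player.set_params(np.random.randn(player.param_dim()))  # random candidate weights
move = player.get_move([0.2, 0.5, 0.1, 0.9, 0.3])       # hypothetical 5-element state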
Example #20
    def narcolepsy(self, naps, awakenings, obesity):
        parameters = [naps, awakenings, obesity]

        # Init network
        network = FeedForwardNetwork()
        # Init Layers
        inLayer = LinearLayer(3)
        outLayer = LinearLayer(1)
        # Init connection
        in_to_out = FullConnection(inLayer, outLayer)
        # Add modules
        network.addInputModule(inLayer)
        network.addOutputModule(outLayer)  # the original mistakenly used addInputModule here
        # Add connections
        network.addConnection(in_to_out)
        # Sort
        network.sortModules()
        # Set equal weights
        # TODO: Use learning to learn weights over time
        # in_to_out._setParameters([.1,.1,.1])
        probability = network.activate(parameters)[0]

        return probability
Example #21
    def insomnia(self, falling_asleep, awakenings, cant_fall_back, low_sleep_hours):
        parameters = [falling_asleep, awakenings, cant_fall_back, low_sleep_hours]

        # Init network
        network = FeedForwardNetwork()
        # Init Layers
        inLayer = LinearLayer(4)
        outLayer = LinearLayer(1)
        # Init connection
        in_to_out = FullConnection(inLayer, outLayer)
        # Add modules
        network.addInputModule(inLayer)
        network.addOutputModule(outLayer)  # the original mistakenly used addInputModule here
        # Add connections
        network.addConnection(in_to_out)
        # Sort
        network.sortModules()
        # Set equal weights
        # TODO: Use learning to learn weights over time
        # in_to_out._setParameters([.1,.1,.1,.1])
        probability = network.activate(parameters)[0]

        return probability
Example #22
    print(len(mfcc_feat))
    currentDur = 0
    for duration in sorted(labFileDictionary):
        correctPhone = labFileDictionary[duration]
        correctPhoneNumber = phoneList.index(correctPhone)
        #print correctPhone
        #print correctPhoneNumber
        duration = int(float(duration) * 100)
        while currentDur < duration:
            totalPredictions += 1
            arr = [0] * len(phoneList)
            arr[correctPhoneNumber] = 1
            prediction = n.activate(mfcc_feat[currentDur])
            p = prediction.argmax()
            logFile.write("Correct Phone: " + correctPhone +
                          " Predicted Phone: " + phoneList[p] + "\n")
            if p == correctPhoneNumber:
                print "Correct"
                count += 1
            currentDur += 1
print(count)
print(totalPredictions)
#for i in xrange(0, 10000):
#	prediction = n.activate(test_set[0][i])
#	p = prediction.argmax()
#	if p == test_set[1][i]:
#		count += 1
#print "Done with " + str(count) + " correct o/p"
Example #23
the 'FullConnection'. """

in2hidden = FullConnection(inLayer, hiddenLayer)
hidden2out = FullConnection(hiddenLayer, outLayer)
n.addConnection(in2hidden)
n.addConnection(hidden2out)
""" All the elements are in place now, so we can do the final step that makes our MLP usable,
which is to call the 'sortModules()' method. """

n.sortModules()
""" Let's see what we did. """

print(n)
""" One way of using the network is to call its 'activate()' method with an input to be transformed. """

print(n.activate([1, 2]))
""" We can access the trainable parameters (weights) of a connection directly, or read
all weights of the network at once. """

print(hidden2out.params)
print(n.params)
""" The former are the last slice of the latter. """

print(n.params[-3:] == hidden2out.params)
""" Ok, after having covered the basics, let's move on to some additional concepts. 
First of all, we encourage you to name all modules, or connections you create, because that gives you 
more readable printouts, and a very concise way of accessing them. 

We now build an equivalent network to the one before, but with a more concise syntax:
"""
n2 = RecurrentNetwork(name='net2')
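The snippet breaks off here; the concise, named construction it announces presumably continues along the lines of the recurrent example further down (Example #30):

n2.addInputModule(LinearLayer(2, name='in'))
n2.addModule(SigmoidLayer(3, name='hidden'))
n2.addOutputModule(LinearLayer(1, name='out'))
n2.addConnection(FullConnection(n2['in'], n2['hidden'], name='c1'))
n2.addConnection(FullConnection(n2['hidden'], n2['out'], name='c2'))
n2.sortModules()
print(n2)  # named modules and connections make this printout readable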
print "ROZPOCZYNA UCZENIE"

trainer = BackpropTrainer(siec,
                          CU,
                          learningrate=0.01,
                          momentum=0.8,
                          verbose=True)
#trainer.trainUntilConvergence(maxEpochs = 500)
for i in range(10):
    print(i)
    trainer.trainEpochs(1)
    print(i, siec.params)

# test on the training set
print("TEST ON THE TRAINING SET")
y_est = np.zeros(x.shape)
for i in range(len(x) - N_wej - 1):
    bufor_wejsciowy = x[i:i + N_wej]
    y_est[i + N_wej] = siec.activate(bufor_wejsciowy[::-1])
#py.plot(t,x)
py.subplot(2, 1, 1)
py.plot(x, 'b')
py.plot(y_est, 'r')
py.subplot(2, 1, 2)
py.plot(x - y_est)
print('residual variance: ', np.var(x - y_est))
print('AR process variance: ', epsilon**2)
print('process parameters: ', str(a))
print('estimated parameters: ', str(siec.params))
py.show()
Example #25
class NeuralNetworkRegression(algorithmbase):
    def ExtraParams(self, hiddenlayerscount, hiddenlayernodescount):

        self.hiddenlayerscount = hiddenlayerscount
        self.hiddenlayernodescount = hiddenlayernodescount
        return self

    def PreProcessTrainData(self):
        self.traindata = preprocess_apply(self.traindata,
                                          self.missingvaluemethod,
                                          self.preprocessingmethods)

    def PrepareModel(self, savedmodel=None):

        if savedmodel is not None:
            self.trainer = savedmodel
        else:
            attributescount = len(self.traindata[0])
            self.ds = SupervisedDataSet(attributescount, 1)
            for i in range(len(self.traindata)):
                self.ds.appendLinked(self.traindata[i], self.trainlabel[i])

            self.net = FeedForwardNetwork()
            inLayer = LinearLayer(len(self.traindata[0]))
            self.net.addInputModule(inLayer)
            hiddenLayers = []
            for i in range(self.hiddenlayerscount):
                hiddenLayer = SigmoidLayer(self.hiddenlayernodescount)
                hiddenLayers.append(hiddenLayer)
                self.net.addModule(hiddenLayer)
            outLayer = LinearLayer(1)
            self.net.addOutputModule(outLayer)

            layers_connections = []
            layers_connections.append(FullConnection(inLayer, hiddenLayers[0]))
            for i in range(self.hiddenlayerscount - 1):
                layers_connections.append(
                    FullConnection(hiddenLayers[i], hiddenLayers[i + 1]))  # the original connected hiddenLayers[i - 1] to hiddenLayers[i]
            layers_connections.append(
                FullConnection(hiddenLayers[-1], outLayer))

            for layers_connection in layers_connections:
                self.net.addConnection(layers_connection)
            self.net.sortModules()

            # train the network
            self.trainer = BackpropTrainer(self.net, self.ds)
            self.trainer.train()

    def PreProcessTestDate(self):
        self.testdata = preprocess_apply(self.testdata,
                                         self.missingvaluemethod,
                                         self.preprocessingmethods)

    def Predict(self):
        prediction = []
        for testrecord in self.testdata:
            prediction.append(self.net.activate(testrecord)[0])

        self.result = [self.testlabel, prediction]

    def GetModel(self):
        return self.trainer
Example #27
class ANN:
    def __init__(self):
        self.name = "ANN"

    def getParams(self):
        return self.in_to_hidden.params, self.hidden_to_out.params

    def create_network(self, nFeatures, hidden1Size=20, nClasses=1):
        # create network object
        self.ffn = FeedForwardNetwork()

        # create layer objects
        inLayer = LinearLayer(nFeatures, name="input")
        hiddenLayer = SigmoidLayer(hidden1Size, name="hidden1")
        #hiddenLayer2 = SigmoidLayer(hidden2Size, name="hidden2")
        outLayer = LinearLayer(nClasses, name="output")

        # add layers to feed forward network
        self.ffn.addInputModule(inLayer)
        self.ffn.addModule(hiddenLayer)
        #self.ffn.addModule(hiddenLayer2)
        self.ffn.addOutputModule(outLayer)

        # add bias unit to layers
        self.ffn.addModule(BiasUnit(name='bias'))

        # establish connections between layers
        self.in_to_hidden = FullConnection(inLayer, hiddenLayer)
        #hidden_to_hidden = FullConnection(hiddenLayer, hiddenLayer2)
        self.hidden_to_out = FullConnection(hiddenLayer, outLayer)

        # print "into hidden: {}".format(len(in_to_hidden.params))
        # print "into out: {}".format(len(hidden_to_out.params))

        # add connections to network
        self.ffn.addConnection(self.in_to_hidden)
        #self.ffn.addConnection(hidden_to_hidden)
        self.ffn.addConnection(self.hidden_to_out)

        # necessary, sort layers into correct/certain order
        self.ffn.sortModules()

        # dataset object
        self.train_ds = SupervisedDataSet(nFeatures, nClasses)
        self.validate_ds = SupervisedDataSet(nFeatures, nClasses)

    # train network
    def train(self, TrainX, TrainY, ValidateX, ValidateY):
        # clear old dataset
        self.train_ds.clear()
        self.validate_ds.clear()

        # add data to dataset object (ds)
        for i in range(TrainX.shape[0]):
            self.train_ds.addSample(TrainX[i], TrainY[i])

        for i in range(ValidateX.shape[0]):
            self.validate_ds.addSample(ValidateX[i], ValidateY[i])

        # randomize weights
        self.ffn.randomize()

        # Backprop trainer object
        self.trainer = BackpropTrainer(self.ffn,
                                       learningrate=.0775,
                                       momentum=.1)
        try:
            with Timer() as t:
                self.train_errors, self.val_errors \
                    = self.trainer.trainUntilConvergence(trainingData=self.train_ds, \
                                                         validationData=self.validate_ds, \
                                                         maxEpochs=500, \
                                                         continueEpochs=10)

            #return self.train_errors, self.val_errors
        except Exception:
            print("Error occurred while training model in ANN.")

        #finally:
        #    print("ANN.py - Time to trainUntilConvergence: {:.03f} sec.".format(t.interval))

        return 'ANN'

    # predict depenent variable for dataset
    def predict(self, data):
        # if only make prediction for one sample
        if (len(data.shape) == 1):
            return self.ffn.activate(data)
        else:
            outputs = np.zeros(data.shape[0])
            for i in range(data.shape[0]):
                outputs[i] = self.ffn.activate(data[i])
            return outputs
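A typical session with this wrapper (shapes and data are illustrative):

import numpy as np

ann = ANN()
ann.create_network(nFeatures=10, hidden1Size=20, nClasses=1)
TrainX, TrainY = np.random.rand(80, 10), np.random.rand(80, 1)
ValX, ValY = np.random.rand(20, 10), np.random.rand(20, 1)
ann.train(TrainX, TrainY, ValX, ValY)
print(ann.predict(ValX))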
                        for i in range(Xte.shape[0]):
                            dst.addSample(Xte[i,:],Yte[i])

                        #net = buildNetwork(ds.indim,ds.indim,ds.indim,ds.indim,ds.outdim,recurrent=False)
                        trainer = BackpropTrainer(net,learningrate=learnRate,momentum=moment,verbose=True)
                        #trainer.trainOnDataset(ds,30)
                        trainer.trainUntilConvergence(ds,10)

                        #trainer.testOnData(verbose=True)

                        netbags.append(net)

                        mse = 0.0
                        for i in range(Xte.shape[0]):
                            mse += pow(net.activate(Xte[i])[0]-Yte[i],2)
                        mse /= Xte.shape[0]
                        mseTrain = 0.0
                        for i in range(Xtr.shape[0]):
                            mseTrain += pow(net.activate(Xtr[i])[0]-Ytr[i],2)
                        mseTrain /= Xtr.shape[0]
                        print('mse(test):{},mse(train):{},epoch:{},width:{},depth:{},momentum:{},learnrate:{}'.format(mse, mseTrain, epochs, hidw, depth, moment, learnRate))
                        #testdat.write('{},{},{},{},{},{},{}\n'.format(mse,mseTrain,epochs,hidw,depth,learnRate,moment))



def predict(entry):
    ymean = 0.0
    for n in netbags:
        ymean += n.activate(entry)[0]
    ymean /= len(netbags)
        target = [16,14,9,16,-7,-2,16,-1,-6,8,7,10,-6,-2,12,2,3]

    # while 1:


        trainer = BackpropTrainer( n, DS, verbose=True)
        trainer.trainUntilConvergence(dataset = None, maxEpochs=750, continueEpochs=10, validationProportion=0.35)
        # trainer.trainEpochs(50)
        # trainer.trainOnDataset(DS, 1500)
        # trainer.testOnData(verbose = True)

        vals = []


        for x in prediction_inputs:
            vals.append(float(n.activate(x)))

        error = 0.0
        num = 0.0
        for o, t in zip(vals, prediction_outputs):
            if abs(t - o) < 10:
                error += abs(t - o)
                num = num + 1

        error = error / num

        if error < local_min_error:
            local_min_error = error

        if error < min_error and num >= 16:
            NetworkWriter.writeToFile(n, "20 prediction games with num = 16.xml")
Example #30
outLayer = LinearLayer(1)

# add each layer, connect them individually and add the connections
# to the MLP
n.addInputModule(inLayer)
n.addModule(hiddenLayer)
n.addOutputModule(outLayer)
in_to_hidden = FullConnection(inLayer, hiddenLayer)
hidden_to_out = FullConnection(hiddenLayer, outLayer)
n.addConnection(in_to_hidden)
n.addConnection(hidden_to_out)

# this is required to make the MLP usable 
n.sortModules()

print(n.activate((2, 2)))  # forward pass
print('n.params\n', n.params)  # all weights

# same but for recurrent network
n = RecurrentNetwork()
n.addInputModule(LinearLayer(2, name='in'))
n.addModule(SigmoidLayer(3, name='hidden'))
n.addOutputModule(LinearLayer(1, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
n.addRecurrentConnection(FullConnection(n['hidden'], n['hidden'], name='c3'))

n.sortModules()


print(n.activate((2, 2)))  # forward pass
Example #31
	for h in labels:
		j = [0,0,0,0,0,0,0,0,0,0]
		j[h] = 1
		targets +=  [j]

	newParams = lmsTrain(network, dataSet, targets, 20)
	newParams = newParams.flatten()
	x[(len(x) - (784 * 10)):] = newParams
	network._setParameters(p=x)
	activations = np.zeros(10)
	results = []

	for x in dataSet:
		activations = np.zeros(10)
		r = network.activate(x)
		activations[np.argmax(r)] = 1
		results += [activations]  # the original appended the constant 1 here
	
	testTargets = []
	for x in testLabels:
		h = np.zeros(10)
		h[x] = 1
		testTargets += [h]
	

	trainingErrors = []

	for i,x in enumerate(results):
		if x != targets[i]:
			trainingErrors += [1]
Example #33
# add the connections and activate the network
in_hidden = FullConnection(inn, hidden)
hidden_out = FullConnection(hidden, out)
fnn.addConnection(in_hidden)
fnn.addConnection(hidden_out)
fnn.sortModules()

# examine the feed-forward network
'''
print('FeedForward network: ', fnn)
print('in_hidden params: ', in_hidden.params)
print('hidden_out params:', hidden_out.params)
print(fnn.params)
'''

#for i in range(0, 10):
# fnn.reset()
# print 'active[2, 1]:', fnn.activate((2,1))

ds = SupervisedDataSet(2, 1)
ds.addSample([0, 0], [0])
ds.addSample([0, 1], [1])
ds.addSample([1, 0], [1])
ds.addSample([1, 1], [2])
tr = BackpropTrainer(fnn, dataset=ds, learningrate=0.01)
tr.trainUntilConvergence()

for i in range(0, 10):
    print(fnn.activate((1, 1)))
#   Feed Forward Networks
#----------------------------------------------
from pybrain.structure import FeedForwardNetwork
from pybrain.structure import LinearLayer, SigmoidLayer
from pybrain.structure import FullConnection

n = FeedForwardNetwork()

#   Construct input, hidden and output layers
inLayer = LinearLayer(2)
hiddenlayer = SigmoidLayer(3)
outLayer = LinearLayer(1)

#   Add layers to the network
n.addInputModule(inLayer)
n.addModule(hiddenlayer)
n.addOutputModule(outLayer)

#   Add full connection between the neurons of each layer
in_to_hidden = FullConnection(inLayer, hiddenlayer)
hidden_to_out = FullConnection(hiddenlayer, outLayer)

#   Add the connections to the network (missing in the original, which would
#   have left the layers disconnected)
n.addConnection(in_to_hidden)
n.addConnection(hidden_to_out)

#   Final step: Necessary for sorting the modules, and other internal initialization
n.sortModules()

#----------------------------------------------
#   Examining a Network
#----------------------------------------------

n.activate([1, 2])
Example #35
class MP_Pybrain(Regression):
    """
    Fully connected multilayer perceptron using pybrain library.
    """
    def __init__(self, train_data, hyper,  n_targets=None, label_targets=None):
        """
    ------------

    train_data: pandas DataFrame
                Contains columns for features and for target variables. The names of the target variables ends
                with the suffix "_tau"
    hyper:      dictionary
                It contains the hyperparameters necessary to run all the functionalities of the model.
                 They are the following:
                "structure" is a list of integers determining the number of neurons in each hidden layer
                "epochs" an integer specifying the maximum number of epochs to run during every training session
                "learning_rate" a float giving the learning rate of the gradient descend
                "momentum" a float giving the value of the momentum for the algorithm
                "batch" a bool. If True the method performs full batch learning, i.e. updates of the weights is done
                using all the instances of the training set. Else, normal online method is performed
                Other parameters regarding cross validation are explained in the base class

        """
        Regression.__init__(self, train_data, hyper, n_targets=n_targets, label_targets=label_targets)

        self.N = FeedForwardNetwork()
        self.structure = [self.n_feature] + hyper['structure'] + [self.n_target]

        self._build_net(self.structure)
        self.res_params = [self.N.params[i] for i in range(len(self.N.params))]

        self.train_fraction = hyper['train_fraction']
        self.seed = hyper['seed']
        self.epochs = hyper['epochs']
        self.learning_rate = hyper['learning_rate']
        self.momentum = hyper['momentum']
        self.batch = bool(hyper['batch'])

    def learn(self, train_data = None, seed = None):
        """
    Performs single run training, and it is designed to be called after network instantiation.

    ----------

    train_data: pandas Dataframe
            It needs to contain datetime objects on index, and both features and target variables.
            The target variables need to end with the suffix "_tau". If None the self.train_set
            variable passed at the moment of instantiation will be used.

    Returns: tuple(MP_Pybrain object,float)
            It returns the model with the lowest training error, and the value of the training error.

        """
        if train_data is not None:
            self.train_set = train_data
            self.randomize()
        ds_train, ds_valid = self._build_dataset(self.train_set)
        trainer = BackpropTrainer(self.N, ds_train, learningrate=self.learning_rate,
                                  momentum=self.momentum,batchlearning=self.batch)
        trainer.train()
        e_train = [self._error(ds_train)]
        e_valid = [self._error(ds_valid)]
        final_model = copy(self)
        fin_error_train = e_train[0]
        fin_error_valid = e_valid[0]
        for i in range(1,self.epochs):
            if i%10 == 0:
                print "epoch: ", i
            trainer.train()
            e_train.append(self._error(ds_train))
            e_valid.append(self._error(ds_valid))
            if e_train[-1] < fin_error_train:
                final_model = deepcopy(self)
                fin_error_train = e_train[-1]
                fin_error_valid = e_valid[-1]
        return final_model, fin_error_train, fin_error_valid

    def xvalidate(self, train_data = None, folds = None):
        """
    Performs n-folds cross-validation on the a data set. The method is designed to reset the network
    to an initial configuration (decided at the moment of instantiation) every time a new training is
    started. The purpose is to make model comparison and returning an average error given a specific
    data set and collection of hyper-parameters. At the moment training and validation sets are chosen
    based on the input sequence of data, i.e. there is no random shuffling of the instances of the data set.

    ----------

    train_data: pandas Dataframe
            It needs to contain datetime objects on index, and both features and target variables.
            The target variables need to end with the suffix "_tau". If None the self.train_set
            variable passed at the moment of instantiation will be used.

    folds: integer
            The number of training/validation partition used in the method. If None it needs to be
            passed in the constructor when instantiating the object for the first time. If not passed
            ever, the method cannot work and an exception needs to be thrown.
    Returns: list, float, float
            A list of all the models trained for each fold, the mean train error and the cross-validation error,
            i.e. the average of NRMSE for all the training/validation partitions created.

        """
        if train_data is not None:
            self.train_set = train_data
        if folds is not None:
            self.cv_folds = folds
        train, validation = self._build_folds(random=False)
        models = []
        train_error = []
        cv_error = []
        for i in range(self.cv_folds):
            print "Cross-validation Fold: ", i+1
            self.randomize()
            model, error, _ = self.learn(train_data=train[i])
            models.append(deepcopy(model))
            train_error.append(error)
            predicted, actual = self.test(validation[i])
            e = 0
            for k in predicted.keys():
                e += errors.RMSE(np.array(actual[k]),np.array(predicted[k]))
            cv_error.append(e)
        return models, np.mean(train_error), np.mean(cv_error)

    def test(self, data):
        """
    Tests the trained model on data. The usage is two fold: 1) Internal usage to calculate errors on validation
    sets. 2) For external usage when a test set is provided. Both the validation and test set need to contain target
    columns. For prediction, where target variables are unknown, please refer to the function self.predict below.
    ----------

    data:       pandas Dataframe
                A pandas dataframe. A deepcopy of it will be made and only the feature columns will be considered.
                Due to the functionality of the pyBrain library we require (at the moment) that the order of the
                colums is the same as the one of the training set used for training.

    Returns:    pandas Dataframe
                A Dataframe with columns containing the predictions of the different target variables and same index as
                the input DataFrame

        """
        data_x = data[self.features]
        data_y = data[self.targets]
        predicted = np.array([])
        for i in range(len(data_x)):
            predicted = np.append(predicted, self.N.activate(data_x.values[i]))
        return pd.DataFrame(predicted, index=data.index, columns=self.targets), data_y

    def predict(self, data):
        """
    It returns target variables given a set of features, using the model trained and saved.
    ---------

    data: pandas Dataframe
         It must contain all the feature columns used for training of the model

    Returns: pandas Dataframe
         It contains the prediction on the target variables. The name of the variables is the same as the one
         provided at the moment of instantiation of object.

        """
        data_x = data[self.features]
        predicted = np.array([])
        for i in range(len(data_x)):
            predicted = np.append(predicted, self.N.activate(data_x.values[i]))
        return pd.DataFrame(predicted, index=data_x.index, columns=self.targets)

    def randomize(self):
        self.N.randomize()


    ### Private functions ###
    def _error(self, ds):
        """
    Calculates the RMSE over an input dataset, given the current state of the network.

    ds: Supervised dataset pybrain style

    Returns: float
        The total error between prediction and actual values.

        """
        predicted = np.array([list(self.N.activate(x)) for x in ds['input']]).transpose()
        actual = np.array([list(x) for x in ds['target']]).transpose()
        total_error = [errors.RMSE(np.array(actual[i]),np.array(predicted[i])) for i in range(len(actual))]
        return sum(total_error)

    def _build_net(self,s):
        layers = [LinearLayer(s[0])]
        self.N.addInputModule(layers[0])
        for i in range(1,len(s)-1):
            layers.append(SigmoidLayer(s[i]))
            self.N.addModule(layers[i])
        layers.append(SigmoidLayer(s[-1]))
        self.N.addOutputModule(layers[-1])
        self._build_connections(layers)

    def _build_connections(self, l):
        for i,j in zip(l,l[1:]):
            a = FullConnection(i,j)
            self.N.addConnection(a)
        self.N.sortModules()

    def _build_dataset(self, data):
        """
    Given a input training Dataframe with features and targets it returns the formatted training and validation
    datasets for pybrain usage, and randomly shuffled according to the self.seed given at instantiation.

    ----------

    data: pandas Dataframe
        It must contains both features and target columns

    Returns: (pybrain dataset, pybrain dataset)
        The first is the training dataset and the second is the validation dataset

        """
        np.random.seed(self.seed)
        permutation = np.random.permutation(np.arange(len(data)))
        sep = int(self.train_fraction * len(data))
        x = data[self.features]
        y = data[self.targets]
        ds_train = SupervisedDataSet(self.n_feature, self.n_target)
        ds_valid = SupervisedDataSet(self.n_feature, self.n_target)
        for i in permutation[:sep]:
            ds_train.addSample(x.values[i], y.values[i])
        for i in permutation[sep:]:
            ds_valid.addSample(x.values[i], y.values[i])
        return ds_train, ds_valid
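The hyper dictionary described in the constructor docstring might look like this (values are illustrative; the base Regression class may expect additional cross-validation keys such as the number of folds):

hyper = {
    'structure': [10, 5],   # two hidden layers
    'epochs': 100,
    'learning_rate': 0.01,
    'momentum': 0.1,
    'batch': False,
    'train_fraction': 0.8,
    'seed': 42,
}
model = MP_Pybrain(train_data, hyper)  # train_data: DataFrame whose target columns end in "_tau"
best_model, train_err, valid_err = model.learn()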
def evalFunc(ds):
    trains = []
    tests = []
    epochsNums = []
    parameters = range(1, 40)
    testAmount = 10
    #for testNum in range(testAmount):
    tstdata, trndata = ds.splitWithProportion( 0.2 )
    hidden_size = 6
    inLayer = LinearLayer(len(ds.getSample(0)[0]))
    hiddenLayer = SigmoidLayer(hidden_size)
    outLayer = LinearLayer(len(ds.getSample(0)[1]))
    n = FeedForwardNetwork()
    n.addInputModule(inLayer)
    n.addModule(hiddenLayer)
    b = BiasUnit()
    n.addModule(b)
    n.addOutputModule(outLayer)
    in_to_hidden = FullConnection(inLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outLayer)
    b_to_hidden = FullConnection(b, hiddenLayer)
    b_to_out = FullConnection(b, outLayer)
    
    n.addConnection(in_to_hidden)
    n.addConnection(hidden_to_out)
    n.addConnection(b_to_hidden)
    n.addConnection(b_to_out)
    
    n.sortModules()
    #print n.activate([1, 2])
    
    
    initialLearningrate = 1
    decay = 0.999995
    trainer = BackpropTrainer(n, trndata, learningrate=initialLearningrate,
                              lrdecay=decay, verbose=True, weightdecay=0)
    #trainer.trainUntilConvergence( verbose = True, validationProportion = 0.15, \
    #maxEpochs = epochsNum, continueEpochs = 10 )
    #print n.activateOnDataset(tstdata)
    first = True
    num = 4
    colors = plt.cm.jet(np.linspace(0, 1, num))
    for i in range(num):
        a = 100
        trainer.trainEpochs(a)
        approx = []
        inputs = []
        reference = []
        sq_err = 0  # renamed from `sum`, which shadowed the builtin
        for input, output in tstdata:
            res = n.activate(input)
            approx.append(res[0])
            inputs.append(input[0])
            reference.append(output[0])
            sq_err += (res - output) ** 2
        rmse = math.sqrt(sq_err / len(tstdata))
        if first:
            first = False
            plt.plot(inputs, reference, label='Original func')
        plt.scatter(inputs, approx, label=str((i+1)*a) + ' epochs, RMSE: '\
                    +str(rmse), c=colors[i])
        plt.legend().draggable()
        plt.xlabel('x')
        plt.ylabel('f(x)')
        plt.grid()

        
    plt.title('Function: sin(10r), Hidden layer size: '+str(hidden_size)+', DS size: '\
        +str(N)+ ', Initial LR: '+str(initialLearningrate)+', LR decay: '+str(decay), color='b')
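
# (Added sketch) evalFunc assumes a module-level DS size `N` for the plot
# title and never calls plt.show(); a minimal driver for the sin(10r)
# experiment could look like this:
import numpy as np
import matplotlib.pyplot as plt
from pybrain.datasets import SupervisedDataSet

N = 200
ds = SupervisedDataSet(1, 1)
for r in np.random.uniform(0, 1, N):
    ds.addSample([r], [np.sin(10 * r)])
evalFunc(ds)
plt.show()
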
Beispiel #37
0
class Learn:

    def __init__(self, mode):
        self.net = None
        self.major = True
        if mode == "minor":
            self.major = False

    def train(self):

        dataModel = SongFactory(self.major).getModels()

        ds = SupervisedDataSet(static.NUM_OF_INPUTS, 1)

        #add samples (input, target) from the data received from SongFactory
        for data in dataModel:
            for input, target in data.model:
                print input, target
                ds.addSample(input, target)


        #instantiate the network
        self.net = FeedForwardNetwork()
        bias = BiasUnit()
        self.net.addModule(bias)

        #create the layers of the network
        inLayer = LinearLayer(static.NUM_OF_INPUTS)
        outLayer = LinearLayer(1)
        hidden1 = SigmoidLayer(25)
        hidden2 = SigmoidLayer(5)

        #add the layers
        self.net.addInputModule(inLayer)
        self.net.addOutputModule(outLayer)
        self.net.addModule(hidden1)
        self.net.addModule(hidden2)

        #create the connection
        in_h1 = FullConnection(inLayer,hidden1)
        h1_h2 = FullConnection(hidden1, hidden2)
        h2_out = FullConnection(hidden2, outLayer)
        b_h1  = FullConnection(bias, hidden1)
        b_h2  = FullConnection(bias, hidden2)

        #add the connection
        self.net.addConnection(in_h1)
        self.net.addConnection(h1_h2)
        self.net.addConnection(h2_out)
        self.net.addConnection(b_h1)
        self.net.addConnection(b_h2)

        self.net.sortModules()

        #trainer to edit the network
        trainer = BackpropTrainer(self.net, ds, learningrate = 0.003)

        trainer.trainEpochs(25)

    #generate a song given an input sequence
    def getSong(self, inputSequence, songLength = 128):
        if self.net is None:
            print "Cannot create from nothing"
            return

        inputSequence = [x for x in inputSequence]
        song = [inputSequence[x]  for x in range(0, static.NUM_OF_INPUTS)]
        nextout = 0
        for x in range(0, songLength):
            nextout = int(self.net.activate(tuple(inputSequence)))

            # just to shake it up a little if we get 4 of the same chord in a row
            if nextout == inputSequence[-1] and nextout == inputSequence[-2] and nextout == inputSequence[-3]:
                recurring = dict()
                for i in song:
                    if i in recurring:
                        recurring[i] += 1
                    else:
                        recurring[i] = 1

                nextout = int(min(recurring, key = recurring.get))

            song.append(nextout)
            inputSequence = inputSequence[1:]
            inputSequence.append(nextout)

        '''print song[4:]
        f = open('output.txt', 'w')
        f.write(' '.join(song[4:]))
        f.close()'''

        return song[4:]

    # Save trained data to file for later usage
    def saveToFile(self):
        if self.net is not None:
            if self.major:
                NetworkWriter.writeToFile(self.net, TRAINED_DATA_FILEPATH_MAJOR)
            else:
                NetworkWriter.writeToFile(self.net, TRAINED_DATA_FILEPATH_MINOR)

        else:
            print "Cannot save nothing"

    # Load trained data from file
    def loadFromFile(self):
        try:
            if self.major:
                self.net = NetworkReader.readFrom(TRAINED_DATA_FILEPATH_MAJOR)
            else:
                self.net = NetworkReader.readFrom(TRAINED_DATA_FILEPATH_MINOR)

        except:
            print "Could not find or open file"
n = FeedForwardNetwork()

inLayer = LinearLayer(2, name='in')
hiddenLayer = SigmoidLayer(3, name='hidden')
outLayer = LinearLayer(1, name='out')

n.addInputModule(inLayer)
n.addModule(hiddenLayer)
n.addOutputModule(outLayer)

#   Full Connection class - add connections/synapses

in_to_hidden = FullConnection(inLayer, hiddenLayer)
hidden_to_out = FullConnection(hiddenLayer, outLayer)
n.addConnection(in_to_hidden)
n.addConnection(hidden_to_out)
# makes our MLP usable
n.sortModules()
print n.activate([1, 2])

print n

# Recurrent Connection class - looks back in time one timestep.
n = RecurrentNetwork()
n.addInputModule(LinearLayer(2, name='in'))
n.addModule(SigmoidLayer(3, name='hidden'))
n.addOutputModule(LinearLayer(1, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
n.addRecurrentConnection(FullConnection(n['hidden'], n['hidden'], name='c3'))

n.sortModules()
print n.activate((2, 2))
print n.activate((2, 2))
print n.activate((2, 2))
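
# (Added note) The three outputs above differ because the recurrent
# connection carries state between calls; reset() clears the buffers so the
# sequence of outputs starts over.
n.reset()
print n.activate((2, 2))   # matches the first activation above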
Beispiel #39
0
class NetworkManager(object):
    def __init__(self, hidden_layers, ally_champ_obj_list,
                 enemy_champ_obj_list):

        self.ally_champ_obj_list = ally_champ_obj_list
        self.enemy_champ_obj_list = enemy_champ_obj_list

        self.set_nodes()

        self.network = FeedForwardNetwork()

        connect_queue = Queue.Queue()

        for layer in xrange(0, hidden_layers):
            connect_queue.put(
                TanhLayer(self.input_node_count,
                          name='hidden_layer_{}'.format(layer)))

        connect_queue.put(SigmoidLayer(1, name='output_layer'))

        prev_layer = LinearLayer(self.input_node_count, name='input_layer')
        self.network.addInputModule(prev_layer)

        while not connect_queue.empty():

            current_layer = connect_queue.get()
            if current_layer.name == 'output_layer':
                self.network.addOutputModule(current_layer)
            else:
                self.network.addModule(current_layer)

            bias = BiasUnit()
            bias_connection = FullConnection(
                bias,
                current_layer,
                name="bias_to_{}_connection".format(current_layer.name))
            self.network.addModule(bias)
            self.network.addConnection(bias_connection)

            connection = FullConnection(prev_layer,
                                        current_layer,
                                        name="{}_to_{}_connection".format(
                                            prev_layer.name,
                                            current_layer.name))
            self.network.addConnection(connection)

            prev_layer = current_layer

        self.network.sortModules()

    def get_node_count(self):
        return (len(self.ally_champ_obj_list) + len(self.enemy_champ_obj_list))

    def set_nodes(
        self
    ):  #TEMPORARY SET_NODE - PERFORMS QUERYSET EVERYTIME - SHOULD EVENTUALLY STORE VALUES AS NODES
        node_list = {}
        match_count = 0
        champion_list = list(
            enumerate(self.ally_champ_obj_list + self.enemy_champ_obj_list))
        self.input_node_count = len(champion_list)
        while len(champion_list) != 0:
            pid, prime = champion_list.pop()
            data = {}
            queryset = Player.objects.filter(champion=prime)

            for (cid, champ) in champion_list:

                if pid <= 4 and cid <= 4:
                    ally = True
                elif pid > 4 and cid > 4:
                    ally = True
                else:
                    ally = False

                if ally:
                    matches = queryset.filter(ally_heroes=champ)
                else:
                    matches = queryset.filter(enemy_heroes=champ)
                match_count += len(matches)

                data[champ] = {
                    'ally': ally,
                    'wins': matches.filter(winner=True).count(),
                    'loses': matches.filter(winner=False).count()
                }

                node_list[prime] = data

        self.node_set = node_list

    def train_network(self):

        training_set = SupervisedDataSet(self.input_node_count, 1)
        validation_set = SupervisedDataSet(self.input_node_count, 1)

        champion_list = list(
            enumerate(self.ally_champ_obj_list + self.enemy_champ_obj_list))
        while len(champion_list) != 0:
            pid, prime = champion_list.pop()
            for (cid, champ) in champion_list:

                input_set = [0] * self.input_node_count
                input_set[pid] = 1
                input_set[cid] = 1

                wins = self.node_set[prime][champ]['wins']
                loses = self.node_set[prime][champ]['loses']

                for win in xrange(0, wins):
                    training_set.addSample(input_set, [1])
                for loss in xrange(0, loses):
                    training_set.addSample(input_set, [0])

        print 'Training Set Length = ', len(training_set)

        ally_list = self.ally_champ_obj_list
        enemy_list = self.enemy_champ_obj_list
        prime = ally_list.pop()
        validation_queryset = Player.objects.filter(champion=prime)
        print len(validation_queryset)
        for ally in ally_list:
            validation_queryset = validation_queryset.filter(ally_heroes=ally)
            print len(validation_queryset)
        for enemy in enemy_list:
            validation_queryset = validation_queryset.filter(
                enemy_heroes=enemy)
            print len(validation_queryset)

        print 'Validation Set Length = ', len(validation_queryset)

        validation_wins = validation_queryset.filter(winner=True).count()
        validation_loses = validation_queryset.filter(winner=False).count()

        for win in xrange(0, validation_wins):
            validation_set.addSample([1] * self.input_node_count, [1])
        for loss in xrange(0, validation_loses):
            validation_set.addSample([1] * self.input_node_count, [0])

        if not validation_set:
            print 'There is no validation set; expect more error in the output'
        else:
            print 'Raw Win Rate = ', str(
                float(validation_wins) /
                float(validation_wins + validation_loses))

        trainer = BackpropTrainer(self.network, learningrate=0.5)
        trainer.trainUntilConvergence(
            validationData=validation_set,
            trainingData=training_set,
            dataset=training_set,
            continueEpochs=10,
            maxEpochs=50,
            convergence_threshold=1,
        )

        return str(
            float(validation_wins) / float(validation_wins + validation_loses))

    def run_network(self):

        input_set = [1] * self.input_node_count
        return self.network.activate(input_set)
Beispiel #40
0
n.addRecurrentConnection(FullConnection(n['hiddenLayerA'], n['hiddenLayerB'], name='rec3'))

n.sortModules()
print 'Network One (Recurrent)' + str(n.activate([1,2,3]))
print 'Network One (Recurrent)' + str(n.activate([1,2,3]))

####
#FEED FORWARD NETWORK
####

n2 = FeedForwardNetwork()

inLayer2 = LinearLayer(inputVector, name='inputLayer')
hiddenLayerA2 = SigmoidLayer(hiddenVector, name='hiddenLayerA')
hiddenLayerB2 = SigmoidLayer(hiddenVector, name='hiddenLayerB')
outputLayer2 = LinearLayer(outputVector, name='outputLayer')

n2.addInputModule(inLayer2)
n2.addModule(hiddenLayerA2)
n2.addModule(hiddenLayerB2)
n2.addOutputModule(outputLayer2)

n2.addConnection(FullConnection(n2['inputLayer'], n2['hiddenLayerA'], name='c1'))
n2.addConnection(FullConnection(n2['hiddenLayerA'], n2['hiddenLayerB'], name='c2'))
n2.addConnection(FullConnection(n2['hiddenLayerB'], n2['outputLayer'], name='c3'))

n2.sortModules()
print 'Network Two (Not Recurrent)' + str(n2.activate([1,2,3]))
print 'Network Two (Not Recurrent)' + str(n2.activate([1,2,3]))
Beispiel #41
0
class BMTrainer:
    # number of hidden-layer neurons:
    hiddendim = 3
    # source file with the training data:
    srcname = 'trainer.xlsx'
    # file in which the trained network is stored:
    destname = 'buildBMTrainer.xml'
    # number of result columns in the source file (output layer size)
    rescol = 1
    # whether to print intermediate iterations
    verbose = True
    # overall final error
    finalerror = 0
    __fnn = None
    __sy = None

    def __init__(self,
                 _hiddendim=3,
                 _srcname='trainer.xlsx',
                 _destname='buildBMTrainer.xml'):
        self.hiddendim = _hiddendim
        self.srcname = _srcname
        self.destname = _destname

    def readexcel(self):
        workbook = xlrd.open_workbook(self.srcname)
        sheet1 = workbook.sheet_by_index(0)
        if (self.verbose):
            print('training set: ' + str(sheet1.nrows) + ' rows, ' +
                  str(sheet1.ncols) + ' columns; results occupy ' +
                  str(self.rescol) + ' column(s)')
        # data = np.empty()
        # target = np.empty()
        # for i, d in enumerate(sheet1):
        #     data[i] = np.asarray(d[:-1], dtype=np.float64)
        #     target[i] = np.asarray(d[-1], dtype=np.float64)
        # test = [[0 for i in range(sheet1.nrows)] for j in range(sheet1.ncols)]
        if (sheet1.nrows > 1 and sheet1.ncols > self.rescol):
            x = np.zeros((sheet1.nrows - 1, sheet1.ncols - self.rescol),
                         dtype=np.float)
            y = np.zeros((sheet1.nrows - 1, self.rescol), dtype=np.float)
            for i in range(sheet1.nrows - 1):
                for j in range(sheet1.ncols):
                    if (j < sheet1.ncols - self.rescol):
                        # print sheet1.cell(i + 1, j).value
                        x[i][j] = sheet1.cell(i + 1, j).value
                    else:
                        y[i][j - sheet1.ncols + self.rescol] = sheet1.cell(
                            i + 1, j).value
        return x, y

    def buildBMTrainer(self):
        # print np.random.rand(1)
        # print np.random.rand(1)
        x, y = self.readexcel()
        # read the simulation data from the sklearn datasets
        # boston = load_boston()
        # x = boston.data
        # y = boston.target.reshape(-1, 1)
        # for i in range(0,x.shape[0]):
        #     for j in range(0,x.shape[1]):
        #         print (x[i][j])
        # print x.shape
        # sys.exit();
        # for x in x:
        #     print x
        # print x
        # print y
        # sys.exit(0)
        # split train/test 7:3 directly, without shuffling
        # per = int(len(x) * 0.7)
        per = int(len(x))
        # normalize the data (normalization is essentially required when using Sigmoid)
        sx = MinMaxScaler()
        sy = MinMaxScaler()
        xTrain = x[:per]
        xTrain = sx.fit_transform(xTrain)
        yTrain = y[:per]
        # print yTrain
        yTrain = sy.fit_transform(yTrain)
        # print yTrain
        # print sy.inverse_transform(yTrain)
        # sys.exit()
        # xTest = x[per:]
        # xTest = sx.transform(xTest)
        # yTest = y[per:]
        # yTest = sy.transform(yTest)
        # print xTest.shape
        # for x in xTest:
        #     print x
        # sys.exit()

        # initialize the feed-forward network
        self.__fnn = FeedForwardNetwork()

        # build the input, hidden and output layers (usually 3-5 hidden layers at most)
        inLayer = LinearLayer(x.shape[1], 'inLayer')

        # hiddenLayer = TanhLayer(3, 'hiddenLayer')
        hiddenLayer = TanhLayer(self.hiddendim, 'hiddenLayer')
        outLayer = LinearLayer(self.rescol, 'outLayer')
        # hiddenLayer1 = TanhLayer(5, 'hiddenLayer1')
        # outLayer = LinearLayer(1, 'outLayer')

        # add the input, hidden and output layers to the network
        self.__fnn.addInputModule(inLayer)
        self.__fnn.addModule(hiddenLayer)
        # fnn.addModule(hiddenLayer1)
        self.__fnn.addOutputModule(outLayer)

        # create full connections between the layers
        in_to_hidden = FullConnection(inLayer, hiddenLayer)
        # in_to_hidden.setName('in_to_hidden')
        # in_to_hidden._setParameters([0 for i in range(30)])
        hidden_to_out = FullConnection(hiddenLayer, outLayer)
        # hidden_to_out.setName('hidden_to_out')
        # hidden_to_out._setParameters([1 for i in range(3)])
        # hidden_to_hidden = FullConnection(hiddenLayer,hiddenLayer1 )
        # hidden_to_out = FullConnection(hiddenLayer1, outLayer)

        # register the connections with the network
        self.__fnn.addConnection(in_to_hidden)
        # fnn.addConnection(hidden_to_hidden)
        self.__fnn.addConnection(hidden_to_out)
        self.__fnn.sortModules()

        # initialize the supervised dataset
        DS = SupervisedDataSet(x.shape[1], self.rescol)

        # add the training data and labels to DS
        # for i in range(len(xTrain)):
        #     DS.addSample(xTrain[i], yTrain[i])
        for i in range(len(xTrain)):
            DS.addSample(xTrain[i], yTrain[i])

        # train with backprop until convergence, capped at 10000 epochs
        trainer = BackpropTrainer(self.__fnn,
                                  DS,
                                  learningrate=0.001,
                                  verbose=self.verbose)
        trainingErrors = trainer.trainUntilConvergence(maxEpochs=10000)
        self.finalerror = trainingErrors[0][-2]
        if (self.verbose):
            print('final overall error:', self.finalerror)
        self.__sy = sy
        # print "1"
        # print fnn.activate(x)
        for i in range(len(xTrain)):
            print(
                sy.inverse_transform(
                    self.__fnn.activate(xTrain[i]).reshape(-1, 1)))
        # sys.exit()
        # print sy.inverse_transform(fnn.activate(x))[0]
        # validate the result on the test set
        # values = []
        # sy.inverse_transform()
        # for x in xTest:
        #     values.append(sy.inverse_transform(fnn.activate(x))[0])
        # for x in xTest:
        #     x1 = fnn.activate(x)
        #     x2 = sy.inverse_transform(x1.reshape(-1, 1))
        #     values.append(x2[0])
        # print "2"
        # compute the RMSE (Root Mean Squared Error)
        # totalsum = sum(map(lambda x: x ** 0.5, map(lambda x, y: pow(x - y, 2), boston.target[per:], values))) / float(len(xTest))
        # print totalsum
        # print "3"
        # save the trained network

    def saveresult(self):
        NetworkWriter.writeToFile(self.__fnn, self.destname)
        joblib.dump(self.__sy, 'sy.pkl', compress=3)
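
# (Added sketch) Typical use of BMTrainer; file names are the class defaults,
# and 'sy.pkl' holds the output scaler for later inverse transforms:
#
#   trainer = BMTrainer(_hiddendim=3, _srcname='trainer.xlsx')
#   trainer.buildBMTrainer()
#   trainer.saveresult()
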
Beispiel #42
0
from pybrain.datasets import SupervisedDataSet
ds = SupervisedDataSet(4, 1)


ds.addSample((0, 0, 0, 0), (0,))
ds.addSample((0, 0, 1, 1), (1,))
ds.addSample((0, 0, 0, 1), (0,))
ds.addSample((0, 1, 1, 0), (1,))

print "Before training"

sq_err = []
for data in ds:
    input_entry = data[0]
    output_entry = data[1]
    pred_entry = n.activate(input_entry)
    print 'Actual:', output_entry, 'Predicted', pred_entry

    sq_err.append((pred_entry[0] - output_entry[0])**2)
print "RMSE: %.2f" % (sum(sq_err) / len(sq_err))


from pybrain.supervised.trainers import BackpropTrainer
trainer = BackpropTrainer(n, ds)
trainer.trainUntilConvergence()

print "~~~"

print "After training"

sq_err = []
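# The example is truncated here; the post-training check presumably mirrors
# the pre-training loop above:
for data in ds:
    input_entry = data[0]
    output_entry = data[1]
    pred_entry = n.activate(input_entry)
    print 'Actual:', output_entry, 'Predicted', pred_entry
    sq_err.append((pred_entry[0] - output_entry[0])**2)
print "RMSE: %.2f" % ((sum(sq_err) / len(sq_err)) ** 0.5)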
Beispiel #43
0
ds = SupervisedDataSet(inLen, outLen)

for i in xrange(len(arr)):
	ds.addSample(arr[i],lab[i])

print "filled training dataset"

trainer = BackpropTrainer(n, ds)

trainer.train()
print "training done1"
trainer.train()
print "training done2"
trainer.train()
print "training done3"

result = []
def sk(x): return x[1]
for i,t in enumerate(tes):
	print i
	r = [(i,j) for i,j in enumerate(n.activate(t))]

	result.append([(i,j) for i,j in sorted(r, key=sk, reverse=True)])



f = file("results/nn%d.csv" % time(),"w")
f.write("\n".join([",".join([str(x) for x in i]) for i in result ]))
f.flush()
f.close()
Beispiel #44
0
class NET():
    def __init__(self, arg):
        self.inputsize = arg[0]
        self.outputsize = arg[-1]
        self.hiden = arg[1:-1]
        self.err = 1
        self.old_err = 1
        b = []
        b.append(self.inputsize)
        b += self.hiden
        b.append(self.outputsize)
        #print b#"%s, %s, %s, hiddenclass=TanhLayer"%(self.inputsize, self.hiden, self.outputsize)
        self.net = FeedForwardNetwork()
        self.inputlayer = LinearLayer(self.inputsize, "Input")
        self.net.addInputModule(self.inputlayer)
        self.outputlayer = LinearLayer(self.outputsize, "Output")
        self.net.addOutputModule(self.outputlayer)
        self.hidenlayers = []
        for i in xrange(len(self.hiden)):
            self.hidenlayers.append(SigmoidLayer(self.hiden[i], "hiden%s" % i))
            self.net.addModule(self.hidenlayers[-1])
        self.net.addConnection(
            FullConnection(self.inputlayer, self.outputlayer))
        for i in xrange(len(self.hidenlayers)):
            self.net.addConnection(
                FullConnection(self.inputlayer, self.hidenlayers[i]))
            self.net.addConnection(
                FullConnection(self.hidenlayers[i], self.outputlayer))
        for i in xrange(len(self.hidenlayers)):
            for j in xrange(i + 1, len(self.hidenlayers)):
                self.net.addConnection(
                    FullConnection(self.hidenlayers[i], self.hidenlayers[j]))
                #self.print_conections(self.net)
        self.net.sortModules()
        self.ds = SupervisedDataSet(self.inputsize, self.outputsize)

    def Update(self, hiden, h):
        self.net = FeedForwardNetwork()
        self.inputlayer = LinearLayer(self.inputsize, "Input")
        self.net.addInputModule(self.inputlayer)
        self.outputlayer = LinearLayer(self.outputsize, "Output")
        self.net.addOutputModule(self.outputlayer)
        self.hidenlayers = []
        for i in xrange(len(hiden)):
            self.hidenlayers.append(SigmoidLayer(hiden[i], "hiden%s" % i))
            self.net.addModule(self.hidenlayers[-1])
        self.net.addConnection(
            FullConnection(self.inputlayer, self.outputlayer))
        for i in xrange(len(self.hidenlayers)):
            self.net.addConnection(
                FullConnection(self.inputlayer, self.hidenlayers[i]))
            self.net.addConnection(
                FullConnection(self.hidenlayers[i], self.outputlayer))
        for i in xrange(len(self.hidenlayers)):
            for j in xrange(i + 1, len(self.hidenlayers)):
                if i < h:
                    self.net.addConnection(
                        FullConnection(self.hidenlayers[i],
                                       self.hidenlayers[j]))
                elif i == h:
                    self.net.addConnection(
                        FullConnection(self.hidenlayers[i],
                                       self.hidenlayers[j],
                                       inSliceTo=hiden[i] - 1))
                else:
                    self.net.addConnection(
                        FullConnection(self.hidenlayers[i],
                                       self.hidenlayers[j]))
                #self.print_conections(self.net)
        self.net.sortModules()
        self.hiden = hiden

    def print_conections(self, n):
        print("BEGIN")
        for mod in n.modules:
            print(mod)
            for conn in n.connections[mod]:
                print(conn)
                for cc in range(len(conn.params)):
                    print(conn.whichBuffers(cc), conn.params[cc])
        print("END")

    def AddData(self, datainput, dataoutput, learningrate):
        if len(dataoutput) != len(datainput):
            print("Not equals data", len(dataoutput), len(datainput))
            return 1
        self.ds = SupervisedDataSet(self.inputsize, self.outputsize)
        for i in xrange(len(dataoutput)):
            self.ds.appendLinked(datainput[i], dataoutput[i])
        self.trainer = BackpropTrainer(self.net,
                                       dataset=self.ds,
                                       learningrate=learningrate)
        return 0

    def TrainNet(self, epoch, error):

        if epoch <= 5:
            epoch = 5
        i = 0
        count = 0
        while i < epoch:
            if error == self.err:
                break
            self.err = self.trainer.train()
            if self.err == self.old_err:
                count += 1
            else:
                count = 0
            if count == 3:
                self.err = self.old_err
                return (self.err, 1)
            self.old_err = self.err
            i += 1
        #self.SaveNet('%s  %s_%s_%s.work'%(self.err, self.inputsize, self.hiden, self.outputsize))
        return [self.err, 0]

    def TrainNetOnce(self):

        self.err = self.trainer.train()

        return self.err

    def SaveNet(self, filename=None):
        if filename is None:
            NetworkWriter.writeToFile(
                self.net, '%s  %s_%s_%s.xml' %
                (self.err, self.inputsize, self.hiden, self.outputsize))
        else:
            NetworkWriter.writeToFile(self.net, filename)

    def LoadNet(self, fname):
        self.net = NetworkReader.readFrom(fname)
        tree = ET.parse(fname)
        x = tree.getroot()
        l = []
        for modules in x.findall('Network/Modules/SigmoidLayer/dim'):
            l.append(int(modules.get("val")))
        self.hiden = l[:]
        self.inputsize = self.net.indim
        self.outputsize = self.net.outdim

    def TestNet(self, inp):
        if len(inp) != self.inputsize:
            return 0
        return self.net.activate(inp[:])

    def UpdateWeights(self, f1, f2=None):
        n = NetworkReader.readFrom(f1)
        if f2 is not None:
            n2 = NetworkReader.readFrom(f2)

        def DictParams(n):
            l1 = []
            for mod in n.modules:
                l = []
                for conn in n.connections[mod]:

                    if conn.paramdim > 0:

                        l.append([conn.outmod.name, conn.params])
                d = dict(l)
                l1.append([mod.name, d])
            d1 = dict(l1)
            return d1

        d1 = DictParams(n)
        if f2 is not None:
            d2 = DictParams(n2)
        d3 = DictParams(self.net)

        params = np.array([])
        if f2 is not None:
            for i in d2:
                for j in d2[i]:
                    try:
                        b = d3[i][j][:]
                        b[:d2[i][j].size] = d2[i][j][:]
                        d3[i].update({j: b})
                    except:
                        pass
        for i in d1:
            for j in d1[i]:
                try:
                    b = d3[i][j][:]
                    b[:d1[i][j].size] = d1[i][j][:]
                    d3[i].update({j: b})
                except:
                    pass
        for i in d3["Input"]:
            params = np.hstack((params, d3["Input"][i]))
        for i in xrange(len(self.hiden)):
            for j in d3["hiden%s" % i]:
                params = np.hstack((params, d3["hiden%s" % i][j]))
        self.net._setParameters(params)
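
# (Added sketch) Typical use of NET; the layer sizes and the XOR-style data
# are illustrative:
#
#   net = NET([2, 4, 1])
#   net.AddData([[0, 0], [0, 1], [1, 0], [1, 1]], [[0], [1], [1], [0]], 0.01)
#   err, stalled = net.TrainNet(200, 0.0)
#   print(net.TestNet([1, 0]))
#   net.SaveNet('xor_net.xml')
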
Beispiel #45
0
hidden_to_out = FullConnection(hiddenLayer, outLayer)

network.addConnection(in_to_hidden)
network.addConnection(hidden_to_out)

network.sortModules()

# Train network using Backpropagation

from pybrain.supervised.trainers import BackpropTrainer

trainer = BackpropTrainer(network, dataset=train_data, momentum=0.1, weightdecay=0.01, verbose=True)

from pybrain.tools.validation import CrossValidator

# Train for 150 epochs
trainer.trainEpochs(150)
# Calculate accuracy

print(test_data['target'])
print("__________")
print(train_data['target'])

correct = 0
for i in range(test_data['input'].shape[0]):
    result = np.round(network.activate(test_data['input'][i]))
    if (np.sum(result == test_data['target'][i]) == 2):
        correct += 1

print('Test accuracy: %d/%d' % (correct, test_data['target'].shape[0]))
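
# (Added variant) The `== 2` check above assumes 2-dimensional one-hot
# targets; a size-independent alternative compares the argmax of prediction
# and target instead of rounding:
correct = 0
for i in range(test_data['input'].shape[0]):
    out = network.activate(test_data['input'][i])
    if np.argmax(out) == np.argmax(test_data['target'][i]):
        correct += 1
print('Test accuracy: %d/%d' % (correct, test_data['target'].shape[0]))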
Beispiel #46
0
def part2():
    '''
    Determine the minimal number of hidden units
    required to train the network successfully
    using multiple hidden layers
    '''
    '''
    # Parameters
    HIDDEN_NODES =          8
    LEARNING_DECAY =        0.9999    # Set in range [0.9, 1]
    LEARNING_RATE =         0.08    # Set in range [0, 1]
    MOMENTUM =              0.0    # Set in range [0, 0.5]
    TRAINING_ITERATIONS =   1000
    BATCH_LEARNING =        False
    VALIDATION_PROPORTION = 0.0
    SPARSE_LENGTH =         16
    '''

    # Parameters
    HIDDEN_NODES = 4
    LEARNING_DECAY = 0.9999  # Set in range [0.9, 1]
    LEARNING_RATE = 0.111  # Set in range [0, 1]
    MOMENTUM = 0.05  # Set in range [0, 0.5]
    TRAINING_ITERATIONS = 5000
    BATCH_LEARNING = False
    VALIDATION_PROPORTION = 0.0
    SPARSE_LENGTH = 16

    # Get the dataset
    dataset = sparse_coding.generateFull(SPARSE_LENGTH)
    validationSet = sparse_coding.generateFull(SPARSE_LENGTH)
    dataset, classes = sparse_coding.toClassificationDataset(dataset)
    inDimension = dataset.indim
    outDimension = dataset.outdim

    print inDimension
    print outDimension

    # Set up the neural network layers
    inLayer = LinearLayer(inDimension, name='input')
    hiddenLayer1 = SigmoidLayer(HIDDEN_NODES, name='hidden1')
    hiddenLayer2 = TanhLayer(HIDDEN_NODES, name='hidden2')
    outLayer = LinearLayer(outDimension, name='output')

    # Set up the connections
    input_to_hidden1 = FullConnection(inLayer, hiddenLayer1, name='in_h1')
    hidden1_to_hidden2 = FullConnection(hiddenLayer1,
                                        hiddenLayer2,
                                        name='h1_h2')
    hidden2_to_output = FullConnection(hiddenLayer2, outLayer, name='h2_out')
    hidden1_to_output = FullConnection(hiddenLayer1, outLayer, name='h1_out')

    # Create the network and add the information
    neuralNet = FeedForwardNetwork()
    neuralNet.addInputModule(inLayer)
    neuralNet.addModule(hiddenLayer1)
    neuralNet.addModule(hiddenLayer2)
    neuralNet.addOutputModule(outLayer)

    neuralNet.addConnection(input_to_hidden1)
    neuralNet.addConnection(hidden1_to_hidden2)
    neuralNet.addConnection(hidden2_to_output)
    neuralNet.addConnection(hidden1_to_output)
    neuralNet.sortModules()

    print neuralNet

    # Train the network
    trainer = BackpropTrainer(neuralNet,
                              dataset,
                              learningrate=LEARNING_RATE,
                              momentum=MOMENTUM,
                              lrdecay=LEARNING_DECAY,
                              batchlearning=BATCH_LEARNING)

    trainingErrors = []
    validationErrors = []

    for i in xrange(TRAINING_ITERATIONS):
        print "Training iteration: ", i

        # Check if VALIDATION_PROPORTION is not 0. This will split the input dataset into
        # VALIDATION_PROPORTION % for Validation Data and
        # (1 - VALIDATION_PROPORTION) % for Training Data
        # e.g. 25% Validation Data and 75% Training Data

        if VALIDATION_PROPORTION == 0:
            # No validation split: train the network by standard means.
            # This will not calculate a validation error.

            # The result of training is the proportional error for the number of epochs run
            trainingError = trainer.train()
            trainingErrors.append(trainingError)

            # Display the result of training for the iteration
            print "   Training error:    ", trainingError
        else:
            trainingErrors, validationErrors = trainer.trainUntilConvergence(
                validationProportion=VALIDATION_PROPORTION)

    # Create the output path if it doesn't exist
    generated_dir = path.abspath(
        path.join("generated", "Q2Task2-TrainedNN-{}".format(
            strftime("%Y-%m-%d_%H-%M-%S"))))
    if not path.exists(generated_dir):
        makedirs(generated_dir)

    # save parameters
    with open(path.normpath(path.join(generated_dir, "params.txt")), "a") as f:
        f.write("HIDDEN_LAYERS = {}\n".format(HIDDEN_NODES))
        f.write("LEARNING_DECAY = {}\n".format(LEARNING_DECAY))
        f.write("LEARNING_RATE = {}\n".format(LEARNING_RATE))
        f.write("MOMENTUM = {}\n".format(MOMENTUM))
        f.write("TRAINING_ITERATIONS = {}\n".format(TRAINING_ITERATIONS))
        f.write("BATCH_LEARNING = {}\n".format(BATCH_LEARNING))
        f.write("VALIDATION_PROPORTION = {}\n".format(VALIDATION_PROPORTION))

    # Save the Trained Neural Network
    uniqueFileName = path.normpath(path.join(generated_dir, "data.pkl"))
    writeMode = 'wb'  # Write Bytes
    pickle.dump(neuralNet, open(uniqueFileName, writeMode))

    # Plot the results of training
    plot.plot(trainingErrors, 'b')
    plot.ylabel("Training Error")
    plot.xlabel("Training Steps")
    plot.savefig(path.normpath(path.join(generated_dir, "errors.png")))
    plot.show()
    plot.clf()

    from mpl_toolkits.mplot3d import Axes3D
    figure = plot.figure()
    axis = figure.add_subplot(111, projection='3d')
    colors = ['r', 'y', 'g', 'c', 'b', 'k']

    for sample in validationSet:
        classifier = sparse_coding.getClassifier(sample)
        activationResult = neuralNet.activate(sample)
        axis.bar(range(len(sample)),
                 activationResult,
                 classifier,
                 zdir='y',
                 color=colors[:len(sample)])

    plot.savefig(path.normpath(path.join(generated_dir, "activations.png")))
    plot.show()

class NFQIteration:

    _gamma = 0.9
    _epochs = 500  #1000
    _epochsNN = 100

    def __init__(self):

        self.Q = FeedForwardNetwork()

        # The value function is represented by a neural network
        # Input: S = (angle, angular velocity, position), A = action
        # Output: value
        # 2 hidden layers of 5 neurons each
        # Sigmoid activation function
        inLayer = SigmoidLayer(4, name="Input Layer")
        hiddenLayer1 = SigmoidLayer(5, name="Hidden Layer 1")
        hiddenLayer2 = SigmoidLayer(5, name="Hidden Layer 2")
        outLayer = SigmoidLayer(1, name="Output Layer")

        self.Q.addInputModule(inLayer)
        self.Q.addModule(hiddenLayer1)
        self.Q.addModule(hiddenLayer2)
        self.Q.addOutputModule(outLayer)

        connInToHidden1 = FullConnection(inLayer, hiddenLayer1)
        connHidden1ToHidden2 = FullConnection(hiddenLayer1, hiddenLayer2)
        connHidden2ToOut = FullConnection(hiddenLayer2, outLayer)

        self.Q.addConnection(connInToHidden1)
        self.Q.addConnection(connHidden1ToHidden2)
        self.Q.addConnection(connHidden2ToOut)

        self.Q.sortModules()

    def train(self, transitionSamples):

        print "Entrenando..."

        k = 0
        trainer = RPropMinusTrainer(self.Q, batchlearning=True)
        #trainer = BackpropTrainer(self.Q, batchlearning=False)
        TS = SupervisedDataSet(4, 1)

        while (k < self._epochs):

            if k % 10 == 0:
                print "\t ", k

            # Build the training set from the transition samples
            # Input: 4-dimensional vector (angle, angular velocity, position, action)
            # Target: value

            TS.clear()

            for s, a, s_1, costo in transitionSamples:

                # Q for s', for every possible action
                # (vector with the value of s' for each of the 3 possible actions)
                # Q_s1 = [ self.Q.activate([s_1.angulo, s_1.velocidadAngular, s_1.posicion, b]) for b in range(Accion.maxValor + 1) ]
                valDerecha = self.Q.activate([
                    s_1.angulo, s_1.velocidadAngular, s_1.posicion,
                    Accion.DERECHA
                ])
                valIzquierda = self.Q.activate([
                    s_1.angulo, s_1.velocidadAngular, s_1.posicion,
                    Accion.IZQUIERDA
                ])

                if valDerecha >= 1 or valDerecha <= 0:
                    print "Q incorrecta: ", valDerecha

                if valIzquierda >= 1 or valIzquierda <= 0:
                    print "Q incorrecta: ", valIzquierda

                # Input and target for the neural network
                inputVal = (s.angulo, s.velocidadAngular, s.posicion, a)

                if costo == 0:
                    targetVal = costo
                else:
                    targetVal = costo + self._gamma * min(
                        valDerecha, valIzquierda)

                if targetVal > 1 or targetVal < 0:
                    print "Target incorrecto: ", targetVal

                TS.addSample(inputVal, targetVal)

            # Train the neural network
            trainer.setData(TS)
            trainer.train()  # 1 epoch
            #trainer.trainEpochs(self._epochsNN)

            k = k + 1
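
    # (Added sketch, not in the original code) A greedy policy can pick the
    # action with the lowest Q-value, since the network is trained on costs;
    # `Accion` and the state fields follow the names used in train() above.
    def greedyAction(self, s):
        return min(
            (Accion.DERECHA, Accion.IZQUIERDA),
            key=lambda a: self.Q.activate(
                [s.angulo, s.velocidadAngular, s.posicion, a])[0])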
from pybrain.structure import FullConnection
in_to_hidden = FullConnection(inLayer, hiddenLayer1)
hidden_to_hidden = FullConnection(hiddenLayer1, hiddenLayer2)
hidden_to_out = FullConnection(hiddenLayer2, outLayer)

#add the connections to the network
net.addConnection(in_to_hidden)
net.addConnection(hidden_to_hidden)
net.addConnection(hidden_to_out)
net.sortModules()

print net

#feed an input to the network
#the weights/parameters of the connections have already been initialized randomly
net.activate([1, 2, 3])
#show the parameters of the connections
in_to_hidden.params
hidden_to_out.params
#this will show all of the parameters in a single array
net.params
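
# (Added sanity check) net.params is the concatenation of every connection's
# parameters, so with exactly these three connections the lengths add up:
assert len(net.params) == (len(in_to_hidden.params)
                           + len(hidden_to_hidden.params)
                           + len(hidden_to_out.params))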


trainer = BackpropTrainer(net, trndata,learningrate=0.01, lrdecay=1.0, momentum=0.0)
trainer.train()
#apply trained network to computing error in training set
#train_output=trainer.testOnClassData(dataset=trndata,verbose=False,return_targets=True)
trnresult = percentError( trainer.testOnClassData(),trndata['class'] )
print("  train error: %5.4f%%" % trnresult)

#apply the trained network to classifying the test set
Beispiel #49
0
backprop_trainer = BackpropTrainer(net, data_set, learningrate=0.1)

for i in xrange(50):
    err = backprop_trainer.train()
    print "Iter. %d, err.: %.5f" % (i, err)

#6.9
print "[w(x_1,j=1),w(x_2,j=1),w(x_1,j=2),w(x_2,j=2)]: " + str(in_to_h.params)
print "[w(j=1,j=3),w(j=2,j=3)]: " + str(h_to_out.params)
print "[w(x_b,j=1),w(x_b,j=2)]: " + str(bias_to_h.params)
print "[w(x_b,j=3)]:" + str(bias_to_out.params)

#6.10
print "Activating 0,0. Output: " + str(net.activate([0, 0]))
print "Activating 0,1. Output: " + str(net.activate([0, 1]))
print "Activating 1,0. Output: " + str(net.activate([1, 0]))
print "Activating 1,1. Output: " + str(net.activate([1, 1]))

###########
# From here onwards: RBMs

# Original Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD

import numpy as np
import matplotlib.pyplot as plt

from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
Beispiel #50
0
class Prey:
  # initial radius
  init_radius = 20
  def __init__(self, direction, x, y):
    # radius
    self.radius = self.init_radius
    #Neural network
    self.nn = FeedForwardNetwork()
    #Add layers
    inLayer = LinearLayer(8)
    hiddenLayer = SigmoidLayer(9)
    outLayer = LinearLayer(4)
    self.nn.addInputModule(inLayer)
    self.nn.addModule(hiddenLayer)
    self.nn.addOutputModule(outLayer)
    #Add connections
    in_to_hidden = FullConnection(inLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outLayer)
    self.nn.addConnection(in_to_hidden)
    self.nn.addConnection(hidden_to_out)
    #initialize NN
    self.nn.sortModules()
    
    # Energy - dies when reaches 0
    self.energy = 350

    # Max Energy. the max amount of energy a prey can have
    self.max_energy = 500

    # Location
    self.x = x
    self.y = y

    # direction / angle
    self.direction = direction

    # Senses predator
    self.senses_predator = False

    # predator's general direction
    self.pred_direction = 0

    # other prey's general direction
    self.prey_direction = 0

    # other prey's radius
    self.prey_radius = 0

    # where to move to next
    self.next_x = x
    self.next_y = y

    # eat or not (eating regains energy)
    self.want_to_eat = False

    # move or not
    self.want_to_move = False

    # if energy is less than 100, gets hungry status
    self.is_hungry = False

    # Age
    self.age = 0

    # output thresholds for decisions
    self.move_threshold = 0
    self.eat_threshold = 0

    # has it mated and reproduced yet?
    self.not_mated = True

    # number of attacking preds
    self.num_atk_pred = 0

    # energy per pred. how much energy each predator gains when eating this prey 
    self.energy_per_pred = 0

  def update(self):
    # metabolism depends on which state the prey is in (escaping from predator, idle)
    if (self.senses_predator is True):
      if (self.energy < 25):
        self.energy = 0
      else:
        self.energy -= 25
    else: # idle mode, consumes less energy
      if (self.energy < 10):
        self.energy = 0
      else:
        self.energy -= 10

    if (self.energy < 100):
      self.is_hungry = True
    else:
      self.is_hungry = False
    
    # Aging
    self.age += 1


    
    # Input vector: values are determined by what the animat
    # is seeing and/or touching
    input_vector = (
                    (2000 * int(self.senses_predator)),
                    (2000 * self.energy),
                    (2000 * self.is_hungry),
                    (2000 * self.direction),
                    (2000 * self.pred_direction),
                    (2000 * self.prey_direction),
                    (2000 * self.prey_radius),
                    (2000 * self.age)
                    )

    # Activate the nn
    output_vector = self.nn.activate(input_vector)
    # move
    if (output_vector[0] > self.move_threshold):
      self.want_to_move = True
    else:
      self.want_to_move = False
    # eat
    if (output_vector[1] > self.eat_threshold):
      self.want_to_eat = True
    else:
      self.want_to_eat = False
    # direction: turn right (clockwise)
    self.direction -= output_vector[2]
    #direction: turn left (counter clockwise)
    self.direction += output_vector[3]

    if (self.want_to_eat):
      if (self.energy >= 400):
        self.energy = 500
      else:
        self.energy += 100
      self.is_hungry = False
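
# (Added sketch) A minimal driver for the Prey animat; the arguments are
# illustrative. update() drains energy each tick, so the loop ends when the
# prey starves or the tick budget runs out:
prey = Prey(direction=0.0, x=50, y=50)
for tick in range(100):
    prey.update()
    if prey.energy == 0:
        break
print(prey.age)   # ticks survived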
Beispiel #51
0
def test_neural_nets(ds):

	def plot_errors(x, train_err, test_err):
		plt.plot(x, train_err, label='Training error')
		plt.xlabel('Epochs')
		plt.ylabel('Error')
		plt.title('Training error using backpropagation')
		plt.legend()
		plt.show()

	input_size = len(ds.train.x[0]) # no. of attributes
	target_size = 1
	hidden_size = 20
	iterations = 1000

	n = FeedForwardNetwork()
	in_layer = LinearLayer(input_size)
	hidden_layer = [SigmoidLayer(hidden_size), SigmoidLayer(hidden_size), SigmoidLayer(hidden_size)]
	out_layer = LinearLayer(target_size)

	n.addInputModule(in_layer)
	for layer in hidden_layer:
		n.addModule(layer)
	n.addOutputModule(out_layer)
	in_to_hidden = FullConnection(in_layer, hidden_layer[0])
	h1 = FullConnection(hidden_layer[0], hidden_layer[1])
	h2 = FullConnection(hidden_layer[1], hidden_layer[2])
	hidden_to_out = FullConnection(hidden_layer[2], out_layer)

	n.addConnection(in_to_hidden)
	n.addConnection(h1)
	n.addConnection(h2)
	n.addConnection(hidden_to_out)

	n.sortModules()

	print n

	train_nnds = SupervisedDataSet(input_size, target_size)
	train_nnds.setField('input', ds.train.x)
	one_train_reshaped = np.array(ds.train.y).reshape(-1,1) 
	train_nnds.setField('target', one_train_reshaped)

	trainer = BackpropTrainer( n, train_nnds )
	epochs, train_acc, test_acc = [], [], []
	
	for i in xrange(iterations):
		trainer.train()
		train_pred_y = []
		# Compute percent training error
		for row in ds.train.x:
			p = int( round( n.activate(row)[0] ) )
			if p >= 1: p = 1 
			else: p = 0 # sometimes rounding takes us to 2 or -1
			train_pred_y.append(p)
		train_error = percentError(train_pred_y, ds.train.y)

		if i%25 == 0 or i==iterations-1:
			epochs.append(i)
			train_acc.append(train_error)
			print "Train error", train_error
	
	plot_errors(epochs, train_acc, test_acc)
Beispiel #52
0
n.addInputModule(inLayer)
n.addModule(hiddenLayer)
n.addOutputModule(outLayer)

from pybrain.structure import FullConnection
in_to_hidden = FullConnection(inLayer, hiddenLayer)
hidden_to_out = FullConnection(hiddenLayer, outLayer)

n.addConnection(in_to_hidden)
n.addConnection(hidden_to_out)

n.sortModules()

print n

n.activate([1, 2])

in_to_hidden.params

hidden_to_out.params

# %load load_brain.py
from pybrain.datasets import ClassificationDataSet
from pybrain.utilities import percentError
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer

from pylab import ion, ioff, figure, draw, contourf, clf, show, hold, plot
from scipy import diag, arange, meshgrid, where
from numpy.random import multivariate_normal
Beispiel #53
0
# train with backprop until convergence, capped at 1000 epochs
trainer = BackpropTrainer(fnn, DS, learningrate=0.01, verbose=True)
trainer.trainUntilConvergence(maxEpochs=1000)

# print "1"
# print fnn.activate(x)
# print sy.inverse_transform(fnn.activate(x))
# print sy.inverse_transform(fnn.activate(x))[0]
# validate the result on the test set
values = []
# sy.inverse_transform()
# for x in xTest:
#     values.append(sy.inverse_transform(fnn.activate(x))[0])
for x in xTest:
    x1 = fnn.activate(x)
    x2 = sy.inverse_transform(x1.reshape(-1, 1))
    values.append(x2[0])
print("2")
# compute the RMSE (Root Mean Squared Error)
totalsum = (sum(map(lambda x, y: pow(x - y, 2), boston.target[per:], values)) /
            float(len(xTest))) ** 0.5
print(totalsum)
print("3")

# save the trained network and the scalers
NetworkWriter.writeToFile(fnn, 'pathName.xml')
joblib.dump(sx, 'sx.pkl', compress=3)
joblib.dump(sy, 'sy.pkl', compress=3)
Beispiel #54
0
from pybrain.structure import FullConnection


# Build the network
n = FeedForwardNetwork()
# Build the layers
inLayer = LinearLayer(2, name="input")
hiddenLayer = SigmoidLayer(3, name="hid")
outLayer = LinearLayer(1, name="output")

# Add the layers to the network
n.addInputModule(inLayer)
n.addModule(hiddenLayer)
n.addOutputModule(outLayer)

# Connect the layers
in_to_hidden = FullConnection(inLayer, hiddenLayer, name="con1")
hidden_to_out = FullConnection(hiddenLayer, outLayer, name="con2")

# Add the connections
n.addConnection(in_to_hidden)
n.addConnection(hidden_to_out)

#All the elements are in place now, so we can do the final step that makes our MLP usable, which is to call the .sortModules() method:
n.sortModules()

print n
print n.activate([3,7])
print in_to_hidden.params
print hidden_to_out.params
print n.params
Beispiel #55
0
            for i in xrange(len(seq) - 1):
                inp = seq[:i] + [0] * (6 - i)
                print inp, seq[i]
                ds.addSample(inp, seq[i])

        net = FeedForwardNetwork()
        inp = LinearLayer(6)
        h1 = SigmoidLayer(6)
        outp = LinearLayer(1)

        # add modules
        net.addOutputModule(outp)
        net.addInputModule(inp)
        net.addModule(h1)

        # create connections
        net.addConnection(FullConnection(inp, h1))
        net.addConnection(FullConnection(h1, outp))

        # finish up
        net.sortModules()

        # initialize the backprop trainer and train
        trainer = BackpropTrainer(net, ds)
        trainer.trainOnDataset(ds, 1000)
        trainer.testOnData(verbose=False)

        print net.activate((0, 0, 0, 0, 0, 0))
        import pdb
        pdb.set_trace()
Beispiel #56
0
class NeuralNet:
  def __init__(self, hidden_neuron_num=1, hidden_type='sigmoid'):
    self.hidden_neuron_num = hidden_neuron_num
    self.hidden_type = hidden_type

    self.net = FeedForwardNetwork()
    self.samples = SupervisedDataSet(784, 784)

    self.vectorizer = ImageVectorizer()

    self.add_layers()
    self.add_connections()
    self.sort()

  def add_layers(self):
    self.inLayer = LinearLayer(784, name='in')
    self.outLayer = LinearLayer(784, name='out')

    if self.hidden_type == 'sigmoid':
      self.hiddenLayer = SigmoidLayer(self.hidden_neuron_num, name='hidden')
    else:
      # I found I had to overwrite the output layer to sigmoid to get the 
      # hidden layer to work as linear
      self.hiddenLayer = LinearLayer(self.hidden_neuron_num, name='hidden')
      self.outLayer = SigmoidLayer(784, name='out')

    self.net.addInputModule(self.inLayer)
    self.net.addModule(self.hiddenLayer)
    self.net.addOutputModule(self.outLayer)

  def add_connections(self):
    self.in_to_hidden = FullConnection(self.inLayer, self.hiddenLayer)
    self.hidden_to_out = FullConnection(self.hiddenLayer, self.outLayer)

    self.net.addConnection(self.in_to_hidden)
    self.net.addConnection(self.hidden_to_out)

  def sort(self):
    self.net.sortModules()

  def activate(self, vector):
    return self.net.activate(vector)

  def train(self, paths):
    for path in paths:
      vector = self.vectorizer.image_to_vector(path) 
      vector = numpy.float64([el / 255.0 for el in vector])
      self.samples.addSample(vector, vector)

    trainer = BackpropTrainer(self.net, self.samples, learningrate=.5, lrdecay=0.98)
    for i in range(1,20):
      error = trainer.train()
      print "error for %(i)ith iteration: %(error)f" % locals()

  def input_weights_of_hidden_layer(self):
    weights = self.in_to_hidden.params
    hidden_weights_by_neuron = numpy.split(weights, self.hidden_neuron_num)
    return hidden_weights_by_neuron

  def input_weights_of_out_layer(self):
    weights = self.hidden_to_out.params
    hidden_weights_by_neuron = numpy.split(weights, self.hidden_neuron_num)
    return hidden_weights_by_neuron
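
# (Added sketch) The class is a 784-in/784-out autoencoder, so each hidden
# neuron's input weights can be viewed as a 28x28 filter; ImageVectorizer
# and the image paths below are assumed to exist:
#
#   ae = NeuralNet(hidden_neuron_num=16)
#   ae.train(["digits/img_%d.png" % i for i in range(100)])
#   filters = ae.input_weights_of_hidden_layer()   # 16 arrays of 784 weights
#   first_filter = filters[0].reshape(28, 28)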
Beispiel #57
0
	inLayer = LinearLayer(2)
	hiddenLayer = SigmoidLayer(3)
	outLayer = LinearLayer(1)

	n.addInputModule(inLayer)
	n.addModule(hiddenLayer)
	n.addOutputModule(outLayer)


	in_to_hidden = FullConnection(inLayer, hiddenLayer)
	hidden_to_out = FullConnection(hiddenLayer, outLayer)

	n.addConnection(in_to_hidden)
	n.addConnection(hidden_to_out)

	n.sortModules()

	print n.activate([1,2])

	n2 = RecurrentNetwork()
	n2.addInputModule(LinearLayer(2, name='in'))
	n2.addModule(SigmoidLayer(3, name='hidden'))
	n2.addOutputModule(LinearLayer(1, name='out'))
	n2.addConnection(FullConnection(n2['in'], n2['hidden'], name='c1'))
	n2.addConnection(FullConnection(n2['hidden'], n2['out'], name='c2'))
	n2.addRecurrentConnection(FullConnection(n2['hidden'], n2['hidden'], name='c3'))
	n2.sortModules()

	print n2.activate((2,2))
	print n2.activate((2,2))
	print n2.activate((2,2))