from pybrain.datasets import SupervisedDataSet
from pybrain.structure import (BiasUnit, FeedForwardNetwork, FullConnection,
                               LinearLayer, SigmoidLayer, TanhLayer)
from pybrain.supervised.trainers import BackpropTrainer


def train_net(data_set, n, epochs=1):
    num_inputs = len(data_set[0][0][n])
    ds = SupervisedDataSet(num_inputs, 2)
    for i in range(len(data_set)):
        try:
            ds.appendLinked(data_set[i][0][n],
                            (data_set[i][1], data_set[i][2]))
        except Exception:
            continue
    print(str(len(ds)) + ' points successfully acquired')

    net = FeedForwardNetwork()
    net.addInputModule(LinearLayer(num_inputs, name='input'))
    net.addInputModule(BiasUnit(name='bias'))
    net.addOutputModule(LinearLayer(2, name='output'))
    net.addModule(SigmoidLayer(int((num_inputs + 2) / 2.), name='sigmoid'))
    net.addModule(TanhLayer(10, name='tanh'))
    net.addConnection(FullConnection(net['bias'], net['sigmoid']))
    net.addConnection(FullConnection(net['bias'], net['tanh']))
    net.addConnection(FullConnection(net['input'], net['sigmoid']))
    net.addConnection(FullConnection(net['sigmoid'], net['tanh']))
    net.addConnection(FullConnection(net['tanh'], net['output']))
    net.sortModules()

    trainer = BackpropTrainer(net,
                              learningrate=0.01,
                              momentum=0.1,
                              verbose=True)

    trainer.trainOnDataset(ds)
    trainer.trainEpochs(epochs)

    return net
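
# Usage sketch (not from the original source): train_net expects rows of the
# form (feature_sets, target_a, target_b), where feature_sets[n] is the
# feature vector to train on. A toy call with 3-dim features at n=0:
toy_data = [([[0.1, 0.2, 0.3]], 0.5, -0.2),
            ([[0.9, 0.8, 0.7]], -0.1, 0.4)]
net = train_net(toy_data, 0, epochs=5)
print(net.activate([0.1, 0.2, 0.3]))  # two outputs, one per target
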
    def __init__(self, genes=None):
        self.net = FeedForwardNetwork()

        inLayer = LinearLayer(Brain.G_INPUTNODES, name='input')
        hiddenLayer1 = SigmoidLayer(Brain.G_HIDDENNODES_L1, name='hidden1')
        hiddenLayer2 = SigmoidLayer(Brain.G_HIDDENNODES_L2, name='hidden2')
        outLayer = SigmoidLayer(Brain.G_OUTPUTNODES, name='out')
        bias = BiasUnit(name='bias')

        self.net.addInputModule(inLayer)
        self.net.addModule(hiddenLayer1)
        self.net.addModule(hiddenLayer2)
        self.net.addModule(bias)
        self.net.addOutputModule(outLayer)

        in_to_hidden1 = FullConnection(inLayer, hiddenLayer1)
        hidden1_to_hidden2 = FullConnection(hiddenLayer1, hiddenLayer2)
        hidden2_to_out = FullConnection(hiddenLayer2, outLayer)
        bias_to_hidden1 = FullConnection(bias, hiddenLayer1)
        bias_to_hidden2 = FullConnection(bias, hiddenLayer2)
        bias_to_out = FullConnection(bias, outLayer)

        self.net.addConnection(in_to_hidden1)
        self.net.addConnection(hidden1_to_hidden2)
        self.net.addConnection(hidden2_to_out)
        self.net.addConnection(bias_to_hidden1)
        self.net.addConnection(bias_to_hidden2)
        self.net.addConnection(bias_to_out)

        self.net.sortModules()

        if genes is not None:
            self.import_genes(genes)
Example #3
    def xor_network(self, net):
        net.addInputModule(LinearLayer(2, name='in'))
        net.addModule(BiasUnit(name='bias'))
        net.addModule(LinearLayer(3, name='hidden'))
        net.addOutputModule(LinearLayer(1, name='out'))
        net.addConnection(FullConnection(net['in'], net['hidden']))
        net.addConnection(FullConnection(net['bias'], net['hidden']))
        net.addConnection(FullConnection(net['hidden'], net['out']))

    def init_network(self, net):
        net.addInputModule(LinearLayer(2, name='in'))
        net.addModule(SigmoidLayer(3, name='hidden'))
        net.addOutputModule(LinearLayer(2, name='out'))
        net.addModule(BiasUnit(name='bias'))
        net.addConnection(FullConnection(net['in'], net['hidden']))
        net.addConnection(FullConnection(net['hidden'], net['out']))
        net.sortModules()
def build_new_nets(data_set, n):
    num_inputs = len(data_set[0][0][n])
    arousal_net = FeedForwardNetwork()
    arousal_net.addInputModule(LinearLayer(num_inputs, name='input'))
    arousal_net.addInputModule(BiasUnit(name='bias'))
    arousal_net.addOutputModule(LinearLayer(1, name='output'))
    arousal_net.addModule(
        SigmoidLayer(int((num_inputs + 2) / 2.), name='sigmoid'))
    arousal_net.addModule(TanhLayer(10, name='tanh'))
    arousal_net.addConnection(
        FullConnection(arousal_net['bias'], arousal_net['sigmoid']))
    arousal_net.addConnection(
        FullConnection(arousal_net['bias'], arousal_net['tanh']))
    arousal_net.addConnection(
        FullConnection(arousal_net['input'], arousal_net['sigmoid']))
    arousal_net.addConnection(
        FullConnection(arousal_net['sigmoid'], arousal_net['tanh']))
    arousal_net.addConnection(
        FullConnection(arousal_net['tanh'], arousal_net['output']))
    arousal_net.sortModules()

    valence_net = FeedForwardNetwork()
    valence_net.addInputModule(LinearLayer(num_inputs, name='input'))
    valence_net.addInputModule(BiasUnit(name='bias'))
    valence_net.addOutputModule(LinearLayer(1, name='output'))
    valence_net.addModule(
        SigmoidLayer(int((num_inputs + 2) / 2.), name='sigmoid'))
    valence_net.addModule(TanhLayer(10, name='tanh'))
    valence_net.addConnection(
        FullConnection(valence_net['bias'], valence_net['sigmoid']))
    valence_net.addConnection(
        FullConnection(valence_net['bias'], valence_net['tanh']))
    valence_net.addConnection(
        FullConnection(valence_net['input'], valence_net['sigmoid']))
    valence_net.addConnection(
        FullConnection(valence_net['sigmoid'], valence_net['tanh']))
    valence_net.addConnection(
        FullConnection(valence_net['tanh'], valence_net['output']))
    valence_net.sortModules()

    return arousal_net, valence_net
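
# Usage sketch (assumption, mirroring train_net above): train each returned
# net on its own 1-dim target.
rows = [([[0.1, 0.2, 0.3]], 0.5, -0.2), ([[0.9, 0.8, 0.7]], -0.1, 0.4)]
arousal_net, valence_net = build_new_nets(rows, 0)
arousal_ds = SupervisedDataSet(3, 1)
for feature_sets, arousal, valence in rows:
    arousal_ds.addSample(feature_sets[0], (arousal,))
BackpropTrainer(arousal_net, dataset=arousal_ds, learningrate=0.01).trainEpochs(5)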
Example #6
    def lstm_network(self, net):
        i = LinearLayer(1, name='in')
        h = LSTMLayer(2, name='hidden')
        o = LinearLayer(1, name='out')
        b = BiasUnit(name='bias')
        net.addModule(b)
        net.addOutputModule(o)
        net.addInputModule(i)
        net.addModule(h)
        net.addConnection(FullConnection(i, h))
        net.addConnection(FullConnection(b, h))
        net.addRecurrentConnection(FullConnection(h, h))
        net.addConnection(FullConnection(h, o))
Example #7
    def __init__(self, hidden_layers, ally_champ_obj_list,
                 enemy_champ_obj_list):

        self.ally_champ_obj_list = ally_champ_obj_list
        self.enemy_champ_obj_list = enemy_champ_obj_list

        self.set_nodes()

        self.network = FeedForwardNetwork()

        connect_queue = queue.Queue()

        for layer in range(0, hidden_layers):
            connect_queue.put(
                TanhLayer(self.input_node_count,
                          name='hidden_layer_{}'.format(layer)))

        connect_queue.put(SigmoidLayer(1, name='output_layer'))

        prev_layer = LinearLayer(self.input_node_count, name='input_layer')
        self.network.addInputModule(prev_layer)

        while not connect_queue.empty():

            current_layer = connect_queue.get()
            if current_layer.name == 'output_layer':
                self.network.addOutputModule(current_layer)
            else:
                self.network.addModule(current_layer)

            bias = BiasUnit()
            bias_connection = FullConnection(
                bias,
                current_layer,
                name="bias_to_{}_connection".format(current_layer.name))
            self.network.addModule(bias)
            self.network.addConnection(bias_connection)

            connection = FullConnection(prev_layer,
                                        current_layer,
                                        name="{}_to_{}_connection".format(
                                            prev_layer.name,
                                            current_layer.name))
            self.network.addConnection(connection)

            prev_layer = current_layer

        self.network.sortModules()
Example #8
def buildSimpleLSTMNetwork(peepholes=False):
    N = RecurrentNetwork('simpleLstmNet')
    i = LinearLayer(100, name='i')
    h = LSTMLayer(10, peepholes=peepholes, name='lstm')
    o = LinearLayer(1, name='o')
    b = BiasUnit('bias')
    N.addModule(b)
    N.addOutputModule(o)
    N.addInputModule(i)
    N.addModule(h)
    N.addConnection(FullConnection(i, h, name='f1'))
    N.addConnection(FullConnection(b, h, name='f2'))
    N.addRecurrentConnection(FullConnection(h, h, name='r1'))
    N.addConnection(FullConnection(h, o, name='f3'))
    N.sortModules()
    return N
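
# Usage sketch: the recurrent net keeps state across activate() calls, so
# reset it before feeding a new sequence; the input layer is 100-dimensional.
net = buildSimpleLSTMNetwork()
net.reset()
for _ in range(5):
    out = net.activate([0.0] * 100)
print(out)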
Example #9
def buildParityNet():
    net = RecurrentNetwork()
    net.addInputModule(LinearLayer(1, name='i'))
    net.addModule(TanhLayer(2, name='h'))
    net.addModule(BiasUnit('bias'))
    net.addOutputModule(TanhLayer(1, name='o'))
    net.addConnection(FullConnection(net['i'], net['h']))
    net.addConnection(FullConnection(net['bias'], net['h']))
    net.addConnection(FullConnection(net['bias'], net['o']))
    net.addConnection(FullConnection(net['h'], net['o']))
    net.addRecurrentConnection(FullConnection(net['o'], net['h']))
    net.sortModules()

    p = net.params
    p[:] = [-0.5, -1.5, 1, 1, -1, 1, 1, -1, 1]
    p *= 10.

    return net
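
# Usage sketch: the hand-set weights are meant to solve a sequential parity
# task; the output tracks the running parity of a +1/-1 input stream.
net = buildParityNet()
net.reset()
for bit in (1, -1, -1, 1):
    print(bit, net.activate([bit]))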
Example #10
    def train(self):
        # We will build up a network piecewise in order to create a new dataset
        # for each layer.
        dataset = self.dataset
        piecenet = FeedForwardNetwork()
        piecenet.addInputModule(copy.deepcopy(self.net.inmodules[0]))
        # Add a bias
        bias = BiasUnit()
        piecenet.addModule(bias)
        # Add the first visible layer
        firstRbm = next(self.iterRbms())
        visible = copy.deepcopy(firstRbm.visible)
        piecenet.addModule(visible)
        # For saving the rbms and their inverses
        self.invRbms = []
        self.rbms = []
        for rbm in self.iterRbms():
            self.net.sortModules()
            # Train the first layer with an rbm trainer for `epoch` epochs.
            trainer = self.trainerKlass(rbm, dataset, self.cfg)
            for _ in range(self.epochs):
                trainer.train()
            self.invRbms.append(trainer.invRbm)
            self.rbms.append(rbm)
            # Add the connections and the hidden layer of the rbm to the net.
            hidden = copy.deepcopy(rbm.hidden)
            biascon = FullConnection(bias, hidden)
            biascon.params[:] = rbm.biasWeights
            con = FullConnection(visible, hidden)
            con.params[:] = rbm.weights

            piecenet.addConnection(biascon)
            piecenet.addConnection(con)
            piecenet.addModule(hidden)
            # Overwrite old outputs
            piecenet.outmodules = [hidden]
            piecenet.outdim = rbm.hiddenDim
            piecenet.sortModules()

            dataset = UnsupervisedDataSet(rbm.hiddenDim)
            for sample, in self.dataset:
                new_sample = piecenet.activate(sample)
                dataset.addSample(new_sample)
            visible = hidden
Example #11
    def weird_network(self, net):
        bias = BiasUnit(name='bias')
        inlayer = TanhLayer(1, name='input')
        outlayer = TanhLayer(1, name='output')
        gatelayer = GateLayer(1, name='gate')
        con1 = FullConnection(bias, gatelayer, outSliceFrom=0, outSliceTo=1)
        con2 = FullConnection(bias, gatelayer, outSliceFrom=1, outSliceTo=2)
        con3 = FullConnection(inlayer, gatelayer, outSliceFrom=0, outSliceTo=1)
        con4 = FullConnection(inlayer, gatelayer, outSliceFrom=1, outSliceTo=2)
        con5 = FullConnection(gatelayer, outlayer)
        net.addInputModule(inlayer)
        net.addModule(bias)
        net.addModule(gatelayer)
        net.addOutputModule(outlayer)
        net.addConnection(con1)
        net.addConnection(con2)
        net.addConnection(con3)
        net.addConnection(con4)
        net.addConnection(con5)
Example #12
    def __init__(self, hidden_layers, data_index_size):

        self.network = FeedForwardNetwork()

        connect_queue = queue.Queue()

        for layer in range(0, hidden_layers):
            connect_queue.put(
                TanhLayer(data_index_size,
                          name='hidden_layer_{}'.format(layer)))

        connect_queue.put(SigmoidLayer(1, name='output_layer'))

        prev_layer = LinearLayer(data_index_size, name='input_layer')
        self.network.addInputModule(prev_layer)

        while not connect_queue.empty():
            print('layer')
            current_layer = connect_queue.get()
            if current_layer.name == 'output_layer':
                self.network.addOutputModule(current_layer)
            else:
                self.network.addModule(current_layer)

            bias = BiasUnit()
            bias_connection = FullConnection(
                bias,
                current_layer,
                name="bias_to_{}_connection".format(current_layer.name))
            self.network.addModule(bias)
            self.network.addConnection(bias_connection)

            connection = FullConnection(prev_layer,
                                        current_layer,
                                        name="{}_to_{}_connection".format(
                                            prev_layer.name,
                                            current_layer.name))
            self.network.addConnection(connection)

            prev_layer = current_layer

        print('sorting....')
        self.network.sortModules()
Example #13
def simple_network_builder(layers, partial_path):
    n = FeedForwardNetwork()
    ## create the network
    inlayer = LinearLayer(layers[0], name="In")
    hidden_one = TanhLayer(layers[1], name="Hidden 1")
    hidden_two = TanhLayer(layers[2], name="Hidden 2")
    b1 = BiasUnit(name="Bias")
    output = LinearLayer(1, name="Out")
    n.addInputModule(inlayer)
    n.addModule(hidden_one)
    n.addModule(hidden_two)
    n.addModule(b1)
    n.addOutputModule(output)
    in_to_one = FullConnection(inlayer, hidden_one)
    one_to_two = FullConnection(hidden_one, hidden_two)
    two_to_out = FullConnection(hidden_two, output)
    b1_to_one = FullConnection(b1, hidden_one)
    b2_to_two = FullConnection(b1, hidden_two)
    b3_to_output = FullConnection(b1, output)
    ### load weights and biases
    in_to_one._setParameters(np.array(csv_loader(partial_path + '_w1.csv')))
    one_to_two._setParameters(np.array(csv_loader(partial_path + '_w2.csv')))
    two_to_out._setParameters(np.array(csv_loader(partial_path + '_w3.csv')))
    b1_to_one._setParameters(np.array(csv_loader(partial_path + '_b1.csv')))
    b2_to_two._setParameters(np.array(csv_loader(partial_path + '_b2.csv')))
    b3_to_output._setParameters(np.array(csv_loader(partial_path + '_b3.csv')))

    ### connect the network topology
    n.addConnection(in_to_one)
    n.addConnection(one_to_two)
    n.addConnection(two_to_out)
    n.addConnection(b1_to_one)
    n.addConnection(b2_to_two)
    n.addConnection(b3_to_output)

    ### finalize network object
    n.sortModules()

    return n
Example #14
def testTraining():
    # the AnBnCn dataset (sequential)
    d = AnBnCnDataSet()

    # build a recurrent network to be trained
    hsize = 2
    n = RecurrentNetwork()
    n.addModule(TanhLayer(hsize, name='h'))
    n.addModule(BiasUnit(name='bias'))
    n.addOutputModule(LinearLayer(1, name='out'))
    n.addConnection(FullConnection(n['bias'], n['h']))
    n.addConnection(FullConnection(n['h'], n['out']))
    n.addRecurrentConnection(FullConnection(n['h'], n['h']))
    n.sortModules()

    # initialize the backprop trainer and train
    t = BackpropTrainer(n, learningrate=0.1, momentum=0.0, verbose=True)
    t.trainOnDataset(d, 200)

    # the resulting weights are in the network:
    print('Final weights:', n.params)
Example #15
    def fromDims(cls, visibledim, hiddendim, params=None, biasParams=None):
        """Return a restricted Boltzmann machine of the given dimensions with the
        given distributions."""
        net = FeedForwardNetwork()
        bias = BiasUnit('bias')
        visible = LinearLayer(visibledim, 'visible')
        hidden = SigmoidLayer(hiddendim, 'hidden')
        con1 = FullConnection(visible, hidden)
        con2 = FullConnection(bias, hidden)
        if params is not None:
            con1.params[:] = params
        if biasParams is not None:
            con2.params[:] = biasParams

        net.addInputModule(visible)
        net.addModule(bias)
        net.addOutputModule(hidden)
        net.addConnection(con1)
        net.addConnection(con2)
        net.sortModules()
        return cls(net)
Example #16
def buildNetwork(InputLength=1,
                 HiddenLength=0,
                 OutputLength=1,
                 bias=True,
                 seed=None):

    network = FeedForwardNetwork()
    input_layer = LinearLayer(InputLength)
    if HiddenLength > 0:
        hidden_layer = SigmoidLayer(HiddenLength)
    output_layer = SigmoidLayer(OutputLength)

    network.addInputModule(input_layer)
    network.addOutputModule(output_layer)
    if HiddenLength > 0:
        network.addModule(hidden_layer)

    if HiddenLength > 0:
        network.addConnection(FullConnection(input_layer, hidden_layer))
        network.addConnection(FullConnection(hidden_layer, output_layer))
    else:
        network.addConnection(FullConnection(input_layer, output_layer))

    if bias:
        bias_node = BiasUnit()
        network.addModule(bias_node)
        network.addConnection(FullConnection(bias_node, input_layer))
        if HiddenLength > 0:
            network.addConnection(FullConnection(bias_node, hidden_layer))
        network.addConnection(FullConnection(bias_node, output_layer))

    network.sortModules()

    numpy.random.seed(seed)
    random.seed(seed)
    network.randomize()

    #print network.params

    return network
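
# Usage sketch: a reproducible 2-3-1 network; with HiddenLength=0 the input
# layer connects straight to the output layer.
net = buildNetwork(InputLength=2, HiddenLength=3, OutputLength=1, seed=42)
print(net.activate([0.0, 1.0]))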
Example #17
def getModel(dept, hidden_size, input_size, target_size, online=False):

    file_name = output_file_path + 'nn_dept' + str(dept) + '_epoch' + str(epochs)

    if online:
        try:
            fileObject = open(file_name + '_model', 'rb')
            n = pickle.load(fileObject)
            fileObject.close()
            return n

        except IOError:
            print("No saved network exists for dept", dept, "- building a new model.")

    n = RecurrentNetwork()

    n.addInputModule(LinearLayer(input_size, name='in'))
    n.addModule(BiasUnit('bias'))
    for i in range(0, num_hidden_layer + 1):
        hidden_name = 'hidden' + str(i)
        n.addModule(SigmoidLayer(hidden_size, name=hidden_name))
    n.addOutputModule(LinearLayer(target_size, name='out'))

    n.addConnection(FullConnection(n['in'], n['hidden0'], name='c1'))
    next_hidden = 'hidden0'

    for i in range(0, num_hidden_layer):
        current_hidden = 'hidden' + str(i)
        next_hidden = 'hidden' + str(i + 1)
        n.addConnection(FullConnection(n[current_hidden], n[next_hidden],
                                       name='c' + str(i + 2)))

    n.addConnection(FullConnection(n[next_hidden], n['out'],
                                   name='c' + str(num_hidden_layer + 2)))
    n.addConnection(FullConnection(n['bias'], n['hidden0'],
                                   name='c' + str(num_hidden_layer + 7)))

    n.sortModules()

    return n
Example #18
    def create_network(self, nFeatures, hidden1Size=20, nClasses=1):
        # create network object
        self.ffn = FeedForwardNetwork()

        # create layer objects
        inLayer = LinearLayer(nFeatures, name="input")
        hiddenLayer = SigmoidLayer(hidden1Size, name="hidden1")
        #hiddenLayer2 = SigmoidLayer(hidden2Size, name="hidden2")
        outLayer = LinearLayer(nClasses, name="output")

        # add layers to feed forward network
        self.ffn.addInputModule(inLayer)
        self.ffn.addModule(hiddenLayer)
        #self.ffn.addModule(hiddenLayer2)
        self.ffn.addOutputModule(outLayer)

        # add bias unit to layers
        self.ffn.addModule(BiasUnit(name='bias'))

        # establish connections between layers
        self.in_to_hidden = FullConnection(inLayer, hiddenLayer)
        #hidden_to_hidden = FullConnection(hiddenLayer, hiddenLayer2)
        self.hidden_to_out = FullConnection(hiddenLayer, outLayer)

        # print "into hidden: {}".format(len(in_to_hidden.params))
        # print "into out: {}".format(len(hidden_to_out.params))

        # add connections to network
        self.ffn.addConnection(self.in_to_hidden)
        #self.ffn.addConnection(hidden_to_hidden)
        self.ffn.addConnection(self.hidden_to_out)

        # necessary, sort layers into correct/certain order
        self.ffn.sortModules()

        # dataset object
        self.train_ds = SupervisedDataSet(nFeatures, nClasses)
        self.validate_ds = SupervisedDataSet(nFeatures, nClasses)
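
        # Usage sketch (hypothetical object name; create_network is a method,
        # so it needs its enclosing instance):
        #   clf.create_network(nFeatures=4, hidden1Size=20, nClasses=1)
        #   clf.train_ds.addSample([0.1, 0.2, 0.3, 0.4], [1.0])
        #   BackpropTrainer(clf.ffn, dataset=clf.train_ds).trainEpochs(10)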
Example #19
def construct_network(input_len, output_len, hidden_nodes, is_elman=True):
    n = RecurrentNetwork()
    n.addInputModule(LinearLayer(input_len, name="i"))
    n.addModule(BiasUnit("b"))
    n.addModule(SigmoidLayer(hidden_nodes, name="h"))
    n.addOutputModule(LinearLayer(output_len, name="o"))

    n.addConnection(FullConnection(n["i"], n["h"]))
    n.addConnection(FullConnection(n["b"], n["h"]))
    n.addConnection(FullConnection(n["b"], n["o"]))
    n.addConnection(FullConnection(n["h"], n["o"]))

    if is_elman:
        # Elman (hidden->hidden)
        n.addRecurrentConnection(FullConnection(n["h"], n["h"]))
    else:
        # Jordan (out->hidden)
        n.addRecurrentConnection(FullConnection(n["o"], n["h"]))

    n.sortModules()
    n.reset()

    return n
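
# Usage sketch: the Elman and Jordan variants differ only in where the
# recurrent loop attaches (hidden->hidden vs. out->hidden).
elman = construct_network(2, 1, hidden_nodes=4)
jordan = construct_network(2, 1, hidden_nodes=4, is_elman=False)
print(elman.activate([0.5, -0.5]), jordan.activate([0.5, -0.5]))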
Example #20
    def _train(self):
        global bias_in
        hidden_layers = []
        bias_layers = []
        compressed_data = copy.copy(
            self.unsupervised
        )  # it isn't compressed at this point, but will be later on
        compressed_supervised = self.supervised

        mid_layers = self.layers[1:-1]  # remove the first and last
        for i, current in enumerate(mid_layers):
            prior = self.layers[i]
            # This accesses the layer before the "current" one,
            # since the indexing in mid_layers and self.layers is offset by 1
            # print "Compressed data at stage {0} {1}".format(i, compressed_data)
            """ build the NN with a bottleneck """
            bottleneck = FeedForwardNetwork()
            in_layer = LinearLayer(prior)
            hidden_layer = self.hidden_layer(current)
            out_layer = self.hidden_layer(prior)
            bottleneck.addInputModule(in_layer)
            bottleneck.addModule(hidden_layer)
            bottleneck.addOutputModule(out_layer)
            in_to_hidden = FullConnection(in_layer, hidden_layer)
            hidden_to_out = FullConnection(hidden_layer, out_layer)
            bottleneck.addConnection(in_to_hidden)
            bottleneck.addConnection(hidden_to_out)
            if self.bias:
                bias1 = BiasUnit()
                bias2 = BiasUnit()
                bottleneck.addModule(bias1)
                bottleneck.addModule(bias2)
                bias_in = FullConnection(bias1, hidden_layer)
                bias_hidden = FullConnection(bias2, out_layer)
                bottleneck.addConnection(bias_in)
                bottleneck.addConnection(bias_hidden)
            bottleneck.sortModules()
            print("3here network is okay bottleneck"
                  )  # ====================================
            """ train the bottleneck """
            print "\n...training for layer ", prior, " to ", current
            ds = SupervisedDataSet(prior, prior)
            print("5here supervised dataset was built"
                  )  # ==============================
            print("8.====================compressed_data_size=============")
            print compressed_data.__sizeof__()
            if self.dropout_on:
                noisy_data, originals = self.dropout(compressed_data,
                                                     noise=0.2,
                                                     bag=1,
                                                     debug=False)
                print("6here dropout is begin processing and it's okay"
                      )  # ==============================
                print "=============noisylen================"
                print len(noisy_data)  # =====
                for i, n in enumerate(noisy_data):
                    original = originals[i]

                    ds.addSample(n, original)

                print("7.drop out add nosizy sample success"
                      )  # =============================
            else:
                for d in (compressed_data):
                    ds.addSample(d, d)
            print("4here begin bp bp bp"
                  )  # ============================================
            trainer = BackpropTrainer(bottleneck,
                                      dataset=ds,
                                      learningrate=0.001,
                                      momentum=0.05,
                                      verbose=self.verbose,
                                      weightdecay=0.05)
            trainer.trainEpochs(self.compression_epochs)
            if self.verbose:
                print "...data:\n...", compressed_data[0][:10], \
                    "\nreconstructed to:\n...", bottleneck.activate(compressed_data[0])[:10]
                # just used 10dim of 95 dim mfcc

            hidden_layers.append(in_to_hidden)
            if self.bias: bias_layers.append(bias_in)
            """ use the params from the bottleneck to compress the training data """
            compressor = FeedForwardNetwork()
            compressor.addInputModule(in_layer)
            compressor.addOutputModule(
                hidden_layer)  # use the hidden layer from above
            compressor.addConnection(in_to_hidden)
            compressor.sortModules()
            compressed_data = [compressor.activate(d) for d in compressed_data]
            # del compressed_data
            compressed_supervised = [
                compressor.activate(d) for d in compressed_supervised
            ]
            # del compressed_supervised

            self.nn.append(compressor)
        """ Train the softmax layer """
        print "\n...training for softmax layer "
        softmax = FeedForwardNetwork()
        in_layer = LinearLayer(self.layers[-2])
        out_layer = self.final_layer(self.layers[-1])
        softmax.addInputModule(in_layer)
        softmax.addOutputModule(out_layer)
        in_to_out = FullConnection(in_layer, out_layer)
        softmax.addConnection(in_to_out)
        if self.bias:
            bias = BiasUnit()
            softmax.addModule(bias)
            bias_in = FullConnection(bias, out_layer)
            softmax.addConnection(bias_in)
        softmax.sortModules()

        # see if it's for classification or regression
        if self.final_layer == SoftmaxLayer:
            print "...training for a softmax network"
            ds = ClassificationDataSet(self.layers[-2], 1)
        else:
            print "...training for a regression network"
            ds = SupervisedDataSet(self.layers[-2], self.layers[-1])
        bag = 1
        noisy_data, _ = self.dropout(compressed_supervised,
                                     noise=0.5,
                                     bag=bag,
                                     debug=True)
        bagged_targets = []
        for t in self.targets:
            for b in range(bag):
                bagged_targets.append(t)

        for i, d in enumerate(noisy_data):
            target = bagged_targets[i]
            ds.addSample(d, target)

        # see if it's for classification or regression
        if self.final_layer == SoftmaxLayer:
            ds._convertToOneOfMany()

        trainer = BackpropTrainer(softmax,
                                  dataset=ds,
                                  learningrate=0.001,
                                  momentum=0.05,
                                  verbose=self.verbose,
                                  weightdecay=0.05)
        trainer.trainEpochs(self.compression_epochs)
        self.nn.append(softmax)
        # print "ABOUT TO APPEND"
        # print len(in_to_out.params)
        hidden_layers.append(in_to_out)
        if self.bias:
            bias_layers.append(bias_in)
        """ Recreate the whole thing """
        # print "hidden layers: " + str(hidden_layers)
        # print "bias layers: " + str(bias_layers)
        # print "len hidden layers: " + str(len(hidden_layers))
        # print "len bias layers: " + str(len(bias_layers))
        # connect the first two
        autoencoder = FeedForwardNetwork()
        first_layer = hidden_layers[0].inmod
        next_layer = hidden_layers[0].outmod
        autoencoder.addInputModule(first_layer)
        connection = FullConnection(first_layer, next_layer)
        connection.params[:] = hidden_layers[0].params
        autoencoder.addConnection(connection)

        # decide whether this should be the output layer or not
        if self.autoencoding_only and (len(self.layers) <= 3):
            #  TODO change this to 2 when you aren't using the softmax above
            autoencoder.addOutputModule(next_layer)
        else:
            autoencoder.addModule(next_layer)
        if self.bias:
            bias = bias_layers[0]
            bias_unit = bias.inmod
            autoencoder.addModule(bias_unit)
            connection = FullConnection(bias_unit, next_layer)
            # print bias.params
            connection.params[:] = bias.params
            autoencoder.addConnection(connection)
            # print connection.params

        # connect the middle layers
        for i, h in enumerate(hidden_layers[1:-1]):
            new_next_layer = h.outmod

            # decide whether this should be the output layer or not
            if self.autoencoding_only and i == (len(hidden_layers) - 3):
                autoencoder.addOutputModule(new_next_layer)
            else:
                autoencoder.addModule(new_next_layer)
            connection = FullConnection(next_layer, new_next_layer)
            connection.params[:] = h.params
            autoencoder.addConnection(connection)
            next_layer = new_next_layer

            if self.bias:
                bias = bias_layers[i + 1]
                bias_unit = bias.inmod
                autoencoder.addModule(bias_unit)
                connection = FullConnection(bias_unit, next_layer)
                connection.params[:] = bias.params
                autoencoder.addConnection(connection)

        return autoencoder, hidden_layers, next_layer, bias_layers
from pybrain.structure import FeedForwardNetwork
from pybrain.structure import LinearLayer, SigmoidLayer, BiasUnit
from pybrain.structure import FullConnection

rede = FeedForwardNetwork()

camadaEntrada = LinearLayer(2)
camadaOculta = SigmoidLayer(3)
camadaSaida = SigmoidLayer(1)
bias1 = BiasUnit()
bias2 = BiasUnit()

rede.addModule(camadaEntrada)
rede.addModule(camadaOculta)
rede.addModule(camadaSaida)
rede.addModule(bias1)
rede.addModule(bias2)

entradaOculta = FullConnection(camadaEntrada, camadaOculta)
OcultaSaida = FullConnection(camadaOculta, camadaSaida)
biasOculta = FullConnection(bias1, camadaOculta)
biasSaida = FullConnection(bias2, camadaSaida)

rede.sortModules()

print(rede)
print(entradaOculta.params)
print(OcultaSaida.params)
print(biasOculta.params)
print(biasSaida.params)
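
# Note: the FullConnection objects above were never added with
# rede.addConnection(), so they are not part of the network yet; their
# parameters are still randomly initialized, which is why the printed
# vectors differ from run to run.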
Example #22
from pybrain.structure.networks import FeedForwardNetwork
from pybrain.structure import FullConnection
from pybrain.structure import LinearLayer, SigmoidLayer, BiasUnit

rede = FeedForwardNetwork()

camadaEntrada = LinearLayer(2)  # number of neurons in the input layer
# They are not passed through any activation function, hence LinearLayer

camadaOculta = SigmoidLayer(3)
camadaSaida = SigmoidLayer(1)
bias_oculta = BiasUnit()
bias_saida = BiasUnit()

rede.addModule(camadaEntrada)
rede.addModule(camadaOculta)
rede.addModule(camadaSaida)
rede.addModule(bias_oculta)
rede.addModule(bias_saida)

entradaOculta = FullConnection(camadaEntrada, camadaOculta)
ocultaSaida = FullConnection(camadaOculta, camadaSaida)
biasOculta = FullConnection(bias_oculta, camadaOculta)
biasSaida = FullConnection(bias_saida, camadaSaida)

rede.sortModules()
Example #23
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.datasets import SupervisedDataSet
from pybrain.structure import FullConnection, FeedForwardNetwork, TanhLayer, LinearLayer, BiasUnit
import matplotlib.pyplot as plt
import numpy as np

n = FeedForwardNetwork()
n.addInputModule(LinearLayer(1, name='in'))
n.addInputModule(BiasUnit(name='bias'))
n.addModule(TanhLayer(3, name='gotan'))
n.addOutputModule(LinearLayer(1, name='out'))
n.addConnection(FullConnection(n['bias'], n['gotan']))
n.addConnection(FullConnection(n['in'], n['gotan']))
n.addConnection(FullConnection(n['gotan'], n['out']))
n.sortModules()

# initialize the backprop trainer and train
t = BackpropTrainer(n, learningrate=0.1, momentum=0.0, verbose=True)
#DATASET

DS = SupervisedDataSet(1, 1)
X = np.random.rand(100, 1) * 100
Y = X**3 + np.random.rand(100, 1) * 5
maxy = float(max(Y))
maxx = 100.0

for r in range(X.shape[0]):
    DS.appendLinked((X[r] / maxx), (Y[r] / maxy))

t.trainOnDataset(DS, 200)
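
# Sketch (assumption): matplotlib was imported above but never used; compare
# the normalized training data with the trained net's predictions.
pred = [n.activate([x / maxx])[0] for x in X[:, 0]]
plt.scatter(X[:, 0] / maxx, Y[:, 0] / maxy, label='data')
plt.scatter(X[:, 0] / maxx, pred, label='net')
plt.legend()
plt.show()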
Example #24
from pybrain.structure import FeedForwardNetwork
from pybrain.structure import LinearLayer, SigmoidLayer, BiasUnit
# used to make the connections between layers
from pybrain.structure import FullConnection

nn = FeedForwardNetwork()  # create the network

# Input layer. Takes the number of NEURONS in the INPUT layer, e.g. 2
in_layer = LinearLayer(2)

# Hidden layer. Takes the number of NEURONS in the HIDDEN layer, e.g. 3
hidden_layer = SigmoidLayer(3)

# Output layer. Takes the number of NEURONS in the OUTPUT layer, e.g. 1
out_layer = SigmoidLayer(1)

bias1 = BiasUnit()  # for the hidden layer
bias2 = BiasUnit()  # for the output layer

# add the layers to the network

nn.addModule(in_layer)
nn.addModule(hidden_layer)
nn.addModule(out_layer)
nn.addModule(bias1)
nn.addModule(bias2)

# connections between the layers

# every INPUT neuron is connected to ALL neurons in the HIDDEN layer
hidden_in = FullConnection(in_layer, hidden_layer)
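
# The snippet ends here; a sketch of the remaining wiring, following the
# pattern of the other examples (assumption, not original code):
hidden_out = FullConnection(hidden_layer, out_layer)
bias_hidden = FullConnection(bias1, hidden_layer)
bias_out = FullConnection(bias2, out_layer)
nn.addConnection(hidden_in)
nn.addConnection(hidden_out)
nn.addConnection(bias_hidden)
nn.addConnection(bias_out)
nn.sortModules()  # finalize the topology and initialize the weights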
Example #25
    pylab.grid(True)
    plotname = os.path.join(plotdir, ('jpq2layers_plot' + str(iter)))
    pylab.savefig(plotname)


# set-up the neural network
nneuron = 5
mom = 0.98
netname = "LSL-" + str(nneuron) + "-" + str(mom)
mv = ModuleValidator()
v = Validator()
n = FeedForwardNetwork(name=netname)
inLayer = LinearLayer(1, name='in')
hiddenLayer = SigmoidLayer(nneuron, name='hidden0')
outLayer = LinearLayer(1, name='out')
biasinUnit = BiasUnit(name="bhidden0")
biasoutUnit = BiasUnit(name="bout")
n.addInputModule(inLayer)
n.addModule(hiddenLayer)
n.addModule(biasinUnit)
n.addModule(biasoutUnit)
n.addOutputModule(outLayer)
in_to_hidden = FullConnection(inLayer, hiddenLayer)
bias_to_hidden = FullConnection(biasinUnit, hiddenLayer)
bias_to_out = FullConnection(biasoutUnit, outLayer)
hidden_to_out = FullConnection(hiddenLayer, outLayer)
n.addConnection(in_to_hidden)
n.addConnection(bias_to_hidden)
n.addConnection(bias_to_out)
n.addConnection(hidden_to_out)
Example #26
"""
Created on Tue May 29 19:51:12 2018

@author: wstro
"""

from pybrain.structure import FeedForwardNetwork
from pybrain.structure import LinearLayer, SigmoidLayer, BiasUnit
from pybrain.structure import FullConnection

rede = FeedForwardNetwork()

camada_de_entrada = LinearLayer(2)
camada_oculta = SigmoidLayer(3)
camada_saida = SigmoidLayer(1)

bias_1 = BiasUnit()
bias_2 = BiasUnit()

rede.addModule(camada_de_entrada)
rede.addModule(camada_oculta)
rede.addModule(camada_saida)
rede.addModule(bias_1)
rede.addModule(bias_2)

entradaOculta = FullConnection(camada_de_entrada, camada_oculta)
Oculta_a_saida = FullConnection(camada_oculta, camada_saida)

biasOculta = FullConnection(bias_1, camada_oculta)
biasSaida = FullConnection(bias_2, camada_saida)

rede.sortModules()
Example #27
#  History:
#   v1.0 17/08/2020 (started working)
#  Tested on: Python 3.8.1

# ------------------------------Imports---------------------------------------- #
from pybrain.structure import FeedForwardNetwork  # neural network structure
from pybrain.structure import LinearLayer, SigmoidLayer, BiasUnit  # layers
from pybrain.structure import FullConnection  # connections between the layers

# -------------------------------Building the network-------------------------- #
rede = FeedForwardNetwork()

camadaEntrada = LinearLayer(2)  # two neurons in the input layer
camadaOculta = SigmoidLayer(3)  # three neurons in the hidden layer
camadaSaida = SigmoidLayer(1)  # one neuron in the output layer
bias1 = BiasUnit()  # bias for the hidden layer
bias2 = BiasUnit()  # bias for the output layer

# --------------------------Add the layers to the network---------------------- #
rede.addModule(camadaEntrada)
rede.addModule(camadaOculta)
rede.addModule(camadaSaida)
rede.addModule(bias1)
rede.addModule(bias2)

entradaOculta = FullConnection(camadaEntrada, camadaOculta)
ocultaSaida = FullConnection(camadaOculta, camadaSaida)
biasOculta = FullConnection(bias1, camadaOculta)
biasSaida = FullConnection(bias2, camadaSaida)

rede.sortModules()
Example #28
from pybrain.structure import FeedForwardNetwork
from pybrain.structure import LinearLayer, SigmoidLayer, BiasUnit
from pybrain.structure import FullConnection

rede = FeedForwardNetwork()

inLayer = LinearLayer(2)
hiddenLayer = SigmoidLayer(3)
outLayer = LinearLayer(1)
bias = BiasUnit()
bias2 = BiasUnit()

rede.addModule(inLayer)
rede.addModule(hiddenLayer)
rede.addModule(outLayer)
rede.addModule(bias)
rede.addModule(bias2)

entradaoculta = FullConnection(inLayer, hiddenLayer)
ocultasaida = FullConnection(hiddenLayer, outLayer)
biasoculta = FullConnection(bias, hiddenLayer)
bias2saida = FullConnection(bias2, outLayer)

rede.sortModules()

print(rede)
Example #29
    def BuildNN(self):
        """ 
                This function builds a FeedForwardNetwork object based on 
                the data that was used to create the NNBuilder object
            """
        nn = FeedForwardNetwork()

        # Set up the Layers
        inputLayer = LinearLayer(len(self.getInput()))
        outputLayer = SigmoidLayer(self.OUTPUT_NODES)

        # Add to NN
        nn.addInputModule(inputLayer)
        nn.addOutputModule(outputLayer)

        # Handle multiple hidden layers, add to NN
        topology = self.getHiddenLayers()
        hiddenLayers = []

        for i in range(0, len(topology)):
            size = int(topology[i])
            hlayer = SigmoidLayer(size)
            nn.addModule(hlayer)
            hiddenLayers.append(hlayer)

        # Get the bias for each hidden layer
        biasList = []
        for i in range(0, len(topology)):
            bias = BiasUnit(name="bias" + str(i))
            nn.addModule(bias)
            biasList.append(bias)

        # Manually connect input layer to first hidden,
        # and output layer to last hidden. Then connect all other
        # hidden layers
        input2hidden = FullConnection(inputLayer, hiddenLayers[0])
        hidden2output = FullConnection(hiddenLayers[-1], outputLayer)

        # If there was more than 1 hidden layer connect them together
        hiddenConList = []
        biasConList = []
        if len(topology) > 1:
            for i in range(0, len(topology) - 1):

                # Connect current layer to next layer
                connection = FullConnection(hiddenLayers[i],
                                            hiddenLayers[i + 1])
                hiddenConList.append(connection)

                # Make connection for bias
                biasConList.append(FullConnection(biasList[i],
                                                  hiddenLayers[i]))

        # The loop above only ran to len(topology) - 1, so connect the bias
        # for the last hidden layer here
        last = len(topology) - 1
        biasConList.append(FullConnection(biasList[last], hiddenLayers[last]))

        # Add connections to the NN
        nn.addConnection(input2hidden)
        for i in hiddenConList:
            nn.addConnection(i)
        for i in biasConList:
            nn.addConnection(i)
        nn.addConnection(hidden2output)

        # sortModules() finalizes the network: it orders the modules and
        # initializes the parameters
        nn.sortModules()

        self.nn = nn
        return nn
Example #30
from pybrain.structure import FeedForwardNetwork
from pybrain.structure import LinearLayer, SigmoidLayer, BiasUnit
from pybrain.structure import FullConnection

rede = FeedForwardNetwork()

camadaEntrada = LinearLayer(2)  # two input neurons are created
camadaOculta = SigmoidLayer(3)  # three sigmoid hidden neurons are created
camadaSaida = SigmoidLayer(1)
bias1 = BiasUnit()  # create a bias unit
bias2 = BiasUnit()

rede.addModule(camadaEntrada)
rede.addModule(camadaOculta)
rede.addModule(camadaSaida)
rede.addModule(bias1)
rede.addModule(bias2)

entradaOculta = FullConnection(camadaEntrada, camadaOculta)
ocultaSaida = FullConnection(camadaOculta, camadaSaida)
biasOculta = FullConnection(bias1, camadaOculta)
biasSaida = FullConnection(bias2, camadaSaida)

rede.sortModules()

print(rede)
print(entradaOculta.params)
print(ocultaSaida.params)
print(biasOculta.params)
print(biasSaida.params)