Example #1
File: tasknn.py Project: dzhu/cs181
def task(self):
    listInst = load_training_9k(10)
    net = nn.init_net([14 * 14, 15, 10])
    listDblResult = list(build_and_measure_net(
        net, listInst, listInst, nn.distributed_encode_label,
        nn.distributed_decode_net_output, self.LEARNING_RATE,
        self.ROUNDS))
    return performance_graph([[a for a, _ in listDblResult]],
                             "Digit Recognition Training Accuracy")
Example #2
def learn_xor():
    """Build a neural network which solves XOR."""
    net = nn.init_net([2, 2, 1])
    for _ in xrange(5000):
        for inst in XOR_INSTANCES:
            nn.update_net(net, inst, 0.5, [inst.iLabel])
    for inst in XOR_INSTANCES:
        print inst.iLabel, nn.feed_forward(net, inst.listDblFeatures)
    nn.print_net(net)
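XOR_INSTANCES is referenced throughout these examples but never defined on this page. A plausible definition, assuming nn.Instance takes a label followed by a feature list (the same argument order used in Example #7 below):

# Assumed definition: the four XOR truth-table rows as nn.Instance objects.
XOR_INSTANCES = [nn.Instance(0, [0.0, 0.0]),
                 nn.Instance(1, [0.0, 1.0]),
                 nn.Instance(1, [1.0, 0.0]),
                 nn.Instance(0, [1.0, 1.0])]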
Example #3
def build_xor_net():
    HIDDEN_NODES = 2
    ROUNDS = 5000
    assert XOR_INSTANCES
    net = nn.init_net([2, HIDDEN_NODES, 1], 0.001)
    for ixRound in xrange(ROUNDS):
        dblAlpha = 2.0 * ROUNDS / (ixRound + ROUNDS)
        for inst in XOR_INSTANCES:
            nn.update_net(net, inst, dblAlpha, [inst.iLabel])
    return net
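The schedule dblAlpha = 2.0 * ROUNDS / (ixRound + ROUNDS) anneals the learning rate from 2.0 on the first epoch down toward 1.0 on the last (the next example uses the same schedule). A quick check of its range:

ROUNDS = 5000
for ixRound in (0, ROUNDS / 2, ROUNDS - 1):
    print ixRound, 2.0 * ROUNDS / (ixRound + ROUNDS)
# 0     2.0
# 2500  1.333...
# 4999  1.0001...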
Example #5
def build_xor_net():
    HIDDEN_NODES = 2
    ROUNDS = 5000
    LEARNING_RATE = 0.35  # note: unused; dblAlpha below follows a decay schedule instead
    assert XOR_INSTANCES
    net = nn.init_net([2, HIDDEN_NODES, 1], 0.001)
    for ixRound in xrange(ROUNDS):
        dblAlpha = 2.0 * ROUNDS / (ixRound + ROUNDS)
        for inst in XOR_INSTANCES:
            nn.update_net(net, inst, dblAlpha, [inst.iLabel])
    return net
Example #6
File: tasknn.py Project: dzhu/cs181
def measure_performance(self, fxnEncode, fxnDecode):
    listInstTraining = load_training_9k(self.TRAINING_INSTANCES)
    listInstTest = load_test_1k(self.TEST_INSTANCES)
    net = nn.init_net(self.NETWORK_CONFIGURATION)
    listDblResult = list(build_and_measure_net(
        net, listInstTraining, listInstTest, fxnEncode, fxnDecode,
        self.LEARNING_RATE, self.ROUNDS))
    listDblTest = [a for a, _ in listDblResult]
    listDblTrain = [b for _, b in listDblResult]
    sTitle = ("Digit Recognition Test Accuracy Trained on %d Instances"
              % len(listInstTraining))
    return performance_graph([listDblTest, listDblTrain], sTitle)
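build_and_measure_net itself isn't shown on this page. A minimal sketch consistent with how Examples #1 and #6 consume it, assuming it trains for one epoch per round and yields a (test accuracy, training accuracy) pair each time:

# Sketch only: a generator matching the (test, train) pairs unpacked above.
def build_and_measure_net(net, listInstTrain, listInstTest,
                          fxnEncode, fxnDecode, dblLearningRate, cRounds):
    def accuracy(listInst):
        cCorrect = 0
        for inst in listInst:
            listDblOut = nn.feed_forward(net, inst.listDblFeatures)
            cCorrect += int(fxnDecode(listDblOut) == inst.iLabel)
        return cCorrect * 1.0 / len(listInst)
    for _ in xrange(cRounds):
        for inst in listInstTrain:
            nn.update_net(net, inst, dblLearningRate, fxnEncode(inst.iLabel))
        yield accuracy(listInstTest), accuracy(listInstTrain)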
Example #7
    def test_update_net(self):
        # Test a simple network to make sure that forward and backprop are
        # working properly.
        # 2 inputs, 2 hidden nodes, 1 output node
        net = nn.init_net([2, 2, 1])
        # Set the weights on the first hidden node to 0.1, the weights on the
        # second hidden node to -0.1.
        def init_weights(p, input_weight, w0):
            for i in xrange(len(p.listDblW)):
                p.listDblW[i] = input_weight
            p.dblW0 = w0

        # Weights for the first hidden node to be 0.1
        init_weights(net.listLayer[0].listPcpt[0], 0.1, 0.0)
        # Weights for the second hidden node to be -0.1
        init_weights(net.listLayer[0].listPcpt[1], -0.1, 0.0)
        # Weights for the output layer to be 1.0
        init_weights(net.listLayer[1].listPcpt[0], 1.0, 0.0)
        # Inputs are 1.0 and -0.9
        inst = nn.Instance(0, [1.0, -0.9])
        # Target output is 0.5
        targets = [0.5]
        # The output of hidden unit 1 will be 1 / (1 + e^(-0.01))
        # The output of hidden unit 2 will be 1 / (1 + e^(0.01))
        # The weighted input to the output unit sums to exactly 1.0, giving output ~0.731
        # The error at the output will be -0.231
        nn.update_net(net, inst, 1.0, targets)

        def get_weight(layer_id, perceptron_id, input_id):
            if input_id == -1:
                return net.listLayer[layer_id].listPcpt[perceptron_id].dblW0
            return net.listLayer[layer_id].listPcpt[perceptron_id].listDblW[input_id]

        output = 1.0 / (1.0 + math.exp(-1.0))
        delta_out = (0.5 - output) * output * (1 - output)
        h1_out = 1.0 / (1.0 + math.exp(-0.01))
        h2_out = 1.0 / (1.0 + math.exp(0.01))
        self.assertAlmostEqual(1.0 + 1.0 * h1_out * delta_out, get_weight(1, 0, 0))
        self.assertAlmostEqual(1.0 + 1.0 * h2_out * delta_out, get_weight(1, 0, 1))
        self.assertAlmostEqual(1.0 * 1.0 * delta_out, get_weight(1, 0, -1))

        in1 = 1.0 / (1.0 + math.exp(-0.01))
        delta_hidden1 = in1 * (1 - in1) * delta_out
        # Hidden-unit weights are updated against the original inputs
        # (1.0 and -0.9), scaled by each unit's delta.
        self.assertAlmostEqual(0.1 + 1.0 * delta_hidden1, get_weight(0, 0, 0))
        self.assertAlmostEqual(0.1 - 0.9 * delta_hidden1, get_weight(0, 0, 1))
        self.assertAlmostEqual(delta_hidden1, get_weight(0, 0, -1))

        in2 = 1.0 / (1.0 + math.exp(0.01))
        delta_hidden2 = in2 * (1 - in2) * delta_out
        self.assertAlmostEqual(-0.1 + 1.0 * delta_hidden2, get_weight(0, 1, 0))
        self.assertAlmostEqual(-0.1 - 0.9 * delta_hidden2, get_weight(0, 1, 1))
        self.assertAlmostEqual(delta_hidden2, get_weight(0, 1, -1))
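The expected values in this test follow the standard sigmoid backpropagation update w <- w + alpha * x * delta, with delta = (t - o) * o * (1 - o) at the output unit. Reproducing the forward pass and the output-layer update by hand:

import math

# Forward pass for the hand-initialized net above.
h1 = 1.0 / (1.0 + math.exp(-(0.1 * 1.0 + 0.1 * -0.9)))     # ~0.5025
h2 = 1.0 / (1.0 + math.exp(-(-0.1 * 1.0 + -0.1 * -0.9)))   # ~0.4975
out = 1.0 / (1.0 + math.exp(-(1.0 * h1 + 1.0 * h2)))       # ~0.7311

# Output-layer update with alpha = 1.0.
delta_out = (0.5 - out) * out * (1.0 - out)                 # ~-0.0454
print 1.0 + h1 * delta_out   # new output weight from hidden unit 1
print delta_out              # new output bias (was 0.0)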
Example #10
def learn_nn_classifier(listCLayerSize, lstTrainBoostInst, fxnEncode, fxnDecode):
    """Given a training set of BoostInstances, learns and
    returns the learned neural net.

    learn_nn_classifier([2, 2, 1], XOR_INSTANCES, nn.binary_encode_label,
                        nn.binary_decode_net_output)
    Learning rate: 1.923447  Round 200 complete.  Training Accuracy: 0.250000
    Learning rate: 1.905125  Round 250 complete.  Training Accuracy: 0.750000
    Learning rate: 1.887149  Round 300 complete.  Training Accuracy: 0.750000
    Learning rate: 1.869508  Round 350 complete.  Training Accuracy: 0.750000
    Learning rate: 1.852195  Round 400 complete.  Training Accuracy: 1.000000
    Out[99]: <nn.NeuralNet at 0x9504950>
    """
    # learn through rounds number of epochs
    rounds = 500
    interval = 50
    stopRound = 350
    iInst = len(lstTrainBoostInst)
    net = nn.init_net(listCLayerSize)
    
    for ixRound in xrange(rounds):
        dblAlpha = 2.0*rounds/(ixRound + rounds)
        
        # learn through one epoch and compute the error
        errors = 0
        for boostInst in lstTrainBoostInst:
            inst = nn.Instance(boostInst.iLabel, boostInst.listAttrs)
            listDblOut = nn.update_net(net, inst, dblAlpha, fxnEncode(inst.iLabel))
            iGuess = fxnDecode(listDblOut)
            if iGuess != inst.iLabel:
                errors += 1

        # print result after an interval of rounds
        if not ((ixRound + 1) % interval):
            sys.stderr.write('Learning rate: %f ' % dblAlpha)
            sys.stderr.write("Epoch: %d Training Accuracy: %f \n"
                             % (ixRound + 1, 1 - errors * 1.0 / iInst))
            
        # implement a stopping condition.
        if (ixRound+1) == stopRound:
            return net
    return net
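BoostInstance isn't defined on this page; the function only reads its iLabel and listAttrs attributes. A hypothetical stand-in to exercise the classifier, with nn.binary_encode_label and nn.binary_decode_net_output assumed to come from the course's nn module:

import collections

# Hypothetical stand-in: the real BoostInstance comes from the boosting code.
BoostInstance = collections.namedtuple('BoostInstance', ['iLabel', 'listAttrs'])

lstInst = [BoostInstance(0, [0.0, 0.0]), BoostInstance(1, [0.0, 1.0]),
           BoostInstance(1, [1.0, 0.0]), BoostInstance(0, [1.0, 1.0])]
net = learn_nn_classifier([2, 2, 1], lstInst,
                          nn.binary_encode_label, nn.binary_decode_net_output)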
Example #11
    def test_init_net(self):
        layer_sizes = [3, 2, 1]
        neural_net = nn.init_net(layer_sizes)
        self.assertEqual(2, len(neural_net.listLayer))
        # Make sure that the layers are configured correctly.
        layer0 = neural_net.listLayer[0]
        self.assertEqual(3, layer0.layer_input_size())
        self.assertEqual(2, layer0.layer_output_size())
        
        def check_perceptrons(layer, num_inputs):
            # Each perceptron needs one weight per input, and every
            # weight and bias must lie in [-0.1, 0.1].
            for p in layer.listPcpt:
                self.assertEqual(num_inputs, len(p.listDblW))
                self.assertTrue(-0.1 <= p.dblW0 <= 0.1)
                for w in p.listDblW:
                    self.assertTrue(-0.1 <= w <= 0.1)
        check_perceptrons(layer0, 3)

        # Weights on all inputs should be between -0.1 and 0.1
        layer1 = neural_net.listLayer[1]
        self.assertEqual(2, layer1.layer_input_size())
        self.assertEqual(1, layer1.layer_output_size())
        check_perceptrons(layer1, 2)
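Read as a contract, this test says nn.init_net builds one layer per consecutive pair of sizes, with every weight and bias drawn from [-0.1, 0.1] (the 0.001 passed in build_xor_net suggests that range is configurable). A minimal initializer satisfying the contract, using stand-in classes whose attribute names mirror the test:

import random

# Sketch only: the real nn module defines its own classes.
class Pcpt(object):
    def __init__(self, listDblW, dblW0):
        self.listDblW, self.dblW0 = listDblW, dblW0

class Layer(object):
    def __init__(self, listPcpt):
        self.listPcpt = listPcpt
    def layer_input_size(self):
        return len(self.listPcpt[0].listDblW)
    def layer_output_size(self):
        return len(self.listPcpt)

def init_net(listCLayerSize, dblScale=0.1):
    """Sketch: random weights and biases uniform in [-dblScale, dblScale]."""
    listLayer = []
    for cIn, cOut in zip(listCLayerSize[:-1], listCLayerSize[1:]):
        listLayer.append(Layer(
            [Pcpt([random.uniform(-dblScale, dblScale) for _ in xrange(cIn)],
                  random.uniform(-dblScale, dblScale))
             for _ in xrange(cOut)]))
    return listLayer  # the real init_net wraps the layers in a NeuralNet object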
Example #12
def experiment(opts):
    """Run a neural net experiment."""
    dictSeen = {}
    writer = csv.writer(open("500epochs_80Nodes_4.csv", "wb"))

    def load(sAttrFilename, sClassFilename):
        if sAttrFilename in dictSeen:
            return dictSeen[sAttrFilename]
        sys.stderr.write("Loading %s and %s..." %
                         (sAttrFilename, sClassFilename))
        listInst = load_separate_data(sAttrFilename, sClassFilename,
                                      opts.max_inst)
        sys.stderr.write("done.\n")
        dictSeen[sAttrFilename] = listInst
        return listInst

    listInstTrain = load(opts.attrtrain, opts.classtrain)
    listInstVal = load(opts.attrvalidation, opts.classvalidation)
    listInstTest = load(opts.attrtest, opts.classtest)
    config = [opts.num_inputs]
    if opts.hidden_units:
        print 'Adding a hidden layer with %d units' % opts.hidden_units
        config.append(opts.hidden_units)

    config.append(7)  # Covertype data set (7 classes)
    #config.append(10)
    #config.append(4)

    dblPrevAccuracy = 0.0
    dblCurAccuracy = 0.0
    dblStopDeltaAccuracy = 0.05
    iEpochInterval = 5

    net = nn.init_net(config)
    for ixRound in xrange(opts.rounds):
        dblAlpha = 2.0 * opts.rounds / (ixRound + opts.rounds)
        print 'Learning rate: %f' % dblAlpha
        # Count error
        errors = 0
        for inst in listInstTrain:
            listDblOut = nn.update_net(
                net, inst, dblAlpha, nn.distributed_encode_label(inst.iLabel))
            iGuess = nn.distributed_decode_net_output(listDblOut)
            if iGuess != inst.iLabel:
                errors += 1
        # Compute validation error
        validation_correct = num_correct(net, listInstVal,
                                         nn.distributed_decode_net_output)
        sys.stderr.write(
            "Round %d complete.  Training Accuracy: %f, Validation Accuracy: %f\n"
            % (ixRound + 1, 1 - errors * 1.0 / len(listInstTrain),
               validation_correct * 1.0 / len(listInstVal)))
        # RECORDING
        writer.writerow([
            ixRound + 1, dblAlpha, 1 - errors * 1.0 / len(listInstTrain),
            validation_correct * 1.0 / len(listInstVal)
        ])
        if opts.stopping_condition:
            if (ixRound + 1) % iEpochInterval == 0:
                dblCurAccuracy = validation_correct * 1.0 / len(listInstVal)
                if (dblCurAccuracy - dblPrevAccuracy) < dblStopDeltaAccuracy:
                    break
                else:
                    dblPrevAccuracy = dblCurAccuracy

    cCorrect = 0
    for inst in listInstTest:
        listDblOut = nn.feed_forward(net, inst.listDblFeatures)
        iGuess = nn.distributed_decode_net_output(listDblOut)
        cCorrect += int(inst.iLabel == iGuess)
    print "correct:", cCorrect, "out of", len(listInstTest),
    print "(%.1f%%)" % (100.0 * cCorrect / len(listInstTest))