Code example #1
    def setUp(self):
        self.test_pcn1 = pcn.pcn(1, 20180120, thresh_type='logistic')
        self.test_pcn2 = pcn.pcn(1, iter=10)

        self.data = np.matrix([[0, 0], [0, 1], [1, 0], [1, 1]])
        self.targets1 = np.array([0, 0, 0, 1])
        self.targets2 = np.array([0, 1, 1, 1])

        self.predict1 = np.squeeze(self.test_pcn1.trainWeights(self.data, self.targets1))
        self.predict2 = np.squeeze(self.test_pcn2.trainWeights(self.data, self.targets2))
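Note that this test suite exercises a different pcn implementation from most examples below: its constructor appears to take an output dimension plus seed, iter, and thresh_type options (compare example #15, which calls pcn.pcn(self.outputDim, seed=..., iter=..., thresh_type=...)), rather than the Marsland-style pcn built directly from (inputs, targets).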
Code example #2
def logic():
    """Run AND and XOR logic functions."""
    import pcn

    a = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]])
    b = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]])

    p = pcn.pcn(a[:, 0:2], a[:, 2:])
    p.pcntrain(a[:, 0:2], a[:, 2:], 0.25, 10)
    p.confmat(a[:, 0:2], a[:, 2:])
    q = pcn.pcn(b[:, 0:2], b[:, 2:])
    q.pcntrain(b[:, 0:2], b[:, 2:], 0.25, 10)
    q.confmat(b[:, 0:2], b[:, 2:])
Code example #3
File: pcn.py Project: sharped/4155
def logic():
    """Run AND and XOR logic functions."""
    import pcn

    a = np.array([[0,0,0],[0,1,0],[1,0,0],[1,1,1]])
    b = np.array([[0,0,0],[0,1,1],[1,0,1],[1,1,0]])

    p = pcn.pcn(a[:,0:2],a[:,2:])
    p.pcntrain(a[:,0:2],a[:,2:],0.25,10)
    p.confmat(a[:,0:2],a[:,2:])

    q = pcn.pcn(b[:,0:2],b[:,2:])
    q.pcntrain(b[:,0:2],b[:,2:],0.25,10)
    q.confmat(b[:,0:2],b[:,2:])
Code example #4
    def __init__(self, inputs, targets, n_rbf, sigma=0, use_kmeans=0,
                 normalise=0):
        self.n_in = np.shape(inputs)[1]
        self.n_out = np.shape(targets)[1]
        self.n_data = np.shape(inputs)[0]
        self.n_rbf = n_rbf
        self.use_kmeans = use_kmeans
        self.normalise = normalise

        if use_kmeans:
            self.kmeans_net = kmeans.kmeans(self.n_rbf, inputs)

        self.hidden = np.zeros((self.n_data, self.n_rbf + 1))

        if sigma == 0:
            # Set the width of Gaussians
            d = (inputs.max(axis=0) - inputs.min(axis=0)).max()
            self.sigma = d / np.sqrt(2 * n_rbf)
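            # (a common heuristic: sigma scales with the overall data range so neighbouring Gaussians overlap)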
        else:
            self.sigma = sigma

        self.perceptron = pcn.pcn(self.hidden[:, :-1], targets)

        # Initialize the network
        self.weights1 = np.zeros((self.n_in, self.n_rbf))
Code example #5
    def __init__(self,inputs,targets,nRBF,sigma=0,normalise=0,eta=0.25,functype='sigmoid',traintype='batch'):
        """ constructor """
        
        self.inputs = inputs
        self.targets = targets
        self.nRBF = nRBF #number of RBF nodes
        self.normalise = normalise
        self.eta = eta #learning rate
        self.functype = functype
        self.traintype = traintype
        
        #set width of gaussian
        if sigma==0:
            d = (self.inputs.max(axis=0)-self.inputs.min(axis=0)).max()
            self.sigma = d/np.sqrt(2*nRBF)  
        else:
            self.sigma = sigma
                
        #input array of RBF nodes
        self.hidden = np.zeros((np.shape(self.inputs)[0],self.nRBF))
        
        #set RBF weights to be random datapoints
        self.weights = np.zeros((np.shape(inputs)[1],self.nRBF))
        indices = np.arange(np.shape(self.inputs)[0])
        np.random.shuffle(indices)
        for i in range(self.nRBF):
            self.weights[:,i] = self.inputs[indices[i],:]
            
        #calculate the hidden rbf nodes (first layer)
        self.hidden = self.rbffwd(self.inputs,1)

        #initialise a perceptron for the second layer
        self.perceptron = pcn.pcn(self.hidden,self.targets,self.eta,self.functype,self.traintype)
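The RBF constructors above (examples #4 and #5) delegate the hidden-layer computation to an rbffwd method. Below is a minimal sketch of what such a Gaussian-RBF forward pass computes, under the usual assumptions (one node per stored centre, shared width sigma); the extra numeric argument in the call self.rbffwd(self.inputs, 1) above appears to select a layer and is omitted here:

import numpy as np

def rbffwd(self, inputs):
    """Hypothetical hidden-layer pass: one Gaussian activation per RBF centre."""
    hidden = np.zeros((np.shape(inputs)[0], self.nRBF))
    for i in range(self.nRBF):
        # Squared Euclidean distance from every input to the i-th centre.
        dist2 = np.sum((inputs - self.weights[:, i]) ** 2, axis=1)
        hidden[:, i] = np.exp(-dist2 / (2 * self.sigma ** 2))
    if self.normalise:
        # Normalised RBF: activations for each input sum to one across nodes.
        hidden /= np.sum(hidden, axis=1, keepdims=True)
    return hidden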
Code example #6
def fullSetPerceptron(ain, bin, cin, din, iterations):
    """This method is used for using the perceptron on the entire set
    permutate the methods arguments to change the separated set"""

    #Gather the data
    a = getData(ain)
    b = getData(bin)
    c = getData(cin)
    d = getData(din)
    x = ones((50, 1))
    y = zeros((150, 1))
    targets = concatenate((x, y))
    inputs = concatenate((a, b, c, d))

    # Randomise order of inputs
    change = list(range(shape(inputs)[0]))  # list() needed in Python 3
    random.shuffle(change)
    inputs = inputs[change, :]
    targets = targets[change, :]

    #Do the training, find the weights, plot the classification
    #and create the confusion matrix
    p = pcn.pcn(inputs, targets)
    weights = p.pcntrain(inputs, targets, 0.25, iterations)
    #plotClassificationLine(weights,inputs)
    p.confmat(inputs, targets)
Code example #7
File: rbf.py Project: AjayKrP/Machine-Learning
    def __init__(self,
                 inputs,
                 targets,
                 nRBF,
                 sigma=0,
                 usekmeans=0,
                 normalise=0):
        self.nin = shape(inputs)[1]
        self.nout = shape(targets)[1]
        self.ndata = shape(inputs)[0]
        self.nRBF = nRBF
        self.usekmeans = usekmeans
        self.normalise = normalise

        if usekmeans:
            self.kmeansnet = kmeans.kmeans(self.nRBF, inputs)

        self.hidden = zeros((self.ndata, self.nRBF + 1))

        if sigma == 0:
            # Set width of Gaussians
            d = (inputs.max(axis=0) - inputs.min(axis=0)).max()
            self.sigma = d / sqrt(2 * nRBF)
        else:
            self.sigma = sigma

        self.perceptron = pcn.pcn(self.hidden[:, :-1], targets)

        # Initialise network
        self.weights1 = zeros((self.nin, self.nRBF))
Code example #8
def fullSetPerceptron(ain,bin,cin,din,iterations):
    
    """This method is used for using the perceptron on the entire set
    permutate the methods arguments to change the separated set"""
    
    #Gather the data
    a = getData(ain)
    b = getData(bin)
    c = getData(cin)
    d = getData(din)
    x = ones((50,1))
    y = zeros((150,1))
    targets = concatenate((x,y))
    inputs = concatenate((a,b,c,d)) 
    
    # Randomise order of inputs
    change = list(range(shape(inputs)[0]))  # list() needed in Python 3
    random.shuffle(change)
    inputs = inputs[change,:]
    targets = targets[change,:]
    
    #Do the training, find the weights, plot the classification
    #and create the confusion matrix  
    p = pcn.pcn(inputs,targets)
    weights = p.pcntrain(inputs,targets,0.25,iterations)
    #plotClassificationLine(weights,inputs)
    p.confmat(inputs,targets)
Code example #9
    def __init__(self,
                 inputs,
                 targets,
                 nRBF,
                 sigma=0,
                 usekmeans=0,
                 normalise=0):
        self.nin = inputs.shape[1]
        self.nout = targets.shape[1]
        self.ndata = inputs.shape[0]
        self.nRBF = nRBF
        self.usekmeans = usekmeans
        self.normalise = normalise

        if usekmeans:
            self.kmeansnet = kmeans.kmeans(self.nRBF, inputs)

        self.hidden = np.zeros((self.ndata, self.nRBF + 1))

        if sigma == 0:
            d = (inputs.max(axis=0) - inputs.min(axis=0)).max()
            self.sigma = d / np.sqrt(2 * nRBF)
        else:
            self.sigma = sigma

        self.perceptron = pcn.pcn(self.hidden[:, :-1], targets)
        self.weights1 = np.zeros((self.nin, self.nRBF))
Code example #10
File: rbf.py Project: vsrz/CS673
    def __init__(self,inputs,targets,nRBF,sigma=0,usekmeans=0,normalise=0):
        self.nin = shape(inputs)[1]
        self.nout = shape(targets)[1]        
        self.ndata = shape(inputs)[0]
        self.nRBF = nRBF
        self.usekmeans = usekmeans
        self.normalise = normalise

        #print "Initalizing RBFN with parameters: "
        #print "Inputs   : " + str(shape(inputs))
        #print "targets  : " + str(shape(targets))
        #print "nRBF     : " + str(nRBF)
#        print "Sigma    : " + str(sigma)
#        print "K-Means  : " + str(usekmeans)
#        print "Normalise: " + str(normalise)
#        print

        if usekmeans:
            self.kmeansnet = kmeans.kmeans(self.nRBF,inputs)
            
        self.hidden = zeros((self.ndata,self.nRBF+1))
        
        if sigma==0:
            # Set width of Gaussians
            d = (inputs.max(axis=0)-inputs.min(axis=0)).max()
            self.sigma = d/sqrt(2*nRBF)  
        else:
            self.sigma = sigma
                
        self.perceptron = pcn.pcn(self.hidden[:,:-1],targets)

        # Initialise network
        self.weights1 = zeros((self.nin,self.nRBF))
Code example #11
def logic():
    """Train a perceptron on a three-input logic function.
    (The AND and XOR arrays a and b are defined but unused in this excerpt.)"""
    import pcn

    a = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]])
    b = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]])
    c = np.array([[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 1],
                  [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 0, 1], [1, 1, 1, 1]])

    y = pcn.pcn(c[:, 0:3], c[:, 3:])
    y.pcntrain(c[:, 0:3], c[:, 3:], 0.2, 5)
    y.confmat(c[:, 0:3], c[:, 3:])
Code example #12
File: irisnew.py Project: nilinswap/neuro-evolution
def find_fitness(rest_setx, rest_sety, weightarr):

    rows = np.shape(rest_setx)[1] + 1  #for bias
    cols = np.shape(rest_sety)[1]

    weightarr = np.reshape(weightarr, (rows, cols))  # (5, 3) for the iris setup

    net = pcn.pcn(rest_setx, rest_sety, weightarr)
    arr = net.pcnfwd(rest_setx)

    # half mean-squared error; 0.5 avoids Python 2's integer division (1/2 == 0)
    er_arr = 0.5 * np.mean((arr - rest_sety) ** 2)

    return er_arr
Code example #13
File: som.py Project: Finbark/test
    def run_perceptron(self, data, data_targets, train_size, test_size):
        '''
        Takes the activations for the som and runs them through a perceptron.
        '''
        data = self.create_new_inputs(data)
        data = self.normalise(data)
        train_set, test_set = self.split_data(data, train_size, test_size)
        train_set_target, test_set_target = self.split_data(
            data_targets, train_size, test_size)

        p = pcn.pcn(train_set, train_set_target)
        p.pcntrain(train_set, train_set_target, 0.1, 200)
        correct = p.confmat(test_set, test_set_target)

        return correct
Code example #14
def doubleSet_Perceptron(setX, setY, iterations):
    """Run the perceptron on two sets."""

    #Create the data
    a = getData(setX)
    b = getData(setY)
    c = ones((50, 1))
    d = zeros((50, 1))
    targets = concatenate((c, d))
    inputs = concatenate((a, b))

    # Randomise order of inputs
    change = list(range(shape(inputs)[0]))  # list() needed in Python 3
    random.shuffle(change)
    inputs = inputs[change, :]
    targets = targets[change, :]

    #Do the training, find the weights, plot the classification
    #and create the confusion matrix
    p = pcn.pcn(inputs, targets)
    weights = p.pcntrain(inputs, targets, 0.25, iterations)
    plotClassificationLine(weights, inputs)
    p.confmat(inputs, targets)
Code example #15
    def initializeNeurons(self, data):
        # get input data attributes
        nData = np.shape(data)[0]

        vectReorder = np.arange(nData)
        np.random.shuffle(vectReorder)
        dataReordered = data[vectReorder, :]

        # create vector of RBFneurons
        self.matRBFNeurons = [
            rbfneuron(nData, self.sigma) for k in range(self.nNeurons)
        ]

        for m in range(self.nNeurons):
            self.matRBFNeurons[m].weights = dataReordered[m, :]

        # Create PCN layer
        self.PCNLayer = pcn.pcn(self.outputDim,
                                seed=self.seed,
                                iter=self.iter,
                                thresh_type=self.thresh_type)

        # Create vector of PCN neurons
        self.PCNLayer.initializeNeurons(self.nNeurons)
Code example #16
def doubleSet_Perceptron(setX, setY, iterations):
    """Run the perceptron on two sets."""
    
    #Create the data
    a = getData(setX)
    b = getData(setY)
    c = ones((50,1))
    d = zeros((50,1))
    targets = concatenate((c,d))
    inputs = concatenate((a,b)) 
    
    # Randomise order of inputs
    change = list(range(shape(inputs)[0]))  # list() needed in Python 3
    random.shuffle(change)
    inputs = inputs[change,:]
    targets = targets[change,:]
     
    #Do the training, find the weights, plot the classification
    #and create the confusion matrix   
    p = pcn.pcn(inputs,targets)
    weights = p.pcntrain(inputs,targets,0.25,iterations)
    plotClassificationLine(weights,inputs)
    p.confmat(inputs,targets)
Code example #17
File: rbf.py Project: aborodya/EclipseExperimental
    def __init__(self, inputs, targets, nRBF, sigma=0, usekmeans=0, normalise=0):
        self.nin = shape(inputs)[1]
        self.nout = shape(targets)[1]
        self.ndata = shape(inputs)[0]
        self.nRBF = nRBF
        self.usekmeans = usekmeans
        self.normalise = normalise

        if usekmeans:
            self.kmeansnet = kmeans.kmeans(self.nRBF, inputs)

        self.hidden = zeros((self.ndata, self.nRBF + 1))

        if sigma == 0:
            # Set width of Gaussians
            d = (inputs.max(axis=0) - inputs.min(axis=0)).max()
            self.sigma = d / sqrt(2 * nRBF)
        else:
            self.sigma = sigma

        self.perceptron = pcn.pcn(self.hidden[:, :-1], targets)

        # Initialise network
        self.weights1 = zeros((self.nin, self.nRBF))
Code example #18
File: pima.py Project: AndyShi12/Code-Example
from numpy import *
import pcn

pima = loadtxt('/Users/srmarsla/Book/Datasets/pima/pima-indians-diabetes.data',delimiter=',')

# Plot the first and second values for the two classes
#indices0 = where(pima[:,8]==0)
#indices1 = where(pima[:,8]==1)
#
#ion()
#plot(pima[indices0,0],pima[indices0,1],'go')
#plot(pima[indices1,0],pima[indices1,1],'rx')

# Perceptron training on the original dataset
print "Output on original data"
p = pcn.pcn(pima[:,:8],pima[:,8:9])
p.pcntrain(pima[:,:8],pima[:,8:9],0.25,100)
p.confmat(pima[:,:8],pima[:,8:9])

# Various preprocessing steps
pima[where(pima[:,0]>8),0] = 8

pima[where(pima[:,7]<=30),7] = 1
pima[where((pima[:,7]>30) & (pima[:,7]<=40)),7] = 2
pima[where((pima[:,7]>40) & (pima[:,7]<=50)),7] = 3
pima[where((pima[:,7]>50) & (pima[:,7]<=60)),7] = 4
pima[where(pima[:,7]>60),7] = 5

pima[:,:8] = pima[:,:8]-pima[:,:8].mean(axis=0)
pima[:,:8] = pima[:,:8]/pima[:,:8].var(axis=0)
Code example #19
File: mnist.py Project: quietcoolwu/MLCode
import gzip
import pickle
import numpy as np
import pcn

# Read the dataset in (code from sheet)
f = gzip.open('mnist.pkl.gz', 'rb')
tset, vset, teset = pickle.load(f, encoding='latin1')  # this pickle was written by Python 2
f.close()

nread = 200
# Just use the first few images
train_in = tset[0][:nread, :]

# This is a little bit of work -- 1 of N encoding
# Make sure you understand how it does it
train_tgt = np.zeros((nread, 10))
for i in range(nread):
    train_tgt[i, tset[1][i]] = 1
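# (equivalently, with fancy indexing: train_tgt[np.arange(nread), tset[1][:nread]] = 1)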

test_in = teset[0][:nread, :]
test_tgt = np.zeros((nread, 10))
for i in range(nread):
    test_tgt[i, teset[1][i]] = 1

# Train a Perceptron on training set
p = pcn.pcn(train_in, train_tgt)
p.pcntrain(train_in, train_tgt, 0.25, 100)

# This isn't really good practice since it's on the training data, 
# but it does show that it is learning.
p.confmat(train_in, train_tgt)

# Now test it
p.confmat(test_in, test_tgt)
Code example #20
#Train 100 NNs with various learning rates, numbers of training iterations, and shuffle subsets
for learning_rate in [0.1,0.25,0.5,0.75,1]:
    for training_iterations in [50,200,500,1000]:
        for shuffle_iterations in range(5):
            #shuffle the email data-set records
            np.random.shuffle(emails)
            
            #use the first half for training, second half for testing 
            trainin = emails[0:300,:nInputs-1]
            testin = emails[300:600,:nInputs-1]
            traintgt = emails[0:300,nInputs-1:nInputs]
            testtgt = emails[300:600,nInputs-1:nInputs]

            #create a new perceptron and train it
            p = pcn.pcn(trainin, traintgt)
            p.pcntrain(trainin, traintgt, learning_rate, training_iterations)
            
            #test the trained perceptron and store the resulting confusion matrix and success rate
            cm, sr = p.confmat(testin, testtgt)
            
            print "LR: ", learning_rate, "    TI: ", training_iterations, "    SI: ", shuffle_iterations, "    SR: ", int(sr*100), "%    FP: ", cm[0][1]
            
            #commit to disk and log the networks and with the highest success rates
            if (sr > highest_overall_sr):
                sp.save(p, "highest_overall_sr_pcn", cm, sr, learning_rate, training_iterations, 0)
                highest_overall_sr, best_nn = sr, p
            if ((cm[0][1] == 0) and sr > highest_sr_nofp):
                sp.save(p, "highest_sr_no_false_positives_pcn", cm, sr, learning_rate, training_iterations, 0)
                highest_sr_nofp, best_nn_nofp = sr, p
Code example #21
File: mnist_perceptron.py Project: bnhalder/basic_ml
"""
@author: jabong
"""

import pylab as pl
import numpy as np
import pcn
import pickle
import gzip

f = gzip.open('mnist.pkl.gz', 'rb')
tset, vset, teset = pickle.load(f, encoding='latin1')  # this pickle was written by Python 2
f.close()

nread = 10000
train_in = tset[0][:nread, :]
train_tgt = np.zeros((nread, 10))
for i in range(nread):
    train_tgt[i, tset[1][i]] = 1

test_in = teset[0][:nread, :]
test_tgt = np.zeros((nread, 10))
for i in range(nread):
    test_tgt[i, teset[1][i]] = 1

p = pcn.pcn(train_in, train_tgt)
p.pcntrain(train_in, train_tgt, 0.25, 100)

p.confmat(train_in, train_tgt)

p.confmat(test_in, test_tgt)
Code example #22
File: logical_problem.py Project: RaoulMa/ml
"""
#logical OR
inputs = np.array([[1,1],[1,0],[0,1],[0,0]]) 
targets = np.array([[1],[1],[1],[0]])
"""

#logical XOR
inputs = np.array([[1,1],[1,0],[0,1],[0,0]]) 
targets = np.array([[0],[1],[1],[0]])

"""
#identity matrix
inputs = np.array([[1,1],[1,0],[0,1],[0,0]]) 
targets = np.array([[1,1],[1,0],[0,1],[0,0]])
"""

#use one-layer perceptron
p = pcn.pcn(inputs,targets,0.2,'linear','batch') 
p.pcntrain(10000)
p.confmat(inputs,targets)

#use rbf network
p = rbf.rbf(inputs,targets,4,0,1,0.2,'linear','batch') 
p.rbftrain(10000)
p.confmat(inputs,targets)

#use two-layer perceptron
p = mlpcn.mlpcn(inputs,targets,4,0.2,'linear','batch') 
p.mlptrain(10000)
p.confmat(inputs,targets)
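Unlike the Marsland-style API used in most other examples on this page, this project's pcn, rbf, and mlpcn classes receive the data, learning rate, activation type, and training mode through their constructors, so the train methods take only an iteration count.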
Code example #23
File: project_pcn_script.py Project: sharped/4155
#Train models and get lists of errors for each fold
pcn_train_score = []
pcn_valid_score = []
test_scores = []
for train_indices, valid_indices, test_indices in kf:

    train = iris[train_indices]
    train_tgt = targets[train_indices]

    valid = iris[valid_indices]
    valid_tgt = targets[valid_indices]

    test = iris[test_indices]
    test_tgt = targets[test_indices]

    perceptron = pcn.pcn(train, train_tgt)
    train_scores, valid_scores = perceptron.pcntrainValid(train, train_tgt, valid, valid_tgt, 0.001, 1000)
    pcn_train_score.append(train_scores)
    pcn_valid_score.append(valid_scores)

    test = np.concatenate((test, -np.ones((test.shape[0], 1))), axis=1)
    print "FOLD"
    test_scores.append(perceptron.pcn_score(test, test_tgt))

print(test_scores)
print(np.sum(test_scores))

pcn_train_score = np.array(pcn_train_score)
pcn_valid_score = np.array(pcn_valid_score)

Code example #24
    def test_predict_3(self):
        data3D = np.matrix([[0, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]])
        targets3 = np.array([0, 1, 1, 0])
        test_pcn3 = pcn.pcn(1, iter=30)
        predict3 = np.squeeze(test_pcn3.trainWeights(data3D, targets3))
        np.testing.assert_equal(predict3.tolist(), targets3)
Code example #25
File: irispcn.py Project: nilinswap/neuro-evolution
def irismain():
    #1. make iris.data in usable form
    #2. make input set and output set out of it
    #3. make setpool out of the dataset
    #4. make pcn and train it
    #5. test on validation and testing set

    convert_iris()
    irisdata = np.loadtxt(
        "/home/swapnil/forgit/neuro-evolution/05/dataset/iris/newiris.data",
        delimiter=',')
    order = np.arange(np.shape(irisdata)[0])
    np.random.shuffle(order)
    irisdata = irisdata[order, :]
    nin = 4  # for four features of iris
    nout = 3  # for 3 sets of iris flowers
    minerr = minetaerr = miniteraerr = 10000000  # large sentinel values
    switch = 10  # selects the experiment: eta vs. error, epochs vs. error, etc.
    etalis = []
    valerrlis = []
    niterationslis = []

    #eta=0.49
    eta = .43
    niterations = 400
    if switch == 1:
        for eta in myrange(0.20, 0.5, 0.0005):
            minetaerr = 10000000
            etalis.append(eta)
            for tupoftup in nextpartition(irisdata, nin, nout):
                train, traintarget = tupoftup[0]
                valid, validtarget = tupoftup[
                    1]  #each row of setpool is input and their targets so we need to separate them
                test, testtarget = tupoftup[2]

                #np.concatenate((train,valid),axis=0)
                #np.concatenate((traintarget,validtarget),axis=0)
                #the validation set is of little use for a perceptron, which cannot overfit; neither is early stopping
                net = pcn.pcn(train, traintarget)

                net.pcntrain(train, traintarget, eta, niterations)
                print("below")
                err = net.confmat(valid, validtarget)
                print("\n")
                minetaerr = min(minetaerr, err)
                if minerr >= err:  # '>=' is deliberate: ties keep the most recently trained net
                    minerr = err
                    bestnet = trainedpcn.trainedpcn(net, test, testtarget, err,
                                                    eta, niterations)

            valerrlis.append(
                minetaerr)  # minetaerr is the best error for this eta;
            # minerr is the minimum of minetaerr over all etas
        print("\n best network with eta is attained with eta ", bestnet.eta)
        leasterr = bestnet.test()
        print("changing eta, error on test is %f while on valid  is %f" %
              (leasterr, bestnet.validmeanerr))
        etaarr = np.array(etalis) * np.ones((len(etalis), 1))
        valerrarr = np.array(valerrlis) * np.ones((len(valerrlis), 1))
        pl.plot(etaarr, valerrarr, '.')
        #pl.plot(x,,'o')
        pl.xlabel('eta')
        pl.ylabel('error')

        pl.show()
    elif switch == 2:
        for niterations in range(10, 1000, 10):
            miniteraerr = 10000000
            niterationslis.append(niterations)
            for tupoftup in nextpartition(irisdata, nin, nout):
                train, traintarget = tupoftup[0]
                valid, validtarget = tupoftup[
                    1]  #each row of setpool is input and their targets so we need to separate them
                test, testtarget = tupoftup[2]

                #np.concatenate((train,valid),axis=0)
                #np.concatenate((traintarget,validtarget),axis=0)
                #the validation set is of little use for a perceptron, which cannot overfit; neither is early stopping
                net = pcn.pcn(train, traintarget)

                net.pcntrain(train, traintarget, eta, niterations)
                print("below")
                err = net.confmat(valid, validtarget)
                print("\n")
                miniteraerr = min(miniteraerr, err)
                if minerr > err:
                    minerr = err
                    bestnet = trainedpcn.trainedpcn(net, test, testtarget, err,
                                                    eta, niterations)

            valerrlis.append(miniteraerr)
        print("\n best network with epochs is attained", bestnet.niterations)
        leasterr = bestnet.test()
        print("changing epochs, error on test is %f while on valid  is %f" %
              (leasterr, bestnet.validmeanerr))
        iterarr = np.array(niterationslis) * np.ones((len(niterationslis), 1))
        valerrarr = np.array(valerrlis) * np.ones((len(valerrlis), 1))
        pl.plot(iterarr, valerrarr, '.')
        #pl.plot(x,,'o')
        pl.xlabel('iterations')
        pl.ylabel('error')

        pl.show()
    elif switch == 3:
        for eta in myrange(0.25, 0.5, 0.0005):
            minetaerr = 10000000
            etalis.append(eta)
            for niterations in range(10, 1000, 50):
                miniteraerr = 10000000

                for tupoftup in nextpartition(irisdata, nin, nout):
                    train, traintarget = tupoftup[0]
                    valid, validtarget = tupoftup[
                        1]  #each row of setpool is input and their targets so we need to separate them
                    test, testtarget = tupoftup[2]

                    #np.concatenate((train,valid),axis=0)
                    #np.concatenate((traintarget,validtarget),axis=0)
                    #the validation set is of little use for a perceptron, which cannot overfit; neither is early stopping
                    net = pcn.pcn(train, traintarget)

                    net.pcntrain(train, traintarget, eta, niterations)
                    print("below")
                    err = net.confmat(valid, validtarget)
                    print("\n")
                    miniteraerr = min(miniteraerr, err)
                    if minerr > err:
                        minerr = err
                        bestnet = trainedpcn.trainedpcn(
                            net, test, testtarget, err, eta, niterations)

        print("\n best network  is attained, with epochs and eta",
              bestnet.niterations, bestnet.eta)
        leasterr = bestnet.test()
        print("changing epochs, error on test is %f while on valid  is %f" %
              (leasterr, bestnet.validmeanerr))

        #pl.plot(iterarr,valerrarr,'.')
        #pl.plot(x,,'o')
        #pl.xlabel('iterations')
        #pl.ylabel('error')

        #pl.show()
    elif switch == 4:
        lis = []
        for eta in myrange(0.3, 0.55, 0.0005):
            minetaerr = 10000000
            flag = 0
            for niterations in range(10, 1000, 50):
                miniteraerr = 10000000
                for tupoftup in nextpartition(irisdata, nin, nout):
                    train, traintarget = tupoftup[0]
                    valid, validtarget = tupoftup[
                        1]  #each row of setpool is input and their targets so we need to separate them
                    test, testtarget = tupoftup[2]

                    #np.concatenate((train,valid),axis=0)
                    #np.concatenate((traintarget,validtarget),axis=0)
                    #the validation set is of little use for a perceptron, which cannot overfit; neither is early stopping
                    net = pcn.pcn(train, traintarget)

                    net.pcntrain(train, traintarget, eta, niterations)
                    print("below")
                    err = net.confmat(valid, validtarget)
                    print("\n")
                    miniteraerr = min(miniteraerr, err)

                    if minerr > err:
                        minerr = err
                        bestnet = trainedpcn.trainedpcn(
                            net, test, testtarget, err, eta, niterations)
                if miniteraerr < 0.07:
                    tempiter = niterations
                    flag = 1
                    break
            if not flag:
                tempiter = 4000
            lis.append((eta, tempiter))
        niterationslis = [i[1] for i in lis]
        etalis = [i[0] for i in lis]
        iterarr = np.array(niterationslis) * np.ones((len(niterationslis), 1))
        etaarr = np.array(etalis) * np.ones((len(etalis), 1))
        pl.plot(etaarr, iterarr, '.')
        #pl.plot(x,,'o')
        pl.xlabel('eta')
        pl.ylabel('iter')

        pl.show()

    else:
        miniteraerr = 10000000
        #niterationslis.append(niterations)
        for tupoftup in nextpartition(irisdata, nin, nout):
            train, traintarget = tupoftup[0]
            valid, validtarget = tupoftup[
                1]  #each row of setpool is input and their targets so we need to separate them
            test, testtarget = tupoftup[2]

            #np.concatenate((train,valid),axis=0)
            #np.concatenate((traintarget,validtarget),axis=0)
            #the validation set is of little use for a perceptron, which cannot overfit; neither is early stopping
            net = pcn.pcn(train, traintarget)

            net.pcntrain(train, traintarget, eta, niterations)
            print("below")
            err = net.confmat(valid, validtarget)
            print("\n")

            if minerr > err:
                minerr = err
                bestnet = trainedpcn.trainedpcn(net, test, testtarget, err,
                                                eta, niterations)

        print("\n best network  is attained")
        leasterr = bestnet.test()
        print(" error on test is %f while on valid  is %f" %
              (leasterr, bestnet.validmeanerr))
Code example #26
pima[np.where(pima[:, 7] <= 30), 7] = 1
pima[np.where((pima[:, 7] > 30) & (pima[:, 7] <= 40)), 7] = 2
pima[np.where((pima[:, 7] > 40) & (pima[:, 7] <= 50)), 7] = 3
pima[np.where((pima[:, 7] > 50) & (pima[:, 7] <= 60)), 7] = 4
pima[np.where(pima[:, 7] > 60), 7] = 5

pima[:, :8] = pima[:, :8] - pima[:, :8].mean(axis=0)
pima[:, :8] = pima[:, :8] / pima[:, :8].var(axis=0)

inputs1 = pima[::2, :8]
inputs2 = pima[1::2, :8]
targets1 = pima[::2, 8:9]
targets2 = pima[1::2, 8:9]

# Perceptron training on the preprocessed dataset
p1 = pcn.pcn(inputs1, targets1)
p1.pcntrain(inputs1, targets1, 0.25, 100)
cm1 = p1.confmat(inputs2, targets2)
p2 = pcn.pcn(inputs2, targets2)
p2.pcntrain(inputs2, targets2, 0.25, 100)
cm2 = p2.confmat(inputs1, targets1)
cm = cm1 + cm2
print("Perceptron classification accuracy: ")
print(np.trace(cm) / np.sum(cm))

# Linear regression on the preprocessed dataset
beta1 = linreg.linreg(inputs1, targets1)
beta2 = linreg.linreg(inputs2, targets2)
inputs1 = np.concatenate((inputs1, -np.ones((np.shape(inputs1)[0], 1))),
                         axis=1)
inputs2 = np.concatenate((inputs2, -np.ones((np.shape(inputs2)[0], 1))),
                         axis=1)
Code example #27
    def test_initializeneurons(self):
        test_pcn3 = pcn.pcn(1)
        test_pcn3.initializeNeurons(np.shape(self.data)[1])
        self.assertEqual(np.shape(test_pcn3.matNeurons[0].weights), (3, 1))
Code example #28
File: logic.py Project: kikoval/Neural-networks
# Stephen Marsland, 2008
# Kristian Valentin, 2011

# Demonstration of the Perceptron and Linear Regressor on the basic logic functions

from numpy import array
import pcn

inputs = array([[0,0],[0,1],[1,0],[1,1]],dtype=float)
# AND data
ANDtargets = array([[0],[0],[0],[1]])
# OR data
ORtargets = array([[0],[1],[1],[1]])
# XOR data
XORtargets = array([[0],[1],[1],[0]])

print "AND logic function"
p = pcn.pcn(inputs,ANDtargets)
p.pcntrain(0.25,16,True)

print "=" * 20
print "OR logic function"
p = pcn.pcn(inputs,ORtargets)
p.pcntrain(0.25,6,True)

print "=" * 20
print "XOR logic function"
p = pcn.pcn(inputs,XORtargets)
p.pcntrain(0.25,6,True)
Code example #29
from numpy import *
import pcn

# OR data
inputs = array([[0, 0], [0, 1], [1, 0], [1, 1]])
targets = array([[0], [1], [1], [1]])

p = pcn.pcn(inputs, targets)
p.pcntrain(inputs, targets, 0.25, 6)
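Since OR is linearly separable, the perceptron convergence theorem guarantees that this training loop settles on a separating weight vector; six epochs at eta = 0.25 are typically plenty for this four-point dataset.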
Code example #30
File: MySpamPcn.py Project: syzorr/spam-filter
    def __init__(self, inputs, targets):
        self.spam = pcn.pcn(inputs, targets)
Code example #31
import os
import matplotlib.pyplot as pl
import numpy as np
import pcn

os.chdir(os.path.join(os.path.dirname(__file__), 'pimaData/'))
pima = np.loadtxt('pima-indians-diabetes.data', delimiter=',')
#np.shape(pima)
indices0 = np.where(pima[:,8] == 0) #Where the class is 0
indices1 = np.where(pima[:,8] == 1) #Where the class is 1


pl.plot(pima[indices0,0],pima[indices0,1],'go')
pl.plot(pima[indices1,0],pima[indices1,1],'rx')

p = pcn.pcn(pima[:,:8], pima[:,8:9])
p.pcntrain(pima[:,:8],pima[:,8:9],0.25,100)
p.confmat(pima[:,:8],pima[:,8:9])

trainin = pima[::2,:8]
testin = pima[1::2,:8]
traintgt = pima[::2, 8:9]
testtgt = pima[1::2,8:9]



pl.show()
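Note that the odd/even train/test split built at the end (trainin, testin, traintgt, testtgt) is never used in this excerpt; the training run and confusion matrix above both operate on the full dataset.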
Code example #32
File: pima.py Project: rrzatkie/ML_Algo_Persp
import pylab as pl
import numpy as np
import pcn

pima = np.loadtxt('C:\\work\\Tutorials\\ml-alg-perp\\MarslandMLAlgo\\Data\\pima-indians-diabetes.data',delimiter=',')

# Plot the first and second values for the two classes
indices0 = np.where(pima[:,8]==0)
indices1 = np.where(pima[:,8]==1)

pl.ion()
pl.plot(pima[indices0,0],pima[indices0,1],'go')
pl.plot(pima[indices1,0],pima[indices1,1],'rx')
pl.show(block=True)
#%%
# Perceptron training on the original dataset
print "Output on original data"
p = pcn.pcn(pima[:,:8],pima[:,8:9])
p.pcntrain(pima[:,:8],pima[:,8:9],0.25,100)
p.confmat(pima[:,:8],pima[:,8:9])

# Various preprocessing steps
pima[np.where(pima[:,0]>8),0] = 8

pima[np.where(pima[:,7]<=30),7] = 1
pima[np.where((pima[:,7]>30) & (pima[:,7]<=40)),7] = 2
pima[np.where((pima[:,7]>40) & (pima[:,7]<=50)),7] = 3
pima[np.where((pima[:,7]>50) & (pima[:,7]<=60)),7] = 4
pima[np.where(pima[:,7]>60),7] = 5

pima[:,:8] = pima[:,:8]-pima[:,:8].mean(axis=0)
pima[:,:8] = pima[:,:8]/pima[:,:8].var(axis=0)
Code example #33
File: somtwo.py Project: jackzkdavies/foobar-
for i in range(13):
    where = np.nonzero(test_tgt[:,i] == 1)[0]  # pl.find was removed from newer matplotlib
    pl.plot(net.map[0,best[where]],net.map[1,best[where]],markers[i],ms=10)
   
pl.axis([-0.1,1.1,-0.1,1.1])
pl.axis('on')
   
   
# pl.show()

actsTrain = actsTrain/actsTrain.max(axis=0) 
actsTest = actsTest/actsTest.max(axis=0)


import pcn
p = pcn.pcn(actsTrain, train_tgt)
p.pcntrain(actsTrain, train_tgt,0.25,500)
p.confmat(actsTrain,train_tgt)

# p = pcn.pcn(actsTest, test_tgt)
# p.pcntrain(actsTest, test_tgt,0.25,100)
# p.confmat(actsTest,test_tgt)