Example #1
def solve3(data):
    Kp = polyKernel
    Ke = expKernel

    test = data
    # d = 5 is the best, discovered in problem 2
    wp, lp = perceptron(test, 1, Kp, {'d': 5})
    print "Polynomial Kernel computation finished"
    we, le = perceptron(test, 1, Ke, {'sigma': 10})
    print "Exponential Kernel computation finished"
    return (lp, le)
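The kernels referenced above are defined elsewhere; a minimal sketch of what they plausibly compute, given the {'d': 5} and {'sigma': 10} parameters passed in (the exact forms are assumptions):

import numpy as np

def polyKernel(x, y, params):
    # degree-d polynomial kernel, assumed form (x . y + 1)^d
    return (np.dot(x, y) + 1) ** params['d']

def expKernel(x, y, params):
    # RBF-style exponential kernel with bandwidth sigma, assumed form
    diff = np.asarray(x) - np.asarray(y)
    return np.exp(-np.dot(diff, diff) / (2 * params['sigma'] ** 2))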
Example #2
def main(train_set, test_set, n_iter=2):
    split("yelp_cat.csv")
    n_iter = int(n_iter)
    X_train, X_test, Y_train, Y_test = load(train_set, test_set)
    p = perceptron(n_iter, X_train, Y_train)
    p.train()
    print("ZERO-ONE LOSS=" + str(p.test(X_test, Y_test)))
Example #3
    def __init__(self,
                 number_classes,
                 learning_rate=0.01,
                 max_epoch=100,
                 cut_error=None):
        """

        :param number_classes: number of different classes(labels)
        :param learning_rate: learning rate for training Perceptrons of one vs all
        :param max_epoch: maximum number of epochs that perceptron is
                        allowed to use for training
        :param cut_error: stop training if error fall under specific
                value, if it's none don't consider it
        """
        self.__number_of_class = number_classes
        self.__learning_rate = learning_rate
        self.__maximum_epoch = max_epoch
        self.__perceptrons = []
        # stop training if the error falls below this value; if it's None, it is ignored
        self.__cut_error = cut_error

        # initialize 'number_classes' perceptrons
        for i in range(self.__number_of_class):
            self.__perceptrons.append(
                perceptron(learning_rate=learning_rate,
                           max_epoch=max_epoch,
                           cut_error=cut_error))
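A matching predict step is not shown; a minimal sketch of one-vs-all prediction, assuming each trained perceptron exposes a score() method returning its net activation (a hypothetical name):

    def predict(self, x):
        # pick the class whose one-vs-all perceptron responds most strongly
        scores = [p.score(x) for p in self.__perceptrons]
        return scores.index(max(scores))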
Example #4
    def __init__(self, position, nInputs, id='none'):
        player.__init__(self, position, id)

        self.nInputs = nInputs
        self.brain = perceptron(nInputs, 0.1)
        self.target = list()
        self.error = vector(0, 0)
Example #5
def run_perceptron(n_train=10, noisy=None):
    print("n_train = ", n_train)
    n_rep = 1000  #  number of replicates
    n_test = 1000

    its = 0
    e_train = 0
    e_test = 0

    for i in range(n_rep):
        x, y, w_f = mkdata(n_train + n_test, noisy)
        x_train = x[:, :n_train]
        y_train = y[:, :n_train]
        x_test = x[:, n_train:]
        y_test = y[:, n_train:]

        w_g, it = perceptron(x_train, y_train)
        its += it

        x_test = add_bias(x_test)
        x_train = add_bias(x_train)

        e_train += np.where(y_train *
                            (w_g.T @ x_train) < 0)[0].shape[0] / n_train
        e_test += np.where(y_test * (w_g.T @ x_test) < 0)[0].shape[0] / n_test

    print('E_train is %f, E_test is %f' % (e_train / n_rep, e_test / n_rep))
    print('Average number of iterations is %d.\n' % (its / n_rep))
    plotdata(x, y, w_f, w_g, 'Perceptron')
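mkdata, add_bias, and plotdata are helpers from the surrounding module; since samples are stored one per column here, add_bias presumably prepends a row of ones so that w_g[0] acts as the bias term (an assumption):

import numpy as np

def add_bias(x):
    # prepend a bias row of ones; x holds one sample per column
    return np.vstack([np.ones((1, x.shape[1])), x])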
Example #6
def main():
    print("Perceptron main instructions")
    data_set_name = sys.argv[1]
    number_features = int(sys.argv[2])
    number_epochs = int(sys.argv[3])
    print(" Data Set Name = ", data_set_name)
    print(" Features in Data Set  =  ", number_features)
    print(" Number of Epochs  =  ", number_epochs)
    #drop_this = 'Iris-virginica'
    drop_this = 'Iris-setosa'
    data_set = process_dataset(data_set_name, number_features, drop_this)
    #data_set = process_dataset(data_set_name, number_features)

    iris_p = perc.perceptron(number_epochs, number_features)

    train_set, test_set = iris_p.train_and_test(data_set)
    label0 = 'Iris-versicolor'
    x_train, y_train = iris_p.prepare_xy(train_set, label0)
    x_test, y_test = iris_p.prepare_xy(test_set, label0)

    pars, epochs = iris_p.find_pars(x_train, y_train)
    print(" Epochs completed = ", epochs)

    score = iris_p.score(pars, x_test, y_test)
    print("   SCORE =  ", score)

    xsample = np.array([4.9, 3.0, 1.4, 0.2])  # Iris Setosa
    print(iris_p.predict(xsample, pars))

    xsample = np.array([5.1, 2.5, 3.0, 1.1])  # Iris Versicolour
    print(iris_p.predict(xsample, pars))
Example #7
def ex_AND():
    import perceptron

    a = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]])
    AND_in = a[:, 0:2]
    AND_out = a[:, 2:]
    p = perceptron.perceptron(AND_in, AND_out)
    p.train(AND_in, AND_out, 0.25, 10)
    p.confmat(AND_in, AND_out)
Example #8
def ex_XOR():
    import perceptron

    # XOR is not linearly separable, so a single-layer perceptron
    # cannot learn this mapping exactly
    a = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]])
    XOR_in = a[:, 0:2]
    XOR_out = a[:, 2:]
    p = perceptron.perceptron(XOR_in, XOR_out)
    p.train(XOR_in, XOR_out, 0.25, 10)
    p.confmat(XOR_in, XOR_out)
Example #9
def begin(learning_rate, iteration):
    data = perceptron.readfile()  # read the data file
    train, test, noiseData = perceptron.preprocess(data)  # preprocessing
    train_accuracy, test_accuracy, final_weight = perceptron.perceptron(
        train, test, learning_rate, iteration)  # run the perceptron

    perceptron.plotData(train, final_weight, window, 'Training',
                        noiseData)  # plot the training data
    perceptron.plotData(test, final_weight, window, 'Testing',
                        noiseData)  # plot the test data
    show_result(train_accuracy, test_accuracy, final_weight)  # show accuracy and weights
Example #10
def solve2(data):
    K = polyKernel
    degs = [1, 3, 5, 7, 10, 15, 20]
    train = data
    result = {}
    for d in degs:
        weights, loss = perceptron(train, 1, K, {'d': d})
        result[d] = loss[-1]
        sys.stdout.write("%d finished\n" % d)
        sys.stdout.flush()
    return result
Example #11
def q2():
    dataset = populate_set_with_data('train', limit=100)
    weights = perceptron(dataset)

    # Use learned weights to estimate y's
    estimates = estimate_tags(dataset, weights)
    print("Training error rate: %0.4f" % error_rate(dataset, estimates))

    # Now check error rate against test set.
    test_set = populate_set_with_data('test', limit=1000)
    test_estimates = estimate_tags(test_set, weights)
    print("Test error rate: %0.4f" % error_rate(test_set, test_estimates))
Example #12
def problem2():
    print("=============================================")
    print("loading perceptron data...")
    perceptron_data = np.loadtxt("../dataset/perceptronData.txt")

    X, y = gd.extractData(perceptron_data)

    w = pcpt.perceptron(X, y)

    print("Classifier weights: ", w)
    print("Normalized with threshold: ", w[1:] / -w[0])
    print("=============================================")
Example #13
def traingate(w, gate, lc):
    a = p.perceptron(w, lc)  # perceptron with the given weights and learning constant
    c = 0  # count of training iterations
    iper = gate()  # the gate you want to mimic
    while not equivalent(iper, a):
        inputs = np.array([1, rbit(), rbit()])
        a.train(inputs, iper.feedforward(inputs))
        c += 1
        if c == 100000:
            print("No solution found.")
            return iper
    print("Gate trained in " + str(c) + " steps.")
    return a
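rbit and equivalent are helpers defined elsewhere; a plausible sketch, assuming rbit draws a random bit and equivalent compares the two units on all four gate inputs:

import numpy as np

def rbit():
    # a random bit, 0 or 1
    return np.random.randint(0, 2)

def equivalent(gate, candidate):
    # compare outputs on every boolean input pair; the leading 1 is the bias input
    for x1 in (0, 1):
        for x2 in (0, 1):
            v = np.array([1, x1, x2])
            if gate.feedforward(v) != candidate.feedforward(v):
                return False
    return True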
Example #14
def search_for_parameter():
    n_bis = [100000, 200000, 500000]
    steps = [5e-2, 1e-1, 5e-1, 1]
    n_epoch = 5

    for n_bi in n_bis:
        for step in steps:
            print('-' * 20)
            print(n_bi, ' ', step)
            config['n_bigram'] = n_bi
            config['smooth'] = step
            corpus = Corpus(config['data_dir'])
            pct = perceptron()
            for cnt in range(n_epoch):
                train(corpus.trainSet, pct)
            test(corpus.testSet, pct)
Example #15
def start_trainning():
    """ Faz o treinamento de acordo com os inputs e outputs e mostra os pesos """
    data = get_train_data()
    chosen = choice.currentText()

    if chosen == "Hebb":
        weight = hebb_rule(data)
    elif chosen == "Perceptron":
        weight = perceptron(data)
    else:
        weight = "err"

    get_lbl("weightLabel_1").setText(f"W{sub('1')}: {weight[0]}")
    get_lbl("weightLabel_2").setText(f"W{sub('2')}: {weight[1]}")
    get_lbl("weightLabel_B").setText(f"W{sub('B')}: {weight[2]}")
    store(weight)
Example #16
def train_and_test_perceptron():
    """
    Produce a random "True" line, throw some random points and classify
    them based on "true" line.
    Train a perceptron based on the data.
    Test perceptron output vs "true" function.
    """
    # Random points
    training_sample, true_values, fm, fb = throw_random_points(100)

    # Train perceptron
    gm, gb, number_iterations, g_w = perceptron(training_sample, true_values)

    # compare "true" vs " calculated" areas
    bad_area = compare_areas.area_misclassified(fm, fb, g_w)
    return fm, fb, gm, gb, training_sample, true_values, number_iterations, bad_area, g_w
Example #17
def trainingNeuron(trainingTimes):
    neuron = perceptron()
    training_inputs = np.array([[0, 0, 1],
                                [1, 1, 1],
                                [1, 0, 1],
                                [0, 1, 1]])

    training_outputs = np.array([[0, 1, 1, 0]]).T

    neuron.setSeed(1)
    neuron.setAmountVariables(3)

    print("Synaptic weights:", neuron.synaptic_weigths)

    neuron.trainPerceptron(training_inputs, training_outputs, trainingTimes)

    print("Synaptic weights after training:", neuron.synaptic_weigths)
    print("Trained outputs:", neuron.outputs)
    return neuron
Example #18
def animate(i):
    """
    update function for animation module
    i : frame number
    """
    for lnum, line in enumerate(lines):
        if lnum == 0:
            theta, theta_0 = perceptron(features, labels, T=i + 1)
            y = -(theta[0] * x + theta_0) / (theta[1] + 1e-16)
            line.set_ydata(y)  # update the data.
        elif lnum == 1:
            theta, theta_0 = average_perceptron(features, labels, T=i + 1)
            y = -(theta[0] * x + theta_0) / (theta[1] + 1e-16)
            line.set_ydata(y)  # update the data.
        elif lnum == 2:
            theta, theta_0 = pegasos(features, labels, T=i + 1, L=0.2)
            y = -(theta[0] * x + theta_0) / (theta[1] + 1e-16)
            line.set_ydata(y)  # update the data.

    ax.set_title('epoch = {}'.format(str(i + 1)))
    return lines
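A sketch of how this update function might be driven, assuming fig, lines, features, and labels are already set up as the snippet implies:

import matplotlib.animation as animation
import matplotlib.pyplot as plt

# animate(i) is called once per frame; 25 frames, 300 ms apart
ani = animation.FuncAnimation(fig, animate, frames=25, interval=300)
plt.show()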
Example #19
def main():
    number_tr = 2000
    number_ts = 1000
    radius = 10
    distance = 0
    width = 7
    number_samp = number_tr + number_ts

    print("initalizing half-moon\n")
    data =  halfmoon_shuffle(radius,width,distance,number_samp)

    print("number of samples generated= %d\n" %number_samp)
    print("radius= %2.1f\n" % radius)
    print("width of the half-moon=%2.1f\n" %width)
    print("distance=%2.1f\n" %distance)

    learn_rate = 0.01
    epochs = 50
    neuron = 2
    bias = distance/2

    perecep = perceptron(data,learn_rate,neuron,bias,epochs)
    perecep.train(number_tr)
    perecep.test(number_ts,number_tr)
Example #20
# assumes runs, N, target, and evaluate come from the surrounding module
from numpy import mean, ones, sign, zeros
from numpy.random import uniform

def run():
    iters = zeros((runs, 1))
    diffs = zeros((runs, 1))

    for i in range(runs):

        p1 = uniform(-1, 1, 2)
        p2 = uniform(-1, 1, 2)

        f = target(p1, p2)

        X = ones((N, 3))

        X[:, 1:] = uniform(-1, 1, (N, 2))

        y = sign(f(X[:, 1]) - X[:, 2])

        w = zeros(3)

        iters[i], wt = perceptron(X, y, w)

        diffs[i] = evaluate(f, wt, 1000)

    return (mean(iters), mean(diffs))
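target is defined elsewhere; a plausible sketch consistent with y = sign(f(X[:, 1]) - X[:, 2]), namely the line through the two random points (an assumption):

def target(p1, p2):
    # the line through p1 and p2; points are labeled by which side they fall on
    slope = (p2[1] - p1[1]) / (p2[0] - p1[0])
    intercept = p1[1] - slope * p1[0]
    return lambda x: slope * x + intercept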
Example #21
def q3():
    errors = []
    for tlimit in range(100, 1001, 100):
        dataset = populate_set_with_data('train', limit=tlimit)
        weights = perceptron(dataset)

        # Use learned weights to estimate y's
        estimates = estimate_tags(dataset, weights)
        print("Training error rate, %i samples: %0.4f" %
              (tlimit, error_rate(dataset, estimates)))

        # Now check error rate against test set.
        test_set = populate_set_with_data('test', limit=1000)
        test_estimates = estimate_tags(test_set, weights)
        err = error_rate(test_set, test_estimates)
        errors.append(err)
        print("Test error rate (trained on %i samples): %0.4f" %
              (tlimit, err))

    # Plot
    plt.plot(range(100, 1001, 100), errors)
    g = plt.gcf()
    g.set_size_inches((12, 16))
    plt.savefig("errors.png")
Example #22
def nandgate():
    return p.perceptron(np.array([3, -2, -2]), 0)
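# with inputs presented as [1, x1, x2] (leading 1 as bias, threshold 0, as in
# the traingate example above): 3 - 2*x1 - 2*x2 > 0 for every input pair
# except (1, 1), which is exactly the NAND truth table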
Example #23
    def __init__(self, numpy_rng=None, theano_rng=None, n_inputs=None,
                 hidden_layers_sizes=None,
                 corruption_levels=[0.1, 0.1],
                 dA_initiall=True,
                 error_known=True,
                 method=None,
                 problem=None):

        self.n_layers = len(hidden_layers_sizes)
        self.n_inputs = n_inputs
        self.hidden_layers_sizes = hidden_layers_sizes
        self.error_known = error_known
        self.method = method
        self.problem = problem
        
        assert self.n_layers > 2

        if not numpy_rng:
            numpy_rng = numpy.random.RandomState(123)
               
        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))

        self.x = T.matrix('x')       
        self.mask = T.matrix('mask')


        ### encoder_layers ####
        
        self.encoder_layers = []
        self.encoder_params = []
        self.dA_layers=[]
        for i in range(self.n_layers):

            if i == 0:
                input_size = self.n_inputs
                layer_input = self.x
                corruption = True
            else:
                input_size = self.hidden_layers_sizes[i - 1]
                layer_input = self.encoder_layers[-1].output
                corruption = False

            act_func = T.tanh

            self.encoder_layer = perceptron(rng=numpy_rng,
                                            theano_rng=theano_rng,
                                            input=layer_input,
                                            n_in=input_size,
                                            n_out=self.hidden_layers_sizes[i],
                                            activation=act_func,
                                            first_layer_corrup=corruption)

            if dA_initiall:
                dA_layer = dA(numpy_rng=numpy_rng,
                              theano_rng=theano_rng,
                              input=layer_input,
                              n_visible=input_size,
                              n_hidden=hidden_layers_sizes[i],
                              W=self.encoder_layer.W,
                              bhid=self.encoder_layer.b,
                              method = self.method)
                
                self.dA_layers.append(dA_layer)
            
            self.encoder_layers.append(self.encoder_layer)
            self.encoder_params.extend(self.encoder_layer.params)

 


        ### decoder_layers ####

        self.decoder_layers = []
        self.decoder_params = []

        self.reverse_layers = self.encoder_layers[::-1]
        # self.reverse_da = self.dA_layers[::-1]

        decode_hidden_sizes = list(reversed(self.hidden_layers_sizes))

        for i, j in enumerate(decode_hidden_sizes):

            input_size = j
            if i == 0:
                layer_input = self.reverse_layers[i].output
            else:
                layer_input = self.decoder_layers[-1].output

            if i == len(decode_hidden_sizes) - 1:
                n_out = self.n_inputs
                # linear output for regression, sigmoid otherwise
                act_func = None if self.problem == 'regression' else T.nnet.sigmoid
            else:
                n_out = decode_hidden_sizes[i + 1]
                act_func = T.tanh
            
            self.decoder_layer = perceptron(rng=numpy_rng,
                                            input=layer_input,
                                            n_in=input_size,
                                            n_out=n_out,
                                            W=self.reverse_layers[i].W,
                                            b=None,
                                            activation=act_func,
                                            decoder=True)

            self.decoder_layers.append(self.decoder_layer)
            self.decoder_params.append(self.decoder_layer.b)
            
            
        self.network_layers = self.encoder_layers + self.decoder_layers
        self.params = self.encoder_params + self.decoder_params
        print(self.params)
Example #24
from perceptron import perceptron
import numpy as np

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])
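# note: this is the XOR truth table, which is not linearly separable,
# so a single-layer perceptron cannot reach zero error on it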

print("[INFO] training perceptron...")
p = perceptron(X.shape[1], alpha=0.1)
p.fit(X, y, epochs=20)

print("[INFO] training perceptron...")

for (x, target) in zip(X, y):

    pred = p.predict(x)
    print("[INFO] data={}, ground-truth={}, pred={}".format(
        x, target[0], pred))
Example #25
#Cyrus Burt |JAN 2019| MIT LICENSE

#Very simple test to see if the perceptron works
#Tests if the perceptron can classify X OR Y

import perceptron
import pandas as pd
import matplotlib.pyplot as plt
import random
from sklearn.utils import shuffle

model = perceptron.perceptron(0.7)

data = pd.read_csv("cars.data")

door_num = list(data.iloc[0:, 2])

safety = list(data.iloc[0:, 5])

#data = shuffle(data)

plt.plot(door_num, safety, linestyle="None", marker=".")
plt.show()
Example #26
# plt.scatter(OXs[:, 0], OXs[:, 1], c=OYs, s=size, marker='o', cmap=plt.cm.Paired)

plt.title('A. Initial Word Vectors')
frame = pylab.gca()
pylab.ylim([-3.5, 3.5])
pylab.xlim([-3.5, 3.5])
frame.axes.get_yaxis().set_ticks([])
frame.axes.get_xaxis().set_ticks([])
# plt.legend(['fruit', 'animal', 'tool', 'movie'],['fruit', 'animal', 'tool', 'movie'], bbox_to_anchor=(1, 0, 0, 0), scatterpoints=1)

OX2 = np.copy(OXs)
OX3 = np.copy(OXs)
OX1 = OXs

W = np.ones((input_dim, output_dim))
W, Xs = perceptron(OX1, OYs, W, lr, n_iter)
x_min, x_max = Xs[:, 0].min() - gap, Xs[:, 0].max() + gap
y_min, y_max = Xs[:, 1].min() - gap, Xs[:, 1].max() + gap
x_min, x_max, y_min, y_max = -3.5, 3.5, -3.5, 3.5
xx, yy = np.meshgrid(np.arange(x_min, x_max, fineness),
                     np.arange(y_min, y_max, fineness))
Z = np.c_[xx.ravel(), yy.ravel()]
predicate = np.argmax(np.dot(Z, W), 1)
predicate = predicate.reshape(xx.shape)
plt.subplot(2, 4, 2)
plt.contourf(xx, yy, predicate, alpha=0.4, cmap=plt.cm.Paired)
plt.scatter(Xs[:, 0],
            Xs[:, 1],
            c=OYs,
            s=size,
            label="test",
            marker='o',
            cmap=plt.cm.Paired)
Example #27
    def __init__(self, numpy_rng=None, theano_rng=None, n_inputs=None,
                 hidden_layers_sizes=None,
                 corruption_levels=[0.1, 0.1],
                 dA_initiall=True,
                 error_known=True,
                 method=None,
                 problem=None,
                 activ_fun=None,
                 drop=None,
                 regu_l1=None,
                 regu_l2=None):

        self.activ_fun = activ_fun  # e.g. T.arctan or T.tanh

        self.n_layers = len(hidden_layers_sizes)
        self.n_inputs = n_inputs
        self.hidden_layers_sizes = hidden_layers_sizes
        self.error_known = error_known
        self.method = method
        self.problem = problem
        self.drop = drop
        self.regu_l1 = regu_l1
        self.regu_l2 = regu_l2
        # assert self.n_layers >= 2

 
        self.x = T.matrix('x')       
        self.mask = T.matrix('mask')


        ### encoder_layers ####
        
        self.encoder_layers = []
        self.encoder_params = []
        self.dA_layers=[]
        for i in range(self.n_layers):

            if i == 0:
                input_size = self.n_inputs
                layer_input = self.x
                corruption = True
            else:
                input_size = self.hidden_layers_sizes[i - 1]
                layer_input = self.encoder_layers[-1].output
                corruption = False

            self.encoder_layer = perceptron(input=layer_input,
                                            n_in=input_size,
                                            n_out=self.hidden_layers_sizes[i],
                                            activation=activ_fun,
                                            first_layer_corrup=corruption,
                                            drop=self.drop[i])

            if dA_initiall:
                dA_layer = dA(numpy_rng=numpy_rng,
                              theano_rng=theano_rng,
                              input=layer_input,
                              n_visible=input_size,
                              n_hidden=hidden_layers_sizes[i],
                              W=self.encoder_layer.W,
                              bhid=self.encoder_layer.b,
                              method=self.method,
                              activation=activ_fun,
                              regu_l1=self.regu_l1,
                              regu_l2=self.regu_l2)
                
                self.dA_layers.append(dA_layer)
            
            self.encoder_layers.append(self.encoder_layer)
            self.encoder_params.extend(self.encoder_layer.params)

 


        ### decoder_layers ####

        self.decoder_layers = []
        self.decoder_params = []
        self.drop.reverse()

        self.reverse_layers = self.encoder_layers[::-1]
        # self.reverse_da = self.dA_layers[::-1]

        decode_hidden_sizes = list(reversed(self.hidden_layers_sizes))

        for i, j in enumerate(decode_hidden_sizes):

            input_size = j
            if i == 0:
                layer_input = self.reverse_layers[i].output
            else:
                layer_input = self.decoder_layers[-1].output

            if i == len(decode_hidden_sizes) - 1:
                n_out = self.n_inputs
                # linear output for regression, sigmoid otherwise
                act_func = None if self.problem == 'regression' else T.nnet.sigmoid
            else:
                n_out = decode_hidden_sizes[i + 1]
                act_func = activ_fun
            
            self.decoder_layer = perceptron(input=layer_input,
                                            n_in=input_size,
                                            n_out=n_out,
                                            W=self.reverse_layers[i].W,
                                            b=None,
                                            activation=act_func,
                                            decoder=True,
                                            drop=None)  # drop disabled on the decoder side

            self.decoder_layers.append(self.decoder_layer)
            self.decoder_params.append(self.decoder_layer.b)
            
            
        self.network_layers = self.encoder_layers + self.decoder_layers
        self.params = self.encoder_params + self.decoder_params
Example #28
import numpy as np
from perceptron import perceptron

data = np.array([[0, 0, 0], [1, 1, 1]])
w, E, k = perceptron(data)
print(w)
print('E=%d k=%d' % (E, k))
Example #29
import csv

import matplotlib.pyplot as plt
import numpy as np

# assumes perceptron() and hyperplane() come from the accompanying module
X = []
t = []
with open('Chapter 3 - Exercise 9/data.csv', 'r', newline='') as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    for row in reader:
        if row[0] == "x1" and row[1] == "x2" and row[2] == "label":
            continue
        else:
            X.append([float(row[0]), float(row[1])])
            t.append(float(row[2]))

X = np.array(X)
t = np.array(t)

###############################################################################

W = perceptron(X, t)

###############################################################################

x_line, Y = hyperplane(np.min(X[:, 0]), np.max(X[:, 0]), W)

###############################################################################

fig, ax = plt.subplots()
ax.set_xlim(np.min(X[:, 0]), np.max(X[:, 0]))
ax.set_ylim(np.min(X[:, 1]), np.max(X[:, 1]))
ax.grid(True)
ax.set_xlabel("x1")
ax.set_ylabel("x2")
ax.set_title("Demonstration of Perceptron Algorithm")
ax.plot(X[t == 1, 0], X[t == 1, 1], 'x')
Example #30
pylab.xlim([-3.5,3.5])
frame.axes.get_yaxis().set_ticks([])
frame.axes.get_xaxis().set_ticks([])
# plt.legend(['fruit', 'animal', 'tool', 'movie'],['fruit', 'animal', 'tool', 'movie'], bbox_to_anchor=(1, 0, 0, 0), scatterpoints=1)


OX2 = np.copy(OXs)
OX3 = np.copy(OXs)
OX1 = OXs

W = np.ones((input_dim, output_dim))
W, Xs = perceptron(OX1, OYs, W, lr, n_iter)
x_min, x_max = Xs[:, 0].min() - gap, Xs[:, 0].max() + gap
y_min, y_max = Xs[:, 1].min() - gap, Xs[:, 1].max() + gap
x_min, x_max, y_min, y_max = -3.5, 3.5, -3.5, 3.5
xx, yy = np.meshgrid(np.arange(x_min, x_max, fineness),
                     np.arange(y_min, y_max, fineness))
Z = np.c_[xx.ravel(), yy.ravel()]
predicate = np.argmax(np.dot(Z, W), 1)
predicate = predicate.reshape(xx.shape)
plt.subplot(2, 4, 2)
plt.contourf(xx, yy, predicate, alpha=0.4, cmap=plt.cm.Paired)
plt.scatter(Xs[:, 0], Xs[:, 1], c=OYs, s=size, label="test", marker='o', cmap=plt.cm.Paired)



Example #31
def solve1(data):
    K = simpleKernel
    train = data
    weights, loss = perceptron(train, 1, K)
    print "Perceptron learning complete"
    return loss
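# simpleKernel is presumably the plain linear kernel, i.e. roughly
# def simpleKernel(x, y, params=None): return np.dot(x, y)  (an assumption)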
Example #32
def ptron(X, Y):
    return perceptron.perceptron(X, Y, 10)
Example #33
import perceptron as p
import numpy as np

ex = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
res = np.array([0, 0, 0, 1])

p1 = p.perceptron(np.array([1, 1]), p.h)
p1.fit(ex, res)
print(p1.poids)

p2 = p.perceptron(np.array([-0.8, -0.8]), p.h)
p2.poids = p1.poids

print(p2.predict(np.array([0, 0])))
print(p2.predict(np.array([0, 1])))
print(p2.predict(np.array([1, 0])))
print(p2.predict(np.array([1, 1])))
Example #34
#!/usr/bin/python

import os
import pylab as pl
import numpy as np
import perceptron as pcn

os.chdir("/home/adam/github/marsland-machine-learning/datasets")
pima = np.loadtxt('pima-indians-diabetes.data', delimiter=',')
inputs = pima[:, :8]
outputs = pima[:, 8:9]

p = pcn.perceptron(inputs,outputs)
p.train(inputs,outputs,0.25,100)
p.confmat(inputs,outputs)
Example #35
import numpy as np
from perceptron import perceptron
from week2 import calcPoints

# data = [([1, 1], 1), ([-1, -1], -1), ([0, 1], 1),
#       ([0, -1], -1), ([1, 0], 1), ([-1, 0], -1)]

data = [([-1, -1], -1), ([-1.38, -1.25], -1), ([-1, -2], -1),
        ([-2.4, -0.99], -1), ([-2.44, -0.49], -1), ([-1.64, -0.55], -1),
        ([-2, -1.35], -1), ([-1.56, -2.19], -1), ([-2.2, 0.37], -1),
        ([1, 2], 1), ([1.46, 2.25], 1), ([2, 2], 1), ([1.62, 1.47], 1),
        ([1, 1], 1), ([0.18, 0.39], 1), ([0.52, 1.57], 1), ([1.82, 0.23], 1),
        ([2.48, 0.97], 1), ([3.12, 1.67], 1), ([2.34, 1.71], 1),
        ([1.96, 1.25], 1), ([0.98, 0.45], 1), ([1.26, -0.71], 1)]


newTheta = perceptron(data, 2)
print("The hyperplane is: ", newTheta[0], "+ ", newTheta[1])

for point in data:
    print("Point: ", point[0])
    if calcPoints(newTheta[0], point[0]) > 0:
        print(1)
    else:
        print(-1)
Example #36
    for folds in range(0, n_folds):

        xtrain, xtest, ttrain, ttest = train_test_split(X, t, test_size=0.25)

        numberOfTrain = len(xtrain)
        numberOfTest = len(xtest)

        ttrain1 = 2 * ttrain - 1  # map label 0 to -1; 1 stays 1
        ttest1 = 2 * ttest - 1  # map label 0 to -1; 1 stays 1

        xtrain = np.array(xtrain, dtype=float)
        xtest = np.array(xtest, dtype=float)

        w = pr.perceptron(np.transpose(xtrain), ttrain1, epochs, b)
        y = np.dot(xtest, np.transpose(w))
        predict = (y > 0)

        accuracy += evaluate(ttest, predict, 'accuracy')
        precision += evaluate(ttest, predict, 'precision')
        recall += evaluate(ttest, predict, 'recall')
        fmeasure += evaluate(ttest, predict, 'fmeasure')
        sensitivity += evaluate(ttest, predict, 'sensitivity')
        specificity += evaluate(ttest, predict, 'specificity')

        # plots

        subplt[folds // 3, folds % 3].plot(ttest, "ro")
        subplt[folds // 3, folds % 3].plot(predict, "b.")
Example #37
"""
main.py
uses my simple perceptron to classify irises

>>> model.pred([1.4,0.2])
1.0
>>> model.pred([4.5,1.6])
-1.0
"""
import perceptron
import pandas as pd
import matplotlib.pyplot as plt
import random
from sklearn.utils import shuffle

model = perceptron.perceptron(0.8)  # initialize a new perceptron with a learning rate of 0.8

data = pd.read_csv("iris.data")  #read iris csv

data = shuffle(data)

species = list(data.iloc[0:, 4])  # the iris species labels; these will get encoded below

petal_length = list(data.iloc[0:, 2])
petal_width = list(data.iloc[0:, 3])

encoded_species = []

for i in species:
Example #38
def andgate():
    return p.perceptron(np.array([-3, 2, 2]), 0)
Example #39
import perceptron

perc = perceptron.perceptron()
print(perc.guess([-1, 0.5]))


Example #40
    while run_again:
        run_again = training_epoch(perceptron, p_id)
    return

#################
# data structures
#################
# create a dictionary of perceptrons
# such that all different letter combinations are represented
perceptrons = {}
for letter1 in string.ascii_uppercase:
    for letter2 in string.ascii_uppercase:
        if letter1 != letter2:
            if letter2 + letter1 not in perceptrons:
                letters_combined = letter1 + letter2
                perceptrons[letters_combined] = perceptron()

# loop through dictionary of perceptron instances
# and train perceptron for matching input
# e.g. perceptron[AB] gets all A and all B training instances
# from the training data
#######
# main
#######
perceptron_increment = 1
#
for m, n in perceptrons:  # each key is a two-letter string, so it unpacks into the letters m, n
    # print "---------------------------------------",m, n
    # text = "\rTraining perceptron "+str(perceptron_increment)+"/"+str(len(perceptrons))
    # sys.stdout.write(text)
    perceptron_increment += 1
Example #41
#

from files import process_file
from perceptron import perceptron
from ploting import graph


if __name__ == '__main__':

    epocas = 10000  # iterations
    eta = 0.01  # learning rate
    # path to the dataset
    path = "iris.data"

    obj_plt = graph()
    obj_files = process_file()
    data, labels = obj_files.load_file(path)

    # initialize the perceptron
    obj_perceptron = perceptron(eta, epocas)
    # train the perceptron
    model = obj_perceptron.training(data, labels)

    # plot errors
    # obj_plt.plotting_errors(model)

    # plot decision regions
    obj_plt.plotting_decision_regions(data, labels, obj_perceptron)

Example #42
from perceptron import perceptron

a = perceptron()
a.classificar()
print(a.y)
Example #43
def main():
    path = Path('./dataset')

    symbol_dict = {}
    samples = {}
    X_train = []
    Y_train = []
    X_val = []
    Y_val = []
    X_test = []
    Y_test = []

    dataset_index = 1

    with open(path / f'info_{dataset_index}.csv') as file:
        reader = csv.reader(file, delimiter='\n')
        for row in reader:
            try:
                row = row[0].split(',')
                symbol_dict[int(row[0])] = row[1]
                samples[int(row[0])] = 0
            except Exception:
                continue

    # print(symbol_dict)

    with open(path / f'train_{dataset_index}.csv') as file:
        reader = csv.reader(file, delimiter='\n')
        for row in reader:
            split_row = row[0].split(',')
            index = int(split_row[-1])
            samples[index] += 1
            X_train.append([int(s) for s in split_row[:-1]])  # training data; label in last position
            Y_train.append(index)

    with open(path / f'val_{dataset_index}.csv') as file:
        reader = csv.reader(file, delimiter='\n')
        for row in reader:
            split_row = row[0].split(',')
            index = int(split_row[-1])
            samples[index] += 1
            X_val.append([int(s) for s in split_row[:-1]])  # validation data; label in last position
            Y_val.append(index)

    with open(path / f'test_with_label_{dataset_index}.csv') as file:
        reader = csv.reader(file, delimiter='\n')
        for row in reader:
            split_row = row[0].split(',')
            index = int(split_row[-1])
            samples[index] += 1
            X_test.append([int(s) for s in split_row[:-1]])  # test data; label in last position
            Y_test.append(index)

    #symbs = symbol_dict.values()
    #vals = samples.values()
    #fig, ax = plt.subplots()
    #ax.bar(symbs, vals)
    #plt.show()

    baseD = baseDT.baseDT(X_train, Y_train, X_test, Y_test)
    #outFile.createCSV(baseD, Y_test, symbol_dict, f"BASE-DT-DS{dataset_index}.csv")

    bestDT_pred = bestDT.bestDT(X_train, Y_train, X_test, Y_test,
                                list(symbol_dict.keys()))
    #outFile.createCSV(bestDT_pred, Y_test, symbol_dict, f"BEST-DT-DS{dataset_index}.csv")

    Y_gnb = gnb.gaussianNB(X_train, Y_train, X_test)
    #outFile.createCSV(Y_gnb, Y_test, symbol_dict, f"GNB-DS{dataset_index}.csv")

    pcp = perceptron.perceptron(X_train, Y_train, X_test)
    #outFile.createCSV(pcp, Y_test, symbol_dict, f"PER-DS{dataset_index}.csv")

    base_mlp = baseMLP.baseMLP(X_train, Y_train, X_test)
    #outFile.createCSV(base_mlp,Y_test,symbol_dict,f"BASE-MLP-DS{dataset_index}.csv")

    best_mlp = bestMLP.bestMLP(X_train, Y_train, X_val, Y_val, X_test)
Example #44
import numpy as np
from perceptron import perceptron

data = np.loadtxt('OCR_14x14')
N, L = data.shape
D = L - 1
labs = np.unique(data[:, L - 1])
C = labs.size

np.random.seed(23)
perm = np.random.permutation(N)
data = data[perm]

NTr = int(round(.7 * N))
train = data[:NTr, :]
w, E, k = perceptron(train)

np.savetxt('percep_w', w, fmt='%.2f')
print(w)
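The script trains on the first 70% of the shuffled data but never scores the rest; a hedged evaluation sketch (the weight layout is an assumption, since the perceptron module is not shown):

# assumes w is (D+1, C) with a bias row first and one weight column per class
test = data[NTr:, :]
X_te = np.hstack([np.ones((test.shape[0], 1)), test[:, :D]])
y_te = test[:, D]
pred = labs[np.argmax(X_te @ w, axis=1)]
print('held-out error rate: %.3f' % np.mean(pred != y_te))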
Example #45
def main():
    run_iteration = 500
    k = 10000
    m_samples = [5, 10, 15, 25, 70]
    svm_obj = svm_classifier()
    perceptron_obj = perceptron.perceptron()

    for itr, m in enumerate(m_samples):

        for i in range(run_iteration):
            print(1, i, m)
            svm_obj.do_labeling(m, 1, False)
            svm_obj.train(svm_obj.D1_2dGauss, svm_obj.D1_label)
            perceptron_obj.fit(svm_obj.D1_2dGauss, svm_obj.D1_label)

            svm_obj.do_labeling(k, 1, True)
            svm_obj.predict(svm_obj.D1_2dGauss)
            perceptron_obj.predict(svm_obj.D1_2dGauss)
            perceptron_obj.check_accuracy(svm_obj.D1_label, itr)
            svm_obj.check_accuracy(svm_obj.D1_label, itr)
            perceptron_obj.iter_reset()

    perceptron_obj.accuracy_lst = [
        i / (run_iteration * k) for i in perceptron_obj.accuracy_lst
    ]
    svm_obj.accuracy_lst = [
        i / (run_iteration * k) for i in svm_obj.accuracy_lst
    ]
    plt.title("D1 distribution")
    plt.ylabel("Accuracy")
    plt.xlabel("Number of samples (m)")
    plt.plot(m_samples, svm_obj.accuracy_lst, label="SVM Mean accuracy")
    plt.plot(m_samples,
             perceptron_obj.accuracy_lst,
             label="Perceptron Mean accuracy")
    plt.legend(loc=4)

    svm_obj.reset()
    perceptron_obj.reset_all()

    for itr, m in enumerate(m_samples):

        for i in range(run_iteration):
            print(2, i, m)
            svm_obj.do_labeling(m, 2, False)
            svm_obj.train(svm_obj.all_points, svm_obj.D2_label)
            perceptron_obj.fit(svm_obj.all_points, svm_obj.D2_label)

            svm_obj.do_labeling(k, 2, True)
            svm_obj.predict(svm_obj.all_points)
            perceptron_obj.predict(svm_obj.all_points)
            perceptron_obj.check_accuracy(svm_obj.D2_label, itr)

            svm_obj.check_accuracy(svm_obj.D2_label, itr)
            perceptron_obj.iter_reset()

    svm_obj.accuracy_lst = [
        i / (run_iteration * k) for i in svm_obj.accuracy_lst
    ]
    perceptron_obj.accuracy_lst = [
        i / (run_iteration * k) for i in perceptron_obj.accuracy_lst
    ]
    plt.figure(2)
    plt.title("D2 distribution")
    plt.ylabel("Accuracy")
    plt.xlabel("Number of samples (m)")
    plt.plot(m_samples, svm_obj.accuracy_lst, label="SVM Mean accuracy")
    plt.plot(m_samples,
             perceptron_obj.accuracy_lst,
             label="Perceptron Mean accuracy")
    plt.legend(loc=4)
    plt.show()
Example #46
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 15:58:32 2019

@author: Lucil
"""

import nltk
import pickle
from perceptron import perceptron

## Download dataset from nltk universal file.
tagged_sentence = nltk.corpus.treebank.tagged_sents(tagset='universal')
tagged_words = list(set([tup for i in tagged_sentence for tup in i]))
vocab = list(set([word for word, tag in tagged_words]))
tags = list(set([tag for word, tag in tagged_words]))

## Split dataset into training set and test set
#train = tagged_sentence[:int(len(tagged_sentence)*0.7)]
#test = tagged_sentence[int(len(tagged_sentence)*0.7+1):]

alpha = perceptron(tagged_sentence, tags, 50, avg=True)
with open('alpha.pickle', 'wb') as f:
    pickle.dump(alpha, f)
Example #47
'''
Parameters

n - input size
bias - True if to include else False
sample_size - number of points along each axis
parameter_limit - sample interval is from -parameter_limit up to +parameter_limit

'''
n = 5
bias = False
sample_size = 40
parameter_limit = 1
'''
Initialize perceptron and input
'''
p = perceptron(n)
b_in = b_input(n)
'''
Initialize parameter - class matrix
'''
parameter_space = np.ndarray([sample_size] * n, dtype=int)

neutral_set_dict = {}
neutral_set_list = []

print('initialized', time.time() - start_time)

for number in range(sample_size**n):
    indices = []

    for _ in range(n):
Example #48
def main():
    args = parse_args()

    data = load_data('data/adult.data')
    test_data = load_data('data/adult.test2')
    val_data = load_data('data/adult.val')

    if args.depth_plot:
        print('Calculating f1-scores for different depths...')
        depths, scores = dt.tune_max_depth(data, val_data)
        plt.plot(depths, scores)
        plt.ylabel('F1-score')
        plt.xlabel('Maximum Depth')
        plt.show()
        quit()

    baseline_tree = dt.build_decision_tree(
        data, max_depth=1, forced_attribute=args.baseline_attribute)
    print('Building decision tree...')
    dt_start = time.time()
    if args.depth is not None:
        tree = dt.build_decision_tree(data, max_depth=args.depth)
    else:
        tree = dt.build_decision_tree(data)

    print('Decision tree built in ' + str(time.time() - dt_start) + ' s.')

    baseline_metrics = compute_metrics(dt.decision_tree_classify, test_data,
                                       [baseline_tree])
    dt_metrics = compute_metrics(dt.decision_tree_classify, test_data, [tree])

    if args.rep:
        print('Pruning decision tree (reduced error)...')
        dtre_start = time.time()
        dt.reduced_error_prune(tree, val_data)
        print('Decision tree pruned (reduced error) in ' +
              str(time.time() - dtre_start) + ' s.')
        dtre_metrics = compute_metrics(dt.decision_tree_classify, test_data,
                                       [tree])
    elif args.csp:
        print('Pruning decision tree (chi-square)...')
        dtcs_start = time.time()
        dt.chi_square_prune(tree)
        print('Decision tree pruned (chi-square) in ' +
              str(time.time() - dtcs_start) + ' s.')
        dtcs_metrics = compute_metrics(dt.decision_tree_classify, test_data,
                                       [tree])

    y_train = get_labels(data)
    y_test = get_labels(test_data)

    features = extract_features(data, test_data)
    X_train = features[0]
    X_test = features[1]
    feature_names = features[2]
    print('Building logistic regression model...')
    lr_start = time.time()
    lr_model = LogisticRegression(solver='sag').fit(X_train, y_train)

    print('Logistic regression model built in ' + str(time.time() - lr_start) +
          ' s.')

    if args.lr_top is not None:
        print('Top weighted features in logistic regression model: ' +
              str(get_lr_top_weights(lr_model, args.lr_top, feature_names)[0]))
    if args.lr_bot is not None:
        print(
            'Top negatively weighted features in logistic regression model: ' +
            str(get_lr_top_weights(lr_model, args.lr_bot, feature_names)[1]))

    lr_pred = lr_model.predict(X_test)

    weights = perceptron.perceptron(X_train, y_train, 10)
    perceptron_pred = perceptron.perceptron_test(X_test, weights)

    perceptron_metrics = (
        [y_test[i] == perceptron_pred[i] for i in range(len(y_test))].count(True) / len(test_data),
        precision_score(y_test, perceptron_pred),
        recall_score(y_test, perceptron_pred),
        f1_score(y_test, perceptron_pred))
    lr_metrics = (
        [y_test[i] == lr_pred[i] for i in range(len(y_test))].count(True) / len(test_data),
        precision_score(y_test, lr_pred),
        recall_score(y_test, lr_pred),
        f1_score(y_test, lr_pred))

    print('Baseline:')
    print('Accuracy: ' + str(baseline_metrics[0]))
    print('Precision: ' + str(baseline_metrics[1]))
    print('Recall: ' + str(baseline_metrics[2]))
    print('F1 Score: ' + str(baseline_metrics[3]))

    print('\nDecision Tree:')
    print('Accuracy: ' + str(dt_metrics[0]))
    print('Precision: ' + str(dt_metrics[1]))
    print('Recall: ' + str(dt_metrics[2]))
    print('F1 Score: ' + str(dt_metrics[3]))

    if args.rep:
        print('\nDecision Tree (w/ reduced error pruning):')
        print('Accuracy: ' + str(dtre_metrics[0]))
        print('Precision: ' + str(dtre_metrics[1]))
        print('Recall: ' + str(dtre_metrics[2]))
        print('F1 Score: ' + str(dtre_metrics[3]))
    elif args.csp:
        print('\nDecision Tree (w/ chi-square pruning):')
        print('Accuracy: ' + str(dtcs_metrics[0]))
        print('Precision: ' + str(dtcs_metrics[1]))
        print('Recall: ' + str(dtcs_metrics[2]))
        print('F1 Score: ' + str(dtcs_metrics[3]))

    print('\nPerceptron:')
    print('Accuracy: ' + str(perceptron_metrics[0]))
    print('Precision: ' + str(perceptron_metrics[1]))
    print('Recall: ' + str(perceptron_metrics[2]))
    print('F1 Score: ' + str(perceptron_metrics[3]))

    print('\nLogistic Regression:')
    print('Accuracy: ' + str(lr_metrics[0]))
    print('Precision: ' + str(lr_metrics[1]))
    print('Recall: ' + str(lr_metrics[2]))
    print('F1 Score: ' + str(lr_metrics[3]))

    if args.plot:
        metrics_baseline = (baseline_metrics[0], baseline_metrics[1],
                            baseline_metrics[2], baseline_metrics[3])
        metrics_dt = (dt_metrics[0], dt_metrics[1], dt_metrics[2],
                      dt_metrics[3])
        metrics_perceptron = (perceptron_metrics[0], perceptron_metrics[1],
                              perceptron_metrics[2], perceptron_metrics[3])
        metrics_lr = (lr_metrics[0], lr_metrics[1], lr_metrics[2],
                      lr_metrics[3])
        metrics_dtre, metrics_dtcs = None, None
        if args.rep:
            metrics_dtre = (dtre_metrics[0], dtre_metrics[1], dtre_metrics[2],
                            dtre_metrics[3])
        elif args.csp:
            metrics_dtcs = (dtcs_metrics[0], dtcs_metrics[1], dtcs_metrics[2],
                            dtcs_metrics[3])
        plot_metrics(metrics_baseline, metrics_dt, metrics_perceptron,
                     metrics_lr, metrics_dtre, metrics_dtcs)
Example #49
    Y = np.array(Y)
    return X, Y


def tweak_Y_for_multiclass(Y, class_index):
    return np.equal(Y, class_index).astype(int)


X, Y = data_set_create(3, TOTAL_SAMPLES)

# One VS All strategy

# For Class 0
Y_class0 = tweak_Y_for_multiclass(Y, 0)
node_0 = perceptron(total_features=DIMENSION_OF_X,
                    epochs=EPOCHS,
                    learning_rate=LEARNING_RATE)
node_0.train(X, Y_class0)

# For Class 2,
Y_class2 = tweak_Y_for_multiclass(Y, 2)
node_2 = perceptron(total_features=DIMENSION_OF_X,
                    epochs=EPOCHS,
                    learning_rate=LEARNING_RATE)
node_2.train(X, Y_class2)

X_test, Y_test = data_set_create(3, TOTAL_SAMPLES)

Y_test_hat_class0 = node_0.predict(X_test)
Y_test_hat_class2 = node_2.predict(X_test)
Y_test_hat = np.zeros((TOTAL_SAMPLES, 1))
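The snippet is cut off after allocating Y_test_hat; a plausible continuation of the one-vs-all combination (hypothetical, not the original code):

for i in range(TOTAL_SAMPLES):
    if Y_test_hat_class0[i] == 1:
        Y_test_hat[i] = 0  # claimed by the class-0 node
    elif Y_test_hat_class2[i] == 1:
        Y_test_hat[i] = 2  # claimed by the class-2 node
    else:
        Y_test_hat[i] = 1  # neither node fired: default to class 1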
Example #50
def orgate():
    return p.perceptron(np.array([0, 1, 1]), 0)