Example No. 1
def main():
    """"This is the main method that will run all the tests and
    display the results."""
    
    target = np.zeros(2500)
    target[500:750] = 1
    falsePositives = 0
    misses = 0
    print("Initialized Perceptron: \n")
    twoPerceptron = Perceptron(196,1,True)
    twoMatrices, twoTargets = loadPatterns("digits_train.txt")
    twoTests, twoTestsTargets = loadPatterns("digits_test.txt")
    twoPerceptron.train(twoMatrices,target,1000)
    print("Expect True: " + str(twoPerceptron.test(twoTests[521])))
    for i in range(len(twoTests)):
        test = twoPerceptron.test(twoTests[i])
        if test and target[i] == 0:
            falsePositives += 1
        if not test and target[i] == 1:
            misses += 1
    print("Misses: " + str(misses) + "/250")
    print("False Positives: " + str(falsePositives) + "/2250")
Example No. 2
def train_simple_perceptron():
    with Timer("Loading dataset"):
        trainset, validset, testset = load_mnist()

    with Timer("Creating model"):
        # TODO: We should infer the number of distinct targets from the
        #       dataset, but I'm not sure how to do that properly (keep
        #       regression in mind).
        output_size = 10
        model = Perceptron(trainset.input_size, output_size)
        model.initialize()  # By default, uniform initialization.

    with Timer("Building optimizer"):
        optimizer = SGD(loss=NLL(model, trainset))
        optimizer.append_update_rule(ConstantLearningRate(0.0001))

    with Timer("Building trainer"):
        # Train for 10 epochs
        batch_scheduler = MiniBatchScheduler(trainset, 100)
        stopping_criterion = tasks.MaxEpochStopping(10)

        trainer = Trainer(optimizer, batch_scheduler, stopping_criterion=stopping_criterion)

        # Print time for one epoch
        trainer.append_task(tasks.PrintEpochDuration())
        trainer.append_task(tasks.PrintTrainingDuration())

        # Print mean/stderror of classification errors.
        classif_error = tasks.ClassificationError(model.use, validset)
        trainer.append_task(tasks.Print("Validset - Classif error: {0:.1%} ± {1:.1%}", classif_error.mean, classif_error.stderror))

    with Timer("Training"):
        trainer.train()
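The TODO above could be resolved by counting the distinct class labels; a minimal sketch, assuming `trainset.targets` exposes the label array (the attribute name is an assumption):

import numpy as np

def infer_output_size(trainset, regression=False):
    targets = np.asarray(trainset.targets)  # assumed attribute
    if regression:
        # for regression the output size is the target dimensionality, not a class count
        return targets.shape[1] if targets.ndim > 1 else 1
    return len(np.unique(targets))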
Example No. 3
    def test_perceptron_generated_data_error(self):
        train_dataset = [
            ((9.30194146152442, 54.29378711947825), True),
            ((2.1235211235782163, 42.41857119148967), True),
            ((1.359590385942031, 48.19605969472737), True),
            ((11.304173995362238, 40.21203508190859), True),
            ((15.491418600968164, 51.74125443774198), True),
            ((3.0448137332985663, 55.033225748928615), True),
            ((7.410534521463678, 48.82884207059357), True),
            ((7.965318834195054, 41.168243991915965), True),
            ((12.025772533772868, 44.82181684357318), True),
            ((-2.5480606577592253, 52.21817880722483), True),
            ((14.616754918016932, 27.56413924824511), False),
            ((13.735161526492831, 28.195520388962247), False),
            ((26.320312452059365, 37.52778678930581), False),
            ((28.50174788722075, 24.833317461626116), False),
            ((16.625494494802766, 35.423472182867286), False),
            ((19.135182106291616, 24.00082676846897), False),
            ((22.4174108797297, 36.127585975425156), False),
            ((12.439758335580695, 21.353479917856465), False),
            ((24.57194081489678, 32.46668179093647), False),
            ((28.556992040085298, 23.344536461376247), False),
        ]
        perceptron = Perceptron(function=lambda x: x >= 0)

        perceptron.train(train_dataset)
        perceptron.plot(train_dataset)
        input('Enter to continue')
Example No. 4
def test(results):

	test_file = results.test


	logger.debug('Started testing with options:' + '\n' +
	             'test file: ' + str(results.test) + '\n')


	logger.info("Loading model and meta_data")
	model = read_obj('model')
	meta_data = read_obj('meta_data')

	if not os.path.exists('model/test.feats'):
		test_feats = extract_feats(meta_data,test_file)
		logger.info("Done feature extraction for testing data, writing in test.feats")
		write_obj(test_feats,'test.feats')
	else:
		logger.info("test.feats already exists ... loading.")
		test_feats = read_obj('test.feats')

	classifier = Perceptron(meta_data)

	classifier.load_theta(model)
	classifier.test(test_feats)
Example No. 5
def main():

    # load dataset
    data = pd.read_csv('../data/iris.data', sep=',', header=0)

    # keep two classes only
    dataset = data[data['CLASS'] != 'Iris-virginica']

    # use the two length features for separation
    # (column names follow the misspelled headers in the data file)
    X = dataset[['SEPAL_LENGHT','PETAL_LENGHT']]

    # convert classes to -1 and 1
    y = dataset['CLASS'].apply(convert)
    
    #train Perceptron
    model = Perceptron(X.shape[1])
    model.fit(X,y)
    
    #plotting
    w = model.get_w()

    #decision boundary: w[0] + w[1]*x + w[2]*y = 0
    x1 = np.linspace(0, 10, 100)            
    x2 = -w[0]/w[2] - (w[1]/w[2])*x1
    plt.plot(x1,x2,'k')
    plt.scatter(X['SEPAL_LENGHT'],X['PETAL_LENGHT'])    
    plt.show()
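The `convert` helper is not shown; a plausible sketch mapping the two remaining class names to the -1/+1 targets expected by this perceptron:

def convert(label):
    # hypothetical helper: encode the two remaining Iris classes as -1 / +1
    return -1 if label == 'Iris-setosa' else 1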
Example No. 6
    def test_calculate_output2(self):
        perceptron = Perceptron(initial_weights=[1.0, 1.0, 1.0])

        self.assertEqual(perceptron.calculate_output([10.0, -5.0]), 1)
        self.assertEqual(perceptron.calculate_output([0.0, -2.0]), 0)
        self.assertEqual(perceptron.calculate_output([0.0, -0.5]), 1)
        self.assertEqual(perceptron.calculate_output([0.0, 0.0]), 1)
        self.assertEqual(perceptron.calculate_output([-1.0, -1.0]), 0)
Example No. 7
    def test_train(self):

        p = Perceptron(2)
        for i in range(100):
            p.train([1, 0], 0, 0.05)
            p.train([0, 1], 1, 0.05)
        self.assertAlmostEqual(p.activate([1, 0]), 0)
        self.assertAlmostEqual(p.activate([0, 1]), 1)
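The Perceptron class under test is not shown; a minimal step-activation sketch consistent with this test (the random initialization and delta-rule update are assumptions):

import random

class Perceptron:
    def __init__(self, n_inputs):
        self.w = [random.uniform(-1, 1) for _ in range(n_inputs)]
        self.b = random.uniform(-1, 1)

    def activate(self, x):
        # step activation on the weighted sum plus bias
        s = sum(wi * xi for wi, xi in zip(self.w, x)) + self.b
        return 1 if s >= 0 else 0

    def train(self, x, target, rate):
        # classic perceptron update: weights only move on a mistake
        error = target - self.activate(x)
        self.w = [wi + rate * error * xi for wi, xi in zip(self.w, x)]
        self.b += rate * error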
Example No. 8
    def test_perceptron_generated_data(self):
        train_dataset = generate_classified_points(classes=[True, False], n=20)

        perceptron = Perceptron(function=lambda x: x >= 0)

        perceptron.train(train_dataset)
        perceptron.plot(train_dataset)
        print(train_dataset)
        input('Enter to continue')
Example No. 9
 def __init__(self, input_num):
     '''
     Desc:
         Initialize the linear unit and set the number of input parameters.
     Args:
         input_num -- the number of input parameters
     Returns:
         None
     '''
     # Initialize our Perceptron base class with input_num inputs and the activation function f
     Perceptron.__init__(self, input_num, f)
Example No. 10
def train_and_perceptron():
    '''
    Train the perceptron using the AND truth table.
    '''
    # Create a perceptron with 2 inputs (AND is a binary function) and activation function f
    p = Perceptron(2, f)
    # Train for 10 iterations with a learning rate of 0.1
    input_vecs, labels = get_training_dataset()
    p.train(input_vecs, labels, 10, 0.1)
    # Return the trained perceptron
    return p
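The helpers `f` and `get_training_dataset` are not shown; minimal sketches consistent with the comments above (step activation, AND truth table) could be:

def f(x):
    # step activation, a common choice for a classic perceptron
    return 1 if x > 0 else 0

def get_training_dataset():
    # AND truth table: inputs and the corresponding labels
    input_vecs = [[1, 1], [0, 0], [1, 0], [0, 1]]
    labels = [1, 0, 0, 0]
    return input_vecs, labels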
Example No. 11
    def test_weights_dont_change(self):
        perceptron = Perceptron(initial_weights=[0.0, 0.0, -1.0])

        data = [
            ([0, 0], 0),
        ]

        perceptron.train(data)

        self.assertAlmostEqual(perceptron.weights[0], 0.0)
        self.assertAlmostEqual(perceptron.weights[1], 0.0)
        self.assertAlmostEqual(perceptron.weights[2], -1.0)
Example No. 12
    def test_single_update2(self):
        perceptron = Perceptron(initial_weights=[1.0, 1.0, 1.0], learning_rate=0.01)

        data = [
            ([1, 1], 0),
        ]

        perceptron.train(data)

        self.assertAlmostEqual(perceptron.weights[0], 0.99)
        self.assertAlmostEqual(perceptron.weights[1], 0.99)
        self.assertAlmostEqual(perceptron.weights[2], 0.99)
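These single-update tests pin down the learning rule. A minimal sketch consistent with them (bias stored as the last weight with a constant input of 1 - an inference, not the original code):

def train_epoch(weights, data, learning_rate=0.01):
    # one pass of the perceptron learning rule over the dataset
    for (x1, x2), target in data:
        output = 1 if weights[0] * x1 + weights[1] * x2 + weights[2] >= 0 else 0
        error = target - output
        weights[0] += learning_rate * error * x1
        weights[1] += learning_rate * error * x2
        weights[2] += learning_rate * error  # bias sees a constant input of 1
    return weights

Starting from [1.0, 1.0, 1.0] with the single sample ([1, 1], 0), one pass yields [0.99, 0.99, 0.99], matching the assertions above.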
Example No. 13
    def test_feedforward_returns_negative_one_if_sum_is_less_than_zero(
        self,
        mock_uniform
    ):
        expected = [0.1, 0.2, 0.3]
        mock_uniform.side_effect = expected

        inputs = [1, -3, 1]

        perceptron = Perceptron()
        actual = perceptron.feedforward(inputs)

        expected = -1
        self.assertEqual(actual, expected)
Example No. 14
def train(results):
	# get files

	vocab_file = results.vocab
	labels_file = results.labels
	train_file = results.train
	step = results.step
	iterations = results.iter


	if not step:
		step = 0.01
	if not iterations:
		iterations = 10

	logger.debug('Started training with options:' + '\n' +
	             'training file: ' + str(train_file) + '\n' +
	             'step size: ' + str(step) + '\n' +
	             'no. of iter: ' + str(iterations) + '\n' +
	             'vocab file: ' + str(vocab_file) + '\n' +
	             'labels file: ' + str(labels_file) + '\n')

	if not os.path.exists('model/meta_data'):
		meta_data_instance = MetaData(vocab_file,labels_file)
		meta_data = meta_data_instance.get_meta_data()
		logger.info("Writing meta data file")
		write_obj(meta_data,'meta_data')
	else:
		logger.info("meta data file already exists ... loading")
		meta_data = read_obj('meta_data')

	if not os.path.exists('model/train.feats'):
		train_feats = extract_feats(meta_data,train_file)
		logger.info("Writing extracted feats for training files to train.feats")
		write_obj(train_feats,'train.feats')
	else:
		logger.info("train.feats already exists ... loading.")
		train_feats = read_obj('train.feats')

	if not os.path.exists('model/model'):
		classifier = Perceptron(meta_data)
		classifier.train(train_feats,step,iterations)
		logger.info("Done Training, model is written in model file")
		model = classifier.get_theta()
		write_obj(model, 'model')
	else:
		logger.info('model already exists, nothing to do!')
Example No. 15
def perceptron_model():
    """ Perceptron classifier on Iris flower dataset
    """

    df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)

    # setosa and versicolor
    y = df.iloc[0:100, 4].values
    y = np.where(y == 'Iris-setosa', -1, 1)

    # sepal length and petal length
    X = df.iloc[0:100, [0, 2]].values
    ppn = Perceptron(epochs=10, eta=0.1)
    ppn.fit(X, y)
    print('Weights: %s' % ppn.w_)
Example No. 16
def run():
	perceptron = Perceptron()
	data = get_data(2)

	# Trains the perceptron.
	perceptron.train(data['training_data'], data['training_labels'])

	# Tests the perceptron.
	predictions = get_predictions(perceptron, data['test_data'])
	correct_prediction_count = count_correct_predictions(data['test_labels'], predictions)

	# Displays the results.
	print('Accuracy: {0}/{1}.'.format(correct_prediction_count, predictions.shape[0]))
	print('Hyperplane: {0}'.format(np.append(perceptron.weights, perceptron.bias)))
	plot_2d_results(perceptron, data)
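The helpers `get_predictions` and `count_correct_predictions` are not shown; plausible sketches consistent with the usage above (NumPy arrays and a `predict` method on the perceptron are both assumptions):

def get_predictions(perceptron, test_data):
	# hypothetical: predict each test sample in turn
	return np.array([perceptron.predict(x) for x in test_data])

def count_correct_predictions(test_labels, predictions):
	return int(np.sum(np.asarray(test_labels) == predictions))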
Example No. 17
    def test_train_returns_new_adjusted_weights_in_positive_direction(
        self,
        mock_uniform
    ):
        expected = [0.1, 0.2, 0.3]
        mock_uniform.side_effect = expected

        inputs = [1, -3, 1]
        desired = 1

        perceptron = Perceptron()
        actual = perceptron.train(inputs, desired)

        expected = [0.12000000000000001, 0.14, 0.32]
        self.assertEqual(actual, expected)
Example No. 18
def main(data):

    # Normalise the data
    training_data = normalise(data)

    # Create the perceptron
    p = Perceptron(len(data[0][0]))

    # Number of full iterations
    epochs = 0

    # Initialize mse so the loop runs at least once
    mse = 999

    while (abs(mse-LMSE) > 0.002):

        # Epoch cumulative error
        error = 0

        # For each set in the training_data
        for value in training_data:

            # Calculate the result
            output = p.result(value[0])

            # Calculate the error
            iter_error = value[1] - output

            # Add the squared error to the epoch error, so mse below really is a mean squared error
            error += iter_error ** 2

            # Adjust the weights based on inputs and the error
            p.weight_adjustment(value[0], iter_error)

        # Calculate the MSE - epoch squared error / number of sets
        mse = float(error / len(training_data))

        # Print the MSE for each epoch
        print("The MSE of %d epochs is %.10f" % (epochs, mse))

        # Every 100 epochs show the weight values
        if epochs % 100 == 0:
            print "0: %.10f - 1: %.10f - 2: %.10f - 3: %.10f" % (p.w[0], p.w[1], p.w[2], p.w[3])

        # Increment the epoch number
        epochs += 1

    return p
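`normalise` and `LMSE` are not shown; a plausible min-max scaler and module-level target constant (both assumptions) might look like:

LMSE = 0.0  # assumed module-level target error for the stopping test in main()

def normalise(data):
    # hypothetical helper: min-max scale each input feature to [0, 1],
    # where data is a list of (input_vector, target) pairs as used in main()
    inputs = [row[0] for row in data]
    mins = [min(col) for col in zip(*inputs)]
    maxs = [max(col) for col in zip(*inputs)]
    scaled = [[(v - lo) / (hi - lo) if hi > lo else 0.0
               for v, lo, hi in zip(row, mins, maxs)]
              for row in inputs]
    return [(x, row[1]) for x, row in zip(scaled, data)]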
Example No. 19
 def __init__(
     self, model=None, oracle_type=DETERMINE_TREE_TO_GRAPH_ORACLE_SC, action_type="basic", verbose=1, elog=sys.stdout
 ):
     self.sent = ""
     self.oracle_type = oracle_type
     self.verbose = verbose
     self.elog = elog
     self.model = model
     if self.oracle_type == DETERMINE_TREE_TO_GRAPH_ORACLE:
         Parser.State = __import__("graphstate").GraphState
         Parser.State.init_action_table(ACTION_TYPE_TABLE[action_type])
         Parser.oracle = __import__("oracle").DetOracle(self.verbose)
     elif self.oracle_type == DETERMINE_TREE_TO_GRAPH_ORACLE_SC:
         Parser.State = __import__("graphstate").GraphState
         Parser.State.init_action_table(ACTION_TYPE_TABLE[action_type])
         Parser.oracle = __import__("oracle").DetOracleSC(self.verbose)
     elif self.oracle_type == DET_T2G_ORACLE_ABT:
         Parser.State = __import__("graphstate").GraphState
         Parser.State.init_action_table(ACTION_TYPE_TABLE[action_type])
         Parser.oracle = __import__("oracle").DetOracleABT(self.verbose)
     elif self.oracle_type == DETERMINE_STRING_TO_GRAPH_ORACLE:
         Parser.State = __import__("newstate").Newstate
     else:
         pass
     self.perceptron = Perceptron(model)
     Parser.State.model = model
Example No. 20
    def test_single_update3(self):
        perceptron = Perceptron(initial_weights=[0.0, 0.0, 0.0], learning_rate=0.01)

        # Truth table of an OR function
        data = [
            ([0, 0], 0),
            ([0, 1], 1),
            ([1, 0], 1),
            ([1, 1], 1),
        ]

        perceptron.train(data)

        self.assertAlmostEqual(perceptron.weights[0], 0.0)
        self.assertAlmostEqual(perceptron.weights[1], 0.01)
        self.assertAlmostEqual(perceptron.weights[2], 0.0)
Example No. 21
    def test_perceptron(self):
        train_dataset = [
            ((1, 0, 0), 1),
            ((1, 0, 1), 1),
            ((1, 1, 0), 1),
            ((1, 1, 1), 0),
        ]

        perceptron = Perceptron(function=lambda x: x >= 1)
        perceptron.train(train_dataset)  # run the training algorithm

        v = perceptron.run((1, 0, 0))
        self.assertEqual(v, 1)

        v = perceptron.run((1, 1, 1))
        self.assertEqual(v, 0)
Example No. 22
def main():
	def print_help():
		print("Run the neural net as follows:")
		print("python3 main.py input_rep num_output_nodes learn_rate")
		print("input_rep         = size of input representation, must be 8 or 32 (int)")
		print("num_output_nodes  = number of output nodes, must be 1 or 10 (int)")
		print("learn_rate        = learning rate, recommended 0 < learn_rate <= 1 (double)")

	#quit if the wrong number of arguments was given
	if len(sys.argv) != 4:
		print_help()
		quit()

	#read in parameters
	input_rep = int(sys.argv[1])
	num_output_nodes = int(sys.argv[2])
	learn_rate = float(sys.argv[3])

	#quit if input or output representations have illegal values
	if input_rep != 8 and input_rep != 32:
		print_help()
		quit()

	if num_output_nodes != 1 and num_output_nodes != 10:
		print_help()
		quit()

	num_input_nodes = input_rep ** 2

	if num_output_nodes == 1:
		nn = Perceptron(num_input_nodes, learn_rate, 50, True)
	else:
		nn = MultiplePerceptrons(num_input_nodes, learn_rate, 50)

	if input_rep == 8:
		nn.learn(train_8, test_8)
	else:
		nn.learn(train_32, test_32)

	print("Training finished. 50 epochs completed.")
Example No. 23
def main():
    # Classify with an arbitrary condition. In the parameter list, the index is
    # the corresponding power of x and the entry is its coefficient.
    n_dots = 100
    params = [-2.0, 5.0]
    train_data = gen_sample(n_dots, 0.0, 1.0, params)

    plt.figure(figsize=(5, 5), dpi=150)
    plot_line(0.0, 1.0, params)
    plt.scatter(train_data.x1,
                train_data.x2,
                c=train_data.label,
                cmap='cividis')
    plt.xlim(0., 1.)
    plt.ylim(0., 1.)
    plt.legend(loc='best')
    plt.title("Training data set")
    plt.savefig('01_training_data.png')
    plt.clf()

    # Slice data frame into features and labels. ===============================
    x = train_data[['x1', 'x2']].values
    y = train_data.loc[:, 'label']
    model = Perceptron(x.shape[1])  # Initialize model with two features.
    w, loss = model.train(x, y)  # Train model.

    loss_plot(loss)

    # Validate model generating a new data set. ================================
    plot_line(0.0, 1.0, params)
    test_data = gen_sample(n_dots, 0.0, 1.0, params)
    test_data['predicted'] = np.nan  # Add new column for predictions
    test_data.predicted = model.predict(test_data[['x1', 'x2']].values)

    pl, ml = count_vals(
        test_data.label.values)  # Compare predictions to targets
    pp, mp = count_vals(test_data.predicted.values)
    print("Bias: {:4.5}, W1: {:3.5}, W2: {:3.5}".format(w[0], w[1], w[2]))
    print("Labels:      +1 = {}, -1 = {}".format(pl, ml))
    print("Predictions: +1 = {}, -1 = {}".format(pp, mp))
    print("Accuracy: {:5.3}%".format(100 * (1 - np.abs(pp - pl) / n_dots)))

    test_plot(test_data, w)

    return None
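`count_vals` is not shown; a plausible sketch that tallies the +1 and -1 entries, matching how its two return values are printed above:

def count_vals(vals):
    # hypothetical helper: count the +1 and -1 entries of a label/prediction array
    vals = np.asarray(vals)
    return int(np.sum(vals == 1)), int(np.sum(vals == -1))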
Example No. 24
def step1_learning():
    # Data used for training and testing
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = np.array([-1, -1, -1, 1])
    # Create the perceptron object.
    ppn = Perceptron(eta=0.1)
    # Train.
    stime = time()
    ppn.fit(X, y)
    etime = time()
    print("Time taken for training: ", (etime - stime))
    print("Number of errors during training: ", ppn.errors_)

    # Save the trained object to a file.
    with open('./2.Perceptron/perceptron.dat', 'wb') as fp:
        pickle.dump(ppn, fp)

    print("Machine learning training complete")
Example No. 25
    def ExerciseIris(self):
        iris = datasets.load_iris()
        names = iris.target_names
        data = iris.data
        target = iris.target

        # removing type 2
        data_train, y = _matrix.FilterByY(data, target, 2)

        percep = Perceptron(len(data_train[0]), 10)
        percep.train(data_train, y)
        result = []

        for i in data_train:
            result.append(percep.predict(i))

        plot.Perceptron(data_train, np.asarray(result),
                        "Result Iris - Setosa x Versicolor", names)
Example No. 26
    def init_neurons(self, num_neurons, num_inputs):

        #print('creating neurons...')

        for k in range(1, num_neurons + 1):

            #print('creating neuron ' + str(k))
            n = Perceptron(num_inputs, label=self.label)
            self.neurons.append(n)
Example No. 27
def step2_learning():
    ppn = Perceptron(eta=0.1)
    data = step1_get_data()

    X = data[0]
    y = data[1]

    # Train
    ppn.fit(X, y)
    print(ppn.errors_)
    print(ppn.w_)

    # Save the trained object to a file
    with open('./iris/perceptron.dat', 'wb') as fp:
        pickle.dump(ppn, fp)

    print('Training complete')
Example No. 28
 def __init__(self, languages, weights_learning_rate,
              threshold_learning_rate):
     self.languages = languages
     self.perceptrons = []
     for language in languages:
         self.perceptrons.append(
             Perceptron([random.uniform(0, 1) for _ in ascii_lowercase],
                        random.uniform(0, 1), weights_learning_rate,
                        threshold_learning_rate))
Example No. 29
    def __init__(self):

        self.data_reader = DataReader('data/training_data/training.data', 'data/stopwords/stopwords.txt', True, 1000)
        self.perceptron = Perceptron()
        self.softmax = Softmax()
        # Let's create 5 classifiers
        universe_size = len(self.data_reader.universe)
        self.perceptron_classifiers = [np.zeros((universe_size + 1)) for i in range(5)]
        self.softmax_classifier = np.ones((5, universe_size + 1))
Example No. 30
def test_weight_initialization():
    input_dimensions = 2
    number_of_classes = 5
    model = Perceptron(input_dimensions=input_dimensions,
                       number_of_classes=number_of_classes,
                       seed=1)
    assert model.weights.ndim == 2
    assert model.weights.shape == (number_of_classes, input_dimensions + 1)
    weights = np.array([[1.62434536, -0.61175641, -0.52817175],
                        [-1.07296862, 0.86540763, -2.3015387],
                        [1.74481176, -0.7612069, 0.3190391],
                        [-0.24937038, 1.46210794, -2.06014071],
                        [-0.3224172, -0.38405435, 1.13376944]])
    np.testing.assert_allclose(model.weights, weights, rtol=1e-3, atol=1e-3)
    model.initialize_all_weights_to_zeros()
    assert np.array_equal(model.weights,
                          np.zeros((number_of_classes, input_dimensions + 1)))
Example No. 31
def init():

    # load data
    X, Y = load_data()

    # init model
    perceptron = Perceptron()

    # train
    wt_matrix, b_vec = perceptron.train(X, Y, 500)

    # tuned w, b
    w = perceptron.w
    b = perceptron.b
    mistake = perceptron.mistake

    # plot
    perceptron.plot()
Example No. 32
 def train(self):
     alpha = float(self.view.get_entry("alpha").get())
     self.model.set_perceptron(Perceptron(self.model, alpha))
     self.view.set_button_normal("test")
     self.view.set_button_normal("classify")
     self.view.graphs.show_graphs(self.model.trainset,
                                  self.model.dimensions,
                                  self.model.train_colors,
                                  self.model.perceptron, "train")
Example No. 33
def main():

    training_data, test_data = load_data()

    layers = [784, 16, 10]
    epochs = 20
    minibatch_size = 20
    learning_rate = 4.0

    model = Perceptron(layers)

    start = time.time()
    print("Starting training...")
    model.train(training_data, test_data, epochs, minibatch_size, learning_rate)
    print("Training complete")
    end = time.time()

    print(f"Training time: {end - start}")
Example No. 34
    def test_perceptron_fit(self):
        """test method for perceptron
        """
        # Initialization
        expected = np.array([-0.4, -0.68, 1.82])

        df = pd.read_csv('../tests/data/iris.data', header=None)
        y = df.iloc[0:100, 4].values
        y = np.where(y == 'Iris-setosa', -1, 1)
        X = df.iloc[0:100, [0, 2]].values

        # Run the test
        pc = Perceptron()
        pc.fit(X, y)
        actual = pc.w_

        # Assert
        np.testing.assert_array_almost_equal(expected, actual, 2)
Example No. 35
def run_this(data, test, w, b, lr):
    perceptron = Perceptron(w, b, lr)

    perceptron.print_arg()

    perceptron.train(data)

    print(perceptron.predict(test))
Example No. 36
 def test_perceptron_predict(self):
     self.assertEqual(Perceptron([1, 2, 0]).predict([0, 0]), 0)
     self.assertEqual(Perceptron([1, 2, 0]).predict([1, 1]), 1)
     self.assertEqual(Perceptron([1, 2, 0]).predict([1, -1]), 0)
     self.assertEqual(Perceptron([1, 2, 0]).predict([-1, 1]), 1)
     self.assertEqual(Perceptron([1, 2, -4]).predict([1, 1]), 0)
     self.assertEqual(Perceptron([1, 2, -3]).predict([1, 1]), 0)
     self.assertEqual(Perceptron([1, 2, -2]).predict([1, 1]), 1)
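All seven assertions are consistent with a rule that treats the last weight as a bias and thresholds strictly at zero; a sketch of such a decision rule (an inference, not the original implementation):

def predict(weights, x):
    # weights[:-1] are the input weights, weights[-1] is the bias (assumed layout)
    total = sum(w * xi for w, xi in zip(weights[:-1], x)) + weights[-1]
    return 1 if total > 0 else 0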
Example No. 37
def runOneTest(seed=None):
    sys.stdout.write('.')
    sys.stdout.flush()
    rnd = random.Random()
    rnd.seed(seed)
    slope = rnd.uniform(-3, 3)
    intercept = rnd.uniform(-1,1)
    trainingSet = Perceptron.generatePoints(slope, intercept, TRAINING_EXEMPLARS, str(seed)+"training")
    p = Perceptron()
    trainingSucceeded, iterations = p.train(trainingSet, 1, MAX_ITER, ERROR_THRESHOLD)
    misclassifications = 0
    if trainingSucceeded:
        testSet = Perceptron.generatePoints(slope, intercept, TEST_VECTORS, str(seed)+"test")
        for vector, expected in testSet:
            result = p.evaluate(vector)
            if result != expected:
                misclassifications += 1

    return slope, intercept, trainingSucceeded, iterations, misclassifications
Example No. 38
    def __init__(self, input_nodes, learning_rate, epochs, output_nodes=10):
        self.input_nodes = input_nodes
        self.learning_rate = learning_rate
        self.epochs = epochs

        #Initialize group of perceptrons
        self.perceptrons = []
        for i in range(output_nodes):
            self.perceptrons.append(
                Perceptron(self.input_nodes, self.learning_rate))
Example No. 39
def main():

    # train data
    M, D, C = 6, 3, 3
    data = np.zeros((M, D+1))  # D features + label
    # last element is the label
    data[0] = [0.9, 0.1, 0, 0]
    data[1] = [1.9, 0.8, 0.9, 1]
    data[2] = [2, 0.9, 0.8, 2]
    data[3] = [1, 0.2, 0.1, 0]
    data[4] = [1.2, 0.1, 0.4, 1]
    data[5] = [1.6, 0.6, 0.6, 1]
    # train perceptron
    nn = Perceptron(D, C)
    nn.train(data)
    # test perceptron
    testData = np.asarray([1.2, 0.3, 0.8])
    print("testData is of class ", nn.evaluate(testData))
    plot(data, 'test', testData, D, C, nn.w, nn.w0)
Example No. 40
def init():
    layers = list()
    count = 0
    for i in args['<n>']:
        perceptrons = list()
        for j in range(i):
            perceptrons.append(Perceptron(count, args['--lam']))
            count += 1
        layers.append(perceptrons)
    return layers
Example No. 41
    def __init__(self, inputs, outputs, hidden_layers, hidden_numbers):
        self._inputs = []
        self._hidden = []
        self._outputs = []
        hid_num = []

        if isinstance(hidden_numbers, list):
            if len(hidden_numbers) != int(hidden_layers):
                # hidden_numbers is a list with incorrect length
                raise ValueError("List of hidden layer number of " + \
                                 "perceptrons has %d values, expected %d"
                                  % (len(hidden_numbers), int(hidden_layers)))
            else:
                # hidden_numbers is a list with correct length
                hid_num = hidden_numbers
        else:
            # hidden_numbers isn't a list, so create a list with that value*
            #                                        *(assuming it's an int)
            hid_num = [int(hidden_numbers)] * int(hidden_layers)

        # input perceptrons with only 1 input each
        for i in range(int(inputs)):
            self._inputs.append(Perceptron(1))

        # hidden layer perceptrons
        # self._hidden is a list of a list of perceptrons, with each index
        # having hid_num[i] perceptrons
        for i in range(int(hidden_layers)):
            hidden = []
            for j in range(int(hid_num[i])):
                if i == 0:
                    # First hidden layer has perceptrons with "inputs" inputs
                    hidden.append(Perceptron(int(inputs)))
                else:
                    # Other hidden layers have perceptrons with the number of
                    # the previous hidden layer perceptrons inputs
                    hidden.append(Perceptron(hid_num[i - 1]))
            self._hidden.append(hidden)

        # output layer has perceptrons with n inputs, n being the number of
        # last hidden layer's perceptrons
        for i in range(int(outputs)):
            self._outputs.append(Perceptron(hid_num[-1]))
Example No. 42
def step2_learning():
    ppn = Perceptron(eta=0.1)

    # Load X and y.
    data = step1_get_data()
    # Python returns multiple values as a tuple, so unpack them by index 0 and 1.
    X = data[0]
    y = data[1]

    # Train.
    ppn.fit(X, y)

    print(ppn.errors_)
    # Print the weights.
    print(ppn.w_)
    # Save the trained object.
    with open('perceptron.dat', 'wb') as fp:
        pickle.dump(ppn, fp)
    print("Training complete")
Example No. 43
 def test_constructor(self):
     test = False
     try:
         Perceptron("fail perceptron", ARGUMENTS, [0, 0, 0])
     except ValueError as e:
         logging.warning(e.__str__())
         test = True
     assert test
     assert (abs(self.perceptron.out(self.x) - 1.0) <
             EPSILON) != (abs(self.perceptron.out(self.x) - 0.0) < EPSILON)
Example No. 44
def simulation(x, test_range, step_size, file, n = 100, runs = 1000, dim = 2, learn_rate = 1):
    '''
    Runs a series of simulations of the perceptron on randomly generated feature vectors.
    Depending on which variable we are controlling for, the simulations fix the values for
    dimensionality, number of points, and learning rate (the c value).
    The controlled variable (x) is initialized to the low end of the test range and is
    repeatedly incremented by the step size.
    At each increment we run the perceptron (with weights/bias always initialized to zero) 1000 times.
    After each single run, we record the result (the number of perceptron iterations required
    for convergence) as a row in our dataframe.
    The results are saved to a csv.
    :param x: variable to control for, must be 'n', 'dim', or 'c'
    :param test_range: range of the variable to test
    :param step_size: how to increment the variable
    :param file: save destination for the csv
    :return: N/A
    '''
    # check for invalid x
    if x not in ['n', 'c', 'dim']:
        raise ValueError('Invalid parameter x')

    (low, high) = test_range
    val = low
    data = []
    plot = Plotter()

    while val < high:
        # Increment independent variable
        if x == 'n':
            n = val
        elif x == 'c':
            learn_rate = val
        elif x == 'dim':
            dim = val
        # Run perceptron 1000 times each on a randomly generated set of feature vectors
        for i in range(runs):
            features = plot.generate_points(n, dim)
            labels = plot.generate_labels_linear(features)
            model = Perceptron(dim, zeros=False)
            iterations = model.train(features, labels, c=learn_rate)
            data.append([n, dim, learn_rate, iterations])
        val += step_size

    # Move data to pandas dataframe and save
    df = pd.DataFrame(data, columns=['n features', 'dimensions', 'c', 'iterations'])
    df.to_csv(file, sep=',', index=False)
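A hypothetical invocation, sweeping the number of points with illustrative range, step, and file name:

simulation('n', test_range=(10, 200), step_size=10, file='iterations_vs_n.csv')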
Example No. 45
 def __init__(self, shape):
     """ Construct a backpropagation network
     :param shape: Where shape[0] is equal to the number of inputs
     and shape[-1] is equal to the number of possible labels"""
     self.layers = []
     for l in range(0, len(shape) - 1):
         layer = []
         for _ in range(shape[l + 1]):
             layer.append(Perceptron(shape[l]))
         self.layers.append(layer)
Example No. 46
def main():
    file_add = sys.argv[1]
    tst, val, tst_lbl, val_lbl = get_data(file_add)
    vec_size = len(tst[0])
    neuron = Perceptron(vec_size, 0.1)
    # train
    has_err = True
    iters = 0
    cnt_err = 0
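    # note: this loop terminates only when a full pass over the data makes no
    # errors, i.e. only if the training data is linearly separable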
    while has_err:
        has_err = False
        cnt_err = 0
        iters += 1
        for x, y in zip(tst, tst_lbl):
            p = neuron.input(x)
            # print('input:', x)
            # print('out:', p)
            # print('expected:', y)
            b = neuron.feedback(p, y, x)
            if b:
                cnt_err += 1
            has_err = b or has_err
        print(f"iteration {iters} finished (err: {cnt_err})")
    print(f"learned weights: {neuron.w} bias: {neuron.b}")
    # evaluate
    count = len(val)
    if count > 0:
        cnt_err = 0
        for x, y in zip(val, val_lbl):
            p = neuron.input(x)
            if p != y:
                cnt_err += 1
        ratio = cnt_err / count
        print(f"err: {cnt_err}/{count} = {ratio:.2f}")
    if vec_size == 2:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        plot_data(tst, tst_lbl, ax)
        x0 = np.min(tst, 0)[0]
        x1 = np.max(tst, 0)[0]
        plot_line(neuron.w, neuron.b, ax, x0, x1)
        plt.show()
Example No. 47
    def test_give_weights_random_value_when_create_perceptron(
        self,
        mock_uniform
    ):
        expected = [0.1, 0.2, 0.3]
        mock_uniform.side_effect = expected

        perceptron = Perceptron()
        actual = perceptron.get_weights()

        self.assertEqual(actual, expected)

        self.assertEqual(mock_uniform.call_count, 3)

        expected_calls = [
            call(-1, 1),
            call(-1, 1),
            call(-1, 1)
        ]
        self.assertEqual(mock_uniform.mock_calls, expected_calls)
Example No. 48
    def test_converges1(self):
        perceptron = Perceptron(initial_weights=[0.0, 0.0, 0.0], learning_rate=0.01)

        # Truth table of an OR function
        data = [
            ([0, 0], 0),
            ([0, 1], 1),
            ([1, 0], 1),
            ([1, 1], 1),
        ]

        converged = False

        while not converged:
            perceptron.train(data)

            converged = True

            for input, expected_outcome in data:
                converged = converged and perceptron.calculate_output(input) == expected_outcome
Example No. 49
 def __init__(self,
              w_size,
              num_epoch,
              learning_rate,
              num_class,
              decay=False):
     self.num_class = num_class
     self.perceptrons = [
         Perceptron(w_size, num_epoch, learning_rate, i, decay)
         for i in range(num_class)
     ]
Example No. 50
    def __init__(self, inputs, targets, nhidden, bias_value=-1):
        self.beta = 1
        self.eta = 0.1
        self.momentum = 0.0
        self.no_hidden = nhidden

        self.inputs = inputs
        self.no_inputs = len(inputs[0])
        self.targets = targets
        self.no_outputs = len(targets[0])

        #Initiate hidden layer
        self.hidden = []
        for i in range(self.no_hidden):
            self.hidden.append(Perceptron(self.no_inputs, self.beta, bias_value))

        #Initiate output layer
        self.output = []
        for i in range(self.no_outputs):
            self.output.append(Perceptron(self.no_hidden, self.beta, bias_value))
Example No. 51
    def __init__(self):
        """
        Load the Iris dataset
        """
        self.df = pd.read_csv(
            'https://archive.ics.uci.edu/ml/'
            'machine-learning-databases/iris/iris.data',
            header=None)
        self.df.tail()
        self.ppn = None

        # select setosa and versicolor
        self.y = self.df.iloc[0:100, 4].values
        self.y = np.where(self.y == 'Iris-setosa', -1, 1)

        # extract sepal length and petal length
        self.X = self.df.iloc[0:100, [0, 2]].values

        self.ppn = Perceptron(eta=0.1, n_iter=10)
        self.ppn.fit(self.X, self.y)
Example No. 52
def predicting_perceptron(training_inputs, training_outputs, day_of_week,
                          airline, origin_airport):
    """
    This function trains the neural network.
    """
    neuron = Perceptron(training_inputs, training_outputs)
    print("* Training the neural network...")
    neuron.train()

    print("\n# Neural network PREDICTION mode:")
    day_of_week = day_of_week
    origin_airport = origin_airport
    airline = airline

    origin_airport = ORIGIN_AIRPORT[origin_airport]
    airline = AIRLINE[airline]

    prediction = day_of_week, origin_airport, airline

    return round(neuron.think(prediction))
Example No. 53
def learn(values):
    global perceptrons
    del perceptrons[:]
    perceptrons = [[] for i in range(50)]
    i = 0
    for x in range(50):
        for y in range(50):
            perceptrons[x].append(Perceptron(i, values))
            i = i + 1

    print("done")
Example No. 54
def run(data=None):
    def get_input():

        name = input('Name: ')
        left = input('has left? ')
        unhappy = input('is unhappy? ')
        is_edu = input('is edu? ')
        is_associate = input('is associate? ')

        return {
            'left': left,
            'unhappy': unhappy,
            'is_edu': is_edu,
            'is_associate': is_associate,
            'raw': [left, unhappy, is_edu, is_associate]
        }, name

    if not data:
        data, name = get_input()
    else:
        name = data['name']

    if not data.get('raw'):
        data['raw'] = [
            data['left'], data['unhappy'], data['is_edu'], data['is_associate']
        ]

    p = Perceptron()
    train = gen_train_base()
    for sample in train:
        p.train(sample['raw'], sample['label'])

    prediction = p.guess(data['raw'])
    test = get_label(data)
    guessed_right = prediction == test
    print('\nRaw prediction:', prediction)
    print('Defined label:', test)
    print('{} is {} to join Assembléia\n'.format(
        name, 'able' if prediction == 1 else 'not able'))

    return guessed_right
Example No. 55
class LearningPerceptron(object):
    """Learning Perceptron: Compose of a Perceptron, train it and use it"""
    def __init__(self, name: str, input_size: int, lr: float):
        self.lr = lr
        self.perceptron = Perceptron(name, input_size)
        self.name = self.perceptron.name
        self.number_of_training = 0

    def train(self, x_input: [float], expected: float) -> int:
        """
        Train a perceptron

        :param x_input: An input to use for training
        :param expected: Expected value of x_input (aka label)
        :return: Total number of training steps performed so far
        """
        real_output = self.perceptron.out(x_input)
        diff = expected - real_output
        for i, x in enumerate(x_input):
            self.perceptron.w[i] += self.lr * x * diff
        self.perceptron.b += self.lr * diff
        self.number_of_training += 1
        return self.number_of_training

    def feed(self, x_input: [float]) -> float:
        """
        Feed the perceptron with an input and get the probability of belonging to the class

        :param x_input: An input
        :return: Probability [0, 1]
        """
        return self.perceptron.out(x_input)

    def get_weights(self) -> ([float], float):
        """
        Get weights of the perceptron and the bias

        :return: Tuple of list of weight and bias value
        """
        return self.perceptron.w, self.perceptron.b
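A hypothetical usage of LearningPerceptron, assuming the wrapped Perceptron exposes `out`, `w`, and `b` as above and that `out` returns a value in [0, 1]:

lp = LearningPerceptron('and-gate', input_size=2, lr=0.1)
for _ in range(20):  # a few passes over the AND truth table
    for x, label in [([0, 0], 0), ([0, 1], 0), ([1, 0], 0), ([1, 1], 1)]:
        lp.train(x, label)
print(lp.get_weights())
print(lp.feed([1, 1]))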
Example No. 56
    def test_perceptron_generated_data_min_weights(self):
        train_dataset = generate_classified_points(classes=[True, False], n=20, distance=50)

        perceptron = Perceptron(function=lambda x: x >= 0)

        perceptron.train(train_dataset)
        perceptron.plot(train_dataset)
        perceptron.count_errors(train_dataset)

        input('Enter to continue')
Example No. 57
    def test_does_not_converge(self):
        perceptron = Perceptron(initial_weights=[0.0, 0.0, 0.0], learning_rate=0.01)

        # Truth table of an XOR function
        # This should never converge
        data = [
            ([0, 0], 0),
            ([0, 1], 1),
            ([1, 0], 1),
            ([1, 1], 0),
        ]

        for i in range(1000000):
            perceptron.train(data)

        converged = True

        for input, expected_outcome in data:
            converged = converged and perceptron.calculate_output(input) == expected_outcome

        self.assertFalse(converged)
Example No. 58
    def test_perceptron_2(self):
        train_dataset = [
            ((1, -1), False),
            ((2, -1), False),
            ((1, -3), False),
            ((4, -4), False),
            ((3, -2), False),
            ((5, -2), False),
            ((3, -4), False),
            ((-1, 2), True),
            ((-2, 3), True),
            ((-3, 2), True),
            ((-4, 4), True),
            ((-4, 1), True),
            ((-2, 4), True),
            ((-3, 3), True),
        ]

        perceptron = Perceptron(function=lambda x: x >= 0)
        perceptron.train(train_dataset)  # run the training algorithm
        perceptron.plot(train_dataset)
        input('Enter to continue')

        v = perceptron.run((3, -2))  # a point from the False class
        self.assertEqual(v, 0)
Example No. 59
def train_perceptron(X, y):
	"""Training the perceptron model"""
	ppn = Perceptron(eta=0.1, n_iter=10)

	ppn.fit(X, y)

	plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
	plt.xlabel('Epochs')
	plt.ylabel('Number of updates')

	plt.tight_layout()
	# plt.savefig('./perceptron_1.png', dpi=300)
	plt.show()

	plot_decision_regions(X, y, classifier=ppn)
	plt.xlabel('sepal length [cm]')
	plt.ylabel('petal length [cm]')
	plt.legend(loc='upper left')

	plt.tight_layout()
	# plt.savefig('./perceptron_2.png', dpi=300)
	plt.show()
Example No. 60
def main(args):
    p = Perceptron(2)

    print('Initial state:')
    print(' ' * 7, str(p))
    print('-' * 50)

    epoch = 1
    xs = [(0, 0), (0, 1), (1, 0), (1, 1)]

    last_w = [1, 1]
    while calc_delta(p.w, last_w) > args.delta:
        last_w = p.w

        print('Epoch', epoch)
        for x in xs:
            p.train(x, args.function(x), args.alpha)
            print('   ->   ' + str(p))
        print()
        epoch += 1

    plot_contour(p)
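`calc_delta` is not shown; a plausible sketch returning the largest absolute per-weight change between epochs, consistent with the stopping test above:

def calc_delta(w_new, w_old):
    # hypothetical helper: largest absolute change between weight vectors
    return max(abs(a - b) for a, b in zip(w_new, w_old))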