コード例 #1
0
    def test_perceptron(self):
        """Verify the Perceptron training flag and the scalar return types of
        predict() and evaluate()."""
        model = Perceptron()

        # The _trained flag must start out False...
        self.assertEqual(
            model._trained,
            False,
            "Initially False. Change this property to True after train() is called",
        )
        # ...and flip to True once train() has run.
        model.train(np.array([[1, 1], [10, 1]]), np.array([1, 0]))
        self.assertEqual(
            model._trained,
            True,
            "Initially False. Change this property to True after train() is called",
        )

        # predict() must return a scalar float (numpy or builtin).
        prediction = model.predict(np.array([1, 1]))
        self.assertIn(
            type(prediction),
            (np.float64, float ),
            "Return type of predict() is np.float64 or float"
        )

        # evaluate() must likewise return a scalar float.
        score = model.evaluate(np.array([[1, 1], [10, 1]]), np.array([1, 0]))
        self.assertIn(
            type(score),
            (float, np.float64,),
            "Return type of evaluate() is np.float64 or float"
        )
コード例 #2
0
ファイル: perc.py プロジェクト: hhc2tech/AIprojects
def main():
    """Train a perceptron on a tiny hand-written dataset and classify one test point."""
    # M samples, D features, C classes; each row carries D features plus its label.
    M, D, C = 6, 3, 3
    samples = [
        [0.9, 0.1, 0, 0],
        [1.9, 0.8, 0.9, 1],
        [2, 0.9, 0.8, 2],
        [1, 0.2, 0.1, 0],
        [1.2, 0.1, 0.4, 1],
        [1.6, 0.6, 0.6, 1],
    ]
    data = np.zeros((M, D + 1))  # D + label
    for row, values in enumerate(samples):
        # last element is the label
        data[row] = values

    # Train the perceptron on the labelled rows.
    nn = Perceptron(D, C)
    nn.train(data)

    # Classify a single unlabelled feature vector and visualize the result.
    testData = np.asarray([1.2, 0.3, 0.8])
    print("testData is of class ", nn.evaluate(testData))
    plot(data, 'test', testData, D, C, nn.w, nn.w0)
コード例 #3
0
ファイル: problem7.py プロジェクト: zorts/CS1156x
def runOneTest(seed=None):
    """Run one seeded train/test round against a random target line.

    Returns (slope, intercept, trainingSucceeded, iterations, misclassifications);
    misclassifications stays 0 when training does not converge.
    """
    # Progress dot so long runs show activity.
    sys.stdout.write('.')
    sys.stdout.flush()

    # Seeded RNG so each round is reproducible from its seed.
    rng = random.Random()
    rng.seed(seed)
    slope = rng.uniform(-3, 3)
    intercept = rng.uniform(-1, 1)

    trainingSet = Perceptron.generatePoints(slope, intercept, TRAINING_EXEMPLARS, str(seed) + "training")
    p = Perceptron()
    trainingSucceeded, iterations = p.train(trainingSet, 1, MAX_ITER, ERROR_THRESHOLD)

    misclassifications = 0
    if trainingSucceeded:
        # Score the trained model on an independently generated test set.
        testSet = Perceptron.generatePoints(slope, intercept, TEST_VECTORS, str(seed) + "test")
        misclassifications = sum(
            1 for vector, expected in testSet if p.evaluate(vector) != expected
        )

    return slope, intercept, trainingSucceeded, iterations, misclassifications
コード例 #4
0
ファイル: main.py プロジェクト: atmafra/perceptron
# Truth-table inputs and the expected outputs of each boolean gate.
# F is the "false" level; T is presumably defined earlier in the file — TODO confirm.
F = 0.0
inputs = [[F, F], [T, F], [F, T], [T, T]]
dataset_and = {'inputs': inputs, 'outputs': [F, F, F, T]}
dataset_or = {'inputs': inputs, 'outputs': [F, T, T, T]}
dataset_1 = {'inputs': inputs, 'outputs': [F, T, F, T]}      # passes through input 1
dataset_not1 = {'inputs': inputs, 'outputs': [T, F, T, F]}   # negates input 1
dataset_2 = {'inputs': inputs, 'outputs': [F, F, T, T]}      # passes through input 2
dataset_not2 = {'inputs': inputs, 'outputs': [T, T, F, F]}   # negates input 2
dataset_xor = {'inputs': inputs, 'outputs': [F, T, T, F]}    # not linearly separable

if __name__ == '__main__':
    # Pick the gate to learn and build a 2-input tanh perceptron.
    training_dataset = dataset_and
    perceptron = Perceptron(input_dimension=2, activation_function=TanH())

    # Baseline evaluation with untrained weights.
    print('\n--> Before training')
    perceptron.evaluate(dataset=training_dataset)

    training_log = perceptron.train(dataset=training_dataset,
                                    epochs=10000,
                                    learning_rate=0.01)

    # Evaluation after gradient updates.
    print('\n--> After training')
    perceptron.evaluate(dataset=training_dataset)

    print()
    print('Final loss: {:0.6f}'.format(perceptron.loss))
    print('Final weights:', perceptron.weights)

    # training_log.plot_loss(log_scale=False)
    # training_log.plot_bias()
    # training_log.plot_weights()
コード例 #5
0
import pandas as pd

from activation_functions import SignFunction
from perceptron import Perceptron



# Training database (dataset-treinamento):

dataset = pd.read_csv('database/dataset-treinamento.csv', sep=';', decimal=',')
X = dataset.iloc[:, :3].values   # first three columns: input features
d = dataset.iloc[:, 3:].values   # remaining columns: desired outputs


# Build the perceptron (inputs, targets, learning rate, activation function) and train it.
perceptron = Perceptron(X, d, 0.01, SignFunction)
perceptron.train()


# Test database (dataset-teste):

dataset = pd.read_csv('database/dataset-teste.csv', sep=';', decimal=',')
X_teste = dataset.iloc[:, :3].values

# Classify each test sample and report its output.
for x in X_teste:
    print(f'Input: {x},Output: {perceptron.evaluate(x)}')
コード例 #6
0
def test_perceptron():
    """Smoke test: build a 5-input leaky-ReLU perceptron, dump its state, evaluate once."""
    model = Perceptron(5, activation.ReLU(is_leaky=True))
    model.show_values()
    print(model.evaluate([1, 2, 3, 5, 7]))
コード例 #7
0
# Run five independent trainings; collect weights/epochs per run and the
# per-sample predictions of each trained model.
Resultado = teste.copy()
Treinamentos = pd.DataFrame()
for i in range(5):

    # Fresh sign-activated perceptron for every realization.
    activation_function = Perceptron.sign_function
    perceptron = Perceptron(X, Y, 0.01, activation_function)
    wi, wf, ep = perceptron.train(200000)

    # Initial/final weights rounded for display, plus the epoch count.
    wi = [round(x, 4) for x in wi]
    wf = [round(x, 4) for x in wf]
    w = np.concatenate(([wi], [wf], [[ep]]), axis=1)

    Treino = pd.DataFrame(data=w,
                          columns=[
                              'Wi0', 'Wi1', 'Wi2', 'Wi3', 'Wf0', 'Wf1', 'Wf2',
                              'Wf3', 'N_Epocas'
                          ],
                          index=[f'T{i+1}'])
    Treinamentos = pd.concat([Treinamentos, Treino])

    # One prediction per test row, appended as column T<i+1>.
    respostas = [perceptron.evaluate(sample) for sample in teste.values]
    resultado_parcial = pd.DataFrame(data=respostas, columns=[f'T{i+1}'])
    Resultado = pd.concat([Resultado, resultado_parcial], axis=1)

print(Treinamentos)

print(Resultado)
コード例 #8
0
from classifier import BinaryClassifier
from perceptron import Perceptron, AveragedPerceptron
from naive_bayes import NaiveBayes
from utils import read_data, build_vocab
import utils
from config import args

if __name__ == '__main__':
    # Build the vocabulary, then load the train/test splits.
    filepath = '../data/given/'
    build_vocab(filepath, vocab_size=args.vocab_size)
    train_data, test_data = read_data(filepath)

    # Fit and score each classifier in turn, printing identical metric lines.
    # Constructed inside the loop so construct/fit/evaluate order matches per model.
    runs = (
        (Perceptron, 'Perceptron Results:'),
        (AveragedPerceptron, '\nAveraged Perceptron Results:'),
        (NaiveBayes, '\nNaive Bayes Performance:'),
    )
    for model_cls, header in runs:
        classifier = model_cls(args)
        classifier.fit(train_data)
        acc, prec, rec, f1 = classifier.evaluate(test_data)
        print(header)
        print('Accuracy: %.2f, Precision: %.2f, Recall: %.2f, F1: %.2f'%(acc, prec, rec, f1))
コード例 #9
0
ファイル: main.py プロジェクト: kauemenezes/Mestrado
#     if dataset[i, 4] == 1:
#         dataset[i, 4] = 0
#
# for i in range(len(dataset)):
#     if dataset[i, 4] == 2:
#         dataset[i, 4] = 1

# Accuracy collected from each realization.
hit_rates = []

# Single realization for now; widen the range to average over more runs.
for j in range(0, 1):
    print("realization %d" % j)
    perceptron = Perceptron(2)
    train_X, train_y, test_X, test_y = perceptron.train_test_split(dataset)
    # Generating convergence chart.
    # Fix: renamed `dict` -> `error_history`, which shadowed the builtin `dict`.
    error_history = perceptron.train(train_X, train_y)
    # plt.figure(j)
    # plt.plot(np.array(list(error_history.keys())), np.array(list(error_history.values())), marker="^")
    # plt.ylabel('sum of errors')
    # plt.xlabel('epochs')

    # Score this realization on the held-out split.
    predictions = perceptron.predict(test_X)
    hit_rates.append(perceptron.evaluate(test_y, predictions))
    print(perceptron.confusion_matrix(test_y, predictions))
    perceptron.plot_decision_boundaries(train_X, train_y, test_X, test_y, j)

# Summary statistics across all realizations.
print('hit rates: {}'.format(hit_rates))
print('accuracy: {}'.format(np.mean(hit_rates)))
print('std: {}'.format(np.std(hit_rates)))
# plt.show()
Classifier.show_plot_decision_boundaries()