Example #1
def adaline_model(row_vectors, col_vectors, eta, iterations):
    '''
    Create an Adaline model and fit it to the given training data.
    '''
    model = Adaline(eta, iterations)
    model.learn(row_vectors, col_vectors)
    return model
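The examples on this page construct Adaline objects with slightly different signatures, but the core algorithm is the same: batch gradient descent on a sum-of-squared-errors cost with a linear activation. Below is a minimal sketch of a class compatible with Example #1; the names eta, n_iter, learn, and predict are assumptions matching that snippet, not any one repository's actual API.

import numpy as np

class Adaline:
    def __init__(self, eta=0.01, n_iter=10):
        self.eta = eta        # learning rate
        self.n_iter = n_iter  # passes over the training set

    def learn(self, X, y):
        # one weight per feature, plus a bias term at index 0
        self.w_ = np.zeros(1 + X.shape[1])
        self.cost_ = []
        for _ in range(self.n_iter):
            errors = y - self.net_input(X)   # linear activation, no threshold
            # batch gradient-descent step on the SSE cost
            self.w_[1:] += self.eta * X.T.dot(errors)
            self.w_[0] += self.eta * errors.sum()
            self.cost_.append((errors ** 2).sum() / 2.0)
        return self

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        # threshold the linear output to get class labels
        return np.where(self.net_input(X) >= 0.0, 1, -1)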
Example #2
def main():

    X, Y = loadIris()
    stdX = standardize(X)

    classifier = Adaline(learnRate=0.01, maxEpochs=20)
    classifier.fit(stdX, Y)
    plotDecisionBoundary(classifier, stdX, Y)
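Standardizing matters here: Adaline's gradient descent converges far more reliably when features have zero mean and unit variance. The standardize helper is not shown in the snippet; assuming it is the usual z-score transform, a minimal version looks like this:

import numpy as np

def standardize(X):
    # z-score each feature column: zero mean, unit variance
    return (X - X.mean(axis=0)) / X.std(axis=0)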
Example #3
def rodarAdaline(inputTreinamento, outputTreinamento):
    log.print(">> Adaline")
    adalines = [None]*5
    for e in range(5):
        log.print(f">> Treinamento {e + 1}")
        adaline = Adaline(log, len(inputTreinamento[0]))
        adaline.train(inputTreinamento, outputTreinamento, e + 1)
        adalines[e] = adaline
    testar(adalines, False)
Example #4
def handleOnClick(event):
    if event.xdata is None or event.ydata is None:
        return  # the click landed outside the axes
    x = round(event.xdata, 2)
    y = round(event.ydata, 2)

    if event.button is MouseButton.LEFT:  # store and draw an inactive point
        # patrones.append( [1, x, y, 0] )
        neuronas.append(Adaline([1, x, y], 0))
        plt.plot(x, y, marker='o', color='red')
        plt.draw()
    elif event.button is MouseButton.RIGHT:  # store and draw an active point
        # patrones.append( [1, x, y, 1] )
        plt.plot(x, y, marker='o', color='green')
        neuronas.append(Adaline([1, x, y], 1))
        plt.draw()
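The handler above only fires once it is registered with a figure. A minimal hookup sketch using matplotlib's standard mpl_connect API; the axis limits are arbitrary, and neuronas is the list the handler appends to:

import matplotlib.pyplot as plt
from matplotlib.backend_bases import MouseButton

neuronas = []  # Adaline units created by the click handler above

fig, ax = plt.subplots()
ax.set_xlim(0, 10)
ax.set_ylim(0, 10)
fig.canvas.mpl_connect('button_press_event', handleOnClick)
plt.show()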
Example #5
 def __init__(self, n, hid, rate=0.3, thresh=1.2):
     self.count = n + 1          # inputs plus bias
     self.hid_count = hid + 1
     self.rate = rate
     self.thresh = thresh
     # one Adaline per hidden unit (a Madaline-style layer)
     self.units = [Adaline(n, rate, thresh) for _ in range(hid)]
     self.edges = [1] * (hid + 1)
Example #6
def adaline_helper(data, label=None, eta=0.005, multi=False, iterations=1):
    if label is None:
        label = class_label

    f = 5  # fold-value

    # tracker variables for performance/timing
    perf = []

    # get attribute names from the tuning set
    tune = data['tune']
    attrs = tune.drop(columns=[label]).columns.values

    print('\n======== ADALINE ========')
    print('eta:\t\t', eta)
    print('iterations:\t', iterations)
    print()
    for i in range(f):
        print('\n>> FOLD #{0}'.format(i + 1))
        folds = data['folds'].copy()
        holdout = folds[i]
        folds.pop(i)  # remove holdout fold
        training = pd.concat(folds)  # concatenate remaining folds into the training set
        accuracy = 0

        # build the Adaline model, depending on whether there are multiple classes (k > 2)
        if multi:
            ada = Adaline(label, eta, iterations)
            w_map = ada.build(training)
            accuracy_map = ada.test_multi_class_helper(holdout, w_map)

            # grab the accuracies (values) per class and sum them for total accuracy
            accuracy_sum = np.sum(list(accuracy_map.values()))

            # divide by number of class options (keys) to determine average accuracy
            # for the multi-class scenario
            accuracy = accuracy_sum / len(accuracy_map)
        else:
            ada = Adaline(label, eta, iterations)
            w_map = ada.build(training)
            accuracy = ada.test(holdout, w_map['main'])

        # track results
        perf.append(accuracy)
        print('accuracy:\t{:.0%}'.format(accuracy))

    print('------------')
    print('\n---- ADALINE SUMMARY ----')
    print_helper_classifier(perf, f)
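The pop-and-concat fold handling above is easy to factor out into a reusable helper. A sketch of the same split, assuming folds is a list of DataFrames (holdout_split is our name, not part of the source):

import pandas as pd

def holdout_split(folds, i):
    # fold i is the holdout; the remaining folds are concatenated for training
    training = pd.concat(folds[:i] + folds[i + 1:])
    return training, folds[i]

# usage mirroring the loop above:
# training, holdout = holdout_split(data['folds'], i)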
Example #7
import sys
import time

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

start_time = time.time()  # referenced below when reporting elapsed time


def main():
    classifier_name = sys.argv[1]
    datapath = sys.argv[2]
    df = pd.read_csv(datapath)
    X = df.iloc[0:100, [0, 2]].values
    y = df.iloc[0:100, 4].values
    y = np.where(y == 'Iris-setosa', 1, -1)
    
    if classifier_name == 'Perceptron':
        from perceptron import Perceptron
        model = Perceptron(eta=0.01, n_iter=10)
        model.learn(X, y)
        print('errors for this classification are:\n', model.errors)
        plt.plot(range(1, len(model.errors) + 1), model.errors, marker='o')
        model.testdatairis('test.csv')
        print("Accuracy of Perceptron is:", model.accuracy)
        print("--- %s seconds ---" % (time.time() - start_time))

    elif classifier_name == 'Adaline':
        from adaline import Adaline
        model = Adaline(eta=0.01, n_iter=20)
        # standardize both features before training
        X_std = np.copy(X)
        X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
        X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()
        model.learn(X_std, y)
        print('sum of errors in each iteration for this classification are:\n', model.cost)
        plt.plot(range(1, len(model.cost) + 1), model.cost, marker='o')
        model.testdatairis('test.csv')
        print("Accuracy of Adaline is:", model.accuracy)
        print("--- %s seconds ---" % (time.time() - start_time))

    elif classifier_name == 'SGD':
        from sgd import SGD
        model = SGD(eta=0.01, n_iter=15)
        model.learn(X, y)
        print('sum of errors in each iteration for this classification are:\n', model.cost)
        plt.plot(range(1, len(model.cost) + 1), model.cost, marker='o')
        model.testdatairis('test.csv')
        print("Accuracy of SGD is:", model.accuracy)
        print("--- %s seconds ---" % (time.time() - start_time))

    else:
        print("invalid classifier")
        return

    plt.title(classifier_name)
    plt.xlabel('iteration')
    plt.ylabel('errors')
    plt.show()
    return model
Example #8
def adaline_implementation(targets_train, targets_test, patterns_train,
                           patterns_test, plot, d3):
    a = Adaline()
    atargets_train, atargets_test = a.transmute_targets(
        targets_train, targets_test)
    max_epochs = int(input('Maximum number of epochs: '))
    learning_rate = float(input('Learning rate: '))
    min_mse = float(input('Minimum error: '))
    weights = a.train(max_epochs, patterns_train,
                      atargets_train, learning_rate, min_mse, plot, d3)
    # if plot == False:
    print(weights)
    guesses = a.test(weights, patterns_test, atargets_test)
    a.plot_accuracy(atargets_test, guesses)
Example #9
def main():
    sources_list = [[1, 1], [-1, 1], [1, -1], [-1, -1]]
    targets = [1, 1, 1, -1]

    square_errors = {}

    learning_rates = [1e-4, 1e-3, 1e-2, 1e-1, 2e-1, 3e-1, 0.35, 0.4]
    weight_adjustment_tolerance = None
    square_error_tolerance = None
    max_cycles = 50

    network = Adaline(activation_function=activation_function)
    for sources, target in zip(sources_list, targets):
        network.add_sources(sources, target)

    for learning_rate in learning_rates:
        network.learning_rate = learning_rate

        network.train(random_starting_weights=False,
                      max_cycles=max_cycles,
                      weight_adjustment_tolerance=weight_adjustment_tolerance,
                      square_error_tolerance=square_error_tolerance)

        print(
            f">>Learning rate: {learning_rate}\n\n"
            f"Final weights:\n"
            f"{[float(f'{weigth:.5f}') for weigth in network.neuron.weights]}\n"
            f"Final bias:\n"
            f"{network.neuron.bias:.5f}\n\n"
            f"Cycles: {network.cycles}\n"
            f"Final square error: {network.total_square_error_by_cycle[-1]:.5f}\n\n\n"
        )

        square_errors[learning_rate] = network.total_square_error_by_cycle

    curves = []
    for learning_rate, square_error in square_errors.items():
        curves.append(
            plt.plot(range(len(square_error)),
                     square_error,
                     '--',
                     linewidth=2,
                     label=str(learning_rate))[0])
    plt.ylim([-0.1, 4])
    plt.legend(handles=curves)
    plt.show()
Example #10
def treinar_rede(dataset, rn=None):
    x, d = dataset

    if rn is None:
        rn = Adaline(qnt_entradas=len(x[0, :]))

    rn.treino(x, d, verbose=0, guardar_historico=1)

    rn.plotar_curva_aprendizado('Training (%d training epochs)' % rn.epoca)

    # rn.plotar_animacao(x,d,titulo='Treino (%d épocas de treinamento)'%rn.epoca)

    # rn.salvar_animacao(x,d,titulo='Treino (%d épocas de treinamento)'%rn.epoca,nome_arquivo='reta_1/animacao.mp4')

    print(rn.epoca)

    return rn
Example #11
# Shuffle the samples
x = np.arange(len(d))
np.random.shuffle(x)
X_new = X[x]
d_new = d[x]


#print(d)
#print(d_new)
X_base_de_treinamento = X_new[:155,:]
d_base_de_treinamento = d_new[:155,:]
X_base_de_testes = X_new[155:,:]
d_base_de_testes = d_new[155:,:]

p = Adaline(len(X_base_de_treinamento[0]), epochs=1000)
p.train(X_base_de_treinamento, d_base_de_treinamento)
p.printMatrizparaMatriz(X_base_de_testes,d_base_de_testes)
p.printValoresParaPlanilha()
#p.printWeights

#p.restartWeights
#plt.xlim(-1,3)
#plt.ylim(-1,3)
#for i in range(len(d)):
#    if d[i] == 1:
#        plt.plot(X[i, 0], X[i, 1], 'ro')
#    else:
#        plt.plot(X[i, 0], X[i, 1], 'bo')
       
#f = lambda x: (p.weights[0]/p.weights[2]) - (p.weights[1]/p.weights[2] * x)
Example #12
        data = preprocess(args.dataset)
    else:
        data = preprocess(args.dataset, makebinary=True)

    if data is None:
        print('Dataset unrecognized.')
        exit()

    X, y = data['train'] 
    Xs, ys = data['test']

    # Based on input, call classifiers
    if   args.classifier == 'perceptron':
        model = Perceptron(args.eta, args.iters)
    elif args.classifier == 'adaline':
        model = Adaline(args.eta, args.iters)
    elif args.classifier == 'sgd':
        model = SGD(args.eta, args.iters)
    elif args.classifier == 'ovr':
        model = OVR(data['classes'], args.eta, args.iters)

    model.fit(X, y)
    res = model.predict(Xs)

    matches = 0

    if args.classifier == 'ovr':
        accuracy = []
        res  = res[1]
        clas = res[0]
        print(res)
Example #13
for learning_rate in [0.2, 0.02, 0.002, 0.0002, 0.00002, 0.000002]:
    print("Testing learning rate = %f" % learning_rate)
    data_indices = [idx for idx in range(data_instances.shape[0])]
    # 10-fold cross validation
    fold_size = data_instances.shape[0] // 10
    total_performance = 0.0
    for holdout_fold_idx in range(10):
        # training_indices = data_indices - holdout_fold indices
        training_indices = np.array(
            np.setdiff1d(
                data_indices,
                data_indices[fold_size * holdout_fold_idx:
                             fold_size * holdout_fold_idx + fold_size]))
        # test_indices = holdout_fold indices
        test_indices = np.array([
            i for i in range(fold_size * holdout_fold_idx,
                             fold_size * holdout_fold_idx + fold_size)
        ])

        model = Adaline(20.0, learning_rate)
        # Train the model
        model.train(data_instances[training_indices])
        # Test performance on the held-out fold
        predictions = model.predict(data_instances[test_indices, :-1])
        total_performance += \
            sum(predictions == data_instances[test_indices, -1]) / \
            float(test_indices.shape[0])
    print("Average overall classification rate: %f" % (total_performance / 10))
Example #14
import logging

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from mlxtend.plotting import plot_decision_regions
from adaline import Adaline

logging.basicConfig(level=logging.DEBUG)

df = pd.read_csv('../data/iris.data', header=None)

# setosa and versicolor
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)

# sepal length and petal length
X = df.iloc[0:100, [0, 2]].values

# learning rate 0.01, plotted on a log scale
ada = Adaline(epochs=10, eta=0.01).train(X, y)
plt.plot(range(1, len(ada.cost_) + 1), np.log10(ada.cost_), marker='o')
plt.xlabel('Iterations')
plt.ylabel('log(Sum-squared-error)')
plt.title('Adaline - Learning rate 0.01')
plt.show()

# learning rate 0.0001
ada = Adaline(epochs=10, eta=0.0001).train(X, y)
plt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')
plt.xlabel('Iterations')
plt.ylabel('Sum-squared-error')
plt.title('Adaline - Learning rate 0.0001')
plt.show()

Example #15
timeSamples = utils.getFileData(f"data/{filePrefix}_t", (1))

xSamples = utils.getFileData(f"data/{filePrefix}_x",
                             tuple(range(1, xDimension + 1)))
if xDimension == 1:
    xSamples = xSamples.reshape(len(xSamples), 1)

adalineXSamples = utils.addConstantTerm(xSamples)

ySamples = utils.getFileData(f"data/{filePrefix}_y", (1))

trainIndexes, testIndexes = separateIndexesByRatio(len(timeSamples), 0.7)

# %% Initialize and Train Adaline
adaline = Adaline([0] * (xDimension + 1), 0.1)

xTrain = adalineXSamples[trainIndexes]
yTrain = ySamples[trainIndexes]
adaline.train(xTrain, yTrain, tol, maxIterations)

# %% Test
xTest = adalineXSamples[testIndexes]
yTest = ySamples[testIndexes]
testResult = adaline.test(xTest, yTest)
print(f"Mean Squared Error: {testResult}")

# %% Plot
adalineApproxYArr = adaline.evaluate(adalineXSamples)

fig = make_subplots(x_title="t", y_title="y")
Example #16
from scipy.stats import linregress
import matplotlib.pyplot as plt


def main():
    xs = [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0]
    ys = [2.26, 3.8, 4.43, 5.91, 6.18, 7.26, 8.15, 9.14, 10.87, 11.58, 12.55]

    a_regression, b_regression, correlation_coefficient, _, regression_standard_error = linregress(
        xs, ys)
    regression_equation = f"y={a_regression:.5f}*x+{b_regression:.5f}"
    ys_regression = [a_regression * x + b_regression for x in xs]

    determination_coefficient = correlation_coefficient**2

    print(f"Regression equation: {regression_equation}")
    print(f"r = {correlation_coefficient:.5f}")
    print(f"r² = {determination_coefficient:.5f}")
    print(f"σ = {regression_standard_error:.5f}\n")

    learning_rate = 0.0015
    list_max_cycles = [100, 200, 500, 1000]
    random_starting_weights = False
    weight_adjustment_tolerance = None
    square_error_tolerance = None

    network = Adaline(learning_rate=learning_rate)
    for source, target in zip(xs, ys):
        network.add_sources([source], target)

    adaline_plots = []

    for max_cycles in list_max_cycles:
        print(f"Max cycles: {max_cycles}\n--------------------------")
        network.train(random_starting_weights=random_starting_weights,
                      max_cycles=max_cycles,
                      weight_adjustment_tolerance=weight_adjustment_tolerance,
                      square_error_tolerance=square_error_tolerance,
                      verbose=False)

        a_adaline, b_adaline = network.neuron.weights[0], network.neuron.bias
        adaline_equation = f"y={a_adaline:.5f}*x+{b_adaline:.5f}"
        ys_adaline = [a_adaline * x + b_adaline for x in xs]

        total_square_error = sum([(y - y_line)**2
                                  for y, y_line in zip(ys, ys_adaline)])

        adaline_standard_error = (total_square_error / len(ys))**0.5

        print(f"Adaline equation: {adaline_equation}\n")

        print(
            f"Difference for a coefficient: {abs(a_adaline - a_regression):.5f}"
        )
        print(
            f"Difference for b coefficient: {abs(b_adaline - b_regression):.5f}"
        )
        print(f"σ = {adaline_standard_error}\n-----------------------\n")

        adaline_plots.append(
            plt.plot(xs,
                     ys_adaline,
                     linestyle='--',
                     linewidth=3,
                     label=f"Cycles: {max_cycles}",
                     zorder=1)[0])

    regression_plot, = plt.plot(xs,
                                ys_regression,
                                color='blue',
                                linestyle='-',
                                linewidth=5,
                                label=f"Regression: {regression_equation}",
                                zorder=0)

    scatter_plot = plt.scatter(xs,
                               ys,
                               color='black',
                               marker='x',
                               s=80,
                               label='Source points',
                               zorder=2)
    plt.legend(handles=[scatter_plot, *adaline_plots, regression_plot])

    plt.show()
Example #17
def _train(eta_field, epoch_field, neuron='perceptron', sqre_field=None):
    try:
        eta = float(eta_field.get())
        epoch_limit = int(epoch_field.get())
        sqre = 0 if sqre_field is None else float(sqre_field.get())
        with open('bulk_data.json', 'r+') as file:
            file.seek(0)
            json.dump(data_set, file)

        args = {'bulk_data': data_set, 'weights': weights}
        # If eta, epoch_limit, or the desired quadratic error is zero,
        # the neuron should fall back to its default values, so those
        # arguments are simply left out of the initialization
        if eta != 0:
            args['eta'] = eta
        if epoch_limit != 0:
            args['epoch_limit'] = epoch_limit
        if neuron == 'adaline' and sqre != 0:
            args['sqre'] = sqre

        try:
            if neuron == 'perceptron':
                trainer = Perceptron(**args)
                trainer.process()
                layout.perceptron_weights = trainer.weights
                layout.perceptron_trained = True
            else:
                trainer = Adaline(**args)
                trainer.process()
                layout.adaline_weights = trainer.weights
                layout.adaline_trained = True
        except AttributeError as error:
            messagebox.showerror(error, 'Provided data not found!')

        if not layout.adaline_trained or not layout.perceptron_trained:
            l = layout.ax.lines.pop(2)
            wl = weakref.ref(l)
            del l
        x = np.linspace(-5, 5, 100)
        if neuron == 'perceptron':
            line_color = 'blue'
        elif neuron == 'adaline':
            line_color = 'red'
        for line in trainer.lines:
            y = (line[0] - (line[1] * x)) / line[2]
            lines = layout.ax.plot(x, y, color=line_color)
            l = lines.pop()
            wl = weakref.ref(l)
            layout.canvas.draw()
            l.remove()
            del l
        y = (trainer.weights[0][0] -
             (trainer.weights[0][1] * x)) / trainer.weights[0][2]
        layout.ax.plot(x, y, color=line_color)
        messagebox.showinfo(
            '{} training has finished'.format(neuron.title()),
            'The solution was found in the epoch number {}'.format(
                trainer.current_epoch))
        refresh_button.config(state='normal')
        window_error(trainer.current_epoch, trainer.error_freq)
    except ValueError as error:
        messagebox.showerror(
            error,
            'Input values must be float (for eta & quadratic error) and integer (for epoch limit)!'
        )
Example #18
    ax1.plot(errors_d1[i], color=COLORS[i], label='eta = ' + str(etas[i]))
ax1.title.set_text('Learning Rate (Dataset 1)')

for i in range(len(errors_d2)):
    ax2.plot(errors_d2[i], color=COLORS[i], label='eta = ' + str(etas[i]))
ax2.title.set_text('Learning Rate (Dataset 2)')

plt.legend()
plt.show()

# b. ADALINE
print('=== ADALINE =====')
# - DATASET 1
errors_d1, etas = [], [0.001, 0.003, 0.005, 0.007, 0.009]
for eta in etas:
    adal_d1 = Adaline(2)
    print('(D1) Training (eta =', eta, ')')
    errors_d1.append(adal_d1.train(x1, y1, eta))
print('(D1) FINISHED\n===')

# - DATASET 2
errors_d2, etas = [], [0.001, 0.003, 0.005, 0.007, 0.009]
for eta in etas:
    adal_d2 = Adaline(4)
    print('(D2) Training (eta =', eta, ')')
    errors_d2.append(adal_d2.train(x2, y2, eta))
print('(D2) FINISHED\n===')

# - plotting the error curves for each learning rate
fig = plt.figure()
ax1 = fig.add_subplot(121)
Example #19
# Split the dataset into training and test sets
trainingDataset = shuffledDataset[0:int(np.floor(len(shuffledDataset)*0.75))]
testDataset = shuffledDataset[int(np.floor(len(shuffledDataset)*0.75)):]

# Split trainingDataset into inputs and outputs
trainInputs = trainingDataset[:, 0:(len(trainingDataset[0])-1)]
trainOutputs = trainingDataset[:, (len(trainingDataset[0])-1):]

# Split testDataset into inputs and outputs
testInputs = testDataset[:, 0:(len(testDataset[0])-1)]
testOutputs = testDataset[:, (len(testDataset[0])-1):]

# Create the Adaline
a = Adaline(len(trainInputs[0]), epochs=1000, learning_rate=0.0025, precision=0.000001)

# Save the initial weights
oldWeights = ';'.join(['%.8f' % num for num in a.weights])

# Train the Adaline
qntEpochs = a.train(trainInputs, trainOutputs)

# Save the trained weights
newWeights = ';'.join(['%.8f' % num for num in a.weights])
Example #20
from adaline import Adaline
import dataset

adaline = Adaline()

print("Training....")

m_error = 1
iteration = 0
while m_error > 1e-17 and iteration < 30000:

    m_error = 0

    for data in dataset.mini_random_test_data:
        adaline.set_x(data.x)
        adaline.set_y(data.y)

        y = adaline.calc_y()
        e = data.y - y
        adaline.update(e)

        iteration += 1
        m_error += abs(e)  # accumulate absolute error

    m_error = m_error / len(dataset.mini_random_test_data)

print()
print("Training completed")
print("Dataset size:", len(dataset.mini_random_test_data))
print("Number of iterations", iteration)
adaline.print_me()
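Example #20 trains one sample at a time, so adaline.update(e) presumably applies the per-sample LMS (delta) rule. A self-contained sketch of that rule under those assumptions; lms_update and eta are our names, not part of the snippet's class:

import numpy as np

def lms_update(w, b, x, e, eta=0.01):
    # delta rule: w <- w + eta * e * x ;  b <- b + eta * e
    return w + eta * e * np.asarray(x), b + eta * e

w, b = np.zeros(2), 0.0
x, d = np.array([1.0, -1.0]), 1.0
e = d - (np.dot(w, x) + b)   # error on one sample
w, b = lms_update(w, b, x, e)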
Example #21
import numpy as np
from adaline import Adaline

x_train = np.random.randn(1000, 2)
x_test = np.random.randn(100, 2)
w = np.array([2, 16])
b = 18
y_train = np.dot(x_train, w) + b
y_test = np.dot(x_test, w) + b

adaline = Adaline(x_train.shape[1], 1e-3)

epochs = 10
batch_size = 10
for epoch in range(epochs):
    for batch_idx in range(int(np.ceil(x_train.shape[0] / batch_size))):
        batch_start_idx = batch_idx * batch_size
        batch_end_idx = batch_start_idx + batch_size
        if batch_end_idx > x_train.shape[0]:
            batch_end_idx = x_train.shape[0]

        adaline.train(x_train[batch_start_idx:batch_end_idx],
                      y_train[batch_start_idx:batch_end_idx])

print("weights:", adaline.weights)
print("bias:", adaline.bias)
print("root mean squared error on training set:",
      adaline.root_mean_squared_error(x_train, y_train))
print("root mean squared error on testing set:",
      adaline.root_mean_squared_error(x_test, y_test))
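The batch-index arithmetic above can be wrapped in a small generator; numpy slicing already clips the final, shorter batch, so the explicit bounds check becomes unnecessary. A sketch (iterate_minibatches is our name, not part of the adaline module):

import numpy as np

def iterate_minibatches(X, y, batch_size):
    # yield successive (X, y) slices in order
    for start in range(0, X.shape[0], batch_size):
        yield X[start:start + batch_size], y[start:start + batch_size]

# equivalent training loop for the example above:
# for epoch in range(epochs):
#     for xb, yb in iterate_minibatches(x_train, y_train, batch_size):
#         adaline.train(xb, yb)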
Example #22
# X = np.random.uniform(0, 1, 100)
# dataset = Classifier.generate_dataset_one(3, 5, X)
# no_of_inputs = 1

X1 = np.random.uniform(0, 1, 100)
X2 = np.random.uniform(0, 1, 100)
dataset = Classifier.generate_dataset_two(3, 5, 7, X1, X2)
no_of_inputs = 2

dictionary = {}
dictionary['mse'] = []
dictionary['rmse'] = []

for j in range(0, 1):  # a single realization; widen the range for repeated runs
    print("realization %d" % j)
    adaline = Adaline(0.01)
    train_X, train_y, test_X, test_y = adaline.train_test_split(dataset)
    adaline.fit(no_of_inputs, np.array(train_X), np.array(train_y))

    adaline.calculate_error(np.array(test_X), np.array(test_y))
    dictionary['mse'].append(adaline.mse_)
    dictionary['rmse'].append(adaline.rmse_)
    # adaline.plot_decision_boundaries_one(train_X, train_y, test_X, test_y, j)
    adaline.plot_decision_boundaries_two(train_X, train_y, test_X, test_y, j,
                                         dataset)
#
print('mean square error: {}'.format(dictionary['mse']))
print('root mean square error: {}'.format(dictionary['rmse']))
print('mean mse: {}'.format(np.mean(dictionary['mse'])))
print('mean rmse: {}'.format(np.mean(dictionary['rmse'])))
print('std mse: {}'.format(np.std(dictionary['mse'])))
Example #23
    plt.plot(e_count, history, label='alpha: {}'.format(lr))


if __name__ == '__main__':

    np.random.seed(42)

    alpha = 0.01
    epochs = 20
    k = 5

    filename = 'housing.data'
    df = pre.clean_ugly_dataset(filename)
    df = pre.estandarizar_datos(df)
    cross = cross_v(df, k)
    clf = Adaline(alpha, epochs)

    #X_train, y_train, X_test, y_real = next(cross)
    #history_ = clf.fit(X_train, y_train)

    for fold in range(k):

        X_train, y_train, X_test, y_real = next(cross)
        #print(X_train)
        print()
        print('Fold {}'.format(fold))
        print()
        history_ = clf.fit(X_train, y_train)

        plot_error(history_, epochs, fold)
Example #24
 def __init__(self, n_epochs=2000, learning_rate=0.001):
     Adaline.__init__(self, n_epochs, learning_rate)
Example #25
secondGroupInput = np.random.normal(secondGroupMean, secondGroupSigma,
                                    (nSamplesPerGroup, 2))
secondGroupInput = utils.addConstantTerm(secondGroupInput)

secondGroupOutput = [-1] * nSamplesPerGroup

inputData = np.concatenate((firstGroupInput, secondGroupInput))
outputData = np.concatenate((firstGroupOutput, secondGroupOutput))

trainIndexes, testIndexes = separateIndexesByRatio(2 * nSamplesPerGroup,
                                                   trainSamplesRatio)
random.shuffle(trainIndexes)

# %% Initialize and Train Adaline

adaline = Adaline([0] * (adalineDimension), 0.1, lambda x: 1 if x >= 0 else -1)

xTrain = inputData[trainIndexes]
yTrain = outputData[trainIndexes]
adaline.train(xTrain, yTrain, tol, maxIterations)

# %% Test
xTest = inputData[testIndexes]
yTest = outputData[testIndexes]
testResult = adaline.test(xTest, yTest)
print(f"Mean Squared Error: {testResult}")

# %% Plot
adalineApproxYArr = adaline.evaluate(inputData)

weights = adaline.getWeights()
Example #26
        if noise[i] == 1:
            copy[i] = 1 if copy[i] == 0 else 0

    return copy.reshape(data.shape)


# contains logical representation of states of cells
squares = np.zeros((cell_res_h, cell_res_w))
rectangles = []  # contains interface rectangles
buttons = []  # contains interface buttons
perceptrons = []  # contains perceptrons

# create Adaline units (used here as perceptrons)
labels = []
for i in range(10):
    _perceptron = Adaline(i, cell_res_w * cell_res_h)
    perceptrons.append(_perceptron)

# prepare training data
training_data = [np.ravel(num) for num in number]
for i in range(10):
    _labels = [0 for _ in range(10)]
    _labels[i] = 1
    labels.append(_labels)


class Button:
    def __init__(self, text, pos, color, function):
        self.text = text
        self.pos = pos
        self.color = color
Example #27
### Plot the graph
### red ----> Setosa (-1)
### blue ----> Versicolor (1)
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
plt.scatter([sum(r) for index, r in df.iterrows()],
            x[:, 3],
            c=y,
            cmap=cm_bright)
plt.scatter(None, None, color='r', label='Setosa')
plt.scatter(None, None, color='b', label='Versicolor')
plt.legend()
plt.title('Dataset visualization (Iris flowers)')
plt.savefig('train.png')

# Adaline com 4 entradas
adaline = Adaline(4)
# Treinamento
adaline.train(x, y)

## Test 1
A = [0.4329, -1.3719, 0.7022, -0.8535]  # Versicolor (1)
predict = adaline.predict(A)
print('## Test 1')
print('Input: ', A)
print('Expected class: Versicolor (1)')
if predict == 1:
    print('Prediction: Versicolor (1)')
else:
    print('Prediction: Setosa (-1)')
#=> 1
Example #28
from init_obj import create_universe, prepare_data, show_universe, test_model
from adaline import Adaline
from adalineSGD import AdalineSGD
import numpy as np

groups = create_universe()
X, Y = prepare_data(groups)

# show_universe(groups)

y = np.where(Y == 'A', 1, -1)

model = Adaline(0.0001, 50)
model.fit(X, y)

# stochastic

model2 = AdalineSGD(0.0001, 50)

model2.fit(X, y)

test_model(X, model, model2)
Example #29
min_max_scaler.fit(dataset)
dataset = min_max_scaler.transform(dataset)

mse = np.zeros((20, 1))
rmse = np.zeros((20, 1))
mean_time = 0
#cost = []
for i in range(20):
    X_train, X_test, Y_train, Y_test = train_test_split(dataset[:, :2],
                                                        dataset[:, 2],
                                                        test_size=0.80)
    Y_train = Y_train.reshape((Y_train.shape[0], 1))
    Y_test = Y_test.reshape((Y_test.shape[0], 1))

    start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
    adaline = Adaline(eta=0.01, n_iter=200)
    adaline.fit(X_train, Y_train)
    Y_hat = adaline.predict(X_test)
    mean_time += (time.perf_counter() - start_time) / 20

    mse[i] = ((Y_test - Y_hat)**2).mean(axis=0)
    rmse[i] = mse[i]**(1. / 2)
    #cost.append(adaline.error)

print("Mean execution time", mean_time)
print("Standard Deviation (MSE)", np.std(mse, axis=0))
print("Standard Deviation (RMSE)", np.std(rmse, axis=0))
'''
fig, ax = plt.subplots()
plt.plot(range(1, len(cost[0]) + 1), cost[0], "o-")
plt.title("Cost")
Example #30
def main():
    if len(sys.argv) != 2:
        print('You should give path to the data file in the argument')
        return
    file_add = sys.argv[1]
    tst, val, tst_lbl, val_lbl = get_data(file_add)
    vec_size = len(tst[0])
    neuron = Adaline(vec_size, 0.1)
    # train
    lr_curve = []
    cutoff = 1000
    iters = 0
    test_data_size = len(tst_lbl)
    while cutoff > iters:
        iters += 1
        err = 0
        for x, y in zip(tst, tst_lbl):
            p = neuron.input(x)
            t = heavyside(p)
            if t != y:
                err += 1
        tmp = err / test_data_size
        lr_curve.append(tmp)
        for x, y in zip(tst, tst_lbl):
            p = neuron.input(x)
            # activation function
            t = sigmoid(p)
            # print('input:', x)
            # print('out:', p)
            # print('expected:', y)
            neuron.feedback(t, y, x)
        print(f"iteration {iters} finished)")
    print(f"learned weights: {neuron.w} bias: {neuron.b}")
    plt.figure()
    plt.plot(lr_curve)
    plt.show()
    # evaluate
    count = len(val)
    if count > 0:
        TP = 0
        FP = 0
        TN = 0
        FN = 0
        for x, y in zip(val, val_lbl):
            p = neuron.input(x)
            t = heavyside(p)
            if t == y:
                if t == 1:
                    TP += 1
                else:
                    TN += 1
            else:
                if t == 1:
                    FP += 1
                else:
                    FN += 1
        if (TP + FP) != 0:
            precision = TP / (TP + FP)
            print(f"Precision: {precision:.2f}")
        if (TP + FN) != 0:
            recall = TP / (TP + FN)
            print(f"Recall: {recall:.2f}")
    # plot
    if vec_size == 2:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        plot_data(tst, tst_lbl, ax)
        x0 = np.min(tst, 0)[0]
        x1 = np.max(tst, 0)[0]
        plot_line(neuron.w, neuron.b, ax, x0, x1)
        plt.show()