Example #1
def adaline_implementation(targets_train, targets_test, patterns_train,
                           patterns_test, plot, d3):
    a = Adaline()
    # read the training hyperparameters from the user
    max_epochs = int(input('Maximum number of epochs: '))
    learning_rate = float(input('Learning rate: '))
    min_mse = float(input('Minimum error: '))
    # train on the training split, then evaluate on the held-out test split
    weights = a.train(max_epochs, patterns_train,
                      targets_train, learning_rate, min_mse, plot, d3)
    guesses = a.test(weights, patterns_test, targets_test)
    a.plot_accuracy(targets_test, guesses)
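
Example #1 assumes an Adaline class exposing train, test, and plot_accuracy, which is not shown above. The following is only a minimal sketch of what such a class could look like, using the standard ADALINE (LMS/delta-rule) update with the bias folded into the weight vector; the method names and signatures mirror the calls in the example, everything else is an assumption.

import numpy as np


class Adaline:
    """Minimal ADALINE sketch: linear output trained with the LMS/delta rule."""

    def train(self, max_epochs, patterns, targets, learning_rate, min_mse,
              plot=False, d3=False):
        # plot/d3 flags are accepted to match the call above but ignored in this sketch
        patterns = np.asarray(patterns, dtype=float)
        targets = np.asarray(targets, dtype=float)
        weights = np.zeros(patterns.shape[1] + 1)  # last entry acts as the bias
        for _ in range(max_epochs):
            outputs = patterns @ weights[:-1] + weights[-1]
            errors = targets - outputs
            weights[:-1] += learning_rate * patterns.T @ errors  # batch delta-rule update
            weights[-1] += learning_rate * errors.sum()
            if np.mean(errors ** 2) <= min_mse:  # stop once the MSE target is reached
                break
        return weights

    def test(self, weights, patterns, targets):
        # targets is accepted to match the call above but not needed for prediction
        outputs = np.asarray(patterns, dtype=float) @ weights[:-1] + weights[-1]
        return np.where(outputs >= 0, 1, -1)  # bipolar threshold on the linear output

    def plot_accuracy(self, targets, guesses):
        accuracy = np.mean(np.asarray(guesses) == np.asarray(targets))
        print(f'Accuracy: {accuracy:.2%}')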
Example #2
def adaline_helper(data, label=None, eta=0.005, multi=False, iterations=1):
    if label is None:
        label = class_label

    f = 5  # fold-value

    # tracker variables for performance/timing
    perf = []

    # get starting attrs for building tree
    tune = data['tune']
    attrs = tune.drop(columns=[label]).columns.values

    print('\n======== ADALINE ========')
    print('eta:\t\t', eta)
    print('iterations:\t', iterations)
    print()
    for i in range(f):
        print('\n>> FOLD #{0}'.format(i + 1))
        folds = data['folds'].copy()
        holdout = folds[i]
        folds.pop(i)  # remove holdout fold
        training = pd.concat(folds)  # concat remaining folds to create the training set
        accuracy = 0

        # build the Adaline model, depending on whether there are multiple classes (k > 2) or not
        if multi:
            ada = Adaline(label, eta, iterations)
            w_map = ada.build(training)
            accuracy_map = ada.test_multi_class_helper(holdout, w_map)

            # grab the accuracies (values) per class and sum them for total accuracy
            accuracy_sum = np.sum(list(accuracy_map.values()))

            # divide by number of class options (keys) to determine average accuracy
            # for the multi-class scenario
            accuracy = accuracy_sum / len(accuracy_map)
        else:
            ada = Adaline(label, eta, iterations)
            w_map = ada.build(training)
            accuracy = ada.test(holdout, w_map['main'])

        # track results
        perf.append(accuracy)
        print('accuracy:\t{:.0%}'.format(accuracy))

    print('------------')
    print('\n---- ADALINE SUMMARY ----')
    print_helper_classifier(perf, f)
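
Example #2 relies on a data dict with a 'tune' DataFrame and a 'folds' list of DataFrames, plus a print_helper_classifier summary function, none of which are shown. Below is a rough sketch of that supporting code, inferred from the calls above; the function names, the exact split strategy, and the summary output are assumptions, not the original project's implementation.

import pandas as pd


def prepare_folds(df, k=5, tune_fraction=0.1, seed=42):
    """Shuffle a DataFrame, hold out a tuning slice, and split the rest into k folds."""
    shuffled = df.sample(frac=1, random_state=seed).reset_index(drop=True)
    tune_size = int(len(shuffled) * tune_fraction)
    tune = shuffled.iloc[:tune_size]
    remainder = shuffled.iloc[tune_size:]
    folds = [remainder.iloc[i::k] for i in range(k)]  # round-robin split into k folds
    return {'tune': tune, 'folds': folds}


def print_helper_classifier(perf, f):
    """Report the mean accuracy across the f cross-validation folds."""
    print('folds:\t\t', f)
    print('mean accuracy:\t{:.0%}'.format(sum(perf) / len(perf)))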
Example #3
trainIndexes, testIndexes = separateIndexesByRatio(2 * nSamplesPerGroup,
                                                   trainSamplesRatio)
random.shuffle(trainIndexes)

# %% Initialize and Train Adaline

# initial weights, learning rate, and a bipolar step activation
adaline = Adaline([0] * adalineDimension, 0.1, lambda x: 1 if x >= 0 else -1)

xTrain = inputData[trainIndexes]
yTrain = outputData[trainIndexes]
adaline.train(xTrain, yTrain, tol, maxIterations)

# %% Test
xTest = inputData[testIndexes]
yTest = outputData[testIndexes]
testResult = adaline.test(xTest, yTest)
print(f"Mean Squared Error: {testResult}")

# %% Plot
adalineApproxYArr = adaline.evaluate(inputData)

weights = adaline.getWeights()


def hyperPlan(x):
    # decision boundary: solve w0*x + w1*y + w2 = 0 for y
    return -(weights[0] * x + weights[2]) / weights[1]


xPlan = np.linspace(0.5, 5.5, 100)
yPlan = np.vectorize(hyperPlan)(xPlan)
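
Example #3 computes the boundary line (xPlan, yPlan) and the Adaline outputs but stops before drawing anything. One plausible way to finish the plot with matplotlib, assuming inputData is an (n, 2+) NumPy array whose first two columns are the plotted features and outputData holds the bipolar (+1/-1) class labels:

import matplotlib.pyplot as plt

plt.scatter(inputData[:, 0], inputData[:, 1],
            c=np.ravel(outputData), cmap='bwr', edgecolors='k')  # samples colored by class
plt.plot(xPlan, yPlan, 'k--', label='Adaline decision boundary')  # line from hyperPlan
plt.xlabel('x1')
plt.ylabel('x2')
plt.legend()
plt.show()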