import math

import numpy as np
import matplotlib.pyplot as plt

# RBF and neuralNetwork are classes from the original project and are not
# shown here; hypothetical sketches inferred from the call sites appear
# after Example No. 4 and at the end of this listing.


def main():
    ## Generate data and define inputs
    mu = np.arange(0, 2 * math.pi, 0.1)
    sigma = 0.1
    x_train = np.arange(0, 2 * math.pi, 0.1)
    x_test = np.arange(0.05, 2 * math.pi, 0.1)

    #! LEAST SQUARE
    ## init rbf class
    dim = mu.shape[0]
    rbf_LS = RBF(dim)

    ## Generate data (only the square-wave targets are used here)
    _, square = rbf_LS.generateData(x_train)
    _, square_test = rbf_LS.generateData(x_test)

    ## Init and train.
    weights = rbf_LS.initWeights()
    weights, error = rbf_LS.train_LS(x_train, square, weights, mu, sigma)

    ## Evaluation
    y_test = rbf_LS.evaluation_LS(x_test, weights, mu, sigma)
    residual_error = np.sum(abs(y_test - square_test)) / y_test.shape[0]
    print('Residual error:', residual_error)
    plt.figure('Least Square Error')
    plt.plot(x_test, y_test, label='Approximation')
    plt.plot(x_test, square_test, label='True value')
    plt.title('Least Square Error')
    plt.legend()
    plt.show()
Example No. 2
def square_LS(x_test, x_train, mu, sigma):
    dim = mu.shape[0]
    rbf = RBF(dim)
    _, square = rbf.generateData(x_train, noise=True)
    _, square_test = rbf.generateData(x_test, noise=True)

    ## Init and train.
    weights = rbf.initWeights()
    weights, error = rbf.train_LS(x_train, square, weights, mu, sigma)

    ## Evaluation
    y_test = rbf.evaluation_LS(x_test, weights, mu, sigma)
    residual_error = np.sum(abs(y_test - square_test)) / y_test.shape[0]
    return residual_error, y_test, square_test
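
# A typical call, mirroring the grids used in main() above (the sigma
# value below is only an illustrative assumption):
#
#   mu = np.arange(0, 2 * math.pi, 0.2)
#   x_train = np.arange(0, 2 * math.pi, 0.1)
#   x_test = np.arange(0.05, 2 * math.pi, 0.1)
#   err, y_hat, y_true = square_LS(x_test, x_train, mu, sigma=1.0)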
Example No. 3
def sinus_delta(x_test, x_train, mu, sigma):
    dim = mu.shape[0]
    rbf = RBF(dim)

    ## Generate data
    sinus, _ = rbf.generateData(x_train, noise=True)
    sinus_test, _ = rbf.generateData(x_test, noise=True)
    sinus_test = sinus_test.reshape((sinus_test.shape[0], 1))
    ## Init and train.
    weights = rbf.initWeights()
    weights, _, _ = rbf.train_DELTA(x_train, weights, mu, sigma)

    ## Evaluation
    y_test = rbf.evaluation_DELTA(x_test, weights, mu, sigma)
    residual_error = np.sum(abs(y_test - sinus_test)) / y_test.shape[0]
    return residual_error, y_test, sinus_test
Example No. 4
def square_delta(x_test, x_train, mu, sigma):
    dim = mu.shape[0]
    rbf = RBF(dim)
    _, square = rbf.generateData(x_train, noise=True)
    _, square_test = rbf.generateData(x_test, noise=True)
    square_test = square_test.reshape((square_test.shape[0], 1))

    ## Init and train.
    weights = rbf.initWeights()
    weights, _, _ = rbf.train_DELTA(x_train,
                                    weights,
                                    mu,
                                    sigma,
                                    sinus_type=False)

    ## Evaluation
    y_test = rbf.evaluation_DELTA(x_test, weights, mu, sigma)
    residual_error = np.sum(abs(y_test - square_test)) / y_test.shape[0]
    return residual_error, y_test, square_test
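
# ----------------------------------------------------------------------
# The examples above rely on an RBF class from the original project that
# is not shown. The sketch below is a minimal, hypothetical reconstruction
# inferred from the call sites; the target functions sin(2x)/square(2x),
# the noise level, the learning rate, and the epoch count are assumptions.

class RBF:
    """RBF network with Gaussian basis functions on [0, 2*pi]."""

    def __init__(self, dim):
        self.dim = dim  # number of hidden RBF units

    def generateData(self, x, noise=False):
        # The two target functions used throughout: sin(2x) and its
        # square-wave counterpart sign(sin(2x)).
        sinus = np.sin(2 * x)
        square = np.where(sinus >= 0, 1.0, -1.0)
        if noise:  # zero-mean Gaussian noise; std 0.1 is an assumption
            sinus = sinus + np.random.normal(0, 0.1, x.shape)
            square = square + np.random.normal(0, 0.1, x.shape)
        return sinus, square

    def initWeights(self):
        return np.random.normal(0, 1, (self.dim, 1))

    def _phi(self, x, mu, sigma):
        # Gaussian basis matrix: one row per sample, one column per unit.
        return np.exp(-(x[:, None] - mu[None, :]) ** 2 / (2 * sigma ** 2))

    def train_LS(self, x, y, weights, mu, sigma):
        # Batch least squares: solve phi @ w = y for w.
        phi = self._phi(x, mu, sigma)
        weights = np.linalg.lstsq(phi, y.reshape(-1, 1), rcond=None)[0]
        error = np.mean((phi @ weights - y.reshape(-1, 1)) ** 2)
        return weights, error

    def evaluation_LS(self, x, weights, mu, sigma):
        # square_LS above uses the result without reshaping, so return 1-D.
        return (self._phi(x, mu, sigma) @ weights).flatten()

    def train_DELTA(self, x, weights, mu, sigma, sinus_type=True,
                    eta=0.01, epochs=100):
        # On-line delta rule; targets are generated internally, which is
        # why the call sites pass no y. eta and epochs are assumptions.
        sinus, square = self.generateData(x, noise=True)
        y = sinus if sinus_type else square
        errors = []
        for _ in range(epochs):
            epoch_error = 0.0
            for xi, yi in zip(x, y):
                phi = self._phi(np.array([xi]), mu, sigma).reshape(-1, 1)
                e = yi - (phi.T @ weights).item()
                weights = weights + eta * e * phi
                epoch_error += abs(e)
            errors.append(epoch_error / x.shape[0])
        return weights, errors, list(range(epochs))

    def evaluation_DELTA(self, x, weights, mu, sigma):
        # sinus_delta/square_delta compare against column vectors, so
        # keep the (n, 1) shape here.
        return self._phi(x, mu, sigma) @ weights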
def main():

    # RBF
    mu = np.arange(0, 2 * math.pi, 0.2)
    dim = mu.shape[0]
    rbf = RBF(dim)
    weights = rbf.initWeights()

    # NN
    nodes = mu.shape[0]
    bias = False
    NN = neuralNetwork(bias=bias, layers=[nodes, 1])
    NN.initWeights()

    ## Dataset for RBF
    x_train = np.arange(0, 2 * math.pi, 0.1)
    x_train = np.random.permutation(x_train)
    split = 0.1
    nr_datapoints = x_train.shape[0]

    # Carve off the validation split before truncating the training set,
    # otherwise the two sets overlap.
    n_valid = round(split * nr_datapoints)
    x_valid = x_train[:n_valid]
    x_train = x_train[n_valid:]
    x_test = np.arange(0.05, 2 * math.pi, 0.1)

    # SINUS
    # y_train,_ = rbf.generateData(x_train, noise=True)
    # y_valid,_ = rbf.generateData(x_valid, noise=True)
    # y_test,_ = rbf.generateData(x_test, noise=True)
    # SQUARE
    _, y_train = rbf.generateData(x_train, noise=True)
    _, y_valid = rbf.generateData(x_valid, noise=True)
    _, y_test = rbf.generateData(x_test, noise=True)

    ## Dataset for Neural Network
    x_train_NN = x_train.reshape((1, x_train.shape[0]))
    y_train_NN = y_train.reshape((1, y_train.shape[0]))

    x_valid_NN = x_valid.reshape((1, x_valid.shape[0]))
    y_valid_NN = y_valid.reshape((1, y_valid.shape[0]))

    x_test_NN = x_test.reshape((1, x_test.shape[0]))
    y_test_NN = y_test.reshape((1, y_test.shape[0]))

    ## Training the NN
    epoch_vec, loss_vec_train, loss_vec_val = NN.train(x_train=x_train_NN,
                                                       y_train=y_train_NN,
                                                       x_valid=x_valid_NN,
                                                       y_valid=y_valid_NN,
                                                       epochs=2000000,
                                                       eta=0.001,
                                                       alpha=0)
    print("Patient=1 triggerad at epoch: ", epoch_vec[-1])
    plt.figure("Learning Curve")
    plt.plot(epoch_vec, loss_vec_train)
    plt.plot(epoch_vec, loss_vec_val)
    plt.legend(("Training loss", "Validation loss"))

    ## Training the RBF
    sigma = 0.5
    weights, error = rbf.train_LS(x_train, y_train, weights, mu, sigma)

    ## Evaluation
    y_test_RBF = rbf.evaluation_LS(x_test, weights, mu, sigma)
    residual_error = np.sum(abs(y_test_RBF - y_test)) / y_test.shape[0]
    print('RBF residual error:', residual_error)

    output = NN.classify(x_test_NN)
    plt.figure("Results")
    plt.title('Single Hidden Layer VS RBF')
    plt.plot(x_test, y_test_RBF, label='RBF')
    plt.plot(x_test_NN[0], output[0], label='Single Hidden Layer')
    plt.plot(x_test, y_test, label='True Value', color='black')
    plt.xlabel('x')
    plt.ylabel('f(x)')
    plt.legend()

    plt.show()
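
# ----------------------------------------------------------------------
# The neuralNetwork class is likewise part of the original project. The
# sketch below is a hypothetical two-layer MLP inferred from the call
# sites: inputs/targets are (1, n_samples) arrays, layers=[hidden, 1],
# training stops early once validation loss rises (patience = 1), and
# alpha is treated as a momentum term. Activations and weight shapes
# are assumptions; bias handling is omitted since main() passes bias=False.

class neuralNetwork:

    def __init__(self, bias=False, layers=(8, 1)):
        self.bias = bias
        self.hidden, self.out = layers

    def initWeights(self):
        # One input dimension; weights follow the (dim, n_samples) layout.
        self.W1 = np.random.normal(0, 0.5, (self.hidden, 1))
        self.W2 = np.random.normal(0, 0.5, (self.out, self.hidden))

    def _forward(self, x):
        h = np.tanh(self.W1 @ x)    # hidden activations, (hidden, n)
        return h, self.W2 @ h       # linear output, (1, n)

    def classify(self, x):
        return self._forward(x)[1]

    def train(self, x_train, y_train, x_valid, y_valid,
              epochs=1000, eta=0.001, alpha=0):
        n = x_train.shape[1]
        dW1 = np.zeros_like(self.W1)
        dW2 = np.zeros_like(self.W2)
        epoch_vec, loss_train, loss_val = [], [], []
        best_val = np.inf
        for epoch in range(epochs):
            h, o = self._forward(x_train)
            err = o - y_train  # (1, n)
            # Backprop for tanh hidden layer, linear output, MSE loss,
            # with momentum scaled by alpha.
            grad_W2 = err @ h.T / n
            grad_W1 = ((self.W2.T @ err) * (1 - h ** 2)) @ x_train.T / n
            dW2 = alpha * dW2 - eta * grad_W2
            dW1 = alpha * dW1 - eta * grad_W1
            self.W2 += dW2
            self.W1 += dW1

            epoch_vec.append(epoch)
            loss_train.append(np.mean(err ** 2))
            val_loss = np.mean((self.classify(x_valid) - y_valid) ** 2)
            loss_val.append(val_loss)
            if val_loss > best_val:  # patience = 1: first increase stops
                break
            best_val = val_loss
        return epoch_vec, loss_train, loss_val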