Example #1
def findParameters(trials=100):
        # Assumes module-level train/traint/alldata/alltgts arrays,
        # `import rbf`, and numpy's average() in scope.
        for runs in range(1,trials + 1):
                net = rbf.rbf(train,traint,10,0,0)
                net.rbftrain(train,traint,0.00001 + runs * 0.00001,2000)
                output = net.rbffwd(alldata)
                err = average(abs(output - alltgts))
                print "Params: (" + str(0.00001 + runs * 0.00005) + ", 2000) / Error: " + str(err)

                net = rbf.rbf(train,traint,10,0,0)
                numruns = 2000 + runs * 10
                net.rbftrain(train,traint,0.00001,numruns)
                output = net.rbffwd(alldata)
                err = average(abs(output - alltgts))
                print "Params: (0.00001, " + str(numruns) + ") / Error: " + str(err)
Example #2
def predict(train, traint, test, testt):

        for res in range(0,2000):
                tgts = []
                for runs in range(0,20):
                        net = rbf.rbf(train,traint,16,0,0)
                        nbf = 9000
                        net.rbftrain(train,traint,0.00001,nbf)
                        output = net.rbffwd(alldata)
                        err = average(abs(output - alltgts))

                        tgts.append('14\n1599\n')
                        #f = open('res-' + str(runs) + '.csv','w')
                        #f.write("Error for this run: " + str(err) + "\n")
                        cnt = 0
                        for e in output:
                                #for i in range(1,20):
                                #       f.write(str(e[0]) + str(','))
                                #f.write('\n')
                                tgts.append(str(average(e)) + '\n')
                                cnt += 1

                        #f.close()
                with open('tgts-' + str(err)[0:5] + '.csv', 'w') as f:
                        for each in tgts:
                                f.write(each)
Example #3
def findRbfs(trials = 100):
        for runs in range(1,trials + 1):
                nrbfs = 1
                net = rbf.rbf(train,traint,nrbfs + runs,0,0)

                net.rbftrain(train,traint,0.00001,2000)
                output = net.rbffwd(alldata)
                err = average(abs(output - alltgts))
                print "Num of RBFS: " + str(nrbfs + runs) + " / Error: " + str(err)
Example #4
 def setUp(self):
     self.rbf_test1 = rbf.rbf(4,
                              0.5,
                              eta=0.25,
                              nIter=100,
                              thresh_type="logistic")
     self.data = np.matrix([[0, 0], [0, 1], [1, 0], [1, 1]])
     self.labels = np.array([0, 1, 1, 0])
     self.rbf_test1.trainWeights(self.data, self.labels)
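A companion test method for this fixture might look like the sketch below; `predict` is an assumed method name on the class under test, so adjust it to the real API:

 def test_xor(self):
     # Hypothetical check: after training on XOR, rounding the network
     # outputs should reproduce the labels (`predict` is assumed).
     preds = self.rbf_test1.predict(self.data)
     np.testing.assert_array_equal(np.round(preds).ravel(), self.labels)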
Example #5
 def setSurrogate(self,
                  type_surrogate='RBF',
                  min_snapshots=5,
                  max_snapshots=100,
                  max_degree=2,
                  initial_iteration=None,
                  no_keep=0,
                  expensive=0,
                  type_kernel=0,
                  type_solver=0):
     self.min_snapshots = min_snapshots
     self.max_snapshots = max_snapshots
     if type_surrogate == 'SCM':
         self.surrogate = scm(max_degree)
     else:
         self.surrogate = rbf(initial_iteration, no_keep, expensive,
                              type_kernel, type_solver)
Example #6
 def __init__(self, X, y, kernel, params=None, optim=False):
     print("Initialising {} GP...".format(kernel))
     if params is None:
         params = {}
         params['ln_noise'] = np.random.uniform(-1,1)
         params['ln_signal'] = np.random.uniform(-1,1)
         params['ln_length'] = np.random.uniform(-1,1)
     if kernel.lower() == "rbf":
         self.kernel = rbf(X,params)
     elif kernel.lower() == "matern":
         self.kernel = matern(X,params)
     self.X = X
     self.y = y
     self.N = X.shape[0]
     self.KMat(X,params)
     if optim:
         print("Optimising GP...")
         self.optimize()
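Assuming this `__init__` belongs to a class named `GP` (the class name is not shown in the snippet), construction might look like the following; the name and random data are assumptions for illustration:

import numpy as np

# Hypothetical usage; `GP` and the synthetic data are assumptions.
X = np.random.randn(20, 2)
y = np.random.randn(20, 1)
gp = GP(X, y, kernel="rbf")                      # random hyperparameters drawn inside
gp_opt = GP(X, y, kernel="matern", optim=True)   # also runs optimize()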
Example #7
def CV_test(data, xs, ys, data_test, k):
    ptxm = []
    ptym = []
    tptxm = []
    tptym = []
    for i in data:
        x = 2 * (i[0] - xs.min()) / xs.ptp() - 1
        y = 2 * (i[1] - ys.min()) / ys.ptp() - 1
        ptx = [x, y]
        ptxm.append(ptx)
        label = i[2]
        ptym.append(label)
    for i in data_test:
        x = 2 * (i[0] - xs.min()) / xs.ptp() - 1
        y = 2 * (i[1] - ys.min()) / ys.ptp() - 1
        ptx = [x, y]
        tptxm.append(ptx)
        label = i[2]
        tptym.append(label)
    nn = rbf(k)
    cv_error = 0.0
    in_error = 0.0
    for i in range(len(data)):
        _ptxm = list(ptxm)
        _ptym = list(ptym)
        test_x = np.array([_ptxm.pop(i)])
        test_y = _ptym.pop(i)
        x_matrix = np.array(_ptxm)
        y_matrix = np.array(np.transpose(_ptym))
        nn.train(x_matrix, y_matrix)
        if nn.predict(test_x) * test_y < 0:
            cv_error += 1
    cv_error /= len(data)
    ptxm = np.array(ptxm)
    ptym = np.array(ptym)
    nn.train(ptxm, ptym)
    tptxm = np.array(tptxm)
    tptym = np.array(tptym)
    y_test = nn.predict(tptxm)
    test_error = (np.multiply(y_test, tptym) < 0).sum() / float(len(tptym))
    y_test = nn.predict(ptxm)
    in_error = (np.multiply(y_test, ptym) < 0).sum() / float(len(ptym))  # in-sample error
    return test_error, cv_error
Example #8
def predictKfold(train, traint, test, testt, runs, datafile):
        err = []
        cnt = 0
        pred = []
        tgts = []
        
        allpred = []
        alltgt = []
        # per datafile params
        if datafile[0] == 'w':
                rowcnt = '4898'
                outfn = 'ww-'
        else:
                rowcnt = '1599'        
                outfn = 'rw-'
        
        for res in range(0,runs):
                tgts.append('13\n' + rowcnt + '\n')
                pred.append('14\n' + rowcnt + '\n')
                for folds in range(0,5):                        
                        net = rbf.rbf(train[folds],traint[folds],22,0,0)
                        nbf = 2000
                        net.rbftrain(train[folds],traint[folds],0.00001,nbf)
                        output = net.rbffwd(test[folds])
                        err.append(average(abs(output - testt[folds])))                        
                        for e in output:
                                pred.append(str(e[0]) + str('\n'))
                                allpred.append(e[0])
                        for e in testt[folds]:
                                tgts.append(str(e[0])[0] + str('\n'))
                                alltgt.append(int(str(e[0])[0]))
        print(average(err))
        with open(outfn + 'pred-' + str(average(err))[0:5] + '.csv', 'w') as f:
                for each in pred:
                        f.write(each)
        with open(outfn + 'tgts-' + str(average(err))[0:5] + '.csv', 'w') as f:
                for each in tgts:
                        f.write(each)
Example #9
def train(k, xx, yy, data):
    ptxm = []
    ptym = []
    for i in data:
        ptx = [i[0], i[1]]
        ptxm.append(ptx)
        label = i[2]
        ptym.append(label)
    nn = rbf(k)
    ptxm = np.array(ptxm)
    ptym = np.array(ptym)
    #print(ptxm)
    #print(ptym)
    nn.train(ptxm, ptym)
    zz = []
    #print(nn.predict(ptxm))
    x_len, y_len = np.shape(xx)
    tptx = []
    for i in range(x_len):
        for j in range(y_len):
            px = xx[i][j]
            py = yy[i][j]
            ptx = [px, py]
            tptx.append(ptx)
    #print(tptx)
    z = nn.predict(np.array(tptx))
    #print(z)
    for i in range(x_len):
        z0 = []
        for j in range(y_len):
            x = z[i * y_len + j]
            if x < 0:
                z0.append(-1)
            else:
                z0.append(1)
        zz.append(z0)
    zz = np.array(zz)
    return zz
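A hypothetical way to use `train()` to plot its -1/+1 decision grid; the synthetic data, grid bounds, and k value below are assumptions, and the `rbf` class from this snippet's module must be importable:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
pts = rng.uniform(-1, 1, (40, 2))
labels = np.where(pts[:, 0] * pts[:, 1] > 0, 1, -1)
data = np.column_stack((pts, labels))      # rows of (x, y, label)

xx, yy = np.meshgrid(np.linspace(-1, 1, 50), np.linspace(-1, 1, 50))
zz = train(4, xx, yy, data)                # -1/+1 class per grid cell
plt.contourf(xx, yy, zz, alpha=0.3)
plt.scatter(pts[:, 0], pts[:, 1], c=labels)
plt.show()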
Example #10
def rBasis():

    pima = np.loadtxt(
        r'C:\Users\subha\PycharmProjects\Radial_Basis/pima-indians-diabetes.data',
        delimiter=',')
    pima[:, :8] = pima[:, :8] - pima[:, :8].mean(axis=0)
    imax = np.concatenate((pima.max(axis=0) * np.ones(
        (1, 9)), pima.min(axis=0) * np.ones((1, 9))),
                          axis=0).max(axis=0)
    pima[:, :8] = pima[:, :8] / imax[:8]

    target = np.zeros((np.shape(pima)[0], 3))
    indices = np.where(pima[:, 8] == 0)
    target[indices, 0] = 1
    indices = np.where(pima[:, 8] == 1)
    target[indices, 1] = 1
    indices = np.where(pima[:, 8] == 2)
    target[indices, 2] = 1

    order = list(range(np.shape(pima)[0]))
    np.random.shuffle(order)
    pima = pima[order, :]
    target = target[order, :]

    train = pima[::2, :8]
    traint = target[::2]
    valid = pima[3::4, 0:8]
    validt = target[3::4]
    test = pima[1::4, 0:8]
    testt = target[1::4]

    net = rbf.rbf(train, traint, 3)

    net.rbftrain(train, traint, 0.25, 2000)
    cm_train, accuracy_train = net.confmat(train, traint)
    cm_test, accuracy_test = net.confmat(test, testt)
    return cm_train, accuracy_train, cm_test, accuracy_test
Example #11
    if i > 0:
        set_size[i] = set_size[i] + set_size[i - 1]

train = data[:set_size[0], 0:col - output_size]
traint = target[:set_size[0]]
valid = data[set_size[0]:set_size[1], 0:col - output_size]
validt = target[set_size[0]:set_size[1]]
test = data[set_size[1]:set_size[2], 0:col - output_size]
testt = target[set_size[1]:set_size[2]]

# Train the network
# MLP
if run[0]:
    import mlp as mlp_module
    mlp = mlp_module.mlp(train, traint, nhidden, outtype='softmax')
    mlp.earlystopping(train, traint, valid, validt, eta)
    mlp_correct_pct = mlp.confmat(test, testt)
    if save_results:
        mlp.get_data(resultPath, filename, SEED)
        print('MLP weights saved')

#RBF
if run[1]:
    import rbf as rbf_module
    rbf = rbf_module.rbf(train, traint, nRBF, 1, 1)
    rbf.rbftrain(train, traint, 0.25, 2000)
    rbf_correct_pct = rbf.confmat(test, testt)
    if save_results:
        rbf.get_data(resultPath + filename)
        print('RBF weights saved')
Example #12
import numpy as np
import rbf
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer

iris = load_iris()

X = iris.data
y = iris.target

n_rbf = 5
sigma = 1
use_kmeans = 1

eta = 0.25
n_iterations = 2000

order = list(range(X.shape[0]))
np.random.shuffle(order)

X = X[order]
y = y[order]

lb = LabelBinarizer()
y = lb.fit_transform(y)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)

net = rbf.rbf(X_train, y_train, n_rbf, sigma, use_kmeans)

net.rbf_train(X_train, y_train, eta, n_iterations)
net.confmat(X_test, y_test)
Example #13
target = np.zeros((np.shape(iris)[0], 3))
indices = np.where(iris[:, 4] == 0)
target[indices, 0] = 1
indices = np.where(iris[:, 4] == 1)
target[indices, 1] = 1
indices = np.where(iris[:, 4] == 2)
target[indices, 2] = 1

order = list(range(np.shape(iris)[0]))
np.random.shuffle(order)
iris = iris[order, :]
target = target[order, :]

train = iris[::2, 0:4]
traint = target[::2]
valid = iris[1::4, 0:4]
validt = target[1::4]
test = iris[3::4, 0:4]
testt = target[3::4]

# print train.max(axis=0), train.min(axis=0)

import rbf

net = rbf.rbf(train, traint, 5, 1, 1)

net.rbftrain(train, traint, 0.25, 2000)
# net.confmat(train,traint)
net.confmat(test, testt)
Example #14
import os
import numpy as np

import mlp
import rbf

#find the data files in the source directory
os.chdir(os.path.abspath(os.path.dirname(__file__)))
train_data = np.genfromtxt('datatraining.txt', delimiter=',', skip_header=1)
test_data = np.genfromtxt('datatest.txt', delimiter=',', skip_header=1)
test_data2 = np.genfromtxt('datatest2.txt', delimiter=',', skip_header=1)

#splitting of the data into training and testing matrices
trainin = train_data[:1333:, 2:7:]
traintgt = train_data[:1333:, 7::]
testin = test_data[::2, 2:7:]
testtgt = test_data[::2, 7::]
test2in = test_data2[:1333:, 2:7:]
test2tgt = test_data2[:1333:, 7::]

#multilayer perceptron model
print('Multilayer Perceptron Model')
net = mlp.mlp(trainin, traintgt, 2, outtype='linear')
net.earlystopping(trainin, traintgt, testin, testtgt, 0.001, 1000)
net.confmat(test2in, test2tgt)

#rbf model
print('RBF Model')
net = rbf.rbf(trainin, traintgt, 6, 0, 1)
net.rbftrain(trainin, traintgt, 0.01, 2000)
net.confmat(testin, testtgt)
Example #15
        comm_world.Recv(received_data, source=group_leader_ids[0], tag=1)
        Model.SetNoisyObservation(received_data[no_parameters + 1:])
        print('artificial_observation_without_noise:', received_data[1:])
        print('artificial observation with noise:', Model.observation)

    SF = SamplingFramework.SamplingFramework(no_chains, stages, max_snapshots,
                                             min_snapshots, no_solvers)
    MHsampler.MHsampler.SF = SF
    MHsampler.MHsampler.seed = seed
    initial_samples = lhs_norm.lhs_norm(Model, SF.no_chains, seed)
    print("initial samples:", initial_samples)
    G_data = np.zeros([0, no_observations + no_parameters])
    if surr_type == 'SCM':
        Surrogate = scm.scm()
    else:
        Surrogate = rbf.rbf()
    Surrogate.parameters = surrogate_parameters
    #    G_data = np.vstack([G_data,np.append(artificial_observation_without_noise,artificial_real_parameters)]) # should not be known
    #    SF.shared_queue_surrogate.put([artificial_real_parameters,artificial_observation_without_noise]) # should not be known
    if __name__ == '__main__':
        jobs = []
        for i in range(SF.no_chains + SF.no_helpers):
            p = mp.Process(target=process_worker.worker,
                           args=(i, comm_world, Model, SF, Surrogate,
                                 no_solvers, initial_samples, group_leader_ids,
                                 proposalStd))
            p.start()
            jobs.append(p)
    process_master.master(comm_world, Model, SF, G_data, no_solvers,
                          group_leader_ids)
    if __name__ == '__main__':
Example #16

from numpy import zeros, shape, where, random

target = zeros((shape(iris)[0], 3))
indices = where(iris[:, 4] == 0)
target[indices, 0] = 1
indices = where(iris[:, 4] == 1)
target[indices, 1] = 1
indices = where(iris[:, 4] == 2)
target[indices, 2] = 1

order = list(range(shape(iris)[0]))
random.shuffle(order)
iris = iris[order, :]
target = target[order, :]

train = iris[::2, 0:4]
traint = target[::2]
valid = iris[1::4, 0:4]
validt = target[1::4]
test = iris[3::4, 0:4]
testt = target[3::4]

#print train.max(axis=0), train.min(axis=0)

import rbf
net = rbf.rbf(train, traint, 5, 1, 1)

net.rbftrain(train, traint, 0.25, 2000)
#net.confmat(train,traint)
net.confmat(test, testt)
Example #17
# (snippet truncated: class1_X, class2_X, class3_X point arrays are defined above)

class1_Y = np.tile(np.array([1, 0, 0]), (class1_n_points, 1))
class2_Y = np.tile(np.array([0, 1, 0]), (class2_n_points, 1))
class3_Y = np.tile(np.array([0, 0, 1]), (class3_n_points, 1))

n_samples = class1_n_points + class2_n_points + class3_n_points
permutation = np.random.permutation(n_samples)

X = np.take(np.vstack([class1_X, class2_X, class3_X]), permutation, axis=0)
Y = np.take(np.vstack([class1_Y, class2_Y, class3_Y]), permutation, axis=0)

x_train, y_train, x_test, y_test, x_valid, y_valid = split_dataset(
    X, Y, 0.2, 0)

classifier = rbf()
spread = 0.3
classifier.fit(x_train, y_train, spread=spread)

y_pred = np.where(classifier.predict(x_train) > 0.5, 1, 0)
correctly_classified = row_wise_equal(y_pred, y_train)
print(
    f'{correctly_classified}/{x_train.shape[0]} ({correctly_classified / x_train.shape[0]}) points of train set are classified correctly!'
)

y_pred = np.where(classifier.predict(x_test) > 0.5, 1, 0)
correctly_classified = row_wise_equal(y_pred, y_test)
print(
    f'{correctly_classified}/{x_test.shape[0]} ({correctly_classified / x_test.shape[0]}) points of test set are classified correctly!'
)
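`split_dataset` and `row_wise_equal` are project helpers that this snippet does not show. Minimal stand-ins consistent with how they are called (signatures inferred from the calls above, so treat them as assumptions rather than the originals):

import numpy as np

def split_dataset(X, Y, test_frac, valid_frac):
    # Sequential split; the caller has already shuffled the rows.
    n = X.shape[0]
    n_test = int(n * test_frac)
    n_valid = int(n * valid_frac)
    n_train = n - n_test - n_valid
    return (X[:n_train], Y[:n_train],
            X[n_train:n_train + n_test], Y[n_train:n_train + n_test],
            X[n_train + n_test:], Y[n_train + n_test:])

def row_wise_equal(a, b):
    # Count rows whose one-hot vectors match exactly.
    return int((a == b).all(axis=1).sum())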
Example #18
"""
#logical OR
inputs = np.array([[1,1],[1,0],[0,1],[0,0]]) 
targets = np.array([[1],[1],[1],[0]])
"""

#logical XOR
inputs = np.array([[1,1],[1,0],[0,1],[0,0]]) 
targets = np.array([[0],[1],[1],[0]])

"""
#identity matrix
inputs = np.array([[1,1],[1,0],[0,1],[0,0]]) 
targets = np.array([[1,1],[1,0],[0,1],[0,0]])
"""

#use one-layer perceptron
p = pcn.pcn(inputs,targets,0.2,'linear','batch') 
p.pcntrain(10000)
p.confmat(inputs,targets)

#use rbf network
p = rbf.rbf(inputs,targets,4,0,1,0.2,'linear','batch') 
p.rbftrain(10000)
p.confmat(inputs,targets)

#use two-layer perceptron
p = mlpcn.mlpcn(inputs,targets,4,0.2,'linear','batch') 
p.mlptrain(10000)
p.confmat(inputs,targets)
Example #19
    if i > 0:
        set_size[i] = set_size[i] + set_size[i - 1]

train = data[:set_size[0],0:col-output_size]
traint = target[:set_size[0]]
valid = data[set_size[0]:set_size[1],0:col-output_size]
validt = target[set_size[0]:set_size[1]]
test = data[set_size[1]:set_size[2],0:col-output_size]
testt = target[set_size[1]:set_size[2]]

# Train the network
# MLP
if run[0]:
    import mlp as mlp_module
    mlp = mlp_module.mlp(train,traint,nhidden,outtype='softmax')
    mlp.earlystopping(train,traint,valid,validt,eta)
    mlp_correct_pct = mlp.confmat(test,testt)
    if save_results:
        mlp.get_data(resultPath, filename, SEED)
        print('MLP weights saved')

#RBF
if run[1]:
    import rbf as rbf_module
    rbf = rbf_module.rbf(train,traint,nRBF,1,1)
    rbf.rbftrain(train,traint,0.25,2000)
    rbf_correct_pct = rbf.confmat(test,testt)
    if save_results:
        rbf.get_data(resultPath + filename)
        print('RBF weights saved')
Example #20
def lspi():
    gridworld = Gridworld()

    gamma = .8
    beta = 1
    numEpisodes = 10
    beta_factor = 1 - 5 * numEpisodes / 10000

    # Sample the state space with different grid sizes
    X_1, X_2 = np.meshgrid(np.arange(.5, 7, 1),
                           np.arange(.5, 7, 1),
                           indexing='ij')
    X_1m, X_2m = np.meshgrid(np.arange(.5, 7, 0.5),
                             np.arange(.5, 7, 0.5),
                             indexing='ij')

    # initialize the policy randomly. should give for each state in s (nx2) a
    # random action of the form (r,a), where r ~ Unif[0,1] and a ~ Unif[0,2pi].
    # pi = lambda s: ...

    # samples from initial distribution n starting positions (you can start
    # with a random initialization in the entire gridworld)
    # initialDistribution = lambda n: ...

    converged = False

    # generate an ndgrid over the state space for the centers of the basis
    # functions
    X1, X2, A1, A2 = np.meshgrid(np.arange(.5, 7, 1), np.arange(.5, 7, 1),
                                 np.arange(-1, 2), np.arange(-1, 2))
    # NOTE: the policy returns the action in polar coordinates while the basis
    # functions use cartesian coordinates!!! You have to convert between these
    # representations.

    # matrix of the centers
    c = np.column_stack(
        (np.transpose(X1.flatten()), np.transpose(X2.flatten()),
         np.transpose(A1.flatten()), np.transpose(A2.flatten())))

    # number of basis functions
    # k = ...

    # initialize weights
    # w = ...

    # compute bandwiths with median trick
    bw = np.zeros(4)
    for i in range(4):
        dist = pdist(c[:, [i]])
        bw[i] = np.sqrt(np.median(dist**2)) * .4

    # feature function (making use of rbf)
    # feature = lambda x_: ...

    # time step
    t = 0

    # initialize A and b
    # A = ...
    # b = ...

    while not converged:
        # Policy evaluation
        # sample data
        s1, a, r, s2 = sampleData(gridworld, pi, initialDistribution,
                                  numEpisodes, 50)

        # compute actions in cartesian space
        ac1, ac2 = pol2cart(a[:, 0, np.newaxis], a[:, 1, np.newaxis])

        # compute PHI
        # PHI = ...

        # compute PPI
        # PPI = ...

        # update A and b
        # A = ...
        # b = ...

        # compute new w
        w_old = w
        # w = ...

        # Policy improvement
        # pi = ...

        beta = beta_factor * beta
        t = t + 1

        # Check for convergence
        if np.abs(w - w_old).sum() / len(w) < 0.05:
            converged = True

        print(t, ' - ', beta, ' - ', np.abs(w - w_old).sum() / len(w))

        ### plotting
        a = policy(np.hstack((X_1m.reshape(-1, 1), X_2m.reshape(-1, 1))),
                   feature, w, 0)

        ax1, ax2 = pol2cart(a[:, 0].reshape(-1, 1), a[:, 1].reshape(-1, 1))
        phi = rbf(
            np.hstack((X_1m.reshape(-1, 1), X_2m.reshape(-1, 1), ax1, ax2)), c,
            bw)
        Q = phi.dot(w)
        n_plot = len(X_1m)

        plot_a = np.hstack((ax1, ax2)).reshape((n_plot, n_plot, 2))
        plot_V = Q.reshape((n_plot, n_plot))

        plotPiV(plot_a, plot_V, vmin=-5, vmax=5, block=False)

    plotPiV(plot_a, plot_V, vmin=-5, vmax=5)
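The bandwidth loop in the middle of `lspi` is the "median trick": each kernel bandwidth is set from the median pairwise distance along one input dimension. A standalone illustration with synthetic centres (the 0.4 scaling mirrors the code above):

import numpy as np
from scipy.spatial.distance import pdist

c = np.random.rand(100, 4)           # stand-in centre matrix
bw = np.zeros(4)
for i in range(4):
    dist = pdist(c[:, [i]])          # pairwise 1-D distances
    bw[i] = np.sqrt(np.median(dist ** 2)) * 0.4
print(bw)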
Example #21
test = iris[3::4, 0:4]
testt = target[3::4]
"""
#use one-layer perceptron
net = pcn.pcn(train,traint,0.2,'softmax','batch')

print('\nperceptron training...')
(trainerror,validerror) = net.pcntrain_automatic(valid,validt,100)

print('Training stopped after',len(trainerror)*100,'iterations')
print('Final Train Error',net.errfunc(net.pcnfwd(train,True),traint))
print('Final Valid Error',net.errfunc(net.pcnfwd(valid,True),validt))
"""

#use rbf perceptron
net = rbf.rbf(train, traint, 10, 0, 1, 0.2, 'softmax', 'batch')

print('\nperceptron training ...')
(trainerror, validerror) = net.rbftrain_automatic(valid, validt, 100)

print('Training stopped after', len(trainerror) * 100, 'iterations')
print('Final Train Error', net.errfunc(net.rbffwd(train, 2), traint))
print('Final Valid Error', net.errfunc(net.rbffwd(valid, 2), validt))
"""
#use two-layer perceptron
net = mlpcn.mlpcn(train,traint,4,0.2,'softmax','batch')

print('perceptron training...')
(trainerror,validerror) = net.mlptrain_automatic(train,traint,valid,validt,100)
    
print('Training stopped after',len(trainerror)*100,'iterations')
"""
Example #22
 def execute(self):
     # Evaluate each queued expression for its side effect.
     for expression in self._params_to_execute:
         rbf(expression)
import numpy as np
import matplotlib.pyplot as plt

def rbnn(x_train, y_train, theta, funtol):
    #  Initialization
    q = (np.shape(x_train))[0]
    r = (np.shape(x_train))[1]
    x = x_train.copy()  # Create a copy of Training Data
    p = np.zeros([q, q])  # Design Matrix
    err = np.zeros([q, 1])  # Store Errors associated with columns of p
    c = np.zeros([q, r])  # Store neurons centers
    phi_col = np.zeros([q, 1])  # Array to update design matrix for actual error calculations
    showiterinfo = True  # Display iterations
    #  Create Lists for Plots
    x_data = []  # Epochs
    y_data = []  # MSE
    #  Create Design Matrix
    for i in range(q):
        for j in range(q):
            p[j][i] = rbf(x[j:j + 1, 0:r], x[i:i + 1, 0:r], theta)  # RBF function call
    #  Find column vector in p with maximum error
    for k in range(q):
        a = p[0:q, k:k + 1]
        g = np.dot(np.transpose(a), y_train) / np.dot(np.transpose(a), a)
        err[k] = pow(g, 2) * np.dot(np.transpose(a), a) / np.dot(np.transpose(y_train), y_train)
    maxerr = err.max()
    j = err.argmax()
    wj = p[0:q, j:j + 1]
    p = np.delete(p, j, 1)
    err = np.delete(err, j, 0)
    c[0:1, 0:r] = x[j:j + 1, 0:r]
    x = np.delete(x, j, 0)
    #  Calculate mean-squared error of the network
    for i in range(q):
        # Update Design Matrix for Training Data
        phi_col[i][0] = rbf(x_train[i:i + 1, 0:r], c[0:1, 0:r], theta)
    bias = np.ones([q, 1])  # Add bias term
    phi = np.concatenate((bias, phi_col), axis=1)
    lw = np.dot(np.linalg.pinv(phi[0:q, 0:2]), y_train)
    mse = np.mean(np.square(np.dot(phi[0:q, 0:2], lw) - y_train))
    x_data.append(1)
    y_data.append(mse)
    # Display iteration info
    if showiterinfo:
        print("Epoch: " + str(1) + " MSE: " + str(mse))
    #  Main loop
    for it in range(q - 1):
        alpha = np.dot(np.transpose(wj), p) / np.dot(np.transpose(wj), wj)
        p = p - np.dot(wj, alpha)
        e = (np.shape(p))[1]
        #  Find column vector in p with maximum error
        for k in range(e):
            a = p[0:q, k:k + 1]
            g = np.dot(np.transpose(a), y_train) / np.dot(np.transpose(a), a)
            err[k] = pow(g, 2) * np.dot(np.transpose(a), a) / np.dot(np.transpose(y_train), y_train)
        maxerr = err.max()
        j = err.argmax()
        wj = p[0:q, j:j + 1]
        p = np.delete(p, j, 1)
        err = np.delete(err, j, 0)
        c[it + 1:it + 2, 0:r] = x[j:j + 1, 0:r]
        x = np.delete(x, j, 0)
        #  Calculate mean-squared error of the network
        for i in range(q):
            phi_col[i][0] = rbf(x_train[i:i + 1, 0:r], c[it + 1:it + 2, 0:r], theta)
        # Update Design Matrix for Training Data
        phi = np.concatenate((phi, phi_col), axis=1)
        lw = np.dot(np.linalg.pinv(phi[0:q, 0:it + 3]), y_train)
        mse = np.mean(np.square(np.dot(phi[0:q, 0:it + 3], lw) - y_train))
        x_data.append(it + 2)
        y_data.append(mse)
        # Display iteration info
        if showiterinfo:
            print("Epoch: " + str(it + 2) + " MSE: " + str(mse))
        #  Check for convergence
        if mse <= funtol:
            print("Algorithm Stopped:Mean-squared error less than specified tolerance")
            for j in range(q-1, it+1, -1):
                c = np.delete(c, j, 0)
            break
        if it == (q - 2):
            print("Algorithm Stopped: Maximum number of neurons reached")
            break
    #  Plot for Best and Average Cost Function with Iterations
    plt.plot(x_data, y_data, '-b')
    plt.xlabel('Epochs')
    plt.ylabel("MSE")
    plt.grid()
    plt.legend(["Training"], loc="upper right", frameon=False)
    plt.show()
    return c, lw
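`rbnn` relies on an `rbf(x1, x2, theta)` kernel that the snippet does not define. A plausible Gaussian choice plus a usage sketch follows; both the kernel form and the 1-D regression data are assumptions, not the original code:

import numpy as np

def rbf(x1, x2, theta):
    # Assumed Gaussian kernel between two row vectors.
    return np.exp(-np.sum((x1 - x2) ** 2) / (2 * theta ** 2))

# Hypothetical 1-D regression run.
x_train = np.linspace(-3, 3, 40).reshape(-1, 1)
y_train = np.sin(x_train)
centres, weights = rbnn(x_train, y_train, theta=1.0, funtol=1e-4)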