def predict_frame_pixel(data, def_param=(shared_v_data, shared_u_data)):
    y, x = data

    shared_delayed_v_data = create_0d_delay_coordinates(shared_v_data[:, y, x],
                                                        ddim,
                                                        tau=32)

    delayed_patched_v_data_train = shared_delayed_v_data[:trainLength]
    u_data_train = shared_u_data[:trainLength, y, x]

    delayed_patched_v_data_test = shared_delayed_v_data[
        trainLength:trainLength + testLength]
    u_data_test = shared_u_data[trainLength:trainLength + testLength, y, x]

    flat_v_data_train = delayed_patched_v_data_train.reshape(-1, ddim)
    flat_u_data_train = u_data_train.reshape(-1, 1)

    flat_v_data_test = delayed_patched_v_data_test.reshape(-1, ddim)
    flat_u_data_test = u_data_test.reshape(-1, 1)

    rbf = RBF(sigma=5.0)
    rbf.fit(flat_v_data_train, flat_u_data_train, basisQuota=0.02)
    pred = rbf.predict(flat_v_data_test)
    pred = pred.ravel()
    return pred
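
# The snippet above relies on create_0d_delay_coordinates, which is not shown
# in this excerpt. A minimal sketch of what such a delay-embedding helper could
# look like (hypothetical implementation, assuming each time step t is mapped
# to the lag vector [s[t], s[t - tau], ..., s[t - (ddim - 1) * tau]] and the
# warm-up region is padded with the first sample):
import numpy as np

def create_0d_delay_coordinates(series, ddim, tau):
    T = len(series)
    delayed = np.empty((T, ddim))
    for d in range(ddim):
        shift = d * tau
        delayed[shift:, d] = series[:T - shift]  # lag-d copy of the series
        delayed[:shift, d] = series[0]           # pad where no lag exists yet
    return delayed
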
def predict_inner_pixel(data, def_param=(shared_v_data, shared_u_data)):
    y, x = data

    shared_delayed_v_data = create_2d_delay_coordinates(
        shared_v_data[:, y - patch_radius:y + patch_radius + 1,
                      x - patch_radius:x + patch_radius +
                      1][:, ::sigma_skip, ::sigma_skip],
        ddim,
        tau=32)
    shared_delayed_patched_v_data = np.empty(
        (ndata, 1, 1, ddim * eff_sigma * eff_sigma))
    shared_delayed_patched_v_data[:, 0, 0] = shared_delayed_v_data.reshape(
        -1, ddim * eff_sigma * eff_sigma)

    delayed_patched_v_data_train = shared_delayed_patched_v_data[:trainLength,
                                                                 0, 0]
    u_data_train = shared_u_data[:trainLength, y, x]

    delayed_patched_v_data_test = shared_delayed_patched_v_data[
        trainLength:trainLength + testLength, 0, 0]
    u_data_test = shared_u_data[trainLength:trainLength + testLength, y, x]

    flat_v_data_train = delayed_patched_v_data_train.reshape(
        -1, shared_delayed_patched_v_data.shape[3])
    flat_u_data_train = u_data_train.reshape(-1, 1)

    flat_v_data_test = delayed_patched_v_data_test.reshape(
        -1, shared_delayed_patched_v_data.shape[3])
    flat_u_data_test = u_data_test.reshape(-1, 1)

    rbf = RBF(sigma=5.0)
    rbf.fit(flat_v_data_train, flat_u_data_train, basisQuota=0.02)
    pred = rbf.predict(flat_v_data_test)
    pred = pred.ravel()
    return pred
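
# Hypothetical driver for the two workers above (coordinate ranges and names
# are illustrative only). The otherwise unused def_param default appears to be
# a trick to keep references to the module-level shared arrays available to
# forked multiprocessing workers.
from multiprocessing import Pool

if __name__ == "__main__":
    inner_coords = [(y, x)
                    for y in range(patch_radius, N - patch_radius)
                    for x in range(patch_radius, N - patch_radius)]
    with Pool() as pool:
        inner_predictions = pool.map(predict_inner_pixel, inner_coords)
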
Example #3
    def calFitness(self, x):
        # # sum = 0
        # # length = len(x)
        # # x = x ** 2
        # # for i in range(length):
        # #     sum += x[i]
        # if(x[0]>1 or x[0]<0):
        #     x[0]=0.1
        # result = start_cluster(self.data, x[0])
        # centers = []
        # for i in range(len(result)):
        #     #print("---------- Cluster " + str(i + 1) + " ----------", result[i])
        #     #y=0
        #     center=np.zeros(5)
        #     for j in range(len(result[i])):
        #         center+=np.array(result[i][j])
        #         #y+=self.Y[self.data.index(result[i][j])]
        #     center/=len(result[i])
        #     #y/=len(result[i])
        #     centers.append(center)
        # b = self.calbeta(result,centers)
        centers = []
        b = []
        for i in range(int(self.dim / 6)):
            temp = x[i * 6:(i + 1) * 6 - 1]
            centers.append(temp)
            temp = x[(i + 1) * 6 - 1]
            b.append(temp)
        rbf = RBF(5, int(self.dim / 6), 1, centers, b)
        rbf.train(self.data, self.Y)
        fitness = rbf.cal_distance(self.data, self.Y)
        #print('fitness:',fitness)

        return fitness
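
# calFitness assumes an RBF class taking (input_dim, hidden_num, output_dim,
# centers, b). A minimal sketch of such a network (hypothetical implementation,
# not the class actually used): Gaussian units at the given centers with
# per-unit widths b, output weights fitted by least squares, and cal_distance
# returning the mean squared error that serves as the fitness value.
import numpy as np

class RBF:
    def __init__(self, input_dim, hidden_num, output_dim, centers, b):
        # input_dim is kept only to mirror the assumed constructor signature
        self.centers = np.asarray(centers, dtype=float)  # (hidden_num, input_dim)
        self.widths = np.asarray(b, dtype=float)         # (hidden_num,)
        self.weights = np.zeros((hidden_num, output_dim))

    def _hidden(self, X):
        # squared distances to every center -> Gaussian activations
        d2 = ((np.asarray(X, dtype=float)[:, None, :] - self.centers[None, :, :]) ** 2).sum(-1)
        return np.exp(-d2 / (2.0 * self.widths ** 2 + 1e-12))

    def train(self, X, Y):
        H = self._hidden(X)
        Y = np.asarray(Y, dtype=float).reshape(len(H), -1)
        self.weights, *_ = np.linalg.lstsq(H, Y, rcond=None)

    def cal_distance(self, X, Y):
        pred = self._hidden(X) @ self.weights
        Y = np.asarray(Y, dtype=float).reshape(pred.shape)
        return float(np.mean((pred - Y) ** 2))
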
Example #4
    def run(self):
        # Train an RBF network based on its input parameters and dataset
        print(
            "Thread {0}: starting {1} TRAINING with {2} dimensions and {3} basis functions at {4}"
            .format(self.thread_ID, self.name, self.num_dim, self.num_basis,
                    time.ctime(time.time())))
        rbf = RBF.RBF(self.num_basis, self.training_data)
        loss_set = rbf.train()
        with open('RBF_{0}.csv'.format(self.thread_ID), 'w', newline='') as csvfile:
            results_writ = csv.writer(csvfile,
                                      delimiter=' ',
                                      quotechar='|',
                                      quoting=csv.QUOTE_MINIMAL)
            results_writ.writerow(loss_set)

        # Test the same RBF network on a portion of the dataset
        print(
            "Thread {0}: starting {1} TESTING with {2} dimensions and {3} basis functions at {4}"
            .format(self.thread_ID, self.name, self.num_dim, self.num_basis,
                    time.ctime(time.time())))
        result = rbf.hypothesis_of(self.testing_data)
        print(
            "Thread {0}: {1} result {5} with {2} dimensions and {3} basis functions at {4}"
            .format(self.thread_ID, self.name, self.num_dim, self.num_basis,
                    time.ctime(time.time()), result))
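
# Hypothetical launcher for the worker above, assuming the enclosing class
# subclasses threading.Thread and takes these constructor arguments (the class
# name RBFThread, basis_counts, and the dataset names are illustrative only):
threads = [RBFThread(thread_ID=i, name='rbf-%d' % i, num_dim=num_dim,
                     num_basis=nb, training_data=training_data,
                     testing_data=testing_data)
           for i, nb in enumerate(basis_counts)]
for t in threads:
    t.start()
for t in threads:
    t.join()
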
Example #5
    def run_RBF(self, key, dataset, train, test, isClassification, output_neuron, type_):
        prot = ld.LoadDataset().getRBFhiddenLayer(key, type_)
        rbf = RBF.RBF(train, prot, isClassification, output_neuron)
        epoch, result = rbf.train()
        plot_graph(key + ' with RBF type ' + type_, epoch, result)
        x_test, test_label = ld.LoadDataset().get_neural_net_input_shape(dataset, test, isClassification)
        predicted = rbf.test(x_test)
        return predicted, test.iloc[:, -1]
Example #6
    def layoutBest(self):
        centers = []
        b = []
        for i in range(int(self.dim / 6)):
            temp = self.gbest[i * 6:(i + 1) * 6 - 1]
            centers.append(temp)
            temp = self.gbest[(i + 1) * 6 - 1]
            b.append(temp)
        dim = int(self.dim / 6)
        rbf = RBF(5, dim, 1, centers, b)
        rbf.train(self.data, self.Y)
        return rbf
Example #7
def fit_predict_pixel(y, x, running_index, training_data, test_data, generate_new):
    training_data_in = training_data[1][
        :, y - patch_radius:y + patch_radius + 1,
        x - patch_radius:x + patch_radius + 1][
        :, ::sigma_skip, ::sigma_skip].reshape(-1, eff_sigma * eff_sigma)
    training_data_out = training_data[0][:, y, x].reshape(-1, 1)

    test_data_in = test_data[1][
        :, y - patch_radius:y + patch_radius + 1,
        x - patch_radius:x + patch_radius + 1][
        :, ::sigma_skip, ::sigma_skip].reshape(-1, eff_sigma * eff_sigma)
    test_data_out = test_data[0][:, y, x].reshape(-1, 1)

    rbf = RBF(sigma=5.0)
    rbf.fit(training_data_in, training_data_out, basisQuota=0.02)
    pred = rbf.predict(test_data_in)
    pred = pred.ravel()

    pred[pred>1.0] = 1.0
    pred[pred<0.0] = 0.0

    return pred
Example #8
def fit_predict_frame_pixel(y, x, running_index, training_data, test_data, generate_new):
    ind_y, ind_x = y, x

    min_border_distance = np.min([y, x, N-1-y, N-1-x])

    training_data_in = training_data[1][
        :, y - min_border_distance:y + min_border_distance + 1,
        x - min_border_distance:x + min_border_distance + 1].reshape(
            -1, int((2 * min_border_distance + 1) ** 2))
    training_data_out = training_data[0][:, y, x].reshape(-1, 1)

    test_data_in = test_data[1][
        :, y - min_border_distance:y + min_border_distance + 1,
        x - min_border_distance:x + min_border_distance + 1].reshape(
            -1, int((2 * min_border_distance + 1) ** 2))
    test_data_out = test_data[0][:, y, x].reshape(-1, 1)

    rbf = RBF(sigma=5.0)
    rbf.fit(training_data_in, training_data_out, basisQuota=0.02)
    pred = rbf.predict(test_data_in)
    pred = pred.ravel()

    pred[pred>1.0] = 1.0
    pred[pred<0.0] = 0.0

    return pred
Example #9
points = np.array([
    random.uniform(0.000001, 0.2),
    random.uniform(0.000001, 0.2),
    random.uniform(0.000001, 0.2)
])
values = np.array([])
for i in range(0, points.size):
    values = np.append(values,
                       evaluate(points[i], x_train, y_train, x_test, y_test))

print("Learning rate: ", points)
print("Loss value: ", values)
plot = np.copy(np.log10(points))

for i in range(1, numberOfIterations + 1):
    rbf = r.RBF(points, values)
    rbf.interpolate()
    if i % 3 == 0:
        newx = rbf.newxGivenf(True)
    else:
        newx = rbf.newxGivenf()
    # find the new learning rate
    points = np.append(points, np.power(10, newx))
    print("new learning rate: ", np.power(10, newx))
    plot = np.append(plot, newx)
    # evaluate the model with the new learning rate
    newf = evaluate(np.power(10, newx), x_train, y_train, x_test, y_test)
    values = np.append(values, newf)

rbf = r.RBF(points, values)
rbf.interpolate()
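
# After the final interpolation, the same newxGivenf call used in the loop
# above could be reused to read off the surrogate's suggestion in log10 space
# (a hypothetical follow-up, assuming the 10**newx convention from the loop):
best_logx = rbf.newxGivenf()
best_lr = np.power(10, best_logx)
print("suggested learning rate:", best_lr)
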
Example #10
print('\nMin Loss: %0.4f' % min_loss)
print('\nOptimal hyperparameter: (%d, %0.1e)' % opt_hyp)

#%%
c_opt, v_opt, val_loss = trainRBF(x_train,
                                  y_train,
                                  N_opt,
                                  rho_opt,
                                  sigma_opt,
                                  max_iter=100000)

RBF = makeRBF(c_opt, v_opt)

print('\nLoss on train set: %0.8f \nLoss on test set: %0.8f' %
      (val_loss, compute_loss(RBF(x_test), y_test)))

#%%

# Plot

from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np

grid_X = [[i, j] for i in np.arange(0, 1.0, 0.01)
          for j in np.arange(0, 1.0, 0.01)]

grid_franke = np.array([franke(x[0], x[1]) for x in grid_X]).reshape((100, 100))
Example #11
	par.X = X_Train
	par.Y = Y_Train
	par.Y_Test = Y_Test
	par.classNum = countDistinct(Y_Total)
	
	min_clusters = 1
	max_clusters = 4
	CLUSTER = list(range(min_clusters, max_clusters, step))
	ACCUR = []
	# '''
	for i in range(min_clusters, max_clusters, step):
		par.c = i
		fcm = FCM(X_Train, par.fuzziness, par.c, par.epsilon)
		fcm.run()
		rbf = RBF(X_Train, par.Y, par.V, par.gamma)
		rbf.train()
		rbf.test(X_Test)
		print("number of clusters = ", par.c)
		print("RBF Train Accuracy = ", par.RBF_train_accuracy)
		print("RBF Test Accuracy = ", par.RBF_test_accuracy)
		ACCUR.append(par.RBF_test_accuracy)
	f = plt.figure(1)
	plt.ylabel('Classification Accuracy')
	plt.xlabel('Number of Clusters')
	plt.plot(CLUSTER, ACCUR)
	plt.show()
	# '''
	
	# '''
	# For using RBF with dataset ‘5clstrain1500.csv’, uncomment the following 2 lines
Example #12
                                               x_test, y_test,
                                               N_opt, rho_opt, sigma_opt,
                                               learning_rate = learning_rate,
                                               max_iter = max_iter,
                                               epsilon = epsilon,
                                               verbose = True)
print('\nTraining time: %d seconds'%(time.time()-start))
RBF = makeRBF(c_opt, v_opt, sigma_opt)

print('\nLoss on train set: %0.8f \nLoss on test set: %0.8f' %(train_loss, test_loss))

# Plot
grid_X = np.array([[i, j] for i in np.arange(0, 1.0, 0.01) for j in np.arange(0, 1.0, 0.01)])

grid_franke = np.array([franke(x[0], x[1]) for x in grid_X]).reshape((100,100))
grid_Y = RBF(grid_X).reshape((100,100))

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

# Make data.

x1 = np.array(np.arange(0,1.0, 0.01))
X, Y = np.meshgrid(x1, x1)


# Plot the surface.
surf = ax.plot_surface(X, Y, grid_Y, cmap = cm.Reds, linewidth=0, antialiased=False)
surf2 = ax.plot_surface(X, Y, grid_franke, cmap = cm.Blues, linewidth=0, antialiased=False)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
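
# The excerpt stops before the figure is rendered; something like the
# following would typically complete it (the colorbar settings are
# illustrative only):
fig.colorbar(surf, shrink=0.5, aspect=10)
plt.show()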