import numpy as np

# `norm` (min-max scaling helpers) and `NeuralNetwork` are project-local
# modules; a sketch of the assumed `norm` helpers follows this function.
def cross_validate(network_shape, epochs_num, learn_rate, _groups_x,
                   _groups_y):
    k = _groups_x.shape[0]  # number of folds
    results = np.zeros(k)
    for i in range(k):

        train_x = None
        train_y = None
        valid_x = np.copy(
            _groups_x[i])  # the validation set for the i'th iteration.
        valid_y = np.copy(_groups_y[i])

        net = NeuralNetwork(network_shape, epochs_num, learn_rate)

        for j in range(k):
            if j != i:
                # arrange the train set for the i'th iteration.
                if train_x is None:
                    train_x = np.copy(_groups_x[j])
                    train_y = np.copy(_groups_y[j])
                else:
                    train_x = np.concatenate((train_x, _groups_x[j]), axis=0)
                    train_y = np.concatenate((train_y, _groups_y[j]), axis=0)

        old_mins, denoms = norm.minmax_params(train_x)
        train_x = norm.minmax(train_x, 0, 1)
        valid_x = norm.minmax(valid_x, 0, 1, old_mins, denoms)

        net.train(train_x, train_y)
        results[i] = net.accuracy(valid_x, valid_y)

    print(results)
    return np.average(results)
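The `norm` module used above is not included in the snippet. Below is a minimal sketch of what its two helpers could look like given the call sites in `cross_validate`; the implementation is an assumption, not the project's actual code. The point of the two-step API is that scaling parameters are fitted on the training folds and reused on the validation fold, so the fold's statistics never leak into training.

import numpy as np

def minmax_params(x):
    # Per-column minima and ranges, fitted on the training folds only.
    mins = x.min(axis=0)
    denoms = x.max(axis=0) - mins
    denoms[denoms == 0] = 1  # guard constant columns against division by zero
    return mins, denoms

def minmax(x, new_min, new_max, mins=None, denoms=None):
    # Scale columns into [new_min, new_max]; if no parameters are given,
    # fit them on x itself (as done for the training folds above).
    if mins is None or denoms is None:
        mins, denoms = minmax_params(x)
    return (x - mins) / denoms * (new_max - new_min) + new_min

The fold arrays themselves can be built with, e.g., np.array_split, provided the dataset size divides evenly by k so the folds stack into a single array.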
Example #2
'''
    Load the data. For this demo we use sklearn's digits dataset:
    1797 8x8-pixel images, one image per row in flattened form.
    Columns 1-64 hold the pixel values and columns 65-74 hold the
    one-hot encoded labels for digits 0 through 9, giving 1797 rows
    and 74 columns in total.
'''
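The file transformed.csv is assumed to already exist. A minimal sketch of how such a file could be generated from sklearn's digits dataset (the original preprocessing step is not shown in this snippet):

import numpy as np
from sklearn.datasets import load_digits

digits = load_digits()               # 1797 samples, 8x8 = 64 pixels each
one_hot = np.eye(10)[digits.target]  # (1797, 10) one-hot encoded labels
np.savetxt("transformed.csv",
           np.hstack((digits.data, one_hot)), delimiter=',')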
data = np.loadtxt("transformed.csv", delimiter=',')
m = len(data)

# Split the data into training set (first 3/4) and test set (last 1/4).
train_set = data[:3 * m // 4, :]
test_set = data[3 * m // 4:, :]

# Instantiate a new neural network: 64 input, HIDDEN_NODES hidden, 10 output nodes.
NN = NeuralNetwork(64, HIDDEN_NODES, 10, LEARNING_RATE, ITERATIONS)

# Train on the training set, test on the test set. The test() function
# will print out the percent correctness on the test set.
errors = NN.train(train_set)
NN.test(test_set)

# Plot the error curve
if VIEW_PLOT:
    plt.plot(errors)
    plt.title("Average Error Per Iteration On Training Set")
    plt.xlabel("Iteration")
    plt.ylabel("Average Error")
    plt.show()
Example #3
if __name__ == '__main__':

    test = test()  # instantiate the helper class (the instance shadows the class name)
    print("getting dataset")
    test.getDataset(1)
    print(np.asarray(test.y).reshape((9, -1)))
    print(np.shape(test.X))
    print(np.shape(test.y))

    nn = NeuralNetwork([9, 18, 18, 9])
    nn.train(X=np.asarray(test.X).reshape((9, -1)),
             y=np.asarray(test.y).reshape((9, -1)),
             batch_size=9,
             epochs=2,
             learning_rate=0.4,
             print_every=10,
             validation_split=0.2,
             tqdm_=False,
             plot_every=20000)

    #X is the current gamestate and y is the next move to make
    #X = np.random.random((1,9))
    #print(X)

    #network = Network()

    #Train on the dataset
    #network.train("./dataset.csv", 3)
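The commented notes above describe X as the current gamestate and y as the next move. One hypothetical way such a 3x3 board could be flattened into the length-9 input vector (the actual encoding produced by getDataset is not shown):

import numpy as np

# Hypothetical encoding: 1 = own mark, -1 = opponent's mark, 0 = empty.
board = np.array([[1, -1, 0,
                   0,  1, 0,
                   0,  0, -1]])  # shape (1, 9): one flattened gamestate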

Example #4
settings = {
	# Preset Parameters
	"n_inputs" 				:  image_length, 		# Number of input signals
	"n_outputs"				:  1, 					# Number of output signals from the network
	"n_hidden_layers"		:  1,					# Number of hidden layers in the network (0 or 1 for now)
	"n_hiddens"				:  100,   				# Number of nodes per hidden layer
	"activation_functions"	:  [ LReLU_function, sigmoid_function ],		# Activation functions by layer

	# Optional parameters

	"weights_low"			: -0.1,		# Lower bound on initial weight range
	"weights_high"			: 0.1,  	# Upper bound on initial weight range
	"save_trained_network"  : False,	# Save trained weights or not.

	"batch_size"			: 1, 		# 1 for stochastic gradient descent, 0 for gradient descent
}
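LReLU_function and sigmoid_function above come from the surrounding library. A minimal NumPy sketch of what such activations typically compute (an illustration, not the library's actual implementation):

import numpy as np

def LReLU_function(x, alpha=0.01):
    # Leaky ReLU: identity for positive inputs, a small slope for negatives.
    return np.where(x > 0, x, alpha * x)

def sigmoid_function(x):
    # Logistic sigmoid: squashes inputs into (0, 1).
    return 1.0 / (1.0 + np.exp(-x))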

# Initialization
network = NeuralNetwork( settings )


# Train
network.train(fem_images, fem_scores,  # training set
              ERROR_LIMIT=1e-3,        # acceptable error bound
              learning_rate=1e-5)      # learning rate

# Alter image

network.alter_image(fem_images[0],  # image to alter
                    fem_scores[0])  # label for the initial backprop pass
Example #5
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets

def plot_history(history, n=None):
    # (the first panel's plot calls are truncated in this excerpt)
    plt.grid(1)
    plt.xlabel('epochs')
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.plot(range(history['epochs'])[:n],
             history['train_acc'][:n],
             label='train_acc')
    plt.plot(range(history['epochs'])[:n],
             history['test_acc'][:n],
             label='test_acc')
    plt.title('train & test accuracy')
    plt.grid(1)
    plt.xlabel('epochs')
    plt.legend()


# LINEAR PROBLEM: two Gaussian blobs with binary labels.
data = datasets.make_blobs(n_samples=1000, centers=2, random_state=2)
X = data[0].T                     # features as rows: shape (2, 1000)
y = np.expand_dims(data[1], 1).T  # labels as a row vector: shape (1, 1000)

neural_net = NeuralNetwork([2, 4, 4, 1], seed=0)
history = neural_net.train(X=X,
                           y=y,
                           batch_size=16,
                           epochs=100,
                           learning_rate=0.4,
                           validation_split=0.2)

plot_history(history)
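How to query the trained network is not shown in this excerpt. A hypothetical check of the fit, assuming the class exposes a `predict` method returning sigmoid outputs of shape (1, 1000) (both the method name and output shape are assumptions):

# Hypothetical: `predict` is not part of the excerpt above.
preds = neural_net.predict(X) > 0.5            # threshold the sigmoid outputs
print("train accuracy:", np.mean(preds == y))  # fraction of correct labels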