Example #1
train_labels = data[:, len(images[0]):]

scaler = skp.StandardScaler()
train_images = scaler.fit_transform(train_images)
test_data = np.load('data/test_data.npy')
test_data = scaler.transform(test_data)

# Soften the 0/1 targets in our labels matrix (here to 0.05 and 0.95).
for i in range(len(train_labels[:,0])):
	for j in range(len(train_labels[0,:])):
		if train_labels[i,j] < 0.5:
			train_labels[i,j] = .05
		else:
			train_labels[i,j] = .95
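
# Equivalently, the nested loop above can be vectorized in one step (a sketch,
# assuming train_labels is a float matrix of 0/1 indicators):
#   train_labels = np.where(train_labels < 0.5, 0.05, 0.95)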

NN = NeuralNet(train_images, train_labels)
_v_learning_rate = 0.1
_w_learning_rate = 0.01
for i in range(12):
	if (i > 0):
		data = np.concatenate((train_images, train_labels), axis=1)
		np.random.shuffle(data)  # reshuffle the training set between epochs
		train_images = data[:, :len(images[0])]
		train_labels = data[:, len(images[0]):]
		NN.updateData(train_images, train_labels)
		if ((i % 6) == 0):
			_v_learning_rate = 0.9*_v_learning_rate
			_w_learning_rate = 0.6*_w_learning_rate
	NN.trainMini(batch_size=25, v_learning_rate=_v_learning_rate,
		w_learning_rate=_w_learning_rate)
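
# The loop above applies step decay: every 6 epochs the V rate is scaled by 0.9
# and the W rate by 0.6, so epoch i trains with base_rate * decay ** (i // 6);
# e.g. the W rate used for epochs 6-11 is 0.01 * 0.6 = 0.006.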

f = open('kaggle_submission.csv', 'w')
Example #2
def main(batch_size=25, target_values=[0.05, 0.95], epochs=18,
	v_learning_rate=0.1, w_learning_rate=0.01, v_decay_rate=0.9, w_decay_rate=0.6,
	decay_frequency=6):
	_v_learning_rate = v_learning_rate
	_w_learning_rate = w_learning_rate
	images = np.load('data/images.npy')
	labels = np.load('data/vec_labels.npy')
	data = np.concatenate((images, labels), axis=1)
	np.random.shuffle(data)
	
	train_images = data[:round(.8*len(data)), :len(images[0])]
	train_labels = data[:round(.8*len(data)), len(images[0]):]
	validation_images = data[round(.8*len(data)):, :len(images[0])]
	validation_labels = data[round(.8*len(data)):, len(images[0]):]

	scaler = skp.StandardScaler()
	train_images = scaler.fit_transform(train_images)
	validation_images = scaler.transform(validation_images)
	

	# Soften the 0/1 targets in our labels matrix (e.g. to 0.05 and 0.95).
	if target_values is not None:
		for i in range(len(train_labels[:,0])):
			for j in range(len(train_labels[0,:])):
				if train_labels[i,j] < 0.5:
					train_labels[i,j] = target_values[0]
				else:
					train_labels[i,j] = target_values[1]

	print('\n============\n  SETTINGS\n============')
	print('Batch Size: ', batch_size)
	print('Target Values: ', target_values)
	print('Number of Epochs: ', epochs)
	print('V Learning Rate: ', v_learning_rate)
	print('W Learning Rate: ', w_learning_rate)
	print('V Decay Rate: ', v_decay_rate)
	print('W Decay Rate: ', w_decay_rate)
	print('Decay Frequency: ', decay_frequency)

	NN = NeuralNet(train_images, train_labels)

	for i in range(epochs):
		if (i > 0):
			data = np.concatenate((train_images, train_labels), axis=1)
			np.random.shuffle(data)
			train_images = data[:, :len(images[0])]
			train_labels = data[:, len(images[0]):]
			NN.updateData(train_images, train_labels)
			if ((i % decay_frequency) == 0):
				_v_learning_rate = v_decay_rate*_v_learning_rate
				_w_learning_rate = w_decay_rate*_w_learning_rate

		NN.trainMini(batch_size=batch_size, v_learning_rate=_v_learning_rate,
			w_learning_rate=_w_learning_rate)

		print('\n============\n  EPOCH:', i, '\n============')

		# TRAINING ACCURACY
		y_hat = NN.classifyAll(train_images)
		train_correct = 0
		train_size = len(NN.images)
		for j in range(train_size):
			if (y_hat[j] == np.argmax(NN.labels[j]) + 1):
				train_correct += 1
		print('\nTraining Classification Complete:')
		print('Training Set Error: ', 1 - (train_correct / train_size))

		# VALIDATION ACCURACY
		total_correct = 0
		validation_size = len(validation_images)
		z = NN.classifyAll(validation_images)
		for j in range(len(z)):
			if (z[j] == np.argmax(validation_labels[j]) + 1):
				total_correct += 1
		print('\nValidation Classification Complete:')
		print('Validation Error Rate: ', 1 - (total_correct/validation_size), '\n')
Example #3
training_dataset = MNISTDataset('C:/mnist/train-images-idx3-ubyte.gz', 'C:/mnist/train-labels-idx1-ubyte.gz')
test_dataset = MNISTDataset('C:/mnist/t10k-images-idx3-ubyte.gz', 'C:/mnist/t10k-labels-idx1-ubyte.gz')

#These are the training and testing batch sizes
training_batch_size = 50
test_batch_size = 1000

#Loads the datasets into dataloaders so the datasets can be enumerated
training_loader = torch.utils.data.DataLoader(training_dataset, batch_size = training_batch_size, shuffle = True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size = test_batch_size, shuffle = True)
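
#A quick sanity check: pull one batch from the loader (assumes MNISTDataset
#yields (image, label) pairs; shapes shown are for 28x28 MNIST images)
sample_images, sample_labels = next(iter(training_loader))
print(sample_images.shape, sample_labels.shape)  #e.g. [50, 1, 28, 28] and [50]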

#Variable for the number of epochs we are training for
num_epochs = 25

#Creates the neural network object
neural_net = NeuralNet()

#Creating empty lists to append values to during training and testing
train_losses = []
train_counter = []
test_accuracy = []
test_losses = []

#This creates a list with the point where each epoch ends. It will later be used to plot the average loss
#during testing. It's num_epochs + 1 because it also makes a point for when no training has been done and
#the weights are still random. You would remove the +1 if you did not do an initial test.
test_counter = [num * training_dataset.num_images for num in range(num_epochs + 1)]
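#For example, with 60,000 training images and 25 epochs this gives the 26 points
#[0, 60000, 120000, ..., 1500000]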

#The loss function evaluates how well the neural network is doing.
#We use Cross Entropy Loss as it punishes the model more heavily for being confident in a wrong answer.
loss_function = nn.CrossEntropyLoss()
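
#A minimal sketch of how these pieces would fit together in a training loop.
#Illustrative only: it assumes neural_net returns raw logits and uses a
#hypothetical SGD optimizer; the original training code is not in this excerpt
optimizer = torch.optim.SGD(neural_net.parameters(), lr=0.01)
for epoch in range(num_epochs):
    for batch_idx, (batch_images, batch_labels) in enumerate(training_loader):
        optimizer.zero_grad()                       #clear gradients from the previous step
        output = neural_net(batch_images)           #forward pass: class logits
        loss = loss_function(output, batch_labels)  #cross entropy against integer labels
        loss.backward()                             #backpropagate
        optimizer.step()                            #update the weights
        train_losses.append(loss.item())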
Example #4
train = 'stochastic'
#train = 'batch'
#Parameter settings

def error_rate(X_val, Y_val, nn):
    error = 0
    for i in range(X_val.shape[0]):
        y = nn.predict(X_val[i, :])
        if y != Y_val[i, 0]:
            error += 1
    return error / float(X_val.shape[0])
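
# Toy check of error_rate with a trivial stand-in predictor (illustrative only,
# not part of the original experiment):
class AlwaysZero:
    def predict(self, x):
        return 0

assert error_rate(np.zeros((4, 2)), np.array([[0], [0], [1], [1]]), AlwaysZero()) == 0.5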

er = []
if train == 'stochastic':
    for hdim in hdims:
        nn = NeuralNet(X_train.shape[1], hdim, 6, lam)
        for it in range(20):
            LL = 0
            for i in range(X_train.shape[0]):
                pr = nn.update_stochastic(X_train[i], Y_train[i, 0], lrate)
                LL += pr
            LL /= float(X_train.shape[0])  # average over the epoch, not per sample

        er_val = error_rate(X_val, Y_val, nn)
        er_train = error_rate(X_train, Y_train, nn)
        er_test = error_rate(X_test, Y_test, nn)
        er.append(er_val)
        print "error rate on val db: hdim: {}, er rate: {}".format(hdim, er_val)
        print "error rate on train db: hdim: {}, er rate: {}".format(hdim, er_train)
        print "error rate on test db: hdim: {}, er rate: {}".format(hdim, er_test)
Example #5
validation_labels = data[round(.8*len(data)):, len(images[0]):]

scaler = skp.StandardScaler()
train_images = scaler.fit_transform(train_images)
validation_images = scaler.transform(validation_images)

# Soften the 0/1 targets in our labels matrix to 0.1 and 0.9
for i in range(len(train_labels[:,0])):
	for j in range(len(train_labels[0,:])):
		if train_labels[i,j] < 0.5:
			train_labels[i,j] = 0.1
		else:
			train_labels[i,j] = 0.9

# CREATE NeuralNet Object
NN = NeuralNet(train_images, train_labels)

# TRAIN NeuralNet Object
for i in range(5):
	if (i > 0):
		data = np.concatenate((train_images, train_labels), axis=1)
		np.random.shuffle(data)
		train_images = data[:, :len(images[0])]
		train_labels = data[:, len(images[0]):]
		NN.updateData(train_images, train_labels)
	NN.trainMini(batch_size=25, v_learning_rate=0.1, w_learning_rate=0.01)

correct_list = list()
incorrect_list = list()

# TRAINING ACCURACY
Example #6
hdims = [10]


def error_rate(X_val, Y_val, nn):
    error = 0
    for i in range(X_val.shape[0]):
        y = nn.predict(X_val[i, :])
        if y != Y_val[i, 0]:
            error += 1
    return error / float(X_val.shape[0])


er = []

for hdim in hdims:
    nn = NeuralNet(X_train.shape[1], hdim, 3, lam)
    for it in range(20):
        LL = 0
        for i in range(X_train.shape[0]):
            pr = nn.update_stochastic(X_train[i], Y_train[i, 0], lrate)
            LL += pr
        LL /= float(X_train.shape[0])  # average over the epoch, not per sample

    er_val = error_rate(X_val, Y_val, nn)
    er_train = error_rate(X_train, Y_train, nn)
    er_test = error_rate(X_test, Y_test, nn)
    er.append(er_val)
    print "error rate on val db: hdim: {}, er rate: {}".format(hdim, er_val)
    print "error rate on train db: hdim: {}, er rate: {}".format(hdim, er_train)
    print "error rate on test db: hdim: {}, er rate: {}".format(hdim, er_test)
Example #7
train_labels = data[:, len(images[0]):]

scaler = skp.StandardScaler()
train_images = scaler.fit_transform(train_images)
test_data = np.load('data/test_data.npy')
test_data = scaler.transform(test_data)

# Soften the 0/1 targets in our labels matrix (here to 0.05 and 0.95).
for i in range(len(train_labels[:,0])):
	for j in range(len(train_labels[0,:])):
		if train_labels[i,j] < 0.5:
			train_labels[i,j] = .05
		else:
			train_labels[i,j] = .95

NN = NeuralNet(train_images, train_labels)

for i in range(8):
	if (i > 0):
		data = np.concatenate((train_images, train_labels), axis=1)
		np.random.shuffle(data)  # reshuffle the training set between epochs
		train_images = data[:, :len(images[0])]
		train_labels = data[:, len(images[0]):]
		NN.updateData(train_images, train_labels)
	NN.trainMini(batch_size=50, v_learning_rate=0.1, w_learning_rate=0.01,
		v_decay_rate=1, w_decay_rate=1)

f = open('kaggle_submission.csv', 'w')
header = 'Id,Category\n'
f.write(header)

predictions = NN.classifyAll(test_data)
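
# A minimal sketch of writing the submission rows (assumes predictions is a
# 1-D array of class labels; whether Ids start at 0 or 1 depends on the competition):
for idx, label in enumerate(predictions):
	f.write('{},{}\n'.format(idx, int(label)))
f.close()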
Example #8
validation_labels = data[round(.8 * len(data)):, len(images[0]):]

scaler = skp.StandardScaler()
train_images = scaler.fit_transform(train_images)
validation_images = scaler.transform(validation_images)

# Soften the 0/1 targets in our labels matrix to 0.1 and 0.9
for i in range(len(train_labels[:, 0])):
    for j in range(len(train_labels[0, :])):
        if train_labels[i, j] < 0.5:
            train_labels[i, j] = 0.1
        else:
            train_labels[i, j] = 0.9

# CREATE NeuralNet Object
NN = NeuralNet(train_images, train_labels)

# TRAIN NeuralNet Object (a single pass, recording the cost curve for plotting)
for i in range(1):
    x_plot, y_plot = NN.trainPlot(v_learning_rate=0.01, w_learning_rate=0.001)
    # Plot of cost function vs number of iterations.
    plt.plot(x_plot, y_plot, 'r-')
    plt.xlabel('Number of Iterations')
    plt.ylabel('J(y, z; x, V, W)')
    plt.title('Cost Function vs. Number of Iterations')
    plt.savefig('images/Cost_Function.png', bbox_inches='tight')

# TRAINING ACCURACY
y_hat = NN.classifyAll(train_images)
test_correct = 0
test_size = len(NN.images)