Example #1
File: Net.py Project: phycsgy/ML
	def __init__(self, net, training_data, validation_data):
		# split each dataset into (inputs, expected outputs)
		validation_inputs_data, validation_results_data = \
			Net_Util.seperate(validation_data)

		training_inputs_data, training_results_data = \
			Net_Util.seperate(training_data)

		# cross-entropy cost and accuracy on the validation set
		self.evaluation_cost = np.sum(Net_Util.Cost.crossentropycost(
			net.feedforword(validation_inputs_data), validation_results_data))

		self.evaluation_accurate = Net_Util.accurate(
			net.feedforword(validation_inputs_data), validation_results_data)

		# cross-entropy cost and accuracy on the training set
		self.training_cost = np.sum(Net_Util.Cost.crossentropycost(
			net.feedforword(training_inputs_data), training_results_data))

		self.training_accurate = Net_Util.accurate(
			net.feedforword(training_inputs_data), training_results_data)
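
The evaluation_cost above sums elementwise cross-entropy over the whole validation set. For reference, here is a minimal self-contained numpy sketch of that cost; the exact behavior of Net_Util.Cost.crossentropycost is an assumption inferred from how it is called, not shown in this excerpt:

import numpy as np

def cross_entropy(activations, expected):
	# elementwise cross-entropy between outputs a and targets y:
	# -(y*ln(a) + (1-y)*ln(1-a)); nan_to_num guards the 0*log(0) cases
	return np.nan_to_num(-expected*np.log(activations)
						 - (1.0 - expected)*np.log(1.0 - activations))

a = np.array([0.9, 0.2, 0.7])  # toy network outputs
y = np.array([1.0, 0.0, 1.0])  # toy targets
total_cost = np.sum(cross_entropy(a, y))  # np.sum aggregates, as above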
Example #2
File: Net.py Project: phycsgy/ML
	def net_training(self, training_data, learning_rate, lamda):
		"""Train the network with one stochastic-gradient-descent step.

			learning_rate: gradient-descent step size
			lamda: regularization rate
		"""
		# separate input and output data
		input_data, result_data = Net_Util.seperate(training_data)
		num_sample = len(input_data)
		# forward pass
		self.outputs = self.feedforword(input_data)
		# backpropagation
		self.input_layer.backpropagation(result_data)
		# update weights and biases: per-sample gradients are stacked on
		# axis 1, so summing and dividing by num_sample averages them
		# over the batch before the regularization term is added
		for layer in self.hiden_layers:
			layer.weights = layer.weights - learning_rate*(np.sum(layer.delta_weights, axis=1)/num_sample
														   + layer.regularization(lamda))
			layer.biases = layer.biases - learning_rate*np.sum(layer.delta_biases, axis=1)/num_sample
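
The loop body is the usual regularized gradient step, w <- w - learning_rate * (batch-averaged gradient + regularization term). A self-contained numpy sketch of one such update follows; treating layer.regularization(lamda) as an L2 term proportional to the weights is an assumption about Net_Util, not something visible in this excerpt:

import numpy as np

rng = np.random.default_rng(0)
weights = rng.normal(size=(4, 3))           # toy weight matrix
delta_weights = rng.normal(size=(4, 5, 3))  # per-sample gradients on axis 1
learning_rate, lamda, num_sample = 0.1, 0.01, 5

# batch-average the per-sample gradients, as np.sum(..., axis=1)/num_sample does above
grad = np.sum(delta_weights, axis=1)/num_sample
# assumed L2 penalty standing in for layer.regularization(lamda)
weights = weights - learning_rate*(grad + lamda*weights)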