def optimise_network(self, all_x, all_y):
		'''run backpropagation for digit = self.digit and save results'''
		
		self.zone2init = fk_zone2(self.zone2_queue)
		self.z2thread = threading.Thread(target=self.zone2init.start)
		#self.z2thread.start()
		self.data_size = shape(all_x)[1]
		# execute backprop procedure forever
		while True:

			# get some pixels' labels to mangle
			self.selection = random.random_integers(0,self.data_size-1, self.num_threads)
			#self.mangle0 = random.randint(0,self.mangle_upper)
			#self.mangle1 = random.randint(0,self.mangle_upper)
			#self.random_mangle0 = random.random_integers(0,self.xi[0]-1, self.mangle0)
			#self.random_mangle1 = random.random_integers(0,self.xi[0]-1, self.mangle1)
			# mangle pixels, some --> 0 some --> 255	
			#self.x[self.random_mangle0] = 0
			#self.x[self.random_mangle1] = 255

			self.x = all_x[:,self.selection]
			self.y = all_y[:,self.selection]

			# spin up a fresh batch of worker threads for this minibatch
			self.threads = []
			for i in range(0,self.num_threads):
				th = threading.Thread(target=forwardBackward, args=(self.xi, self.x[:,i], self.y[:,i], self.MT, self.DELTA_queue, self.errors_queue, self.zone2_queue))
				self.threads.append(th)

			print 'spinning'

			# start the workers, then block until they all finish
			for i in range(0, self.num_threads): self.threads[i].start()
			for i in range(0, self.num_threads): self.threads[i].join()
			
			print 'joining'

			# debugging:	
			# print self.y		
			# print self.zone1out	
			print amax(self.MT)
			# print amin(self.MT)
			# print amax(self.x)
			# print amin(self.y)
			# print self.A[-30:]
			
			self.errors = self.errors_queue.get()/self.num_threads
			# run back propagation
			self.DELTA = back_propagation(self.y, self.A, self.MT, self.xi)
			self.MT = self.MT - self.alpha*self.DELTA/self.num_threads - self.beta*self.MT
			
			if self.counter == self.save_when:
				savetxt(self.file_out, self.MT, delimiter=',')
				print 'saved zone1 data to ' + self.file_out + '\t',
				print 'errors zone1: ' + str(sum(self.errors)) + '\tmax error arg: ' + str(argmax(self.errors)) + '\t MEAE Z1: ' + str(int(amax(self.errors))) + '\tmin error arg: ' + str(argmin(self.errors)) + '\t MinEAE Z1: ' + str(int(amin(self.errors)))

				self.counter = 0 # reset
			self.counter += 1

			self.DELTA = 0*self.DELTA
			self.errors = 0*self.errors
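The weight update at the end of each pass, MT = MT - alpha*DELTA/num_threads - beta*MT, steps along the gradient averaged over the workers and adds a small decay term on the weights. A minimal self-contained sketch of that rule (the shapes and hyperparameter values below are illustrative, not taken from this project):

import numpy as np

alpha, beta, num_threads = 0.1, 1e-4, 4      # hypothetical hyperparameters
MT = np.random.randn(10, 5)                  # weight matrix
DELTA = np.random.randn(10, 5)               # gradient summed over num_threads workers

# step along the averaged gradient, then shrink the weights slightly (decay)
MT = MT - alpha * DELTA / num_threads - beta * MT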
Example #2
def entire_network_back_prop(network, input_layer, cur_model_output, cur_desired_output):
    '''Back-propagate the cost through every hidden layer, from the output end toward the input.'''
    print("entire_network_back_prop: Begin")
    cost_ini = cost_func(cur_model_output, cur_desired_output)[2]
    print("entire_network_back_prop: finished calling cost_func()")
    new_cost = []

    layers = network.hidden_layers
    if len(layers) == 1:
        print("entire_network_back_prop: len(network.hidden_layers) == 1")
        new_cost = back_propagation(layers[0], input_layer, cost_ini)
    else:
        for i in range(len(layers)):
            print("entire_network_back_prop: i == " + str(i))
            if i == 0:
                # last hidden layer receives the initial cost
                new_cost = back_propagation(layers[-1], layers[-2], cost_ini)
            elif i == len(layers) - 1:
                # first hidden layer is fed by the input layer
                new_cost = back_propagation(layers[0], input_layer, new_cost)
            else:
                # intermediate layers are fed by the layer below them
                new_cost = back_propagation(layers[-(i + 1)], layers[-(i + 2)], new_cost)

    print("entire_network_back_prop: Done")
    return cost_ini
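The loop above visits the hidden layers from the output end back toward the input, passing each call's result into the next. A stand-in sketch of that traversal order (back_prop_stub and the layer objects are placeholders, not this project's back_propagation):

def back_prop_stub(layer, feeding_layer, incoming_cost):
    # placeholder: a real implementation would adjust `layer` using the
    # activations of `feeding_layer` and return the cost to pass further back
    return incoming_cost

def walk_layers_backwards(hidden_layers, input_layer, cost_ini):
    cost = cost_ini
    for i in range(len(hidden_layers) - 1, -1, -1):      # last hidden layer first
        feeding = hidden_layers[i - 1] if i > 0 else input_layer
        cost = back_prop_stub(hidden_layers[i], feeding, cost)
    return cost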
def forwardBackward(xi, x, y, MT, time_queue, good_queue, DELTA_queue):
	A = neural_forward(xi, x, MT)
	check = argmax(A[-xi[-1]:])
	# send back a progress statistic: count a hit when the predicted class matches the label
	if y[check] == 1:
		good = good_queue.get()
		good += 1
		good_queue.put(good)
		good_queue.task_done()

	time = time_queue.get()
	time += 1
	time_queue.put(time)
	time_queue.task_done()

	DELTA = DELTA_queue.get()
	DELTA = DELTA + back_propagation(y, A, MT, xi)
	DELTA_queue.put(DELTA)
	DELTA_queue.task_done()
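forwardBackward shares its counters and the accumulated gradient between threads through Queue objects: each worker get()s the current value, adds its contribution, and put()s it back, so only one thread holds the value at a time. A self-contained sketch of that accumulator pattern (the thread count and payload are made up for illustration):

import threading
try:
    import queue             # Python 3
except ImportError:
    import Queue as queue    # Python 2

def accumulate(q):
    # get/add/put acts as a simple hand-off: one thread holds the value at a time
    total = q.get()
    total += 1
    q.put(total)
    q.task_done()

q = queue.Queue()
q.put(0)
threads = [threading.Thread(target=accumulate, args=(q,)) for _ in range(8)]
for t in threads: t.start()
for t in threads: t.join()
print(q.get())    # 8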
	def start(self):
		'''start learning process for zone2 of facial keypoints analysis'''
		
		while False:	# note: as written the condition is False, so this loop body never executes
			
			self.data = self.zone2queue.get()
			self.zone2queue.task_done()
			self.x = self.data[:,0]
			self.y = self.data[:,1]
			self.A = neural_forward(self.xi, self.x, self.MT)
			self.DELTA = back_propagation(self.y, self.A, self.MT, self.xi)
			self.errors = sqrt((self.A[-self.xi[1]:]-self.y)**2)
			self.MT = self.MT - self.alpha*self.DELTA - self.beta*self.MT
			self.DELTA = 0*self.DELTA

			if self.counter == self.save_when:
				savetxt(self.file_out, self.MT, delimiter=',')
				print 'saved zone2 data to ' + self.file_out + '\t',
				print 'errors zone2: ' + str(sum(self.errors)) + '\tmax error arg: ' + str(argmax(self.errors)) + '\t MEAE Z2: ' + str(int(amax(self.errors))) + '\tmin error arg: ' + str(argmin(self.errors)) + '\t MinEAE Z2: ' + str(int(amin(self.errors)))

				self.counter = 0 # reset
			self.counter += 1
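Both training loops checkpoint the weight matrix with savetxt every save_when iterations and then reset the counter. A minimal sketch of that periodic-save pattern (the interval and filename below are invented for illustration):

import numpy as np

MT = np.zeros((4, 3))            # stand-in weight matrix
save_when = 100                  # hypothetical checkpoint interval
file_out = 'zone_weights.csv'    # hypothetical output path
counter = 0

for step in range(1000):
    # ... forward pass, back_propagation and weight update would go here ...
    if counter == save_when:
        np.savetxt(file_out, MT, delimiter=',')
        counter = 0              # reset so the next save is save_when steps away
    counter += 1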