예제 #1
0
def back_propagation_online_momentum(S,rho,nu,units_by_layer,factivation,max_it=250,report_it=50):
	k = 0      # it
	delta = [] # Errores  
	
	# Inicializar pesos a 0 #
	theta = []
	for l in xrange(1,len(units_by_layer)):
		theta.append([])
		if l-1==0: sm = units_by_layer[l-1]
		else:	   sm = units_by_layer[l-1]+1
		for i in xrange(units_by_layer[l]): theta[-1].append(np.zeros(sm))
				
	# Inicializar incr_theta_ant #
	incr_theta_ant = []
	for l in xrange(1,len(units_by_layer)):
		incr_theta_ant.append([])
		if l-1==0: sm = units_by_layer[l-1]
		else:	   sm = units_by_layer[l-1]+1
		for i in xrange(units_by_layer[l]): incr_theta_ant [-1].append(np.zeros(sm))
		
	# Plot #
	if Config.VERBOSE: color_classes = [Config.COLORS[c%len(Config.COLORS)]+Config.STYLE[c%len(Config.STYLE)] for c in xrange(len(S[0][1]))]
	
	# Mientras no converja #
	while k<max_it:
		# Inicializar delta #
		delta = []
		for l in xrange(1,len(units_by_layer)):
			delta.append([])
			for i in xrange(units_by_layer[l]): delta[-1].append(0)
		# Para cada muestra #
		m = 0	   # N muestra
		for (xk,tk) in S: 
			# Inicializar incr_theta a cada muestra #
			incr_theta = []
			for l in xrange(1,len(units_by_layer)):
				incr_theta.append([])
				if l-1==0: sm = units_by_layer[l-1]
				else:	   sm = units_by_layer[l-1]+1
				for i in xrange(units_by_layer[l]): incr_theta[-1].append(np.zeros(sm))
			##########################
			phi,s = forward_propagation(units_by_layer,xk,theta,factivation)
			# Desde la salida a la entrada #
			for l in xrange(len(units_by_layer)-1,0,-1):
				# Para cada nodo #
				for i in xrange(units_by_layer[l]):
					########## Calcular delta ###########	
					if l==len(units_by_layer)-1: delta[l-1][i] = (factivation[l-1][i][1](phi[l-1][i])*(tk[i]-s[l][i]))
					else: delta[l-1][i] += factivation[l-1][i][1](phi[l-1][i])*sum([delta[l][r]*theta[l][r][i+1] for r in xrange(units_by_layer[l+1])])					
					#####################################
					if l==len(units_by_layer)-1:
						# Calcular incr_theta (capa salida) #
						incr_theta[l-1][i][0] += delta[l-1][i]
						for j in xrange(units_by_layer[l-1]): incr_theta[l-1][i][j+1] += delta[l-1][i]*s[l-1][j]
						#####################################	
					else:
						# Calcular incr_theta (capas ocultas) #
						for j in xrange(units_by_layer[l-1]):
							if j==0: incr_theta[l-1][i][j] += delta[l-1][i]
							else:    incr_theta[l-1][i][j] += delta[l-1][i]*s[l-1][j]
						#####################################
			# Actualizaciones #					
			for l in xrange(len(theta)):
				for i in xrange(len(theta[l])): 	
					# Actualizar los pesos #	
					theta[l][i] += (rho*incr_theta[l][i]) + (nu*incr_theta_ant[l][i])
					# Actualizar incr_theta_ant #
					incr_theta_ant[l][i] = (rho*incr_theta[l][i]) + (nu*incr_theta_ant[l][i])
			m += 1	
		
		# Plot de las muestras #
		if Config.VERBOSE:
			if k%report_it==0:
				plt.clf()
				plt.ylabel("Y")
				plt.xlabel("X")
				maxX = float("-inf")
				maxY = float("-inf")
				plt.title("Iteracion backprop online con momentum: "+str(k))
				for (xk,tk) in S:
					predicted_tk = Decision.classify(units_by_layer,xk,theta,factivation)
					plt.plot(xk[1],xk[2],color_classes[predicted_tk])
					if xk[1]>maxX: maxX = xk[1]
					if xk[2]>maxY: maxY = xk[2]
				plt.axis([-maxX,2*maxX,-maxY,2*maxY])
				plt.show(block=False)
				print "Siguientes ",report_it," iteraciones [Enter]"
				raw_input()
				plt.close()
		k += 1
	return theta
예제 #2
0
	########
	# Learn the weight structure theta with each available trainer #
	rho = 0.5   # learning rate
	nu  = 0.5   # momentum coefficient
	l   = 1     # damping/buffer parameter -- presumably a window length; confirm against MLPLearning
	# Each trainer runs with the same sample set S, topology and activations;
	# the trailing 850, 50 arguments appear to be max iterations and report
	# period (matches back_propagation_online_momentum's signature -- confirm
	# for the other variants).
	theta1 = MLPLearning.back_propagation_batch(S,rho,units_by_layer,factivation,850,50)
	theta2 = MLPLearning.back_propagation_online(S,rho,units_by_layer,factivation,850,50)
	theta3 = MLPLearning.back_propagation_batch_momentum(S,rho,nu,units_by_layer,factivation,850,50)
	theta4 = MLPLearning.back_propagation_batch_buffer(S,rho,l,units_by_layer,factivation,850,50)
	theta5 = MLPLearning.back_propagation_online_buffer(S,rho,l,units_by_layer,factivation,850,50)
	theta6 = MLPLearning.back_propagation_online_momentum(S,rho,nu,units_by_layer,factivation,850,50)
	theta7,fitness = MLPLearning.evolutional(S,units_by_layer,factivation,200,500,-2,2,1.1,0.9)
	##############################
	
	# Classification: log the predicted class of one probe input under every
	# learned weight set (log messages are user-facing and kept in Spanish) #
	logging.info("Clase con theta1: (Backprop batch): "+str(Decision.classify(units_by_layer,[1.0,-6.3,1.0],theta1,factivation)))
	logging.info("Clase con theta2: (Backprop online): "+str(Decision.classify(units_by_layer,[1.0,-6.3,1.0],theta2,factivation)))
	logging.info("Clase con theta3: (Backprop batch con momentum): "+str(Decision.classify(units_by_layer,[1.0,-6.3,1.0],theta3,factivation)))
	logging.info("Clase con theta4: (Backprop batch con amortiguamiento): "+str(Decision.classify(units_by_layer,[1.0,-6.3,1.0],theta4,factivation)))
	logging.info("Clase con theta5: (Backprop online con amortiguamiento): "+str(Decision.classify(units_by_layer,[1.0,-6.3,1.0],theta5,factivation)))
	logging.info("Clase con theta6: (Backprop online con momentum): "+str(Decision.classify(units_by_layer,[1.0,-6.3,1.0],theta6,factivation)))
	logging.info("Clase con theta7: (Algoritmo genetico): "+str(Decision.classify(units_by_layer,[1.0,-5.0,1.0],theta7,factivation))+" fitness: "+str(fitness))

	#################
	# Regression: log the regression output for the same probe input #
	logging.info("Regresion con theta1: (Backprop batch): "+str(Decision.regression(units_by_layer,[1.0,-6.3,1.0],theta1,factivation)))
	logging.info("Regresion con theta2: (Backprop online): "+str(Decision.regression(units_by_layer,[1.0,-6.3,1.0],theta2,factivation)))
	logging.info("Regresion con theta3: (Backprop batch con momentum): "+str(Decision.regression(units_by_layer,[1.0,-6.3,1.0],theta3,factivation)))
	logging.info("Regresion con theta4: (Backprop batch con amortiguamiento): "+str(Decision.regression(units_by_layer,[1.0,-6.3,1.0],theta4,factivation)))
	logging.info("Regresion con theta5: (Backprop online con amortiguamiento): "+str(Decision.regression(units_by_layer,[1.0,-6.3,1.0],theta5,factivation)))
	logging.info("Regresion con theta6: (Backprop online con momentum): "+str(Decision.regression(units_by_layer,[1.0,-6.3,1.0],theta6,factivation)))
예제 #3
0
    # Train the remaining back-propagation variants; rho, nu, l and the
    # earlier theta1/theta2 are defined above this fragment.
    theta3 = MLPLearning.back_propagation_batch_momentum(
        S, rho, nu, units_by_layer, factivation, 850, 50)
    theta4 = MLPLearning.back_propagation_batch_buffer(S, rho, l,
                                                       units_by_layer,
                                                       factivation, 850, 50)
    theta5 = MLPLearning.back_propagation_online_buffer(
        S, rho, l, units_by_layer, factivation, 850, 50)
    theta6 = MLPLearning.back_propagation_online_momentum(
        S, rho, nu, units_by_layer, factivation, 850, 50)
    theta7, fitness = MLPLearning.evolutional(S, units_by_layer, factivation,
                                              200, 500, -2, 2, 1.1, 0.9)
    ##############################

    # Classification: log the predicted class of one probe input under every
    # learned weight set (log messages kept in Spanish, they are runtime text) #
    logging.info("Clase con theta1: (Backprop batch): " + str(
        Decision.classify(units_by_layer, [1.0, -6.3, 1.0], theta1,
                          factivation)))
    logging.info("Clase con theta2: (Backprop online): " + str(
        Decision.classify(units_by_layer, [1.0, -6.3, 1.0], theta2,
                          factivation)))
    logging.info("Clase con theta3: (Backprop batch con momentum): " + str(
        Decision.classify(units_by_layer, [1.0, -6.3, 1.0], theta3,
                          factivation)))
    logging.info("Clase con theta4: (Backprop batch con amortiguamiento): " +
                 str(
                     Decision.classify(units_by_layer, [1.0, -6.3, 1.0],
                                       theta4, factivation)))
    logging.info("Clase con theta5: (Backprop online con amortiguamiento): " +
                 str(
                     Decision.classify(units_by_layer, [1.0, -6.3, 1.0],
                                       theta5, factivation)))
    logging.info("Clase con theta6: (Backprop online con momentum): " + str(