def differential_evolution(dim, epochs, pop_size, axis_range, cr, dw):
    """Differential evolution training function.
	Main driver for the DE optimization of network weights.

	Parameters:
		dim : the dimensionality of network.
		epochs : how many generations to run.
		pop_size : the population size.
		axis_range : the minimum and maximum values for a given axis.
		cr : crossover rate.
		dw : differential weight.
	"""
    if not AUTO:
        print('Epoch, MSE, Train. Acc%, Test Acc%')
    # initialize the population with randomly weighted networks
    population = net.initialize_population(Solution, pop_size, dim, axis_range)
    for e in range(1, epochs + 1):
        population.sort()  # sort population by fitness
        MSE.append(population[0].get_fit())  # get fitness of best network
        # build a network from the most fit solution to test performance
        network = net.initialize_network(population[0].get_pos(), \
         FEATURES, HIDDEN_SIZE, CLASSES)
        # training accuracy of network
        TRP.append(net.performance_measure(network, TRAIN,
                                           activation_function))
        # testing accuracy of network
        TEP.append(net.performance_measure(network, TEST, activation_function))
        # evolve population based on differential evolution rules
        population = evolve(population, dim, cr, dw)
        io.out_console(AUTO, e, MSE, TRP, TEP)
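
The evolve helper that applies the DE rules is not shown in this excerpt. A minimal sketch of what a rand/1/bin step could look like, assuming Solution computes its fitness on construction (as in the constructor shown further below) and exposes get_pos()/get_fit():

from random import random, randrange, sample

def evolve(population, dim, cr, dw):
    """Hypothetical DE rand/1/bin step: mutate, cross over, select greedily."""
    next_generation = []
    for target in population:
        # pick three distinct solutions other than the target
        a, b, c = sample([s for s in population if s is not target], 3)
        j_rand = randrange(dim)  # gene index that always takes the donor value
        donor = [a.get_pos()[j] + dw * (b.get_pos()[j] - c.get_pos()[j])
                 for j in range(dim)]
        trial_pos = [donor[j] if (random() < cr or j == j_rand)
                     else target.get_pos()[j] for j in range(dim)]
        trial = Solution(trial_pos)  # fitness is computed on construction
        # greedy selection: keep whichever of target and trial is fitter
        next_generation.append(trial if trial.get_fit() < target.get_fit()
                               else target)
    return next_generation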
Example #2
def pso(dim, epochs, swarm_size, ine_c, cog_c, soc_c):
	"""Particle Network training function
	Main driver for PSO algorithm

	Parameters:
		dim : dimensionality of the problem.
		epochs : how many iterations.
		swarm_size : how big a swarm is.
		ine_c : inertial coefficient (omega).
		cog_c : cognitive coefficient (c_1).
		soc_c : social coefficient (c_2).
	"""
	if not AUTO:
		print('Epoch, MSE, Train. Acc%, Test Acc%')
	swarm = initialize_swarm(swarm_size, dim) # init swarm
	for e in range(1, epochs+1):
		# get swarm best fitness and position
		swarm_best = get_swarm_best(swarm)
		MSE.append(swarm_best[0]) # get error of network using swarm best
		# network to get performance metrics on
		network = net.initialize_network(swarm_best[1], FEATURES, \
			HIDDEN_SIZE, CLASSES)
		# get classification error of network for training and test
		TRP.append(net.performance_measure(network, TRAIN, activation_function))
		TEP.append(net.performance_measure(network, TEST, activation_function))
		# reposition particles based on PSO params
		move_particles(swarm, dim, ine_c, cog_c, soc_c)
		io.out_console(AUTO, e, MSE, TRP, TEP)
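
This variant reads the swarm best once per epoch via get_swarm_best, which is not shown. A plausible reading, assuming each particle exposes the fit and pos attributes from the Particle constructor shown further below:

def get_swarm_best(swarm):
	"""Hypothetical helper: return the (fitness, position) pair of the fittest particle."""
	best = min(swarm, key=lambda particle: particle.fit)  # lower MSE is fitter
	return best.fit, best.pos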
def particle_swarm_optimization(dim, epochs, swarm_size, axis_range, w, c1, c2):
	"""Particle Network training function.
	Main driver for the PSO optimization of network weights.

	Parameters:
		dim : dimensionality of the problem.
		epochs : how many iterations.
		swarm_size : how big a swarm is.
		axis_range : the minimum and maximum value an axis may be.
		w : inertial coefficient (omega).
		c1 : cognitive coefficient (c_1).
		c2 : social coefficient (c_2).
	"""
	if not AUTO:
		print('Epoch, MSE, Train. Acc%, Test Acc%')
	# initialize swarm of solutions
	swarm = net.initialize_population(Particle, swarm_size, dim, axis_range)
	for e in range(1, epochs+1):
		swarm.sort() # sort swarm by fitness
		MSE.append(swarm[0].get_fit()) # get error of network using swarm best
		# network to get performance metrics on
		network = net.initialize_network(swarm[0].get_pos(), FEATURES, \
			HIDDEN_SIZE, CLASSES)
		# get classification error of network for training and test
		TRP.append(net.performance_measure(network, TRAIN, activation_function))
		TEP.append(net.performance_measure(network, TEST, activation_function))
		# reposition particles based on PSO params
		move_particles(swarm, swarm[0], dim, w, c1, c2)
		io.out_console(AUTO, e, MSE, TRP, TEP)
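
move_particles itself is not shown. Under the assumption that it implements the canonical PSO update, and that particles expose the pos, vel and best_pos attributes plus the set_pos mutator shown further below, a sketch of this variant could be:

from random import uniform

def move_particles(swarm, swarm_best, dim, w, c1, c2):
	"""Hypothetical PSO update: inertial, cognitive and social pulls per axis."""
	for particle in swarm:
		for d in range(dim):
			r1, r2 = uniform(0, 1), uniform(0, 1)
			particle.vel[d] = (w * particle.vel[d]
				+ c1 * r1 * (particle.best_pos[d] - particle.pos[d])
				+ c2 * r2 * (swarm_best.get_pos()[d] - particle.pos[d]))
		# set_pos updates the personal best when the new position is fitter (see the mutator below)
		particle.set_pos([p + v for p, v in zip(particle.pos, particle.vel)])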
Example #4
def bat_algorithm(dim, epochs, pop_size, axis_range, alf, gam, bnd, qmin,
                  qmax):
    """Differential evolution training function.
	Main driver for the BA optimization of network weights.

	Parameters:
		dim : the dimensionality of network.
		epochs : how many generations to run.
		pop_size : the population size.
		alf : loudness decreasing rate.
		gam : pulse rate increasing rate.
		bnd : boundary to clamp position.
		qmin : minimum frequency.
		qmax : maximum frequency.
	"""
    if not AUTO:
        print('Epoch, MSE, Train. Acc%, Test Acc%')
    # initialize the population with randomly weighted networks
    population = net.initialize_population(Bat, pop_size, dim, axis_range)
    for e in range(1, epochs + 1):
        population.sort()  # sort the population by fitness
        MSE.append(population[0].get_fit())  # get fitness of best network
        # make network to get performance metrics
        network = net.initialize_network(population[0].get_pos(), \
         FEATURES, HIDDEN_SIZE, CLASSES)
        # training accuracy of network
        TRP.append(net.performance_measure(network, TRAIN,
                                           activation_function))
        # testing accuracy of network
        TEP.append(net.performance_measure(network, TEST, activation_function))
        step = float(e) / epochs  # fraction of epochs elapsed
        # move each bat in population
        population = move_bats(population, dim, qmin, qmax, alf, gam, bnd,
                               step)
        io.out_console(AUTO, e, MSE, TRP, TEP)
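
move_bats is likewise not shown. A compressed sketch of the standard bat-algorithm step it would have to perform, assuming the Bat attributes from the constructor shown further below and reusing the same fitness evaluation as the constructors:

from math import exp
from random import gauss, uniform

def move_bats(population, dim, qmin, qmax, alf, gam, bnd, step):
    """Hypothetical bat-algorithm step over a sorted population (index 0 is best)."""
    best = population[0]
    for bat in population:
        freq = qmin + (qmax - qmin) * uniform(0, 1)  # random frequency per bat
        for d in range(dim):
            bat.vel[d] += (bat.pos[d] - best.get_pos()[d]) * freq
        candidate = [p + v for p, v in zip(bat.pos, bat.vel)]
        if uniform(0, 1) > bat.pulse_rate:
            # local random walk around the current best solution
            candidate = [b + gauss(0, 1) * bat.loudness for b in best.get_pos()]
        candidate = [max(-bnd, min(bnd, c)) for c in candidate]  # clamp to the boundary
        cand_net = net.initialize_network(candidate, FEATURES, HIDDEN_SIZE, CLASSES)
        cand_fit = net.mse(cand_net, CLASSES, TRAIN, activation_function)
        if uniform(0, 1) < bat.loudness and cand_fit < bat.get_fit():
            bat.pos, bat.fit = candidate, cand_fit  # accept the move
            bat.loudness *= alf  # loudness decays toward silence
            bat.pulse_rate = bat.max_pulse_rate * (1 - exp(-gam * step))
    return population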
    def __init__(self, pos):
        """Solution constructor."""
        self.pos = pos
        # initialize solution as a network to check fitness, since fitness is
        # a function of feedforwarding training examples
        network = net.initialize_network(self.pos, FEATURES, \
            HIDDEN_SIZE, CLASSES)
        self.fit = net.mse(network, CLASSES, TRAIN, activation_function)
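
The drivers sort populations of these objects in place and call get_pos()/get_fit() on them, so the class presumably also carries ordering and accessor methods along the lines of:

    def get_pos(self):
        """Position accessor."""
        return self.pos

    def get_fit(self):
        """Fitness accessor."""
        return self.fit

    def __lt__(self, other):
        """Order by fitness so that sort() places the lowest-MSE solution first."""
        return self.fit < other.fit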
Example #6
	def set_genes(self, genes):
		"""Genes mutator method."""
		self.genes = genes
		# when setting genes subsequent times
		# update the fitness
		network = net.initialize_network(self.genes, FEATURES, \
			HIDDEN_SIZE, CLASSES)
		self.fit = net.mse(network, CLASSES, TRAIN, activation_function)
	def __init__(self, pos):
		"""Particle constructor."""
		# initialize position and velocity
		self.pos, self.vel = pos, [0.00 for _ in range(len(pos))]
		# find fitness at instantiation
		network = net.initialize_network(self.pos, FEATURES, \
			HIDDEN_SIZE, CLASSES)
		self.fit = net.mse(network, CLASSES, TRAIN, activation_function)
		# best so far is just initial
		self.best_pos, self.best_fit = self.pos, self.fit
Example #8
    def __init__(self, pos):
        """Bat constructor."""
        self.pos, self.vel = pos, [0.00 for _ in range(len(pos))]
        self.loudness = uniform(1, 2)  # loudness is some random value 1..2
        self.max_pulse_rate = uniform(0, 1)  # max pulse rate varies per bat
        self.pulse_rate = 0  # initially pulse rate is 0 and climbs to max
        # find fitness at instantiation
        network = net.initialize_network(self.pos, FEATURES, \
            HIDDEN_SIZE, CLASSES)
        self.fit = net.mse(network, CLASSES, TRAIN, activation_function)
Example #9
	def __init__(self, genes, fit=None):
		"""Chromosome constructor without fitness."""
		# initialize weights from parameter
		self.genes = genes
		# if no fitness argument is passed, compute the fitness
		# from the genes; otherwise use the supplied value
		if fit is None:
			network = net.initialize_network(self.genes, FEATURES, \
				HIDDEN_SIZE, CLASSES)
			self.fit = net.mse(network, CLASSES, TRAIN, activation_function)
		else:
			self.fit = fit
	def set_pos(self, pos):
		"""Position mutator method."""
		self.pos = pos
		if not any(p < -BOUND for p in pos)\
		and not any(p > BOUND for p in pos):
			# get fitness of new position
			network = net.initialize_network(self.pos, FEATURES, \
				HIDDEN_SIZE, CLASSES)
			fitness = net.mse(network, CLASSES, TRAIN, activation_function)
			# if better
			if fitness < self.best_fit:
				self.fit = fitness
				# update best fitness
				self.best_fit = self.fit
				# update best position
				self.best_pos = self.pos
Example #11
def genetic_network(el_p, to_p, dim, epochs, pop_size, cro_r, mut_r):
	"""Genetic Neural Network training function.

	Parameters:
		el_p : the proportion of elites.
		to_p : the proportion of the population entering the tournament.
		dim : dimensionality of the network.
		epochs : how many generations to run.
		pop_size : the population size.
		cro_r : crossover rate.
		mut_r : mutation rate.

	Returns:
		A trained neural network.
	"""
	if not AUTO:
		print('Epoch, MSE, Train. Acc%, Test Acc%')
	# initialize the population with randomly weighted networks
	population = initialize_population(pop_size, dim)
	for e in range(1, epochs+1):
		# sort the population by fitness
		population.sort()
		# get fitness of network
		MSE.append(population[0].get_fit())
		# make network to get performance metrics
		network = net.initialize_network(population[0].get_genes(), \
			FEATURES, HIDDEN_SIZE, CLASSES)
		# training accuracy of network
		TRP.append(net.performance_measure(network, TRAIN, activation_function))
		# testing accuracy of network
		TEP.append(net.performance_measure(network, TEST, activation_function))
		mating_pool = [] # init mating pool
		# get elites from population
		elites = elite_selection(population, el_p)
		del population[:len(elites)] # remove elites
		# find tournament and winner
		t_winner = tournament_selection(population, to_p)
		# add tournament victor and elites to mating pool
		mating_pool.extend(elites)
		mating_pool.append(t_winner)
		# generate a new population based on mating pool
		population = evolve(mating_pool, elites, pop_size, cro_r, mut_r)
		mating_pool.clear() # clear mating pool for next gen
		io.out_console(AUTO, e, MSE, TRP, TEP)
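
elite_selection and tournament_selection are not shown in this excerpt. A rough sketch of both, assuming the population arrives sorted by fitness and that el_p / to_p are fractions of the population size:

from random import sample

def elite_selection(population, el_p):
	"""Hypothetical elitism: take the top el_p fraction of a fitness-sorted population."""
	n_elites = max(1, int(len(population) * el_p))
	return population[:n_elites]

def tournament_selection(population, to_p):
	"""Hypothetical tournament: sample a to_p fraction and return its fittest chromosome."""
	t_size = max(1, int(len(population) * to_p))
	tournament = sample(population, t_size)
	return min(tournament, key=lambda chromosome: chromosome.get_fit())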
Example #12
def activation_derivative(z):
    """Derivative of the logistic activation function.

    Parameters:
        z : summing output.

    Returns:
        The differential of the neural output.
    """
    return z * (1 - z)


if __name__ == '__main__':
    # if executed from automation script
    if len(argv) == 3:
        AUTO = bool(int(argv[2]))
    else:
        AUTO = False
    TRAIN, TEST = io.load_data(f'../data/{argv[1]}.csv')
    FEATURES = len(TRAIN[0][:-1])
    CLASSES = len({c[-1] for c in TRAIN + TEST})
    HIDDEN_SIZE = par.get_hidden_size(argv[1])
    DIMENSIONS = (HIDDEN_SIZE * (FEATURES+1)) + \
     (CLASSES * (HIDDEN_SIZE+1))
    WEIGHTS = [random.uniform(par.get_rand_range()[0], par.get_rand_range()[1])\
     for _ in range(DIMENSIONS)]
    NETWORK = net.initialize_network(WEIGHTS, FEATURES, HIDDEN_SIZE, CLASSES)
    LEARNING_RATE, MOMENTUM_RATE = par.get_bp_params(argv[1])
    EPOCHS = par.get_epochs()
    MSE, TRP, TEP = [], [], []
    stochastic_gradient_descent(NETWORK, CLASSES, TRAIN)
    if not AUTO:
        io.plot_data(EPOCHS, MSE, TRP, TEP)
    exit(0)
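
The __main__ block expects a dataset name as its first argument (used to locate ../data/<name>.csv and to look up the per-dataset hyperparameters) and an optional second flag marking an automated run. Assuming the script were saved as, say, train.py and an iris dataset existed under ../data/, an invocation might look like:

    python train.py iris      # interactive run; plots MSE and accuracy curves at the end
    python train.py iris 1    # automated run driven by a wrapper script (no plotting)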