def differential_evolution(dim, epochs, pop_size, axis_range, cr, dw):
    """Differential evolution training function.
    Main driver for the DE optimization of network weights.

    Parameters:
        dim : the dimensionality of the network.
        epochs : how many generations to run.
        pop_size : the population size.
        axis_range : the minimum and maximum values for a given axis.
        cr : crossover rate.
        dw : differential weight.
    """
    if not AUTO:
        print('Epoch, MSE, Train. Acc%, Test Acc%')
    # initialize the population with random candidate solutions
    population = net.initialize_population(Solution, pop_size, dim, axis_range)
    for e in range(1, epochs + 1):
        population.sort()  # sort population by fitness
        MSE.append(population[0].get_fit())  # get fitness of best network
        # turn the fittest solution into a network to test performance
        network = net.initialize_network(population[0].get_pos(),
                                         FEATURES, HIDDEN_SIZE, CLASSES)
        # training accuracy of network
        TRP.append(net.performance_measure(network, TRAIN,
                                           activation_function))
        # testing accuracy of network
        TEP.append(net.performance_measure(network, TEST, activation_function))
        # evolve population based on differential evolution rules
        population = evolve(population, dim, cr, dw)
        io.out_console(AUTO, e, MSE, TRP, TEP)
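
# `evolve` is defined elsewhere in this module; the sketch below is a
# minimal, hypothetical rendering of one DE/rand/1/bin generation (mutation,
# binomial crossover, greedy selection). It operates on plain lists of
# floats with an explicit `fitness` callable rather than the module's
# Solution objects -- those names and the list representation are assumptions.
import random

def de_generation(pop, fitness, cr, dw):
    """Return the next population after one DE/rand/1/bin generation."""
    dim = len(pop[0])
    next_pop = []
    for i, target in enumerate(pop):
        # pick three distinct donor vectors, none of them the target
        a, b, c = random.sample([p for j, p in enumerate(pop) if j != i], 3)
        j_rand = random.randrange(dim)  # force at least one mutated gene
        trial = [a[j] + dw * (b[j] - c[j])
                 if (random.random() < cr or j == j_rand) else target[j]
                 for j in range(dim)]
        # greedy selection: keep whichever vector has the lower error
        next_pop.append(trial if fitness(trial) <= fitness(target) else target)
    return next_pop
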
def particle_swarm_optimization(dim, epochs, swarm_size, axis_range, w, c1, c2):
	"""Particle swarm optimization training function.
	Main driver for the PSO optimization of network weights.

	Parameters:
		dim : dimensionality of the problem.
		epochs : how many iterations to run.
		swarm_size : the swarm size.
		axis_range : the minimum and maximum value an axis may take.
		w : inertia coefficient (omega).
		c1 : cognitive coefficient (c_1).
		c2 : social coefficient (c_2).
	"""
	if not AUTO:
		print('Epoch, MSE, Train. Acc%, Test Acc%')
	# initialize swarm of solutions
	swarm = net.initialize_population(Particle, swarm_size, dim, axis_range)
	for e in range(1, epochs+1):
		swarm.sort() # sort swarm by fitness
		MSE.append(swarm[0].get_fit()) # get error of network using swarm best
		# network to get performance metrics on
		network = net.initialize_network(swarm[0].get_pos(), FEATURES,
			HIDDEN_SIZE, CLASSES)
		# get classification error of network for training and test
		TRP.append(net.performance_measure(network, TRAIN, activation_function))
		TEP.append(net.performance_measure(network, TEST, activation_function))
		# reposition particles based on PSO params
		move_particles(swarm, swarm[0], dim, w, c1, c2)
		io.out_console(AUTO, e, MSE, TRP, TEP)
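
# `move_particles` is defined elsewhere; a minimal sketch of the standard
# PSO velocity/position update it presumably performs. Plain dicts with
# 'pos', 'vel', and 'best' (personal best) keys stand in for the module's
# Particle class -- those field names are illustrative assumptions, and
# personal-best bookkeeping is assumed to happen elsewhere.
import random

def pso_update(swarm, g_best, dim, w, c1, c2):
    for p in swarm:
        for d in range(dim):
            r1, r2 = random.random(), random.random()
            p['vel'][d] = (w * p['vel'][d]
                           + c1 * r1 * (p['best'][d] - p['pos'][d])
                           + c2 * r2 * (g_best[d] - p['pos'][d]))
            p['pos'][d] += p['vel'][d]
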
def pso(dim, epochs, swarm_size, ine_c, cog_c, soc_c):
	"""Particle swarm optimization training function.
	Main driver for the PSO algorithm.

	Parameters:
		dim : dimensionality of the problem.
		epochs : how many iterations to run.
		swarm_size : the swarm size.
		ine_c : inertia coefficient (omega).
		cog_c : cognitive coefficient (c_1).
		soc_c : social coefficient (c_2).
	"""
	if not AUTO:
		print('Epoch, MSE, Train. Acc%, Test Acc%')
	swarm = initialize_swarm(swarm_size, dim) # init swarm
	for e in range(1, epochs+1):
		# get swarm best fitness and position
		swarm_best = get_swarm_best(swarm)
		MSE.append(swarm_best[0]) # get error of network using swarm best
		# network to get performance metrics on
		network = net.initialize_network(swarm_best[1], FEATURES,
			HIDDEN_SIZE, CLASSES)
		# get classification error of network for training and test
		TRP.append(net.performance_measure(network, TRAIN, activation_function))
		TEP.append(net.performance_measure(network, TEST, activation_function))
		# reposition particles based on PSO params
		move_particles(swarm, dim, ine_c, cog_c, soc_c)
		io.out_console(AUTO, e, MSE, TRP, TEP)
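
# `get_swarm_best` returns a (fitness, position) pair for the fittest
# particle; a minimal sketch assuming each particle is a dict with 'fit'
# and 'pos' keys (hypothetical field names; lower error is better):
def best_of_swarm(swarm):
    best = min(swarm, key=lambda p: p['fit'])
    return best['fit'], best['pos']
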
def bat_algorithm(dim, epochs, pop_size, axis_range, alf, gam, bnd, qmin,
                  qmax):
    """Bat algorithm training function.
    Main driver for the BA optimization of network weights.

    Parameters:
        dim : the dimensionality of the network.
        epochs : how many generations to run.
        pop_size : the population size.
        axis_range : the minimum and maximum values for a given axis.
        alf : loudness decreasing rate (alpha).
        gam : pulse rate increasing rate (gamma).
        bnd : boundary to clamp position.
        qmin : minimum frequency.
        qmax : maximum frequency.
    """
    if not AUTO:
        print('Epoch, MSE, Train. Acc%, Test Acc%')
    # initialize the population with random candidate solutions
    population = net.initialize_population(Bat, pop_size, dim, axis_range)
    for e in range(1, epochs + 1):
        population.sort()  # sort the population by fitness
        MSE.append(population[0].get_fit())  # get fitness of best network
        # make network to get performance metrics
        network = net.initialize_network(population[0].get_pos(),
                                         FEATURES, HIDDEN_SIZE, CLASSES)
        # training accuracy of network
        TRP.append(net.performance_measure(network, TRAIN,
                                           activation_function))
        # testing accuracy of network
        TEP.append(net.performance_measure(network, TEST, activation_function))
        step = float(e) / epochs  # fraction of epochs elapsed
        # move each bat in population
        population = move_bats(population, dim, qmin, qmax, alf, gam, bnd,
                               step)
        io.out_console(AUTO, e, MSE, TRP, TEP)
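
# `move_bats` is not shown in this snippet; below is a minimal sketch of the
# standard bat-algorithm update (frequency-tuned velocity, loudness-scaled
# local walk around the best solution, greedy acceptance, positions clamped
# to +/- bnd). Dicts with 'pos'/'vel'/'fit' keys, the `fitness` callable,
# and the loudness/pulse schedules over `step` are all assumptions.
import math
import random

def bat_step(pop, fitness, dim, qmin, qmax, alf, gam, bnd, step):
    g_best = min(pop, key=lambda b: b['fit'])['pos']
    loud = alf ** step                   # loudness decays as epochs elapse
    pulse = 1.0 - math.exp(-gam * step)  # pulse rate grows as epochs elapse
    for bat in pop:
        q = qmin + (qmax - qmin) * random.random()  # random frequency
        cand = []
        for d in range(dim):
            bat['vel'][d] += (bat['pos'][d] - g_best[d]) * q
            x = bat['pos'][d] + bat['vel'][d]
            if random.random() > pulse:  # local walk around the swarm best
                x = g_best[d] + loud * random.uniform(-1, 1)
            cand.append(max(-bnd, min(bnd, x)))  # clamp to the boundary
        f = fitness(cand)
        if f <= bat['fit'] and random.random() < loud:  # greedy acceptance
            bat['pos'], bat['fit'] = cand, f
    return pop
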
def genetic_network(el_p, to_p, dim, epochs, pop_size, cro_r, mut_r):
	"""Genetic neural network training function.
	Main driver for the GA optimization of network weights.

	Parameters:
		el_p : the proportion of elites.
		to_p : the proportion of the population entering the tournament.
		dim : dimensionality of the network.
		epochs : how many generations to run.
		pop_size : the population size.
		cro_r : crossover rate.
		mut_r : mutation rate.
	"""
	if not AUTO:
		print('Epoch, MSE, Train. Acc%, Test Acc%')
	# initialize the population with random chromosomes
	population = initialize_population(pop_size, dim)
	for e in range(1, epochs+1):
		# sort the population by fitness
		population.sort()
		# get fitness of network
		MSE.append(population[0].get_fit())
		# make network to get performance metrics
		network = net.initialize_network(population[0].get_genes(),
			FEATURES, HIDDEN_SIZE, CLASSES)
		# training accuracy of network
		TRP.append(net.performance_measure(network, TRAIN, activation_function))
		# testing accuracy of network
		TEP.append(net.performance_measure(network, TEST, activation_function))
		mating_pool = [] # init mating pool
		# get elites from population
		elites = elite_selection(population, el_p)
		del population[:len(elites)] # remove elites
		# find tournament and winner
		t_winner = tournament_selection(population, to_p)
		# add tournament victor and elites to mating pool
		mating_pool.extend(elites)
		mating_pool.append(t_winner)
		# generate a new population based on mating pool
		population = evolve(mating_pool, elites, pop_size, cro_r, mut_r)
		mating_pool.clear() # clear mating pool for next gen
		io.out_console(AUTO, e, MSE, TRP, TEP)
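
# `elite_selection` and `tournament_selection` are defined elsewhere; the
# sketches below show the usual operators, assuming the population is
# already sorted by ascending error and each proportion lies in (0, 1).
# The explicit `fitness` callable is an assumption, standing in for the
# chromosomes' own fitness accessor.
import random

def take_elites(sorted_pop, el_p):
    """Copy the top el_p fraction of an already-sorted population."""
    return sorted_pop[:max(1, int(len(sorted_pop) * el_p))]

def tournament_winner(pop, to_p, fitness):
    """Sample a to_p fraction of the population; return its fittest member."""
    entrants = random.sample(pop, max(1, int(len(pop) * to_p)))
    return min(entrants, key=fitness)
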
def stochastic_gradient_descent(network, classes, training):
    """Training function for the neural network.
    Performs the feed-forward, backpropagation, and weight-update steps.

    Parameters:
        network : the neural network.
        classes : the number of classes for the data.
        training : data to train the network on.
    """
    if not AUTO:  # if normal execution
        print('Epoch, MSE, Train. Acc%, Test Acc%')
    for e in range(1, EPOCHS + 1):
        # there is no temporal delta, and therefore no momentum, for the
        # first training example, so the momentum step is skipped for it
        first_example = True
        total_error = 0.00
        for example in training:
            # keep the prior example's deltas for the momentum term;
            # None on the first example, which has no predecessor
            temporal_delta = (None if first_example else
                              [neuron['d'] for layer in network
                               for neuron in layer])
            # create a list of possible outputs
            outputs = [0 for _ in range(classes)]
            outputs[int(example[-1])] = 1  # denote correct classification
            # get actual output from feed forward pass. Feeding forward will
            # also initialize network neuron outputs to be used in backprop
            actual = net.feed_forward(network, example, activation_function)
            total_error += net.sse(actual, outputs)  # aggregate error
            # perform backpropagation to propagate error through network
            backpropagate(network, outputs)
            # update weights based on network params and neuron contents
            update_weights(network, example, temporal_delta)
            first_example = False  # now we can consider momentum
        # append results for this epoch to the global lists used for plots
        MSE.append(total_error / len(training))
        TRP.append(net.performance_measure(network, TRAIN,
                                           activation_function))
        TEP.append(net.performance_measure(network, TEST, activation_function))
        io.out_console(AUTO, e, MSE, TRP, TEP)  # output to console
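
# `update_weights` is defined elsewhere; a minimal sketch of a weight update
# with momentum consistent with the loop above, where `temporal_delta` holds
# the previous example's deltas (one per neuron) and is None on the first
# example. Neuron dicts with 'w' (weights, bias last), 'd' (delta), and 'o'
# (cached output) keys, plus the LRN_RATE and MOMENTUM constants, are
# illustrative assumptions.
LRN_RATE, MOMENTUM = 0.1, 0.9

def apply_update(network, example, temporal_delta):
    k = 0  # flat index into the previous example's deltas
    for l, layer in enumerate(network):
        # inputs to this layer: the example's features for the first layer,
        # otherwise the previous layer's cached outputs
        inputs = example[:-1] if l == 0 else [n['o'] for n in network[l - 1]]
        for neuron in layer:
            for j, x in enumerate(inputs):
                change = LRN_RATE * neuron['d'] * x
                if temporal_delta is not None:  # momentum term
                    change += MOMENTUM * temporal_delta[k] * x
                neuron['w'][j] += change
            neuron['w'][-1] += LRN_RATE * neuron['d']  # bias input is 1
            k += 1
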