def initialize_population(size, dim):
    """Initializes a random population.

    Parameters:
        size : the size of the population.
        dim : the dimensionality of the problem.

    Returns:
        A random population of that many points.
    """
    population = []  # population stored as a list
    for _ in range(size):  # for the size of the population
        # get random initial weight range
        rand_min, rand_max = par.get_rand_range()
        # genes are uniformly random within that range
        genes = [random.uniform(rand_min, rand_max) for _ in range(dim)]
        chromosome = Chromosome(genes)  # create the chromosome
        population.append(chromosome)   # add to population
    return population
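For reference, the sketch below shows how this initializer might be exercised on its own. The Chromosome class and par.get_rand_range() live elsewhere in the project, so the stand-ins here (a bare gene container and a fixed range of ±0.5) are assumptions for illustration only.

import random


class DemoChromosome:
    """Hypothetical stand-in for the project's Chromosome class."""
    def __init__(self, genes):
        self.genes = genes
        self.fitness = None  # assumed to be set later by an evaluation step


def demo_initialize_population(size, dim, rand_min=-0.5, rand_max=0.5):
    """Same idea as initialize_population, with the range passed in directly."""
    return [DemoChromosome([random.uniform(rand_min, rand_max) for _ in range(dim)])
            for _ in range(size)]


population = demo_initialize_population(size=10, dim=4)
print(len(population), len(population[0].genes))  # -> 10 4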
def initialize_swarm(size, dim):
    """Swarm initialization function.

    Parameters:
        size : the size of our swarm.
        dim : the dimensionality of the problem.

    Returns:
        A random swarm of that many Particles.
    """
    swarm = []  # swarm stored as a list
    for _ in range(size):  # for the size of the swarm
        # get random initial weight range
        rand_min, rand_max = par.get_rand_range()
        # position is random in every dimension
        position = [random.uniform(rand_min, rand_max) for _ in range(dim)]
        # velocity is initially zero in every dimension
        velocity = [0 for _ in range(dim)]
        particle = Particle(position, velocity)  # init a particle
        swarm.append(particle)                   # add to swarm
    return swarm
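After initialization, each Particle is typically moved by the standard PSO velocity and position update. The sketch below is the textbook update rule, not necessarily the exact rule or coefficient values this repo uses; the inertia w, cognitive c1, and social c2 coefficients are assumed for illustration.

import random


def demo_pso_step(position, velocity, personal_best, global_best,
                  w=0.7, c1=1.5, c2=1.5):
    """One canonical PSO update for a single particle, dimension by dimension."""
    new_velocity = [
        w * v
        + c1 * random.random() * (pb - x)  # pull toward this particle's best
        + c2 * random.random() * (gb - x)  # pull toward the swarm's best
        for x, v, pb, gb in zip(position, velocity, personal_best, global_best)
    ]
    new_position = [x + v for x, v in zip(position, new_velocity)]
    return new_position, new_velocity


pos, vel = [0.1, -0.2], [0.0, 0.0]
pos, vel = demo_pso_step(pos, vel, personal_best=[0.0, 0.0], global_best=[0.3, 0.1])
print(pos, vel)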
    Returns:
        The neuron activation based on the summed output.
    """
    return z if z >= 0 else 0.01 * z


if __name__ == '__main__':
    # if executed from automation script
    if len(argv) == 3:
        AUTO = bool(int(argv[2]))
    else:
        AUTO = False

    MSE, TRP, TEP = [], [], []  # set up variables to store testing data

    # load data to train and test network on
    TRAIN, TEST = io.load_data(f'../data/{argv[1]}.csv', par.get_holdout())

    # network-specific parameters
    FEATURES = len(TRAIN[0][:-1])                 # number of attributes of data
    CLASSES = len({c[-1] for c in TRAIN + TEST})  # distinct classifications
    HIDDEN_SIZE = par.get_hidden_size(argv[1])
    DIMENSIONS = (HIDDEN_SIZE * (FEATURES + 1)) + (CLASSES * (HIDDEN_SIZE + 1))
    EPOCHS, AXIS_RANGE = par.get_epochs(), par.get_rand_range()

    # DE-specific parameters
    POP_SIZE = par.get_de_population_size()
    CROSS_RATE, DIFF_WEIGHT = par.get_de_params(argv[1])

    # run the DE-NN
    differential_evolution(DIMENSIONS, EPOCHS, POP_SIZE, AXIS_RANGE,
                           CROSS_RATE, DIFF_WEIGHT)

    if not AUTO:
        io.plot_data(EPOCHS, MSE, TRP, TEP)
    exit(0)
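The DIMENSIONS expression above counts every weight the network needs: each hidden neuron takes FEATURES inputs plus a bias, and each output neuron takes HIDDEN_SIZE inputs plus a bias. A small sanity-check sketch, with illustrative numbers only:

def demo_dimension_count(features, hidden_size, classes):
    """Weight count for a single-hidden-layer network, biases included."""
    hidden_weights = hidden_size * (features + 1)  # hidden layer, one bias per neuron
    output_weights = classes * (hidden_size + 1)   # output layer, one bias per neuron
    return hidden_weights + output_weights


# e.g. 4 features, 5 hidden neurons, 3 classes -> 5*5 + 3*6 = 43 weights
print(demo_dimension_count(4, 5, 3))  # -> 43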
        z : summing output.

    Returns:
        The derivative of the neuron output.
    """
    return z * (1 - z)


if __name__ == '__main__':
    # if executed from automation script
    if len(argv) == 3:
        AUTO = bool(int(argv[2]))
    else:
        AUTO = False

    # load data to train and test network on
    TRAIN, TEST = io.load_data(f'../data/{argv[1]}.csv')

    # network-specific parameters
    FEATURES = len(TRAIN[0][:-1])                 # number of attributes of data
    CLASSES = len({c[-1] for c in TRAIN + TEST})  # distinct classifications
    HIDDEN_SIZE = par.get_hidden_size(argv[1])
    DIMENSIONS = (HIDDEN_SIZE * (FEATURES + 1)) + (CLASSES * (HIDDEN_SIZE + 1))

    # random initial weights within the configured range
    RAND_MIN, RAND_MAX = par.get_rand_range()
    WEIGHTS = [random.uniform(RAND_MIN, RAND_MAX) for _ in range(DIMENSIONS)]
    NETWORK = net.initialize_network(WEIGHTS, FEATURES, HIDDEN_SIZE, CLASSES)

    # backpropagation-specific parameters
    LEARNING_RATE, MOMENTUM_RATE = par.get_bp_params(argv[1])
    EPOCHS = par.get_epochs()
    MSE, TRP, TEP = [], [], []  # set up variables to store testing data

    # train the network with stochastic gradient descent
    stochastic_gradient_descent(NETWORK, CLASSES, TRAIN)

    if not AUTO:
        io.plot_data(EPOCHS, MSE, TRP, TEP)
    exit(0)
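The z * (1 - z) form above is the derivative of the logistic sigmoid written in terms of the sigmoid's own output, so it expects z to already be an activation rather than a raw sum. The short numerical check below illustrates this; demo_sigmoid is a stand-in for the check only, not necessarily the project's transfer function.

import math


def demo_sigmoid(a):
    """Logistic sigmoid, used only for this check."""
    return 1.0 / (1.0 + math.exp(-a))


a = 0.8
z = demo_sigmoid(a)
analytic = z * (1 - z)  # derivative expressed via the sigmoid's output
eps = 1e-6
numeric = (demo_sigmoid(a + eps) - demo_sigmoid(a - eps)) / (2 * eps)
print(round(analytic, 6), round(numeric, 6))  # the two should agree closely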