forked from isabels/glacier-modeling
-
Notifications
You must be signed in to change notification settings - Fork 0
/
population.py
135 lines (111 loc) · 4.97 KB
/
population.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
import numpy as np
import random
import genetic_tools as gt
import basic_model
import evaluate
import tools
import csv
import operator
import pp
class Individual(object):
    """A single candidate solution: a parameter vector plus its cached fitness.

    Fitness starts at +infinity so that any real model evaluation
    (lower is better) immediately beats an unevaluated individual.
    """

    def __init__(self, parameters):
        self.fitness = float("inf")
        self.parameters = parameters
class Population(object):
base = tools.load_nolan_bedrock()
def __init__(self, size, length, zmin, zmax, fitness_function):
self.generation = 0
self.n = size #is NUMBER OF INDIVIDUALS
self.individuals = np.empty(size, dtype=object)
self.zmin = zmin
self.zmax = zmax
for i in range(self.n):
self.individuals[i] = Individual(gt.create(length, zmin, zmax))#gt.create(length, param_range))
self.mutation_rate = 1.0/length #trying w/ slightly more common mutation. we'll see what happens.
self. pool_size = 25 #this is how many you pick the best for for evolution. NOT the population size.
self.fitness_function = fitness_function
def best_fitness(self, return_index=False): #will return the index of best one as well when index is true
best = float("inf")
index = -1
for i in range(self.n):
if(self.individuals[i].fitness < best):
best = self.individuals[i].fitness
index = i
if(return_index):
return (index, best)
else:
return best
def choose(self): #returns the index in self.individuals of the fittest of the 10
best = float("inf")
best_index = -1
for i in range(self.pool_size):
index = random.randint(0, self.n-1)
if(self.individuals[index].fitness < best):
best = self.individuals[index].fitness
best_index = index
return(self.individuals[best_index])
def evolve(self):
new_generation = np.empty(self.n, dtype=object)
for i in range(self.n): #this completely replaces each generation. ???
a = self.choose()
b = self.choose()
child = gt.cross(a.parameters, b.parameters, .7)
new_generation[i] = Individual(gt.mutate(child, self.mutation_rate, self.zmin, self.zmax))
new_generation[i]
self.individuals = new_generation
self.generation += 1
# def run_model(self, parameters, base, fitness_function): #runs one model. to make things parallelizable
def run_models(self, parallelize = False, job_server = None): #defaults to running in serial, can make it parallel w/ params.
if(parallelize):
for i in range(self.n):
#gotta do this in serial first, because it's an argument to isothermalISM, which needs to be created in serial (or so it seems. but it's working now so i'm not gonna mess with it)
self.individuals[i].bed = map(operator.add, self.individuals[i].parameters, self.base)
#creates list of tuples of i and job running i's model
jobs = [(i, job_server.submit(run_model,(self.individuals[i].bed, basic_model.isothermalISM(58, 1000, .0015, .0005, .00022, self.individuals[i].bed[:]), self.fitness_function), (), ("operator", "basic_model", "tools"))) for i in range(self.n)]
print 'jobs created'
for i, job in jobs:
self.individuals[i].fitness = job()
print 'on individual', i, 'of', self.n
print 'fitness', self.individuals[i].fitness
else:
for i in range(self.n):
self.individuals[i].bed = map(operator.add, self.base, self.individuals[i].parameters)
run = basic_model.isothermalISM(58, 1000, .0015, .0005, .00022, self.individuals[i].bed[:])
for j in range(2000):
run.timestep(1)
self.individuals[i].surface = run.get_surface_elev()
self.individuals[i].fitness = self.fitness_function.evaluate(self.individuals[i].bed, self.individuals[i].surface)
print 'on individual', i, 'of', self.n
print 'fitness', self.individuals[i].fitness
def save_iteration(self, filename):
with open(filename, 'wb') as csvfile:
writer = csv.writer(csvfile)
for i in range(self.n):
writer.writerow([str(self.individuals[i].parameters).strip('"[]"'), self.individuals[i].fitness])
def load_iteration(self, filename):
return False # implement this when you need it, future isabel, i don't care
def run_model(bed, run, fitness_function, steps=2000, surface_len=58):
    """Run one already-constructed model instance to completion and score it.

    Kept at module level so pp can ship it to worker processes.

    bed              -- bed elevation profile the model was built with
    run              -- model instance exposing timestep(dt) and get_surface_elev()
    fitness_function -- object providing evaluate(bed, surface)
    steps            -- number of unit timesteps to integrate (default 2000,
                        previously hard-coded)
    surface_len      -- number of leading surface nodes passed to the
                        evaluator (default 58, matching the model's node
                        count; previously hard-coded)

    Returns the fitness value (lower is better).
    """
    for j in range(steps):
        run.timestep(1)
    surf = run.get_surface_elev()
    return fitness_function.evaluate(bed, surf[:surface_len])
def main():
    """Drive the GA: evolve bed perturbations until some individual's fitness
    drops to 500 or below, saving each generation to a CSV file."""
    # Parallel Python server; distributes model runs across local CPUs.
    job_server = pp.Server()
    print 'Currently using', job_server.get_ncpus(), 'cpus'
    fitness_function = evaluate.FitnessFunction()
    # 5 individuals, 58 parameters each, perturbations in [-500, 500].
    population = Population(5, 58, -500, 500, fitness_function)
    population.run_models(True,job_server) #initial run at generation 0 before we start evolving
    # Lower fitness is better; loop until the threshold is reached.
    while(population.best_fitness() > 500):
        population.evolve()
        population.run_models(True,job_server)
        print population.best_fitness(True) #now this reflects generation that has just been done
        population.save_iteration('generation%d.csv' % population.generation)
    print "best fitness better than 500, program has finished."
# Script entry point.
if __name__=='__main__':
    main()
#experiments to run / things to do:
#figure out why the average starting fitness is so high -- it wasn't in the bed-parameter tests.
#experiment with a larger population (500?)
#consider applying an overall smoothing pass after mutation.