def setUp(self):
    """Create one Specie fixture per demographic regime under test."""
    # attribute name -> Specie constructor arguments
    regimes = {
        "stable_nobirth": ("Stable_NoBirth", 0, 0.02, 0.02),
        "stable": ("Stable", 0.2, 0.025, 0.03),
        "continuous": ("Continuous", 0, 0.5, 0.2),
        "extinction": ("Extinction", 0, 0.2, 0.4),
        "continuous_carryingcap": ("Continuous_CarryingCapacity", 0, 0.03, 0.02, 0.0001),
    }
    for attr, args in regimes.items():
        setattr(self, attr, Specie(*args))
def insert_entity(self, entity, suggestion=None):
    """
    Inserts entity into one of species

    :param entity: Entity
        New entity
    :param suggestion: Specie, optional
        Suggested specie for new entity
    """
    coeffs = (self.distance_c1, self.distance_c2, self.distance_c3)

    # Fast path: the caller hinted at a likely home for the entity.
    if suggestion is not None:
        if suggestion.get_genetic_distance(entity, *coeffs) < self.specie_acceptance:
            suggestion.entities.append(entity)
            return

    # Otherwise scan every known specie and accept the first close match.
    for candidate in self.species:
        if candidate.get_genetic_distance(entity, *coeffs) < self.specie_acceptance:
            candidate.entities.append(entity)
            return

    # No specie was close enough: the entity founds a specie of its own.
    self.species.append(Specie(entity))
class TestSpecie(unittest.TestCase):
    """Checks Specie.equilibrium() across contrasting demographic regimes."""

    def setUp(self):
        """Build one Specie fixture per regime exercised by the tests."""
        # attribute name -> Specie constructor arguments
        regimes = {
            "stable_nobirth": ("Stable_NoBirth", 0, 0.02, 0.02),
            "stable": ("Stable", 0.2, 0.025, 0.03),
            "continuous": ("Continuous", 0, 0.5, 0.2),
            "extinction": ("Extinction", 0, 0.2, 0.4),
            "continuous_carryingcap": ("Continuous_CarryingCapacity", 0, 0.03, 0.02, 0.0001),
        }
        for attr, args in regimes.items():
            setattr(self, attr, Specie(*args))

    def test_equilibrium(self):
        """Each regime settles to its expected equilibrium value."""
        self.assertEqual(self.stable_nobirth.equilibrium(), -1,
                         "Stable & NoBirth should reach -1")
        self.assertTrue(self.stable.equilibrium() > 0,
                        "Stable should reach > 1")
        self.assertEqual(self.continuous.equilibrium(), -1,
                         "Continuous should reach -1")
        self.assertEqual(self.extinction.equilibrium(), 0,
                         "Extinction should reach 0")
        self.assertEqual(self.continuous_carryingcap.equilibrium(), -1,
                         "Stable & NoBirth should reach -1")
def loop(self):
    # One full generation of the simulation: census, breeding, fitness
    # adjustment, resource distribution, and persistence.
    # NOTE(review): Python 2 code (print statements); reconstructed from a
    # collapsed single-line source — nesting inferred, confirm against VCS.

    # Census: count all genes and build a menu of gene ids per specie.
    self.cntGenes = 0
    self.menu = {}
    for s in self.species:
        self.cntGenes += len(s.genes)
        self.menu[s.num] = []
        for g in s.genes:
            self.menu[s.num].append(g.num)
    self.showInfo()

    sumfitness = 0.0
    newspecies = []
    newgenes = []

    # Breed every specie, then collect the offspring that split off into
    # entirely new species.
    for s in self.species:
        s.breed()
        for n in s.newSpecies():
            newspecies.append(n)
    for n in newspecies:
        self.species.append(Specie(members=[n], appearTime=self.generation))

    # Gather genes newly added within existing species this generation.
    for s in self.species:
        for n in s.newGenes():
            newgenes.append(n)

    # Adjust fitness of each new gene against every pre-existing gene.
    # Genes still waiting in `newgenes` are skipped so new genes are not
    # compared against each other; processed front-to-back.
    while len(newgenes):
        n = newgenes[0]
        for s in self.species:
            for o in s.genes:
                if o not in newgenes:
                    self.adjustFitness(new=n, old=o)
        del newgenes[0]

    # Re-rank every specie and accumulate total fitness for the
    # proportional resource split below.
    for s in self.species:
        s.sort()
        s.recountFitness()
        sumfitness += s.fitness
    # sumfitness/=len(self.species)

    # Hand out this generation's resources proportionally to fitness.
    # Species whose genes all die are removed in place, hence the manual
    # index loop instead of iterating the list directly.
    self.resource += RESOURCE
    dieList = []
    i = 0
    while i < len(self.species):
        s = self.species[i]
        r = self.resource * (s.fitness) / sumfitness
        print '[%d] get resource:%d' % (s.num, r)
        for d in s.distribute(resource=r):
            dieList.append(d)
        if not len(s.genes):
            print 'The specie [%d] has died out.' % s.num
            del self.species[i]
            continue
        i += 1
    self.resource = 0.0

    # Give each casualty a last chance (semantics of happyCorner are
    # defined elsewhere — presumably relocation or recycling; confirm).
    while len(dieList):
        # self.resource+=self.happyCorner(unlucky=dieList[0])
        self.happyCorner(unlucky=dieList[0])
        del dieList[0]

    # Persist the post-generation state and advance the clock.
    saveCurrent(self.packCurrent())
    self.generation += 1
def speciate_genomes(self, genome_list: List[Genome], specie_count: int) -> List[Specie]:
    """Cluster genomes into *specie_count* species and iterate to convergence.

    The first ``specie_count`` genomes each found a specie and seed its
    centroid; every remaining genome joins the specie whose centroid is
    closest.  Centroids are then recomputed from final membership before
    the convergence loop takes over.
    """
    # Seed one specie per requested cluster from the leading genomes.
    species: List[Specie] = []
    for seed in genome_list[:specie_count]:
        specie = Specie()
        seed.specie = specie
        specie.genome_list.append(seed)
        specie.centroid = seed.get_position()
        species.append(specie)

    # Assign each remaining genome to its nearest seeded specie.
    for genome in genome_list[len(species):]:
        nearest = self.find_closest_specie(genome, species)
        genome.specie = nearest
        nearest.genome_list.append(genome)

    # Refresh centroids now that membership is complete.
    for specie in species:
        specie.centroid = self.calculate_specie_centroid(specie)

    return self.speciate_until_convergence(species)
def init():
    """Wipe persisted state and seed it with one minimal genome and specie.

    Builds a single gene whose structure connects every sensor to a hub
    neuron and the hub back to the following neurons, with random weights
    and thresholds, then saves the structure and the initial Nature state.
    """
    drop()
    print('Initializing...')

    structure = GeneStructure(gsNew=-1, fromDB=1, num=0)
    gene = Gene(structure=structure, weights=[], thresholds=[], num=0)

    # One neuron (with a random threshold) for each sensor and output,
    # plus one extra acting as the hub below.
    for _ in range(SIZE_SENSOR + SIZE_OUTPUT + 1):
        structure.appendNeuron()
        gene.thresholds.append(random.uniform(-0.5, 0.5))

    hub = SIZE_SENSOR + SIZE_OUTPUT  # index of the last-appended neuron
    for i in range(SIZE_SENSOR):
        # sensor i -> hub
        structure.appendSynapse(origin=i, terminus=hub)
        gene.weights.append(random.uniform(-2, 2))
        # hub -> neuron i + SIZE_SENSOR
        # NOTE(review): only SIZE_SENSOR of these back-edges are created;
        # if SIZE_OUTPUT != SIZE_SENSOR some outputs stay unwired — confirm.
        structure.appendSynapse(origin=hub, terminus=i + SIZE_SENSOR)
        gene.weights.append(random.uniform(-2, 2))

    save.saveStruct(structure.pack())
    gene.set()

    founder = Specie(members=[gene], appearTime=0)
    nature = Nature(species=[founder], generation=0, restResource=0)
    save.saveCurrent(nature.packCurrent())
    print('Initializing has been done.')
'''This will download images from the website: https://www.insectimages.org/ and save into a folder inside the images folder'''
from read_site import Site
from save_images import save_images
from specie import Specie

# Minimum number of images to collect; the scraper keeps paginating the
# site until it has gathered at least this many.
NUM_MIN = 140

# Every taxon lives under the same thumbnail-browser endpoint; only the
# `order` query parameter differs.
_BASE_URL = 'https://www.insectimages.org/browse/taxthumb.cfm?order='

cockroaches = Specie(_BASE_URL + '369', 'cockroaches')
orthoptera = Specie(_BASE_URL + '159', 'orthoptera')
neuroptera = Specie(_BASE_URL + '152', 'neuroptera')
mantodea = Specie(_BASE_URL + '139', 'mantodea')
isoptera = Specie(_BASE_URL + '121', 'isoptera')
odonata = Specie(_BASE_URL + '155', 'odonata')

lst_specie = [cockroaches, orthoptera, neuroptera, mantodea, isoptera, odonata]


def down_lst_img(specie, num_min=NUM_MIN):
    """Scrape the listing page for *specie* and save its images locally."""
    site = Site(specie.url, num_min)
    # NOTE(review): the browser is closed before saving, so Site must
    # collect lst_img_path eagerly during construction — confirm.
    site.browser.quit()
    save_images(site.lst_img_path, specie.folder)
# NOTE(review): this fragment is the tail of a Tk application class whose
# header (and the start of its __init__) lie outside this chunk; the
# statements below are shown dedented because the enclosing `class` line is
# not visible — restore class indentation when merging.

# Tail of __init__: title the window and create the single container frame
# into which all pages are gridded (cell (0, 0) stretches with the window).
tk.Tk.wm_title(self, "Evolyfe")
self.container = tk.Frame(self)
self.container.pack(side="top", fill="both", expand=True)
self.container.grid_rowconfigure(0, weight=1)
self.container.grid_columnconfigure(0, weight=1)

def display_pop(self, pop: Population, emulated: int = 0):
    # Show the graph page for a single population and raise it on top.
    frame = PopGraphPage(self.container, pop, emulated)
    frame.grid(row=0, column=0, sticky="nsew")
    frame.tkraise()

def display_environment_populations(self, env: Environment):
    # Show the histogram of all populations in an environment.
    frame = PopListHistogram(self.container, env)
    frame.grid(row=0, column=0, sticky="nsew")
    frame.tkraise()

if __name__ == "__main__":
    # Demo: three blob species (stable / growing / shrinking), run 300
    # steps, then display the resulting population histogram.
    pops = [[Specie("Blob-Stable", 0, 0.02, 0.02), 40],
            [Specie("Blob-Increase", 0, 0.023, 0.02), 50],
            [Specie("Blob-Decrease", 0, 0.02, 0.025), 50]]
    e = Environment("BlobLand", pops)
    for _ in range(300):
        e.progress()
    app = App()
    app.display_environment_populations(e)
    app.mainloop()
def parse(exper, job_count):
    # Parse one run0.xml per job directory and accumulate fitness,
    # complexity, and species statistics onto *exper*'s per-job lists.
    # NOTE(review): jobs are numbered 1 .. job_count-1 here — if a job 0
    # exists on disk it is never parsed; confirm the job numbering scheme.
    # loop throught the number of jobs and parse each filepath
    for i in range(1, job_count):
        # we do not want to alter our original source filepath, so we create a local variable
        curr_dir = exper.source_file
        curr_props = exper.properties_file
        # if we are using label props, gather properties data
        if exper.label_props:
            exper.parse_properties(curr_props, i)
        # we need to add the job directory so we can access the xml file
        curr_dir += 'job_%i/run/' % i
        # get the complete file path of the working xml file
        path = os.path.join(curr_dir, 'run0.xml')
        # setup the tree
        tree = ET.parse(path)
        # get the root of the tree
        root = tree.getroot()
        # array of all the maximum fitness values in the document
        max_fitvals = []
        # array of all the minimum fitness values in the doc
        min_fitvals = []
        # array of all the average fitness values in the doc
        avg_fitvals = []
        # array of all the champion complexity values in the document
        champ_compvals = []
        # array of all the maximum complexity values in the document
        max_compvals = []
        # array of all the minimum complexity values in the document
        min_compvals = []
        # array of all the average complexity values in the document
        avg_compvals = []
        # array of species for this job
        job_species = []
        # keep track of whether or not we are at the proper epoch to pull or skip information
        epoch_index = 0
        if exper.generation_count is None:
            # search paramaters contains all the information on population size and generation size
            search_parameters = root.findall('search-parameters')
            # look at search-paramaters to get the given generation size
            for param in search_parameters:
                for child in param.getchildren():
                    if child.tag == 'generations':
                        exper.generation_count = int(child.text)
        # get all the generation elements so we can walk through them and process data
        gen_list = root.findall('generation')
        # get the last generation element so we can check it and make sure we dont miss the final data point, regardless of epoch modifier
        last_gen = gen_list[-1]
        # go through each generation tag in the xml file for parsing
        for generation in gen_list:
            # need to create a list of species for this generation
            gen_species = []
            # check to see if we are a multiple of the epoch modifier
            if epoch_index % exper.epoch_modifier == (
                    exper.epoch_modifier - 1) or epoch_index == 0 or generation == last_gen:
                # loop through the generation elements subchildren to find fitness and complexity
                for genchild in generation.getchildren():
                    # identify fitness element using .tag
                    if genchild.tag == 'fitness':
                        # loop through children of fitness to find the max fitness
                        for fitchild in genchild:
                            # check if element is max using .tag
                            if fitchild.tag == 'max':
                                # add the value of the max fitness to the array
                                max_fitvals.append(int(fitchild.text))
                            # check if element is min
                            if fitchild.tag == 'min':
                                min_fitvals.append(int(fitchild.text))
                            # check if element is avg
                            if fitchild.tag == 'avg':
                                avg_fitvals.append(float(fitchild.text))
                    # repeat above process for complexity
                    if genchild.tag == 'complexity':
                        for compchild in genchild:
                            if compchild.tag == 'champ':
                                champ_compvals.append(int(compchild.text))
                            # check if element is max
                            if compchild.tag == 'max':
                                max_compvals.append(int(compchild.text))
                            # check if element is min
                            if compchild.tag == 'min':
                                min_compvals.append(int(compchild.text))
                            # check if element is avg
                            if compchild.tag == 'avg':
                                avg_compvals.append(float(compchild.text))
                    # get the species of the exper
                    if genchild.tag == 'specie':
                        # create a new specie and add it to the list of species for this generation
                        specie = Specie(genchild.get('id'), genchild.get('count'))
                        # loop through the chromosomes in the specie and create add them to the specie list of chromosomes
                        for schild in genchild:
                            specie.add_chromosome(schild.get('id'), schild.get('fitness'))
                        gen_species.append(specie)
                # increment the epoch_index after we've pulled the data
                epoch_index += 1
            else:
                # even if we don't pull data we still want to increment the epoch_index to keep track of where we are
                epoch_index += 1
            # need to add the list of this generations species to the list of this jobs species by generation
            if len(gen_species) > 0:
                job_species.append(gen_species)
        # add parsed values to global arrays declared in the init function
        exper.max_fit_by_job.append(max_fitvals)
        exper.min_fitness_by_job.append(min_fitvals)
        exper.avg_fitness_by_job.append(avg_fitvals)
        exper.champ_comp_by_job.append(champ_compvals)
        exper.max_comp_by_job.append(max_compvals)
        exper.min_comp_by_job.append(min_compvals)
        exper.avg_comp_by_job.append(avg_compvals)
        exper.species_by_job.append(job_species)