def resumeSession(runNumber):
    #raise Exception("Not properly integrated with new graphics!  Exiting...")
    population, params = loadSession(runNumber)

    path = "./Data/Run " + str(runNumber)

    #Get the iteration number
    try:
        os.remove(path + "/.DS_Store") #Try and get rid of .DS_Store

    except:
        pass
    
    iterNumber = int(sorted(os.listdir(path)[1:], key=lambda x: int(x.split(" ")[-1]))[-1].split(" ")[-1]) + 1

    """
    #What that (^) line does
     - Gets a list of all files in the directory ./Data/Run # and get rid of evolution_parameters.pkl     os.listdir(path)[1]
     - Sorts the list based on the number after 'Iteration '                                              sorted(os.listdir(path), key=lambda x: int(x.split(" ")[-1])
     - Gets the last element and extracts its number                                                      [-1].split(" ")[-1])
     - Increments that number by 1                                                                         + 1
    """
    
    #Startup the graphics window
    window = graphics.StatusWindow(params.iterations, params.popSize, params.trainingTimeout, params.trainingTarget)
    
    #Restart evolution
    GA.continuousEvolution(population, params, run_number = runNumber, start_iteration_number = iterNumber, graphicsWindow = window)
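# A decomposed, hedged sketch of the iteration-number lookup used in resumeSession above.
# It assumes the run directory holds evolution_parameters.pkl plus folders named
# "Iteration 0", "Iteration 1", ..., and (like the original one-liner) that the pickle
# is the first entry returned by os.listdir.
import os

def nextIterationNumber(path):
    entries = os.listdir(path)[1:]                            # drop evolution_parameters.pkl
    entries.sort(key=lambda name: int(name.split(" ")[-1]))   # sort by the trailing number
    latest = entries[-1]                                      # e.g. "Iteration 41"
    return int(latest.split(" ")[-1]) + 1                     # next iteration to run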
Example #2
def setup_page(request):
## Handles BigCommerce Setup
	if request.method =='POST' and request.POST.get('BG'):
		try:
			user = dashfacts.objects.get(user_id = request.user.id)
			bgform = dashfactsForm(request.POST, instance = user)
		except ObjectDoesNotExist:
			bgform = dashfactsForm(request.POST, request.FILES)
		if bgform.is_valid():
			bgprof = bgform.save(commit=False)
			bgprof.user_id = request.user.id
			bgprof.save()
			return redirect('setup')
	else:
		try: 
			user = dashfacts.objects.get(user_id = request.user.id)
			bgform = dashfactsForm(instance = user)
		except ObjectDoesNotExist:
			bgform = dashfactsForm()
	## Displays message about BG Authentication
	try :
		dashfacts.objects.get(user_id = request.user.id, bg_api_key__isnull=True)
		bgmessage = {'warning':'Please enter your BigCommerce Credentials!'}
	except:
		bgmessage = {'success':'You\'ve already authenticated BigCommerce! Yippie!'}

## Handles Google Analytics Setup
	if request.method =='POST' and request.POST.get('GA'):
		tokenfile=tempfile.NamedTemporaryFile()
		TOKEN_FILE_NAME = tokenfile.name
		GA.prepare_credentials(TOKEN_FILE_NAME)
		token = tokenfile.read()
		token = str(token)
		gacreds = dashfacts.objects.get(user_id = request.user.id)
		gacreds.ga_token = token
		gacreds.save()			
		return redirect('setup')	
		## Displays message about GA Authentication
	try :
		dashfacts.objects.get(user_id = request.user.id, ga_token__isnull=True)
		gamsg = {'warning':'Please click to integrate Google Analytics!'}
	except:
		gamsg = {'success':'You\'ve already integrated Google Analytics! Yippie!'}

	if request.method =='POST' and request.POST.get('MC'):
		print 'test'
		
		## Displays message about MailChimp Authentication
	try :
		dashfacts.objects.get(user_id = request.user.id, mc_token__isnull=True)
		mcmsg = {'warning':'Please click to integrate MailChimp!'}
	except:
		mcmsg = {'success':'You\'ve already integrated MailChimp! Yippie!'}

	return render_to_response('setup.html',{'bgform':bgform,'bgmessage':bgmessage,'gamsg':gamsg,'mcmsg':mcmsg}, context_instance=RequestContext(request))
Example #3
def train(inputs, outputs, size, participants, victors, generations, threshold, cRate, mRate, printFile=False):
    '''Create and start training the NN via evolution strategy. Selection, crossover, mutation, evaluation.''' 
    global hero
    global OrigAnswers
    OrigAnswers = copy.deepcopy(outputs)
    EvaluationNN = GA.create_net(inputs, outputs)
    population = init_pop(EvaluationNN, inputs, outputs, size)
    # Test each citizen and determine initial fitness
    GA.evaluate(EvaluationNN, population, inputs, outputs)

    if printFile: f = open('ES.csv', 'w')
    gen = 0
    children = []
    # loop until a hero is found or we've reached max generations
    while gen <= generations and hero == 0:
        # Select our parents using tournament selection
        parents = GA.tournament(population, participants, victors)
        # Have our parents mate (Crossover)
        children = GA.mate(parents, cRate)
        # Have the children experience the world (Mutate)
        for child in children:
            mutate(child, mRate)
        # Test each child's fitness
        GA.evaluate(EvaluationNN, children, inputs, outputs)
        children = GA.tournament(children, participants, victors)
        population = sorted(population + children,
                            key=itemgetter(-1))[:-victors]
        if GA.heroFound(population, threshold):
            break
        else:
            print("Training: {:2.2%}".format(
                population[0][-1]), "{:2.2%}     ".format(gen / generations), end="\r")
            if printFile: f.write('%f,' % population[0][-1])
            if printFile: f.write('\n')
        gen += 1
    if printFile: f.close()
    if hero == 0:
        gen -= 1
        hero = sorted(population, key=itemgetter(-1))[0]
    EvaluationNN.SetNNWeights(hero[:-1])  # Load hero into NN, prep for usage.

    # Evaluate the hero on the inputs and outputs
    print('Generations: %d' % gen, ' ' * 20)
    print("Error Relative: {:2.5%}".format(NN.calcRelativeError(EvaluationNN, inputs, OrigAnswers)))
    print("Least Squares: %d" % NN.calcLeastSquaresError(EvaluationNN, inputs, OrigAnswers))
    print("Loss Squared: %d" % NN.calcLossSquared(EvaluationNN, inputs, OrigAnswers))
    #for x in inputs:
    #    EvaluationNN.SetStartingNodesValues(x)
    #    EvaluationNN.CalculateNNOutputs()
    #    print(x, EvaluationNN.GetNNResults(), EvaluationNN.GetNNResultsInt(), OrigAnswers[inputs.index(x)])
    print()

    return EvaluationNN
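# A minimal, hedged sketch of the mutate() helper the ES loop above relies on (the real
# helper is not shown here). It assumes the citizen layout produced by init_pop below,
# <x values..., sigma values..., fitness>, and applies standard ES-style Gaussian
# perturbation with log-normal self-adaptation of the step sizes.
import math
import random

def mutate(child, mRate):
    n = (len(child) - 1) // 2                   # first n genes are weights, next n are sigmas
    tau = 1.0 / math.sqrt(n) if n else 1.0      # common self-adaptation learning rate
    for i in range(n):
        if random.random() < mRate:
            child[n + i] *= math.exp(tau * random.gauss(0, 1))  # adapt the step size
            child[i] += child[n + i] * random.gauss(0, 1)       # perturb the weight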
Example #4
def mainWhile(cursor, parameters, recordBest, recordAvg, recordCounter):
    while True:
        try:
            if  recordCounter <= 0:
                break
            else:
                recordCounter -= 1
                
            ## update: 1) number of actuators, 2) now policy
            actuators, parameters.maxNumActuators = policy.getAttributeOfActuator(cursor)
            nowPolicy = policy.getPolicy(cursor)

            ## update: 1) context, 2) models
            nowContext = context.getContext(cursor)
            nowModels  = context.getModels(cursor, nowContext)

            ## Genetic Algorithm
            population, recordBest, recordAvg = GA.getPlan(nowPolicy, nowModels, nowContext, parameters, actuators, recordBest, recordAvg)

            ## setting: 1) execute instruction, 2) print
            setting.getInstructions(population, actuators, parameters.maxNumActuators)
            setting.printSection(population, actuators, parameters.maxNumActuators)
            
        except KeyboardInterrupt:
            cursor.close()
            break
    return recordBest, recordAvg
Example #5
def run_test(algorithm, dataset):
    if algorithm == "BruteForce":
        result = BruteForce.BruteForce(dataset[1])
        test_result = TestResult(result[0], dataset[0][0], dataset[0][1], 
                                 dataset[0][2], dataset[0][3], dataset[0][4], 
                                 result[3], result[1], 1).get_result()
    elif algorithm == "BranchNBound":
        result = BranchNBound.BranchNBound(dataset[1])
        test_result = TestResult(result[0], dataset[0][0], dataset[0][1], 
                                 dataset[0][2], dataset[0][3], dataset[0][4], 
                                 result[3], result[1], 1).get_result()
    elif algorithm == "Greedy":
        #bb_result = BranchNBound.BranchNBound(dataset[1])
        result = Greedy.better_greedy(dataset[1])
        test_result = TestResult(result[0], dataset[0][0], dataset[0][1], 
                                 dataset[0][2], dataset[0][3], dataset[0][4], 
                                 result[3], 
                                 result[1], result[1]).get_result()
    elif algorithm == "Randomized":
        #bb_result = BranchNBound.BranchNBound(dataset[1])
        result = Randomized.better_result_of_randomized(dataset[1])
        test_result = TestResult(result[0], dataset[0][0], dataset[0][1], 
                                 dataset[0][2], dataset[0][3], dataset[0][4], 
                                 result[3], 
                                 result[1], result[1]).get_result()
    elif algorithm == "MinSpanTree":
        #bb_result = BranchNBound.BranchNBound(dataset[1])
        result = MinSpanTree.MinSpanTree(dataset[1])
        test_result = TestResult(result[0], dataset[0][0], dataset[0][1], 
                                 dataset[0][2], dataset[0][3], dataset[0][4], 
                                 result[3], 
                                 result[1], result[1]).get_result()
    elif algorithm == "EA_hillclimbing":
        #bb_result = BranchNBound.BranchNBound(dataset[1])
        result = EA_hillclimbing.hillclimbing_algorithm(dataset[1], dataset[0][0])
        test_result = TestResult(result[0], dataset[0][0], dataset[0][1], 
                                 dataset[0][2], dataset[0][3], dataset[0][4], 
                                 result[3], 
                                 result[1], result[1]).get_result()
    elif algorithm == "Genetic":
        #bb_result = BranchNBound.BranchNBound(dataset[1])
        result = GA.genetic_algorithm(dataset[1], dataset[0][0])
        test_result = TestResult(result[0], dataset[0][0], dataset[0][1],
                                 dataset[0][2], dataset[0][3], dataset[0][4],
                                 result[3],
                                 result[1], result[1]).get_result()
            
    elif algorithm == "TwoOpt":
        #bb_result = BranchNBound.BranchNBound(dataset[1])
        result = TwoOpt.two_opt(dataset[1])
        test_result = TestResult(result[0], dataset[0][0], dataset[0][1], 
                                 dataset[0][2], dataset[0][3], dataset[0][4], 
                                 result[3], 
                                 result[1], result[1]).get_result()    
    else:
        raise Exception("Wrong algorithm chosen!")
    
    return test_result
Example #6
def init_pop(es_net, inputs, outputs, size):
    '''ES citizens have 2 vectors: x = reals, sigma = strategies <x, sigma>. (A hedged trace of the resulting layout follows this function.)'''
    pop = []
    pop = GA.generatePopulation(es_net, inputs, outputs, size)
    for i in range(len(pop)):
        del pop[i][-1]
        for j in range(len(pop[i])):
            pop[i].append(numpy.std(pop[i]))
        pop[i].append(0)
    return pop
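# A hedged trace of the citizen layout init_pop produces, assuming GA.generatePopulation
# returns citizens shaped [w1, ..., wn, fitness] (which is why the fitness entry is
# deleted before the strategy values are appended). Illustrative values only.
import numpy

citizen = [0.5, -0.2, 0.1, 0.0]           # three weights plus a fitness slot
del citizen[-1]                            # init_pop drops the fitness first
for _ in range(len(citizen)):              # range is fixed at 3 when the loop starts
    citizen.append(numpy.std(citizen))     # each sigma is the std of the list so far
citizen.append(0)                          # fresh fitness slot
print(citizen)                             # -> [w1, w2, w3, sigma1, sigma2, sigma3, 0]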
Example #7
def run8(p):
    ga_basic = GA.GA_Faster_Both(p)
    start = time.process_time()
    ga_basic.run()
    end = time.process_time()
    run_time = end - start
    b_ratio = ga_basic.memo_FV[tuple(ga_basic.opt_chromo)].b_ratio
    f = open('GA with Both results.txt', 'a')
    f.write(
        str(p.n) + "\t" + str(p.b) + "\t" + str(p.rho) + "\t" + str(run_time) +
        "\t" + str(b_ratio) + "\n")
    f.close()
def GATrain(evalType, testAmount, homeTeamID, awayTeamID, loadTxt,
            generationCount, generationSize, finalSelectionCount
            ):  # trains neural network using a genetic algorithm
    population, fitnessValues = selectiveTrain(evalType, testAmount,
                                               homeTeamID, awayTeamID, loadTxt,
                                               generationSize, 0, False, None)
    numOfParents = int(generationSize / 3) + 1
    # gets initial population information

    for gen in range(generationCount):
        loadTxt.set("Running Genetic Evaluation...")

        parents = GA.getParents(population, fitnessValues, numOfParents)
        offspring = GA.breed(parents, generationSize, 4)
        offspring = GA.crossover(offspring, 0.4, 4)
        population = GA.mutate(offspring, 0.4, 4)
        # runs through the processes of the genetic algorithm to generate a new generation

        fitnessValues = selectiveTrain(evalType, testAmount, homeTeamID,
                                       awayTeamID, loadTxt, generationSize, 0,
                                       False, population)[1]
        # generates fitness values based on this next generation

    bestSchema = population[fitnessValues.index(max(fitnessValues))]
    # finds the best schema from trained models

    iterNum = bestSchema[0]
    schemaConst = bestSchema[1]
    learningRate = bestSchema[2]
    accumulatorVal = bestSchema[3]
    # extracts schema values from this list

    results = massTrain(evalType, testAmount, homeTeamID, awayTeamID, loadTxt,
                        finalSelectionCount, iterNum, schemaConst,
                        learningRate, accumulatorVal)
    accuracy = max(results)
    modelNum = results.index(accuracy)
    # trains a final set of models with the best schema and finds the best one

    return accuracy, modelNum, iterNum, schemaConst, learningRate, accumulatorVal
Example #9
def train(shape,
          mu,
          generations,
          number_possible_parents,
          number_parents,
          data,
          print_frequency=250,
          target_fitness=0.00001,
          mutation_rate=0.5,
          crossover_rate=0.5,
          quick=False):
    ''' Trains a network using the mu + lambda evolution strategy. Returns the best network created after either the max
        number of generations have been run, or the target_fitness has been achieved. '''

    lifetime_error = ['Evolution Strategy {}'.format(shape)]

    # create and evaluate the original seed population
    population = create_es_population(shape, mu)
    GA.evaluate(shape, population, data, quick)

    for i in range(generations + 1):

        #parents = GA.tournament(population, number_possible_parents, number_parents)
        parents = GA.tournament_unique(population, number_possible_parents,
                                       number_parents)
        #print("Tournament selected {} parents".format(number_parents))
        children = GA.reproduce(parents, crossover_rate)
        #print("Parents reproduced {} children".format(len(children)))

        for child in children:
            #print("Child before mutation = {}".format(child))
            mutate(child, mutation_rate)
            #print("Child after mutation = {}".format(child))

        GA.evaluate(shape, children, data, quick)

        # keeps the best individuals from the combined children and population arrays to act as the
        # next generations population
        if len(children) != 0:
            population = np.concatenate((population, children))
        population = sorted(population,
                            key=itemgetter(-1))[:mu]  # elitist selection
        error = [individual[-1] for individual in population]
        lifetime_error.append(statistics.mean(error))
        #lifetime_error.append(population[0][-1])

        if population[0][-1] < target_fitness: break

        if i % print_frequency == 0:
            print("Generation {}'s fitnesses are: ".format(i), end='')
            for individual in population:
                print("{}, ".format(individual[-1]), end='')
            print()

    return GA.build_network(shape, population[0]), lifetime_error
Example #10
def run14(p):
    ga = GA.GA_Sourd_Bounded(p)
    start = time.process_time()
    ga.run()
    end = time.process_time()
    run_time = end - start
    f = open('GA_Sourd_bound1_0717.txt', 'a')
    f.write(
        str(p.n) + "\t" + str(p.b) + "\t" + str(p.rho) + "\t" + str(run_time) +
        "\t" + str(ga.memo_opt[ga.max_iter]) + "\t" +
        str(ga.memo_opt[ga.max_iter - 5]) + "\t" +
        str(ga.memo_opt[ga.max_iter - 10]) + "\n")
    f.close()
Example #11
def run5(p):
    ga = GA.GA_BASIC(p)
    start = time.process_time()
    ga.run()
    end = time.process_time()
    run_time = end - start
    f = open('GA_My_DP_0717.txt', 'a')
    f.write(
        str(p.n) + "\t" + str(p.b) + "\t" + str(p.rho) + "\t" + str(run_time) +
        "\t" + str(ga.memo_opt[ga.max_iter]) + "\t" +
        str(ga.memo_opt[ga.max_iter - 5]) + "\t" +
        str(ga.memo_opt[ga.max_iter - 10]) + "\n")
    f.close()
def createSession(popSize = 45, iterations = 10, mutate=0.2, extraParents=0.1, originalParents=0.2, hiddenLayers = 3, maxLayerSize = 15, trainingTimeout = 10, trainingTarget = 1.0):
    global window
    population = GA.networkLib.createPopulation(popSize, hiddenLayers, maxLayerSize)

    #Startup the graphics window
    window = graphics.StatusWindow(iterations, popSize, trainingTimeout, trainingTarget)
    window.update()
    #raise Exception("Program stopped!")
    
    #Set up an object to store evolutionary parameters
    params = GA.EvolutionObject()
    
    params.popSize = popSize
    params.iterations = iterations
    params.mutate = mutate
    params.extraParents = extraParents
    params.originalParents = originalParents
    params.trainingTimeout = trainingTimeout
    params.trainingTarget = trainingTarget

    #Start the program
    GA.continuousEvolution(population, params, graphicsWindow = window)
Example #13
def Hill_climbing(n, edges):
    current = list(range(n))
    random.shuffle(current)
    path_cost = GA.GetPathCost(current, edges)
    iteration = 0
    while True:
        best = float('inf')
        for i in range(0, n):
            for j in range(i + 1, n):
                tmp = current.copy()
                tmp[i], tmp[j] = tmp[j], tmp[i]
                new_cost = GA.GetPathCost(tmp, edges)
                if new_cost < best:
                    best = new_cost
                    best_state = tmp
        if best >= path_cost:
            break
        current = best_state
        path_cost = best
    # while True:
    #     iteration += 1
    #     # print(iteration, path_cost)
    #     next_state = current.copy()
    #     count = 0
    #     while True:
    #         count += 1
    #         if count > 1000000:
    #             return path_cost, current
    #         a = random.randint(0, n - 1)
    #         b = random.randint(0, n - 1)
    #         tmp = next_state[a]
    #         next_state[a] = next_state[b]
    #         next_state[b] = tmp
    #         new_cost = GA.GetPathCost(next_state, edges)
    #         if new_cost < path_cost:
    #             path_cost = new_cost
    #             current = next_state.copy()
    #             break
    return path_cost, current
Example #14
def runner(m, n, alpha, b):
    s = 0
    f_name = "{0}_{1}_{2}_{3}.txt".format(str(m), str(n), str(alpha), str(b))
    for i in range(1):
        w, c, p = ks.generate_knapsack(m, n, alpha)
        for j in range(10):
            print(m, " ", n, " ", alpha, " ", b, " ", i, " ", j, end=" ")
            r = genetic.fga(n, p, w, c, m)
            pd = abs(b - r) / b
            print(pd)
            s += pd
    avg_pd = s / 10
    fm.write_to_file(f_name, avg_pd)
Example #15
def run7(p):
    ga = GA.GA_Faster_Select(p)
    start = time.process_time()
    ga.run()
    end = time.process_time()
    run_time = end - start
    f = open('GA_bound2.txt', 'a')
    f.write(
        str(p.n) + "\t" + str(p.b) + "\t" + str(p.rho) + "\t" + str(run_time) +
        "\t" + str(ga.memo_opt[ga.max_iter]) + "\t" +
        str(ga.memo_opt[ga.max_iter - 5]) + "\t" +
        str(ga.memo_opt[ga.max_iter - 10]) + "\n")
    f.close()
def BECO_Ensemble_train(Minset, Majset, model_old, lr, epoch, batch_size,
                        pop_size, gen_max, cx, mx):
    # define ranges of variables
    # Minset is minority dataset (x, y), x is 3D array, and y is 1D vector.
    # Majset is majority dataset (x, y), x is 3D array, and y is 1D vector.
    xMin, yMin = Minset
    xMaj, yMaj = Majset
    # get the best chromosomes population based on binary ga
    print('GA implementation')
    best_chromosomes_pop = GA.BGA_train(xMin=xMin,
                                        yMin=yMin,
                                        xMaj=xMaj,
                                        yMaj=yMaj,
                                        model_old=model_old,
                                        lr=lr,
                                        epoch=epoch,
                                        batch_size=batch_size,
                                        pop_size=pop_size,
                                        gen_max=gen_max,
                                        cx=cx,
                                        mx=mx)
    model_lists = []
    error_lists = []
    # train Ne(=pop_size) classifier
    print(best_chromosomes_pop)
    for i in range(len(best_chromosomes_pop)):
        N_new = best_chromosomes_pop[i][0]
        k = best_chromosomes_pop[i][1]
        nn = best_chromosomes_pop[i][2]
        Cls = best_chromosomes_pop[i][3]
        CSO_type = best_chromosomes_pop[i][4]
        Min_new = CSOSDG(xMin,
                         N_new=int(N_new),
                         k=int(k),
                         nn=int(nn),
                         Cls=Cls,
                         CSO_type=CSO_type)
        # construct new dataset
        x_new = np.vstack((xMaj, xMin, Min_new))
        y_new = np.hstack((yMaj, yMin, np.ones(len(Min_new), ) * yMin[0]))
        # finetune model based on new dataset
        model, error = model_train(x=x_new,
                                   y=y_new,
                                   model_old=model_old,
                                   lr=lr,
                                   epoch=epoch,
                                   batch_size=batch_size)
        model_lists.append(model)
        error_lists.append(error)
    save_results(model_lists, error_lists)
    return model_lists, error_lists
def run(network, populationSize, gen):
    fcEval = routeFitness
    gaParam = {'popSize': populationSize, 'noGen': gen}
    problParam = {'function': fcEval, 'network': network}

    ga = GA(gaParam, problParam)
    ga.initialisation()
    ga.evaluation()
    bestChromo = None
    for g in range(gaParam['noGen']):
        #ga.oneGeneration()
        ga.oneGenerationElitism()
        #ga.oneGenerationSteadyState()

        bestChromo = ga.bestChromosome()
        print('Best solution in generation ' + str(g + 1) + ' is: x = ' +
              str(bestChromo.repres) + ' f(x) = ' + str(bestChromo.fitness))

    print("Solution is: ", bestChromo)
def PAtester(graph, name):
	controls=5
	experimentals=5
	true=10
	false=true
	params=sim.paramClass()
	sampleLists,geneDicts= [],[]
	# import starting points
	for i in range(1,11):
		sampleList, geneDict=readFpkmData2('neg_binom_gen_'+str(i)+'.csv', ',') # read in data
		sampleLists.append(sampleList)
		geneDicts.append(geneDict)
	knockoutLists, knockinLists= setupEmptyKOKI(len(sampleList))
	updateBooler=cdll.LoadLibrary('./simulator.so')
	boolC=updateBooler.syncBool 
	geneNames=geneDicts[0].keys()
	for node in graph.nodes():
		if node in geneNames:
			print(node)
		else:
			print(node)
			for k in range(10):
				q=randint(0,len(geneNames)-1)
				geneDicts[k][str.upper(node)]=geneDicts[k][geneNames[q]]
				for j in range(10):
					sampleLists[k][j][str.upper(node)]=sampleLists[k][j][str.upper(geneNames[q])]
				print([sampleLists[k][j][str.upper(node)] for j in range(10)])
	for j in range(10): # iterate over imported starting points
		model= sim.modelClass(graph,sampleLists[j], True)
		rule=ga.genBits(model)
		newInitValueList=genInitValueList(sampleLists[j],model)
		model.initValueList=newInitValueList
		# print(newInitValueList)
		model.updateCpointers() 
		output=[sim.NPsync(rule[1], model, params.cells, newInitValueList[k], params, knockinLists[k], knockoutLists[k], boolC) for k in range(5)]
		controlSampleList=compileOuts(output,sampleLists[j], controls, model)

		# generate model
		# loop over number of times we want to generate fake data and perform sequence of events
		# generate Boolean model for this trial
		genelist=geneDicts[j].keys()
		for perturbation in [0,5,10,15,20]: 
			tSampleList=list(sampleLists[j][5:10])			
			perturbationSize=2.**(-.1*perturbation)
			for i in range(5):
				# generate values across samples
				for node in graph.nodes():
					if len(graph.predecessors(node))==0:
						tSampleList[i][str.upper(node)]=min(max(0,sampleLists[j][i+5][str.upper(node)]*(perturbationSize)),1)
			outputData(controlSampleList, tSampleList, genelist,name+str(perturbation)+'_true_'+str(j)+'.csv', geneDicts[j])
Example #19
def transformTest(graph,name,fileName):
	# can't fit a rule to only one node
	if len(graph.nodes())<2:
		print('not enough overlap')
		return
	
	# load in C function
	#updateBooler=ctypes.cdll.LoadLibrary('./testRun.so')
	updateBooler=cdll.LoadLibrary('./testRun.so')
	boolC=updateBooler.syncBool 

	# load data, params, make empty knockout and knockin lists (no KO or KI in transform tests)
	sampleDict = constructBinInput(fileName)
	params=sim.paramClass()

	# generate turn sample dict into sample list (list of dicts instead of dict of lists)
	keyList=sampleDict.keys()
	sampleList=[{} for i in range(len(sampleDict[keyList[0]]))]
	for i in range(len(sampleList)):
		for key in keyList:
			if key in graph.nodes():
				sampleList[i][key]=sampleDict[key][i]
	
	knockoutLists, knockinLists= setupEmptyKOKI(len(sampleList))

	# generate model encompassing graph and samples to do rule inference on
	model=sim.modelClass(graph,sampleList, False)
	model.updateCpointers()
	# cpy data into correct order for simulation 
	newInitValueList=genInitValueList(sampleList,model)
	model.initValueList=newInitValueList
	print('setup successful')

	# find the rules
	model, dev1, bruteOut =ga.GAsearchModel(model, sampleList, params, knockoutLists, knockinLists, name, boolC)
	bruteOut, equivalents, dev2 = ga.localSearch(model, bruteOut, sampleList, params, knockoutLists, knockinLists, boolC)
	pickle.dump( [[dev1],[dev2],[bruteOut],[model]], open( name+"_output.pickle", "wb" ) )
Example #20
def selector(algo, func_details, popSize, Iter):
    function_name = func_details[0]
    lb = func_details[1]
    ub = func_details[2]
    dim = func_details[3]

    if (algo == 0):
        x = pso.PSO(getattr(benchmarks, function_name), lb, ub, dim, popSize,
                    Iter)
    if (algo == 1):
        x = mvo.MVO(getattr(benchmarks, function_name), lb, ub, dim, popSize,
                    Iter)
    if (algo == 2):
        x = gwo.GWO(getattr(benchmarks, function_name), lb, ub, dim, popSize,
                    Iter)
    if (algo == 3):
        x = mfo.MFO(getattr(benchmarks, function_name), lb, ub, dim, popSize,
                    Iter)
    if (algo == 4):
        x = cs.CS(getattr(benchmarks, function_name), lb, ub, dim, popSize,
                  Iter)
    if (algo == 5):
        x = bat.BAT(getattr(benchmarks, function_name), lb, ub, dim, popSize,
                    Iter)
    if (algo == 6):
        x = woa.WOA(getattr(benchmarks, function_name), lb, ub, dim, popSize,
                    Iter)
    if (algo == 7):
        x = ffa.FFA(getattr(benchmarks, function_name), lb, ub, dim, popSize,
                    Iter)
    if (algo == 8):
        x = ssa.SSA(getattr(benchmarks, function_name), lb, ub, dim, popSize,
                    Iter)
    if (algo == 9):
        x = ga.GA(getattr(benchmarks, function_name), lb, ub, dim, popSize,
                  Iter)
    if (algo == 10):
        x = hho.HHO(getattr(benchmarks, function_name), lb, ub, dim, popSize,
                    Iter)
    if (algo == 11):
        x = sca.SCA(getattr(benchmarks, function_name), lb, ub, dim, popSize,
                    Iter)
    if (algo == 12):
        x = jaya.JAYA(getattr(benchmarks, function_name), lb, ub, dim, popSize,
                      Iter)
    if (algo == 13):
        x = de.DE(getattr(benchmarks, function_name), lb, ub, dim, popSize,
                  Iter)
    return x
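# A hedged usage sketch of selector(): func_details follows the [name, lb, ub, dim]
# layout unpacked above. "F1" and the bounds/dimension here are illustrative
# placeholders, not values confirmed by the benchmarks module.
best = selector(algo=9, func_details=["F1", -100, 100, 30], popSize=50, Iter=500)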
Example #21
def selector(algo, func_details, popSize, Iterasyon):
    function_name = func_details[0]
    lb = func_details[1]
    ub = func_details[2]
    dim = func_details[3]
    x = hho.HHO(getattr(functions, function_name), lb, ub, dim, popSize,
                Iterasyon)

    # Algoritma listesi
    if (algo == 0):
        x = hho.HHO(getattr(functions, function_name), lb, ub, dim, popSize,
                    Iterasyon)
    if (algo == 1):
        x = ga.GA(getattr(functions, function_name), lb, ub, dim, popSize,
                  Iterasyon)
    return x
Example #22
    def main(self):
        ############ generate model from data ##################
        data = earthquakedata.dataremodel()

        path = 'data.dat'

        ######## map setting #######

        data.longitudemax = 145
        data.longitudemin = 140

        data.latitudemax = 35
        data.latitudemin = 30

        data.Interval = 0.5

        data.datareader(path)

        ######## setting end ######

        data.selectyear = 2010
        data.selectmonths = 1
        data.selectmonthe = 12

        data.setnum()

        data.findhappentimes()

        #data.setmodel()

        best = -100000
        GAf = GA.GA()
        for i in range(0, 200 * 200):
            rf = randommodel.generatemodel(self.latitudenum,
                                           self.longitudebinnum)
            intPopulation = np.zeros((data.latitudenum, data.longitudebinnum),
                                     float)
            intPopulation = randommodel.intergermodel(rf, self.latitudenum,
                                                      self.longitudebinnum)
            score = GAf.Evalate(intPopulation, data)

            if score > best:
                best = score

        print "R best = ", best

        return best
Example #23
def greedySolve(system):

    g = ga.GA(system)

    chromosome = {}
    chromosome['vmmsMatrix'] = []
    chromosome['vminstances'] = []

    vm, microserviceList = getNewVm(system, None, chromosome, g, 1)

    chromosome['vmmsMatrix'].append(microserviceList)
    chromosome['vminstances'].append(vm)

    # we have created a first VM with no microservices assigned

    # create one instance of each microservice

    for msId in range(0, system.numberMicroServices):
        allocateMs(system, msId, chromosome, g, 1)

    # randomly create an instance of each microservice, choosing at random whether it is stored or running, and repeat until the fitness gets worse

    vmLoads = g.calculateVmsWorkload(chromosome)
    fitness = normalizationMia(
        g.calculateCost(chromosome, vmLoads)) + normalizationMia(
            g.calculateMttr(chromosome)) + normalizationMia(
                g.calculateLatency(chromosome))
    newfitness = fitness
    tempchromosome = chromosome

    for i in range(0, 1000):
        msId = random.randint(0, system.numberMicroServices - 1)
        tempchromosome = copy.deepcopy(tempchromosome)
        msState = random.randint(0, 1)
        allocateMs(system, msId, tempchromosome, g, msState)
        vmLoads = g.calculateVmsWorkload(tempchromosome)
        newfitness = normalizationMia(g.calculateCost(
            tempchromosome, vmLoads)) + normalizationMia(
                g.calculateMttr(tempchromosome)) + normalizationMia(
                    g.calculateLatency(tempchromosome))
        if newfitness <= fitness:
            print "improved to " + str(newfitness)
            fitness = newfitness
            chromosome = tempchromosome

    return chromosome
def babai_2D(base):

    B = GA.ga(base)
    B_star = gso(B)

    t = np.array([1, 0])
    b = t

    j = len(base)
    while j >= 1:
        c = np.round(
            np.dot(b, B_star[j - 1]) / np.dot(B_star[j - 1], B_star[j - 1]))
        b = b - np.dot(c, B[j - 1])

        j -= 1

    return t - b
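# A hedged sketch of the gso() helper babai_2D relies on (not shown in this example):
# plain Gram-Schmidt orthogonalization of the basis rows, without normalization, which
# is what Babai's nearest-plane algorithm needs for B_star.
import numpy as np

def gso(B):
    B = np.asarray(B, dtype=float)
    B_star = []
    for b in B:
        v = b.copy()
        for u in B_star:                   # subtract projections onto earlier vectors
            v -= (np.dot(b, u) / np.dot(u, u)) * u
        B_star.append(v)
    return np.array(B_star)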
Example #25
def funGA_single(fp_tuple_combOfInsRuns):
    local_state = np.random.RandomState()
    print("Begin: ins ")
    print("Running......")
    cpuStart = time.process_time()
    # call the GA to solve the instance
    GeneticAlgo = GA.GA(listGAParameters, fp_tuple_combOfInsRuns[0], local_state)
    finalPop, listGenNum, listfBestIndFitness = GeneticAlgo.funGA_main()
    cpuEnd = time.process_time()
    cpuTime = cpuEnd - cpuStart
    print("End")
    # record the fitness and objective value of the best individual in the final population (accumulated by the caller)
    if listfBestIndFitness[-1] != 1/finalPop[0]['objectValue']:
        print("Wrong. Please check GA.")
    # prepare data for plotting
    new_listfBestIndFitness = [fitness * 1000 for fitness in listfBestIndFitness]
    return cpuTime, listfBestIndFitness[-1], finalPop[0]['objectValue'], new_listfBestIndFitness
Example #26
    def __init__(self, rbfn_parameter, data):
        # set parameters
        self.iteration = int(rbfn_parameter[0])
        self.groupNumber = int(rbfn_parameter[1])
        self.matingProbability = float(rbfn_parameter[2])
        self.mutationProbability = float(rbfn_parameter[3])
        self.hiddenNeurons = int(rbfn_parameter[4])

        # process input data
        self.input_length = len(data)
        self.inputDimension = len(data[0]) - 1
        #np.random.shuffle(data)
        #print("data[5]:", data[5])
        self.data = self.normalize_input_data(data)
        #print("normalize data[5]:", self.data[5])
        self.input_x = [[0.] * self.inputDimension for _ in range(self.input_length)]  # independent rows (avoid aliasing)
        self.input_y = []
        for i in range(self.input_length):
            for j in range(self.inputDimension):
                self.input_x[i][j] = self.data[i][j]
            self.input_y.append(self.data[i][self.inputDimension])
        self.vectorDimension = 1 + 2 * self.hiddenNeurons + self.hiddenNeurons * self.inputDimension

        # initial groups
        # individual vectors
        self.individual_vector = \
            [[random.uniform(-1, 1) for col in range(self.vectorDimension)] for row in range(self.groupNumber)]
        # sigma entries in each individual vector must be positive
        for i in range(self.groupNumber):
            for j in range(self.vectorDimension - self.hiddenNeurons,
                           self.vectorDimension):
                while self.individual_vector[i][j] == 0.0:
                    self.individual_vector[i][j] = random.random()
                if self.individual_vector[i][j] < 0:
                    self.individual_vector[i][j] *= -1

        # initialize F and E
        self.F = [0.] * self.input_length
        self.E = [0.] * self.groupNumber
        self.phi = [1.] * (self.hiddenNeurons + 1)

        # initialize the GA
        self.ga = GA.GA(self.groupNumber, self.matingProbability,
                        self.mutationProbability, self.hiddenNeurons,
                        self.inputDimension, self.vectorDimension)
Example #27
def main():
    rep_length = 14
    popsize =12
    sp = 3
    mut = 1./rep_length
    cross = 0.95
    maxgen = 100
    onlyThebest = 0
    run_id = "1"
    nr_processes = 6
    fullpath = os.path.abspath(".")
    for runval in range(39, 1000):
        print "run "+str(runval)
        optimizer = GA.gax(rep_length = rep_length, popsize = popsize, sp = sp, mut = mut, fitfun = evaluation, maxgen = maxgen, cross = cross, nr_processes = nr_processes, run_id = run_id, path = fullpath,onlyThebest=onlyThebest, runval=runval)
        optimizer.run()
        bashCommand = "killall torcs-bin"
        os.system(bashCommand)
        print "killed old torcs processes"
Example #28
 def Dataset_reduction(self,
                       X_train,
                       supervised=True,
                       X_test=None,
                       labels=None):
     #Description: based on self.mdl_type it runs one of the 3 algorithms for dimension reduction.
     #INPUT:  - X_train: train dataset
     #        - supervised: if the method is supervised, we fit the transformation on the train dataset and transform the test set.
     #                      Otherwise, we only apply it to the train dataset.
     #        - X_test: test dataset
     #        - labels: labels of the dataset, needed for GA and LDA.
     #OUTPUT: - X_tr: transformed training set.
     #        - X_te: transformed test set.
     if self.mdl_type == "GA":
         print '\nFeature Selection'
         print '-----------------'
         params = {
             'C': 0.1,
             'tol': 0.01,
             'kernel': 'linear',
             'max_iter': 10000,
             'verbose': False
         }
         GenAlgo = GA.GeneticAlgSelect(X_train,
                                       labels,
                                       svm.SVC,
                                       params,
                                       dim_out=self.dim_out)
         GenAlgo._perform_iter()
         GAopt = GenAlgo.mdl_pool[GenAlgo.elite_list[-1]]
         X_tr = X_train[:, GAopt.gene.astype(bool)]
         if supervised == True:
             X_te = X_test[:, GAopt.gene.astype(bool)]
     else:
         print '\nFeature Reduction'
         print '-----------------'
         self.fit(X_train, labels)
         X_tr = self.transform(X_train)
         if supervised == True:
             X_te = self.transform(X_test)
     if supervised == True:
         return X_tr, X_te
     else:
         return X_tr
Example #29
def simulate(game, CHROMOSOME):
    player = game[0]
    asteroids = game[1]
    projectiles = game[2]
    LEVEL = game[3]
    SCORE = game[4]
    steps = game[5]
    action = 0
    for step in range(steps):
        sense(player, asteroids)
        action = GA.updateAction(player, CHROMOSOME)
        player = executeAction(player, projectiles, action)
        projectiles = detectProjectileColision(asteroids, projectiles)
        SCORE += updateScore(player, asteroids)
        if SCORE < 0: SCORE = 0
        player.score = SCORE
        updatePlayer(player)
        LEVEL = updateAsteroids(asteroids, LEVEL)
        updateProjectiles(projectiles)
    return SCORE
Example #30
def main(gens, number_of_solutions, mating_parents, colors):
    img_new = Image.new('RGB', (image.width, image.height), 'WHITE')

    size_of_population = (number_of_solutions, image.weights, colors)

    total_population = numpy.random.randint(low=0,
                                            high=256,
                                            size=size_of_population)

    for gen in range(gens):
        print("Number of generation: {}".format(gen))

        cycle(size_of_population, total_population, mating_parents, colors,
              img_new, gen)

    fitness = GA.fitness_function(image.get_array(), total_population,
                                  image.use_inverse_img)

    image.create(image.final_destination, image.choose_best(fitness),
                 total_population)
    def __init__(self, max_dist, pop_size, reproducing_frac):
        self.algs = []
        self.all_sols = []  # all solutions, currently unused
        self.gen_best = []  # best in the generation
        self.done_iterations = 0
        self.dist_m = DIST_M
        self.df = DF

        self.num_bikes = NUM_BIKES
        self.max_dist = max_dist
        self.pop_size = pop_size
        self.num_reproducing = int(pop_size * reproducing_frac)

        while len(self.algs
                  ) < self.pop_size:  #Initializing the population of GAs
            new_alg = GA.Algorithm(parents=False, i=0)

            if new_alg.get_distance(
                    self.dist_m) <= self.max_dist:  # feasible solution
                self.algs.append(new_alg)
Example #32
def get_user_factors(user=None, progress_log=False):
    if not user:
        user = get_user_object()

    if progress_log: print('Getting training data...')
    train_data = get_training_data_from_user(user=user)
    if progress_log: print('Training data collected')

    g = GA.Guesser(4, mutation_rate=0.1, population_size=250)
    precision_memory = [0] * 15
    gen_counter = 0
    guessed_formula = None
    while g.precision != precision_memory[0] and gen_counter < 100:
        g.train(train_data)
        best = g.test_formulas(train_data)[0]
        precision_memory.append(g.precision)
        precision_memory = precision_memory[1::]
        gen_counter += 1
        if progress_log: print(f'Gen: {gen_counter}, precision: {g.precision}')
        guessed_formula = best['formula']
    return guessed_formula.factors
    def next_generation(self, i):
        pot_par = self.algs[:]
        self.infeasible = 0  # count infeasible solutions over simulation

        num_par = len(pot_par)
        repr_prob = sorted(list(range(0, num_par)), reverse=True)
        repr_prob_sum = sum(repr_prob)
        repr_prob = [r / repr_prob_sum for r in repr_prob]

        while len(self.algs) < self.pop_size:
            first_par = np.random.choice(pot_par, p=repr_prob)
            second_par = np.random.choice(pot_par, p=repr_prob)

            while first_par == second_par:
                second_par = np.random.choice(pot_par, p=repr_prob)

            new_alg = GA.Algorithm(parents=[first_par, second_par], i=i)

            if new_alg.get_distance(
                    self.dist_m) <= self.max_dist:  # feasible solution
                self.algs.append(new_alg)
            else:
                self.infeasible += 1
Example #34
def main():
    #lower bound is 4135
    #upper bound is 16417
    #best cycle top 			6400
    #best ordered top			6400
    #best cycle tournament		6400
    #best ordered tournament	6302
    getm = GetMatrix.Connections("connections.txt")
    matrix = getm.matrix()
    getp = GetParams.Parameters("params.txt")
    params = getp.params()
    ga = GA.Populate(matrix, int(params[0]), int(params[1]))

    maxIters = int(params[2])
    iters = 0
    while iters < maxIters:
        pairs = ga.topDown(int(params[0] / 2))  #topDown or tournamentPairs
        for n in pairs:
            ga.cycleXOver(n[0], n[1])  #orderedXOver or cycleXOver
        ga.sortPop()
        ga.killBottom()
        iters += 1
        print ga.getCost(ga.population[0])
        print ga.population[0]
Example #35
def main():
    print("HP model training using Genetic Algorithms\nRun commencing...\n...")

    # parameters, remember to set these!!
    population_size = 500
    monomer_length = 150
    number_h = 75
    generations = 3000
    iterations = 1

    start = time.time()
    algo = GA(population_size, monomer_length, number_h)
    best_conformation = algo.run(generations)
    print(best_conformation.get_representation())
    print(best_conformation.get_fitness())
    for iteration in range(1, iterations):
        algo = GA(population_size, monomer_length, number_h)

        # run the algorithm and return the best monomer which is then written to file
        current_conformation = algo.run(generations)
        print(current_conformation.get_representation())
        print(current_conformation.get_fitness())
        if best_conformation.get_fitness() > current_conformation.get_fitness(
        ):
            best_conformation = current_conformation

    end = time.time()
    time_elapsed = end - start
    print("Time elapsed for " + str(iterations) + " runs/trials with " +
          str(generations) + ": " + str(time_elapsed))

    f = open('HPMoleculeResult.txt', 'w')
    f.write('Molecule   Direction\n')
    molecule = best_conformation.get_molecule()
    best_conformation = best_conformation.get_representation()
    for i in range(monomer_length):
        if i == 0:
            f.write(molecule[0] + '\n')
        elif i < monomer_length - 1:
            f.write(molecule[i] + '\t' + best_conformation[i] + '\n')
        else:
            f.write(molecule[i] + '\t' + best_conformation[i])
    f.close()
Example #36
def train(shape, mu, generations, data, print_frequency=5, target_fitness=0.0001, crossover_rate=0.5, weight=0.5):
    ''' Trains a network using Differential Evolution. Returns the best network created after either the max
            number of generations have been run, or the target_fitness has been achieved. '''

    population = GA.create_population(shape, mu)
    fitness = 100
    lifetime_error = ['Differential Evolution {}'.format(shape)]
    #print(len(population))
    GA.evaluate(shape, population, data)

    for gen in range(generations+1):

        for j in range(len(population)):
            individuals = random.sample(range(len(population)), 4)
            cross = random.randint(0, len(population[0]))
            temp = copy.deepcopy(population[individuals[0]])
            for i in range(len(population[0]) - 1):
                if (random.random() < crossover_rate or cross == i):
                    temp[i] = population[individuals[1]][i] + (
                    weight * (population[individuals[2]][i] - population[individuals[3]][i]))
            GA.evaluate(shape, [temp, population[individuals[0]]], data)
            if (temp[-1] < population[individuals[0]][-1]):
                population[individuals[0]] = temp
            fitness = population[individuals[0]][-1]
        population = sorted(population, key=itemgetter(-1))[:mu]  # elitist selection
        lifetime_error.append(population[0][-1])

        if population[0][-1] < target_fitness: break

        if gen % print_frequency == 0:
            print("Generation {}'s fitnesses are: ".format(gen), end='')
            for k in population:
                print("{}, ".format(k[-1]), end='')
            print()

    return GA.build_network(shape, population[0]), lifetime_error
Example #37
from GA import *
from math import sqrt

def split(c):
    return [i+1 for i in range(len(c)) if c[i] == 0], [i+1 for i in range(len(c)) if c[i] == 1]

def p(c):
    return str(tuple(split(c)))

def calc(c):
    sumPile, prodPile = split(c)
    def product(l):
        r = 1
        for i in l:
            r *= i
        return r
    return (sum(sumPile), product(prodPile))

def error(target):
    def errorFn(c):
        return sqrt(sum((a-b)**2 for a,b in zip(target, calc(c))))
    return errorFn

cards = GAModel(15)
cards.setPrint(p)
cards.setError(error((3+4+6+7+13+14+15-5, 1*2*5*8*9*10*11*12)))

g = GA(cards, 100, 0.75, 0.51)
g.run(2000)
Example #38
def train(inputs, outputs, size, generations, threshold, cRate, mRate, printFile=False):
    """The train method creates a neural netwrok from the sets of 
    inputs and outputs. A population vector of size, is initialized 
    with ranodm weight vectors associated with the weights between 
    nodes in the neural network and will be the values being trained.
    Generations is the max number of generations allowed while 
    threshold is the accuracy needed. cRate and mRate are the 
    crossover and mutation rates respectively."""
    global hero
    global OrigAnswers

    OrigAnswers = copy.deepcopy(outputs)
    # set up NN
    EvaluationNN = GA.create_net(inputs, outputs)

    # initialize population of size as random weights of NN
    population = GA.generatePopulation(EvaluationNN, inputs, outputs, size)

    if printFile:
        f = open("DE.csv", "w")
    gen = 0
    trialV = []
    offspringV = []

    # evaluate the entire population
    GA.evaluate(EvaluationNN, population, inputs, outputs)

    # loop until a hero is found or we've reached max generations
    while gen <= generations and hero == 0:
        for i in range(size):
            # mutate with DE/x/1/bin
            trialV = mutate(population, i, mRate)
            # perform binomial crossover
            offspringV = crossover(population[i], trialV, cRate)
            # evaluation of offspring
            GA.evaluate(EvaluationNN, [offspringV], inputs, outputs)
            # selection of better vector
            if population[i][-1] > offspringV[-1]:
                population[i] = offspringV
        population = sorted(population, key=itemgetter(-1))
        # check for hero in population
        if GA.heroFound(population, threshold):
            break
        else:
            print("Training: {:2.2%}".format(population[0][-1]), "{:2.2%}     ".format(gen / generations), end="\r")
            if printFile:
                f.write("%f," % population[0][-1])
            if printFile:
                f.write("\n")
        gen += 1
    # return best hero if max generations is met and hero hasn't been selected.
    # hero = sorted(population, key=itemgetter(-1))[0]  # default to best in
    # population if no hero steps forward
    if printFile:
        f.close()
    if hero == 0:
        gen -= 1
        hero = sorted(population, key=itemgetter(-1))[0]
    EvaluationNN.SetNNWeights(hero[:-1])  # Load hero into NN, prep for usage.

    # Evaluate the hero on the inputs and outputs
    print("Generations: %d" % gen, " " * 20)
    print("Error Relative: {:2.5%}".format(NN.calcRelativeError(EvaluationNN, inputs, OrigAnswers)))
    print("Least Squares: %d" % NN.calcLeastSquaresError(EvaluationNN, inputs, OrigAnswers))
    print("Loss Squared: %d" % NN.calcLossSquared(EvaluationNN, inputs, OrigAnswers))
    # for x in inputs:
    #    EvaluationNN.SetStartingNodesValues(x)
    #    EvaluationNN.CalculateNNOutputs()
    #    print(x, EvaluationNN.GetNNResults(), EvaluationNN.GetNNResultsInt(), OrigAnswers[inputs.index(x)])
    print()

    return EvaluationNN
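# Hedged sketches of the mutate()/crossover() helpers the DE loop above relies on (the
# real helpers are not shown here). They assume citizens are lists shaped
# [w1, ..., wn, fitness] and implement the usual DE/rand/1 mutation with binomial
# crossover; the exact variant used by the original code may differ, and mRate is
# treated here as the differential weight F.
import random

def mutate(population, i, mRate):
    # DE/rand/1: combine three distinct citizens other than i
    candidates = [k for k in range(len(population)) if k != i]
    r1, r2, r3 = random.sample(candidates, 3)
    a, b, c = population[r1], population[r2], population[r3]
    trial = [a[j] + mRate * (b[j] - c[j]) for j in range(len(a) - 1)]
    trial.append(0)                        # fitness slot, filled in by GA.evaluate
    return trial

def crossover(target, trial, cRate):
    # binomial crossover: take each gene from the trial vector with probability cRate,
    # forcing at least one gene (jrand) to come from it
    n = len(target) - 1
    jrand = random.randrange(n)
    child = [trial[j] if (random.random() < cRate or j == jrand) else target[j]
             for j in range(n)]
    child.append(0)                        # fitness slot, filled in by GA.evaluate
    return child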
Example #39
def train_test():
    global cRate, mRate, threshold, generations, size, participants, victors, inFile, algo, dataset, resultsFile
    inputs = []
    outputs = []
    evolve()
    
    resultsFile.write("DATASET: " + dataset + "\n")
    #resultsFile.write("ALGORITHM | Generations | Size | Participants | Victors | mRate | cRate | Threshold \n")
    #resultsFile.write("   " + str(algo) + "      |     " + str(generations) + "      |  " +
    #          str(size) + "  |     " + str(participants) + "       |    " + str(victors) + 
    #          "    |  " + str(mRate) + "  |  " + str(cRate) + "  |   " + str(threshold) + "     \n")

    dataIn = dataHandler()
    inputs = dataIn[0]
    outputs = dataIn[1]
    testInput = []
    testOutput = []
    learnrate = 0.3
    momentum = 0.5
    # Need 20% of inputs for testing
    for i in range((int(len(inputs)*0.8)+1), len(inputs)):
        x = random.choice(inputs)
        testInput.append(x)
        testOutput.append(outputs[inputs.index(x)])
        del outputs[inputs.index(x)]
        del inputs[inputs.index(x)]
    resultsFile.write("\nTest inputs: \n")
    for i in range(len(testInput)):
        resultsFile.write("%s " % testInput[i])
    resultsFile.write("\nTest expected outputs: \n")
    for i in range(len(testOutput)):
        resultsFile.write("%s " % testOutput[i])
    # Which algorithm gets chosen to run
    if algo in 'G':
        print("DOING GA TRAINING...")
        resultsFile.write("\nALGORITHM | Generations | Size | Participants | Victors | mRate | cRate | Threshold \n")
        resultsFile.write("   " + str(algo) + "      |     " + str(generations) + "      |  " + str(size) + "  |     " + str(participants) + "       |    " + str(victors) + "    |  " + str(mRate) + "  |  " + str(cRate) + "  |   " + str(threshold) + "     \n")
        testNN = GA.train(inputs, outputs, size, participants, victors, generations, threshold, cRate, mRate)
    elif algo in 'E':
        print("DOING ES TRAINING...")
        resultsFile.write("\nALGORITHM | Generations | Size | Participants | Victors | mRate | cRate | Threshold \n")
        resultsFile.write("   " + str(algo) + "      |     " + str(generations) + "      |  " + str(size) + "  |     " + str(participants) + "       |    " + str(victors) + "    |  " + str(mRate) + "  |  " + str(cRate) + "  |   " + str(threshold) + "     \n")
        testNN = ES.train(inputs, outputs, size, participants, victors, generations, threshold, cRate, mRate)
    elif algo in 'D':
        print("DOING DE TRAINING...")
        resultsFile.write("\nALGORITHM | Generations | Size | mRate | cRate | Threshold \n")
        resultsFile.write("   " + str(algo) + "      |     " + str(generations) + "      |  " +  str(size) + "    |  " + str(mRate) + "  |  " + str(cRate) + "  |   " + str(threshold) + "     \n")
        testNN = DE.train(inputs, outputs, size, generations, threshold, cRate, mRate)
    elif algo in 'B':
        print("DOING BP TRAINING...")
        resultsFile.write("\nALGORITHM | Generations | learnrate | momentum | Threshold \n")
        resultsFile.write("   " + str(algo) + "      |     " + str(generations) + "      |  " + str(learnrate) + "  |  " + str(momentum) + "  |   " + str(threshold) + "     \n")
        testNN = NN.main(inputs, [['S','S','S'], ['S','S']], ['S'], outputs, generations, learnrate, threshold, momentum)
    else:
        print("Unrecognized algorithm!")
        sys.exit()
    # Print test input/expected output - could be made prettier in a table
    # Start testing testNN
    for x in testInput:
        resultsFile.write("\nSet starting node vals\n")
        resultsFile.write("%s \n" % testNN.SetStartingNodesValues(x))
        testNN.CalculateNNOutputs()
        resultsFile.write("\nTest Input: " + str(x) + "\n")
        resultsFile.write("\nTest results: %s\n" % testNN.GetNNResults())
    resultsFile.write("\nRelative Error: {:2.2%} \n".format(NN.calcRelativeError(testNN, testInput, testOutput)))
    resultsFile.write("\nLeast Squares Error: %s \n" % NN.calcLeastSquaresError(testNN, testInput, testOutput))
    resultsFile.write("\nLoss Squared Error: %s \n" % NN.calcLossSquared(testNN, testInput, testOutput))
    resultsFile.write("\nPercent Misidentified: {:2.2%} \n".format(NN.calcPercentIncorrect(testNN, testInput, testOutput)))
    resultsFile.close()
Example #40
from GA import *
from math import sqrt

def split(c):
    return [i+1 for i in range(len(c)) if c[i] == 0], [i+1 for i in range(len(c)) if c[i] == 1]

def p(c):
    return tuple((2*n - 1) * a[i] for n, i in zip(c, range(l)))

target = [1, -6, 5, -3, -2, 4, -4, -5, 8, 3, 7, 2, -5, -9, 4]
a = tuple(abs(a) for a in target)
l = len(target)

def errorFn(c):
    s =  sum((2*n - 1) * a[i] for n, i in zip(c, range(l)))
    return 1./(1. + abs(s))

cards = GAModel(l)
cards.setPrint(p)
cards.setError(errorFn)

g = GA(cards, 200, 0.85, 0.21)
g.run(1500)
Example #41
import GA
from GA import *
import numpy as np

def square(x):
    term1 = (x[0]*x[0]+x[1]-11.0)*(x[0]*x[0]+x[1]-11.0);
    term2 = (x[0]+x[1]*x[1]- 7.0)*(x[0]+x[1]*x[1]- 7.0);
    term3 = term1+term2;
    return -1*term3
    # return -np.sum(np.power(x,2))
ga=GA(square, dim=2, popsize=40, ngen=50, pc=0.9, pm=0.1, etac=2, etam=100)
ga.setbounds(np.zeros(10), 10*np.ones(10))
#ga.pop_init()
print ga.run()

Example #42
    HL2_neurons = 50
    HL1_HL2_weights = numpy.random.uniform(low=-0.1,
                                           high=0.1,
                                           size=(HL1_neurons, HL2_neurons))

    output_neurons = 1
    HL2_output_weights = numpy.random.uniform(low=-0.1,
                                              high=0.1,
                                              size=(HL2_neurons,
                                                    output_neurons))

    initial_pop_weights.append(
        numpy.array([input_HL1_weights, HL1_HL2_weights, HL2_output_weights]))

pop_weights_mat = numpy.array(initial_pop_weights)
pop_weights_vector = GA.mat_to_vector(pop_weights_mat)

# optimize the network weights with the genetic algorithm
bestfit = 999999999
bestfit_idx = 0
bestfit_weight_vector = None
losses = numpy.empty(shape=(num_generations))
for generation in range(num_generations - 1):
    print("Generation : ", generation)

    # network weights
    pop_weights_mat = GA.vector_to_mat(pop_weights_vector, pop_weights_mat)

    # get the loss of each individual; the loss is used as the fitness, so smaller is better
    loss_value = ANN.fitness(pop_weights_mat,
                             X_train,
print(new_population)
"""
new_population[0, :] = [2.4,  0.7, 8, -2,   5,   1.1]
new_population[1, :] = [-0.4, 2.7, 5, -1,   7,   0.1]
new_population[2, :] = [-1,   2,   2, -3,   2,   0.9]
new_population[3, :] = [4,    7,   12, 6.1, 1.4, -4]
new_population[4, :] = [3.1,  4,   0,  2.4, 4.8,  0]
new_population[5, :] = [-2,   3,   -7, 6,   3,    3]
"""

best_outputs = []
num_generations = 1000
for generation in range(num_generations):
    print("Generation : ", generation)
    # Measuring the fitness of each chromosome in the population.
    fitness = GA.cal_pop_fitness(equation_inputs, new_population)
    print("Fitness")
    print(fitness)

    best_outputs.append(
        numpy.max(numpy.sum(new_population * equation_inputs, axis=1)))
    # The best result in the current iteration.
    print("Best result : ",
          numpy.max(numpy.sum(new_population * equation_inputs, axis=1)))

    # Selecting the best parents in the population for mating.
    parents = GA.select_mating_pool(new_population, fitness,
                                    num_parents_mating)
    print("Parents")
    print(parents)
Example #44
def main():
    GA.maintest()