Example #1
def SA():
    SA_iters = 10
    correctCount = 0
    t = 0
    totalTime = 0
    totalIters = 0
    global sa
    sa = SimulatedAnnealing(1e11, .85, hcp)
    while correctCount < NUM_RIGHT:
        start = time.time()
        fit = FixedIterationTrainer(sa, SA_iters)
        fitness = fit.train()
        t = time.time() - start
        totalTime+=t
        totalIters+= SA_iters
        myWriter.addValue(fitness, "SA_fitness", runNum)
        myWriter.addValue(t, "SA_searchTimes",runNum)
        v = ef.value(sa.getOptimal())
        if v == N:
            correctCount += 1
        else:
            correctCount = 0
            #SA_iters += 1
    myWriter.addValue(t,"SA_times",0)
    myWriter.addValue(int(SA_iters),"SA_iters",0)
    print str(N) + ": SA: " + str(ef.value(sa.getOptimal())) + " took " + str(totalTime) + " seconds and " + str(totalIters) + " iterations"
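# Note: SA() above assumes module-level ABAGAIL objects (ef, hcp, myWriter, N,
# NUM_RIGHT, runNum) defined elsewhere in the original script. A minimal sketch of
# that context for a count-ones problem, assuming ABAGAIL.jar is on the Jython
# classpath; the _CsvStub logger is a hypothetical stand-in for myWriter.
import time
from array import array
from opt.example import CountOnesEvaluationFunction
from dist import DiscreteUniformDistribution
from opt import DiscreteChangeOneNeighbor, GenericHillClimbingProblem, SimulatedAnnealing
from shared import FixedIterationTrainer

N = 80                        # bitstring length; also the target fitness checked in SA()
NUM_RIGHT = 10                # consecutive optimal results required before stopping
runNum = 0
ranges = array('i', [2] * N)
ef = CountOnesEvaluationFunction()
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)

class _CsvStub:
    # hypothetical logger with the addValue(value, series, run) interface used above
    def addValue(self, value, series, run):
        print series, run, value

myWriter = _CsvStub()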
Example #2
def main():
    """
    Run algorithms on the gamma dataset.
    Essentially ran twice for 2-fold cross validation
    Metrics are evaluated outside of this file
    """
    train_data = initialize_instances(TRAIN_FILE)
    test_data = initialize_instances(TEST_FILE)                 # Get data
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(train_data)

    networks = []  # BackPropagationNetwork
    nnop = []      # NeuralNetworkOptimizationProblem
    oa = []        # OptimizationAlgorithm
    oa_names = ["RHC", "SA", "GA"]
    results = ""

    # Create each network architecture and an optimization instance
    for name in oa_names:
        activation = RELU()
        # Change network size
        classification_network = factory.createClassificationNetwork([INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER], activation)
        networks.append(classification_network)
        nnop.append(NeuralNetworkOptimizationProblem(data_set, classification_network, measure))

    # Randomized optimization algorithms
    oa.append(RandomizedHillClimbing(nnop[0]))
    oa.append(SimulatedAnnealing(1E11, .95, nnop[1]))
    oa.append(StandardGeneticAlgorithm(200, 100, 10, nnop[2]))

    # Go through each optimization problem and do 2-fold CV
    for i, name in enumerate(oa_names):
        start = time.time()
        metrics = train(oa[i], networks[i], oa_names[i], train_data, test_data, measure)
        end = time.time()
        training_time = end - start
        results += "\nFold 1 train time: %0.03f seconds" % (training_time,)

        # Write data to CSV file
        with open("metrics/" + oa_names[i] + '_f1.csv', 'w') as f:
            writer = csv.writer(f)
            for metric in metrics:
                writer.writerow(metric)

    print results

    # 2nd fold: swap the train and test files
    train_data = initialize_instances(TEST_FILE)
    test_data = initialize_instances(TRAIN_FILE)                 # Get data
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(train_data)

    networks = []  # BackPropagationNetwork
    nnop = []      # NeuralNetworkOptimizationProblem
    oa = []        # OptimizationAlgorithm
    oa_names = ["RHC", "SA", "GA"]
    results = ""

    # Create each network architecture and an optimization instance
    for name in oa_names:
        activation = RELU()
        # Change network size
        classification_network = factory.createClassificationNetwork([INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER], activation)
        networks.append(classification_network)
        nnop.append(NeuralNetworkOptimizationProblem(data_set, classification_network, measure))

    # Randomized optimization algorithms
    oa.append(RandomizedHillClimbing(nnop[0]))
    oa.append(SimulatedAnnealing(1E11, .95, nnop[1]))
    oa.append(StandardGeneticAlgorithm(200, 100, 10, nnop[2]))

    # Go through each optimization problem and do 2-fold CV
    for i, name in enumerate(oa_names):
        start = time.time()
        metrics = train(oa[i], networks[i], oa_names[i], train_data, test_data, measure)
        end = time.time()
        training_time = end - start
        results += "\nFold 1 train time: %0.03f seconds" % (training_time,)

        # Write data to CSV file
        with open("metrics/" + oa_names[i] + '_f2.csv', 'w') as f:
            writer = csv.writer(f)
            for metric in metrics:
                writer.writerow(metric)

    print results
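# Note: the two fold blocks above are verbatim copies that differ only in which
# file is train vs. test and in the CSV suffix. A hedged refactoring sketch (the
# run_fold helper and its name are ours, not part of the original script; it reuses
# the script's initialize_instances and train helpers):
import csv

def run_fold(train_file, test_file, suffix):
    train_data = initialize_instances(train_file)
    test_data = initialize_instances(test_file)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(train_data)
    networks, nnop, oa = [], [], []
    for _ in range(3):
        net = factory.createClassificationNetwork(
            [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER], RELU())
        networks.append(net)
        nnop.append(NeuralNetworkOptimizationProblem(data_set, net, measure))
    oa.append(RandomizedHillClimbing(nnop[0]))
    oa.append(SimulatedAnnealing(1E11, .95, nnop[1]))
    oa.append(StandardGeneticAlgorithm(200, 100, 10, nnop[2]))
    for i, name in enumerate(["RHC", "SA", "GA"]):
        metrics = train(oa[i], networks[i], name, train_data, test_data, measure)
        with open("metrics/" + name + suffix + ".csv", 'w') as f:
            csv.writer(f).writerows(metrics)

# main() would then reduce to:
# run_fold(TRAIN_FILE, TEST_FILE, '_f1')
# run_fold(TEST_FILE, TRAIN_FILE, '_f2')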
network_rhc = factory.createClassificationNetwork([inputLayer, hiddenLayer, outputLayer])
nnop_rhc = NeuralNetworkOptimizationProblem(set, network_rhc, measure)
rhc = RandomizedHillClimbing(nnop_rhc)
fit = FixedIterationTrainer(rhc, it_rhc)
fit.train()
op = rhc.getOptimal()
network_rhc.setWeights(op.getData())
print "\nRHC training error:", errorRate(network_rhc, train)
print "RHC training confusion matrix:", confusionMatrix(network_rhc, train)
print "    RHC test error:", errorRate(network_rhc, test)
print "    RHC test confusion matrix:", confusionMatrix(network_rhc, test)

# learn weights with simulated annealing
network_sa = factory.createClassificationNetwork([inputLayer, hiddenLayer, outputLayer])
nnop_sa = NeuralNetworkOptimizationProblem(set, network_sa, measure)
sa = SimulatedAnnealing(1E11, 0.95, nnop_sa)
fit = FixedIterationTrainer(sa, it_sa)
fit.train()
op = sa.getOptimal()
network_sa.setWeights(op.getData())
print "\nSA training error:", errorRate(network_sa, train)
print "SA training confusion matrix:", confusionMatrix(network_sa, train)
print "    SA test error:", errorRate(network_sa, test)
print "    SA test confusion matrix:", confusionMatrix(network_sa, test)

exit()

# learn weights with genetic algorithms
network_ga = factory.createClassificationNetwork([inputLayer, hiddenLayer, outputLayer])
nnop_ga = NeuralNetworkOptimizationProblem(set, network_ga, measure)
ga = StandardGeneticAlgorithm(200, 100, 10, nnop_ga)
t0 = time.time()
calls = []
results = []
for _ in range(runs):
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, iters)
    fitness = fit.train()
    results.append(ef.value(rhc.getOptimal()))
    calls.append(ef.getTotalCalls())
    ef.clearCount()
print "RHC, average results , " + str(sum(results)/float(runs))
print "RHC, average feval calls , " + str(sum(calls)/float(runs))
t1 = time.time() - t0
print "RHC, average time , " + str(float(t1)/runs)



t0 = time.time()
calls = []
results = []
for _ in range(runs):
    sa = SimulatedAnnealing(1E11, .95, hcp)
    fit = FixedIterationTrainer(sa, iters)
    fitness = fit.train()
    results.append(ef.value(sa.getOptimal()))
    calls.append(ef.getTotalCalls())
    ef.clearCount()    
print "SA95, average results , " + str(sum(results)/float(runs))
print "SA95, average feval calls , " + str(sum(calls)/float(runs))
t1 = time.time() - t0
print "SA95, average time , " + str(t1/float(runs))


t0 = time.time()
calls = []
results = []
for _ in range(runs):
Example #5
mf = SwapMutation()
cf = TravelingSalesmanCrossOver(ef)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 200000)
fit.train()
print "RHC Inverse of Distance: " + str(ef.value(rhc.getOptimal()))
print "Route:"
path = []
for x in range(0, N):
    path.append(rhc.getOptimal().getDiscrete(x))
print path

sa = SimulatedAnnealing(1e12, 0.999, hcp)
fit = FixedIterationTrainer(sa, 200000)
fit.train()
print "SA Inverse of Distance: " + str(ef.value(sa.getOptimal()))
print "Route:"
path = []
for x in range(0, N):
    path.append(sa.getOptimal().getDiscrete(x))
print path


ga = StandardGeneticAlgorithm(2000, 1500, 250, gap)
fit = FixedIterationTrainer(ga, 1000)
fit.train()
print "GA Inverse of Distance: " + str(ef.value(ga.getOptimal()))
print "Route:"
if (max > goal):
    goal = max

# run RHC
rhc = RandomizedHillClimbing(hcp)
max = 0
i = 0
while (max < goal and i < timeout):
    rhc.train()
    i += 1
    max = ef.value(rhc.getOptimal())
    #print "rhc,", i,",", max, ',', goal
print "rhc,", i,",", max, ',', goal

# run SA
sa = SimulatedAnnealing(1E11, .95, hcp)
max = 0
i = 0
while (max < goal and i < timeout):
    sa.train()
    i += 1
    max = ef.value(sa.getOptimal())
    #print "sa,", i,",", max, ',', goal
print "sa,", i,",", max, ',', goal

# run GA
ga = StandardGeneticAlgorithm(200, 100, 25, gap)
max = 0
i = 0
while (max < goal and i < timeout):
    ga.train()
    i += 1
    max = ef.value(ga.getOptimal())
    #print "ga,", i,",", max, ',', goal
print "ga,", i,",", max, ',', goal
ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = UniformCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 200000)
fit.train()
print "RHC: " + str(ef.value(rhc.getOptimal()))

sa = SimulatedAnnealing(100, .95, hcp)
fit = FixedIterationTrainer(sa, 200000)
fit.train()
print "SA: " + str(ef.value(sa.getOptimal()))

ga = StandardGeneticAlgorithm(200, 150, 25, gap)
fit = FixedIterationTrainer(ga, 1000)
fit.train()
print "GA: " + str(ef.value(ga.getOptimal()))

mimic = MIMIC(200, 100, pop)
fit = FixedIterationTrainer(mimic, 1000)
fit.train()
print "MIMIC: " + str(ef.value(mimic.getOptimal()))
Example #8
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    rhc = RandomizedHillClimbing(hcp)
    sa = SimulatedAnnealing(SA_TEMPERATURE, SA_COOLING_FACTOR, hcp)
    ga = StandardGeneticAlgorithm(GA_POPULATION, GA_CROSSOVER, GA_MUTATION,
                                  gap)
    mimic = MIMIC(MIMIC_SAMPLES, MIMIC_TO_KEEP, pop)

    for n_iteration in iterations:
        fit_rhc = FixedIterationTrainer(rhc, n_iteration)
        fit_sa = FixedIterationTrainer(sa, n_iteration)
        fit_ga = FixedIterationTrainer(ga, n_iteration)
        fit_mimic = FixedIterationTrainer(mimic, n_iteration)

        print("calculating the %d th iteration" % n_iteration)

        # Training
        start_rhc = time.time()
        fit_rhc.train()
Example #9
    start = time.time()
    fit.train()
    dur = time.time() - start
    print "Iters: " + str(iters) + ", Fitness: " + str(
        ef.value(rhc.getOptimal())) + ", Dur: " + str(dur)
# print "Route:"
# path = []
# for x in range(0,N):
#     path.append(rhc.getOptimal().getDiscrete(x))
# print path

print "Simulated Annealing"
# 1e13, 0.8, 1e12 0.85, ... dang
temp = 1E13
cooling_rate = 0.90
sa = SimulatedAnnealing(temp, cooling_rate, hcp)
for iters in iters_list:
    fit = FixedIterationTrainer(sa, iters)
    start = time.time()
    fit.train()
    dur = time.time() - start
    print "Iters: " + str(iters) + ", Fitness: " + str(
        ef.value(sa.getOptimal())) + ", Dur: " + str(dur)
# print "Route:"
# path = []
# for x in range(0,N):
#     path.append(sa.getOptimal().getDiscrete(x))
# print path

# print "Genetic Algorithm"
# # 2000, 1500, 250 gives good results
Example #10
def knapsackfunc(NUM_ITEMS, iterations):

    rhcMult = 600
    saMult = 600
    gaMult = 4
    mimicMult = 3

    # Random number generator
    random = Random()
    # The number of items
    #NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME

    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    optimalOut = []
    timeOut = []
    evalsOut = []

    for niter in iterations:

        iterOptimalOut = [NUM_ITEMS, niter]
        iterTimeOut = [NUM_ITEMS, niter]
        iterEvals = [NUM_ITEMS, niter]

        start = time.time()
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, niter*rhcMult)
        fit.train()
        end = time.time()
        rhcOptimal = ef.value(rhc.getOptimal())
        rhcTime = end - start
        print "RHC optimum: " + str(rhcOptimal)
        print "RHC time: " + str(rhcTime)
        iterOptimalOut.append(rhcOptimal)
        iterTimeOut.append(rhcTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        sa = SimulatedAnnealing(100, .95, hcp)
        fit = FixedIterationTrainer(sa, niter*saMult)
        fit.train()
        end = time.time()
        saOptimal = ef.value(sa.getOptimal())
        saTime = end - start
        print "SA optimum: " + str(saOptimal)
        print "SA time: " + str(saTime)
        iterOptimalOut.append(saOptimal)
        iterTimeOut.append(saTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        ga = StandardGeneticAlgorithm(200, 150, 25, gap)
        fit = FixedIterationTrainer(ga, niter*gaMult)
        fit.train()
        end = time.time()
        gaOptimal = ef.value(ga.getOptimal())
        gaTime = end - start
        print "GA optimum: " + str(gaOptimal)
        print "GA time: " + str(gaTime)
        iterOptimalOut.append(gaOptimal)
        iterTimeOut.append(gaTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        mimic = MIMIC(200, 100, pop)
        fit = FixedIterationTrainer(mimic, niter*mimicMult)
        fit.train()
        end = time.time()
        mimicOptimal = ef.value(mimic.getOptimal())
        mimicTime = end - start
        print "MIMIC optimum: " + str(mimicOptimal)
        print "MIMIC time: " + str(mimicTime)
        iterOptimalOut.append(mimicOptimal)
        iterTimeOut.append(mimicTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        optimalOut.append(iterOptimalOut)
        timeOut.append(iterTimeOut)
        evalsOut.append(iterEvals)

    return [optimalOut, timeOut, evalsOut]
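# Note: a hedged usage sketch for knapsackfunc; the iteration budgets, problem
# sizes, and CSV file names below are ours, not from the original experiment.
import csv

iteration_budgets = [10, 50, 100, 500]
for num_items in [20, 40, 60]:
    optima, times, evals = knapsackfunc(num_items, iteration_budgets)
    with open('knapsack_%d.csv' % num_items, 'w') as f:
        writer = csv.writer(f)
        writer.writerows(optima)   # rows: [NUM_ITEMS, niter, rhc, sa, ga, mimic]
        writer.writerows(times)
        writer.writerows(evals)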
        df = DiscreteDependencyTree(.1, ranges)
        hcp = GenericHillClimbingProblem(ef, odd, nf)
        gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

        start = time.time()
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, iteration)
        fit.train()
        end = time.time()
        rhc_time = end - start
        rhc_fit = ef.value(rhc.getOptimal())
        # print "RHC: " + str(rhc_fit)

        start = time.time()
        sa = SimulatedAnnealing(1e11, .95, hcp)
        fit = FixedIterationTrainer(sa, iteration / 200)
        fit.train()
        end = time.time()
        sa_time = end - start
        sa_fit = ef.value(sa.getOptimal())
        # print "SA: " + str(sa_fit)

        start = time.time()
        ga = StandardGeneticAlgorithm(200, 150, 25, gap)
        fit = FixedIterationTrainer(ga, iteration)
        fit.train()
        end = time.time()
        ga_time = end - start
        ga_fit = ef.value(ga.getOptimal())
        # print "GA: " + str(ga_fit)
Example #12
def main():
    """Run algorithms on the cancer dataset."""

    instances = initialize_instances()
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(instances)

    max_iterations = TRAINING_ITERATIONS

    hidden_layer_size = HIDDEN_LAYER

    # for _hidden_layer in xrange(HIDDEN_LAYER):
    # hidden_layer_size = _hidden_layer + 1

    network = None  # BackPropagationNetwork
    nnop = None  # NeuralNetworkOptimizationProblem
    oa = None  # OptimizationAlgorithm
    results = ""

    for cooling in [.95, .8, .65, .5, .35, .2]:
        RandomOrderFilter().filter(data_set)
        train_test_split = TestTrainSplitFilter(TRAIN_TEST_SPLIT)
        train_test_split.filter(data_set)

        train_set = train_test_split.getTrainingSet()
        test_set = train_test_split.getTestingSet()

        network = factory.createClassificationNetwork(
            [INPUT_LAYER, hidden_layer_size, OUTPUT_LAYER])
        nnop = NeuralNetworkOptimizationProblem(train_set, network, measure)

        oa = SimulatedAnnealing(1E11, cooling, nnop)

        start = time.time()
        correct = 0
        incorrect = 0

        train(oa, network, "SA", train_set, test_set, measure, cooling)
        end = time.time()
        training_time = end - start

        optimal_instance = oa.getOptimal()
        network.setWeights(optimal_instance.getData())

        start = time.time()
        for instance in test_set.getInstances():
            network.setInputValues(instance.getData())
            network.run()

            actual = instance.getLabel().getContinuous()
            predicted = network.getOutputValues().get(0)

            if abs(predicted - actual) < 0.5:
                correct += 1
            else:
                incorrect += 1

        end = time.time()
        testing_time = end - start

        _results = ""
        _results += "\n[SA] cooling=%0.02f" % (cooling)
        _results += "\nResults for SA: \nCorrectly classified %d instances." % (
            correct)
        _results += "\nIncorrectly classified %d instances.\nPercent correctly classified: %0.03f%%" % (
            incorrect, float(correct) / (correct + incorrect) * 100.0)
        _results += "\nTraining time: %0.03f seconds" % (training_time, )
        _results += "\nTesting time: %0.03f seconds\n" % (testing_time, )

        with open('out/sa/cooling-%0.02f.log' % (cooling), 'w') as f:
            f.write(_results)

        results += _results

    print results
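# Note: for intuition on the sweep, ABAGAIL's SimulatedAnnealing multiplies the
# temperature by the cooling factor on every train() call, so t_k = t_0 * cooling**k.
# A back-of-envelope sketch (our arithmetic) of how fast each setting cools from 1E11:
import math

t0 = 1e11
for cooling in [.95, .8, .65, .5, .35, .2]:
    k = math.log(t0) / math.log(1.0 / cooling)  # iterations until temperature < 1
    print "cooling=%.2f drops below t=1 after ~%d iterations" % (cooling, int(k))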
Example #13
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

# ---------------------------------------------------------------
N_ITERS = 10001

# SA

for cool_fac in [0.9, 0.92, 0.94, 0.96, 0.98]:
    print cool_fac
    fit_hist = []
    for i in xrange(30):
        hcp = GenericHillClimbingProblem(ef, odd, nf)
        sa = SimulatedAnnealing(1E12, cool_fac, hcp)

        fh = FixedIterTrainer(ef, sa, N_ITERS)
        fit_hist.append(fh)
    write_hist_csv(fit_hist, 'fitness_sa_cf_' + str(cool_fac))

for power in range(1, 12, 2):
    fit_hist = []
    for i in xrange(30):
        hcp = GenericHillClimbingProblem(ef, odd, nf)
        sa = SimulatedAnnealing(10**power, 0.94, hcp)

        fh = FixedIterTrainer(ef, sa, N_ITERS)
        fit_hist.append(fh)
    write_hist_csv(fit_hist, 'fitness_sa_t_' + str(power))
for i in xrange(30):
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    rhc = RandomizedHillClimbing(hcp)

    fh, th = FixedIterTrainer(ef, rhc, N_ITERS)
    fit_hist.append(fh)
    time_hist.append(th)
write_hist_csv(fit_hist, 'fitness_rhc')
write_hist_csv(time_hist, 'time_rhc')

# SA
fit_hist = []
time_hist = []
for i in xrange(30):
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    sa = SimulatedAnnealing(1E12, .99, hcp)

    fh, th = FixedIterTrainer(ef, sa, N_ITERS)
    fit_hist.append(fh)
    time_hist.append(th)
write_hist_csv(fit_hist, 'fitness_sa')
write_hist_csv(time_hist, 'time_sa')

# GA
fit_hist = []
time_hist = []
for i in xrange(30):
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    ga = StandardGeneticAlgorithm(200, 100, 20, gap)

    fh, th = FixedIterTrainer(ef, ga, N_ITERS)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

from time import time

rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 600000)
t0 = time()
fit.train()
print "RHC: " + str(ef.value(rhc.getOptimal())), "time taken", time() - t0

sa = SimulatedAnnealing(1E11, .95, hcp)
fit = FixedIterationTrainer(sa, 600000)

t0 = time()
fit.train()
print "SA: " + str(ef.value(sa.getOptimal())), "time taken", time() - t0

ga = StandardGeneticAlgorithm(200, 100, 10, gap)
fit = FixedIterationTrainer(ga, 20000)

t0 = time()
fit.train()

print "GA: " + str(ef.value(ga.getOptimal())), "time taken", time() - t0

mimic = MIMIC(50, 10, pop)
Example #16
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, N)
    start = time.time()
    fit.train()
    end = time.time()
    training_time = end - start
    print "RHC Inverse of Distance: " + str(ef.value(rhc.getOptimal()))

    OUTFILE = "%s%s.csv" % (OUTFILE_BASE, "RHC")
    with open(OUTFILE, 'a+') as f:
        f.write("%d,%f,%f\n" % (N, training_time, ef.value(rhc.getOptimal())))

    sa = SimulatedAnnealing(1E12, .999, hcp)
    fit = FixedIterationTrainer(sa, N)
    start = time.time()
    fit.train()
    end = time.time()
    training_time = end - start
    print "SA Inverse of Distance: " + str(ef.value(sa.getOptimal()))

    OUTFILE = "%s%s.csv" % (OUTFILE_BASE, "SA")
    with open(OUTFILE, 'a+') as f:
        f.write("%d,%f,%f\n" % (N, training_time, ef.value(sa.getOptimal())))

    ga = StandardGeneticAlgorithm(2000, 1500, 250, gap)
    fit = FixedIterationTrainer(ga, N)
    start = time.time()
    fit.train()
def main():
    """Run algorithms on the abalone dataset."""
    instances = initialize_instances()
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(instances)

    networks = []  # BackPropagationNetwork
    nnop = []  # NeuralNetworkOptimizationProblem
    oa = []  # OptimizationAlgorithm
    oa_names = ["RHC", "SA", "GA"]
    results = ""

    for name in oa_names:
        classification_network = factory.createClassificationNetwork(
            [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER])
        networks.append(classification_network)
        nnop.append(
            NeuralNetworkOptimizationProblem(data_set, classification_network,
                                             measure))

    oa.append(RandomizedHillClimbing(nnop[0]))
    oa.append(SimulatedAnnealing(1E11, .95, nnop[1]))
    oa.append(StandardGeneticAlgorithm(200, 100, 10, nnop[2]))

    for i, name in enumerate(oa_names):
        start = time.time()
        correct = 0
        incorrect = 0

        train(oa[i], networks[i], oa_names[i], instances, measure)
        end = time.time()
        training_time = end - start

        optimal_instance = oa[i].getOptimal()
        networks[i].setWeights(optimal_instance.getData())

        start = time.time()
        for instance in instances:
            networks[i].setInputValues(instance.getData())
            networks[i].run()

            actual = instance.getLabel().getContinuous()
            predicted = networks[i].getOutputValues().get(0)

            if abs(predicted - actual) < 0.5:
                correct += 1
            else:
                incorrect += 1

        end = time.time()
        testing_time = end - start

        results += "\nResults for %s: \nCorrectly classified %d instances." % (
            name, correct)
        results += "\nIncorrectly classified %d instances.\nPercent correctly classified: %0.03f%%" % (
            incorrect, float(correct) / (correct + incorrect) * 100.0)
        results += "\nTraining time: %0.03f seconds" % (training_time, )
        results += "\nTesting time: %0.03f seconds\n" % (testing_time, )

    print results
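# Note: the score-the-network loop above recurs in several of these scripts; a
# hedged helper sketch (classification_accuracy is our name) with the label as
# "actual" and the network output as "predicted":
def classification_accuracy(network, instances):
    correct, incorrect = 0, 0
    for instance in instances:
        network.setInputValues(instance.getData())
        network.run()
        actual = instance.getLabel().getContinuous()
        predicted = network.getOutputValues().get(0)
        if abs(predicted - actual) < 0.5:
            correct += 1
        else:
            incorrect += 1
    return correct, incorrect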
Example #18
mf = SwapMutation()
cf = TravelingSalesmanCrossOver(ef)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 200000)
fit.train()
print "RHC Inverse of Distance: " + str(ef.value(rhc.getOptimal()))
print "Route:"
path = []
for x in range(0, N):
    path.append(rhc.getOptimal().getDiscrete(x))
print path

sa = SimulatedAnnealing(1E12, .999, hcp)
fit = FixedIterationTrainer(sa, 200000)
fit.train()
print "SA Inverse of Distance: " + str(ef.value(sa.getOptimal()))
print "Route:"
path = []
for x in range(0, N):
    path.append(sa.getOptimal().getDiscrete(x))
print path

ga = StandardGeneticAlgorithm(2000, 1500, 250, gap)
fit = FixedIterationTrainer(ga, 1000)
fit.train()
print "GA Inverse of Distance: " + str(ef.value(ga.getOptimal()))
print "Route:"
path = []
Example #19
for CE in [0.15, 0.35, 0.55, 0.75, 0.95]:
    fname = outfile.replace('@ALG@', 'SA{}'.format(CE))
    with open(fname, 'w') as f:
        f.write('N,trial,iterations,fitness,time,fevals\n')
    # Iterate over bitstring length (model complexity)
    for N in range(N_min, N_max + 1, N_min):
        fill = [2] * N
        ranges = array('i', fill)
        # Iterate over number of trials
        for t in range(numTrials):
            # Setup
            ef = FlipFlopEvaluationFunction()
            odd = DiscreteUniformDistribution(ranges)
            nf = DiscreteChangeOneNeighbor(ranges)
            hcp = GenericHillClimbingProblem(ef, odd, nf)
            sa = SimulatedAnnealing(1E10, CE, hcp)
            fit = FixedIterationTrainer(sa, increments)
            times = [0]
            lastScore = 0
            halt_count = 0
            total_iter = 0
            # Evaluate at iteration increments
            for i in range(0, maxIters, increments):
                start = time.clock()
                fit.train()
                elapsed = time.clock() - start
                times.append(times[-1] + elapsed)
                fevals = ef.fevals
                score = ef.value(sa.getOptimal())
                ef.fevals -= 1  # discount the evaluation used for reporting
                st = '{},{},{},{},{},{},{}\n'.format(N, t, i, score, times[-1],
Example #20
def main():
    """Run algorithms on the abalone dataset."""
    train_instances = initialize_instances()
    test_instances = initialize_instances(test=True)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(train_instances)

    networks = []  # BackPropagationNetwork
    oa = []  # OptimizationAlgorithm
    oa_names = []
    if do_rhc:
        oa_names.append("RHC")
    if do_sa:
        oa_names.append("SA")
    if do_ga:
        oa_names.append("GA")
    if do_bp:
        oa_names.append("BP")
    results = ""

    # For each algo, need to see if we are doing sweeps

    # No need to sweep rhc as there are no parameters
    if do_rhc and not sweep:
        training_iter = TRAINING_ITERATIONS
        if do_fmnist:
            classification_network = factory.createClassificationNetwork(
                [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER])
        if do_chess:
            classification_network = factory.createClassificationNetwork(
                [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER])
        nnop = NeuralNetworkOptimizationProblem(data_set,
                                                classification_network,
                                                measure)
        oa = RandomizedHillClimbing(nnop)
        name = "RHC"
        train(oa, classification_network, name, train_instances, measure,
              training_iter, test_instances, True)

    if do_sa:
        training_iter = TRAINING_ITERATIONS
        count = 0
        for temp, cooling in product(sa_temp, sa_cooling):
            if do_fmnist:
                classification_network = factory.createClassificationNetwork(
                    [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER])
            if do_chess:
                classification_network = factory.createClassificationNetwork(
                    [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER])
            nnop = NeuralNetworkOptimizationProblem(data_set,
                                                    classification_network,
                                                    measure)
            oa = SimulatedAnnealing(temp, cooling, nnop)
            name = "SA_sweep"
            print_head = (count == 0)
            train(oa, classification_network, name, train_instances, measure,
                  training_iter, test_instances, print_head, temp, cooling)
            count += 1

    if do_ga:
        training_iter = GA_TRAINING_ITERATIONS
        count = 0
        for pop, prop_mate, prop_mutate in product(ga_pop, ga_prop_mate,
                                                   ga_prop_mutate):
            if do_fmnist:
                classification_network = factory.createClassificationNetwork(
                    [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER])
            if do_chess:
                classification_network = factory.createClassificationNetwork(
                    [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER])
            nnop = NeuralNetworkOptimizationProblem(data_set,
                                                    classification_network,
                                                    measure)
            mate = int(math.floor(pop * prop_mate))
            mutate = int(math.floor(pop * prop_mutate))
            oa = StandardGeneticAlgorithm(pop, mate, mutate, nnop)
            name = "GA_sweep"
            print_head = (count == 0)
            train(oa, classification_network, name, train_instances, measure,
                  training_iter, test_instances, print_head, pop, prop_mate,
                  prop_mutate)
            count += 1

    if do_bp and not sweep:
        training_iter = TRAINING_ITERATIONS
        if do_fmnist:
            classification_network = factory.createClassificationNetwork(
                [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER])
        if do_chess:
            classification_network = factory.createClassificationNetwork(
                [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER])
        oa = BatchBackPropagationTrainer(data_set, classification_network,
                                         measure, RPROPUpdateRule())
        name = "BP"
        train(oa, classification_network, name, train_instances, measure,
              training_iter, test_instances, True)
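# Note: the sweeps above read grid globals (sa_temp, sa_cooling, ga_pop,
# ga_prop_mate, ga_prop_mutate) that this snippet does not define. A hypothetical
# sketch of such grids; itertools.product visits every combination:
from itertools import product

sa_temp = [1E9, 1E11, 1E13]
sa_cooling = [.8, .9, .95]
ga_pop = [100, 200]
ga_prop_mate = [.5, .75]
ga_prop_mutate = [.05, .1]

for temp, cooling in product(sa_temp, sa_cooling):  # 3 * 3 = 9 SA configurations
    print temp, cooling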
Example #21
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, N)
    start = time.time()
    fit.train()
    end = time.time()
    training_time = end - start
    print "RHC: " + str(ef.value(rhc.getOptimal()))
    OUTFILE = "%s%s.csv" % (OUTFILE_BASE, "RHC")
    with open(OUTFILE, 'a+') as f:
        f.write("%d,%f,%f\n" % (N, training_time, ef.value(rhc.getOptimal())))

    sa = SimulatedAnnealing(1E11, .95, hcp)
    fit = FixedIterationTrainer(sa, N)
    start = time.time()
    fit.train()
    end = time.time()
    training_time = end - start
    print "SA: " + str(ef.value(sa.getOptimal()))
    OUTFILE = "%s%s.csv" % (OUTFILE_BASE, "SA")
    with open(OUTFILE, 'a+') as f:
        f.write("%d,%f,%f\n" % (N, training_time, ef.value(sa.getOptimal())))

    ga = StandardGeneticAlgorithm(200, 100, 10, gap)
    fit = FixedIterationTrainer(ga, N)
    start = time.time()
    fit.train()
    end = time.time()
Example #22
def main():
    N = 200
    tempDenom = 5
    T = N / tempDenom
    fill = [2] * N
    ranges = array('i', fill)
    iterations = 2000
    gaIters = 1000
    mimicIters = 1000
    gaPop = 200
    gaMate = 100
    gaMutate = 10
    mimicSamples = 200
    mimicToKeep = 20
    saTemp = 1E11
    saCooling = .95
    alg = 'all'
    run = 0
    settings = []

    try:
        opts, args = getopt.getopt(sys.argv[1:], "ahrsgmn:N:t:i:d:", ["gaIters=", "mimicIters=", "gaPop=", "gaMate=", "gaMutate=", "mimicSamples=", "mimicToKeep=", "saTemp=", "saCooling="])
    except:
        print 'fourpeaks.py -i <iterations> -N <N> -t <T> -d <tempDenom>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'fourpeaks.py -i <iterations> -N <N> -t <T> -d <tempDenom>'
            sys.exit(1)
        elif opt == '-i':
            iterations = int(arg)
        elif opt == '-N':
            N = int(arg)
        elif opt == '-t':
            T = float(arg)
        elif opt == '-d':
            tempDenom = int(arg)
        elif opt == '-r':
            alg = 'RHC'
        elif opt == '-a':
            alg = 'all'
        elif opt == '-s':
            alg = 'SA'
        elif opt == '-g':
            alg = 'GA'
        elif opt == '-m':
            alg = 'MIMIC'
        elif opt == '--gaPop':
            gaPop = int(arg)
        elif opt == '--gaMate':
            gaMate = int(arg)
        elif opt == '--gaMutate':
            gaMutate = int(arg)
        elif opt == '--mimicSamples':
            mimicSamples = int(arg)
        elif opt == '--mimicToKeep':
            mimicToKeep = int(arg)
        elif opt == '--saTemp':
            saTemp = float(arg)
        elif opt == '--saCooling':
            saCooling = float(arg)
        elif opt == '--gaIters':
            gaIters = int(arg)
        elif opt == '--mimicIters':
            mimicIters = int(arg)
        elif opt == '-n':
            run = int(arg)


    vars = {
        'N': N,
        'tempDenom': tempDenom,
        'T': T,
        'fill': fill,
        'ranges': ranges,
        'iterations': iterations,
        'gaIters': gaIters,
        'mimicIters': mimicIters,
        'gaPop': gaPop,
        'gaMate': gaMate,
        'gaMutate': gaMutate,
        'mimicSamples': mimicSamples,
        'mimicToKeep': mimicToKeep,
        'saTemp': saTemp,
        'saCooling': saCooling,
        'alg': alg,
        'run': run
    }

    settings = getSettings(alg, settings, vars)

    T = N / tempDenom
    fill = [2] * N
    ranges = array('i', fill)

    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    if alg == 'RHC' or alg == 'all':
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, iterations)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(rhc.getOptimal()))
        rows.append(row)
        print "RHC: " + str(ef.value(rhc.getOptimal()))
        output2('4Peaks', 'RHC', rows, settings)
        rows = []
        buildFooter("4Peaks", "RHC", rows, settings),
        outputFooter("4Peaks", "RHC", rows,   settings)

    if alg == 'SA' or alg == 'all':
        sa = SimulatedAnnealing(saTemp, saCooling, hcp)
        fit = FixedIterationTrainer(sa, iterations)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(sa.getOptimal()))
        rows.append(row)
        print "SA: " + str(ef.value(sa.getOptimal()))
        output2('4Peaks', 'SA', rows, settings)
        rows = []
        buildFooter("4Peaks", "SA", rows, settings)
        outputFooter("4Peaks", "SA", rows, settings)

    if alg == 'GA' or alg == 'all':
        ga = StandardGeneticAlgorithm(gaPop, gaMate, gaMutate, gap)
        fit = FixedIterationTrainer(ga, gaIters)
        fit.train()
        print "GA: " + str(ef.value(ga.getOptimal()))
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(ga.getOptimal()))
        rows.append(row)
        output2('4Peaks', 'GA', rows, settings)
        rows = []
        buildFooter("4Peaks", "GA", rows, settings)
        outputFooter("4Peaks", "GA", rows , settings)

    if alg == 'MIMIC' or alg == 'all':
        mimic = MIMIC(mimicSamples, mimicToKeep, pop)
        fit = FixedIterationTrainer(mimic, mimicIters)
        fit.train()
        print "MIMIC: " + str(ef.value(mimic.getOptimal()))
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(mimic.getOptimal()))
        rows.append(row)
        output2('4Peaks', 'MIMIC', rows, settings)
        rows = []
        buildFooter("4Peaks", "GA", rows, settings)
        outputFooter("4Peaks", "MIMIC", rows, settings)
Example #23

for item in x:
    stdout.write("\nRunning Four Peaks with %d iterations...\n" % item)

    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, item)
    start = time.time()
    fit.train()
    end = time.time()
    value = ef.value(rhc.getOptimal())
    stdout.write("RHC took %0.03f seconds and found value %d\n" % (end -
                                                                  start, value))
    optimal_value['RHC'].append(value)

    sa = SimulatedAnnealing(1E11, .95, hcp)
    fit = FixedIterationTrainer(sa, item)
    start = time.time()
    fit.train()
    end = time.time()
    value = ef.value(sa.getOptimal())
    stdout.write("SA took %0.03f seconds and found value %d\n" % (end -
                                                                   start, value))
    optimal_value['SA'].append(value)

    ga = StandardGeneticAlgorithm(200, 100, 20, gap)
    fit = FixedIterationTrainer(ga, item)
    start = time.time()
    fit.train()
    end = time.time()
    value = ef.value(ga.getOptimal())
Example #24
for i in range(len(iteration_list)):
    iteration = iteration_list[i]
    sa_total = 0
    sa_time = 0

    for x in range(runs):
        ranges = array('i', [2] * N)
        fitness = TravelingSalesmanRouteEvaluationFunction(points)
        discrete_dist = DiscretePermutationDistribution(N)
        discrete_neighbor = SwapNeighbor()
        discrete_mutation = SwapMutation()
        discrete_dependency = DiscreteDependencyTree(.1, ranges)
        hill_problem = GHC(fitness, discrete_dist, discrete_neighbor)

        start = time.clock()
        sa_problem = SA(1E11, .95, hill_problem)
        fit = FixedIterationTrainer(sa_problem, iteration)
        fit.train()
        end = time.clock()
        full_time = end - start
        sa_total += fitness.value(sa_problem.getOptimal())
        sa_time += full_time

    sa_total_avg = sa_total / runs
    sa_time_avg = sa_time / runs

    data = '{},{},{}\n'.format(iteration, sa_total_avg, sa_time_avg)
    print(data)
    with open(output_directory, 'a') as f:
        f.write(data)
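# Note: one caveat on the timing above: in Python 2, time.clock() measures CPU time
# on Unix but wall time on Windows (and it is removed in Python 3). A hedged
# wall-clock variant of the same pattern (timed_train is our name):
import time

def timed_train(trainer):
    start = time.time()          # time.time() is wall-clock on every platform
    trainer.train()
    return time.time() - start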
Example #25
def main():

    iterations = 200000
    alg = 'all'
    gaPop = 2000
    gaMate = 1500
    gaMutate = 250
    mimicSamples = 500
    mimicToKeep = 100
    saTemp = 1E12
    saCooling = .999
    gaIters = 1000
    mimicIters = 1000
    run = 0
    settings = []

    try:
        opts, args = getopt.getopt(sys.argv[1:], "ahrsgmn:i:", ["gaIters=", "mimicIters=", "gaPop=", "gaMate=", "gaMutate=", "mimicSamples=", "mimicToKeep=", "saTemp=", "saCooling="])
    except:
        print 'travelingsalesman.py -i <iterations>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'travelingsalesman.py -i <iterations>'
            sys.exit(1)
        elif opt == '-i':
            if int(arg) < 1:
                print 'Iterations must be greater than 0'
                sys.exit(2)
            iterations = int(arg)
        elif opt == '-a':
            alg = 'all'
        elif opt == '-r':
            alg = 'RHC'
        elif opt == '-s':
            alg = 'SA'
        elif opt == '-g':
            alg = 'GA'
        elif opt == '-m':
            alg = 'MIMIC'
        elif opt == '--gaPop':
            if int(arg) < 1:
                print 'Population must be greater than 0'
                sys.exit(2)
            gaPop = int(arg)
        elif opt == '--gaMate':
            if int(arg) < 1:
                print 'Mating must be greater than 0'
                sys.exit(2)
            gaMate = int(arg)
        elif opt == '--gaMutate':
            if int(arg) < 1:
                print 'Mutators must be greater than 0'
                sys.exit(2)
            gaMutate = int(arg)
        elif opt == '--mimicSamples':
            if int(arg) < 1:
                print 'MIMIC samples must be greater than 0'
                sys.exit(2)
            mimicSamples = int(arg)
        elif opt == '--mimicToKeep':
            if int(arg) < 1:
                print 'MIMIC to keep must be greater than 0'
                sys.exit(2)
            mimicToKeep = int(arg)
        elif opt == '--saTemp':
            saTemp = float(arg)
        elif opt == '--saCooling':
            saCooling = float(arg)
        elif opt == '-n':
            run = int(arg)
        elif opt == '--gaIters':
            if int(arg) < 1:
                print 'GA Iterations must be greater than 0'
                sys.exit(2)
            gaIters = int(arg)
        elif opt == '--mimicIters':
            if int(arg) < 1:
                print 'MIMIC Iterations must be greater than 0'
                sys.exit(2)
            mimicIters = int(arg)

    vars = {
            'iterations' : iterations,
            'alg' : alg,
            'gaPop' : gaPop,
            'gaMate' : gaMate,
            'gaMutate' : gaMutate,
            'mimicSamples' : mimicSamples,
            'mimicToKeep' : mimicToKeep,
            'saTemp' : saTemp,
            'saCooling' : saCooling,
            'gaIters' : gaIters,
            'mimicIters' : mimicIters,
            'run' : run
            }

    settings = getSettings(alg, settings, vars)
    if gaPop < gaMate or gaPop < gaMutate or gaMate < gaMutate:
        pebkac({gaPop: 'total population', gaMate: 'mating population', gaMutate: 'mutating population'}, alg, 'total population', settings)
    if mimicSamples < mimicToKeep:
        pebkac({mimicSamples: 'mimic samples', mimicToKeep: 'mimic to keep'}, alg, 'mimic samples', settings)
    prob = 'Traveling Sales Problem'
    invDist = {}
    cities = CityList()
    N = len(cities)
    #random = Random()
    points = [[0 for x in xrange(2)] for x in xrange(N)]
    for i in range(0, len(points)):
        coords = cities.getCoords(i)
        points[i][0] = coords[0]
        points[i][1] = coords[1]
    ef = TravelingSalesmanRouteEvaluationFunction(points)
    odd = DiscretePermutationDistribution(N)
    nf = SwapNeighbor()
    mf = SwapMutation()
    cf = TravelingSalesmanCrossOver(ef)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    rows = []


    if alg == 'RHC' or alg == 'all':
        print '\n----------------------------------'
        print 'Using Random Hill Climbing'
        for label, setting in settings:
            print label + ":" + str(setting)
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, iterations)
        fit.train()
        path = []
        for x in range(0,N):
            path.append(rhc.getOptimal().getDiscrete(x))
        output(prob, 'RHC', path, points, settings)
        rows = []
        row = []
        row.append("Inverse of Distance")
        row.append(ef.value(rhc.getOptimal()))
        rows.append(row)
        invDist['RHC'] = ef.value(rhc.getOptimal())
        buildFooter(prob, 'RHC', rows, settings)
        outputFooter(prob, 'RHC', rows, settings)


    if alg == 'SA' or alg == 'all':
        print '\n----------------------------------'
        print 'Using Simulated Annealing'
        for label, setting in settings:
            print label + ":" + str(setting)
        sa = SimulatedAnnealing(saTemp, saCooling, hcp)
        fit = FixedIterationTrainer(sa, iterations)
        fit.train()
        path = []
        for x in range(0,N):
            path.append(sa.getOptimal().getDiscrete(x))
        output(prob, 'SA', path, points, settings)
        rows = []
        row = []
        row.append("Inverse of Distance")
        row.append(ef.value(sa.getOptimal()))
        rows.append(row)
        invDist['SA'] = ef.value(sa.getOptimal())
        buildFooter(prob, 'SA', rows, settings)
        outputFooter(prob, 'SA', rows, settings)

    if alg == 'GA' or alg == 'all':
        print '\n----------------------------------'
        print 'Using Genetic Algorithm'
        for label, setting in settings:
            print label + ":" + str(setting)
        ga = StandardGeneticAlgorithm(gaPop, gaMate, gaMutate, gap)
        fit = FixedIterationTrainer(ga, gaIters)
        fit.train()
        path = []
        for x in range(0,N):
            path.append(ga.getOptimal().getDiscrete(x))
        output(prob, 'GA', path, points, settings)
        rows = []
        row = []
        row.append("Inverse of Distance")
        row.append(ef.value(ga.getOptimal()))
        rows.append(row)
        invDist['GA'] = ef.value(ga.getOptimal())
        buildFooter(prob, 'GA', rows, settings)
        outputFooter(prob, 'GA', rows, settings)

    if alg == 'MIMIC' or alg == 'all':
        print '\n----------------------------------'
        print 'Using MIMIC'
        for label, setting in settings:
            print label + ":" + str(setting)
        # for MIMIC we use a sort encoding
        ef = TravelingSalesmanSortEvaluationFunction(points)
        fill = [N] * N
        ranges = array('i', fill)
        odd = DiscreteUniformDistribution(ranges)
        df = DiscreteDependencyTree(.1, ranges)
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
        mimic = MIMIC(mimicSamples, mimicToKeep, pop)
        fit = FixedIterationTrainer(mimic, mimicIters)
        fit.train()
        path = []
        optimal = mimic.getOptimal()
        fill = [0] * optimal.size()
        ddata = array('d', fill)
        for i in range(0,len(ddata)):
            ddata[i] = optimal.getContinuous(i)
        order = ABAGAILArrays.indices(optimal.size())
        ABAGAILArrays.quicksort(ddata, order)
        output(prob, 'MIMIC', order, points, settings)
        rows = []
        row = []
        row.append("Inverse of Distance")
        row.append(ef.value(mimic.getOptimal()))
        rows.append(row)
        invDist['MIMIC'] = ef.value(mimic.getOptimal())
        buildFooter(prob, 'MIMIC', rows, settings)
        outputFooter(prob, 'MIMIC', rows, settings)


    maxn = max(len(key) for key in invDist)
    maxd = max(len(str(invDist[key])) for key in invDist)
    print "Results"
    for result in invDist:
        print "%-*s %s %-*s" % (len('Best Alg') + 2, result, ':', maxd, invDist[result])
    if alg == 'all':
        print "%-*s %s %-*s" % (len('Best Alg') + 2, 'Best Alg', ':', maxd, max(invDist.iterkeys(), key=(lambda key: invDist[key])))
    print '----------------------------------'
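# Note: the MIMIC branch above decodes the sort encoding by ranking the continuous
# components: city i's position in the tour is the rank of component i, which is
# what the ABAGAILArrays quicksort on (ddata, order) computes. A pure-Python sketch
# of the same argsort decode (decode_sort_encoding is our name):
def decode_sort_encoding(values):
    # indices of `values` in ascending order = visiting order of the cities
    return sorted(range(len(values)), key=lambda i: values[i])

print decode_sort_encoding([0.7, 0.1, 0.4])   # [1, 2, 0]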
def main():
    trainingInstances, testingInstances = initialize_instances()
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()

    data_set = DataSet(trainingInstances)

    networks = []  # BackPropagationNetwork
    nnop = []  # NeuralNetworkOptimizationProblem
    oa = []  # OptimizationAlgorithm
    oa_names = ["RHC", "SA_15", "SA_35", "SA_55", "SA_75", "SA_95"]
    #oa_names=["GA_100_50_5", "GA_200_50_5", "GA_100_50_10", "GA_200_50_10", "GA_100_100_5", "GA_200_100_5", "GA_100_100_10", "GA_200_100_10"]
    #oa_names=["GA_200_100_5", "GA_100_100_10", "GA_200_100_10"]

    for name in oa_names:
        #use RELU activation function
        classification_network = factory.createClassificationNetwork(
            [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER], ReLU())
        networks.append(classification_network)
        nnop.append(
            NeuralNetworkOptimizationProblem(data_set, classification_network,
                                             measure))

    oa.append(RandomizedHillClimbing(nnop[0]))
    oa.append(SimulatedAnnealing(1E11, .15, nnop[1]))
    oa.append(SimulatedAnnealing(1E11, .35, nnop[2]))
    oa.append(SimulatedAnnealing(1E11, .55, nnop[3]))
    oa.append(SimulatedAnnealing(1E11, .75, nnop[4]))
    oa.append(SimulatedAnnealing(1E11, .95, nnop[5]))

    # oa.append(StandardGeneticAlgorithm(100, 50, 5, nnop[0]))
    # oa.append(StandardGeneticAlgorithm(200, 50, 5, nnop[1]))
    # oa.append(StandardGeneticAlgorithm(100, 50, 10, nnop[2]))
    # oa.append(StandardGeneticAlgorithm(200, 50, 10, nnop[3]))
    # oa.append(StandardGeneticAlgorithm(100, 100, 5, nnop[4]))
    #oa.append(StandardGeneticAlgorithm(200, 100, 5, nnop[0]))
    #oa.append(StandardGeneticAlgorithm(100, 100, 10, nnop[1]))
    #oa.append(StandardGeneticAlgorithm(200, 100, 10, nnop[2]))

    with open('nn_spam_results_RHC_SA.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        for i, name in enumerate(oa_names):
            results = ''

            start = time.time()
            traincorrect = 0
            trainincorrect = 0
            testcorrect = 0
            testincorrect = 0

            train(oa[i], networks[i], oa_names[i], trainingInstances,
                  testingInstances, measure)
            end = time.time()
            training_time = end - start

            optimal_instance = oa[i].getOptimal()
            networks[i].setWeights(optimal_instance.getData())

            start = time.time()
            for instance in trainingInstances:
                networks[i].setInputValues(instance.getData())
                networks[i].run()

                actual = instance.getLabel().getContinuous()
                predicted = networks[i].getOutputValues().get(0)

                if abs(predicted - actual) < 0.5:
                    traincorrect += 1
                else:
                    trainincorrect += 1

            for instance in testingInstances:
                networks[i].setInputValues(instance.getData())
                networks[i].run()

                actual = instance.getLabel().getContinuous()
                predicted = networks[i].getOutputValues().get(0)

                if abs(predicted - actual) < 0.5:
                    testcorrect += 1
                else:
                    testincorrect += 1

            end = time.time()
            testing_time = end - start

            results += "\nResults for %s: \nCorrectly classified %d training instances." % (
                name, traincorrect)
            results += "\nIncorrectly classified %d instances.\nPercent correctly classified: %0.03f%%" % (
                trainincorrect, float(traincorrect) /
                (traincorrect + trainincorrect) * 100.0)
            results += "\nResults for %s: \nCorrectly classified %d testing instances." % (
                name, testcorrect)
            results += "\nIncorrectly classified %d instances.\nPercent correctly classified: %0.03f%%" % (
                testincorrect, float(testcorrect) /
                (testcorrect + testincorrect) * 100.0)
            results += "\nTraining time: %0.03f seconds" % (training_time, )
            results += "\nTesting time: %0.03f seconds\n" % (testing_time, )

            print results
            writer.writerow([results])
            writer.writerow('')
Example #27
ef = ContinuousPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 200000)
fit.train()
print "RHC: " + str(ef.value(rhc.getOptimal()))

sa = SimulatedAnnealing(1E11, .95, hcp)
fit = FixedIterationTrainer(sa, 200000)
fit.train()
print "SA: " + str(ef.value(sa.getOptimal()))

ga = StandardGeneticAlgorithm(200, 100, 10, gap)
fit = FixedIterationTrainer(ga, 1000)
fit.train()
print "GA: " + str(ef.value(ga.getOptimal()))

mimic = MIMIC(200, 20, pop)
fit = FixedIterationTrainer(mimic, 1000)
fit.train()
print "MIMIC: " + str(ef.value(mimic.getOptimal()))

Example #28
calls = []
results = []
for _ in range(runs):
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, iters)
    fitness = fit.train()
    results.append(ef.value(rhc.getOptimal()))
    calls.append(ef.getTotalCalls())    
    ef.clearCount()
print "RHC, average results , " + str(sum(results)/float(runs)) + ", fourpeaks%d.txt" % N
print "RHC, average feval calls , " + str(sum(calls)/float(runs)) + ", fourpeaks%d.txt" % N
t1 = time.time() - t0
print "RHC, average time , " + str(float(t1)/runs) + ", fourpeaks%d.txt" % N


t0 = time.time()
calls = []
results = []
for _ in range(runs):
    sa = SimulatedAnnealing(1e10, .95, hcp)
    fit = FixedIterationTrainer(sa, iters)
    fitness = fit.train()
    results.append(ef.value(sa.getOptimal()))
    calls.append(ef.getTotalCalls())
    ef.clearCount()    
print "SA, average results , " + str(sum(results)/float(runs)) + ", fourpeaks%d.txt" % N
print "SA, average feval calls , " + str(sum(calls)/float(runs)) + ", fourpeaks%d.txt" % N
t1 = time.time() - t0
print "SA, average time , " + str(t1/float(runs)) + ", fourpeaks%d.txt" % N

Example #29
def main():

    # The number of items
    NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50

    iterations = 20000
    gaIters = 1000
    mimicIters = 1000
    gaPop = 200
    gaMate = 150
    gaMutate = 25
    mimicSamples = 200
    mimicToKeep = 100
    saTemp = 100
    saCooling = .95
    alg = 'all'
    run = 0
    settings = []

    try:
        opts, args = getopt.getopt(sys.argv[1:], "ahrsgmn:N:c:w:v:i:", ["gaIters=", "mimicIters=","gaPop=", "gaMate=", "gaMutate=", "mimicSamples=", "mimicToKeep=", "saTemp=", "saCooling="])
    except:
        print 'knapsack.py -i <iterations> -n <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'knapsack.py -i <iterations> -n <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>'
            sys.exit(1)
        elif opt == '-i':
            iterations = int(arg)
        elif opt == '-N':
            NUM_ITEMS = int(arg)
        elif opt == '-c':
            COPIES_EACH = int(arg)
        elif opt == '-w':
            MAX_WEIGHT = int(arg)
        elif opt == '-v':
            MAX_VOLUME = int(arg)
        elif opt == '-n':
            run = int(arg)
        elif opt == '-r':
            alg = 'RHC'
        elif opt == '-s':
            alg = 'SA'
        elif opt == '-g':
            alg = 'GA'
        elif opt == '-m':
            alg = 'MIMIC'
        elif opt == '-a':
            alg = 'all'
        elif opt == '--gaPop':
            gaPop = int(arg)
        elif opt == '--gaMate':
            gaMate = int(arg)
        elif opt == '--gaMutate':
            gaMutate = int(arg)
        elif opt == '--mimicSamples':
            mimicSamples = int(arg)
        elif opt == '--mimicToKeep':
            mimicToKeep = int(arg)
        elif opt == '--saTemp':
            saTemp = float(arg)
        elif opt == '--saCooling':
            saCooling = float(arg)
        elif opt == '--gaIters':
            gaIters = int(arg)
        elif opt == '--mimicIters':
            mimicIters = int(arg)
    vars = {
        'NUM_ITEMS': NUM_ITEMS,
        'COPIES_EACH': COPIES_EACH,
        'MAX_WEIGHT': MAX_WEIGHT,
        'MAX_VOLUME': MAX_VOLUME,
        'iterations': iterations,
        'gaIters': gaIters,
        'mimicIters': mimicIters,
        'gaPop': gaPop,
        'gaMate': gaMate,
        'gaMutate': gaMutate,
        'mimicSamples': mimicSamples,
        'mimicToKeep': mimicToKeep,
        'saTemp': saTemp,
        'saCooling': saCooling,
        'alg': alg,
        'run': run
    }

    settings = getSettings(alg, settings, vars)
    # Random number generator
    random = Random()

    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME


    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    if alg == 'RHC' or alg == 'all':
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, iterations)
        fit.train()
        print "RHC: " + str(ef.value(rhc.getOptimal()))
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(str(ef.value(rhc.getOptimal())))
        rows.append(row)
        output2('Knapsack', 'RHC', rows, settings)
        rows = []
        buildFooter("Knapsack", "RHC", rows, settings)
        outputFooter("Knapsack", "RHC", rows , settings)
    if alg == 'SA' or alg == 'all':
        sa = SimulatedAnnealing(saTemp, saCooling, hcp)
        fit = FixedIterationTrainer(sa, iterations)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(sa.getOptimal()))
        rows.append(row)
        print "SA: " + str(ef.value(sa.getOptimal()))
        output2('Knapsack', 'SA', rows, settings)
        rows = []
        buildFooter("Knapsack", "SA", rows, settings)
        outputFooter("Knapsack", "SA", rows, settings)
    if alg == 'GA' or alg == 'all':
        ga = StandardGeneticAlgorithm(gaPop, gaMate, gaMutate, gap)
        fit = FixedIterationTrainer(ga, gaIters)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(ga.getOptimal()))
        rows.append(row)
        print "GA: " + str(ef.value(ga.getOptimal()))
        output2('Knapsack', 'GA', rows, settings)
        rows = []
        buildFooter("Knapsack", "GA", rows, settings)
        outputFooter("Knapsack", "GA", rows, settings)
    if alg == 'MIMIC' or alg == 'all':
        mimic = MIMIC(mimicSamples, mimicToKeep, pop)
        fit = FixedIterationTrainer(mimic, mimicIters)
        fit.train()
        print "MIMIC: " + str(ef.value(mimic.getOptimal()))
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(mimic.getOptimal()))
        rows.append(row)
        output2('Knapsack', 'MIMIC', rows, settings)
        rows = []
        buildFooter("Knapsack", "MIMIC", rows, settings)
        outputFooter("Knapsack", "MIMIC", rows , settings)
Example #30
def run_all_2(N=200, T=40, fout=None):
    problem = 'fourpeaks'
    # N=200
    # T=N/10
    maxEpochs = 10**6
    maxTime = 300  #5 minutes
    fill = [2] * N
    ranges = array('i', fill)

    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    # mf = SwapMutation()
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    def run_algo(alg, fit, label, difficulty, iters):
        trainTimes = [0.]
        trainTime = []
        scoreChange = [0.]
        stuckCount = 10**3
        prev = 0.
        for epoch in range(0, maxEpochs, 1):

            st = time.clock()
            fit.train()
            et = time.clock()

            trainTimes.append(trainTimes[-1] + (et - st))
            trainTime.append((et - st))
            rollingMean = 10
            avgTime = (math.fsum(trainTime[-rollingMean:]) /
                       float(rollingMean))

            score = ef.value(alg.getOptimal())

            # trialString = '{}-{}-{}-{}'.format(label,score,epoch,trainTimes[-1])
            trialData = [
                problem, difficulty, label, score, epoch, trainTimes[-1],
                avgTime, iters
            ]
            # print(trialData)
            # fout.writerow(trialData)
            # print(trialData)
            print(trialData, max(scoreChange))
            # print(max(scoreChange))
            optimum = (difficulty - 1 - T) + difficulty  # four peaks optimum: 2N - T - 1
            if score >= optimum: break

            scoreChange.append(abs(score - prev))
            prev = score
            scoreChange = scoreChange[-stuckCount:]
            # print(scoreChange)
            if max(scoreChange) == 0: break

            if trainTimes[-1] > maxTime: break

        # print(trialData)
        fout.writerow(trialData)

    iters = 1000
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, iters)
    run_algo(rhc, fit, 'RHC', N, iters)

    iters = 1000
    startTemp = 1E10
    coolingFactor = .95
    sa = SimulatedAnnealing(startTemp, coolingFactor, hcp)
    fit = FixedIterationTrainer(sa, iters)
    run_algo(sa, fit, 'SA', N, iters)

    iters = 10
    population = 300
    mates = 100
    mutations = 50
    ga = StandardGeneticAlgorithm(population, mates, mutations, gap)
    fit = FixedIterationTrainer(ga, iters)
    run_algo(ga, fit, 'GA', N, iters)

    iters = 10
    samples = 200
    keep = 20
    mimic = MIMIC(samples, keep, pop)
    fit = FixedIterationTrainer(mimic, iters)
    run_algo(mimic, fit, 'MIMIC', N, iters)
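# Note: run_all_2 writes its final row through fout, which it treats as a csv.writer.
# A hedged driver sketch (the output file name and the N grid are ours):
import csv

with open('fourpeaks_runs.csv', 'wb') as f:   # 'wb' for Python 2 csv output
    fout = csv.writer(f)
    for N in [50, 100, 200]:
        run_all_2(N=N, T=N / 10, fout=fout)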