# Adapted from https://github.com/JonathanTay/CS-7641-assignment-2/blob/master/tsp.py
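# Imports needed to run this as a Jython script against ABAGAIL. The class paths
# follow ABAGAIL's bundled jython examples; the ABAGAIL.jar location is an
# assumption and may need adjusting for your setup.
import sys
import time
from array import array
from itertools import product  # only needed by the commented-out MIMIC sweep below

sys.path.append("./ABAGAIL.jar")

from java.util import Random

import dist.DiscreteDependencyTree as DiscreteDependencyTree
import dist.DiscretePermutationDistribution as DiscretePermutationDistribution
import dist.DiscreteUniformDistribution as DiscreteUniformDistribution
import opt.GenericHillClimbingProblem as GenericHillClimbingProblem
import opt.RandomizedHillClimbing as RandomizedHillClimbing
import opt.SimulatedAnnealing as SimulatedAnnealing
import opt.SwapNeighbor as SwapNeighbor
import opt.example.TravelingSalesmanCrossOver as TravelingSalesmanCrossOver
import opt.example.TravelingSalesmanRouteEvaluationFunction as TravelingSalesmanRouteEvaluationFunction
import opt.example.TravelingSalesmanSortEvaluationFunction as TravelingSalesmanSortEvaluationFunction
import opt.ga.GenericGeneticAlgorithmProblem as GenericGeneticAlgorithmProblem
import opt.ga.StandardGeneticAlgorithm as StandardGeneticAlgorithm
import opt.ga.SwapMutation as SwapMutation
import opt.prob.GenericProbabilisticOptimizationProblem as GenericProbabilisticOptimizationProblem
import opt.prob.MIMIC as MIMIC
import shared.FixedIterationTrainer as FixedIterationTrainer
import util.ABAGAILArrays as ABAGAILArrays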

# N is the number of points (cities) in the TSP instance
N = 100
random = Random()
maxIters = 3001
numTrials = 5

points = [[0 for x in xrange(2)] for x in xrange(N)]
for i in range(0, len(points)):
    points[i][0] = random.nextDouble()
    points[i][1] = random.nextDouble()
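# CSV log template; OUTPUT_DIRECTORY is expected to be defined elsewhere (not shown in this excerpt)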
outfile = OUTPUT_DIRECTORY + '/TSP/TSP_{}_{}_LOG.csv'
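
# Shared TSP setup: inverse-route-length evaluation function, permutation
# distribution, swap neighbor/mutation, and a TSP-specific crossover for the GA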
ef = TravelingSalesmanRouteEvaluationFunction(points)
odd = DiscretePermutationDistribution(N)
nf = SwapNeighbor()
mf = SwapMutation()
cf = TravelingSalesmanCrossOver(ef)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

# MIMIC
fill = [N] * N
ranges = array('i', fill)
ef = TravelingSalesmanRouteEvaluationFunction(points)
odd = DiscreteUniformDistribution(ranges)

# for t in range(numTrials):
#     for samples, keep, m in product([100], [50], [0.1, 0.3, 0.5, 0.7, 0.9]):
#         fname = outfile.format('MIMIC{}_{}_{}'.format(samples, keep, m), str(t + 1))
#         df = DiscreteDependencyTree(m, ranges)
def travelingsalesmanfunc(N, iterations):
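    """For each iteration budget in `iterations`, run RHC, SA, GA, and MIMIC on a
    random N-point TSP instance and record, per algorithm, the best fitness
    (inverse route length), wall-clock time, and number of function evaluations.
    Returns [optimalOut, timeOut, evalsOut], one row per budget."""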

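    # Per-algorithm multipliers on the iteration budget; GA and MIMIC get far
    # fewer raw iterations because each of their iterations is much more expensive.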
    rhcMult = 1500
    saMult = 1500
    gaMult = 1
    mimicMult = 3

    random = Random()

    points = [[0 for x in xrange(2)] for x in xrange(N)]
    for i in range(0, len(points)):
        points[i][0] = random.nextDouble()
        points[i][1] = random.nextDouble()

    optimalOut = []
    timeOut = []
    evalsOut = []

    for niter in iterations:

        ef = TravelingSalesmanRouteEvaluationFunction(points)
        odd = DiscretePermutationDistribution(N)
        nf = SwapNeighbor()
        mf = SwapMutation()
        cf = TravelingSalesmanCrossOver(ef)
        hcp = GenericHillClimbingProblem(ef, odd, nf)
        gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

        iterOptimalOut = [N, niter]
        iterTimeOut = [N, niter]
        iterEvals = [N, niter]

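        # Randomized hill climbing over the swap neighborhood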
        start = time.time()
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, niter * rhcMult)
        fit.train()
        end = time.time()
        rhcOptimal = ef.value(rhc.getOptimal())
        rhcTime = end - start
        print "RHC Inverse of Distance optimum: " + str(rhcOptimal)
        print "RHC time: " + str(rhcTime)
        #print "RHC Inverse of Distance: " + str(ef.value(rhc.getOptimal()))
        print "Route:"
        path = []
        for x in range(0, N):
            path.append(rhc.getOptimal().getDiscrete(x))
        print path
        iterOptimalOut.append(rhcOptimal)
        iterTimeOut.append(rhcTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

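        # Simulated annealing: initial temperature 1E12, cooling rate 0.999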
        start = time.time()
        sa = SimulatedAnnealing(1E12, .999, hcp)
        fit = FixedIterationTrainer(sa, niter * saMult)
        fit.train()
        end = time.time()
        saOptimal = ef.value(sa.getOptimal())
        saTime = end - start
        print "SA Inverse of Distance optimum: " + str(saOptimal)
        print "SA time: " + str(saTime)
        #print "SA Inverse of Distance: " + str(ef.value(sa.getOptimal()))
        print "Route:"
        path = []
        for x in range(0, N):
            path.append(sa.getOptimal().getDiscrete(x))
        print path
        iterOptimalOut.append(saOptimal)
        iterTimeOut.append(saTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

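        # Standard GA: population 2000, 1500 mated and 250 mutated per generation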
        start = time.time()
        ga = StandardGeneticAlgorithm(2000, 1500, 250, gap)
        fit = FixedIterationTrainer(ga, niter * gaMult)
        fit.train()
        end = time.time()
        gaOptimal = ef.value(ga.getOptimal())
        gaTime = end - start
        print "GA Inverse of Distance optimum: " + str(gaOptimal)
        print "GA time: " + str(gaTime)
        #print "GA Inverse of Distance: " + str(ef.value(ga.getOptimal()))
        print "Route:"
        path = []
        for x in range(0, N):
            path.append(ga.getOptimal().getDiscrete(x))
        print path
        iterOptimalOut.append(gaOptimal)
        iterTimeOut.append(gaTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        # For MIMIC we use a sort encoding of the route (timing starts after problem setup)
        ef = TravelingSalesmanSortEvaluationFunction(points)
        fill = [N] * N
        ranges = array('i', fill)
        odd = DiscreteUniformDistribution(ranges)
        df = DiscreteDependencyTree(.1, ranges)
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

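        # MIMIC: 500 samples per iteration, keeping the best 100 to fit the dependency tree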
        start = time.time()
        mimic = MIMIC(500, 100, pop)
        fit = FixedIterationTrainer(mimic, niter * mimicMult)
        fit.train()
        end = time.time()
        mimicOptimal = ef.value(mimic.getOptimal())
        mimicTime = end - start
        print "MIMIC Inverse of Distance optimum: " + str(mimicOptimal)
        print "MIMIC time: " + str(mimicTime)
        #print "MIMIC Inverse of Distance: " + str(ef.value(mimic.getOptimal()))
        print "Route:"
        path = []
        optimal = mimic.getOptimal()
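        # Decode the sort encoding: sorting the continuous values gives the visit order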
        fill = [0] * optimal.size()
        ddata = array('d', fill)
        for i in range(0, len(ddata)):
            ddata[i] = optimal.getContinuous(i)
        order = ABAGAILArrays.indices(optimal.size())
        ABAGAILArrays.quicksort(ddata, order)
        print order
        iterOptimalOut.append(mimicOptimal)
        iterTimeOut.append(mimicTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        optimalOut.append(iterOptimalOut)
        timeOut.append(iterTimeOut)
        evalsOut.append(iterEvals)

    return [optimalOut, timeOut, evalsOut]
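
# Example usage (hypothetical iteration budgets):
#   optimal, times, evals = travelingsalesmanfunc(N, [10, 100, 500, 1000])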

# Example 3
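# This fragment assumes that points, N, iteration_list, and runs are defined
# earlier in its original script, and that GHC and RHC alias
# GenericHillClimbingProblem and RandomizedHillClimbing.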
# RHC
output_directory = "Results/Small/TSP_RHC.csv"  # CSV file for averaged RHC results
with open(output_directory, 'w') as f:
    f.write('iterations,fitness,time\n')

for i in range(len(iteration_list)):
    iteration = iteration_list[i]
    rhc_total = 0
    rhc_time = 0

    for x in range(runs):
        ranges = array('i', [2] * N)
        fitness = TravelingSalesmanRouteEvaluationFunction(points)
        discrete_dist = DiscretePermutationDistribution(N)
        discrete_neighbor = SwapNeighbor()
        discrete_mutation = SwapMutation()
        discrete_dependency = DiscreteDependencyTree(.1, ranges)
        hill_problem = GHC(fitness, discrete_dist, discrete_neighbor)
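        # ranges, discrete_mutation, and discrete_dependency are built here but
        # unused by RHC; they appear to be carried over from a shared template.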

        start = time.clock()
        rhc_problem = RHC(hill_problem)
        fit = FixedIterationTrainer(rhc_problem, iteration)
        fit.train()
        end = time.clock()
        full_time = end - start
        rhc_total += fitness.value(rhc_problem.getOptimal())
        rhc_time += full_time

    rhc_total_avg = rhc_total / runs
    rhc_time_avg = rhc_time / runs