Code Example #1
File: knapsack.py  Project: eigendreams/cs7641_hw2
def knapsack():

    # Random number generator
    random = Random()
    random.setSeed(0)
    NUM_ITEMS = 40  # The number of items
    COPIES_EACH = 4  # The number of copies each
    MAX_WEIGHT = 50  # The maximum weight for a single element
    MAX_VOLUME = 50  # The maximum volume for a single element
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4  # The volume of the knapsack

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)
    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME
    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()

    rhc_generic("KnSrhc50", ef, odd, nf, 1.0, 10000, 10, 5)
    sa_generic("KnSsa50", ef, odd, nf, 1.0, 10000, 10, 5,
               ([1E12, 1E6], [0.999, 0.99, 0.95]))
    ga_generic("KnSga50", ef, odd, mf, cf, 50.0, 10000, 10, 1,
               ([2000, 200], [0.5, 0.25], [0.25, 0.1, 0.02]))
    mimic_discrete("KnSmimic50", ef, odd, ranges, 300.0, 10000, 10, 1,
                   ([200], [100], [0.1, 0.5, 0.9]))

    print "KnS all done"
Code Example #2
import opt.ga.MutationFunction as MutationFunction
import opt.ga.StandardGeneticAlgorithm as StandardGeneticAlgorithm
import opt.ga.UniformCrossOver as UniformCrossOver
import opt.prob.GenericProbabilisticOptimizationProblem as GenericProbabilisticOptimizationProblem
import opt.prob.MIMIC as MIMIC
import opt.prob.ProbabilisticOptimizationProblem as ProbabilisticOptimizationProblem
import shared.FixedIterationTrainer as FixedIterationTrainer
import opt.example.CountOnesEvaluationFunction as CountOnesEvaluationFunction
import opt.ga.SingleCrossOver as SingleCrossOver
import opt.ga.DoubleCrossOver as DoubleCrossOver
from array import array
import sys

sf = SingleCrossOver()
uf = UniformCrossOver()
tpf = DoubleCrossOver()

# the size of the vector in question
try:
    N = int(sys.argv[1])
except:
    N = 100

try:
    ga_pop = int(sys.argv[2])
    #ga_pop = ga_pop*N
except:
    ga_pop = 200

try:
    co_type = int(sys.argv[3])
except:
    co_type = 0  # fallback assumed; the original snippet is truncated at this point
Code Example #3
def run_knapsack():
    # Random number generator
    random = Random()
    # The number of items
    NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME

    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    iters = [50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100000]
    num_repeats = 5

    rhc_results = []
    rhc_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            rhc = RandomizedHillClimbing(hcp)
            fit = FixedIterationTrainer(rhc, i)
            fit.train()
            end = time.time()
            rhc_results.append(ef.value(rhc.getOptimal()))
            rhc_times.append(end - start)
            #print "RHC: " + str(ef.value(rhc.getOptimal()))

    sa_results = []
    sa_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            sa = SimulatedAnnealing(100, .95, hcp)
            fit = FixedIterationTrainer(sa, i)
            fit.train()
            end = time.time()

            sa_results.append(ef.value(sa.getOptimal()))
            sa_times.append(end - start)
            #print "SA: " + str(ef.value(sa.getOptimal()))

    ga_results = []
    ga_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            ga = StandardGeneticAlgorithm(200, 150, 25, gap)
            fit = FixedIterationTrainer(ga, i)
            fit.train()
            end = time.time()
            ga_results.append(ef.value(ga.getOptimal()))
            ga_times.append(end - start)
            #print "GA: " + str(ef.value(ga.getOptimal()))

    mimic_results = []
    mimic_times = []
    for i in iters[0:6]:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            mimic = MIMIC(200, 100, pop)
            fit = FixedIterationTrainer(mimic, i)
            fit.train()
            end = time.time()
            mimic_results.append(ef.value(mimic.getOptimal()))
            mimic_times.append(end - start)
            #print "MIMIC: " + str(ef.value(mimic.getOptimal()))

    with open('knapsack.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(rhc_results)
        writer.writerow(rhc_times)
        writer.writerow(sa_results)
        writer.writerow(sa_times)
        writer.writerow(ga_results)
        writer.writerow(ga_times)
        writer.writerow(mimic_results)
        writer.writerow(mimic_times)

    return rhc_results, rhc_times, sa_results, sa_times, ga_results, ga_times, mimic_results, mimic_times
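
The result lists written above are flat: each holds len(iters) * num_repeats scores (or times) in row-major order, i.e. all repeats for the first iteration budget, then the next, and so on (MIMIC only covers iters[0:6]). A small post-processing sketch, assuming the CSV is later read back in regular CPython with NumPy available:

# Hypothetical analysis step (CPython + NumPy, not part of the Jython run):
# average the RHC repeats for each iteration budget from the first CSV row.
import numpy as np

iters = [50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100000]
num_repeats = 5
rhc_results = np.loadtxt('knapsack.csv', delimiter=',', max_rows=1)
rhc_mean = rhc_results.reshape(len(iters), num_repeats).mean(axis=1)
print(dict(zip(iters, rhc_mean)))
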
Code Example #4
def run_all():
    problem = 'knapsack'
    # Random number generator
    random = Random()
    # The number of items
    NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack 
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME


    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    maxEpochs = 400

    columns = ['problem','label','score','epoch','time','avgTrainTime','iterations']
    outFile = open(problem + '_all.csv', 'wb')  # 'filename' is not defined in this excerpt, so the problem label is used
    fout = csv.writer(outFile,delimiter=',')
    fout.writerow(columns)


    def run_algo(alg,fit,label,iters):
        print(alg)
        trainTimes = [0.]
        trainTime = []
        scores = [0]
        deltaScores = []
        for epoch in range(0,maxEpochs,1):
        
            st = time.clock()
            fit.train()
            et = time.clock()
            
            trainTimes.append(trainTimes[-1]+(et-st))
            trainTime.append((et-st))
            rollingMean = 10
            avgTime = (math.fsum(trainTime[-rollingMean:]) / float(rollingMean))
      
            score = ef.value(alg.getOptimal())
            scores.append(score)
            deltaScores.append(math.fabs(scores[-2] - scores[-1]))
            
            # trialString = '{}-{}-{}-{}'.format(label,score,epoch,trainTimes[-1])
            trialData = [problem,label,score,epoch,trainTimes[-1],avgTime,iters]
            print(trialData)
            fout.writerow(trialData)
            
            
    iters = 10
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, iters)
    run_algo(rhc,fit,'RHC',10)


    startTemp = 1E11
    coolingFactor = .95
    sa = SimulatedAnnealing(startTemp, coolingFactor, hcp)
    fit = FixedIterationTrainer(sa, iters)
    run_algo(sa,fit,'SA',10)

    population = 300
    mates = 100
    mutations = 50
    ga = StandardGeneticAlgorithm(population, mates, mutations, gap)
    fit = FixedIterationTrainer(ga, iters)
    run_algo(ga,fit,'GA',10)
    
    
    samples = 200
    keep = 20
    mimic = MIMIC(samples, keep, pop)
    fit = FixedIterationTrainer(mimic, iters)
    run_algo(mimic,fit,'MIMIC',10)
    
    outFile.close()
Code Example #5
def run_all_2(N=40,fout=None):
    maxEpochs = 10**5
    maxTime = 300 #5 minutes
    problem = 'knapsack'
    # Random number generator
    random = Random()
    # The number of items
    NUM_ITEMS = N
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack 
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME


    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
    

    def run_algo(alg,fit,label,difficulty,iters):
        trainTimes = [0.]
        trainTime = []
        scoreChange = [0.]
        stuckCount = 10**3
        prev = 0.
        for epoch in range(0,maxEpochs,1):
        
            st = time.clock()
            fit.train()
            et = time.clock()
            
            trainTimes.append(trainTimes[-1]+(et-st))
            trainTime.append((et-st))
            rollingMean = 10
            avgTime = (math.fsum(trainTime[-rollingMean:]) / float(rollingMean))
        
            score = ef.value(alg.getOptimal())
            
            # trialString = '{}-{}-{}-{}'.format(label,score,epoch,trainTimes[-1])
            trialData = [problem,difficulty,label,score,epoch,trainTimes[-1],avgTime,iters]
            # print(trialData)
            # fout.writerow(trialData)
            # print(trialData)
            print(trialData,max(scoreChange))
            # print(max(scoreChange))
            # optimum = (difficulty-1-T) + difficulty
            # if score >= optimum: break
            
            scoreChange.append(abs(score-prev))
            prev = score
            scoreChange = scoreChange[-stuckCount:]
            # print(scoreChange)
            if max(scoreChange) < 1.0: break
            
            if trainTimes[-1] > maxTime: break
            
    
        # print(trialData)
        fout.writerow(trialData)
        
        
    iters = 1000
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, iters)
    run_algo(rhc,fit,'RHC',N,iters)

    iters = 1000
    startTemp = 1E10
    coolingFactor = .99
    sa = SimulatedAnnealing(startTemp, coolingFactor, hcp)
    fit = FixedIterationTrainer(sa, iters)
    run_algo(sa,fit,'SA',N,iters)

    iters = 10
    population = 300
    mates = 100
    mutations = 50
    ga = StandardGeneticAlgorithm(population, mates, mutations, gap)
    fit = FixedIterationTrainer(ga, iters)
    run_algo(ga,fit,'GA',N,iters)
    
    iters = 10
    samples = 200
    keep = 20
    mimic = MIMIC(samples, keep, pop)
    fit = FixedIterationTrainer(mimic, iters)
    run_algo(mimic,fit,'MIMIC',N,iters)
Code Example #6
def solveit(oaname, params):
    iterations = 10000
    tryi = 1
    # Random number generator
    random = Random()
    # The number of items
    NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME

    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    if oaname == 'RHC':
        iterations = int(params[0])
        tryi = int(params[1])
        oa = RandomizedHillClimbing(hcp)
    if oaname == 'SA':
        oa = SimulatedAnnealing(float(params[0]), float(params[1]), hcp)
    if oaname == 'GA':
        iterations = 1000
        oa = StandardGeneticAlgorithm(int(params[0]), int(params[1]),
                                      int(params[2]), gap)
    if oaname == 'MMC':
        iterations = 1000
        oa = MIMIC(int(params[0]), int(params[1]), pop)

    print "Running %s using %s for %d iterations, try %d" % (
        oaname, ','.join(params), iterations, tryi)
    print "=" * 20
    starttime = timeit.default_timer()
    output = []
    for i in range(iterations):
        oa.train()
        if i % 10 == 0:
            optimal = oa.getOptimal()
            score = ef.value(optimal)
            elapsed = int(timeit.default_timer() - starttime)
            output.append([str(i), str(score), str(elapsed)])

    print 'score: %.3f' % score
    print 'train time: %d secs' % (int(timeit.default_timer() - starttime))

    scsv = 'kn-%s-%s.csv' % (oaname, '-'.join(params))
    print "Saving to %s" % (scsv),
    with open(scsv, 'w') as csvf:
        writer = csv.writer(csvf)
        for row in output:
            writer.writerow(row)
    print "saved."
    print "=" * 20
Code Example #7
def run_knapsack_experiments():
    OUTPUT_DIRECTORY = './output'
    # Random number generator
    random = Random()
    # The number of items
    NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME

    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    max_iter = 5000
    outfile = OUTPUT_DIRECTORY + '/knapsack_{}_log.csv'

    # Randomized Hill Climber
    filename = outfile.format('rhc')
    with open(filename, 'w') as f:
        f.write('iterations,fitness,time\n')
    for it in range(0, max_iter, 10):
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, it)
        start_time = time.clock()
        fit.train()
        elapsed_time = time.clock() - start_time
        # fevals = ef.fevals
        score = ef.value(rhc.getOptimal())
        data = '{},{},{}\n'.format(it, score, elapsed_time)
        print(data)
        with open(filename, 'a') as f:
            f.write(data)

    # Simulated Annealing
    filename = outfile.format('sa')
    with open(filename, 'w') as f:
        f.write('iteration,cooling_value,fitness,time\n')
    for cooling_value in (.19, .38, .76, .95):
        for it in range(0, max_iter, 10):
            sa = SimulatedAnnealing(200, cooling_value, hcp)
            fit = FixedIterationTrainer(sa, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(sa.getOptimal())
            data = '{},{},{},{}\n'.format(it, cooling_value, score,
                                          elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)

    # Genetic Algorithm
    filename = outfile.format('ga')
    with open(filename, 'w') as f:
        f.write('iteration,population_size,to_mate,to_mutate,fitness,time\n')
    for population_size, to_mate, to_mutate in itertools.product(
        [200], [110, 120, 130, 140, 150], [2, 4, 6, 8]):
        for it in range(0, max_iter, 10):
            ga = StandardGeneticAlgorithm(population_size, to_mate, to_mutate,
                                          gap)
            fit = FixedIterationTrainer(ga, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(ga.getOptimal())
            data = '{},{},{},{},{},{}\n'.format(it, population_size, to_mate,
                                                to_mutate, score, elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)

    # MIMIC
    filename = outfile.format('mm')
    with open(filename, 'w') as f:
        f.write('iterations,samples,to_keep,m,fitness,time\n')
    for samples, to_keep, m in itertools.product([200], [100],
                                                 [0.1, 0.3, 0.5, 0.7, 0.9]):
        for it in range(0, 500, 10):
            df = DiscreteDependencyTree(m, ranges)
            pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
            mm = MIMIC(samples, to_keep, pop)
            fit = FixedIterationTrainer(mm, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(mm.getOptimal())
            data = '{},{},{},{},{},{}\n'.format(it, samples, to_keep, m, score,
                                                elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)
Code Example #8
def run_algorithm_test(weights, volumes, knapsack_volume, copies, ranges, algorithms, output_file_name, trial_number, iterations=False):

    with open(output_file_name,'w') as f:
        f.write('algorithm,optimal_result,iterations,time,trial\n')

    ef = KnapsackEvaluationFunction(weights, volumes, knapsack_volume, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)

    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    for trial in range(trial_number):
        if iterations is False:
            for item in algorithms:
                start_time = time.time()
                if item in ['rhc']:
                    optimal_result, run_iters = run_rhc(hcp, ef)
                elif item in ['sa']:
                    optimal_result, run_iters = run_sa(hcp, ef)
                elif item in ['ga']:
                    optimal_result, run_iters = run_ga(gap, ef)
                elif item in ['mimic']:
                    optimal_result, run_iters = run_mimic(pop, ef)
                else:
                    print "The algorithm type {} is not supported.".format(item)

                end_time = time.time()
                time_elapsed = end_time - start_time

                run_output = '{},{},{},{},{}\n'.format(item, optimal_result, run_iters, time_elapsed, trial)
                with open(output_file_name,'a') as f:
                    f.write(run_output)
        else:
            for iter in iterations:
                for item in algorithms:
                    start_time = time.time()
                    if item in ['rhc']:
                        optimal_result, run_iters = run_rhc(hcp, ef, iter)
                    elif item in ['sa']:
                        optimal_result, run_iters = run_sa(hcp, ef, iter)
                    elif item in ['ga']:
                        optimal_result, run_iters = run_ga(gap, ef, iter)
                    elif item in ['mimic']:
                        optimal_result, run_iters = run_mimic(pop, ef, iter)
                    else:
                        print "The algorithm type {} is not supported.".format(item)

                    end_time = time.time()
                    time_elapsed = end_time - start_time

                    run_output = '{},{},{},{},{}\n'.format(item, optimal_result, run_iters, time_elapsed, trial)

                    with open(output_file_name,'a') as f:
                        f.write(run_output)

    print "time elapsed is {}".format(time_elapsed)

    return
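
Unlike the earlier examples, this variant receives the problem data as arguments and delegates the actual runs to run_rhc, run_sa, run_ga and run_mimic helpers defined elsewhere in the same project. A hypothetical driver, reusing the same problem setup as the other snippets:

# Hypothetical call; assumes the run_* helpers from the same project are importable.
random = Random()
NUM_ITEMS, COPIES_EACH, MAX_WEIGHT, MAX_VOLUME = 40, 4, 50, 50
KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4
copies = array('i', [COPIES_EACH] * NUM_ITEMS)
weights = array('d', [random.nextDouble() * MAX_WEIGHT for _ in range(NUM_ITEMS)])
volumes = array('d', [random.nextDouble() * MAX_VOLUME for _ in range(NUM_ITEMS)])
ranges = array('i', [COPIES_EACH + 1] * NUM_ITEMS)

run_algorithm_test(weights, volumes, KNAPSACK_VOLUME, copies, ranges,
                   ['rhc', 'sa', 'ga', 'mimic'], 'knapsack_results.csv',
                   trial_number=3, iterations=[1000, 5000, 10000])
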
Code Example #9
File: knapsackfunc.py  Project: omarka/datascience
def knapsackfunc(NUM_ITEMS, iterations):

	rhcMult = 600
	saMult = 600
	gaMult = 4
	mimicMult = 3

	# Random number generator
	random = Random()
	# The number of items
	#NUM_ITEMS = 40
	# The number of copies each
	COPIES_EACH = 4
	# The maximum weight for a single element
	MAX_WEIGHT = 50
	# The maximum volume for a single element
	MAX_VOLUME = 50
	# The volume of the knapsack 
	KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

	# create copies
	fill = [COPIES_EACH] * NUM_ITEMS
	copies = array('i', fill)

	# create weights and volumes
	fill = [0] * NUM_ITEMS
	weights = array('d', fill)
	volumes = array('d', fill)
	for i in range(0, NUM_ITEMS):
		weights[i] = random.nextDouble() * MAX_WEIGHT
		volumes[i] = random.nextDouble() * MAX_VOLUME


	# create range
	fill = [COPIES_EACH + 1] * NUM_ITEMS
	ranges = array('i', fill)

	ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
	odd = DiscreteUniformDistribution(ranges)
	nf = DiscreteChangeOneNeighbor(ranges)
	mf = DiscreteChangeOneMutation(ranges)
	cf = UniformCrossOver()
	df = DiscreteDependencyTree(.1, ranges)
	hcp = GenericHillClimbingProblem(ef, odd, nf)
	gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
	pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

	optimalOut = []
	timeOut = []
	evalsOut = []

	for niter in iterations:

		iterOptimalOut = [NUM_ITEMS, niter]
		iterTimeOut = [NUM_ITEMS, niter]
		iterEvals = [NUM_ITEMS, niter]



		start = time.time()
		rhc = RandomizedHillClimbing(hcp)
		fit = FixedIterationTrainer(rhc, niter*rhcMult)
		fit.train()
		end = time.time()
		rhcOptimal = ef.value(rhc.getOptimal())
		rhcTime = end-start
		print "RHC optimum: " + str(rhcOptimal)
		print "RHC time: " + str(rhcTime)
		iterOptimalOut.append(rhcOptimal)
		iterTimeOut.append(rhcTime)
		functionEvals = ef.getNumEvals()
		ef.zeroEvals()
		iterEvals.append(functionEvals)

		start = time.time()
		sa = SimulatedAnnealing(100, .95, hcp)
		fit = FixedIterationTrainer(sa, niter*saMult)
		fit.train()
		end = time.time()
		saOptimal = ef.value(sa.getOptimal())
		saTime = end-start
		print "SA optimum: " + str(saOptimal)
		print "SA time: " + str(saTime)
		iterOptimalOut.append(saOptimal)
		iterTimeOut.append(saTime)
		functionEvals = ef.getNumEvals()
		ef.zeroEvals()
		iterEvals.append(functionEvals)

		start = time.time()
		ga = StandardGeneticAlgorithm(200, 150, 25, gap)
		fit = FixedIterationTrainer(ga, niter*gaMult)
		fit.train()
		end = time.time()
		gaOptimal = ef.value(ga.getOptimal())
		gaTime = end - start
		print "GA optimum: " + str(gaOptimal)
		print "GA time: " + str(gaTime)
		iterOptimalOut.append(gaOptimal)
		iterTimeOut.append(gaTime)
		functionEvals = ef.getNumEvals()
		ef.zeroEvals()
		iterEvals.append(functionEvals)


		start = time.time()
		mimic = MIMIC(200, 100, pop)
		fit = FixedIterationTrainer(mimic, niter*mimicMult)
		fit.train()
		end = time.time()
		mimicOptimal = ef.value(mimic.getOptimal())
		mimicTime = end - start
		print "MIMIC optimum: " + str(mimicOptimal)
		print "MIMIC time: " + str(mimicTime)
		iterOptimalOut.append(mimicOptimal)
		iterTimeOut.append(mimicTime)
		functionEvals = ef.getNumEvals()
		ef.zeroEvals()
		iterEvals.append(functionEvals)
		
		optimalOut.append(iterOptimalOut)
		timeOut.append(iterTimeOut)
		evalsOut.append(iterEvals)		
	
	return [optimalOut, timeOut, evalsOut]
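
The function returns three parallel lists (optima, wall-clock times, and evaluation counts), one row per entry in the iterations argument, and it relies on a KnapsackEvaluationFunction extended with getNumEvals()/zeroEvals() counters. A hypothetical invocation with illustrative iteration budgets:

# Hypothetical call; each returned row is [NUM_ITEMS, niter, RHC, SA, GA, MIMIC].
optimalOut, timeOut, evalsOut = knapsackfunc(40, [1, 2, 5, 10])
print optimalOut
print timeOut
print evalsOut
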