def testBestHillClimb(self):
  """Test the best hill climb algorithm.

  Test whether it finds the best results as expected.
  """
  # Initiate the build/test command and the log directory.
  Task.InitLogCommand(None, None, 'output')

  # Generate the testing specs.
  specs = self._GenerateFlagSpecifications()

  # Generate the initial generations for a test whose cost function is the
  # summation of the values of all the flags.
  generation_tasks = _GenerateAllFlagsTasks(specs)
  generations = [HillClimbingBestBranch(generation_tasks, set([]), specs)]

  # Test the algorithm. The cost function is the summation of all the values
  # of all the flags. Therefore, the best value is supposed to be 0, i.e.,
  # when all the flags are disabled.
  _TestAlgorithm('sum(values[0:len(values)])', specs, generations, 0)

  # This test uses a cost function that is the negative of the previous cost
  # function. Therefore, the best result should be found in the task with all
  # the flags enabled.
  cost_function = 'sys.maxint - sum(values[0:len(values)])'
  all_flags = list(generation_tasks)[0].GetFlags()
  cost = _ComputeCost(cost_function, specs, all_flags)

  # Generate the initial generations.
  generation_tasks = _GenerateNoFlagTask()
  generations = [HillClimbingBestBranch(generation_tasks, set([]), specs)]

  # Test the algorithm. The cost function is the negative of the summation of
  # all the values of all the flags. Therefore, the best value is the cost of
  # the task with all the flags enabled.
  _TestAlgorithm(cost_function, specs, generations, cost)
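
# The sketch below illustrates the shape of the cost functions used above:
# each is a Python expression evaluated against a list named 'values' that
# holds the numeric value of every flag. The helper name and the use of eval
# are illustrative assumptions, not the actual _ComputeCost implementation.
def _SketchEvalCost(cost_function, values):
  """Evaluate a cost-function expression over a list of flag values."""
  # eval sees the 'values' parameter through the enclosing frame's locals.
  return eval(cost_function)

# With every flag disabled the summation cost is 0, which is why
# testBestHillClimb expects 0 as the best cost for the first cost function.
assert _SketchEvalCost('sum(values[0:len(values)])', [0, 0, 0]) == 0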
def testIterativeElimination(self):
  """Test the iterative elimination algorithm.

  Test whether it finds the best results as expected.
  """
  # Initiate the build/test command and the log directory.
  Task.InitLogCommand(None, None, 'output')

  # Generate the testing specs.
  specs = self._GenerateFlagSpecifications()

  # Generate the initial generations. The generation contains the base line
  # task that turns on all the flags and tasks that each turn off one of the
  # flags.
  generations = _GenerateAllIterativeEliminationTasks(specs)

  # Test the algorithm. The cost function is the summation of all the values
  # of all the flags. Therefore, the best value is supposed to be 0, i.e.,
  # when all the flags are disabled.
  _TestAlgorithm('sum(values[0:len(values)])', specs, generations, 0)

  # This test uses a cost function that is the negative of the previous cost
  # function. Therefore, the best result should be found in the task with all
  # the flags enabled.
  all_flags_tasks = _GenerateAllFlagsTasks(specs)
  cost_function = 'sys.maxint - sum(values[0:len(values)])'

  # Compute the cost of the task that turns on all the flags.
  all_flags = list(all_flags_tasks)[0].GetFlags()
  cost = _ComputeCost(cost_function, specs, all_flags)

  # Test the algorithm. The cost function is the negative of the summation of
  # all the values of all the flags. Therefore, the best value is the cost of
  # the task with all the flags enabled.
  # The concrete type of the generation decides how the next generation will
  # be generated.
  _TestAlgorithm(cost_function, specs, generations, cost)
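
# A minimal sketch of one iterative-elimination round, assuming boolean flags
# and the plain summation cost used above. The baseline turns on every flag;
# each candidate turns off exactly one flag, and a flag is marked for
# elimination when its candidate beats the baseline. The helper name is
# hypothetical, not part of the tested implementation.
def _SketchEliminationRound(flag_values):
  """Return the indices of flags whose removal improves the summation cost."""
  baseline_cost = sum(flag_values)
  eliminated = []
  for index in range(len(flag_values)):
    candidate = list(flag_values)
    candidate[index] = 0  # Turn off one flag.
    if sum(candidate) < baseline_cost:
      eliminated.append(index)
  return eliminated

# With all flags on, turning off any single flag lowers the summation cost,
# so repeated rounds drive every flag to 0, the expected best cost above.
assert _SketchEliminationRound([1, 1, 1]) == [0, 1, 2]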
def testGeneticAlgorithm(self):
  """Test the Genetic Algorithm.

  Do a functional testing here and see how well it scales.
  """
  # Initiate the build/test command and the log directory.
  Task.InitLogCommand(None, None, 'output')

  # Generate the testing specs.
  specs = self._GenerateFlagSpecifications()

  # Initiate the meta data for the genetic algorithm.
  GAGeneration.InitMetaData(STOP_THRESHOLD, NUM_CHROMOSOMES, NUM_TRIALS,
                            specs, MUTATION_RATE)

  # Generate the initial generations.
  generation_tasks = GenerateRandomGATasks(specs, NUM_CHROMOSOMES, NUM_TRIALS)
  generations = [GAGeneration(generation_tasks, set([]), 0)]

  # Test the algorithm.
  _TestAlgorithm('sum(values[0:len(values)])', specs, generations, -1)
  cost_func = 'sys.maxint - sum(values[0:len(values)])'
  _TestAlgorithm(cost_func, specs, generations, -1)
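
# A small sketch of the chromosome-level operators a genetic algorithm
# typically applies to boolean flag sets: single-point crossover and per-bit
# mutation. The operator choices and helper names are illustrative
# assumptions, not taken from the GAGeneration implementation.
import random

def _SketchCrossover(parent_a, parent_b):
  """Single-point crossover of two equal-length bit vectors."""
  point = random.randint(1, len(parent_a) - 1)
  return parent_a[:point] + parent_b[point:]

def _SketchMutate(chromosome, mutation_rate):
  """Flip each bit independently with probability mutation_rate."""
  return [1 - bit if random.random() < mutation_rate else bit
          for bit in chromosome]

# Example: breed one child from two parents, then mutate it at rate 0.03.
child = _SketchMutate(_SketchCrossover([1, 1, 0, 0], [0, 0, 1, 1]), 0.03)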
def _ProcessGA(meta_data):
  """Set up the meta data for the genetic algorithm.

  Args:
    meta_data: the meta data for the genetic algorithm.
  """
  assert BUILD_CMD in meta_data
  build_cmd = meta_data[BUILD_CMD]

  assert TEST_CMD in meta_data
  test_cmd = meta_data[TEST_CMD]

  # Every remaining setting is optional and falls back to its default value.
  output_file = meta_data.get(OUTPUT, DEFAULT_OUTPUT)
  conf_file = meta_data.get(CONF, DEFAULT_CONF)
  num_builders = meta_data.get(NUM_BUILDER, DEFAULT_NUM_BUILDER)
  num_testers = meta_data.get(NUM_TESTER, DEFAULT_NUM_TESTER)
  stop_threshold = meta_data.get(STOP_THRESHOLD, DEFAULT_STOP_THRESHOLD)
  num_chromosomes = meta_data.get(NUM_CHROMOSOMES, DEFAULT_NUM_CHROMOSOMES)
  num_trials = meta_data.get(NUM_TRIALS, DEFAULT_NUM_TRIALS)
  mutation_rate = meta_data.get(MUTATION_RATE, DEFAULT_MUTATION_RATE)

  specs = flags.ReadConf(conf_file)

  # Initiate the build/test command and the log directory.
  Task.InitLogCommand(build_cmd, test_cmd, output_file)

  # Initiate the meta data for the genetic algorithm.
  GAGeneration.InitMetaData(stop_threshold, num_chromosomes, num_trials,
                            specs, mutation_rate)

  # Generate the initial generations.
  generation_tasks = testing_batch.GenerateRandomGATasks(
      specs, num_chromosomes, num_trials)
  generations = [GAGeneration(generation_tasks, set([]), 0)]

  # Execute the experiment.
  _StartExperiment(num_builders, num_testers, generations)
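
# An illustrative invocation of _ProcessGA. Only BUILD_CMD and TEST_CMD are
# mandatory; every other key falls back to its DEFAULT_* value. The command
# strings below are hypothetical placeholders.
def _ExampleProcessGAInvocation():
  """Sketch: run the genetic algorithm with a minimal meta data dictionary."""
  meta_data = {
      BUILD_CMD: './build_image.sh',
      TEST_CMD: './run_benchmark.sh',
      NUM_CHROMOSOMES: 20,
      MUTATION_RATE: 0.03,
  }
  _ProcessGA(meta_data)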