def test_get_parsing_results(self):
        self.initialise_segment_table("abnese_lengthening_segment_table.txt")
        configurations["MORPHEME_BOUNDARY_FLAG"] = True
        configurations["LENGTHENING_FLAG"] = True
        configurations["HMM_ENCODING_LENGTH_MULTIPLIER"] = 100
        configurations["DATA_ENCODING_LENGTH_MULTIPLIER"] = 20
        hmm = HMM({
            'q0': ['q1'],
            'q1': (['qf'], ['aabb', 'abb', 'bbaabb', 'aba', 'aaba', 'bbaa'])
        })

        rule1 = Rule([], [{"long": "+"}], [], [{}, {"bound": "+"}], obligatory=True)
        rule2 = Rule([], [{"syll": "+"}], [{"cons": "+"}], [{"cons": "+"}], obligatory=True)
        rule_set = RuleSet([rule1, rule2])

        grammar = Grammar(hmm, rule_set)
        data = [
            u'baba:a', u'babaab:ab', u'ab:a', u'aab:a', u'aab:ab', u'ab:ab'
        ]

        hypothesis = Hypothesis(grammar, data)
        simulated_annealing = SimulatedAnnealing(hypothesis, 0)
        print(simulated_annealing._get_parsing_results())
Example 2
class TestSimulatedAnnealing(unittest.TestCase):

    def setUp(self):
        self.flow = [
                [0, 1, 2],
                [4, 0, 5],
                [7, 2, 0]]
        self.distance = [
                [0, 5, 2],
                [1, 0, 1],
                [6, 2, 0]]
        self.i = Instance(None, self.distance, self.flow)
        self.sa = SimulatedAnnealing()
        self.startpoint = Solution((0, 1, 2))

    def test_solve_with_startpoint(self):
        expected = (2, 1, 0)
        actual = self.sa.solve(self.i, self.startpoint).sequence
        self.assertEqual(expected, actual)

    def test_guess_temperature(self):
        expected = 255.0
        actual = self.sa._guess_temp(None, prob=0.98, df=1000)
        self.assertAlmostEqual(expected, actual, -1)

    def test_guess_temperature_for_given_instance(self):
        expected = 3.5
        actual = self.sa._guess_temp(self.i, prob=0.95)
        self.assertAlmostEqual(expected, actual, 0)
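
The expected value 255 in test_guess_temperature is consistent with solving the Metropolis rule for the temperature: picking T so that a cost increase of df is accepted with probability 1 - prob gives exp(-df / T) = 1 - prob, i.e. T = -df / ln(1 - prob) ≈ 255.6 for prob=0.98 and df=1000. A minimal sketch under that assumption (the actual _guess_temp implementation is not shown on this page, so treat this as a hypothetical reconstruction):

import math

def guess_temp(prob, df):
    # Hypothetical reconstruction: choose T so that a cost increase of df
    # is accepted with probability (1 - prob), i.e. exp(-df / T) == 1 - prob.
    return -df / math.log(1.0 - prob)

# guess_temp(prob=0.98, df=1000) ~= 255.6, which agrees with the
# assertAlmostEqual(expected, actual, -1) check above.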
Example 3
def main():
    try:
        n = int(sys.argv[1])
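        # argv[1] parsed as an integer problem size; non-numeric input
        # raises ValueError and falls through to the single-file branch below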

    # Run Single File
    except ValueError:
        g = GraphInterface.fromFile(sys.argv[1])
        a = SimulatedAnnealing(g)
        start = datetime.now()
        paths, costs = a.run(max_iterations=100)

        cost = costs[-1]
        end = datetime.now()
        print(f"File: {sys.argv[1]}.\nTime (s): {(end - start).total_seconds()}. \nCost: {int_just(cost,5)} \nSolution: {paths}.")
    else:

        # Run all files for all problems <= argv[1]
        if len(sys.argv) >= 3 and sys.argv[2] == "all":
            results = []
            for i in range(1, n + 1):
                a, b = run_problem_size(i, print_individual=False)
                results.append((a, b))

            for t, i in zip(results, range(1, n + 1)):
                print(
                    f"Cities: {int_just(i, 3)} Average time (s): {int_just(t[0], 6)} Average Cost: {int_just(t[1], 12)}")

        # Run all files for problems of size argv[1]
        else:
            a, b = run_problem_size(n)
            print(
                f"\nFor {n} cities. \nAverage Time: {a} \nAverage Cost: {b}.")
Example 4
    def test_simulate_annealing(self):
        sa_parameter = {
            "cycles": 100,
            "trails": 10,
            "P_start": 0.7,
            "P_end": 0.001
        }
        parameter = json.dumps(sa_parameter)

        self.simulated_annealing = SimulatedAnnealing(
            self.dataset, self.combinations_df, self.normal_profile_selection,
            parameter)
        self.simulated_annealing.start()

        best_costs = self.simulated_annealing.best_results_list

        best_scrap_list = []
        for profile_dict in best_costs:
            scrap_sum = 0
            for uuid, raw_profile in profile_dict.items():
                scrap_sum += raw_profile.scrap

            best_scrap_list.append(scrap_sum)

        plt.plot(best_scrap_list)
        plt.show()
Example 5
def load_modules_and_run(feature_table_file_path, corpus_file_path, constraint_set_file_path,
                         configuration_files_dir_path):
    #TODO finish the loading from file
    #file, path, desc = imp.find_module("bb", [configuration_files_dir_path])
    #
    #module = imp.load_module("bb", file, path, desc)
    #print(type(module))
    #print(dir(module))
    #module.print_()

    #importing in here because it is after OtmlConfigurationManager initialization
    from grammar.lexicon import Lexicon
    from grammar.feature_table import FeatureTable
    from grammar.constraint_set import ConstraintSet
    from grammar.grammar import Grammar
    from traversable_grammar_hypothesis import TraversableGrammarHypothesis
    from corpus import Corpus
    from simulated_annealing import SimulatedAnnealing

    feature_table = FeatureTable.load(feature_table_file_path)
    corpus = Corpus.load(corpus_file_path)
    constraint_set = ConstraintSet.load(constraint_set_file_path, feature_table)
    lexicon = Lexicon(corpus.get_words(), feature_table)
    grammar = Grammar(feature_table, constraint_set, lexicon)
    data = corpus.get_words()
    traversable_hypothesis = TraversableGrammarHypothesis(grammar, data)
    simulated_annealing = SimulatedAnnealing(traversable_hypothesis)
    simulated_annealing.run()
Example 6
def findRoutes():
    start_time = time()
    body = request.get_json(force=True)
    temp = request.args.get('use_heuristic')
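    # the heuristic is enabled unless the query string passes exactly use_heuristic=false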
    flag = temp != "false"
    print(body)
    lista = list(body)
    if len(lista) < 2:
        return []
    destinations = RouteController()
    for i in lista:
        destinations.add_cord(Coordinate(i['lat'], i['lng'], 'A'))
    #TODO
    sa = SimulatedAnnealing(destinations,
                            initial_temperature=1000,
                            cooling_rate=0.0015,
                            use_heuristic=flag)
    sa.run()
    response = []
    for i in sa.best:
        response.append({'lat': i.lat, 'lng': i.long})
    elapsed_time = time() - start_time
    print("Elapsed time: %0.10f seconds." % elapsed_time)
    return jsonify(response)
Example 7
    def run(self, path, DEBUG=False):
        painter = Painter(path, DEBUG=DEBUG)
        simulated_annealing = SimulatedAnnealing(self.coordinates,
                                                 max_interations=5000000,
                                                 alpha=0.9995,
                                                 min_temperature=0.00000001)
        solution, costs = simulated_annealing.execute()

        painter.plot_path(solution, self.coordinates)
        painter.plot_costs(costs)

        print("Best solution: {}".format(solution))
        print("Costs: {}".format(costs))
Example 8
def run_simulation(configurations_tuples, simulation_number, log_file_template, feature_table_file_name, corpus_file_name, constraint_set_file_name,
                  sample_target_lexicon=None, sample_target_outputs=None, target_lexicon_indicator_function=None,
                  target_constraint_set_file_name=None, target_lexicon_file_name=None, convert_corpus_word_to_target_word_function=None,
                  initial_lexicon_file_name=None):

    for configurations_tuple in configurations_tuples:
        configurations[configurations_tuple[0]] = configurations_tuple[1]

    log_file_name = log_file_template.format(platform.node(), simulation_number)
    dirname, filename = split(abspath(__file__))
    log_file_path = join(dirname, "../logging/", log_file_name)

    # if os.path.exists(log_file_path):
    #     raise ValueError("log name already exits")

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    file_log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s', "%Y-%m-%d %H:%M:%S")
    file_log_handler = logging.FileHandler(log_file_path, mode='w')
    file_log_handler.setFormatter(file_log_formatter)
    logger.addHandler(file_log_handler)

    feature_table = FeatureTable.load(get_feature_table_fixture(feature_table_file_name))
    corpus = Corpus.load(get_corpus_fixture(corpus_file_name))
    constraint_set = ConstraintSet.load(get_constraint_set_fixture(constraint_set_file_name),
                                              feature_table)

    if initial_lexicon_file_name:
        corpus_for_lexicon = Corpus.load(get_corpus_fixture(initial_lexicon_file_name))
        lexicon = Lexicon(corpus_for_lexicon.get_words(), feature_table)
    else:
        lexicon = Lexicon(corpus.get_words(), feature_table)
    grammar = Grammar(feature_table, constraint_set, lexicon)
    data = corpus.get_words()
    traversable_hypothesis = TraversableGrammarHypothesis(grammar, data)

    keyargs_dict = {}
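    # forward the optional target-comparison arguments to SimulatedAnnealing
    # only when they were supplied by the caller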
    
    if sample_target_lexicon and sample_target_outputs and target_lexicon_indicator_function:
        keyargs_dict["sample_target_lexicon"] = sample_target_lexicon
        keyargs_dict["sample_target_outputs"] = sample_target_outputs
        keyargs_dict["target_lexicon_indicator_function"] = target_lexicon_indicator_function

    if target_constraint_set_file_name and (target_lexicon_file_name or convert_corpus_word_to_target_word_function):
        target_energy = get_target_hypothesis_energy(feature_table, target_constraint_set_file_name, corpus,
                                                     target_lexicon_file_name, convert_corpus_word_to_target_word_function)
        keyargs_dict["target_energy"] = target_energy

    simulated_annealing = SimulatedAnnealing(traversable_hypothesis, **keyargs_dict)
    simulated_annealing.run()
Example 9
def main():
    parser = argparse.ArgumentParser(
        description='Input TSP file and hyperparameters')
    parser.add_argument('--tsp_file', '-f', help='TSP file path')

    args = parser.parse_args()
    tsp_file = args.tsp_file
    processor = TSPProcessor(tsp_file)
    nodes = processor.euc2d_process()
    '''set the simulated annealing algorithm params'''
    temp = 10000
    stopping_temp = 0.00000001
    alpha = 0.999995
    stopping_iter = 10000000
    '''set the dimensions of the grid'''
    size_width = 200
    size_height = 200
    '''set the number of nodes'''
    population_size = 70
    '''generate random list of nodes'''
    # nodes = NodeGenerator(size_width, size_height, population_size).generate()
    '''run simulated annealing algorithm with 2-opt'''
    sa = SimulatedAnnealing(nodes, temp, alpha, stopping_temp, stopping_iter)
    sa.anneal()
    '''animate'''
    save_path = os.path.join(
        os.path.join(os.path.dirname(tsp_file), 'result_gif'),
        os.path.basename(tsp_file) + '.gif')
    sa.animateSolutions(save_path)
    '''show the improvement over time'''
    sa.plotLearning()
Example 10
def main():
    '''define some global variables'''
    results = []
    '''set the simulated annealing algorithm parameter grid'''
    temp = 50000
    stopping_temp = 0.000000001
    alpha = .999
    stopping_iter = 10000000
    '''generate random list of nodes'''
    nodes = np.array([[20, 20], [60, 20], [100, 40], [120, 80], [160, 20],
                      [200, 40], [180, 60], [180, 100], [140, 140], [200, 160],
                      [180, 200], [140, 180], [100, 160], [80, 180], [60, 200],
                      [20, 160], [40, 120], [100, 120], [60, 80], [20, 40]])

    nodes = np.random.permutation(nodes)
    '''run simulated annealing algorithm with 2-opt'''
    sa = SimulatedAnnealing(nodes, temp, alpha, stopping_temp, stopping_iter)
    start_time = time.time()
    sa.anneal()
    execution_time = time.time() - start_time
    '''general plots'''
    print('Min weight: ', sa.min_weight)
    print('Iterations: ', sa.iteration)
    print('Execution time: ', execution_time)
    '''animate'''
    sa.animateSolutions()
    '''show the improvement over time'''
    sa.plotLearning()
Example 11
def main():
    '''set the simulated annealing algorithm params'''
    temp = 1000
    stopping_temp = 0.00000001
    alpha = 0.9995
    stopping_iter = 10000000

    '''set the dimensions of the grid'''
    size_width = 200
    size_height = 200

    '''set the number of nodes'''
    population_size = 70

    '''generate random list of nodes'''
    nodes = NodeGenerator(size_width, size_height, population_size).generate()

    '''run simulated annealing algorithm with 2-opt'''
    sa = SimulatedAnnealing(nodes, temp, alpha, stopping_temp, stopping_iter)
    sa.anneal()

    '''animate'''
    sa.animateSolutions()

    '''show the improvement over time'''
    sa.plotLearning()
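
The TSP examples above and below all share the same knobs: an initial temperature temp, a geometric cooling factor alpha, a stopping temperature, and an iteration cap. A minimal sketch of the loop those parameters imply, with hypothetical cost and neighbor callables standing in for the tour length and the 2-opt move (the SimulatedAnnealing internals are not shown on this page):

import math
import random

def anneal(initial_tour, cost, neighbor, temp=1000.0, alpha=0.9995,
           stopping_temp=0.00000001, stopping_iter=10000000):
    best = current = initial_tour
    iteration = 0
    while temp >= stopping_temp and iteration < stopping_iter:
        candidate = neighbor(current)
        delta = cost(candidate) - cost(current)
        # Metropolis rule: always accept improvements; accept worse tours
        # with probability exp(-delta / temp).
        if delta <= 0 or random.random() < math.exp(-delta / temp):
            current = candidate
            if cost(current) < cost(best):
                best = current
        temp *= alpha  # geometric cooling
        iteration += 1
    return best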
Example 12
def main():
    '''set the simulated annealing algorithm params'''
    temp = 1000
    stopping_temp = 0.00000001
    alpha = 0.9995
    stopping_iter = 10000000
    '''set the dimensions of the grid'''
    size_width = 50
    size_height = 50
    '''set the number of nodes'''
    population_size = 100
    '''generate random list of nodes'''
    '''nodes = NodeGenerator(size_width, size_height, population_size).generate()'''
    xs = []
    ys = []
    with open('data.txt') as f:
        for line in f:
            nums = line.split()  # splitting on whitespace also drops empty fields
            xs.append(int(nums[0]))
            ys.append(int(nums[1]))
    nodes = np.column_stack((xs, ys))
    '''run simulated annealing algorithm with 2-opt'''
    sa = SimulatedAnnealing(nodes, temp, alpha, stopping_temp, stopping_iter)
    sa.anneal()
    '''animate'''
    sa.animateSolutions()
    '''show the improvement over time'''
    sa.plotLearning()
Example 13
def run_annealing(problem_path: str, temperature: float, iterations: int) -> List[float]:
    """ Runs the simulated annealing on the problem stored at problem_path.

    Args:
        problem_path: Path to the problem file to load.
        temperature: An annealing constant to use in the model.
        iterations: The number of iterations to run through.

    Returns:
        A list detailing the costs at each iteration.
    """
    g = GraphInterface.fromFile(problem_path)
    a = SimulatedAnnealing(g, temperature=temperature)
    path, costs = a.run(max_iterations=iterations)
    return costs
Example 14
    def test_random_solution_generation(self):

        sa_parameter = {
            "cycles": 100,
            "trails": 50,
            "P_start": 0.7,
            "P_end": 0.001
        }
        parameter = json.dumps(sa_parameter)

        self.simulated_annealing = SimulatedAnnealing(self.dataset,
                                                      self.combinations_df,
                                                      parameter)
        random_solution = self.simulated_annealing.get_random_solution(
            self.dataset, self.combinations_df, self.normal_profile_selection)
Example 15
 def __init__(self,
              all_cities_and_distances,
              chosen_scheduler=params.beta_scheduler,
              cooling_rate=params.cooling_rate,
              initial_temperature=params.initial_temperature,
              max_steps=params.max_steps,
              min_temp=params.min_temp):
     SimulatedAnnealing.__init__(self,
                                 chosen_scheduler=chosen_scheduler,
                                 cooling_rate=cooling_rate,
                                 initial_temperature=initial_temperature,
                                 max_steps=max_steps,
                                 min_temp=min_temp)
     self.init_state(all_cities_and_distances)
     self.distances = init_distance_matrix(all_cities_and_distances)
Example 16
def run_simulated_annealing(initial_temperature, cooling_coefficient,
                            minimal_temperature):
    print('Running Simulated Annealing...')
    print()

    sa = SimulatedAnnealing(states, initial_temperature, cooling_coefficient,
                            minimal_temperature, inc_support, dec_support)
    sa.run()
    print('Found optimal route with value of ' + str(sa.best_solution.value) +
          '.')
    print(
        str(sa.best_solution.calculate_real_value()) +
        ' electoral votes were collected.')
    sa.best_solution.print()
    print()
Example 17
def run_large_problem(temperature: float, iterations: int) -> None:
    """ Runs the simulated annealing on a 36 city problem.

    Args:
        temperature: An annealing constant to use in the model.
        iterations: The number of iterations to run through.
    """
    g = GraphInterface.fromFile("problems/problem36")
    a = SimulatedAnnealing(g, temperature=temperature)
    start = datetime.now()
    path, costs = a.run(max_iterations=iterations)
    print(f"total seconds: {(datetime.now()-start).total_seconds()}")
    with open("big_problem_costs.csv", "w") as f:
        f.write(",".join([str(c) for c in costs]))

    print(f"Max cost was {max(costs)}| Min cost was {min(costs)}.")
Example 18
def main():
    cities = City.load_cities('./data/data50.txt')
    graph = Graph(cities)
    init_sol = graph.nearestNeighbourSolution()
    history = SimulatedAnnealing(graph, init_sol, 0.9998, 10, 0.0000001,
                                 1000000).anneal()
    DynamicPlot().show(cities, history, graph)
Example 19
    def setUp(self):
        configurations["CONSTRAINT_SET_MUTATION_WEIGHTS"] = {
            "insert_constraint": 1,
            "remove_constraint": 1,
            "demote_constraint": 1,
            "insert_feature_bundle_phonotactic_constraint": 1,
            "remove_feature_bundle_phonotactic_constraint": 1,
            "augment_feature_bundle": 0}

        configurations["CONSTRAINT_INSERTION_WEIGHTS"] = {
            "Dep": 1,
            "Max": 1,
            "Ident": 0,
            "Phonotactic": 1}

        configurations["LEXICON_MUTATION_WEIGHTS"] = {
            "insert_segment": 1,
            "delete_segment": 1,
            "change_segment": 0}

        configurations["RANDOM_SEED"] = True
        #configurations["SEED"] = 84
        configurations["INITIAL_TEMPERATURE"] = 100
        configurations["COOLING_PARAMETER"] = 0.999985
        configurations["INITIAL_NUMBER_OF_BUNDLES_IN_PHONOTACTIC_CONSTRAINT"] = 1
        configurations["MIN_FEATURE_BUNDLES_IN_PHONOTACTIC_CONSTRAINT"] = 1
        configurations["DATA_ENCODING_LENGTH_MULTIPLIER"] = 100
        configurations["RESTRICTION_ON_ALPHABET"] = True
        configurations["MAX_FEATURE_BUNDLES_IN_PHONOTACTIC_CONSTRAINT"] = float("INF")
        configurations["MAX_NUMBER_OF_CONSTRAINTS_IN_CONSTRAINT_SET"] = float("INF")


        configurations["DEBUG_LOGGING_INTERVAL"] = 50
        configurations["LOG_FILE_NAME"] = "{}_d_lengthening_INF_INF_{}.txt".format(platform.node(), simulation_number)
        self._set_up_logging()
        configurations["CORPUS_DUPLICATION_FACTOR"] = 1
        self.feature_table = FeatureTable.load(get_feature_table_fixture("d_lengthening_feature_table.json"))
        corpus = Corpus.load(get_corpus_fixture("d_lengthening_corpus.txt"))
        self.constraint_set = ConstraintSet.load(get_constraint_set_fixture("faith_constraint_set.json"),
                                                  self.feature_table)
        self.lexicon = Lexicon(corpus.get_words(), self.feature_table)
        self.grammar = Grammar(self.feature_table, self.constraint_set, self.lexicon)
        self.data = corpus.get_words()
        self.traversable_hypothesis = TraversableGrammarHypothesis(self.grammar, self.data)
        def desired_lexicon_indicator_function(words):
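            # ":" marks vowel length in this corpus, so this counts long vowels in the lexicon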
            number_of_long_vowels = sum([word.count(":") for word in words])
            return "number of long vowels: {}".format(number_of_long_vowels)

        def convert_corpus_word_to_target_word(word):
            return word.replace(':', '')

        target_energy = self.get_target_hypothesis_energy(self.feature_table, "d_lengthening_target_constraint_set.json", corpus,
                                   convert_corpus_word_to_target_word_function=convert_corpus_word_to_target_word)
        #391689

        self.simulated_annealing = SimulatedAnnealing(self.traversable_hypothesis,
                                                      target_lexicon_indicator_function=desired_lexicon_indicator_function,
                                                      sample_target_lexicon=["id", "ad"],
                                                      sample_target_outputs=["i:d", "a:d"],
                                                      target_energy=target_energy)
Example 20
    def compute_route(self):
        util = JobConfig()
        max_step = self.max_step
        max_util = self.num_mappers * self.num_reducers * self.bandwidth

        if self.build_paths():
            # Executing simulated annealing for map-reduce routing
            simulated_annealing = SimulatedAnnealing(max_util,
                                                     max_step,
                                                     self.routing_init_state,
                                                     self.routing_generate_neighbor,
                                                     self.routing_compute_util)

            util = simulated_annealing.run()

        # print "util: ", util.get_util()
        return util
Example 21
def simulatedAnnealingSolver(inputs, T=100, n_iter=10000, temp_update=.9):
    if inputs["rides"] < 100:
        n_iter = 300
        temp_update = .9
    elif inputs["rides"] < 1000:
        n_iter = 1000
        temp_update = .95
    else:
        n_iter = 5000
        temp_update = .99
    model = SimulatedAnnealing(inputs,
                               T=T,
                               n_iter=n_iter,
                               temp_update=temp_update)
    model.fit()
    print("score ", model.cur_score)
    return model.solution
Example 22
class TestOtmlWithAspirationAndLengtheningDemoteOnly(unittest.TestCase):
    def setUp(self):
        self.feature_table = FeatureTable.load(get_feature_table_fixture("aspiration_and_lengthening_feature_table.json"))
        corpus = Corpus.load(get_corpus_fixture("aspiration_and_lengthening_corpus.txt"))
        self.constraint_set = ConstraintSet.load(get_constraint_set_fixture("aspiration_and_lengthening_demote_only_constraint_set.json"),
                                                  self.feature_table)
        self.lexicon = Lexicon(corpus.get_words(), self.feature_table)
        self.grammar = Grammar(self.feature_table, self.constraint_set, self.lexicon)
        self.data = corpus.get_words()
        self.traversable_hypothesis = TraversableGrammarHypothesis(self.grammar, self.data)

        def function(words):
            number_of_long_vowels = sum([word.count(":") for word in words])
            number_of_aspirated_consonants = sum([word.count("h") for word in words])
            combined_number = number_of_long_vowels + number_of_aspirated_consonants
            return "number of long vowels and aspirated consonants in lexicon: {} (long vowels = {}, " \
                   "aspirated consonants = {})".format(combined_number, number_of_long_vowels,
                                                       number_of_aspirated_consonants)
        self.simulated_annealing = SimulatedAnnealing(self.traversable_hypothesis,
                                                      target_lexicon_indicator_function=function,
                                                      sample_target_lexicon=["ad", "id", "ta", "ti"],
                                                      sample_target_outputs=["a:d", "i:d", "tha", "thi"])



    run_test = True
    @unittest.skipUnless(run_test, "long running test skipped")
    def test_run(self):
        random.seed(1)
        configurations["CONSTRAINT_SET_MUTATION_WEIGHTS"] = {
            "insert_constraint": 0,
            "remove_constraint": 0,
            "demote_constraint": 1,
            "insert_feature_bundle_phonotactic_constraint": 0,
            "remove_feature_bundle_phonotactic_constraint": 0,
            "augment_feature_bundle": 0}

        configurations["CONSTRAINT_INSERTION_WEIGHTS"] = {
            "Dep": 1,
            "Max": 1,
            "Ident": 0,
            "Phonotactic": 1}

        configurations["LEXICON_MUTATION_WEIGHTS"] = {
            "insert_segment": 1,
            "delete_segment": 1,
            "change_segment": 0}

        configurations["COOLING_PARAMETER"] = 0.9995
        configurations["INITIAL_NUMBER_OF_BUNDLES_IN_PHONOTACTIC_CONSTRAINT"] = 2
        configurations["MAX_FEATURE_BUNDLES_IN_PHONOTACTIC_CONSTRAINT"] = 2
        configurations["DATA_ENCODING_LENGTH_MULTIPLIER"] = 100
        configurations["MAX_NUMBER_OF_CONSTRAINTS_IN_CONSTRAINT_SET"] = 12
        configurations["RESTRICTION_ON_ALPHABET"] = True

        configurations["DEBUG_LOGGING_INTERVAL"] = 50

        number_of_steps_performed, hypothesis = self.simulated_annealing.run()
Example 23
class TestOtmlWithFrenchDeletion(unittest.TestCase):
    def setUp(self):
        self._set_up_logging()
        configurations["CORPUS_DUPLICATION_FACTOR"] = 25
        self.feature_table = FeatureTable.load(get_feature_table_fixture("french_deletion_feature_table.json"))
        corpus = Corpus.load(get_corpus_fixture("french_deletion_corpus_for_with_restrictions.txt"))
        self.constraint_set = ConstraintSet.load(get_constraint_set_fixture("french_deletion_constraint_set.json"),
                                                  self.feature_table)
        self.lexicon = Lexicon(get_corpus_fixture("french_deletion_corpus_for_with_restrictions.txt"), self.feature_table)
        self.grammar = Grammar(self.feature_table, self.constraint_set, self.lexicon)
        self.data = corpus.get_words()
        self.traversable_hypothesis = TraversableGrammarHypothesis(self.grammar, self.data)
        self.simulated_annealing = SimulatedAnnealing(self.traversable_hypothesis)


    run_test = True
    @unittest.skipUnless(run_test, "long running test skipped")
    def test_run(self):
        configurations["CONSTRAINT_SET_MUTATION_WEIGHTS"] = {
            "insert_constraint": 0,
            "remove_constraint": 0,
            "demote_constraint": 1,
            "insert_feature_bundle_phonotactic_constraint": 0,
            "remove_feature_bundle_phonotactic_constraint": 0,
            "augment_feature_bundle": 0}

        configurations["LEXICON_MUTATION_WEIGHTS"]= {
            "insert_segment": 1,
            "delete_segment": 1,
            "change_segment": 0}

        configurations["DATA_ENCODING_LENGTH_MULTIPLIER"] = 1
        configurations["INITIAL_TEMPERATURE"] = 100
        configurations["COOLING_PARAMETER"] = 0.9999
        configurations["RESTRICTION_ON_ALPHABET"] = True


        configurations["DEBUG_LOGGING_INTERVAL"] = 50

        number_of_steps_performed, hypothesis = self.simulated_annealing.run()

    def _set_up_logging(self):
        unit_tests_log_file_name = log_file_name.format(platform.node(), simulation_number)

        #if os.path.exists(unit_tests_log_file_name):
        #    raise ValueError("log name already exits")

        logger = logging.getLogger()
        logger.setLevel(logging.INFO)

        file_log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s', "%Y-%m-%d %H:%M:%S")
        dirname, filename = split(abspath(__file__))
        unit_tests_log_path = normpath(join(dirname, unit_tests_log_file_name))
        file_log_handler = logging.FileHandler(unit_tests_log_path, mode='w')
        file_log_handler.setFormatter(file_log_formatter)
        logger.addHandler(file_log_handler)
Example 24
 def setUp(self):
     self.feature_table = FeatureTable.load(get_feature_table_fixture("a_b_and_cons_feature_table.json"))
     corpus = Corpus.load(get_corpus_fixture("bb_corpus.txt"))
     self.constraint_set = ConstraintSet.load(get_constraint_set_fixture("faith_constraint_set.json"),
                                               self.feature_table)
     self.lexicon = Lexicon(corpus.get_words(), self.feature_table)
     self.grammar = Grammar(self.feature_table, self.constraint_set, self.lexicon)
     self.data = corpus.get_words()
     self.traversable_hypothesis = TraversableGrammarHypothesis(self.grammar, self.data)
     self.simulated_annealing = SimulatedAnnealing(self.traversable_hypothesis)
Example 25
    def compute_route(self):
        util = JobConfig()

        if self.build_paths():
            max_step = self.max_step
            max_util = self.cur_demand.get_net()

            # Executing simulated annealing for map-reduce routing
            simulated_annealing = SimulatedAnnealing(max_util,
                                                     max_step,
                                                     self.routing_init_state,
                                                     self.routing_generate_neighbor,
                                                     self.routing_compute_util,
                                                     self.check_constraint)

            util = simulated_annealing.run()

        # print "util: ", util.get_util()
        return util
Example 26
    def execute_job(self, job):
        available_hosts = [h for h in self.graph.get_hosts() if h.is_free()]

        util = JobConfig()

        # There are enough nodes to run the job
        if len(available_hosts) > (self.num_mappers + self.num_reducers):
            max_util = self.num_mappers * self.num_reducers * self.bandwidth
            max_step = self.max_step

            # Executing simulated annealing for map-reduce placement
            simulated_annealing = SimulatedAnnealing(max_util,
                                                     max_step,
                                                     self.placement_init_state,
                                                     self.placement_generate_neighbor,
                                                     self.placement_compute_util)

            util = simulated_annealing.run()

        return util
Example 27
def run_problem_size(n: int, print_individual: bool = True) -> Tuple[float, float]:
    """ Runs all problems for a single problem size.

    Args:
        n: The size of problems to consider
        print_individual: Whether to print results for each individual problem.
    """
    times = []
    nodes = []

    for f in os.listdir(f"problems/{n}/"):
        g = GraphInterface.fromFile(f"problems/{n}/{f}")
        a = SimulatedAnnealing(g)
        start = datetime.now()
        path, costs = a.run(max_iterations=10000)
        cost = costs[-1]
        end = datetime.now()
        times.append((end - start).total_seconds())
        nodes.append(cost)
    return (sum(times) / len(times), sum(nodes) / len(nodes))
Example 28
 def setUp(self):
     self._set_up_logging()
     configurations["CORPUS_DUPLICATION_FACTOR"] = 25
     self.feature_table = FeatureTable.load(get_feature_table_fixture("french_deletion_feature_table.json"))
     corpus = Corpus.load(get_corpus_fixture("french_deletion_corpus_for_with_restrictions.txt"))
     self.constraint_set = ConstraintSet.load(get_constraint_set_fixture("french_deletion_constraint_set.json"),
                                               self.feature_table)
     self.lexicon = Lexicon(get_corpus_fixture("french_deletion_corpus_for_with_restrictions.txt"), self.feature_table)
     self.grammar = Grammar(self.feature_table, self.constraint_set, self.lexicon)
     self.data = corpus.get_words()
     self.traversable_hypothesis = TraversableGrammarHypothesis(self.grammar, self.data)
     self.simulated_annealing = SimulatedAnnealing(self.traversable_hypothesis)
Example 29
 def setUp(self):
     self.flow = [
             [0, 1, 2],
             [4, 0, 5],
             [7, 2, 0]]
     self.distance = [
             [0, 5, 2],
             [1, 0, 1],
             [6, 2, 0]]
     self.i = Instance(None, self.distance, self.flow)
     self.sa = SimulatedAnnealing()
     self.startpoint = Solution((0, 1, 2))
Example 30
    def test_simulated_annealing_runtime(self):
        import simulations.turkish_vowel_harmony as current_simulation
        configurations.load_configurations_from_dict(
            current_simulation.configurations_dict)
        self.initialise_segment_table('turkish_segment_table.txt')

        initial_hmm = None
        initial_rule_set = None
        initial_hypothesis = Hypothesis.create_initial_hypothesis(
            current_simulation.data, initial_hmm, initial_rule_set)
        target_tuple = current_simulation.target_tuple
        data = current_simulation.data
        target_rule_set = RuleSet.load_form_flat_list(target_tuple[1])
        target_hypothesis = Hypothesis.create_hypothesis(
            HMM(target_tuple[0]), target_rule_set, data)
        target_energy = target_hypothesis.get_energy()

        simulated_annealing = SimulatedAnnealing(initial_hypothesis,
                                                 target_energy)
        simulated_annealing.before_loop()

        # mutate hypothesis for some time before measuring steps
        for i in range(500):
            simulated_annealing.make_step()

        @timeit_best_of_N
        def make_step_profiled():
            simulated_annealing.make_step()

        make_step_profiled()
Example 31
File: 3sat.py Project: dstlmrk/3sat
def solve(files, variables, temperature, cooling, inner_loop, ratio):
    instances = 50
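    # the averages below divide by instances, assuming exactly 50 input files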
    duration_sum = 0
    weight_usage_ratio_sum = 0
    satisfied_ratio_sum = 0

    for file in files:
        sa = SimulatedAnnealing(temperature, cooling, inner_loop, ratio,
                                File(file))
        start_time = time.time()
        result = sa.evaluate()
        duration = time.time() - start_time
        print(result.value, result.satisfied_ratio, result.weight_usage_ratio,
              result.bit_array)
        duration_sum += duration
        weight_usage_ratio_sum += result.weight_usage_ratio
        satisfied_ratio_sum += result.satisfied_ratio
        # break

    # from seconds to milliseconds
    duration_avg = (duration_sum / instances) * 1000
    weight_usage_ratio_avg = (weight_usage_ratio_sum / instances)
    satisfied_ratio_avg = (satisfied_ratio_sum / instances)
    return duration_avg, weight_usage_ratio_avg, satisfied_ratio_avg
Example 32
	def breakCipher(self, enc, dictionary):
		ngrams = NGram()

		uniEnc = ngrams.unigrams(enc)
		biEnc = ngrams.bigrams(enc)
		triEnc = ngrams.trigrams(enc)

		uniDictionary = ngrams.unigrams(dictionary)
		biDictionary = ngrams.bigrams(dictionary)
		triDictionary = ngrams.trigrams(dictionary)

		sort_tg_enc = sorted(uniEnc, key=uniEnc.get, reverse=True)
		sort_tg_dic = sorted(uniDictionary, key=uniDictionary.get, reverse=True)

		# initial key guess: map each cipher symbol to the dictionary symbol
		# with the same unigram-frequency rank
		key = {}
		for index, x in enumerate(sort_tg_enc):
			key[x] = sort_tg_dic[index]

		# text = ""
		# for x in enc:
		# 	if x == " " or x == "\n":
		# 		text += x
		# 	else:
		# 		text += key[x]

		h = SimulatedAnnealing()

		h.breakOpen(enc,key,uniDictionary,triDictionary)
		
Example 33
    def setUp(self):
        self._set_up_logging()
        configurations["CORPUS_DUPLICATION_FACTOR"] = 1
        self.feature_table = FeatureTable.load(get_feature_table_fixture(feature_table_file_name))
        corpus = Corpus.load(get_corpus_fixture(corpus_file_name))
        self.constraint_set = ConstraintSet.load(get_constraint_set_fixture(constraint_set_file_name),
                                                  self.feature_table)
        self.lexicon = Lexicon(corpus.get_words(), self.feature_table)
        self.grammar = Grammar(self.feature_table, self.constraint_set, self.lexicon)
        self.data = corpus.get_words()
        self.traversable_hypothesis = TraversableGrammarHypothesis(self.grammar, self.data)

        self.simulated_annealing = SimulatedAnnealing(self.traversable_hypothesis,
                                                      target_lexicon_indicator_function=indicator_function,
                                                      sample_target_lexicon=sample_desired_lexicon,
                                                      sample_target_outputs=sample_desired_outputs)
Example 34
 def setUp(self):
     self._set_up_logging()
     configurations["CORPUS_DUPLICATION_FACTOR"] = 1
     self.feature_table = FeatureTable.load(get_feature_table_fixture("tk_aspiration_feature_table.json"))
     corpus = Corpus.load(get_corpus_fixture("tk_aspiration_corpus.txt"))
     self.constraint_set = ConstraintSet.load(get_constraint_set_fixture("faith_constraint_set.json"),
                                               self.feature_table)
     self.lexicon = Lexicon(corpus.get_words(), self.feature_table)
     self.grammar = Grammar(self.feature_table, self.constraint_set, self.lexicon)
     self.data = corpus.get_words()
     self.traversable_hypothesis = TraversableGrammarHypothesis(self.grammar, self.data)
     def function(words):
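          # "h" marks aspiration in this corpus, so this counts aspirated consonants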
         number_of_aspirated_consonants = sum([word.count("h") for word in words])
         return "number of aspirated consonants = {})".format(number_of_aspirated_consonants)
     self.simulated_annealing = SimulatedAnnealing(self.traversable_hypothesis,
                                                   target_lexicon_indicator_function=function,
                                                   sample_target_lexicon=["ti", "ta", "ki", "ka"],
                                                   sample_target_outputs=["thi", "tha", "khi", "kha"])
Example 35
    def setUp(self):
        self._set_up_logging()
        configurations["CORPUS_DUPLICATION_FACTOR"] = 1
        self.feature_table = FeatureTable.load(get_feature_table_fixture("a_b_and_cons_feature_table.json"))
        corpus = Corpus.load(get_corpus_fixture("bb_for_paper_corpus.txt"))
        self.constraint_set = ConstraintSet.load(get_constraint_set_fixture("faith_constraint_set.json"),
                                                  self.feature_table)
        self.lexicon = Lexicon(corpus.get_words(), self.feature_table)
        self.grammar = Grammar(self.feature_table, self.constraint_set, self.lexicon)
        self.data = corpus.get_words()
        self.traversable_hypothesis = TraversableGrammarHypothesis(self.grammar, self.data)

        def function(words):
            return "number of bab's: {}".format(sum([word.count("bab") for word in words]))
        self.simulated_annealing = SimulatedAnnealing(self.traversable_hypothesis,
                                                      target_lexicon_indicator_function=function,
                                                      sample_target_lexicon=["bb", "abb"],
                                                      sample_target_outputs=["bab", "abab"])
Example 36
    def executar(self):

        self.listaDadosIniciais = Configuracao.gerarDadosIniciais(
            self.configuracao)

        resultado = Resultado(self.configuracao.problema)

        for a in range(4):

            if a == 0:
                algoritmo = Algoritmo(Configuracao.algoritmos[a],
                                      HillClimbing(self.configuracao))
            elif a == 1:
                algoritmo = Algoritmo(Configuracao.algoritmos[a],
                                      HillClimbingRestart(self.configuracao))
            elif a == 2:
                algoritmo = Algoritmo(Configuracao.algoritmos[a],
                                      SimulatedAnnealing(self.configuracao))
            else:
                algoritmo = Algoritmo(Configuracao.algoritmos[a],
                                      GeneticAlgorithm(self.configuracao))

            for iteracao in range(10):
                algoritmo.executar(self.listaDadosIniciais[iteracao], iteracao)

            algoritmo.gerarEstatisticas()

            Graficos.gerarGraficoFuncaoObjetivo(algoritmo, self.configuracao)

            resultado.adicionar(algoritmo)

        inicioComparativo = time.perf_counter()

        self.finalizar(resultado)

        terminoComparativo = time.perf_counter()

        print(
            f"Comparative performance table/chart generated in {terminoComparativo - inicioComparativo:0.4f} seconds"
        )
Example 37
    def setUp(self):
        configurations["CORPUS_DUPLICATION_FACTOR"] = 2
        self.feature_table = FeatureTable.load(get_feature_table_fixture("aspiration_and_lengthening_extended_feature_table.json"))
        corpus = Corpus.load(get_corpus_fixture("aspiration_and_lengthening_extended_260_corpus.txt"))
        self.constraint_set = ConstraintSet.load(get_constraint_set_fixture("faith_constraint_set.json"),
                                                  self.feature_table)
        self.lexicon = Lexicon(corpus.get_words(), self.feature_table)
        self.grammar = Grammar(self.feature_table, self.constraint_set, self.lexicon)
        self.data = corpus.get_words()
        self.traversable_hypothesis = TraversableGrammarHypothesis(self.grammar, self.data)

        def function(words):
            number_of_long_vowels = sum([word.count(":") for word in words])
            number_of_aspirated_consonants = sum([word.count("h") for word in words])
            combined_number = number_of_long_vowels + number_of_aspirated_consonants
            return "number of long vowels and aspirated consonants in lexicon: {} (long vowels = {}, " \
                   "aspirated consonants = {})".format(combined_number, number_of_long_vowels,
                                                       number_of_aspirated_consonants)
        self.simulated_annealing = SimulatedAnnealing(self.traversable_hypothesis,
                                                      target_lexicon_indicator_function=function)
Example 38
class TestOtmlWithFaith(unittest.TestCase):
    def setUp(self):
        self.feature_table = FeatureTable.load(get_feature_table_fixture("a_b_and_cons_feature_table.json"))
        corpus = Corpus.load(get_corpus_fixture("bb_corpus.txt"))
        self.constraint_set = ConstraintSet.load(get_constraint_set_fixture("faith_constraint_set.json"),
                                                  self.feature_table)
        self.lexicon = Lexicon(corpus.get_words(), self.feature_table)
        self.grammar = Grammar(self.feature_table, self.constraint_set, self.lexicon)
        self.data = corpus.get_words()
        self.traversable_hypothesis = TraversableGrammarHypothesis(self.grammar, self.data)
        self.simulated_annealing = SimulatedAnnealing(self.traversable_hypothesis)


    run_test = True
    @unittest.skipUnless(run_test, "long running test skipped")
    def test_run(self):
        configurations["CONSTRAINT_SET_MUTATION_WEIGHTS"] = {
            "insert_constraint": 1,
            "remove_constraint": 1,
            "demote_constraint": 1,
            "insert_feature_bundle_phonotactic_constraint": 0,
            "remove_feature_bundle_phonotactic_constraint": 0,
            "augment_feature_bundle": 0}

        configurations["CONSTRAINT_INSERTION_WEIGHTS"] = {
            "Dep": 1,
            "Max": 1,
            "Ident": 0,
            "Phonotactic": 1}

        configurations["LEXICON_MUTATION_WEIGHTS"] = {
            "insert_segment": 1,
            "delete_segment": 1,
            "change_segment": 0}


        configurations["DEBUG_LOGGING_INTERVAL"] = 50
        configurations["COOLING_PARAMETER"] = 0.999985
        configurations["INITIAL_NUMBER_OF_BUNDLES_IN_PHONOTACTIC_CONSTRAINT"] = 2

        number_of_steps_performed, hypothesis = self.simulated_annealing.run()
Example 39
    def setUp(self):
        self.feature_table = FeatureTable.load(get_feature_table_fixture("aspiration_and_lengthening_feature_table.json"))
        corpus = Corpus.load(get_corpus_fixture("aspiration_and_lengthening_corpus.txt"))
        self.constraint_set = ConstraintSet.load(get_constraint_set_fixture("aspiration_and_lengthening_demote_only_constraint_set.json"),
                                                  self.feature_table)
        self.lexicon = Lexicon(corpus.get_words(), self.feature_table)
        self.grammar = Grammar(self.feature_table, self.constraint_set, self.lexicon)
        self.data = corpus.get_words()
        self.traversable_hypothesis = TraversableGrammarHypothesis(self.grammar, self.data)

        def function(words):
            number_of_long_vowels = sum([word.count(":") for word in words])
            number_of_aspirated_consonants = sum([word.count("h") for word in words])
            combined_number = number_of_long_vowels + number_of_aspirated_consonants
            return "number of long vowels and aspirated consonants in lexicon: {} (long vowels = {}, " \
                   "aspirated consonants = {})".format(combined_number, number_of_long_vowels,
                                                       number_of_aspirated_consonants)
        self.simulated_annealing = SimulatedAnnealing(self.traversable_hypothesis,
                                                      target_lexicon_indicator_function=function,
                                                      sample_target_lexicon=["ad", "id", "ta", "ti"],
                                                      sample_target_outputs=["a:d", "i:d", "tha", "thi"])
Example 40
 def setUp(self):
     self._set_up_logging()
     configurations["CORPUS_DUPLICATION_FACTOR"] = 1
     self.feature_table = FeatureTable.load(get_feature_table_fixture("td_kg_ai_aspiration_and_lengthening_feature_table.json"))
     corpus = Corpus.load(get_corpus_fixture("td_kg_ai_aspiration_and_lengthening_400_enhanced_corpus.txt"))
     self.constraint_set = ConstraintSet.load(get_constraint_set_fixture("faith_constraint_set.json"),
                                               self.feature_table)
     self.lexicon = Lexicon(corpus.get_words(), self.feature_table)
     self.grammar = Grammar(self.feature_table, self.constraint_set, self.lexicon)
     self.data = corpus.get_words()
     self.traversable_hypothesis = TraversableGrammarHypothesis(self.grammar, self.data)
     def desired_lexicon_indicator_function(words):
         number_of_long_vowels = sum([word.count(":") for word in words])
         number_of_aspirated_consonants = sum([word.count("h") for word in words])
         combined_number = number_of_long_vowels + number_of_aspirated_consonants
         return "number of long vowels and aspirated consonants in lexicon: {} (long vowels = {}, " \
                "aspirated consonants = {})".format(combined_number, number_of_long_vowels,
                                                    number_of_aspirated_consonants)
     self.simulated_annealing = SimulatedAnnealing(self.traversable_hypothesis,
                                                   target_lexicon_indicator_function=desired_lexicon_indicator_function,
                                                   sample_target_lexicon=["ti", "ta", "ki", "ka", "id", "ad", "ig", "ag"],
                                                   sample_target_outputs=["thi", "tha", "khi", "kha", "i:d", "a:d", "i:g", "a:g"],
                                                   target_energy=3333)
Example 41
File: main.py Project: 2easy/ctsp
#dists1 = gen_dists([(0,0), (-4,0), (1,4), (3,4), (3,2), (8,-6), (-2,-3), (-4,-3)])
dists1 = gen_dists(gen_random(128))

#print(dists1)

# example usage of greedy algorithm sovle
greedy = Greedy(dists1)
g_sol = greedy.solve()
print("GREEDY SOLUTION:\t\t\t"+str(g_sol)+" ---> " + str(greedy.cost))

# example usage of SimpleTabooSearch find solution at once
sts = SimpleTabooSearch(dists1)
sts_sol = sts.solve()
print("SIMPLE TABOO SEARCH SOLUTION:\t\t"+ str(sts_sol)+ " ---> "+ str(sts.cost))

san = SimulatedAnnealing(dists1,g_sol)
san_sol = san.solve()
print("GREEDY - SIMULATED ANNEALING SOLUTION:\t\t"+ str(san_sol)+ " ---> "+ str(san.cost))

san2 = SimulatedAnnealing(dists1,sts_sol)
san_sol2 = san2.solve()
print("SIMPLE TABOO - SIMULATED ANNEALING SOLUTION:\t\t"+ str(san_sol2)+ " ---> "+ str(san2.cost))

aco = AntColony(dists1,g_sol)
aco_sol = aco.solve()
print("ANT COLONY SOLUTION:\t\t"+ str(aco_sol)+ " ---> "+ str(aco.cost))
# example usage of SimpleTabooSearch step by step with access to intermediate values
#sts1 = SimpleTabooSearch(dists1)
#while sts1.step():
#    #print(str(sts1.i)+" step --best-->\t"+str(sts1.best))
#    print(str(sts1.i)+" step --current-->\t"+str(sts1.current))
Example 42
    file_log_handler.setFormatter(file_log_formatter)
    logger.addHandler(file_log_handler)

    feature_tables_dir_path = join(dir_name, "tests/fixtures/feature_tables")
    constraint_sets_dir_path = join(dir_name, "tests/fixtures/constraint_sets")

    feature_table_file_path = join(feature_tables_dir_path,
                                   current_simulation.feature_table_file_name)
    feature_table = FeatureTable.load(feature_table_file_path)

    constraint_set_file_path = join(
        constraint_sets_dir_path, current_simulation.constraint_set_file_name)
    constraint_set = ConstraintSet.load(constraint_set_file_path)

    corpus = Corpus(current_simulation.corpus)

    data = corpus.get_words()
    max_word_length_in_data = max([len(word) for word in data])
    lexicon = Lexicon(data, max_word_length_in_data)

    grammar = Grammar(constraint_set, lexicon)
    hypothesis = Hypothesis(grammar, data)

    if hasattr(current_simulation, "target_energy"):
        target_energy = current_simulation.target_energy
    else:
        target_energy = None

    simulated_annealing = SimulatedAnnealing(hypothesis, target_energy)
    simulated_annealing.run()
Example 43
        Argmax(Map(Function(Sum(HoleNode())), VarList('actions'))),
        Argmax(
            Map(Function(Sum(Map(Function(HoleNode()), NoneNode()))),
                VarList('actions'))),
        Argmax(
            Map(
                Function(
                    Sum(
                        Map(
                            Function(
                                Minus(Times(HoleNode(), HoleNode()),
                                      HoleNode())), NoneNode()))),
                VarList('actions'))),
    ]

    chosen = int(sys.argv[1])
    n_SA_iterations = 3000
    max_game_rounds = 500
    n_games = 1000
    init_temp = 1
    d = 1
    algo_name = 'HOLESA_' + str(chosen)
    start_SA = time.time()
    SA = SimulatedAnnealing(n_SA_iterations, max_game_rounds, n_games,
                            init_temp, d, algo_name)

    best_program, _ = SA.run(incomplete[chosen])
    end_SA = time.time() - start_SA
    print('Best program after SA - Time elapsed = ', end_SA)
    print(best_program.to_string())
Example 44
    for q_cities in num_cities:
        for i in range(q_cities):
            city = City()
            cities_list.append(city)

        if args.numtests == 1:
            show_window = True
        print('----------- SIMULATED ANNEALING WITH %d CITIES -----------' % q_cities)
        simulated_annealing_s = []
        for i in range(args.numtests):
            print('Test ', i + 1)
            if show_window:
                simulated_window = ManageGraph()
            else:
                simulated_window = None
            simulated_annealing_s.append(SimulatedAnnealing(simulated_window, cities_list, show_window=show_window))

        min_simulated.append(min(i.get_best_distance() for i in simulated_annealing_s))
        simulated_avg.append(sum(sa_best.get_best_distance() for sa_best in simulated_annealing_s) / float(len(simulated_annealing_s)))
        max_simulated.append(max(i.get_best_distance() for i in simulated_annealing_s))

        print('----------- GENETIC ALGORITHM WITH %d CITIES -----------' % q_cities)
        genetic_algorithm_s = []
        for i in range(args.numtests):
            print('Test ', i + 1)
            if show_window:
                genetic_window = ManageGraph()
            else:
                genetic_window = None
            genetic_algorithm_s.append(GeneticAlgorithm(genetic_window, cities_list, show_window=show_window))
Example 45
from board import Board
from simulated_annealing import SimulatedAnnealing
import time

if __name__ == '__main__':

    startTime = time.time()

    for i in range(0, 5):
        print("i: {}".format(i))
        board = Board()
        print("Queens:")
        print(board)
        SimulatedAnnealing(board).run()

    endTime = time.time()
    elapsedTime = (endTime - startTime)
    average = elapsedTime / 5  # five runs in the loop above
    print("Total time: {} ".format(elapsedTime))
    print("Average time: {}".format(average))
Example 46
    def setUp(self):
        configurations["CONSTRAINT_SET_MUTATION_WEIGHTS"] = {
            "insert_constraint": 1,
            "remove_constraint": 1,
            "demote_constraint": 1,
            "insert_feature_bundle_phonotactic_constraint": 1,
            "remove_feature_bundle_phonotactic_constraint": 1,
            "augment_feature_bundle": 0}

        configurations["CONSTRAINT_INSERTION_WEIGHTS"] = {
            "Dep": 1,
            "Max": 1,
            "Ident": 0,
            "Phonotactic": 1}

        configurations["LEXICON_MUTATION_WEIGHTS"] = {
            "insert_segment": 1,
            "delete_segment": 1,
            "change_segment": 0}


        configurations["INITIAL_TEMPERATURE"] = 100
        configurations["COOLING_PARAMETER"] = 0.999985
        configurations["INITIAL_NUMBER_OF_BUNDLES_IN_PHONOTACTIC_CONSTRAINT"] = 1
        configurations["MIN_FEATURE_BUNDLES_IN_PHONOTACTIC_CONSTRAINT"] = 1
        configurations["MAX_FEATURE_BUNDLES_IN_PHONOTACTIC_CONSTRAINT"] = float("INF")
        configurations["DATA_ENCODING_LENGTH_MULTIPLIER"] = 100
        configurations["MAX_NUMBER_OF_CONSTRAINTS_IN_CONSTRAINT_SET"] = float("INF")
        configurations["RESTRICTION_ON_ALPHABET"] = True

        configurations["DEBUG_LOGGING_INTERVAL"] = 50
        self.unit_tests_log_file_name = "../../logging/{}_td_kg_aiueo_aspiration_and_lengthening_400_INF_INF_{}.txt".format(platform.node(), simulation_number)
        self._set_up_logging()
        configurations["CORPUS_DUPLICATION_FACTOR"] = 1
        self.feature_table = FeatureTable.load(get_feature_table_fixture("td_kg_aiueo_aspiration_and_lengthening_feature_table.json"))
        corpus = Corpus.load(get_corpus_fixture("td_kg_aiueo_aspiration_and_lengthening_400_corpus.txt"))
        self.constraint_set = ConstraintSet.load(get_constraint_set_fixture("faith_constraint_set.json"),
                                                  self.feature_table)
        self.lexicon = Lexicon(corpus.get_words(), self.feature_table)
        self.grammar = Grammar(self.feature_table, self.constraint_set, self.lexicon)
        self.data = corpus.get_words()
        self.traversable_hypothesis = TraversableGrammarHypothesis(self.grammar, self.data)
        def desired_lexicon_indicator_function(words):
            number_of_long_vowels = sum([word.count(":") for word in words])
            number_of_aspirated_consonants = sum([word.count("h") for word in words])
            combined_number = number_of_long_vowels + number_of_aspirated_consonants
            return "number of long vowels and aspirated consonants in lexicon: {} (long vowels = {}, " \
                   "aspirated consonants = {})".format(combined_number, number_of_long_vowels,
                                                       number_of_aspirated_consonants)

        def convert_corpus_word_to_target_word(word):
            return word.replace('h', '').replace(':', '')

        target_energy = self.get_target_hypothesis_energy(self.feature_table, "td_kg_ai_aspiration_and_lengthening_target_constraint_set.json", corpus,
                                   convert_corpus_word_to_target_word_function=convert_corpus_word_to_target_word)
        #391689

        self.simulated_annealing = SimulatedAnnealing(self.traversable_hypothesis,
                                                      target_lexicon_indicator_function=desired_lexicon_indicator_function,
                                                      sample_target_lexicon=["ti", "ta", "ki", "ka", "id", "ad", "ig", "ag", "tu", "te"],
                                                      sample_target_outputs=["thi", "tha", "khi", "kha", "i:d", "a:d", "i:g", "a:g", "thu", "the"],
                                                      target_energy=target_energy)
Example 47
    # k and n_states are paired together in order to make a fair comparison
    # between algorithms, because IW with a low k returns a low number
    # of states.
    k_options = [3, 4, 5]
    k = k_options[int(sys.argv[4])]
    n_states_options = [94, 201, 622] # k=3 94, k=4 201, k=5 622, k=6 1244
    n_states = n_states_options[int(sys.argv[4])]

    n_games = 100
    init_temp = 1
    d = 1
    max_game_rounds = 500
    max_nodes = 100
    n_MC_simulations = 1000
    inner_SA = SimulatedAnnealing(n_SA_iterations, max_game_rounds, n_games, init_temp, d, False)
    outer_SA = SimulatedAnnealing(1000, max_game_rounds, n_games, init_temp, d, True)
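    # The trailing boolean presumably distinguishes the outer-level SA (True)
    # from the inner SA used as a subroutine (False); the constructor signature
    # is not shown in this snippet, so this is an inference from the names.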
    
    dsl = DSL()
    
    if search_type == 0:
        # IW
        tree = ParseTree(dsl=dsl, max_nodes=max_nodes, k=k, is_IW=True)
        search_algo = IteratedWidth(tree, n_states, k)
        suffix = f'IW_LS{LS_type}_SA{n_SA_iterations}_ST{n_states}_k{k}_GA{n_games}'
    elif search_type == 1:
        # BFS
        tree = ParseTree(dsl=dsl, max_nodes=max_nodes, k=k, is_IW=False)
        search_algo = BFS(tree, n_states)
        suffix = f'BFS_LS{LS_type}_SA{n_SA_iterations}_ST{n_states}_k{k}_GA{n_games}'
Example n. 48
class TestOtmlWithTAspiration(unittest.TestCase):
    def setUp(self):
        self._set_up_logging()
        configurations["CORPUS_DUPLICATION_FACTOR"] = 1
        self.feature_table = FeatureTable.load(get_feature_table_fixture("tk_aspiration_feature_table.json"))
        corpus = Corpus.load(get_corpus_fixture("tk_aspiration_corpus.txt"))
        self.constraint_set = ConstraintSet.load(get_constraint_set_fixture("faith_constraint_set.json"),
                                                  self.feature_table)
        self.lexicon = Lexicon(corpus.get_words(), self.feature_table)
        self.grammar = Grammar(self.feature_table, self.constraint_set, self.lexicon)
        self.data = corpus.get_words()
        self.traversable_hypothesis = TraversableGrammarHypothesis(self.grammar, self.data)
        def aspiration_indicator_function(words):
            number_of_aspirated_consonants = sum([word.count("h") for word in words])
            return "number of aspirated consonants = {}".format(number_of_aspirated_consonants)

        self.simulated_annealing = SimulatedAnnealing(self.traversable_hypothesis,
                                                      target_lexicon_indicator_function=aspiration_indicator_function,
                                                      sample_target_lexicon=["ti", "ta", "ki", "ka"],
                                                      sample_target_outputs=["thi", "tha", "khi", "kha"])


    run_test = True
    @unittest.skipUnless(run_test, "long running test skipped")
    def test_run(self):
        configurations["CONSTRAINT_SET_MUTATION_WEIGHTS"] = {
            "insert_constraint": 1,
            "remove_constraint": 1,
            "demote_constraint": 1,
            "insert_feature_bundle_phonotactic_constraint": 1,
            "remove_feature_bundle_phonotactic_constraint": 1,
            "augment_feature_bundle": 0}

        configurations["CONSTRAINT_INSERTION_WEIGHTS"] = {
            "Dep": 1,
            "Max": 1,
            "Ident": 0,
            "Phonotactic": 1}

        configurations["LEXICON_MUTATION_WEIGHTS"] = {
            "insert_segment": 1,
            "delete_segment": 1,
            "change_segment": 0}


        configurations["INITIAL_TEMPERATURE"] = 100
        configurations["COOLING_PARAMETER"] = 0.999985
        configurations["INITIAL_NUMBER_OF_BUNDLES_IN_PHONOTACTIC_CONSTRAINT"] = 1
        configurations["MIN_FEATURE_BUNDLES_IN_PHONOTACTIC_CONSTRAINT"] = 1
        configurations["MAX_FEATURE_BUNDLES_IN_PHONOTACTIC_CONSTRAINT"] = float("INF")
        configurations["DATA_ENCODING_LENGTH_MULTIPLIER"] = 100
        configurations["MAX_NUMBER_OF_CONSTRAINTS_IN_CONSTRAINT_SET"] = float("INF")
        configurations["RESTRICTION_ON_ALPHABET"] = True

        configurations["DEBUG_LOGGING_INTERVAL"] = 50

        number_of_steps_performed, hypothesis = self.simulated_annealing.run()

    def _set_up_logging(self):
        unit_tests_log_file_name = "../../logging/{}_tk_aspiration_INF_INF_{}.txt".format(platform.node(), simulation_number)

        if os.path.exists(unit_tests_log_file_name):
            raise ValueError("log name already exists")

        logger = logging.getLogger()
        logger.setLevel(logging.INFO)

        file_log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s', "%Y-%m-%d %H:%M:%S")
        dirname, filename = split(abspath(__file__))
        unit_tests_log_path = normpath(join(dirname, unit_tests_log_file_name))
        file_log_handler = logging.FileHandler(unit_tests_log_path, mode='w')
        file_log_handler.setFormatter(file_log_formatter)
        logger.addHandler(file_log_handler)
Example n. 49
def benchmark():
    REPEATS = 10
    SECONDS = [5, 10, 30, 60, 300, 1200]

    for seconds in SECONDS:
        v = 0
        time_s = datetime.now()
        for k in range(REPEATS):
            rs = RandomSearch(states, seconds, inc_support, dec_support)
            rs.run()
            v += rs.best_solution.value
        time_e = datetime.now()
        tt = (time_e - time_s).total_seconds()
        print_csv('Random Search', str(seconds), str(v / REPEATS),
                  str(tt / REPEATS))

    for seconds in SECONDS:
        v = 0
        time_s = datetime.now()
        for k in range(REPEATS):
            ls = LocalSearch(states, seconds, inc_support, dec_support)
            ls.run()
            v += ls.best_solution.value
        time_e = datetime.now()
        tt = (time_e - time_s).total_seconds()
        print_csv('Local Search', str(seconds), str(v / REPEATS),
                  str(tt / REPEATS))

    for seconds in SECONDS:
        for initial_cadence in [10, 25, 50]:
            for critical_event in [10, 25, 50]:
                v = 0
                time_s = datetime.now()
                for k in range(REPEATS):
                    ts = TabuSearch(states, seconds, initial_cadence,
                                    critical_event, inc_support, dec_support)
                    ts.run()
                    v += ts.best_solution.value
                time_e = datetime.now()
                tt = (time_e - time_s).total_seconds()
                print_csv('Tabu Search', str(seconds), str(initial_cadence),
                          str(critical_event), str(v / REPEATS),
                          str(tt / REPEATS))

    for crossover in ['pmx', 'ox']:
        for mutate in ['transposition', 'insertion', 'inversion']:
            for seconds in SECONDS:
                for population_size in [10, 25, 50]:
                    v = 0
                    time_s = datetime.now()
                    for k in range(REPEATS):
                        ga = GeneticAlgorithm(states, seconds, population_size,
                                              crossover, mutate, inc_support,
                                              dec_support)
                        ga.run()
                        v += ga.best_solution.value
                    time_e = datetime.now()
                    tt = (time_e - time_s).total_seconds()
                    print_csv('Genetic Algorithm ' + crossover + ' ' + mutate,
                              str(seconds), str(population_size),
                              str(v / REPEATS), str(tt / REPEATS))

    for initial_temperature in [100, 500, 1000]:
        for cooling_coefficient in [0.9, 0.99, 0.999, 0.9999]:
            for minimal_temperature in [
                    initial_temperature * 0.25, initial_temperature * 0.5,
                    initial_temperature * 0.75
            ]:
                v = 0
                time_s = datetime.now()
                for k in range(REPEATS):
                    sa = SimulatedAnnealing(states, initial_temperature,
                                            cooling_coefficient,
                                            minimal_temperature, inc_support,
                                            dec_support)
                    sa.run()
                    v += sa.best_solution.value
                time_e = datetime.now()
                tt = (time_e - time_s).total_seconds()
                print_csv('Simulated Annealing', str(initial_temperature),
                          str(cooling_coefficient), str(minimal_temperature),
                          str(v / REPEATS), str(tt / REPEATS))
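
# A possible refactor (a sketch, not part of the original benchmark): the
# repeated time-and-average pattern above can be factored into one helper.
# `make_solver` is a hypothetical zero-argument factory returning any of the
# solver objects used above (each exposes .run() and .best_solution.value).
def average_over_repeats(make_solver, repeats=10):
    total_value = 0
    start = datetime.now()
    for _ in range(repeats):
        solver = make_solver()
        solver.run()
        total_value += solver.best_solution.value
    elapsed = (datetime.now() - start).total_seconds()
    return total_value / repeats, elapsed / repeats

# e.g.: average_over_repeats(lambda: LocalSearch(states, 30, inc_support, dec_support))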
Example n. 50
    max_temp = settings.options.max_temp  # initial temperature
    min_temp = settings.options.min_temp  # final temperature
    eq_iter = settings.options.iters  # iterations at same temperature
    temp_change = settings.options.temp_rate  # temperature reduction factor
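    # Sketch of the schedule these four options describe (an assumption; the
    # solver's actual loop is not shown): run eq_iter moves at each
    # temperature, then multiply by temp_change until min_temp is reached.
    #
    #   temp = max_temp
    #   while temp > min_temp:
    #       for _ in range(eq_iter):
    #           pass  # propose and accept/reject a move at this temperature
    #       temp *= temp_change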
    # execute the algorithm
    filename = settings.options.data_filename
    if not filename:
        raise UserWarning("Enter the data filename through the -df flag.")

    constraints = [
        SameRoomAndTime(),
        SameStudents(),
        DifferentTime(),
        Precedence(),
        SameInstructor(),
        Spread()
    ]
    soft_constraints = [RoomCost(), PreferenceRoom(), StudentsTakingClass()]

    solver = SimulatedAnnealing(TtDataReader(), H1())
    plotter = TtPlotter()
    plotter.register_constraints(constraints)
    plotter.register_soft_constraints(soft_constraints)
    solver.register_plotter(plotter)
    solver.register_constraints(constraints)
    solver.register_soft_constraints(soft_constraints)
    best_solution = solver.solve(filename)

    # print(best_solution)
    # print(best_solution.cost())
Example n. 51
import codecs

from grammar.feature_table import FeatureTable
from grammar.constraint_set import ConstraintSet
from grammar.grammar import Grammar
from grammar.lexicon import Lexicon  # import path assumed from the package layout above
from traversable_grammar_hypothesis import TraversableGrammarHypothesis
from corpus import Corpus
from simulated_annealing import SimulatedAnnealing
from otml_configuration_manager import OtmlConfigurationManager  # import path assumed

# configuration_file_path is not defined in the original snippet; the path below
# is a hypothetical placeholder following the fixture layout used here.
configuration_file_path = "/Users/iddoberger/Documents/MercurialRepositories/otml/source/tests/fixtures/configurations/otml_configuration.json"

feature_table_file_path = "/Users/iddoberger/Documents/MercurialRepositories/otml/source/tests/fixtures/feature_table/french_deletion_feature_table.json"
corpus_file_path = "/Users/iddoberger/Documents/MercurialRepositories/otml/source/tests/fixtures/corpora/french_deletion_corpus.txt"
constraint_set_file_path = "/Users/iddoberger/Documents/MercurialRepositories/otml/source/tests/fixtures/constraint_sets/french_deletion_constraint_set.json"


configuration_json_str = codecs.open(configuration_file_path, 'r').read()
OtmlConfigurationManager(configuration_json_str)


feature_table = FeatureTable.load(feature_table_file_path)
corpus = Corpus.load(corpus_file_path)
constraint_set = ConstraintSet.load(constraint_set_file_path, feature_table)
lexicon = Lexicon(corpus.get_words(), feature_table)
grammar = Grammar(feature_table, constraint_set, lexicon)
data = corpus.get_words()
traversable_hypothesis = TraversableGrammarHypothesis(grammar, data)
simulated_annealing = SimulatedAnnealing(traversable_hypothesis)
simulated_annealing.run()
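
# The test examples above unpack SimulatedAnnealing.run() as
# (number_of_steps_performed, hypothesis), so the winning grammar can be
# captured here as well:
#
#   number_of_steps_performed, hypothesis = simulated_annealing.run()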