def main(args):
    """Build languages from random ("coin-flip") expression meanings, then
    measure their complexity and informativeness and dump the results.

    Reads sizes/strategies from ``args``; writes three .dill files into the
    run directory derived from the setup.
    """
    setup = experiment_setups.parse(args.setup)
    dirname = fileutil.run_dir(args.dest_dir, setup.name,
                               args.max_quantifier_length, args.model_size,
                               args.name)
    file_util = FileUtil(dirname)

    universe = generator.generate_simplified_models(args.model_size)

    # Fake "evaluated expressions" whose meanings are uniformly random
    # truth values — one independent coin flip per model in the universe.
    FakeEvaluatedExpression = namedtuple('FakeEvaluatedExpression', 'meaning')
    expressions = [
        FakeEvaluatedExpression(
            tuple(random.choice([True, False]) for _ in universe))
        for _ in range(args.random_size)
    ]

    if args.sample is None:
        print("generate_all() called.")
        languages = generate_all(expressions, args.max_words,
                                 args.fixedwordcount)
    else:
        print("generate_sampled() called.")
        languages = generate_sampled(expressions, args.max_words, args.sample)

    complexity_measurer = WordCountComplexityMeasurer(args.max_words)
    informativeness_measurer_exact = InformativenessMeasurer(len(universe))
    informativeness_measurer_simmax = SimMaxInformativenessMeasurer(universe)

    with ProcessPool(nodes=args.processes) as pool:
        complexity = pool.map(complexity_measurer, languages)
        informativeness_exact = pool.map(informativeness_measurer_exact,
                                         languages)
        informativeness_simmax = pool.map(informativeness_measurer_simmax,
                                          languages)

    file_util.dump_dill(complexity, 'complexity_wordcount.dill')
    file_util.dump_dill(informativeness_exact, 'informativeness_exact.dill')
    file_util.dump_dill(informativeness_simmax, 'informativeness_simmax.dill')

    print("coinflip_languages.py finished.")
# --- Example 2 (listing-separator artifact in original source) ---
def main(args):
    """Score every previously generated language for informativeness and
    dump the scores to ``informativeness_<strategy>.dill``.

    Raises ValueError when ``args.inf_strat`` names an unknown strategy.
    """
    setup = experiment_setups.parse(args.setup)
    file_util = FileUtil(
        fileutil.run_dir(args.dest_dir, setup.name,
                         args.max_quantifier_length, args.model_size,
                         args.name))

    languages = language_loader.load_languages(file_util)
    universe = generator.generate_simplified_models(args.model_size)

    # Select the measurer matching the requested strategy.
    if args.inf_strat == 'exact':
        informativeness_measurer = InformativenessMeasurer(len(universe))
    elif args.inf_strat == 'simmax':
        informativeness_measurer = SimMaxInformativenessMeasurer(universe)
    else:
        raise ValueError('{0} is not a valid informativeness strategy.'.format(
            args.inf_strat))

    with ProcessPool(nodes=args.processes) as worker_pool:
        informativeness = worker_pool.map(informativeness_measurer, languages)

    file_util.dump_dill(informativeness,
                        'informativeness_{0}.dill'.format(args.inf_strat))

    print("measure_informativeness.py finished.")
def main(args):
    """Measure informativeness and complexity of natural (lexicalized)
    languages read from per-language JSON files, and dump name-tagged
    score lists.

    Raises ValueError for an unknown informativeness or complexity strategy.
    """
    setup = experiment_setups.parse(args.setup)

    file_util = FileUtil(
        fileutil.run_dir(args.dest_dir, setup.name, args.max_quantifier_length,
                         args.model_size, args.name))

    universe = generator.generate_simplified_models(args.model_size)
    monotonicity_measure = MonotonicityMeasurer(universe, args.model_size, 'A')
    conservativity_measurer = ConservativityMeasurer(universe, args.model_size,
                                                     'A')
    special_complexity_measurer = SpecialComplexityMeasurer(
        setup.operators, args.model_size)

    if args.inf_strat == 'exact':
        informativeness_measurer = InformativenessMeasurer(len(universe))
    elif args.inf_strat == 'simmax':
        informativeness_measurer = SimMaxInformativenessMeasurer(universe)
    else:
        raise ValueError('{0} is not a valid informativeness strategy.'.format(
            args.inf_strat))

    if args.comp_strat == 'wordcount':
        complexity_measurer = WordCountComplexityMeasurer(args.max_words)
    elif args.comp_strat == 'wordcomplexity':
        complexity_measurer = SumComplexityMeasurer(args.max_words, 1)
    else:
        raise ValueError('{0} is not a valid complexity strategy.'.format(
            args.comp_strat))

    languages_dir = setup.natural_languages_dirname

    languages = []
    language_names = []

    for filename in os.listdir(languages_dir):
        if filename.endswith('.json'):
            language_file = os.path.join(languages_dir, filename)
            language = parse_language(language_file, setup, universe,
                                      monotonicity_measure,
                                      conservativity_measurer,
                                      special_complexity_measurer)
            languages.append(language)
            language_names.append(filename[:-5])  # Name without '.json'

    # Materialize to lists before dumping: zip() returns a one-shot
    # iterator in Python 3, which would serialize as an already-consumed
    # (useless) object rather than the (name, score) pairs.
    informativeness = list(
        zip(language_names, map(informativeness_measurer, languages)))
    complexity = list(zip(language_names, map(complexity_measurer, languages)))

    file_util.dump_dill(
        informativeness,
        'informativeness_{0}_{1}.dill'.format(setup.name, args.inf_strat))
    file_util.dump_dill(
        complexity, 'complexity_{0}_{1}.dill'.format(setup.name,
                                                     args.comp_strat))

    print("measure_lexicalized.py finished")
def main(args):
    """Evolve sampled languages toward the complexity/informativeness
    Pareto front and dump the final front's measurements.

    Each generation: measure all languages, keep the non-dominated front,
    then mutate it back up to ``args.sample_size`` languages.

    Raises ValueError when ``args.generations`` < 1, since the post-loop
    code reads variables bound inside the loop (they would otherwise be
    undefined and trigger a NameError).
    """
    setup = experiment_setups.parse(args.setup)
    dirname = fileutil.run_dir(setup.dest_dir, setup.name,
                               setup.max_quantifier_length, setup.model_size,
                               setup.pareto_name)
    file_util = FileUtil(dirname)

    expressions = language_loader.load_all_evaluated_expressions(file_util)
    languages_0 = language_generator.generate_sampled(
        expressions, args.lang_size, int(args.sample_size / args.lang_size))
    universe = generator.generate_simplified_models(setup.model_size)

    measure_complexity = SumComplexityMeasurer(args.lang_size, 1)
    measure_informativeness = SimMaxInformativenessMeasurer(universe)
    pool = ProcessPool(nodes=setup.processes)
    languages = languages_0  # iteratively replaced in the loop below

    if args.generations < 1:
        raise ValueError('generations must be at least 1.')

    for gen in range(args.generations):
        print('GENERATION {0}'.format(gen))
        print('measuring')
        complexity = pool.map(measure_complexity, languages)
        informativeness = pool.map(measure_informativeness, languages)

        # pygmo minimizes both objectives, so informativeness is negated
        # (as 1 - inf) to turn "maximize informativeness" into a minimum.
        measurements = [(1 - inf, comp)
                        for inf, comp in zip(informativeness, complexity)]

        print('calculating dominating')
        dominating_indices = pygmo.non_dominated_front_2d(measurements)
        dominating_languages = [languages[i] for i in dominating_indices]

        print('mutating')
        languages = sample_mutated(dominating_languages, args.sample_size,
                                   expressions)

    # All of the following read the LAST generation's front (computed
    # before the final mutation step).
    language_indices = [[e.index for e in lang]
                        for lang in dominating_languages]
    dominating_complexity = [complexity[i] for i in dominating_indices]
    dominating_informativeness = [
        informativeness[i] for i in dominating_indices
    ]

    file_util.dump_dill(dominating_complexity,
                        'complexity_wordcomplexity.dill')
    file_util.dump_dill(dominating_informativeness,
                        'informativeness_simmax.dill')
    file_util.dump_dill(language_indices, 'language_indices.dill')
    file_util.save_stringlist([list(map(str, lang)) for lang in languages],
                              'languages.txt')

    print("generate_evolutionary.py finished.")
# --- Example 5 (listing-separator artifact in original source) ---
def main():
    """Measure conservativity of each stored meaning with respect to both
    quantifier arguments and dump per-argument and max scores."""
    args, setup, file_util = analysisutil.init(use_base_dir=True)

    meanings = file_util.load_dill('meanings.dill')
    universe = generator.generate_simplified_models(setup.model_size)

    # One measurer per quantifier argument.
    measurer_for_a = ConservativityMeasurer(universe, setup.model_size, 'A')
    measurer_for_b = ConservativityMeasurer(universe, setup.model_size, 'B')

    with ProcessPool(nodes=setup.processes) as pool:
        cons_a = pool.map(measurer_for_a, meanings)
        cons_b = pool.map(measurer_for_b, meanings)
        # Element-wise maximum over the two arguments.
        cons_max = pool.map(max, cons_a, cons_b)

    file_util.dump_dill(cons_a, 'conservativities_a.dill')
    file_util.dump_dill(cons_b, 'conservativities_b.dill')
    file_util.dump_dill(cons_max, 'conservativities_max.dill')

    print("measure_expression_conservativity.py finished.")
# --- Example 6 (listing-separator artifact in original source) ---
def main():
    """Measure up/down monotonicity of each stored meaning for both
    quantifier arguments; dump the four raw score lists plus a combined
    score (mean over arguments of the per-argument best direction)."""
    args, setup, file_util = analysisutil.init(use_base_dir=True)

    meanings = file_util.load_dill('meanings.dill')
    universe = generator.generate_simplified_models(setup.model_size)

    # Four measurers: {A, B} x {upward, downward}.
    up_a = MonotonicityMeasurer(universe, setup.model_size, 'A')
    up_b = MonotonicityMeasurer(universe, setup.model_size, 'B')
    down_a = MonotonicityMeasurer(universe, setup.model_size, 'A', down=True)
    down_b = MonotonicityMeasurer(universe, setup.model_size, 'B', down=True)

    with ProcessPool(nodes=setup.processes) as pool:
        mon_a_up = pool.map(up_a, meanings)
        mon_b_up = pool.map(up_b, meanings)
        mon_a_down = pool.map(down_a, meanings)
        mon_b_down = pool.map(down_b, meanings)
        # Best direction per argument, then average across arguments.
        mon_a_best = pool.map(max, mon_a_up, mon_a_down)
        mon_b_best = pool.map(max, mon_b_up, mon_b_down)
        mon_combined = pool.map(lambda x, y: (x + y) / 2, mon_a_best,
                                mon_b_best)

    file_util.dump_dill(mon_a_up, 'monotonicities_a_up.dill')
    file_util.dump_dill(mon_a_down, 'monotonicities_a_down.dill')
    file_util.dump_dill(mon_b_up, 'monotonicities_b_up.dill')
    file_util.dump_dill(mon_b_down, 'monotonicities_b_down.dill')
    file_util.dump_dill(mon_combined, 'monotonicities_max.dill')

    print("measure_expression_monotonicity.py finished.")
# --- Example 7 (listing-separator artifact in original source) ---
def main(args):
    """Measure informativeness and complexity of stored languages using
    the strategies named in the setup, and dump both score lists.

    Raises ValueError for an unknown informativeness or complexity strategy.
    """
    setup = experiment_setups.parse(args.setup)

    file_util = FileUtil(
        fileutil.run_dir(setup.dest_dir, setup.name,
                         setup.max_quantifier_length, setup.model_size,
                         args.name))

    languages = language_loader.load_languages(file_util)

    universe = generator.generate_simplified_models(setup.model_size)

    if setup.inf_strat == 'exact':
        informativeness_measurer = InformativenessMeasurer(len(universe))
    elif setup.inf_strat == 'simmax':
        informativeness_measurer = SimMaxInformativenessMeasurer(universe)
    else:
        raise ValueError('{0} is not a valid informativeness strategy.'.format(
            setup.inf_strat))

    if setup.comp_strat == 'wordcount':
        complexity_measurer = WordCountComplexityMeasurer(setup.max_words)
    elif setup.comp_strat == 'wordcomplexity':
        complexity_measurer = SumComplexityMeasurer(setup.max_words, 1)
    else:
        raise ValueError('{0} is not a valid complexity strategy.'.format(
            setup.comp_strat))

    # Context manager (matching the sibling scripts) guarantees the worker
    # pool is shut down; it is also only created after strategy validation,
    # so an invalid strategy no longer leaks live worker processes.
    with ProcessPool(nodes=setup.processes) as pool:
        informativeness = pool.map(informativeness_measurer, languages)
        complexity = pool.map(complexity_measurer, languages)

    file_util.dump_dill(informativeness,
                        'informativeness_{0}.dill'.format(setup.inf_strat))
    file_util.dump_dill(complexity,
                        'complexity_{0}.dill'.format(setup.comp_strat))

    print("measure.py finished.")
def main():
    """For each natural-language quantifier spec, find an already-generated
    expression with an identical meaning and dump the matching indices.

    Specs are read from ``natural_expressions/<setup name>.json`` next to
    the setup file; specs with no meaning-equivalent generated expression
    are reported and skipped.
    """
    args, setup, file_util = analysisutil.init(use_base_dir=True)

    expressions = file_util.load_dill('expressions.dill')
    meanings = file_util.load_dill('meanings.dill')
    universe = generator.generate_simplified_models(setup.model_size)

    filename = os.path.join(os.path.dirname(args.setup),
                            'natural_expressions/{0}.json'.format(setup.name))

    with open(filename, 'r') as spec_file:
        specs = json.load(spec_file)

    natural_expressions = []
    for spec in specs:
        natural_expressions.extend(
            parser.parse_expression_options(spec, setup.model_size))

    natural_meanings = map(generator.MeaningCalculator(universe),
                           natural_expressions)

    natural_expression_indices = []

    for i, natural_meaning in enumerate(natural_meanings):
        try:
            # Inner index renamed (was also 'i') so it no longer shadows
            # the enumerate index used in the except branch below.
            existing_index = next(j for j, meaning in enumerate(meanings)
                                  if meaning == natural_meaning)
            natural_expression_indices.append(existing_index)
        except StopIteration:
            print('No existing quantifier equivalent to {0} found'.format(
                natural_expressions[i]))

    print([str(expressions[i]) for i in natural_expression_indices])
    file_util.dump_dill(natural_expression_indices,
                        'natural_expression_indices.dill')
    print("generate_natural_expressions.py finished.")