Example No. 1
def main(args):
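    # Load the generated languages, pick a complexity measurer based on
    # --comp_strat ('wordcount', 'wordcomplexity', or 'special'), measure each
    # language in parallel, and dump the results to a dill file.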
    setup = experiment_setups.parse(args.setup)
    dirname = fileutil.run_dir(args.dest_dir, setup.name,
                               args.max_quantifier_length, args.model_size,
                               args.name)
    file_util = FileUtil(dirname)

    languages = language_loader.load_languages(file_util)
    if args.comp_strat == 'wordcount':
        complexity_measurer = WordCountComplexityMeasurer(args.max_words)
    elif args.comp_strat == 'wordcomplexity':
        complexity_measurer = SumComplexityMeasurer(args.max_words, 1)
    elif args.comp_strat == 'special':
        complexity_measurer = SpecialComplexityMeasurer(args.max_words)
    else:
        raise ValueError('{0} is not a valid complexity strategy.'.format(
            args.comp_strat))

    with ProcessPool(nodes=args.processes) as pool:
        complexity = pool.map(complexity_measurer, languages)

    file_util.dump_dill(complexity,
                        'complexity_{0}.dill'.format(args.comp_strat))

    print("measure_complexity.py finished.")
Example No. 2
def main(args):
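    # Load the generated languages, pick an informativeness measurer based on
    # --inf_strat ('exact' or 'simmax'), measure each language in parallel,
    # and dump the results to a dill file.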
    setup = experiment_setups.parse(args.setup)
    dirname = fileutil.run_dir(args.dest_dir, setup.name,
                               args.max_quantifier_length, args.model_size,
                               args.name)
    file_util = FileUtil(dirname)

    languages = language_loader.load_languages(file_util)

    universe = generator.generate_simplified_models(args.model_size)

    if args.inf_strat == 'exact':
        informativeness_measurer = InformativenessMeasurer(len(universe))
    elif args.inf_strat == 'simmax':
        informativeness_measurer = SimMaxInformativenessMeasurer(universe)
    else:
        raise ValueError('{0} is not a valid informativeness strategy.'.format(
            args.inf_strat))

    with ProcessPool(nodes=args.processes) as pool:
        informativeness = pool.map(informativeness_measurer, languages)

    file_util.dump_dill(informativeness,
                        'informativeness_{0}.dill'.format(args.inf_strat))

    print("measure_informativeness.py finished.")
Example No. 3
def main(args):
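    # Measure the complexity of every generated expression in parallel using
    # the setup's expression complexity measurer and store the results.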

    setup = experiment_setups.parse(args.setup)
    processes = setup.processes
    max_quantifier_length = setup.max_quantifier_length
    model_size = setup.model_size

    file_util = FileUtil(
        fileutil.base_dir(setup.dest_dir, setup.name, max_quantifier_length,
                          model_size))

    processpool = ProcessPool(nodes=processes)

    expressions = file_util.load_dill('expressions.dill')

    complexities = processpool.map(
        lambda ex: setup.measure_expression_complexity(
            ex, max_quantifier_length), expressions)

    file_util.dump_dill(complexities, 'expression_complexities.dill')

    processpool.close()
    processpool.join()

    print('Complexity Measuring finished.')
Example No. 4
def main(args):
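    # Build languages from (optionally index-filtered) expressions, either
    # exhaustively or by sampling, and store their indices and string forms.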
    setup = experiment_setups.parse(args.setup)

    file_util_out = FileUtil(
        fileutil.run_dir(setup.dest_dir, setup.name,
                         setup.max_quantifier_length, setup.model_size,
                         setup.random_name))
    file_util_in = FileUtil(
        fileutil.base_dir(setup.dest_dir, setup.name,
                          setup.max_quantifier_length, setup.model_size))

    unevaluated_expressions = file_util_in.load_dill('expressions.dill')

    if args.indices is not None:
        index_sets = []
        for indices_name in args.indices:
            index_sets.append(
                set(
                    file_util_in.load_dill(
                        '{0}_expression_indices.dill'.format(indices_name))))
        indices = set.intersection(*index_sets)
    else:
        indices = range(len(unevaluated_expressions))

    if args.sample is None:
        languages = generate_all(indices, args.max_words, args.fixedwordcount)
    else:
        languages = generate_sampled(indices, setup.max_words, args.sample)

    file_util_out.dump_dill(languages, 'language_indices.dill')
    file_util_out.save_stringlist([list(map(str, lang)) for lang in languages],
                                  'languages.txt')

    print("languages.py finished.")
Example No. 5
def main(args):
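    # Measure the monotonicity of each loaded language in parallel and dump
    # the results to a dill file.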
    setup = experiment_setups.parse(args.setup)
    dirname = fileutil.run_dir(setup.dest_dir, setup.name, setup.max_quantifier_length, setup.model_size, args.name)
    file_util = FileUtil(dirname)
    languages = language_loader.load_languages(file_util)
    
    with ProcessPool(nodes=setup.processes) as process_pool:
        monotonicities = process_pool.map(measure_monotonicity, languages)

    file_util.dump_dill(monotonicities, 'monotonicity.dill')

    print("measure_monotonicity.py finished.")
Example No. 6
def main(args):
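    # Evolve a Pareto front of languages: repeatedly measure complexity and
    # informativeness, keep the non-dominated languages, and mutate them to
    # form the next generation; finally store the front and its measurements.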
    setup = experiment_setups.parse(args.setup)
    # use_base_dir = False
    dirname = fileutil.run_dir(setup.dest_dir, setup.name,
                               setup.max_quantifier_length, setup.model_size,
                               setup.pareto_name)
    file_util = FileUtil(dirname)

    expressions = language_loader.load_all_evaluated_expressions(file_util)
    languages_0 = language_generator.generate_sampled(
        expressions, args.lang_size, int(args.sample_size / args.lang_size))
    universe = generator.generate_simplified_models(setup.model_size)

    measure_complexity = SumComplexityMeasurer(args.lang_size, 1)
    measure_informativeness = SimMaxInformativenessMeasurer(universe)
    pool = ProcessPool(nodes=setup.processes)
    languages = languages_0  # languages are iteratively updated in the loop below

    for gen in range(args.generations):
        print('GENERATION {0}'.format(gen))
        print('measuring')
        complexity = pool.map(measure_complexity, languages)
        informativeness = pool.map(measure_informativeness, languages)

        measurements = [(1 - inf, comp)
                        for inf, comp in zip(informativeness, complexity)]

        print('calculating dominating')
        dominating_indices = pygmo.non_dominated_front_2d(measurements)
        dominating_languages = [languages[i] for i in dominating_indices]

        print('mutating')
        languages = sample_mutated(dominating_languages, args.sample_size,
                                   expressions)

    language_indices = [[e.index for e in lang]
                        for lang in dominating_languages]
    dominating_complexity = [complexity[i] for i in dominating_indices]
    dominating_informativeness = [
        informativeness[i] for i in dominating_indices
    ]

    file_util.dump_dill(dominating_complexity,
                        'complexity_wordcomplexity.dill')
    file_util.dump_dill(dominating_informativeness,
                        'informativeness_simmax.dill')
    file_util.dump_dill(language_indices, 'language_indices.dill')
    file_util.save_stringlist([list(map(str, lang)) for lang in languages],
                              'languages.txt')

    print("generate_evolutionary.py finished.")
Example No. 7
def main(args):
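    # Generate all expressions up to the maximum quantifier length over the
    # model universe and store the expressions and their meanings as dill files.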

    processes = args.processes
    setup = experiment_setups.parse(args.setup)
    max_quantifier_length = args.max_quantifier_length
    model_size = args.model_size
    
    file_util = FileUtil(fileutil.base_dir(args.dest_dir, setup.name, max_quantifier_length, model_size))
    
    
    universe = setup.generate_models(model_size)
    
    folderName = "{0}/{1}_length={2}_size={3}".format(args.dest_dir, setup.name,
                                                      max_quantifier_length, model_size)
    os.makedirs("{0}".format(folderName), exist_ok=True)
    
    processpool = ProcessPool(nodes=processes)
    expression_generator = ExpressionGenerator(setup, model_size, universe, processpool)
    (generated_expressions_dict, expressions_by_meaning) = \
          expression_generator.generate_all_expressions(max_quantifier_length)
    
    print("{0} expressions!".format(len(expressions_by_meaning[bool].values())))
    
    file_util.dump_dill(expressions_by_meaning[bool], 'generated_expressions.dill')
    file_util.dump_dill(list(expressions_by_meaning[bool].values()), 'expressions.dill')
    file_util.dump_dill(list(expressions_by_meaning[bool].keys()), 'meanings.dill')
    
    processpool.close()
    processpool.join()
    
    print('Expression generation finished.')
Example No. 8
def main(args):
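    # Build languages from randomly generated ("coin flip") expression
    # meanings, measure their complexity and informativeness, and store the
    # results.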
    setup = experiment_setups.parse(args.setup)
    dirname = fileutil.run_dir(args.dest_dir, setup.name, args.max_quantifier_length, args.model_size, args.name)
    file_util = FileUtil(dirname)
    
    universe = generator.generate_simplified_models(args.model_size)

    FakeEvaluatedExpression = namedtuple('FakeEvaluatedExpression', 'meaning')
    expressions = [
        FakeEvaluatedExpression(tuple(random.choice([True, False]) for _ in universe))
        for _ in range(args.random_size)
    ]

    if args.sample is None:
        print("generate_all() called.")
        languages = generate_all(expressions, args.max_words, args.fixedwordcount)
    else:
        print("generate_sampled() called.")
        languages = generate_sampled(expressions, args.max_words, args.sample)

    complexity_measurer = WordCountComplexityMeasurer(args.max_words)
    informativeness_measurer_exact = InformativenessMeasurer(len(universe))
    informativeness_measurer_simmax = SimMaxInformativenessMeasurer(universe)

    with ProcessPool(nodes=args.processes) as pool:
        complexity = pool.map(complexity_measurer, languages)
        informativeness_exact = pool.map(informativeness_measurer_exact, languages)
        informativeness_simmax = pool.map(informativeness_measurer_simmax, languages)

    file_util.dump_dill(complexity, 'complexity_wordcount.dill')
    file_util.dump_dill(informativeness_exact, 'informativeness_exact.dill')
    file_util.dump_dill(informativeness_simmax, 'informativeness_simmax.dill')
    
    print("coinflip_languages.py finished.")
Example No. 9
def main(args):
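    # Measure the communicative cost of every generated meaning in parallel
    # and store the expression costs.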
    processes = args.processes
    setup = experiment_setups.parse(args.setup)
    max_quantifier_length = args.max_quantifier_length
    model_size = args.model_size

    file_util = FileUtil(fileutil.base_dir(args.dest_dir, setup.name, max_quantifier_length, model_size))

    processpool = ProcessPool(nodes=processes)

    meanings = file_util.load_dill('meanings.dill')

    costs = processpool.map(measurer.measure_communicative_cost, meanings)

    file_util.dump_dill(costs, 'expression_costs.dill')

    processpool.close()
    processpool.join()

    print('Informativeness Measuring finished.')
Example No. 10
def main(args):
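    # Parse the natural languages found in the setup's language directory,
    # measure their informativeness and complexity with the selected
    # strategies, and store the results keyed by language name.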
    setup = experiment_setups.parse(args.setup)

    file_util = FileUtil(
        fileutil.run_dir(args.dest_dir, setup.name, args.max_quantifier_length,
                         args.model_size, args.name))

    universe = generator.generate_simplified_models(args.model_size)
    monotonicity_measure = MonotonicityMeasurer(universe, args.model_size, 'A')
    conservativity_measurer = ConservativityMeasurer(universe, args.model_size,
                                                     'A')
    #special_complexity_measurer = SpecialComplexityMeasurer(args.max_words)
    special_complexity_measurer = SpecialComplexityMeasurer(
        setup.operators, args.model_size)

    if args.inf_strat == 'exact':
        informativeness_measurer = InformativenessMeasurer(len(universe))
    elif args.inf_strat == 'simmax':
        informativeness_measurer = SimMaxInformativenessMeasurer(universe)
    else:
        raise ValueError('{0} is not a valid informativeness strategy.'.format(
            args.inf_strat))

    if args.comp_strat == 'wordcount':
        complexity_measurer = WordCountComplexityMeasurer(args.max_words)
    elif args.comp_strat == 'wordcomplexity':
        complexity_measurer = SumComplexityMeasurer(args.max_words, 1)
    else:
        raise ValueError('{0} is not a valid complexity strategy.'.format(
            args.comp_strat))

    languages_dir = setup.natural_languages_dirname

    languages = []
    language_names = []

    for filename in os.listdir(languages_dir):
        if filename.endswith('.json'):
            language_file = os.path.join(languages_dir, filename)
            language = parse_language(language_file, setup, universe,
                                      monotonicity_measure,
                                      conservativity_measurer,
                                      special_complexity_measurer)
            languages.append(language)
            language_names.append(filename[:-5])  # Name without extension

    # Materialize the (name, score) pairs so they can be serialized with dill.
    informativeness = list(
        zip(language_names, map(informativeness_measurer, languages)))
    complexity = list(zip(language_names, map(complexity_measurer, languages)))

    file_util.dump_dill(
        informativeness,
        'informativeness_{0}_{1}.dill'.format(setup.name, args.inf_strat))
    file_util.dump_dill(
        complexity, 'complexity_{0}_{1}.dill'.format(setup.name,
                                                     args.comp_strat))

    print("measure_lexicalized.py finished")
Example No. 11
def init(use_base_dir=False):
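    # Parse the --setup argument, load the experiment setup, and build the
    # FileUtil for either the base directory or the run directory.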

    parser = argparse.ArgumentParser(description="Analyze")
    parser.add_argument('--setup',
                        help='Path to the setup json file.',
                        required=True)
    args = parser.parse_args()

    setup = experiment_setups.parse(args.setup)

    dirname = (fileutil.base_dir(setup.dest_dir, setup.name,
                                 setup.max_quantifier_length, setup.model_size)
               if use_base_dir else
               fileutil.run_dir(setup.dest_dir, setup.name,
                                setup.max_quantifier_length, setup.model_size,
                                setup.name))
    file_util = FileUtil(dirname)
    return args, setup, file_util
Example No. 12
    def __init__(self, props):
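        # Populate the experiment setup from the parsed JSON properties:
        # names, generator/measurer callables, operators, sizes, strategies,
        # and the output directory handled by FileUtil.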
        self._props = props  # private
        # common setups
        self.name = props['name']
        self.pareto_name = props['pareto_name']
        self.natural_name = props['natural_name']
        self.random_name = props['random_name']

        self.lexical_quantifiers_filename = \
            path.join(path.dirname(props['setup_filename']), props['lexical_quantifiers_filename'])
        self.generate_models = locate(props['model_generator'])
        self.generate_primitives = locate(props['primitive_generator'])
        self.parse_primitive = locate(props['primitive_parser'])
        self.measure_expression_complexity = locate(
            props['expression_complexity_measurer'])
        self.measure_quantifier_complexity = locate(
            props['quantifier_complexity_measurer'])
        self.operators = {
            name: op.operators[name]
            for name in props['operators']
        }

        self.natural_languages_dirname = \
            path.join(path.dirname(props['setup_filename']), 'Languages/{0}'.format(props['name']))

        self.possible_input_types = [
            operator.inputTypes for operator in self.operators.values()
        ]

        #set up of quantifiers, sizes, etc
        self.max_quantifier_length = int(props['max_quantifier_length'])
        self.model_size = int(props['model_size'])
        self.processes = int(props['processes'])
        self.run_name = props['run_name']  # used by run_dir below when use_base_dir is false
        self.comp_strat = props['comp_strat']
        self.inf_strat = props['inf_strat']
        self.max_words = props['max_words']

        #set up of files
        self.dest_dir = props['dest_dir']
        self.use_base_dir = props['use_base_dir'].lower() == "true"

        self.dirname = (fileutil.base_dir(self.dest_dir, self.name,
                                          self.max_quantifier_length,
                                          self.model_size)
                        if self.use_base_dir else
                        fileutil.run_dir(self.dest_dir, self.name,
                                         self.max_quantifier_length,
                                         self.model_size, self.run_name))

        self.file_util = FileUtil(self.dirname)
Example No. 13
def main(args):
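    # Sample languages that mix natural and non-natural expression indices in
    # varying proportions, record the degree of naturalness of each language,
    # and store the indices, naturalness values, and string forms.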
    setup = experiment_setups.parse(args.setup)
    dirname = fileutil.run_dir(setup.dest_dir, setup.name,
                               setup.max_quantifier_length, setup.model_size,
                               setup.natural_name)
    file_util_out = FileUtil(dirname)
    file_util_in = file_util_out.get_base_file_util()

    natural_indices = set(
        file_util_in.load_dill('{0}_expression_indices.dill'.format(
            args.indices)))
    expressions = file_util_in.load_dill('expressions.dill')
    non_natural_indices = set(range(len(expressions))) - natural_indices

    language_indices = []
    naturalness = []

    for lang_size in range(1, setup.max_words + 1):
        for _ in range(args.sample):
            len_natural = random.randint(0, lang_size)
            len_random = lang_size - len_natural
            lang_random = next(
                random_combinations(non_natural_indices, len_random, 1))
            lang_natural = next(
                random_combinations(natural_indices, len_natural, 1))
            naturalness.append(len_natural / lang_size)
            language_indices.append(lang_random + lang_natural)

    file_util_out.dump_dill(language_indices, 'language_indices.dill')
    file_util_out.dump_dill(naturalness, 'naturalness.dill')
    file_util_out.save_stringlist([
        list(map(lambda i: str(expressions[i]), lang))
        for lang in language_indices
    ], 'languages.txt')

    print("sample_indexset_degrees.py finished.")
Example No. 14
def main(args):
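    # Measure the informativeness and complexity of each loaded language
    # using the strategies named in the setup and dump both result lists.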
    setup = experiment_setups.parse(args.setup)

    file_util = FileUtil(
        fileutil.run_dir(setup.dest_dir, setup.name,
                         setup.max_quantifier_length, setup.model_size,
                         args.name))

    languages = language_loader.load_languages(file_util)

    universe = generator.generate_simplified_models(setup.model_size)

    pool = ProcessPool(nodes=setup.processes)

    if setup.inf_strat == 'exact':
        informativeness_measurer = InformativenessMeasurer(len(universe))
    elif setup.inf_strat == 'simmax':
        informativeness_measurer = SimMaxInformativenessMeasurer(universe)
    else:
        raise ValueError('{0} is not a valid informativeness strategy.'.format(
            setup.inf_strat))

    if setup.comp_strat == 'wordcount':
        complexity_measurer = WordCountComplexityMeasurer(setup.max_words)
    elif setup.comp_strat == 'wordcomplexity':
        complexity_measurer = SumComplexityMeasurer(setup.max_words, 1)
    else:
        raise ValueError('{0} is not a valid complexity strategy.'.format(
            setup.comp_strat))

    informativeness = pool.map(informativeness_measurer, languages)
    complexity = pool.map(complexity_measurer, languages)

    file_util.dump_dill(informativeness,
                        'informativeness_{0}.dill'.format(setup.inf_strat))
    file_util.dump_dill(complexity,
                        'complexity_{0}.dill'.format(setup.comp_strat))

    print("measure.py finished.")