Example #1
from pathos.pools import ProcessPool
import Generator
import analysisutil
from Languages import LanguageLoader
from Languages.InformativenessMeasurer import SimMaxInformativenessMeasurer, InformativenessMeasurer

analysisutil.add_argument('inf_strat')
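# analysisutil.init() parses the arguments added above and returns the
# parsed args, the run setup, and a file utility for reading/writing results.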
(args, setup, file_util) = analysisutil.init()

# Load the candidate languages and generate the universe of simplified models
# against which they are evaluated.
languages = LanguageLoader.load_languages(file_util)

universe = Generator.generate_simplified_models(args.model_size)

# Pick the informativeness measure requested on the command line.
if args.inf_strat == 'exact':
    informativeness_measurer = InformativenessMeasurer(len(universe))
elif args.inf_strat == 'simmax':
    informativeness_measurer = SimMaxInformativenessMeasurer(universe)
else:
    raise ValueError('{0} is not a valid informativeness strategy.'.format(
        args.inf_strat))

# Score every language's informativeness in parallel.
with ProcessPool(nodes=args.processes) as pool:
    informativeness = pool.map(informativeness_measurer, languages)

file_util.dump_dill(informativeness,
                    'informativeness_{0}.dill'.format(args.inf_strat))
Example #2
import analysisutil
import plotnine as pn
from Languages import LanguageLoader

analysisutil.add_argument('complexity_strategy')
analysisutil.add_argument('informativeness_strategy')
analysisutil.add_argument('--include_natural', dest='include_natural_languages', default=False, action='store_true')

(args, setup, file_util) = analysisutil.init()

data = LanguageLoader.load_pandas_table(file_util, args.complexity_strategy, args.informativeness_strategy)

# Scatter plot of complexity against communicative cost.
fig = (pn.ggplot(data, pn.aes('comm_cost', 'complexity'))
       + pn.geom_point())

# if args.include_natural_languages:
#     lex_informativeness = [inf for (ex, inf) in file_util.load_dill('informativeness_{0}_{1}.dill'.format(setup.name, args.informativeness_strategy))]
#     lex_complexity = [com for (ex, com) in file_util.load_dill('complexity_{0}_{1}.dill'.format(setup.name, args.complexity_strategy))]
#     plt.plot(lex_informativeness, lex_complexity, 'o', color='green')

print(fig)

file_util.save_plotnine(fig, '{0}_{1}_plot'.format(
    args.complexity_strategy,
    args.informativeness_strategy
))
Example #3
import analysisutil
from Languages import LanguageLoader
import pandas as pd
import numpy as np
from numpy.linalg import norm
import pygmo
import plotnine as pn

analysisutil.add_argument('table_name')
analysisutil.add_argument('pareto')
analysisutil.add_argument('run_names', nargs='*')
(args, setup, file_util) = analysisutil.init(use_base_dir=True)

pareto_data = LanguageLoader.load_pandas_table(
    file_util.get_sub_file_util(args.pareto),
    'wordcomplexity',
    'simmax',
    include_monotonicity=False)

run_df = pd.DataFrame({
    'complexity': [],
    'comm_cost': [],
    'run': [],
    'monotonicity': []
})
run_dfs = {}
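# Load each run's language table and tag its rows with the run name.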
for run_name in args.run_names:
    df = LanguageLoader.load_pandas_table(
        file_util.get_sub_file_util(run_name), 'wordcomplexity', 'simmax')
    run_dfs[run_name] = df
    df['run'] = run_name
Example #4
import plotnine as pn
import analysisutil
from Languages import LanguageLoader
import statsmodels.formula.api as smf

analysisutil.add_argument('comp_strat')
analysisutil.add_argument('inf_strat')

(args, setup, file_util) = analysisutil.init()

data = LanguageLoader.load_pandas_table(file_util, args.comp_strat, args.inf_strat)

# OLS models relating monotonicity to communicative cost and complexity,
# separately, jointly, and with an interaction term.
model_inf = smf.ols(formula='monotonicity ~ comm_cost', data=data)
model_comp = smf.ols(formula='monotonicity ~ complexity', data=data)
model_both = smf.ols(formula='monotonicity ~ complexity + comm_cost', data=data)
model = smf.ols(formula='monotonicity ~ complexity * comm_cost', data=data)

result_inf = model_inf.fit()
result_comp = model_comp.fit()
result_both = model_both.fit()
result = model.fit()

print(result_inf.summary())
print(result_comp.summary())
print(result_both.summary())
print(result.summary())
# plt = (pn.ggplot(data, pn.aes('comm_cost', 'complexity'))
#        + pn.geom_point()
#        + pn.stat_smooth(method='lm'))

# print(plt)
Example #5
import random
from copy import copy

import pygmo
from pathos.multiprocessing import ProcessPool

import Generator
import analysisutil
from Languages import LanguageLoader, LanguageGenerator
from Languages.ComplexityMeasurer import SumComplexityMeasurer
from Languages.InformativenessMeasurer import SimMaxInformativenessMeasurer

analysisutil.add_argument('lang_size', type=int)
analysisutil.add_argument('sample_size', type=int)
analysisutil.add_argument('generations', type=int)
analysisutil.add_argument('-m', '--max_mutations', type=int, default=1)
(args, setup, file_util) = analysisutil.init()

expressions = LanguageLoader.load_all_evaluated_expressions(file_util)

# Sample an initial population of languages of the requested size.
languages = LanguageGenerator.generate_sampled(
    expressions, args.lang_size, int(args.sample_size / args.lang_size))

universe = Generator.generate_simplified_models(args.model_size)


def remove(language):
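    # Return a copy of the language with one randomly chosen expression removed.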
    language = copy(language)
    index = random.randint(0, len(language) - 1)
    language.pop(index)
    return language


def add(language):