import sys
sys.path.append('src')

import math
import yaml
from pickle_wrapper import unpickle
from mcmc_norm_learning.robot_task_new import task, robot  # robot's import path is assumed
from mcmc_norm_learning.environment import position
# get_prob and get_log_prob are project helpers; their import is not shown here

with open("params.yaml", 'r') as fd:
    params = yaml.safe_load(fd)

true_norm_exp = params['true_norm']['exp']
num_observations = params['num_observations']
obs_data_set = params['obs_data_set']

colour_specific = params['colour_specific']
shape_specific = params['shape_specific']
target_area_parts = params['target_area'].replace(' ', '').split(';')
target_area_part0 = position(*map(float, target_area_parts[0].split(',')))
target_area_part1 = position(*map(float, target_area_parts[1].split(',')))
target_area = (target_area_part0, target_area_part1)
print(target_area_part0.coordinates())
print(target_area_part1.coordinates())
the_task = task(colour_specific, shape_specific, target_area)
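
# The "x0,y0;x1,y1" parsing above recurs in every example here; a minimal
# helper sketch (parse_target_area is a hypothetical name, not project API):
def parse_target_area(spec):
    """Parse e.g. "0.1,0.2; 0.6,0.8" into a (position, position) pair."""
    corners = spec.replace(' ', '').split(';')
    return tuple(position(*map(float, corner.split(','))) for corner in corners)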

env = unpickle('data/env.pickle')

rob = robot(the_task, env)
actionable = rob.all_actionable()
print(actionable)

true_norm_prior = get_prob("NORMS", true_norm_exp)
true_norm_log_prior = get_log_prob("NORMS", true_norm_exp)

# abs_tol is needed here (value assumed): with the default rel_tol alone,
# math.isclose(x, 0) is only true when x is exactly 0
if math.isclose(true_norm_prior, 0, abs_tol=1e-9):
    print(
        f'Stopping! True norm expression has near-zero prior ({true_norm_prior})\n'
    )
# elif (num_observations == 100 and obs_data_set == 1):
#     shutil.copyfile('data/default_observations.pickle', 'data/observations.pickle')
else:
    pass  # the body of this branch is not included in the snippet

Example 2

import sys
sys.path.append('src')

import yaml
import pickle
from pickle_wrapper import pickle_it, unpickle
from mcmc_norm_learning.algorithm_1_v4 import over_dispersed_starting_points
from mcmc_norm_learning.robot_task_new import task
from mcmc_norm_learning.environment import position
import pprint

with open("params.yaml", 'r') as fd:
    params = yaml.safe_load(fd)

colour_specific = params['colour_specific']
shape_specific = params['shape_specific']
target_area_parts = params['target_area'].replace(' ', '').split(';')
target_area_part0 = position(*map(float, target_area_parts[0].split(',')))
target_area_part1 = position(*map(float, target_area_parts[1].split(',')))
target_area = (target_area_part0, target_area_part1)
true_expression = params['true_norm']['exp']

env = unpickle('data/env.pickle')
the_task = task(colour_specific, shape_specific, target_area)
obs = unpickle('data/observations.pickle')
num_starts = 5
odsp, info = over_dispersed_starting_points(num_starts, obs, env, the_task)
print(info)
print('E0s:\n')
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(odsp)
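
# Note: over-dispersed starting points are the prerequisite for the R-hat
# convergence checks in the later examples; chains started close together can
# look converged even when the sampler has not explored the posterior.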
Example 3

import sys
sys.path.append('src')
from pickle_wrapper import unpickle, pickle_it
from mcmc_norm_learning.mcmc_convergence import prepare_sequences
from mcmc_norm_learning.algorithm_1_v4 import to_tuple
from collections import defaultdict
import itertools
import operator
import yaml
import math

with open("params.yaml", 'r') as fd:
    params = yaml.safe_load(fd)
m = params['m']
num_chains = math.ceil(m/2)
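# e.g. m = 7 gives num_chains = 4: presumably each full chain is later split in
# half, the usual convention for the split-R-hat diagnostic.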

chains_and_log_posteriors = unpickle('data/chains_and_log_posteriors.pickle')[:num_chains]

with open('metrics/chain_info_no_warmup.txt', 'w') as chain_info:
    chain_info.write(f'Number of chains: {len(chains_and_log_posteriors)}\n')
    chain_length = len(chains_and_log_posteriors[0]["chain"])
    print(f'Chain length: {chain_length}')
    chain_info.write(f'Length of each chain: {chain_length}\n')
    
    exps_in_chains = [None]*len(chains_and_log_posteriors)
    for i, chain_data in enumerate(chains_and_log_posteriors):  # consider skipping the first few entries
        chain = chain_data['chain'][:chain_length // 2]
        log_posteriors = chain_data['log_posteriors'][:chain_length // 2]
        exp_lp_pairs = list(zip(chain, log_posteriors))

        exps_in_chains[i] = set(map(to_tuple, chain))
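
    # A sketch (not in the original, which breaks off here) of one way to use
    # exps_in_chains: report how many expressions every chain visited in common.
    common_exps = set.intersection(*exps_in_chains)
    chain_info.write(f'Expressions visited by all chains: {len(common_exps)}\n')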
Example 4
import sys
sys.path.append('src')

from pickle_wrapper import unpickle, pickle_it
from mcmc_norm_learning.mcmc_convergence import prepare_sequences, calculate_R
from operator import itemgetter
import yaml

with open("params.yaml", 'r') as fd:
    params = yaml.safe_load(fd)
rhat_step_size = params['rhat_step_size']


def conv_test(chains):
    convergence_result, _ = calculate_R(chains, rhat_step_size)  # split sequences are not used here
    print(convergence_result.to_string())


chains_and_log_likelihoods = unpickle('data/chains_and_log_likelihoods.pickle')
chains = list(map(itemgetter('chain'), chains_and_log_likelihoods))
conv_test(prepare_sequences(chains, warmup=False))
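
# As a rule of thumb, sequences are treated as converged once every R-hat value
# drops below roughly 1.1 (stricter analyses use 1.01); conv_test above only
# prints the table, so the threshold check is left to the reader.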
Example 5

import sys
sys.path.append('src')

import argparse
import yaml
from pickle_wrapper import unpickle
from mcmc_norm_learning.robot_task_new import task
from mcmc_norm_learning.environment import position
# Likelihood is a project helper whose import is not shown in this snippet

parser = argparse.ArgumentParser()
# The argument name and nargs are assumed from the args.norm_types usage below
parser.add_argument('norm_types', nargs='*',
                    help='enter none, true and/or top, separated by spaces')

args = parser.parse_args()
requested_expressions = args.norm_types

with open("params.yaml", 'r') as fd:
    params = yaml.safe_load(fd)

colour_specific = params['colour_specific']
shape_specific = params['shape_specific']
target_area_parts = params['target_area'].replace(' ', '').split(';')
target_area_part0 = position(*map(float, target_area_parts[0].split(',')))
target_area_part1 = position(*map(float, target_area_parts[1].split(',')))
target_area = (target_area_part0, target_area_part1)

the_task = task(colour_specific, shape_specific, target_area)
obs = unpickle('data/observations.pickle')
env = unpickle('data/env.pickle')

if 'none' in requested_expressions:
    ll_no_norm = Likelihood([], the_task, obs, env)
    print(f"Log likelihood of observations given no norm: {ll_no_norm}\n")
if 'true' in requested_expressions:
    true_expression = params['true_norm']['exp']
    ll_true_exp = Likelihood(true_expression, the_task, obs, env)
    print(f"Log likelihood of observations given true exp.: {ll_true_exp}\n")
if 'top' in requested_expressions:
    top_norm = unpickle('data/top_norms.pickle')[0]
    ll_top_norm = Likelihood(top_norm, the_task, obs, env)
    print(f"Log likelihood of observations given top norm.: {ll_top_norm}\n")
Example 6
import sys
sys.path.append('src')

import pickle
from pickle_wrapper import unpickle, pickle_it

observations = unpickle('data/observations.pickle')
print(f'Length of observations is {len(observations)}')
Example 7

import sys
sys.path.append('src')

import operator
import yaml
import ast
import pandas as pd
from collections import Counter
from pickle_wrapper import unpickle, pickle_it
from mcmc_norm_learning.algorithm_1_v4 import to_tuple
from mcmc_norm_learning.robot_task_new import task
from mcmc_norm_learning.environment import position

with open("params.yaml", 'r') as fd:
    params = yaml.safe_load(fd)

colour_specific = params['colour_specific']
shape_specific = params['shape_specific']
target_area_parts = params['target_area'].replace(' ', '').split(';')
target_area_part0 = position(*map(float, target_area_parts[0].split(',')))
target_area_part1 = position(*map(float, target_area_parts[1].split(',')))
target_area = (target_area_part0, target_area_part1)
the_task = task(colour_specific, shape_specific, target_area)
true_expression = params['true_norm']['exp']

posterior_sample = unpickle('data/posterior.pickle')
learned_expressions = Counter(map(to_tuple, posterior_sample))
n = 5
top_norms_with_freq = learned_expressions.most_common(n)
top_norms = list(map(operator.itemgetter(0), top_norms_with_freq))
pickle_it(top_norms, 'data/top_norms.pickle')
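# Counter.most_common(n) returns (element, count) pairs in descending count
# order, e.g. Counter('aab').most_common(1) == [('a', 2)], so top_norms holds
# the n most frequently sampled expressions.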

env = unpickle('data/env.pickle')

exp_posterior_df = pd.read_csv('metrics/chain_posteriors.csv',
                               usecols=['expression', 'log_posterior'])
exp_posterior_df = exp_posterior_df.drop_duplicates()
exp_posterior_df['post_rank'] = exp_posterior_df['log_posterior'].rank(
    method='dense', ascending=False)
exp_posterior_df.sort_values('post_rank', inplace=True)
exp_posterior_df['expression'] = exp_posterior_df['expression'].transform(
    ast.literal_eval)  # argument assumed: the ast import suggests expressions are stored as Python literals