Example #1
import threading
import tkinter as tk
from queue import Queue

# Imports reconstructed for this snippet; Node and Application are
# project-local classes not shown here.
import jsonpickle.ext.numpy as jp_numpy
import rospy


def main():
    # Configure the jsonpickle numpy handlers
    jp_numpy.register_handlers()

    # Create trajectory queue
    trajectory_queue = Queue()

    # Create ROS node
    node = Node(trajectory_queue)
    node_thread = threading.Thread(target=node.Run)
    node_thread.start()

    # Create graphics root
    root = tk.Tk()

    # Create custom graphics application
    application = Application(root, trajectory_queue)

    # Start. Blocks until main window closes.
    root.mainloop()

    # If window closed, kill rospy
    rospy.signal_shutdown('Quit')

    # Wait for node thread
    node_thread.join()
Example #2
def json_pickle(file_name: Text, obj: Any) -> None:
    """Pickle an object to a file using json."""
    import jsonpickle.ext.numpy as jsonpickle_numpy
    import jsonpickle

    jsonpickle_numpy.register_handlers()

    io_utils.write_text_file(jsonpickle.dumps(obj), file_name)
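A minimal round-trip sketch of the pattern shared by Examples #2-#5 (standalone, not taken from any of the projects above): register the numpy handlers once, and objects containing numpy arrays survive a dumps/loads cycle.

import jsonpickle
import jsonpickle.ext.numpy as jsonpickle_numpy
import numpy as np

jsonpickle_numpy.register_handlers()

obj = {"weights": np.arange(6, dtype=np.float64).reshape(2, 3)}
encoded = jsonpickle.dumps(obj)      # a plain JSON string
decoded = jsonpickle.loads(encoded)  # the array is restored as np.ndarray
assert np.array_equal(decoded["weights"], obj["weights"])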
Example #3
File: __init__.py Project: zzBBc/rasa
def json_unpickle(file_name: Text) -> Any:
    """Unpickle an object from file using json."""
    import jsonpickle.ext.numpy as jsonpickle_numpy
    import jsonpickle

    jsonpickle_numpy.register_handlers()

    with open(file_name, "r", encoding="utf-8") as f:
        return jsonpickle.loads(f.read())
Example #4
def json_unpickle(file_name: Text) -> Any:
    """Unpickle an object from file using json."""
    import jsonpickle.ext.numpy as jsonpickle_numpy
    import jsonpickle

    jsonpickle_numpy.register_handlers()

    file_content = io_utils.read_file(file_name)
    return jsonpickle.loads(file_content)
Example #5
File: __init__.py Project: zzBBc/rasa
def json_pickle(file_name: Text, obj: Any) -> None:
    """Pickle an object to a file using json."""
    import jsonpickle.ext.numpy as jsonpickle_numpy
    import jsonpickle

    jsonpickle_numpy.register_handlers()

    with open(file_name, "w", encoding="utf-8") as f:
        f.write(jsonpickle.dumps(obj))
Example #6
def parse_jsonpickle(db_entry):
    # Imports reconstructed for this snippet; json_util is bson's JSON codec
    # (shipped with pymongo), used here to serialize MongoDB-specific types.
    import json
    import jsonpickle
    import jsonpickle.ext.numpy as jsonpickle_numpy
    from bson import json_util

    jsonpickle_numpy.register_handlers()
    try:
        parsed = jsonpickle.loads(json.dumps(db_entry,
                                             default=json_util.default),
                                  keys=False)
    except IndexError:
        parsed = db_entry
    return parsed
Example #7
 def __encode_as_json(cls, neuroml_document):
     neuroml_document = cls.__sanitize_doc(neuroml_document)
     from jsonpickle import encode as json_encode
     try:
         # Enable encoding of numpy arrays with recent jsonpickle versions
         import jsonpickle.ext.numpy as jsonpickle_numpy
         jsonpickle_numpy.register_handlers()
     except ImportError:
         pass  # older version of jsonpickle
     encoded = json_encode(neuroml_document)
     return encoded
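A decode counterpart in the same defensive style (a sketch; decode_from_json is hypothetical, not part of the project above). Without the numpy handlers registered, arrays decode to plain nested lists rather than np.ndarray.

import jsonpickle

def decode_from_json(encoded):
    try:
        # Enable decoding of numpy arrays with recent jsonpickle versions
        import jsonpickle.ext.numpy as jsonpickle_numpy
        jsonpickle_numpy.register_handlers()
    except ImportError:
        pass  # older jsonpickle: arrays come back as lists
    return jsonpickle.decode(encoded)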
Example #8
File: io.py Project: zuiwanting/rasa
def json_pickle(file_name: Union[Text, Path], obj: Any) -> None:
    """Pickle an object to a file using json.

    Args:
        file_name: the file to store the object to
        obj: the object to store
    """
    import jsonpickle.ext.numpy as jsonpickle_numpy
    import jsonpickle

    jsonpickle_numpy.register_handlers()

    write_text_file(jsonpickle.dumps(obj), file_name)
Example #9
def run_fitting(folder):

    samples = []
    tendencies = [5]  #1, 3, 5, 10, 30, 50, 100
    for tendency in tendencies:
        for trans in [99]:
            for prob in [75]:
                for train in [100]:
                    print(tendency, trans)

                    run_name = "h" + str(tendency) + "_t" + str(
                        trans) + "_p" + str(prob) + "_train" + str(
                            train) + ".json"
                    fname = os.path.join(folder, run_name)

                    jsonpickle_numpy.register_handlers()

                    with open(fname, 'r') as infile:
                        data = json.load(infile)

                    # "pickle" is jsonpickle here (the source module imports
                    # "jsonpickle as pickle"); stdlib pickle has no decode()
                    worlds_old = pickle.decode(data)

                    test_trials = list(range(0, 50)) + list(range(train, 150))

                    inferrer = infer.Inferrer(worlds_old[:20],
                                              0.01,
                                              1.,
                                              test_trials=test_trials)
                    # print(1./inferrer.sample_space)
                    # print(inferrer.likelihood.mean(axis=0))
                    # plt.figure()
                    # plt.plot(inferrer.likelihood.mean(axis=0), '.')
                    # plt.show()

                    inferrer.run_single_inference(ndraws=15000,
                                                  nburn=5000,
                                                  cores=4)
                    samples.append(inferrer.samples)

                    fname = os.path.join(folder,
                                         run_name[:-5] + "_samples.json")

                    jsonpickle_numpy.register_handlers()
                    pickled = pickle.encode(
                        [samples[-1], inferrer.sample_space])
                    with open(fname, 'w') as outfile:
                        json.dump(pickled, outfile)

                    pickled = 0

                    gc.collect()
Example #10
File: io.py Project: zuiwanting/rasa
def json_unpickle(file_name: Union[Text, Path]) -> Any:
    """Unpickle an object from file using json.

    Args:
        file_name: the file to load the object from

    Returns: the object
    """
    import jsonpickle.ext.numpy as jsonpickle_numpy
    import jsonpickle

    jsonpickle_numpy.register_handlers()

    file_content = read_file(file_name)
    return jsonpickle.loads(file_content)
Example #11
File: io.py Project: praneethgb/rasa
def json_unpickle(file_name: Union[Text, Path],
                  encode_non_string_keys: bool = False) -> Any:
    """Unpickle an object from file using json.

    Args:
        file_name: the file to load the object from
        encode_non_string_keys: If set to `True` then jsonpickle will encode non-string
          dictionary keys instead of coercing them into strings via `repr()`.

    Returns: the object
    """
    import jsonpickle.ext.numpy as jsonpickle_numpy
    import jsonpickle

    jsonpickle_numpy.register_handlers()

    file_content = rasa.shared.utils.io.read_file(file_name)
    return jsonpickle.loads(file_content, keys=encode_non_string_keys)
Example #12
File: io.py Project: praneethgb/rasa
def json_pickle(file_name: Union[Text, Path],
                obj: Any,
                encode_non_string_keys: bool = False) -> None:
    """Pickle an object to a file using json.

    Args:
        file_name: the file to store the object to
        obj: the object to store
        encode_non_string_keys: If set to `True` then jsonpickle will encode non-string
          dictionary keys instead of coercing them into strings via `repr()`.
    """
    import jsonpickle.ext.numpy as jsonpickle_numpy
    import jsonpickle

    jsonpickle_numpy.register_handlers()

    rasa.shared.utils.io.write_text_file(
        jsonpickle.dumps(obj, keys=encode_non_string_keys), file_name)
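A quick demonstration of the encode_non_string_keys flag documented above (a standalone sketch): jsonpickle's keys=True mode preserves non-string dictionary keys across the round trip, where plain JSON would coerce them to strings.

import jsonpickle

d = {1: "one", 2: "two"}
s = jsonpickle.dumps(d, keys=True)
restored = jsonpickle.loads(s, keys=True)
assert restored == d                      # integer keys preserved
assert 1 in restored and "1" not in restored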
Example #13
def run_renewal_simulations(repetitions, utility, avg, T, ns, na, nr, nc,
                            folder):

    n_training = 1

    Rho = np.zeros((trials, nr, ns))

    Rho[:] = generate_bandit_timeseries_training(trials * 2, nr, ns, nb,
                                                 n_training)[:trials]

    for tendency in [
            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100
    ]:
        for trans in [99]:
            print(tendency, trans)
            worlds = []
            par_list = [trans / 100., avg, Rho]

            run_name = "h" + str(tendency) + "_t" + str(
                trans) + "_p90_train100.json"
            fname = os.path.join(folder, run_name)

            jsonpickle_numpy.register_handlers()

            with open(fname, 'r') as infile:
                data = json.load(infile)

            worlds_old = pickle.decode(data)

            repetitions = len(worlds_old)

            for i in range(repetitions):

                w_old = worlds_old[i]
                worlds.append(run_agent(par_list, w_old))

            check_name = "check_" + "h" + str(tendency) + "_t" + str(
                trans) + "_p90_train100.json"
            fname = os.path.join(folder, check_name)

            jsonpickle_numpy.register_handlers()
            pickled = pickle.encode(worlds)
            with open(fname, 'w') as outfile:
                json.dump(pickled, outfile)
Example #14
def run_training_duration_simulations(repetitions, utility, avg, T, ns, na, nr, nc, folder):

    # set training durations. the longer ones may simulate for a couple of
    # hours when many repetitions are chosen. Uncomment at your own risk.
    for trials_training in [56, 100, 177, 316, 562, 1000, 1778, 3162]:#, 5623, 10000, 17782, 31622, 56234, 10000]:

        n_test = 100
        trials = trials_training + n_test

        Rho = np.zeros((trials, nr, ns))

        for tendency in [1,10,100]:
            for trans in [99]:#[100,99,98,97,96,95,94]:#,93,92,91,90]:
                for prob in [90]:
                    print(trials_training, tendency, trans, prob)

                    Rho[:] = generate_bandit_timeseries_habit(trials_training, nr, ns, n_test,p=prob/100.)

                    plt.figure()
                    plt.plot(Rho[:,2,2])
                    plt.plot(Rho[:,1,1])
                    plt.show()

                    worlds = []
                    learn_pol = tendency
                    parameters = [learn_pol, trans/100., avg, Rho, utility]

                    for i in range(repetitions):
                        worlds.append(run_agent(parameters, trials, T, ns, na, nr, nc))

                    run_name = "h"+str(int(learn_pol))+"_t"+str(trans)+"_p"+str(prob)+"_train"+str(trials_training)+".json"
                    fname = os.path.join(folder, run_name)

                    jsonpickle_numpy.register_handlers()
                    pickled = pickle.encode(worlds)
                    with open(fname, 'w') as outfile:
                        json.dump(pickled, outfile)

                    pickled = 0
                    worlds = 0

                    gc.collect()
Example #15
def run_deval_simulations(repetitions, utility, avg, T, ns, na, nr, nc, folder):

    n_training = 1
    n_test = 100
    trials = 100 + n_test  # number of trials
    trials_training = trials - n_test

    Rho = np.zeros((trials, nr, ns))

    for tendency in [1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,100]:
        for trans in [99]:
            for prob in [90]:
                print(tendency, trans, prob)

                Rho[:] = generate_bandit_timeseries_habit(trials_training, nr, ns, n_test,p=prob/100.)

                plt.figure()
                plt.plot(Rho[:,2,2])
                plt.plot(Rho[:,1,1])
                plt.show()

                worlds = []
                learn_pol = tendency
                parameters = [learn_pol, trans/100., avg, Rho, utility]

                for i in range(repetitions):
                    worlds.append(run_agent(parameters, trials, T, ns, na, nr, nc, deval=True))

                run_name = "deval_h"+str(int(learn_pol))+"_t"+str(trans)+"_p"+str(prob)+"_train"+str(trials_training)+".json"
                fname = os.path.join(folder, run_name)

                jsonpickle_numpy.register_handlers()
                pickled = pickle.encode(worlds)
                with open(fname, 'w') as outfile:
                    json.dump(pickled, outfile)

                pickled = 0
                worlds = 0

                gc.collect()
Example #16
def run_rew_prob_simulations(repetitions, utility, avg, T, ns, na, nr, nc,
                             folder):

    n_training = 1
    n_test = 100
    trials = 100 + n_test  #number of trials
    trials_training = trials - n_test

    Rho = np.zeros((trials, nr, ns))

    for tendency in [
            1000
    ]:  #,3,5,10,30,50,100]: #1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,100]:
        for trans in [99]:  #[100,99,98,97,96,95,94]:
            for prob in [90]:  #[100,95,90,85,80,75,70,65,60]:
                print(tendency, trans, prob)

                Rho[:] = generate_bandit_timeseries_habit(trials_training,
                                                          nr,
                                                          ns,
                                                          n_test,
                                                          p=prob / 100.)

                plt.figure()
                plt.plot(Rho[:, 2, 2])
                plt.plot(Rho[:, 1, 1])
                plt.show()

                worlds = []
                learn_pol = tendency
                parameters = [learn_pol, trans / 100., avg, Rho, utility]

                ESS = 30

                for i in range(repetitions):
                    worlds.append(
                        run_agent(parameters,
                                  trials,
                                  T,
                                  ns,
                                  na,
                                  nr,
                                  nc,
                                  ESS=ESS))
                    w = worlds[-1]
                    plt.figure()
                    post_pol = np.einsum(
                        'tpc,tc->tp', w.agent.posterior_policies[:, 0, :, :],
                        w.agent.posterior_context[:, 0, :])
                    like = np.einsum('tpc,tc->tp', w.agent.likelihood[:,
                                                                      0, :, :],
                                     w.agent.posterior_context[:, 0, :])
                    plt.plot(post_pol[:, 1], '.')
                    plt.plot(like[:, 1], 'x')
                    plt.ylim([0, 1])
                    plt.show()
                    plt.figure()
                    plt.plot(w.agent.action_selection.RT[:, 0])
                    #plt.plot(Rho[:,2,2])
                    #plt.plot(Rho[:,1,1])
                    #plt.ylim([ESS*10,2000])
                    plt.ylim([0, 2000])
                    plt.savefig(
                        "uncertain_Dir_2pol_RT_timecourse_1000trials.svg"
                    )  #"ESS"+str(ESS)+"_h"+str(int(learn_pol))+"_RT_timecourse"+str(i)+".svg")#
                    plt.show()
                    plt.figure()
                    plt.hist(w.agent.action_selection.RT[:, 0])
                    plt.savefig(
                        "uncertain_Dir_2pol_RT_hist_1000trials.svg"
                    )  #"ESS"+str(ESS)+"_h"+str(int(learn_pol))+"_RT_hist"+str(i)+".svg")#
                    plt.show()

                run_name = "Dir_2pol_uncertain_1000trials.json"
                fname = os.path.join(folder, run_name)

                jsonpickle_numpy.register_handlers()
                pickled = pickle.encode(worlds)
                with open(fname, 'w') as outfile:
                    json.dump(pickled, outfile)

                pickled = 0
                worlds = 0

                gc.collect()
Example #17
from .mod.mod_handler import ModHandler
from .model.bar import BarMap
from .trader.account import MixedAccount
from .trader.global_var import GlobalVars
from .trader.strategy_context import StrategyContext
from .utils import create_custom_exception, run_with_user_log_disabled
from .utils.exception import CustomException, is_user_exc, patch_user_exc
from .utils.i18n import gettext as _
from .utils.logger import user_log, system_log, user_print, user_detail_log
from .utils.persisit_helper import CoreObjectsPersistProxy, PersistHelper
from .utils.result_aggregator import ResultAggregator
from .utils.scheduler import Scheduler
from .utils import scheduler as mod_scheduler
from .plot import plot_result

jsonpickle_numpy.register_handlers()


def _adjust_start_date(config, data_proxy):
    origin_start_date, origin_end_date = config.base.start_date, config.base.end_date
    start, end = data_proxy.available_data_range(config.base.frequency)

    # print(repr(start), repr(end))
    config.base.start_date = max(start, config.base.start_date)
    config.base.end_date = min(end, config.base.end_date)
    config.base.trading_calendar = data_proxy.get_trading_dates(
        config.base.start_date, config.base.end_date)
    if len(config.base.trading_calendar) == 0:
        # .format() arguments assumed from origin_start_date/origin_end_date above
        raise patch_user_exc(
            ValueError(
                _('There is no trading day between {start_date} and {end_date}.').format(
                    start_date=origin_start_date, end_date=origin_end_date)))
Example #18
def load_gridworld_simulations(repetitions):
    # prior over outcomes: encodes utility
    utility = []

    #ut = [0.5, 0.6, 0.7, 0.8, 0.9, 1-1e-3]
    u = 0.999
    utility = np.zeros(ns)
    utility[g1] = u
    utility[:g1] = (1 - u) / (ns - 1)
    utility[g1 + 1:] = (1 - u) / (ns - 1)

    # action selection: averaged or max selection
    avg = True
    tendencies = [1, 1000]
    context = True
    if context:
        name_str = "context_"
    else:
        name_str = ""
    # parameter list
    l = []

    # either observation uncertainty
    #l.append([True, False, False, avg, utility])

    # or state uncertainty
    #l.append([False, True, False, avg, utility])

    # or no uncertainty
    l.append([False, False, False, avg, context, utility])

    par_list = []

    for p in itertools.product(l, tendencies):
        par_list.append(p[0] + [p[1]])

    qs = [0.97, 0.97]
    # num_threads = 11
    # pool = Pool(num_threads)
    RTs = np.zeros((repetitions * trials * len(tendencies)))
    chosen_pols = np.zeros((repetitions * trials * len(tendencies)))
    correct = np.zeros((repetitions * trials * len(tendencies)))
    num_chosen = np.zeros((repetitions * trials * len(tendencies)))
    max_num = np.zeros((repetitions * trials * len(tendencies)))
    for n, pars in enumerate(par_list):
        h = pars[-1]
        q = qs[n]
        #worlds = []
        for i in range(repetitions):
            # worlds.append(run_agent(pars+[q]))
            # w = worlds[-1]

            fname = 'Dir_gridworld_' + name_str + str(
                repetitions) + 'repetitions_h' + str(h) + '_run' + str(
                    i) + '.json'
            #fname = os.path.join(folder, run_name)

            jsonpickle_numpy.register_handlers()

            with open(fname, 'r') as infile:
                data = json.load(infile)

            w = pickle.decode(data)
            #worlds.append(w)
            # index block for run i of parameter setting n
            base = n * repetitions * trials
            sl = slice(base + i * trials, base + (i + 1) * trials)

            RTs[sl] = w.agent.action_selection.RT[:, 0].copy()
            posterior_policies = np.einsum('tpc,tc->tp',
                                           w.agent.posterior_policies[:, -1],
                                           w.agent.posterior_context[:, -1])
            chosen_pols[sl] = np.argmax(posterior_policies, axis=1)
            chosen = chosen_pols[sl]
            n_bin = 10
            uniq = np.zeros((trials, 2))
            for k in range(trials // n_bin):
                un, counts = np.unique(chosen[k * n_bin:(k + 1) * n_bin],
                                       return_counts=True)
                uniq[k * n_bin:(k + 1) * n_bin] = [len(un), np.amax(counts)]
            # alternative: np.repeat(np.array([len(np.unique(chosen[k*n_bin:(k+1)*n_bin])) for k in range(trials//n_bin)]), n_bin)
            num_chosen[sl] = uniq[:, 0]
            max_num[sl] = uniq[:, 1]
            # first half of trials scored against goal g1, second half against g2
            correct[base + i * trials:
                    base + (i + 1) * trials - trials // 2] = (
                        w.environment.hidden_states[:trials // 2, -1] == g1
                    ).astype(int)
            correct[base + i * trials + trials // 2:
                    base + (i + 1) * trials] = (
                        w.environment.hidden_states[trials // 2:, -1] == g2
                    ).astype(int)

            w = 0
            pickled = 0
            gc.collect()

    runs = np.tile(
        np.tile(np.arange(repetitions), (trials, 1)).reshape(-1, order='f'),
        len(tendencies))
    times = np.tile(np.arange(trials), repetitions * len(tendencies))
    tend_idx = np.array([
        1. / tendencies[i // (repetitions * trials)]
        for i in range(repetitions * trials * len(tendencies))
    ])
    DataFrame = pd.DataFrame({
        'trial': times,
        'run': runs,
        'tendency_h': tend_idx,
        'RT': RTs,
        'correct': correct,
        'policy': chosen_pols,
        'num_chosen': num_chosen,
        'max_num': max_num
    })

    return DataFrame, name_str
Example #19
Input:
- audio clip (WAV/FLAC), 2 sec, 44100 Hz sampling rate, mono
- model files (architecture, weights)
Output: instrument family [brass, guitar, organ, piano, pipe, reed, strings]
"""

import argparse
import keras
from keras.utils import np_utils
import jsonpickle
import jsonpickle.ext.numpy as jsonpickle_numpy
import numpy as np
import pandas as pd
import soundfile as sf

jsonpickle_numpy.register_handlers()


class InstrumentClassifier():
    def __init__(self, model_dir):
        self.model_dir = model_dir

        def load_model(arch_file, weights_file):
            """
            Load Keras model from files - YAML architecture, HDF5 weights.
            """
            with open(arch_file) as f:
                model = keras.models.model_from_yaml(f.read())
            model.load_weights(weights_file)
            model.compile(loss='categorical_crossentropy', optimizer='adam',
                metrics=['accuracy'])
            return model
Example #20
def run_single_task_simulations(repetitions, folder):

    trials = 100
    T = 2
    ns = 6
    na = 2
    nr = 2
    nc = 1
    u = 0.99
    utility = np.array([1-u,u])
    f = 3.5
    random_draw = False
    pol_lambda = 0
    r_lambda = 0

    Rho = np.zeros((trials, nr, ns))

    for tendency in [1000]:#,3,5,10,30,50,100]: #1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,100]:
        for trans in [97,98,99]:#[80,85,90,91,92,93,94,95,96,97,98,99]:
            for unc in [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,2,3,4,5,6,8,10]:
                print(tendency, trans, unc)

                # Rho[:], contexts, states, state_trans, correct_choice, congruent, num_in_run = \
                #     switching_timeseries(trials, nr=nr, ns=ns, na=na, nc=nc, stable_length=5)

                # plt.figure()
                # plt.plot(Rho[:,2,2])
                # plt.plot(Rho[:,1,1])
                # plt.show()
                if random_draw==False:
                    prefix="acsel_"
                else:
                    prefix=""
                if pol_lambda > 0:
                    s = "alpha_"
                else:
                    s = ""
                if r_lambda > 0:
                    s += "beta_"
                else:
                    s += ""
                run_name = prefix+"single_prior"+s+"h"+str(int(tendency))+"_t"+str(trans)+"_u"+str(unc)+"_f"+str(f)+"_ut"+str(u)+"_test.json"
                fname = os.path.join(folder, run_name)

                jsonpickle_numpy.register_handlers()

                if run_name in os.listdir(folder):
                    with open(fname, 'r') as infile:
                        data = json.load(infile)

                    worlds = pickle.decode(data)
                    print(len(worlds))
                    num_w_old = len(worlds)
                else:
                    worlds = []
                    num_w_old = 0

                learn_pol = tendency
                parameters = [learn_pol, trans/100., Rho, utility, unc/100.]

                for i in range(num_w_old, repetitions):
                    Rho[:], contexts, states, state_trans, correct_choice, congruent, num_in_run = \
                    single_task_timeseries(trials, nr=nr, ns=ns, na=na, nc=nc)
                    worlds.append(run_agent(parameters, trials, T, ns, na, nr, nc,\
                                            f, contexts, states, \
                                            state_trans=state_trans, \
                                            correct_choice=correct_choice, \
                                            congruent=congruent, \
                                            num_in_run=num_in_run, \
                                            random_draw=random_draw, \
                                            pol_lambda=pol_lambda))
                    # w = worlds[-1]
                    # choices = w.actions[:,0]
                    # correct = (choices == w.environment.correct_choice).sum()
                    # print("percent correct:", correct/trials)
                    # correct_cong = (choices[w.environment.congruent==1] == w.environment.correct_choice[w.environment.congruent==1]).sum()
                    # print("percent correct congruent:", correct_cong/(w.environment.congruent==1).sum())
                    # correct_incong = (choices[w.environment.congruent==0] == w.environment.correct_choice[w.environment.congruent==0]).sum()
                    # print("percent correct incongruent:", correct_incong/(w.environment.congruent==0).sum())
                    # RTs = w.agent.action_selection.RT[:,0]
                    # RT_cong = np.median(RTs[w.environment.congruent==1])
                    # RT_incong = np.median(RTs[w.environment.congruent==0])
                    # print("congruent RT:", RT_cong)
                    # print("incongruent RT:", RT_incong)
                    # length = int(np.amax(w.environment.num_in_run)) + 1
                    # numbers = w.environment.num_in_run
                    # numbers_cong = numbers[w.environment.congruent==1]
                    # numbers_incong = numbers[w.environment.congruent==0]
                    # RT_medians_cong = [np.median(RTs[w.environment.congruent==1][numbers_cong==i]) for i in range(length)]
                    # RT_medians_incong = [np.median(RTs[w.environment.congruent==0][numbers_incong==i]) for i in range(length)]
                    # plt.figure()
                    # plt.plot(RT_medians_cong, 'x')
                    # plt.plot(RT_medians_incong, 'x')
                    # plt.show()
                    # plt.figure()
                    # post_pol = np.einsum('tpc,tc->tp', w.agent.posterior_policies[:,0,:,:], w.agent.posterior_context[:,0,:])
                    # like = np.einsum('tpc,tc->tp', w.agent.likelihood[:,0,:,:], w.agent.posterior_context[:,0,:])
                    # plt.plot(post_pol[:,1], '.')
                    # plt.plot(like[:,1], 'x')
                    # plt.ylim([0,1])
                    # plt.show()
                    # plt.figure()
                    # plt.plot(w.agent.action_selection.RT[:,0], '.')
                    # #plt.plot(Rho[:,2,2])
                    # #plt.plot(Rho[:,1,1])
                    # #plt.ylim([ESS*10,2000])
                    # plt.ylim([0,2000])
                    # plt.savefig("Dir_h"+str(int(learn_pol))+"_RT_timecourse"+str(i)+".svg")#"ESS"+str(ESS)+"_h"+str(int(learn_pol))+"_RT_timecourse"+str(i)+".svg")#
                    # plt.show()
                    # plt.figure()
                    # plt.hist(w.agent.action_selection.RT[:,0])
                    # plt.savefig("uncertain_Dir_h"+str(int(learn_pol))+"_RT_hist"+str(i)+"_1000trials.svg")#"ESS"+str(ESS)+"_h"+str(int(learn_pol))+"_RT_hist"+str(i)+".svg")#
                    # plt.show()
                    # plt.figure()
                    # plt.plot(w.agent.posterior_context[:,0,:], 'x')
                    # plt.show()

                jsonpickle_numpy.register_handlers()
                pickled = pickle.encode(worlds)
                with open(fname, 'w') as outfile:
                    json.dump(pickled, outfile)

                pickled = 0
                worlds = 0

                gc.collect()
Example #21
File: fcm.py Project: tuchang/PyOpenFCM
from fcmlib.config import Config
import json, jsonpickle

import jsonpickle.ext.numpy as jsonpickle_numpy
jsonpickle_numpy.register_handlers()  #numpy support for object serialization


class Concept:
    """Represents single FCM concept.
    
    Attributes:
    - name      - The unique name of the concept.
    - value     - The activation value of the concept in time "t".
    - newValue  - The activation value of the concept in time "t+1".
    - error     - The activation error in time "t".
    - newError  - The activation error in time "t+1".
    - relation  - The relation (class IRelation) with previous concepts.
    - inputMF   - The function (class IFunction) used for fuzzification.
    - outputMF  - The function (class IFunction) used for defuzzification.
    """

    name = None
    value = None
    newValue = None
    error = None
    newError = None
    relation = None
    inputMF = None
    outputMF = None

    def __init__(self, name, value=0, conf=Config):
Example #22
from jsonpickle.ext import numpy as jspnp
import json, os, jsonpickle as jsp, numpy as np, subprocess, numpy
from datetime import datetime as dt

jspnp.register_handlers()  # enable jsonpickle serialization of numpy arrays
import h5py, glob, matplotlib, platform, hashlib, shutil, socket
import matplotlib.pyplot as plt
from . import utilities
import Nowack_Lab  # Necessary for saving as Nowack_Lab-defined types
'''
How saving and loading works:
1) Walks through the object's __dict__, subdictionaries, and subobjects, picks
out numpy arrays, and saves them in a hierarchy in HDF5. Dictionaries are
represented as groups in HDF5, and objects are also represented as groups, but
with a ! preceding the name. This is parsed when loading.
2) All numpy arrays and matplotlib objects in the dictionary hierarchy are set
to None, and the object is saved to JSON.
3) The saved object is immediately reloaded to see if everything went well.
3a) First, the JSON file is loaded to set up the dictionary hierarchy.
3b) Second, we walk through the HDF5 file (identifying objects and dictionaries
as necessary) and populate the numpy arrays.
'''
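
# A minimal sketch of steps 1-2 above (an assumption, not the Nowack_Lab
# implementation; save_hybrid and its arguments are hypothetical). Numpy
# arrays are written into HDF5 groups mirroring the dict hierarchy and
# replaced by None before the remainder is encoded with jsonpickle.
def save_hybrid(d, json_path, h5_path):
    stripped = {}
    with h5py.File(h5_path, 'w') as f:

        def walk(src, dst, group):
            for key, value in src.items():
                if isinstance(value, np.ndarray):
                    group.create_dataset(key, data=value)
                    dst[key] = None  # placeholder; the array lives in HDF5
                elif isinstance(value, dict):
                    dst[key] = {}
                    walk(value, dst[key], group.create_group(key))
                else:
                    dst[key] = value

        walk(d, stripped, f)
    with open(json_path, 'w') as f:
        f.write(jsp.encode(stripped))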


class Saver(object):
    subdirectory = ''  # Formerly "appendedpath".

    # Name of subdirectory off the main data directory where data is saved.

    def __init__(self):
        super().__init__()  # To deal with multiple inheritance mro
Example #23
from gevent import event, pywsgi, signal
import importlib
import jsonpickle
import jsonpickle.ext.numpy as jet
import os
import re
import rospy
import subprocess
import sys
import time
import tf2_ros
import threading
import traceback
import json

jet.register_handlers()

DEFAULT_CONFIG_ROBOT = {
    'file_dirty_state': '/tmp/benchbot_dirty',
    'file_collisions': '/tmp/benchbot_collision',
    'logs_dir': '/tmp/benchbot_logs',
    'start_cmds': [],
}

DEFAULT_CONFIG_ENV = {"object_labels": []}

DEFAULT_STATE = {"selected_environment": 0}

CONN_API_TO_ROS = 'api_to_ros'
CONN_ROS_TO_API = 'ros_to_api'
CONN_ROSCACHE_TO_API = 'roscache_to_api'
Example #24
def run_agent(par_list, trials=trials, T=T, Lx=Lx, Ly=Ly, ns=ns, na=na):

    #set parameters:
    #obs_unc: observation uncertainty condition
    #state_unc: state transition uncertainty condition
    #goal_pol: evaluate only policies that lead to the goal
    #utility: goal prior, preference p(o)
    obs_unc, state_unc, goal_pol, avg, context, utility, h, q = par_list
    """
    create matrices
    """

    vals = np.array([1., 2 / 3., 1 / 2., 1. / 2.])

    #generating probability of observations in each state
    A = np.eye(ns) + const
    np.fill_diagonal(A, 1 - (ns - 1) * const)

    #generate horizontal gradient for observation uncertainty condition
    # if obs_unc:

    #     condition = 'obs'

    #     for s in range(ns):
    #         x = s//Ly
    #         y = s%Ly

    #         c = 1#vals[L - y - 1]

    #         # look for neighbors
    #         neighbors = []
    #         if (s-4)>=0 and (s-4)!=g1:
    #             neighbors.append(s-4)

    #         if (s%4)!=0 and (s-1)!=g1:
    #             neighbors.append(s-1)

    #         if (s+4)<=(ns-1) and (s+4)!=g1:
    #             neighbors.append(s+4)

    #         if ((s+1)%4)!=0 and (s+1)!=g1:
    #             neighbors.append(s+1)

    #         A[s,s] = c
    #         for n in neighbors:
    #             A[n,s] = (1-c)/len(neighbors)

    #state transition generative probability (matrix)
    B = np.zeros((ns, ns, na)) + const

    cert_arr = np.zeros(ns)
    for s in range(ns):
        x = s // Ly
        y = s % Ly

        #state uncertainty condition
        if state_unc:
            if (x == 0) or (y == 3):
                c = vals[0]
            elif (x == 1) or (y == 2):
                c = vals[1]
            elif (x == 2) or (y == 1):
                c = vals[2]
            else:
                c = vals[3]

            condition = 'state'

        else:
            c = 1.

        cert_arr[s] = c
        for u in range(na):
            x = s // Ly + actions[u][0]
            y = s % Ly + actions[u][1]

            #check if state goes over boundary
            if x < 0:
                x = 0
            elif x == Lx:
                x = Lx - 1

            if y < 0:
                y = 0
            elif y == Ly:
                y = Ly - 1

            s_new = Ly * x + y
            if s_new == s:
                B[s, s, u] = 1 - (ns - 1) * const
            else:
                B[s, s, u] = 1 - c + const
                B[s_new, s, u] = c - (ns - 1) * const

    B_c = np.broadcast_to(B[:, :, :, np.newaxis], (ns, ns, na, nc))
    print(B.shape)
    """
    create environment (grid world)
    """
    Rho = np.zeros((nr, ns)) + const
    Rho[0, :] = 1 - (nr - 1) * const
    Rho[:, np.argmax(utility)] = [0 + const, 1 - (nr - 1) * const]
    print(Rho)
    util = np.array([1 - np.amax(utility), np.amax(utility)])

    environment = env.GridWorld(A,
                                B,
                                Rho,
                                trials=trials,
                                T=T,
                                initial_state=start)

    Rho_agent = np.ones((nr, ns, nc)) / nr

    if True:
        templates = np.ones_like(Rho_agent)
        templates[0] *= 100
        assert ns == nc
        for s in range(ns):
            templates[0, s, s] = 1
            templates[1, s, s] = 100
        dirichlet_rew_params = templates
    else:
        dirichlet_rew_params = np.ones_like(Rho_agent)
    """
    create policies
    """

    if goal_pol:
        pol = []
        su = 3
        for p in itertools.product([0, 1], repeat=T - 1):
            if (np.array(p)[0:6].sum() == su) and (np.array(p)[-1] != 1):
                pol.append(list(p))

        pol = np.array(pol) + 2
    else:
        pol = np.array(list(itertools.product(list(range(na)), repeat=T - 1)))

    #pol = pol[np.where(pol[:,0]>1)]

    npi = pol.shape[0]

    prior_policies = np.ones((npi, nc)) / npi
    dirichlet_pol_param = np.zeros_like(prior_policies) + h
    """
    set state prior (where agent thinks it starts)
    """

    state_prior = np.zeros((ns))

    # state_prior[0] = 1./4.
    # state_prior[1] = 1./4.
    # state_prior[4] = 1./4.
    # state_prior[5] = 1./4.
    state_prior[start] = 1
    """
    set context prior and matrix
    """

    context_prior = np.ones(nc)
    trans_matrix_context = np.ones((nc, nc))
    if nc > 1:
        # context_prior[0] = 0.9
        # context_prior[1:] = 0.1 / (nc-1)
        context_prior /= nc
        trans_matrix_context[:] = (1 - q) / (nc - 1)
        np.fill_diagonal(trans_matrix_context, q)
    """
    set action selection method
    """

    if avg:

        sel = 'avg'

        ac_sel = asl.DirichletSelector(trials=trials,
                                       T=T,
                                       factor=0.5,
                                       number_of_actions=na,
                                       calc_entropy=False,
                                       calc_dkl=False,
                                       draw_true_post=True)
    else:

        sel = 'max'

        ac_sel = asl.MaxSelector(trials=trials, T=T, number_of_actions=na)


#    ac_sel = asl.AveragedPolicySelector(trials = trials, T = T,
#                                        number_of_policies = npi,
#                                        number_of_actions = na)
    """
    set up agent
    """
    #bethe agent
    if agent == 'bethe':

        agnt = 'bethe'

        # perception and planning

        bayes_prc = prc.HierarchicalPerception(
            A,
            B_c,
            Rho_agent,
            trans_matrix_context,
            state_prior,
            util,
            prior_policies,
            dirichlet_pol_params=dirichlet_pol_param,
            dirichlet_rew_params=dirichlet_rew_params)

        bayes_pln = agt.BayesianPlanner(
            bayes_prc,
            ac_sel,
            pol,
            trials=trials,
            T=T,
            prior_states=state_prior,
            prior_policies=prior_policies,
            prior_context=context_prior,
            number_of_states=ns,
            learn_habit=True,
            learn_rew=True,
            #save_everything = True,
            number_of_policies=npi,
            number_of_rewards=nr)
    #MF agent
    else:

        agnt = 'mf'

        # perception and planning

        bayes_prc = prc.MFPerception(A, B, state_prior, utility, T=T)

        bayes_pln = agt.BayesianMFPlanner(bayes_prc, [],
                                          ac_sel,
                                          trials=trials,
                                          T=T,
                                          prior_states=state_prior,
                                          policies=pol,
                                          number_of_states=ns,
                                          number_of_policies=npi)
    """
    create world
    """

    w = world.World(environment, bayes_pln, trials=trials, T=T)
    """
    simulate experiment
    """

    if not context:
        w.simulate_experiment()
    else:
        w.simulate_experiment(curr_trials=range(0, trials // 2))
        Rho_new = np.zeros((nr, ns)) + const
        Rho_new[0, :] = 1 - (nr - 1) * const
        Rho_new[:, g2] = [0 + const, 1 - (nr - 1) * const]
        print(Rho_new)
        w.environment.Rho[:] = Rho_new
        #w.agent.perception.generative_model_rewards = Rho_new
        w.simulate_experiment(curr_trials=range(trials // 2, trials))
    """
    plot and evaluate results
    """
    #find successful and unsuccessful runs
    #goal = np.argmax(utility)
    successfull_g1 = np.where(environment.hidden_states[:, -1] == g1)[0]
    if context:
        successfull_g2 = np.where(environment.hidden_states[:, -1] == g2)[0]
        unsuccessfull1 = np.where(environment.hidden_states[:, -1] != g1)[0]
        unsuccessfull2 = np.where(environment.hidden_states[:, -1] != g2)[0]
        unsuccessfull = np.intersect1d(unsuccessfull1, unsuccessfull2)
    else:
        unsuccessfull = np.where(environment.hidden_states[:, -1] != g1)[0]

    #total  = len(successfull)

    #plot start and goal state
    start_goal = np.zeros((Lx, Ly))

    x_y_start = (start // Ly, start % Ly)
    start_goal[x_y_start] = 1.
    x_y_g1 = (g1 // Ly, g1 % Ly)
    start_goal[x_y_g1] = -1.
    x_y_g2 = (g2 // Ly, g2 % Ly)
    start_goal[x_y_g2] = -2.

    palette = [(159 / 255, 188 / 255, 147 / 255),
               (135 / 255, 170 / 255, 222 / 255),
               (242 / 255, 241 / 255, 241 / 255),
               (242 / 255, 241 / 255, 241 / 255),
               (199 / 255, 174 / 255, 147 / 255),
               (199 / 255, 174 / 255, 147 / 255)]

    #set up figure params
    # ~ factor = 3
    # ~ grid_plot_kwargs = {'vmin': -2, 'vmax': 2, 'center': 0, 'linecolor': '#D3D3D3',
    # ~ 'linewidths': 7, 'alpha': 1, 'xticklabels': False,
    # ~ 'yticklabels': False, 'cbar': False,
    # ~ 'cmap': palette}#sns.diverging_palette(120, 45, as_cmap=True)} #"RdBu_r",

    # ~ # plot grid
    # ~ fig = plt.figure(figsize=[factor*5,factor*4])

    # ~ ax = fig.gca()

    # ~ annot = np.zeros((Lx,Ly))
    # ~ for i in range(Lx):
    # ~ for j in range(Ly):
    # ~ annot[i,j] = i*Ly+j

    # ~ u = sns.heatmap(start_goal, ax = ax, **grid_plot_kwargs, annot=annot, annot_kws={"fontsize": 40})
    # ~ ax.invert_yaxis()
    # ~ plt.savefig('grid.svg', dpi=600)
    # ~ #plt.show()

    # ~ # set up paths figure
    # ~ fig = plt.figure(figsize=[factor*5,factor*4])

    # ~ ax = fig.gca()

    # ~ u = sns.heatmap(start_goal, zorder=2, ax = ax, **grid_plot_kwargs)
    # ~ ax.invert_yaxis()

    # ~ #find paths and count them
    # ~ n1 = np.zeros((ns, na))

    # ~ for i in successfull_g1:

    # ~ for j in range(T-1):
    # ~ d = environment.hidden_states[i, j+1] - environment.hidden_states[i, j]
    # ~ if d not in [1,-1,Ly,-Ly,0]:
    # ~ print("ERROR: beaming")
    # ~ if d == 1:
    # ~ n1[environment.hidden_states[i, j],0] +=1
    # ~ if d == -1:
    # ~ n1[environment.hidden_states[i, j]-1,0] +=1
    # ~ if d == Ly:
    # ~ n1[environment.hidden_states[i, j],1] +=1
    # ~ if d == -Ly:
    # ~ n1[environment.hidden_states[i, j]-Ly,1] +=1

    # ~ n2 = np.zeros((ns, na))

    # ~ if context:
    # ~ for i in successfull_g2:

    # ~ for j in range(T-1):
    # ~ d = environment.hidden_states[i, j+1] - environment.hidden_states[i, j]
    # ~ if d not in [1,-1,Ly,-Ly,0]:
    # ~ print("ERROR: beaming")
    # ~ if d == 1:
    # ~ n2[environment.hidden_states[i, j],0] +=1
    # ~ if d == -1:
    # ~ n2[environment.hidden_states[i, j]-1,0] +=1
    # ~ if d == Ly:
    # ~ n2[environment.hidden_states[i, j],1] +=1
    # ~ if d == -Ly:
    # ~ n2[environment.hidden_states[i, j]-Ly,1] +=1

    # ~ un = np.zeros((ns, na))

    # ~ for i in unsuccessfull:

    # ~ for j in range(T-1):
    # ~ d = environment.hidden_states[i, j+1] - environment.hidden_states[i, j]
    # ~ if d not in [1,-1,Ly,-Ly,0]:
    # ~ print("ERROR: beaming")
    # ~ if d == 1:
    # ~ un[environment.hidden_states[i, j],0] +=1
    # ~ if d == -1:
    # ~ un[environment.hidden_states[i, j]-1,0] +=1
    # ~ if d == Ly:
    # ~ un[environment.hidden_states[i, j],1] +=1
    # ~ if d == -Ly:
    # ~ un[environment.hidden_states[i, j]-4,1] +=1

    # ~ total_num = n1.sum() + n2.sum() + un.sum()

    # ~ if np.any(n1 > 0):
    # ~ n1 /= total_num

    # ~ if np.any(n2 > 0):
    # ~ n2 /= total_num

    # ~ if np.any(un > 0):
    # ~ un /= total_num

    # ~ #plotting
    # ~ for i in range(ns):

    # ~ x = [i%Ly + .5]
    # ~ y = [i//Ly + .5]

    # ~ #plot uncertainties
    # ~ if obs_unc:
    # ~ plt.plot(x,y, 'o', color=(219/256,122/256,147/256), markersize=factor*12/(A[i,i])**2, alpha=1.)
    # ~ if state_unc:
    # ~ plt.plot(x,y, 'o', color=(100/256,149/256,237/256), markersize=factor*12/(cert_arr[i])**2, alpha=1.)

    # ~ #plot unsuccessful paths
    # ~ for j in range(2):

    # ~ if un[i,j]>0.0:
    # ~ if j == 0:
    # ~ xp = x + [x[0] + 1]
    # ~ yp = y + [y[0] + 0]
    # ~ if j == 1:
    # ~ xp = x + [x[0] + 0]
    # ~ yp = y + [y[0] + 1]

    # ~ plt.plot(xp,yp, '-', color='#D5647C', linewidth=factor*75*un[i,j],
    # ~ zorder = 9, alpha=1)

    # ~ #set plot title
    # ~ #plt.title("Planning: successful "+str(round(100*total/trials))+"%", fontsize=factor*9)

    # ~ #plot successful paths on top
    # ~ for i in range(ns):

    # ~ x = [i%Ly + .5]
    # ~ y = [i//Ly + .5]

    # ~ for j in range(2):

    # ~ if n1[i,j]>0.0:
    # ~ if j == 0:
    # ~ xp = x + [x[0] + 1]
    # ~ yp = y + [y[0]]
    # ~ if j == 1:
    # ~ xp = x + [x[0] + 0]
    # ~ yp = y + [y[0] + 1]
    # ~ plt.plot(xp,yp, '-', color='#4682B4', linewidth=factor*75*n1[i,j],
    # ~ zorder = 10, alpha=1)

    # ~ #plot successful paths on top
    # ~ if context:
    # ~ for i in range(ns):

    # ~ x = [i%Ly + .5]
    # ~ y = [i//Ly + .5]

    # ~ for j in range(2):

    # ~ if n2[i,j]>0.0:
    # ~ if j == 0:
    # ~ xp = x + [x[0] + 1]
    # ~ yp = y + [y[0]]
    # ~ if j == 1:
    # ~ xp = x + [x[0] + 0]
    # ~ yp = y + [y[0] + 1]
    # ~ plt.plot(xp,yp, '-', color='#55ab75', linewidth=factor*75*n2[i,j],
    # ~ zorder = 10, alpha=1)

    # ~ #print("percent won", total/trials, "state prior", np.amax(utility))

    # ~ plt.savefig('chosen_paths_'+name_str+'h'+str(h)+'.svg')
    #plt.show()

    # max_RT = np.amax(w.agent.action_selection.RT[:,0])
    # plt.figure()
    # plt.plot(w.agent.action_selection.RT[:,0], '.')
    # plt.ylim([0,1.05*max_RT])
    # plt.xlim([0,trials])
    # plt.savefig("Gridworld_Dir_h"+str(h)+".svg")
    # plt.show()
    """
    save data
    """

    if save_data:
        jsonpickle_numpy.register_handlers()

        ut = np.amax(utility)
        p_o = '{:02d}'.format(round(ut * 10).astype(int))
        fname = agnt + '_' + condition + '_' + sel + '_initUnc_' + p_o + '.json'
        fname = os.path.join(data_folder, fname)
        pickled = pickle.encode(w)
        with open(fname, 'w') as outfile:
            json.dump(pickled, outfile)

    return w
Example #25
def run_switching_simulations_one_context(repetitions, folder):

    trials = 100
    T = 2
    ns = 8
    na = 2
    nr = 2
    nc = 1
    u = 0.99
    utility = np.array([1-u,u])
    f = 3.5
    random_draw = False
    pol_lambda = 0
    r_lambda = 0

    Rho = np.zeros((nr, ns, 2))

    for tendency in [1000]:#[1,1000]:#,3,5,10,30,50,100]: #1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,100]:
        # these two params don't exist anyway when there is only one context;
        # they are set only for naming consistency with the other simulations
        for trans in [95]:#[80,85,90,91,92,93,94,95,96,97,98,99]:
            for unc in [1]:#,2,3,4,5,6,8,10]:
                print(tendency, trans, unc)

                if random_draw==False:
                    prefix="acsel_"
                else:
                    prefix=""
                if pol_lambda > 0:
                    s = "alpha_"
                else:
                    s = ""
                if r_lambda > 0:
                    s += "beta_"
                else:
                    s += ""
                run_name = prefix+"switching_"+s+"h"+str(int(tendency))+"_t"+str(trans)+"_u"+str(unc)+"_f"+str(f)+"_ut"+str(u)+"_onecontext.json"
                print(run_name)
                fname = os.path.join(folder, run_name)

                jsonpickle_numpy.register_handlers()

                if False:#run_name in os.listdir(folder):
                    with open(fname, 'r') as infile:
                        data = json.load(infile)

                    worlds = pickle.decode(data)
                    print(len(worlds))
                    num_w_old = len(worlds)
                else:
                    worlds = []
                    num_w_old = 0

                learn_pol = tendency
                parameters = [learn_pol, trans/100., Rho, utility, unc/100.]

                for i in range(num_w_old, repetitions):
                    Rho[:], contexts, states, state_trans, correct_choice, congruent, num_in_run = \
                    switching_timeseries_onecontext(trials, nr=nr, ns=ns, na=na, stable_length=5)
                    worlds.append(run_agent(parameters, trials, T, ns, na, nr, nc,\
                                            f, contexts, states, \
                                            state_trans=state_trans, \
                                            correct_choice=correct_choice, \
                                            congruent=congruent, \
                                            num_in_run=num_in_run, \
                                            random_draw=random_draw, \
                                            pol_lambda=pol_lambda, \
                                            r_lambda=r_lambda,
                                            one_context=True))


                if random_draw==False:
                    prefix="acsel_"
                else:
                    prefix=""
                #run_name = prefix+"switching_h"+str(int(learn_pol))+"_t"+str(trans)+"_u"+str(unc)+".json"
                fname = os.path.join(folder, run_name)

                jsonpickle_numpy.register_handlers()
                pickled = pickle.encode(worlds)
                with open(fname, 'w') as outfile:
                    json.dump(pickled, outfile)

                pickled = 0
                worlds = 0

                gc.collect()
Example #26
sentifmdetect17 
11/24/17
Copyright (c) Gilles Jacobs. All rights reserved.  
'''
import os
import logging.config
import yaml
from sentifmdetect import settings
import collections
from itertools import groupby, chain
import glob
import json
import jsonpickle
import jsonpickle.ext.numpy as jsonpickle_numpy

jsonpickle_numpy.register_handlers()  #json pickle can now be used with numpy


def setup_logging(default_path='logging.yaml',
                  default_level=logging.INFO,
                  env_key='LOG_CFG'):
    """Setup logging configuration

    """
    path = default_path
    value = os.getenv(env_key, None)
    if value:
        path = value
    if os.path.exists(path):
        with open(path, 'rt') as f:
            config = yaml.safe_load(f.read())
        # assumed continuation of the standard logging-config recipe
        logging.config.dictConfig(config)
    else:
        logging.basicConfig(level=default_level)
Example #27
def load_fitting(folder):

    samples = []
    tendencies = [1, 3, 10, 30, 100]
    for tendency in tendencies:
        for trans in [99]:
            for prob in [75]:
                for train in [100]:
                    print(tendency, trans)
                    traces = []

                    run_name = "h" + str(tendency) + "_t" + str(
                        trans) + "_p" + str(prob) + "_train" + str(
                            train) + ".json"
                    fname = os.path.join(folder, run_name)

                    fname = os.path.join(folder,
                                         run_name[:-5] + "_samples.json")

                    jsonpickle_numpy.register_handlers()
                    with open(fname, 'r') as infile:
                        data = json.load(infile)

                    curr_samples, sample_space = pickle.decode(data)

                    samples.append(curr_samples)

    labels = np.tile(1. / np.array(tendencies),
                     (samples[-1].shape[0], 1)).reshape(-1, order='f')
    data = -np.array(samples).flatten()
    pd_h_samples = pd.DataFrame(data={
        'inferred tendencies': data,
        'true tendencies': labels
    })
    print(np.log10(sample_space))
    plt.figure()
    ax = plt.gca()
    # ax.set_ylim([-13,1])
    ax.set_yticks(np.arange(-2., 0.5, 0.25))
    yticklabels = [""] * len(sample_space)
    yticklabels[0] = 0.01
    yticklabels[-1] = 1.
    yticklabels[len(sample_space) // 2] = 0.1
    ax.set_yticklabels(yticklabels)
    sns.boxenplot(data=pd_h_samples,
                  x='true tendencies',
                  y='inferred tendencies',
                  ax=ax)
    plt.plot(np.arange(len(sample_space)) / 2,
             np.flip(np.log10(sample_space)),
             '--',
             color='black',
             alpha=0.5)
    #sns.stripplot(data=pd_h_samples, x='tendencies', y='samples', size=4, color='grey')
    #plt.ylim([0,1])
    plt.savefig('rho_inference.svg')
    plt.show()

    plt.figure()
    ax = plt.gca()
    # ax.set_ylim([-len(sample_space)-1,1])
    # ax.set_yticks(range(-len(sample_space),0+1,1))
    # yticklabels = [""]*len(sample_space)
    # yticklabels[0] = 0.01
    # yticklabels[-1] = 1.
    # yticklabels[len(sample_space)//2] = 0.1
    # ax.set_yticklabels(yticklabels)
    sns.violinplot(data=pd_h_samples,
                   x='true tendencies',
                   y='inferred tendencies',
                   ax=ax)
    #sns.stripplot(data=pd_h_samples, x='tendencies', y='samples', size=4, color='grey')
    #plt.ylim([0,1])
    plt.show()
Example #28
def analyze_single_simulations(folder, plot=True, non_dec_time=0, t_s=1):

    tendencies = [1000]
    probs = [90,95,99]#[80,85,90,91,92,93,94,95,96,97,98,99]
    uncertainties = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,2,3,4,5,6,8,10]
    run_name = "acsel_single_h"+str(int(tendencies[0]))+"_t"+str(probs[0])+"_u"+str(uncertainties[0])+"_f3.5_ut0.99.json"
    fname = os.path.join(folder, run_name)

    jsonpickle_numpy.register_handlers()

    with open(fname, 'r') as infile:
        data = json.load(infile)

    worlds_old = pickle.decode(data)

    repetitions = len(worlds_old)
    trials = worlds_old[0].trials
    num_types = len(tendencies)*len(probs)*len(uncertainties)
    correct = np.zeros(repetitions*trials*num_types)
    RT = np.zeros(repetitions*trials*num_types)
    agent = np.zeros(repetitions*trials*num_types)
    num_in_run = np.zeros(repetitions*trials*num_types)
    congruent = np.zeros(repetitions*trials*num_types)
    trial_num = np.zeros(repetitions*trials*num_types)
    epoch = np.zeros(repetitions*trials*num_types)
    tend_arr = np.zeros(repetitions*trials*num_types)
    prob_arr = np.zeros(repetitions*trials*num_types)
    unc_arr  = np.zeros(repetitions*trials*num_types)

    sim_type = 0
    for tendency in tendencies:#,3,5,10,30,50,100]: #1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,100]:
        for trans in probs:#[100,99,98,97,96,95,94]:
            for unc in uncertainties:

                run_name = "acsel_single_h"+str(int(tendency))+"_t"+str(trans)+"_u"+str(unc)+"_f3.5_ut0.99.json"
                fname = os.path.join(folder, run_name)

                jsonpickle_numpy.register_handlers()

                with open(fname, 'r') as infile:
                    data = json.load(infile)

                worlds_old = pickle.decode(data)

                repetitions = len(worlds_old)
                trials = worlds_old[0].trials
                
                print("single", len(worlds_old), tendency, trans, unc)

                offset = sim_type*repetitions*trials

                for i in range(repetitions):
                    w = worlds_old[i]
                    correct[offset+i*trials:offset+(i+1)*trials] = (w.actions[:,0] == w.environment.correct_choice).astype(int)
                    RT[offset+i*trials:offset+(i+1)*trials] = t_s*w.agent.action_selection.RT[:,0] + non_dec_time
                    agent[offset+i*trials:offset+(i+1)*trials] = i
                    num_in_run[offset+i*trials:offset+(i+1)*trials] = w.environment.num_in_run
                    congruent[offset+i*trials:offset+(i+1)*trials] = np.logical_not(w.environment.congruent)
                    trial_num[offset+i*trials:offset+(i+1)*trials] = np.arange(0,trials)
                    epoch[offset+i*trials:offset+(i+1)*trials] = [-1]*10 + [0]*10 + [1]*20 + [2]*30 + [3]*(trials-70)
                    tend_arr[offset+i*trials:offset+(i+1)*trials] = tendency
                    prob_arr[offset+i*trials:offset+(i+1)*trials] = 100-trans
                    unc_arr[offset+i*trials:offset+(i+1)*trials] = unc

                sim_type+=1

    data_dict = {"correct": correct, "RT": RT, "agent": agent,
                 "num_in_run": num_in_run, "congruent": congruent,
                 "trial_num": trial_num, "epoch": epoch,
                 "uncertainty": unc_arr, "tendencies": tend_arr,
                 "trans_probs": prob_arr}
    data = pd.DataFrame(data_dict)

    if plot:
        # plt.figure()
        # for i in range(0,3):
        #     sns.lineplot(x='num_in_run', y='RT', data=data.query('epoch == @i'), style='congruent', label=str(i), ci = 95, estimator=np.nanmean, linewidth=3)
        # plt.show()
        tendency=1000
        trans=95
        unc=1
        trans = 100-trans
        
        plt.figure()
        plt.title("tendency "+str(tendency)+", trans "+str(trans)+", unc "+str(unc))
        sns.lineplot(x='num_in_run', y='RT', data=data.query('epoch >= 0 and epoch < 3 and tendencies==@tendency and uncertainty==@unc and trans_probs==@trans'), style='congruent', hue='epoch', ci = 95, estimator=np.nanmean, linewidth=3)
        plt.ylim([0,1800])
        plt.show()
        plt.figure()
        plt.title("tendency "+str(tendency)+", trans "+str(trans)+", unc "+str(unc))
        sns.lineplot(x='num_in_run', y='correct', data=data.query('epoch >= 0 and epoch < 3 and tendencies==@tendency and uncertainty==@unc and trans_probs==@trans'), style='congruent', hue='epoch', ci = 95, estimator=np.nanmean, linewidth=3)
        plt.ylim([0.,1.])
        plt.show()
        plt.figure()
        #plt.title("tendency "+str(tendency)+", trans "+str(trans))
        sns.lineplot(x='uncertainty', y='RT', data=data.query('tendencies==@tendency and trans_probs==@trans and uncertainty<1 and uncertainty>0'), style='num_in_run', ci = 95, estimator=np.nanmean, linewidth=3)
        plt.ylim([600,1500])
        plt.gca().invert_xaxis()
        plt.savefig("CTI_single.svg")
        plt.show()
        plt.figure()
        plt.title("tendency "+str(tendency)+", trans "+str(trans))
        sns.lineplot(x='num_in_run', y='RT', data=data.query('tendencies==@tendency and trans_probs==@trans and uncertainty<11 and epoch>2'), hue='uncertainty', style='congruent', ci = 95, estimator=np.nanmean, linewidth=3)
        plt.ylim([400,1800])
        plt.show()
        plt.figure()
        plt.title("tendency "+str(tendency)+", trans "+str(trans))
        sns.lineplot(x='num_in_run', y='correct', data=data.query('tendencies==@tendency and trans_probs==@trans and uncertainty<11 and epoch>2'), hue='uncertainty', style='congruent', ci = 95, estimator=np.nanmean, linewidth=3)
        plt.ylim([0,1])
        plt.show()

    return data
Example #29
def analyze_onecontext_simulations(folder):

    tendencies = [1000]
    probs = [95]
    uncertainties = [1]
    run_name = "acsel_switching_h"+str(int(tendencies[0]))+"_t"+str(probs[0])+"_u"+str(uncertainties[0])+"_f3.5_ut0.99_onecontext.json"
    print(run_name)
    fname = os.path.join(folder, run_name)

    jsonpickle_numpy.register_handlers()

    with open(fname, 'r') as infile:
        data = json.load(infile)

    worlds_old = pickle.decode(data)

    repetitions = len(worlds_old)
    trials = worlds_old[0].trials
    num_types = len(tendencies)*len(probs)*len(uncertainties)
    correct = np.zeros(repetitions*trials*num_types)
    RT = np.zeros(repetitions*trials*num_types)
    agent = np.zeros(repetitions*trials*num_types)
    num_in_run = np.zeros(repetitions*trials*num_types)
    congruent = np.zeros(repetitions*trials*num_types)
    trial_num = np.zeros(repetitions*trials*num_types)
    epoch = np.zeros(repetitions*trials*num_types)
    tend_arr = np.zeros(repetitions*trials*num_types)
    prob_arr = np.zeros(repetitions*trials*num_types)
    unc_arr  = np.zeros(repetitions*trials*num_types)
    non_dec_time = 100
    t_s = 0.2

    sim_type = 0
    for tendency in tendencies:
        for trans in probs:
            for unc in uncertainties:

                run_name = "acsel_switching_h"+str(int(tendency))+"_t"+str(trans)+"_u"+str(unc)+"_f3.5_ut0.99_onecontext.json"
                fname = os.path.join(folder, run_name)

                jsonpickle_numpy.register_handlers()

                with open(fname, 'r') as infile:
                    data = json.load(infile)

                worlds_old = pickle.decode(data)

                repetitions = len(worlds_old)
                trials = worlds_old[0].trials
                
                print("switching", len(worlds_old), tendency, trans, unc)

                offset = sim_type*repetitions*trials

                for i in range(repetitions):
                    w = worlds_old[i]
                    correct[offset+i*trials:offset+(i+1)*trials] = (w.actions[:,0] == w.environment.correct_choice).astype(int)
                    RT[offset+i*trials:offset+(i+1)*trials] = t_s*w.agent.action_selection.RT[:,0] + non_dec_time
                    agent[offset+i*trials:offset+(i+1)*trials] = i
                    num_in_run[offset+i*trials:offset+(i+1)*trials] = w.environment.num_in_run
                    congruent[offset+i*trials:offset+(i+1)*trials] = np.logical_not(w.environment.congruent)
                    trial_num[offset+i*trials:offset+(i+1)*trials] = np.arange(0,trials)
                    epoch[offset+i*trials:offset+(i+1)*trials] = [-1]*10 + [0]*10 + [1]*20 + [2]*30 + [3]*(trials-70)
                    tend_arr[offset+i*trials:offset+(i+1)*trials] = tendency
                    prob_arr[offset+i*trials:offset+(i+1)*trials] = 100-trans
                    unc_arr[offset+i*trials:offset+(i+1)*trials] = unc

                sim_type+=1

    data_dict = {"correct": correct, "RT": RT, "agent": agent,
                 "num_in_run": num_in_run, "congruent": congruent,
                 "trial_num": trial_num, "epoch": epoch,
                 "uncertainty": unc_arr, "tendencies": tend_arr,
                 "trans_probs": prob_arr}
    data = pd.DataFrame(data_dict)

    # example parameter setting to plot
    tendency = 1000
    trans = 95
    unc = 1
    trans = 100 - trans  # convert to switch probability, matching prob_arr above

    # RT & accuracy as a function of num_in_run for congruent and incongruent
    # trials and for different training durations (Fig 3 in Steyvers 2019)
    current_palette = sns.color_palette("colorblind")
    plot_palette = [(236/255,98/255,103/255), current_palette[2], current_palette[0]]
    sns.set_style("ticks")
    plt.figure(figsize=(3.5,5))
    #plt.title("tendency "+str(tendency)+", trans "+str(trans)+", unc "+str(unc))
    sns.lineplot(x='num_in_run', y='RT', data=data.query('epoch >= 0 and epoch < 3 and tendencies==@tendency and uncertainty==@unc and trans_probs==@trans'), style='congruent', hue='epoch', markers=True, ci = 95, estimator=np.nanmean, linewidth=3, palette=plot_palette)
    #plt.ylim([600,2000])
    plt.xticks([1,2,3,4,5], fontsize=16)
    plt.yticks(fontsize=16)
    plt.xlabel("Trial after switch", fontsize=16)
    plt.ylabel("RT", fontsize=16)
    plt.savefig("numinrun_congruent_RT.svg")
    plt.show()
    plt.figure(figsize=(3.5,5))
    #plt.title("tendency "+str(tendency)+", trans "+str(trans)+", unc "+str(unc))
    sns.lineplot(x='num_in_run', y='correct', data=data.query('epoch >= 0 and epoch < 3 and tendencies==@tendency and uncertainty==@unc and trans_probs==@trans'), style='congruent', hue='epoch', markers=True, ci = 95, estimator=np.nanmean, linewidth=3, palette=plot_palette)
    #plt.ylim([0.65,1.])
    plt.xticks([1,2,3,4,5], fontsize=16)
    plt.yticks(fontsize=16)
    plt.xlabel("Trial after switch", fontsize=16)
    plt.ylabel("Prop correct", fontsize=16)
    plt.savefig("numinrun_congruent_correct.svg")
    plt.show()

    return data
def run_gridworld_simulations(repetitions):
    # prior over outcomes: encodes utility
    # ns (number of states) and g1 (goal state index) are module-level
    # globals defined elsewhere in the original script
    u = 0.999
    utility = np.zeros(ns)
    utility[g1] = u
    utility[:g1] = (1 - u) / (ns - 1)
    utility[g1 + 1:] = (1 - u) / (ns - 1)

    # action selection: averaged or max selection
    avg = True
    tendencies = [1, 1000]
    context = True
    if context:
        name_str = "context_"
    else:
        name_str = ""
    # parameter list
    l = []

    # either observation uncertainty
    #l.append([True, False, False, avg, utility])

    # or state uncertainty
    #l.append([False, True, False, avg, utility])

    # or no uncertainty
    l.append([False, False, False, avg, context, utility])

    par_list = []

    # pair each parameter configuration with each habitual tendency h
    for p in itertools.product(l, tendencies):
        par_list.append(p[0] + [p[1]])

    qs = [0.97, 0.97]
    for n, pars in enumerate(par_list):
        h = pars[-1]
        q = qs[n]
        for i in range(repetitions):
            w = run_agent(pars + [q])

            jsonpickle_numpy.register_handlers()

            fname = ('Dir_gridworld_' + name_str + str(repetitions)
                     + 'repetitions_h' + str(h) + '_run' + str(i) + '.json')
            # `pickle` here is jsonpickle (the module presumably does
            # `import jsonpickle as pickle`)
            pickled = pickle.encode(w)
            with open(fname, 'w') as outfile:
                json.dump(pickled, outfile)

            # drop references so the large world object can be garbage collected
            w = 0
            pickled = 0
            gc.collect()
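For completeness, reloading one of these saved runs mirrors the decode pattern used in the analysis functions above. A minimal sketch, assuming the same `import jsonpickle as pickle` alias; the file name is illustrative:

import json
import jsonpickle as pickle
import jsonpickle.ext.numpy as jsonpickle_numpy

jsonpickle_numpy.register_handlers()

fname = 'Dir_gridworld_context_50repetitions_h1_run0.json'  # illustrative name
with open(fname, 'r') as infile:
    w = pickle.decode(json.load(infile))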
Example #31
from jsonpickle.ext import numpy as jspnp
import json, os, pickle, bz2, jsonpickle as jsp, numpy as np
from datetime import datetime
jspnp.register_handlers()
from copy import copy
import h5py, glob, matplotlib, inspect, platform, hashlib, shutil
import matplotlib.pyplot as plt
from . import utilities

'''
How saving and loading works:
1) Walks through the object's __dict__, subdictionaries, and subobjects,
picks out numpy arrays, and saves them in a hierarchy in HDF5. Dictionaries
are represented as groups in HDF5, and objects are also represented as
groups, but with a ! preceding the name. This is parsed when loading.
2) All numpy arrays and matplotlib objects in the dictionary hierarchy are set
to None, and the object is saved to JSON.
3) The saved object is immediately reloaded to check that everything went
well.
3a) First, the JSON file is loaded to set up the dictionary hierarchy.
3b) Second, we walk through the HDF5 file (identifying objects and
dictionaries as necessary) and populate the numpy arrays.
'''
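A minimal sketch of step 1, following the conventions described above; the helper name `_save_arrays` and its signature are illustrative, not this module's actual API:

import h5py
import numpy as np

def _save_arrays(group, obj):
    # Recurse through dicts and objects, storing numpy arrays as HDF5 datasets.
    items = obj.items() if isinstance(obj, dict) else vars(obj).items()
    for key, value in items:
        if isinstance(value, np.ndarray):
            group.create_dataset(key, data=value)
        elif isinstance(value, dict):
            _save_arrays(group.create_group(key), value)
        elif hasattr(value, '__dict__'):
            # objects become groups with a '!' prefix so loading can
            # distinguish them from plain dictionaries
            _save_arrays(group.create_group('!' + key), value)

# illustrative usage:
# with h5py.File('measurement.h5', 'w') as f:
#     _save_arrays(f, measurement)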


class Measurement:
    _chan_labels = [] # DAQ channel labels expected by this class
    instrument_list = []
    fig = None

    def __init__(self):
        self.timestamp = ''
Example #32
from jsonpickle.ext import numpy
import json
import jsonpickle as jsp
numpy.register_handlers()

class Measurement:
    
    @staticmethod
    def tojson(obj, filename):
        obj_string = jsp.encode(obj)
        obj_dict = json.loads(obj_string)
        with open(filename, 'w') as f:
            json.dump(obj_dict, f, sort_keys=True, indent=4)

    @staticmethod
    def fromjson(json_file):
        with open(json_file) as f:
            obj_dict = json.load(f)
        obj_string = json.dumps(obj_dict)
        obj = jsp.decode(obj_string)
        return obj
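A round-trip usage sketch for the two static methods; the `data` attribute is illustrative:

import numpy as np

m = Measurement()
m.data = np.linspace(0, 1, 5)  # illustrative attribute

Measurement.tojson(m, 'measurement.json')
restored = Measurement.fromjson('measurement.json')
assert np.allclose(restored.data, m.data)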