Example #1
def individual_data(args):
    """
    Produce figures for 'individual' data
    :param args: Parsed args from command line ('Namespace' object)
    :return: None
    """

    for move in (str(i).replace("Move.", "")
                 for i in (model.Move.max_profit, model.Move.strategic,
                           model.Move.max_diff, model.Move.equal_sharing)):

        run_backups = []

        for r in ("25", "50"):  # , "75"):

            parameters_file = "data/json/{}_{}.json".format(r, move)
            data_file = "data/pickle/{}_{}.p".format(r, move)

            if not data_already_produced(parameters_file,
                                         data_file) or args.force:

                json_parameters = parameters.load(parameters_file)
                param = parameters.extract_parameters(json_parameters)
                run_backup = run(param)
                run_backup.save(parameters_file, data_file)

            else:
                run_backup = backup.RunBackup.load(data_file)

            run_backups.append(run_backup)

        analysis.separate.separate(backups=run_backups,
                                   fig_name='fig/separate_{}.pdf'.format(move))
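The data_already_produced() helper called in several of these examples is never shown. A minimal sketch, assuming it does nothing more than check that every path it is given already exists:

import os

def data_already_produced(*file_paths):
    # True only when every requested file is already on disk,
    # so the expensive simulation step can be skipped.
    return all(os.path.exists(p) for p in file_paths)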
Example #2
def produce_data(parameters_file, data_file):
    """
    Produce data for the 'pooled' condition using multiprocessing
    :param parameters_file: Path to parameters file (string)
    :param data_file: Paths of the data files to be written (dictionary with two entries)
    :return: a 'pool backup' (arbitrary Python object)
    """

    json_parameters = parameters.load(parameters_file)

    pool_parameters = parameters.extract_parameters(json_parameters)

    pool = mlt.Pool()

    backups = []

    for bkp in tqdm.tqdm(pool.imap_unordered(run, pool_parameters),
                         total=len(pool_parameters)):
        backups.append(bkp)

    pool_backup = backup.PoolBackup(parameters=json_parameters,
                                    backups=backups)
    pool_backup.save(parameters_file, data_file)

    return pool_backup
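Examples #2 through #4 share one pattern: a multiprocessing pool feeds tqdm through imap_unordered, so finished runs are collected in completion order while a progress bar advances. A self-contained sketch of just that pattern, assuming mlt is an alias for multiprocessing and run is a stand-in for the real simulation:

import multiprocessing as mlt
import tqdm

def run(x):
    return x * x  # stand-in for the real simulation function

if __name__ == "__main__":
    jobs = range(100)
    with mlt.Pool() as pool:
        results = list(tqdm.tqdm(pool.imap_unordered(run, jobs),
                                 total=len(jobs)))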
Example #3
def produce_data(parameters_file, data_file):
    """
    Produce data for the 'pooled' condition using multiprocessing
    :param parameters_file: Path to parameters file (string)
    :param data_file: Path of the data file to be written
    :return: a 'pool backup' (arbitrary Python object)
    """

    json_parameters = parameters.load(parameters_file)

    pool_parameters = parameters.extract_parameters(json_parameters)

    pool = mlt.Pool()

    backups = []

    for bkp in tqdm.tqdm(pool.imap_unordered(run, pool_parameters),
                         total=len(pool_parameters)):
        backups.append(bkp)

    pool_backup = backup.PoolBackup(parameters=json_parameters,
                                    backups=backups)
    pool_backup.save(data_file)

    return pool_backup
Example #4
def main(parameters_file=None):
    """Produce data"""

    param = parameters.load(parameters_file)

    if param.running_mode == 'unique':
        seed = np.random.randint(2**32)
        bkp = model.run((seed, param))
        file_name = bkp.save()
        print("Data have been saved using file name: '{}'.".format(file_name))

        try:
            analysis.separate.pos_firmA_over_pos_firmB(file_name)
        except _tkinter.TclError:
            print("Figures can not be produced if there is no graphic server.")

    else:
        print('Parameters are: ', param.dict())

        pool = mlt.Pool()

        backups = []

        seeds = np.random.randint(2**32, size=param.n_simulations)

        for bkp in tqdm.tqdm(pool.imap_unordered(
                model.run, zip(seeds, (param, ) * param.n_simulations)),
                             total=param.n_simulations):
            backups.append(bkp)

        pool_backup = backup.PoolBackup(parameters=param, backups=backups)

        file_name = pool_backup.save()

        print("Data have been saved using file name: '{}'.".format(file_name))

        try:
            analysis.pool.distance_over_fov(file_name=file_name)
        except _tkinter.TclError:
            print("Figures can not be produced if there is no graphic server.")
Example #5
def individual_data(args):
    """
    Produce figures for 'individual' data
    :param args: Parsed args from command line ('Namespace' object)
    :return: None
    """

    for condition in ("75", "50", "25"):

        parameters_file, data_file, fig_files = get_files_names(condition)

        if not data_already_produced(data_file) or args.force:

            json_parameters = parameters.load(parameters_file)
            param = parameters.extract_parameters(json_parameters)
            run_backup = run(param)
            run_backup.save(data_file)

        else:
            run_backup = backup.RunBackup.load(data_file["pickle"])

        analysis.separate.eeg_like(backup=run_backup,
                                   fig_name=fig_files["eeg_like"])
        analysis.separate.pos_firmA_over_pos_firmB(
            backup=run_backup, fig_name=fig_files["positions"])

        terminal_msg(condition, parameters_file, data_file, fig_files)
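get_files_names() and terminal_msg() are project helpers that this snippet does not include. From the way its return values are used, get_files_names() must yield a parameters path, a data-file mapping with at least a "pickle" entry, and a figure mapping keyed "eeg_like" and "positions". A hypothetical reconstruction (the exact paths are guesses):

def get_files_names(condition):
    parameters_file = "data/json/{}.json".format(condition)
    data_file = {"pickle": "data/pickle/{}.p".format(condition)}
    fig_files = {"eeg_like": "fig/eeg_like_{}.pdf".format(condition),
                 "positions": "fig/positions_{}.pdf".format(condition)}
    return parameters_file, data_file, fig_files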
Example #6
def initialize(ini_file_path):
    """
    Build the initial population and write the output file header
    :param ini_file_path: Path to the INI parameters file (string)
    :return: (population, global_parameters) tuple
    """

    global_parameters = parameters.load(ini_file_path)

    population = Population()
    genotypes_fraction = int(global_parameters['population_size'] /
                             len(definitions.genotypes))

    output_file = open(global_parameters['output_file'], 'w')
    output_file.write('Generation\t')
    for i, genotype in enumerate(definitions.genotypes):
        output_file.write('/'.join(genotype))
        if i < len(definitions.genotypes) - 1:
            output_file.write('\t')
        else:
            output_file.write('\n')
    output_file.close()  # header complete; the handle is not used after this

    for i in range(global_parameters['population_size']):
        genotype = definitions.genotypes[int(i / genotypes_fraction)
                                         % len(definitions.genotypes)]
        population.add_individual(Individual(genotype))

    return population, global_parameters
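The indexing expression genotypes[int(i / genotypes_fraction) % len(genotypes)] deals individuals out in equal consecutive blocks, one block per genotype. A worked illustration with made-up genotypes:

genotypes = ["AA", "Aa", "aa"]                # stand-in for definitions.genotypes
population_size = 6
fraction = population_size // len(genotypes)  # 2 individuals per genotype
assignment = [genotypes[(i // fraction) % len(genotypes)]
              for i in range(population_size)]
print(assignment)  # ['AA', 'AA', 'Aa', 'Aa', 'aa', 'aa']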
Example #7
import sys
import datetime
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import psycopg2

import parameters

params = parameters.load()

dbp = params['db']
ap = params['actor']
conn = psycopg2.connect(dbp['connection_string'])
conn.autocommit = True
cur = conn.cursor()

if __name__ == "__main__":
    df = pd.read_sql('SELECT * FROM param_set', conn)
    print(df)
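The original query carried an f-string prefix with no placeholders (dropped above). When a query does need values, psycopg2 expects them as a separate parameters tuple rather than string interpolation; a sketch reusing the cur cursor from the snippet, with a hypothetical id column:

cur.execute("SELECT * FROM param_set WHERE id = %s", (42,))  # 'id' is assumed
rows = cur.fetchall()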
Example #8
def clustered_data(args):
    """
    Produce the clustered figures (pool, separate and batch panels)
    :param args: Parsed args from command line ('Namespace' object)
    :return: None
    """

    for move in (str(i).replace("Move.", "")
                 for i in (model.Move.max_profit, model.Move.strategic,
                           model.Move.max_diff, model.Move.equal_sharing)):

        parameters_file = "data/json/pool_{}.json".format(move)
        data_file = "data/pickle/pool_{}.p".format(move)

        if not data_already_produced(data_file) or args.force:
            pool_backup = produce_data(parameters_file, data_file)

        else:
            pool_backup = backup.PoolBackup.load(data_file)

        run_backups = []

        for r in ("25", "50"):  # , "75"):

            parameters_file = "data/json/{}_{}.json".format(r, move)
            data_file = "data/pickle/{}_{}.p".format(r, move)

            if not data_already_produced(parameters_file,
                                         data_file) or args.force:

                json_parameters = parameters.load(parameters_file)
                param = parameters.extract_parameters(json_parameters)
                run_backup = run(param)
                run_backup.save(parameters_file, data_file)

            else:
                run_backup = backup.RunBackup.load(data_file)

            run_backups.append(run_backup)

        parameters_file = "data/json/batch_{}.json".format(move)
        data_file = "data/pickle/batch_{}.p".format(move)

        if not data_already_produced(data_file) or args.force:
            batch_backup = produce_data(parameters_file, data_file)

        else:
            batch_backup = backup.PoolBackup.load(data_file)

        fig = plt.figure(figsize=(13.5, 7))
        gs = matplotlib.gridspec.GridSpec(nrows=2,
                                          ncols=2,
                                          width_ratios=[1, 0.7])

        analysis.pool.distance_price_and_profit(pool_backup=pool_backup,
                                                subplot_spec=gs[0, 0])
        analysis.separate.separate(backups=run_backups, subplot_spec=gs[:, 1])
        analysis.batch.plot(batch_backup=batch_backup, subplot_spec=gs[1, 0])

        plt.tight_layout()

        ax = fig.add_subplot(gs[:, :], zorder=-10)

        plt.axis("off")
        ax.text(s="B",
                x=-0.05,
                y=0,
                horizontalalignment='center',
                verticalalignment='center',
                transform=ax.transAxes,
                fontsize=20)
        ax.text(s="A",
                x=-0.05,
                y=0.55,
                horizontalalignment='center',
                verticalalignment='center',
                transform=ax.transAxes,
                fontsize=20)
        ax.text(s="C",
                x=0.58,
                y=0,
                horizontalalignment='center',
                verticalalignment='center',
                transform=ax.transAxes,
                fontsize=20)

        fig_name = "fig/clustered_{}.pdf".format(move)
        os.makedirs(os.path.dirname(fig_name), exist_ok=True)
        plt.savefig(fig_name)
        plt.show()
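The panel letters in this example come from one extra axes spanning the whole GridSpec, pushed behind the real panels with zorder=-10 and stripped of its own decorations. A minimal standalone sketch of that labelling trick:

import matplotlib.pyplot as plt
import matplotlib.gridspec

fig = plt.figure(figsize=(6, 4))
gs = matplotlib.gridspec.GridSpec(nrows=2, ncols=2)
for row in range(2):
    for col in range(2):
        fig.add_subplot(gs[row, col])
plt.tight_layout()

overlay = fig.add_subplot(gs[:, :], zorder=-10)  # invisible axes behind the panels
overlay.axis("off")
overlay.text(s="A", x=-0.05, y=0.55, transform=overlay.transAxes, fontsize=20)
overlay.text(s="B", x=-0.05, y=0.0, transform=overlay.transAxes, fontsize=20)
plt.show()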
Example #9
import os
import numpy as np

import parameters

# Set parameters and initialize
data = parameters.default()
parameters.dump(data)
locals().update(data)
np.random.seed(seed)

dat_filename = "data/population-gain.npy"
prm_filename = "data/population-gain-parameters.json"
fig_filename = "figs/monkey-agent-comparison.pdf"

if not os.path.exists(dat_filename):
    raise Exception("Datafile not found. Please run 'gain-analysis.py'")

# Load and normalize score
score = np.load(dat_filename)
data = parameters.load(prm_filename)
locals().update(data)
score = (score - score.min()) / (score.max() - score.min())
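# Worked check of the normalisation above: scores [3., 7., 5.] map to
# [0., 1., 0.5], since (x - min) / (max - min) sends min -> 0 and max -> 1.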

# -----------------------------------------------------------------------------
import matplotlib.pyplot as plt

fig = plt.figure(figsize=(12, 6))
ax = plt.subplot(1, 2, 1, aspect=1)

from scipy.ndimage import gaussian_filter
gscore = gaussian_filter(score, 1.0)
median = np.median(score)
C = ax.contour(gscore,
               levels=[
                   median,