Example #1
def run():
    (prog_file, log_file, prop_file, results_file) = utils.gen_file_names(MODEL_NM)
    
    # We store basic parameters in a "property" file; this allows us to save
    #  multiple parameter sets, which is important in simulation work.
    #  We can read these in from file or set them here.
    pa = utils.read_props(MODEL_NM)
    if pa is None:
        pa = props.PropArgs(MODEL_NM, logfile=log_file, props=None)
    
    # Now we create a minimal environment for our agents to act within:
    env = itpm.TwoPopEnv("Test grid env",
                         pa.get("grid_width"),
                         pa.get("grid_height"),
                         torus=False,
                         model_nm=MODEL_NM,
                         postact=True,
                         props=pa)
    
    # Now we loop creating multiple agents with numbered names
    # based on the loop variable:
    
    # Create the followers:
    for i in range(pa.get("num_agents")):
        env.add_agent(tpm.TestFollower(name="Follower" + str(i),
                                       goal="taking up a grid space!",
                                       max_move=1))

    # Create the leaders:
    for i in range(pa.get("num_agents")):
        env.add_agent(tpm.TestLeader(name="Leader" + str(i),
                                     goal="taking up a grid space!",
                                     max_move=1))
    
    utils.run_model(env, prog_file, results_file)
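
These listings define run() but do not show how it is invoked. A minimal sketch of a typical entry point, assuming the script is executed directly; the __main__ guard below is not part of the original example:

# Hypothetical entry point (not in the original listing): execute the model
# when the script is run directly.
if __name__ == "__main__":
    run()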
Example #2
def run():
    (prog_file, log_file, prop_file, results_file) = utils.gen_file_names(MODEL_NM)
    
    # We store basic parameters in a "property" file; this allows us to save
    #  multiple parameter sets, which is important in simulation work.
    #  We can read these in from file or set them here.
    pa = utils.read_props(MODEL_NM)
    if pa is None:
        pa = props.PropArgs(MODEL_NM, logfile=log_file, props=None)
    
    # Now we create a minimal environment for our agents to act within:
    env = ge.GridEnv("Obstacle env",
                     pa.get("grid_height"),
                     pa.get("grid_width"),
                     torus=False,
                     model_nm=MODEL_NM,
                     postact=True,
                     props=pa)
    
    # Now we loop creating multiple agents with numbered names
    # based on the loop variable:
    for i in range(pa.get("num_moving_agents")):
        env.add_agent(om.ObstacleAgent(name="agent" + str(i),
                                       goal="Avoiding obstacles!",
                                       max_move=4,
                                       tolerance=2))
    for i in range(pa.get("num_obstacles")):
        env.add_agent(om.Obstacle(name="obstacle" + str(i)))
    
    utils.run_model(env, prog_file, results_file)
Example #3
def run():
    (prog_file, log_file, prop_file,
     results_file) = utils.gen_file_names(MODEL_NM)

    # We store basic parameters in a "property" file; this allows us to save
    #  multiple parameter sets, which is important in simulation work.
    #  We can read these in from file or set them here.
    pa = utils.read_props(MODEL_NM)
    if pa is None:
        pa = props.PropArgs(MODEL_NM, logfile=log_file, props=None)

    # Now we create a minimal environment for our agents to act within:
    env = ge.GridEnv("Obstacle env",
                     pa.get("grid_height"),
                     pa.get("grid_width"),
                     torus=False,
                     model_nm=MODEL_NM,
                     postact=True,
                     props=pa)

    # Now we loop creating multiple agents with numbered names
    # based on the loop variable:
    for i in range(pa.get("num_moving_agents")):
        env.add_agent(
            om.ObstacleAgent(name="agent" + str(i),
                             goal="Avoiding obstacles!",
                             max_move=4,
                             tolerance=2))
    for i in range(pa.get("num_obstacles")):
        env.add_agent(om.Obstacle(name="obstacle" + str(i)))

    utils.run_model(env, prog_file, results_file)
Example #4
def run(prop_dict=None):
    (prog_file, log_file, prop_file, results_file) = utils.gen_file_names(MODEL_NM)
    
    # We store basic parameters in a "property" file; this allows us to save
    #  multiple parameter sets, which is important in simulation work.
    #  We can read these in from file or set them here.
    global pa

    if prop_dict is not None:
        prop_dict[props.PERIODS] = 0
        pa.add_props(prop_dict)
    else:
        result = utils.read_props(MODEL_NM)
        if result:
            pa.add_props(result.props)
        else:
            utils.ask_for_params(pa)
        
    if pa["user_type"] == props.WEB:
        pa["base_dir"] = os.environ['base_dir']
    
    # Now we create a minimal environment for our agents to act within:
    env = wm.WolframEnv("Wolfram Env",
                        pa["grid_width"],
                        pa["grid_height"],
                        model_nm=MODEL_NM,
                        props=pa,
                        rule_id=pa["rule_id"])
    
    # This env adds agents itself.
    
    return utils.run_model(env, prog_file, results_file)
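
Unlike the other examples, this run() accepts an optional prop_dict so a caller (for instance, a web front end) can supply parameters directly instead of reading them from the property file. A hedged sketch of the two call paths; the keys mirror the ones this run() itself reads (grid_width, grid_height, rule_id), and the values are illustrative only:

# Hypothetical call sites (not in the original listing).
# 1) Parameters come from the property file, or the user is prompted:
run()

# 2) Parameters are supplied programmatically; the values are placeholders.
run(prop_dict={"grid_width": 40, "grid_height": 40, "rule_id": 30})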
Example #5
def run():
    (prog_file, log_file, prop_file,
     results_file) = utils.gen_file_names(MODEL_NM)

    # We store basic parameters in a "property" file; this allows us to save
    #  multiple parameter sets, which is important in simulation work.
    #  We can read these in from file or set them here.
    pa = utils.read_props(MODEL_NM)
    if pa is None:
        pa = props.PropArgs(MODEL_NM, logfile=log_file, props=None)

    # Now we create a minimal environment for our agents to act within:
    env = itpm.TwoPopEnv("Test grid env",
                         pa.get("grid_width"),
                         pa.get("grid_height"),
                         torus=False,
                         model_nm=MODEL_NM,
                         postact=True,
                         props=pa)

    # Now we loop creating multiple agents with numbered names
    # based on the loop variable:

    # Create the followers:
    for i in range(pa.get("num_agents")):
        env.add_agent(
            tpm.TestFollower(name="Follower" + str(i),
                             goal="taking up a grid space!",
                             max_move=1))

    # Create the leaders:
    for i in range(pa.get("num_agents")):
        env.add_agent(
            tpm.TestLeader(name="Leader" + str(i),
                           goal="taking up a grid space!",
                           max_move=1))

    utils.run_model(env, prog_file, results_file)
Example #6
def run():
    (prog_file, log_file, prop_file,
     results_file) = utils.gen_file_names(MODEL_NM)

    # We store menu parameters in a
    # "property" file; this allows us to save
    #  multiple parameter sets, which is important in simulation work.
    #  We can read these in from file or set them here.
    pa = utils.read_props(MODEL_NM)
    if pa is None:
        pa = props.PropArgs(MODEL_NM, logfile=log_file, props=None)

    # Now we create a minimal environment for our agents to act within:
    env = mm.MenuEnv(model_nm=MODEL_NM, props=pa)

    # Now we loop creating multiple agents
    #  with numbered names based on the loop variable:
    for i in range(pa.get("num_agents")):
        env.add_agent(
            mm.MenuAgent(name="agent" + str(i),
                         goal="testing our menu capabilities!"))

    utils.run_model(env, prog_file, results_file)
Example #7
"""
A script to test our two-pop Markov capabilities.
"""

import indra.utils as utils
import indra.prop_args as props
import indra.two_pop_markov as itpm
import two_pop_markov_model as tpm

# set up some file names:
MODEL_NM = "two_pop_markov_model"
(prog_file, log_file, prop_file, results_file) = utils.gen_file_names(MODEL_NM)

# We store basic parameters in a "property" file; this allows us to save
#  multiple parameter sets, which is important in simulation work.
#  We can read these in from file or set them here.
pa = utils.read_props(MODEL_NM)
if pa is None:
    pa = props.PropArgs(MODEL_NM, logfile=log_file, props=None)
    utils.get_grid_dims(pa, 6)
    utils.get_agent_num(pa, "num_agents", "agents", 16)

# Now we create a minimal environment for our agents to act within:
env = itpm.TwoPopEnv(
    "Test two pop Markov env",
    pa.get("grid_width"),
    pa.get("grid_height"),
    preact=True,
    postact=True,
    trans_str="0.5 0.5; 0.5 0.5",
    model_nm=MODEL_NM,
    torus=False,
    # NOTE: the original listing breaks off here; closing the call with
    # props=pa is assumed from the TwoPopEnv examples above.
    props=pa)
Example #8
"""
A script to test our spatial capabilities.
"""

import indra.utils as utils
import indra.prop_args as props
import indra.spatial_env as se
import models.spatial as sm

# set up some file names:
MODEL_NM = "spatial_model"
(prog_file, log_file, prop_file, results_file) = utils.gen_file_names(MODEL_NM)

# We store basic parameters in a "property" file; this allows us to save
#  multiple parameter sets, which is important in simulation work.
#  We can read these in from file or set them here.
pa = utils.read_props(MODEL_NM)
if pa is None:
    pa = props.PropArgs(MODEL_NM, logfile=log_file, props=None)

# Now we create a minimal environment for our agents to act within:
env = se.SpatialEnv("Test spatial env", 100.0, 100.0,
                    model_nm=MODEL_NM, props=pa)

# Now we loop creating multiple agents with numbered names
# based on the loop variable:
for i in range(pa.get("num_agents")):
    env.add_agent(
        sm.TestSpatialAgent(name="agent" + str(i),
                            goal="moving around aimlessly!"))

utils.run_model(env, prog_file, results_file)