Example #1
# The original snippet omits its imports and MODEL_NM; the names below are
# assumed, following the pattern shown in Example #5:
import indra.utils as utils
import indra.prop_args as props
import indra.grid_env as ge
import obstacle_model as om

MODEL_NM = "obstacle_model"


def run():
    (prog_file, log_file, prop_file,
     results_file) = utils.gen_file_names(MODEL_NM)

    # We store basic parameters in a "property" file; this allows us to save
    #  multiple parameter sets, which is important in simulation work.
    #  We can read these in from file or set them here.
    pa = utils.read_props(MODEL_NM)
    if pa is None:
        pa = props.PropArgs(MODEL_NM, logfile=log_file, props=None)

    # Now we create a minimal environment for our agents to act within:
    env = ge.GridEnv("Obstacle env",
                     pa.get("grid_height"),
                     pa.get("grid_width"),
                     torus=False,
                     model_nm=MODEL_NM,
                     postact=True,
                     props=pa)

    # Now we loop creating multiple agents with numbered names
    # based on the loop variable:
    for i in range(pa.get("num_moving_agents")):
        env.add_agent(
            om.ObstacleAgent(name="agent" + str(i),
                             goal="Avoiding obstacles!",
                             max_move=4,
                             tolerance=2))
    for i in range(pa.get("num_obstacles")):
        env.add_agent(om.Obstacle(name="obstacle" + str(i)))

    utils.run_model(env, prog_file, results_file)
Example #2
# The original snippet omits its imports and MODEL_NM; the names below are
# assumed, following the pattern shown in Example #5:
import indra.utils as utils
import indra.prop_args as props
import indra.two_pop_markov as itpm
import two_pop_markov_model as tpm

MODEL_NM = "two_pop_markov_model"


def run():
    (prog_file, log_file, prop_file,
     results_file) = utils.gen_file_names(MODEL_NM)

    # We store basic parameters in a "property" file; this allows us to save
    #  multiple parameter sets, which is important in simulation work.
    #  We can read these in from file or set them here.
    pa = utils.read_props(MODEL_NM)
    if pa is None:
        pa = props.PropArgs(MODEL_NM, logfile=log_file, props=None)

    # Now we create a minimal environment for our agents to act within:
    env = itpm.TwoPopEnv("Test grid env",
                         pa.get("grid_width"),
                         pa.get("grid_height"),
                         torus=False,
                         model_nm=MODEL_NM,
                         postact=True,
                         props=pa)

    # Now we loop creating multiple agents with numbered names
    # based on the loop variable:

    # Create the followers:
    for i in range(pa.get("num_agents")):
        env.add_agent(
            tpm.TestFollower(name="Follower" + str(i),
                             goal="taking up a grid space!",
                             max_move=1))

    # Create the leaders (the model's "hipsters"):
    for i in range(pa.get("num_agents")):
        env.add_agent(
            tpm.TestLeader(name="Leader" + str(i),
                           goal="taking up a grid space!",
                           max_move=1))

    utils.run_model(env, prog_file, results_file)
Example #3
# The original snippet omits its imports and MODEL_NM; the names below are
# assumed, with menu_model standing in for whichever module defines MenuEnv
# and MenuAgent:
import indra.utils as utils
import indra.prop_args as props
import menu_model as mm

MODEL_NM = "menu_model"


def run():
    (prog_file, log_file, prop_file,
     results_file) = utils.gen_file_names(MODEL_NM)

    # We store menu parameters in a "property" file; this allows us to save
    #  multiple parameter sets, which is important in simulation work.
    #  We can read these in from file or set them here.
    pa = utils.read_props(MODEL_NM)
    if pa is None:
        pa = props.PropArgs(MODEL_NM, logfile=log_file, props=None)

    # Now we create a minimal environment for our agents to act within:
    env = mm.MenuEnv(model_nm=MODEL_NM, props=pa)

    # Now we loop creating multiple agents
    #  with numbered names based on the loop variable:
    for i in range(pa.get("num_agents")):
        env.add_agent(
            mm.MenuAgent(name="agent" + str(i),
                         goal="testing our menu capabilities!"))

    utils.run_model(env, prog_file, results_file)
Example #4
"""
import logging
import indra.node as node
import indra.prop_args as props
import predprey_model as ppm

MODEL_NM = "predprey_model"
PROG_NM = MODEL_NM + ".py"
LOG_FILE = MODEL_NM + ".txt"

read_props = False

if read_props:
    pa = props.PropArgs.read_props(MODEL_NM, "predprey.props")
else:
    pa = props.PropArgs(MODEL_NM, logfile=LOG_FILE, props=None)
    pa.set("model", MODEL_NM)
    pa.set("num_foxes", 16)
    pa.set("num_rabbits", 48)
    pa.set("num_zombies", 6)
    pa.set("fox_repro_age", 11)
    pa.set("rabbit_repro_age", 3.6)
    pa.set("fox_life_force", 32.8)
    pa.set("rabbit_life_force", 22.0)
    pa.set("fox_max_move", 12.8)
    pa.set("rabbit_max_move", 10.2)
    pa.set("fox_decay_rate", 5.8)
    pa.set("rabbit_decay_rate", 3.98)
    pa.set("fox_max_detect", 40.0)

env = ppm.PredPreyEnv("meadow", 50.0, 50.0)
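
# The excerpt stops once the environment is created.  A minimal sketch of how
# the setup might continue, following the add_agent() pattern used in the
# other examples; the Fox and Rabbit class names and their constructor
# arguments are assumptions, not taken from the original model:
for i in range(pa.get("num_foxes")):
    env.add_agent(ppm.Fox(name="fox" + str(i)))
for i in range(pa.get("num_rabbits")):
    env.add_agent(ppm.Rabbit(name="rabbit" + str(i)))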
Example #5
import indra.utils as utils
import indra.prop_args as props
import indra.two_pop_markov as itpm
import two_pop_markov_model as tpm

# set up some file names:
MODEL_NM = "two_pop_markov_model"
(prog_file, log_file, prop_file, results_file) = utils.gen_file_names(MODEL_NM)

# We store basic parameters in a "property" file; this allows us to save
#  multiple parameter sets, which is important in simulation work.
#  We can read these in from file or set them here.
pa = utils.read_props(MODEL_NM)
if pa is None:
    pa = props.PropArgs(MODEL_NM, logfile=log_file, props=None)
    utils.get_grid_dims(pa, 6)
    utils.get_agent_num(pa, "num_agents", "agents", 16)

# Now we create a minimal environment for our agents to act within:
env = itpm.TwoPopEnv(
    "Test two pop Markov env",
    pa.get("grid_width"),
    pa.get("grid_height"),
    preact=True,
    postact=True,
    trans_str="0.5 0.5; 0.5 0.5",
    model_nm=MODEL_NM,
    torus=False,
)
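
# The listing stops once the environment is built.  trans_str appears to give
# a row-stochastic 2x2 Markov transition matrix as semicolon-separated rows
# ("0.5 0.5; 0.5 0.5").  One possible continuation, mirroring Example #2, is
# sketched here; reusing TestFollower and TestLeader for this model is an
# assumption:
for i in range(pa.get("num_agents")):
    env.add_agent(
        tpm.TestFollower(name="Follower" + str(i),
                         goal="taking up a grid space!",
                         max_move=1))
    env.add_agent(
        tpm.TestLeader(name="Leader" + str(i),
                       goal="taking up a grid space!",
                       max_move=1))

utils.run_model(env, prog_file, results_file)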