Example #1
    def test(self):
        logging.basicConfig(level=logging.WARN)
        outstring = ""
        outname = "pet_output%s.tsv" % outstring
        fhandle = open(outname, 'w')
        num_agents = 1000
        num_tsteps = 100
        wealth = np.random.random_sample(num_agents)
        interaction = np.random.random_sample(num_agents)
        responsible = np.random.random_sample(num_agents)
        time = np.random.random_sample(num_agents)
        gender = np.random.randint(1, 3, num_agents)
        pet = np.zeros(num_agents)
        g = igraph.Graph()
        for i in range(0, num_agents):
            g.add_vertex(i)
        vs = g.vs
        agents = []
        for i in range(0, num_agents):
            traits = np.zeros((7))
            traits[0] = pet[i]
            traits[1] = wealth[i]
            traits[2] = interaction[i]
            traits[3] = responsible[i]
            traits[4] = time[i]
            traits[5] = gender[i]
            traits[6] = 0
            g.add_edge(i, vs[1])
            specialagent = Person(vs[i], 7, traits)
            agents.append(specialagent)

        np.random.seed(347)
        env = PetEnvironment(g)
        sim_time = dworp.BasicTime(num_tsteps)  # renamed so it does not shadow the 'time' trait array above
        scheduler = dworp.RandomOrderScheduler(np.random.RandomState(4587))
        term = PetTerminator(50)
        observer = dworp.ChainedObserver(PetObserver(fhandle))
        sim = dworp.BasicSimulation(agents,
                                    env,
                                    sim_time,
                                    scheduler,
                                    observer,
                                    terminator=term)
        sim.run()
        fhandle.close()

        with open("pet_demographics.tsv", 'w') as f:
            f.write('Wealth\tInteraction\tResponsible\tTime\tGender\tPet\n')
            for i in range(0, num_agents):
                f.write("{}\t{}\t{}\t{}\t{}\t".format(wealth[i],
                                                      interaction[i],
                                                      responsible[i], time[i],
                                                      gender[i]))
                f.write("{}\n".format(agent.state[0] for agent in agents))
Example #2
                quit()


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARN)

    # parse command line
    parser = argparse.ArgumentParser()
    parser.add_argument("--pop",
                        help="population (1-1000)",
                        default=400,
                        type=int)
    parser.add_argument("--fps",
                        help="frames per second",
                        default="20",
                        type=int)
    parser.add_argument("--seed", help="seed of RNG", default=42, type=int)
    args = parser.parse_args()

    # validate parameters of simulation
    assert (1 <= args.pop <= 1000)

    observer = dworp.ChainedObserver(
        SugarscapeObserver(),
        PyGameRenderer(args.fps),
    )

    # create and run one realization of the simulation
    sim = SugarscapeSimulation(args, observer)
    sim.run()
Example #3
                        default=1000,
                        type=int)
    parser.add_argument("--red",
                        help="red fertility (0-10)",
                        default=2.0,
                        type=float)
    parser.add_argument("--blue",
                        help="blue fertility (0-10)",
                        default=2.0,
                        type=float)
    parser.add_argument("--seed", help="seed of RNG", default=42, type=int)
    args = parser.parse_args()

    # prepare parameters of simulation
    assert (1 <= args.capacity <= 4000)
    assert (0 <= args.red <= 10)
    assert (0 <= args.blue <= 10)
    params = BirthParams(args.capacity, args.red, args.blue, args.seed)

    # create and run one realization of the simulation
    observer = dworp.ChainedObserver(
        BirthObserver(), dworp.PauseAtEndObserver(3),
        dworp.plot.VariablePlotter(['red_count', 'blue_count'], ['r', 'b'],
                                   title="Birth and Death Sim",
                                   ylim=[300, 700],
                                   xlim=[0, 20],
                                   xlabel='Generations',
                                   ylabel='Population'))
    sim = BirthSimulation(params, observer)
    sim.run()
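
The VariablePlotter call above simply forwards the title, axis limits and labels. For readers without dworp.plot installed, here is a rough matplotlib-only sketch of the same kind of figure; the two series are invented placeholder data, and only the labels and limits come from the example.

import numpy as np
import matplotlib.pyplot as plt

# placeholder series standing in for the simulation's red_count / blue_count history
generations = np.arange(0, 21)
red_count = 500 + 50 * np.sin(generations / 3.0)
blue_count = 500 - 50 * np.sin(generations / 3.0)

plt.plot(generations, red_count, 'r', label='red_count')
plt.plot(generations, blue_count, 'b', label='blue_count')
plt.title("Birth and Death Sim")
plt.xlim([0, 20])
plt.ylim([300, 700])
plt.xlabel('Generations')
plt.ylabel('Population')
plt.legend()
plt.show()
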
Example #4
                        help="number of steps in simulation",
                        default=500,
                        type=int)
    parser.add_argument("--size",
                        help="grid size formatted as XXXxYYY",
                        default="100x100")
    parser.add_argument("--fps",
                        help="frames per second",
                        default="20",
                        type=int)
    parser.add_argument("--seed", help="seed of RNG", default=42, type=int)
    args = parser.parse_args()

    # validate parameters of simulation
    assert (1 <= args.pop <= 1000)
    assert (0 <= args.vision <= 10)
    assert (0 <= args.min_separation <= 5)
    assert (0 <= args.max_separate_turn <= 20)
    assert (0 <= args.max_align_turn <= 20)
    assert (0 <= args.max_cohere_turn <= 20)
    args.area_size = [int(dim) for dim in args.size.split("x")]

    observer = dworp.ChainedObserver(
        FlockingObserver(),
        PyGameRenderer(args.area_size, args.fps),
    )

    # create and run one realization of the simulation
    sim = FlockingSimulation(args, observer)
    sim.run()
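
Several of these examples accept the grid size as an "XXXxYYY" string and split it after parsing. A small sketch of moving that check into an argparse type callback, so malformed input fails at parse time; the helper name grid_size and its error messages are my own and are not part of dworp.

import argparse

def grid_size(text):
    """Parse a 'WIDTHxHEIGHT' string such as '100x100' into a [width, height] list."""
    try:
        width, height = (int(dim) for dim in text.split("x"))
    except ValueError:
        raise argparse.ArgumentTypeError("size must look like '100x100'")
    if width <= 0 or height <= 0:
        raise argparse.ArgumentTypeError("grid dimensions must be positive")
    return [width, height]

parser = argparse.ArgumentParser()
parser.add_argument("--size", help="grid size formatted as XXXxYYY",
                    default="100x100", type=grid_size)
args = parser.parse_args([])  # with no arguments the string default parses to [100, 100]
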
Example #5
    parser.add_argument("--density", help="density of agents (1-99)", default=95, type=int)
    parser.add_argument("--similar", help="desired similarity (0-100)", default=30, type=int)
    parser.add_argument("--size", help="grid size formatted as XXXxYYY", default="50x50")
    parser.add_argument("--seed", help="seed of RNG", default=42, type=int)
    parser.add_argument("--fps", help="frames per second", default="2", type=int)
    parser.add_argument("--no-vis", dest='vis', action='store_false')
    parser.set_defaults(vis=True)
    args = parser.parse_args()

    # prepare parameters of simulation
    assert(1 <= args.density <= 99)
    assert(0 <= args.similar <= 100)
    density = args.density / float(100)
    similarity = args.similar / float(100)
    grid_size = [int(dim) for dim in args.size.split("x")]
    seed = args.seed
    vis_flag = args.vis and 'pygame' in sys.modules
    # vis does not support different colors
    colors = ["blue", "orange", "green", "black"]
    params = DiseaseParams(density, similarity, grid_size, seed, colors)

    # create and run one realization of the simulation
    observer = dworp.ChainedObserver(
        ThingObserver(filename)
    )
    if vis_flag:
        observer.append(dworp.PauseAtEndObserver(3))
        observer.append(PyGameRenderer(grid_size, 10, args.fps))
    sim = DiseaseSimulation(params, observer)
    sim.run()
Example #6
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARN)

    # parse command line
    parser = argparse.ArgumentParser()
    parser.add_argument("--density", help="density of agents (1-99)", default=95, type=int)
    parser.add_argument("--similar", help="desired similarity (0-100)", default=30, type=int)
    parser.add_argument("--size", help="grid size formatted as XXXxYYY", default="50x50")
    parser.add_argument("--seed", help="seed of RNG", default=42, type=int)
    args = parser.parse_args()

    # prepare parameters of simulation
    assert(1 <= args.density <= 99)
    assert(0 <= args.similar <= 100)
    density = args.density / float(100)
    similarity = args.similar / float(100)
    grid_size = [int(dim) for dim in args.size.split("x")]
    seed = args.seed
    colors = ["blue", "orange"]
    params = SegregationParams(density, similarity, grid_size, seed, colors)

    # create and run one realization of the simulation
    observer = dworp.ChainedObserver(
        SegObserver(),
        HeatmapPlotObserver(colors),
        dworp.PauseObserver(delay=1, start=True, matplotlib=True)
    )
    sim = SegregationSimulation(params, observer)
    sim.run()
Example #7
    def test(self):
        lastcountshouldbe = 4

        logging.basicConfig(level=logging.WARN)
        n_tsteps = 100
        n_agents = 10000
        n_fps = 4

        mu = np.array([0.544, 0.504, 0.466, 0.482, 0.304])
        cov = np.zeros((5, 5))
        cov[0, :] = [0.360000, 0.066120, 0.059520, 0.093000, 0.092040]
        cov[1, :] = [0.066120, 0.336400, 0.061132, 0.061132, 0.000000]
        cov[2, :] = [0.059520, 0.061132, 0.384400, 0.042284, -0.021948]
        cov[3, :] = [0.093000, 0.061132, 0.042284, 0.384400, 0.098766]
        cov[4, :] = [0.092040, 0.000000, -0.021948, 0.098766, 0.348100]

        scenariostr = "B"
        makevis = True

        # n_friends: each agent has this many friends (based on the n_friends people who are geographically closest)
        if scenariostr == "A":
            n_friends = 2  # Scenario A
            outstring = "_A"
            # ensuring reproducibility by setting the seed
            np.random.seed(45)
            mu[0] = 0.25
        elif scenariostr == "B":
            n_friends = 5  # Scenario B
            # ensuring reproducibility by setting the seed
            np.random.seed(347)
            outstring = "_B"
            mu[0] = 0.50
        else:
            n_friends = 20  # Scenario C
            outstring = "_C"
            # ensuring reproducibility by setting the seed
            np.random.seed(5769)
            mu[0] = 0.75

        n_friends = 5  # constant: overrides the scenario-specific values set above

        personalities = np.random.multivariate_normal(mu, cov, n_agents)
        personalities[personalities > 1] = 1.0
        personalities[personalities < 0] = 0.0
        wealth = np.random.normal(300000, 100000, n_agents)
        wealth[wealth > 600000] = 600000
        wealth[wealth < 10000] = 10000
        offsets_lat = np.random.random((n_agents, 1))
        offsets_lon = np.random.random((n_agents, 1))
        lat = offsets_lon + 37.4316  # deg north
        lon = offsets_lat + 78.6569  # deg west
        gender = np.random.randint(0, 1, (n_agents, 1))  # note: randint(0, 1) always returns 0, so every agent gets gender 0
        education = np.random.randint(0, 4, (n_agents, 1))
        # colddrinks = np.random.normal(0.80, 0.15, n_agents)
        colddrinks = np.random.normal(0.90, 0.1, n_agents)
        colddrinks[colddrinks > 1] = 1
        colddrinks[colddrinks < 0] = 0

        # eatingout = np.random.normal(0.70,0.10,n_agents)
        eatingout = np.random.normal(0.90, 0.10, n_agents)
        eatingout[eatingout > 1] = 1
        eatingout[eatingout < 0] = 0
        envaware = np.random.random((n_agents, 1))

        g = igraph.Graph()
        for i in range(0, n_agents):
            g.add_vertex(i)
        vs = g.vs
        agents = []
        for i in range(0, n_agents):
            traits = np.zeros((14))
            traits[0] = 0  # initially no one uses the reusable straw
            traits[1] = eatingout[i]
            traits[2] = envaware[i]
            traits[3:8] = personalities[i, :]
            traits[8] = wealth[i]
            traits[9] = lat[i]
            traits[10] = lon[i]
            traits[11] = gender[i]
            traits[12] = education[i]
            traits[13] = colddrinks[i]
            difflat = lat - lat[i]
            difflon = lon - lon[i]
            distsq = np.power(difflat, 2) + np.power(difflon, 2)
            sorted_idx = np.argsort(distsq, axis=0)
            friends = sorted_idx[1:n_friends + 1]
            for j in range(0, len(friends)):
                g.add_edge(i, int(friends[j]))
            curagent = Person(vs[i], 14, traits)
            agents.append(curagent)

        env = EEEnvironment(g)
        time = dworp.BasicTime(n_tsteps)
        # ensuring reproducibility by setting the seed
        scheduler = dworp.RandomOrderScheduler(np.random.RandomState(4587))
        outname = "outputs%s.tsv" % (outstring)
        fhandle = open(outname, 'w')
        myobserver = EEObserver(fhandle)

        #vis_flag = args.vis and 'pygame' in sys.modules
        vis_flag = makevis and 'pygame' in sys.modules
        if vis_flag:
            print("vis_flag is True")
        else:
            print("vis_flag is False")

        # create and run one realization of the simulation
        observer = dworp.ChainedObserver(myobserver)
        if vis_flag:
            observer.append(dworp.PauseAtEndObserver(3))
            pgr = PyGameRenderer(1, n_fps, n_tsteps + 1)
            observer.append(pgr)

        #with open("eatingout.tsv",'w') as f:
        #    for i in range(0,n_agents):
        #        f.write('%f\t%f' % (lat[i],lon[i]))
        #        f.write('\t%f\n' % (eatingout[i]))
        #    f.close()
        initeatingout = eatingout.copy()  # snapshot of the initial values before the simulation runs

        term = EETerminator(100)
        sim = dworp.BasicSimulation(agents,
                                    env,
                                    time,
                                    scheduler,
                                    observer,
                                    terminator=term)
        sim.run()
        fhandle.close()

        # eatingout, age
        age = np.random.randint(16, 65, n_agents)
        with open("demog.tsv", 'w') as f:
            for i in range(0, n_agents):
                f.write('%f\t%f' % (lat[i], lon[i]))
                f.write('\t%f\t%f' % (wealth[i], gender[i]))
                f.write(
                    '\t%d\t%d' %
                    (agents[i].state[0], agents[i].state[agents[i].past_i]))
                f.write('\t%f\t%d\t%f\t%f\t%f\t%f\t%f\t%f\n' %
                        (initeatingout[i], age[i], colddrinks[i],
                         personalities[i, 0], personalities[i, 1],
                         personalities[i, 2], personalities[i, 3],
                         personalities[i, 4]))

        if vis_flag:
            filename_list = pgr.filename_list
            seconds_per_frame = 1.0 / n_fps
            frame_delay = str(int(seconds_per_frame * 100))
            #command_list = ['convert', '-delay', frame_delay, '-loop', '0'] + filename_list + ['anim.gif']
            command_list = (['convert', '-delay', frame_delay, '-loop', '0']
                            + filename_list + ['anim%s.gif' % outstring])
            # pdb.set_trace()  # debugging breakpoint, left disabled so the test can run unattended
            try:
                # Use the "convert" command (part of ImageMagick) to build the animation
                subprocess.call(command_list)
            except Exception:
                print("couldn't create the animation")
            # Earlier, we saved an image file for each frame of the animation. Now
            # that the animation is assembled, we can (optionally) delete those files
            for filename in filename_list:
                os.remove(filename)
            return

        lastcount = myobserver.computenumreusablestrawusers(0, agents, env)
        print("Last Count = %d" % (lastcount))
        if lastcount == lastcountshouldbe:
            print("Regression test passed!")
            return True
        else:
            print("Regression test failed! last count should be %d" %
                  (lastcountshouldbe))
            return False
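
The friend graph in this example links each agent to its n_friends geographically nearest neighbours, one add_edge call at a time. A standalone sketch of the same construction follows; the function name is mine, and it uses only numpy and python-igraph calls, with add_edges batching the inserts.

import igraph
import numpy as np

def build_friend_graph(lat, lon, n_friends):
    """Connect each agent to its n_friends nearest neighbours by squared lat/lon distance."""
    n_agents = len(lat)
    g = igraph.Graph()
    g.add_vertices(n_agents)
    edges = []
    for i in range(n_agents):
        distsq = (lat - lat[i]) ** 2 + (lon - lon[i]) ** 2
        order = np.argsort(distsq, axis=0).ravel()
        for j in order[1:n_friends + 1]:  # skip position 0, which is agent i itself
            edges.append((i, int(j)))
    g.add_edges(edges)
    return g

Like the loop in the example, this can create parallel edges when two agents are each other's nearest neighbours; g.simplify() collapses them if a simple graph is needed.
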
Example #8
    def test(self):
        logging.basicConfig(level=logging.WARN)
        outstring = "_B"
        outname = "outputs%s.tsv" % (outstring)
        fhandle = open(outname, 'w')
        num_agents = 5000
        num_tsteps = 150
        wealth = np.random.normal(100000, 20000, num_agents)
        offsets_lat = np.random.random((num_agents, 1))
        offsets_lon = np.random.random((num_agents, 1))
        lat = offsets_lon + 37.4316  # deg north
        lon = offsets_lat + 78.6569  # deg west
        SA = np.random.random((num_agents, 1))
        phone = np.random.randint(0, 3, num_agents)  # values 0..2; np.random.random_integers is deprecated
        g = igraph.Graph()
        for i in range(0, num_agents):
            g.add_vertex(i)
        vs = g.vs
        agents = []
        for i in range(0, num_agents):
            traits = np.zeros((5))
            traits[0] = phone[i]
            traits[1] = wealth[i]
            traits[2] = lat[i]
            traits[3] = lon[i]
            traits[4] = SA[i]
            difflat = lat - lat[i]
            difflon = lon - lon[i]
            distsq = np.power(difflat, 2) + np.power(difflon, 2)
            sorted_idx = np.argsort(distsq, axis=0)
            n_friends = 5
            friends = sorted_idx[1:n_friends + 1]
            for j in range(0, len(friends)):
                g.add_edge(i, int(friends[j]))
            curagent = Person(vs[i], 5, traits)
            agents.append(curagent)

        np.random.seed(347)
        makevis = True
        vis_flag = makevis and 'pygame' in sys.modules
        if vis_flag:
            print("vis_flag is True")
        else:
            print("vis_flag is False")
        # vis does not support different colors
        # color
        #
        # s = ["blue", "orange"]
        # params = SegregationParams(density, similarity, grid_size, seed, colors)

        # create and run one realization of the simulation
        observer = dworp.ChainedObserver(PhoneObserver(fhandle))
        n_fps = 4

        if vis_flag:
            observer.append(dworp.PauseAtEndObserver(3))
            pgr = PyGameRenderer(1, n_fps, num_tsteps + 1)
            observer.append(pgr)
        env = PhoneEnvironment(g)
        time = dworp.BasicTime(num_tsteps)
        scheduler = dworp.RandomOrderScheduler(np.random.RandomState(4587))
        term = PhoneTerminator(50)
        sim = dworp.BasicSimulation(agents, env, time, scheduler, observer, terminator=term)
        sim.run()
        fhandle.close()

        with open("demographics.tsv",'w') as f:
            f.write('Lat\tLon\tWealth\tSA\tPhone\n')
            for i in range(0,num_agents):
                f.write('{}\t{}\t'.format(lat[i], lon[i]))
                f.write('{}\t'.format(wealth[i]))
                f.write('{}'.format(SA[i]))
                if agents[i].state[0] == 0:
                    f.write('\tNone\n')
                elif agents[i].state[0] == 1:
                    f.write('\tApple\n')
                elif agents[i].state[0] == 2:
                    f.write('\tAndroid\n')
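
The demographics file above is written with manual tab formatting and an if/elif chain for the phone label. An equivalent sketch using csv.writer and a lookup table: the PHONE_LABELS codes are taken from the branches above, everything else is standard library, and the function name is mine.

import csv

PHONE_LABELS = {0: "None", 1: "Apple", 2: "Android"}

def write_demographics(path, lat, lon, wealth, SA, agents):
    """Write one tab-separated row per agent: position, wealth, SA, and phone label."""
    with open(path, "w", newline="") as f:
        writer = csv.writer(f, delimiter="\t")
        writer.writerow(["Lat", "Lon", "Wealth", "SA", "Phone"])
        for i, agent in enumerate(agents):
            writer.writerow([float(lat[i]), float(lon[i]), float(wealth[i]),
                             float(SA[i]), PHONE_LABELS.get(int(agent.state[0]), "Unknown")])
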
Example #9
                        help="grid size formatted as XXXxYYY",
                        default="50x50")
    parser.add_argument("--seed", help="seed of RNG", default=42, type=int)
    parser.add_argument("--fps",
                        help="frames per second",
                        default="2",
                        type=int)
    parser.add_argument("--no-vis", dest='vis', action='store_false')
    parser.set_defaults(vis=True)
    args = parser.parse_args()

    # prepare parameters of simulation
    assert (1 <= args.density <= 99)
    assert (0 <= args.similar <= 100)
    density = args.density / float(100)
    similarity = args.similar / float(100)
    grid_size = [int(dim) for dim in args.size.split("x")]
    seed = args.seed
    vis_flag = args.vis and 'pygame' in sys.modules
    # vis does not support different colors
    colors = ["blue", "orange"]
    params = SegregationParams(density, similarity, grid_size, seed, colors)

    # create and run one realization of the simulation
    observer = dworp.ChainedObserver(SegObserver())
    if vis_flag:
        observer.append(dworp.PauseAtEndObserver(3))
        observer.append(PyGameRenderer(grid_size, 10, args.fps))
    sim = SegregationSimulation(params, observer)
    sim.run()
Example #10
                        default=95,
                        type=int)
    parser.add_argument("--similar",
                        help="desired similarity (0-100)",
                        default=30,
                        type=int)
    parser.add_argument("--size",
                        help="grid size formatted as XXXxYYY",
                        default="200x200")
    parser.add_argument("--seed", help="seed of RNG", default=42, type=int)
    args = parser.parse_args()

    # prepare parameters of simulation
    assert (1 <= args.density <= 99)
    assert (0 <= args.similar <= 100)
    density = args.density / float(100)
    similarity = args.similar / float(100)
    grid_size = [int(dim) for dim in args.size.split("x")]
    seed = args.seed
    colors = ["blue", "orange"]
    params = SegregationParams(density, similarity, grid_size, seed, colors)

    # create and run one realization of the simulation
    observer = dworp.ChainedObserver(
        SegObserver(),
        #HeatmapPlotObserver(colors),
        #dworp.plot.PlotPauseObserver(delay=1, start=True)
    )
    sim = SegregationSimulation(params, observer)
    sim.run()
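
All of the __main__ blocks above run a single realization. A hedged sketch of sweeping the RNG seed with the same classes used in this example; it assumes SegregationParams, SegregationSimulation, SegObserver and dworp are importable exactly as above, and passes the constructor arguments positionally in the same order as the example.

# run several realizations of the segregation model, varying only the RNG seed
colors = ["blue", "orange"]
for seed in range(42, 47):
    params = SegregationParams(0.95, 0.30, [200, 200], seed, colors)
    observer = dworp.ChainedObserver(SegObserver())
    sim = SegregationSimulation(params, observer)
    sim.run()
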