Example no. 1
def test_pcg64_1():
    pcg = PCG64(SeedSequence(12345678909876543321))
    cpcg = LCG128Mix()
    st = cpcg.state
    st["state"]["state"] = pcg.state["state"]["state"]
    st["state"]["inc"] = pcg.state["state"]["inc"]
    cpcg.state = st
    expected = pcg.random_raw(1000)
    result = cpcg.random_raw(1000)
    np.testing.assert_equal(expected, result)
Example no. 2
    def __init__(self, id, fov_radius, resources, metabolic_rate, strategy, seed):
        self.id = id
        self.fov_radius = fov_radius
        self.resources = resources
        self.metabolic_rate = metabolic_rate
        if strategy < -10 or strategy > 10:
            print("Error: unexpected strategy value. Expected [-10, 10]")
        self.strategy = strategy
        self.generator = PCG64(seed, 0).generator
        self.time_alive = 0
Example no. 3
def test_pcg64_dxsm():
    pcg = PCG64(SeedSequence(12345678909876543321), variant="dxsm-128")
    cpcg = LCG128Mix(output="dxsm")
    st = cpcg.state
    st["state"]["state"] = pcg.state["state"]["state"]
    st["state"]["inc"] = pcg.state["state"]["inc"]
    cpcg.state = st
    expected = pcg.random_raw(1000)
    result = cpcg.random_raw(1000)
    np.testing.assert_equal(expected, result)
Example no. 4
def test_pcg64_cm_dxsm():
    pcg = PCG64(SeedSequence(12345678909876543321), variant="dxsm")
    cpcg = LCG128Mix(output="dxsm", post=False, multiplier=DEFAULT_DXSM_MULTIPLIER)
    st = cpcg.state
    st["state"]["state"] = pcg.state["state"]["state"]
    st["state"]["inc"] = pcg.state["state"]["inc"]
    cpcg.state = st
    expected = pcg.random_raw(1000)
    result = cpcg.random_raw(1000)
    np.testing.assert_equal(expected, result)
Example no. 5
def test_equivalence_pcg64dxsm(seed, inc):
    a = PCG64(seed, inc, mode="sequence", variant="dxsm")
    b = PCG64DXSM(seed, inc)
    assert np.all((a.random_raw(10000) - b.random_raw(10000)) == 0)
    assert np.all((a.random_raw(13) - b.random_raw(13)) == 0)
    a_st = a.state
    b_st = b.state
    assert a_st["state"] == b_st["state"]
    a = a.advance(345671)
    b = b.advance(345671)
    a_st = a.state
    b_st = b.state
    assert a_st["state"] == b_st["state"]
Example no. 6
    def __init__(self, seed, collect_stepwise_data, N, structure_instance_num,
                 g, randomize_update_seq, b_distro, b_0_is_zero, normalize_bs,
                 error_variance, normalize_yis, uninformed_rate):
        """ Create a new model, configured based on design point inputs.

        Args:
            seed (int): seed for the random number generator. We use multiple
              streams, and each stream will be initialized with this seed, as in
              PCG64(seed=seed, stream=<stream num>).
            collect_stepwise_data (boolean): Indicates if model data should be collected
              each time step.
            N (int): number of agents.
            structure_instance_num (int): Network structure instance number from
              network_graphs.py.
            g (int): number of groups for dividing agents. Must evenly divide N.
            randomize_update_seq (boolean): If False, groups are updated in same order
              each time step. If True, group update order is randomized each time.
            b_distro (string): Label for distribution function agents use to
              generate their b_ij values. Allowed values are 'U01', 'U-11', and
              'N01'.
            b_0_is_zero (boolean): If True, set b_i0 to 0 for all i. If False,
              b_i0 is sampled from b_distro.
            normalize_bs ('agent', 'network', or 'no'): If 'no', do not
              normalize b_i* values. If 'agent', normalize b_i* within each agent.
              If 'network', normalize b_i* across whole network.
            error_variance (positive float): Variance value for zero-mean
              normal error term.
            normalize_yis (boolean): If True, normalize y_i for all i each time
              step by dividing by sum(y_i). If False, do not normalize y_i.
            uninformed_rate (float): Fraction of agents to be made initially
              uninformed. Must be in interval [0, 1).

        Attributes:
            schedule (obj): Update scheduler for agents.
            running (boolean): Indicates if the model should continue running.
            datacollector (obj:DataCollector): Object for step-wise data
              collection.
            agents (list of Agents): Model agents.
            rg (list of RandomGenerators): Streams for RNG. See RNGStream enum
              for use cases.
            G (NetworkX.DiGraph): Graph structure of network.
            d_i_max (int): Maximum out-degree of agents in network.
            b_distro (obj:RandomGenerator): RNG for b_i* values of agents.
            b_0_is_zero (boolean): Indicates if b_i0 should be 0.
            error_distro (obj:RandomGenerator): RNG for agent error terms.
            normalize_yis (boolean): Indicates if agent y_i values should be
              normalized each time step.
            uninformed_rate (float): Fraction of agents to be made initially
              uninformed.

        """
        # Minor error checking for easy mistakes (larger mistakes are punished)
        if N % g != 0:
            raise Exception("N must be evenly divisible by g")
        if not (0 <= uninformed_rate < 1):
            raise Exception("uninformed_rate must be in [0, 1)")

        # Prepare random number streams
        self.iseed = seed
        self.rg = [
            RandomGenerator(PCG64(self.iseed, i + 1))
            for i in range(len(RNGStream) + 1)
        ]

        # Create network
        self.G = get_network_instance(N, structure_instance_num)
        self.d_i_max = np.max(self.G.out_degree, axis=0)[1]

        # Create "reachback" functions for agents to use.
        if b_distro == 'U01':
            self.b_distro = lambda: self.rg[RNGStream.B_COEFFS].uniform(0, 1)
        elif b_distro == 'U-11':
            self.b_distro = lambda: self.rg[RNGStream.B_COEFFS].uniform(-1, 1)
        elif b_distro == 'N01':
            self.b_distro = lambda: self.rg[RNGStream.B_COEFFS].normal(0, 1)
        else:
            raise Exception("b_distro must be one of 'U01', 'U-11', or 'N01'")

        self.b_0_is_zero = b_0_is_zero
        self.error_distro = lambda: self.rg[RNGStream.ERROR_TERM].normal(
            0, error_variance)

        # Create agents
        self.agents = [Agent(i, self) for i in range(N)]
        for agent in self.agents:
            agent.meet_neighbors()  # Must defer until all agents created

        # Create scheduler & groups
        # Agents don't need to know what group they're in, so this data is held
        # only by the scheduler.
        update_rng = self.rg[RNGStream.UPDATE_SEQUENCING]
        if g == 1:
            # Add everyone to same group. Ignores `randomize_update_seq`.
            self.schedule = mesa.time.SimultaneousActivation(self)
            self.schedule.agents = list(self.agents)
        elif g == N:
            # Everyone in own group, so don't actually need groups
            self.schedule = schedulers.SingleAgentSimultaneousActivation(
                self, rg=update_rng, random_order=randomize_update_seq)
            self.schedule.agents = list(self.agents)
        else:
            self.schedule = schedulers.GroupedSimultaneousActivation(
                self,
                randomize_group_order=randomize_update_seq,
                rg=update_rng)

            cur_group_num = 0
            agent_nums = list(range(N))
            self.rg[RNGStream.GROUP_ASSIGNMENT].shuffle(agent_nums)
            for i in agent_nums:
                self.schedule.add(self.agents[i], cur_group_num)
                cur_group_num = (cur_group_num + 1) % g

        # Normalize values as needed
        self._normalize_bs(normalize_bs)
        self.normalize_yis = normalize_yis

        self.uninformed_rate = uninformed_rate
        self._make_agents_uninformed()

        # Prepare step-wise data collection
        self.collect_stepwise_data = collect_stepwise_data
        if self.collect_stepwise_data:
            self._build_datacollector()


#            # One-time collection of agent constants b_i*; not currently using
#            for agent in self.agents:
#                self.datacollector.add_table_row(
#                    'Initial agent settings',
#                    {'id': agent.unique_id, 'b_i0': agent.b_0, 'b_ij': agent.b_j}
#                )

        self.running = True
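The RNGStream enum used throughout this constructor is defined elsewhere in the project. A minimal sketch of what it might look like, inferred from the attribute accesses above (the member names appear in the excerpt; the 1-based values are an assumption that matches the len(RNGStream) + 1 generators created above, leaving self.rg[0] unused):
# Hypothetical sketch only, not the project's actual definition.
from enum import IntEnum

class RNGStream(IntEnum):
    B_COEFFS = 1           # b_i* coefficient draws
    ERROR_TERM = 2         # zero-mean normal error terms
    UPDATE_SEQUENCING = 3  # randomizing group update order
    GROUP_ASSIGNMENT = 4   # shuffling agents into groups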
Example no. 7
    def __init__(self,
                 num_agents,
                 env_size,
                 resource_prob,
                 metabolic_rate=1,
                 field_of_vision=1,
                 seed=1234567,
                 debug=False):
        self.timestep = 0
        self.agents = list()
        self.resources = list()
        self.agents_loc = dict()
        self.environment = [[self.EMPTY_CELL for x in range(env_size)]
                            for y in range(env_size)]
        self.env_size = env_size
        self.debug = debug

        # Keep track of agents that have been eliminated in the current turn to remove them from the universe
        self.agents_to_remove = []

        # Four streams:
        # 0. To place agents randomly in the environment
        # 1. To determine strategy value for each agent
        # 2. To determine whether a cell is a resource cell
        # 3. To shuffle the list of agents
        self.streams = [PCG64(seed, stream) for stream in range(4)]

        # Place agents randomly in the environment
        for i in range(num_agents):
            rand_start_x = self.streams[0].generator.randint(0,
                                                             env_size - 1,
                                                             closed=True)
            rand_start_y = self.streams[0].generator.randint(0,
                                                             env_size - 1,
                                                             closed=True)
            while self.environment[rand_start_x][
                    rand_start_y] != self.EMPTY_CELL:
                rand_start_x = self.streams[0].generator.randint(0,
                                                                 env_size - 1,
                                                                 closed=True)
                rand_start_y = self.streams[0].generator.randint(0,
                                                                 env_size - 1,
                                                                 closed=True)

            self.agents.append(
                Agent(id=i + 1,
                      fov_radius=field_of_vision,
                      resources=10,
                      metabolic_rate=metabolic_rate,
                      strategy=self.get_random_strategy(),
                      seed=i))

            self.agents_loc[self.agents[i]] = (rand_start_x, rand_start_y)
            self.environment[rand_start_x][rand_start_y] = self.agents[i]

        # Place resources randomly
        for i in range(env_size):
            for j in range(env_size):
                # If an agent has already been placed here
                if self.environment[i][j] != self.EMPTY_CELL:
                    continue
                # With probability resource_prob, place a resource in this cell
                if self.streams[2].generator.uniform(0, 1) < resource_prob:
                    self.resources.append(Resource(10))
                    self.environment[i][j] = self.resources[-1]
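The constructor above calls self.get_random_strategy(), which this excerpt does not show. A minimal sketch under two assumptions: it draws from stream 1 ("to determine strategy value for each agent", per the comment above) and it returns a value in the [-10, 10] range that Example no. 2's Agent expects.
    # Hypothetical sketch; the real method is defined elsewhere in the class.
    def get_random_strategy(self):
        # Stream 1 is reserved for strategy values (see the stream list above).
        return self.streams[1].generator.randint(-10, 10, closed=True)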
Example no. 8
def test_pcg_numpy_mode_exception():
    with pytest.raises(ValueError):
        PCG64(SeedSequence(0), mode="numpy", inc=3)
Example no. 9
import numpy as np
from randomgen import PCG64
import itertools
import time

gene_bases = [base for base in " ABCDEFGHIJKLMNOPQRSTUVWXYZ"]

seed = 1
size_of_generation = 1000

prngs = [
    np.random.Generator(PCG64(seed, i)) for i in range(size_of_generation)
]


def mutate(prng, gene, mutation_rate=0.05):
    return "".join(
        prng.choice(gene_bases) if prng.random() < mutation_rate else base
        for base in gene)


def fitness(gene, reference="METHINKS IT IS LIKE A WEASEL"):
    return sum(base == ref_base for base, ref_base in zip(gene, reference))


def print_status(gen, parent, score):
    print(f"{gen:3d}  {parent}  ({score})")


def weasel_program(mutation_rate=0.05, initial="                            "):
    generation = 0
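    # NOTE: the excerpt above ends here. What follows is not part of the
    # original code, only a minimal sketch of how the selection loop could
    # continue using the helpers defined above: each generator in prngs
    # produces one offspring per generation and the fittest child becomes the
    # next parent.
    parent = initial
    target = fitness("METHINKS IT IS LIKE A WEASEL")
    score = fitness(parent)
    print_status(generation, parent, score)
    while score < target:
        generation += 1
        children = [mutate(prng, parent, mutation_rate) for prng in prngs]
        parent = max(children, key=fitness)
        score = fitness(parent)
        print_status(generation, parent, score)
    return generation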
Example no. 10
def main():
    '''
    Driver method for this file. The FireSim class can be used via imports as
    well, but this driver file provides a comprehensive standalone interface
    to the simulation.
    '''
    # set up and parse commandline arguments
    parser = ArgumentParser()
    parser.add_argument('-i',
                        '--input',
                        type=str,
                        default='in/twoexitbottleneck.txt',
                        help='input floor plan file (default: '
                        'in/twoexitbottleneck.txt)')
    parser.add_argument('-n',
                        '--numpeople',
                        type=int,
                        default=10,
                        help='number of people in the simulation (default:10)')
    parser.add_argument('-r',
                        '--random_state',
                        type=int,
                        default=8675309,
                        help='random seed (default: 8675309)')
    parser.add_argument(
        '-t',
        '--max_time',
        type=float,
        default=None,
        help='the building collapses at this clock tick. people'
        ' beginning movement before this will be assumed'
        ' to have moved away sufficiently (safe)')
    parser.add_argument('-f',
                        '--no_spread_fire',
                        action='store_true',
                        help='disallow fire to spread around?')
    parser.add_argument('-g',
                        '--no_graphical_output',
                        action='store_true',
                        help='disallow graphics?')
    parser.add_argument('-o',
                        '--output',
                        action='store_true',
                        help='show excessive output?')
    parser.add_argument('-d',
                        '--fire_rate',
                        type=float,
                        default=2,
                        help='rate of spread of fire (this is the exponent)')
    parser.add_argument('-b',
                        '--bottleneck_delay',
                        type=float,
                        default=1,
                        help='how long until the next person may leave the bottleneck')
    parser.add_argument('-a',
                        '--animation_delay',
                        type=float,
                        default=1,
                        help='delay per frame of animated visualization (s)')
    args = parser.parse_args()
    # echo the parsed arguments so the user can confirm they are as intended
    print('commandline arguments:', args, '\n')

    # set up random streams
    streams = [Generator(PCG64(args.random_state, i)) for i in range(5)]
    loc_strm, strat_strm, rate_strm, pax_strm, fire_strm = streams

    location_sampler = loc_strm.choice  # used to make initial placement of pax
    strategy_generator = lambda: strat_strm.uniform(.5, 1)  # used to pick move
    rate_generator = lambda: max(.1, abs(rate_strm.normal(1, .1)))  # used to decide strategies
    person_mover = lambda: pax_strm.uniform()
    fire_mover = lambda a: fire_strm.choice(a)

    # create an instance of Floor
    floor = FireSim(args.input,
                    args.numpeople,
                    location_sampler,
                    strategy_generator,
                    rate_generator,
                    person_mover,
                    fire_mover,
                    fire_rate=args.fire_rate,
                    bottleneck_delay=args.bottleneck_delay,
                    animation_delay=args.animation_delay,
                    verbose=args.output)

    # floor.visualize(t=5000)
    # call the simulate method to run the actual simulation
    floor.simulate(maxtime=args.max_time,
                   spread_fire=not args.no_spread_fire,
                   gui=not args.no_graphical_output)

    floor.stats()
Example no. 11
    def __init__(self, seed=0):
        self.rng = RandomGenerator(PCG64(seed))
Example no. 12
def test_pcg_warnings_and_errors():
    with pytest.warns(FutureWarning, match="The current default"):
        PCG64(0, mode="sequence", variant=None)
    with pytest.raises(ValueError, match="variant unknown is not known"):
        PCG64(0, mode="sequence", variant="unknown")
Example no. 13
import sp800_22_tests
import compress_bin_files
from randomgen import Generator, PCG64

instance_amnt = int(input("# of instances: "))
bits_per_instance = int(input("# of bits per instance: "))

sp800_22_tests.append_header("sp800_collected_cluster_data_PCG64.csv")

rnd = Generator(PCG64())

for s in range(instance_amnt):
    print("----------------------  Iteration " + str(s) +
          "  ----------------------")
    bits = list(rnd.randint(low=0, high=2, size=bits_per_instance))
    sp800_22_tests.test_func(bits, "sp800_collected_cluster_data_PCG64.csv",
                             "PCG64", s)

#obtain compression ratio for each

# compress_bin_files.compress("compression_ratio_cluster_data_PCG64.csv")
Example no. 14
# The setup for this excerpt is omitted; it appears to assume an MPI-style
# communicator, e.g. (assumption, not shown in the original):
#     from mpi4py import MPI
#     communicator = MPI.COMM_WORLD
import numpy as np
from randomgen import PCG64

rank = communicator.rank
nnode = communicator.size

gene_bases = [base for base in " ABCDEFGHIJKLMNOPQRSTUVWXYZ"]

seed = 1
size_of_generation = 1000

num_each = size_of_generation // nnode

start = num_each * rank
stop = num_each * (rank + 1)
if rank == nnode - 1:
    stop = size_of_generation

prngs = [np.random.Generator(PCG64(seed, i)) for i in range(start, stop)]


def mutate(prng, gene, mutation_rate=0.05):
    return "".join(
        prng.choice(gene_bases) if prng.random() < mutation_rate else base
        for base in gene)


def fitness(gene, reference="METHINKS IT IS LIKE A WEASEL"):
    return sum(base == ref_base for base, ref_base in zip(gene, reference))


def print_status(gen, parent, score):
    if not rank:
        print(f"{gen:3d}  {parent}  ({score})")
Example no. 15
def test_pcg_warnings_and_errors():
    with pytest.raises(ValueError, match="variant unknown is not known"):
        PCG64(0, mode="sequence", variant="unknown")
Example no. 16
    def run_all(self, processors=1, iseed=1):
        """Run the model for all trials in the designed experiment and store results.

        Model constructor is assumed to take args (seed, collect_stepwise_data,
        trial kwargs).

        Args:
            processors (int, default=1): Number of cpu cores to use for batch run.
            iseed (int, default=1): Initial seed for replication 1 of each trial.
                Seeds for subsequent replications are generated by this class's PRNG.

        """
        pool = ProcessPool(nodes=processors)
        job_queue = []

        # Generator for initial model seeds. Models use these seeds to manage
        # their own RNGs.
        brng = PCG64(iseed)
        randomgen = RandomGenerator(brng)

        param_names = self.design.columns
        param_names = param_names[(param_names != 'replications')
                                  & (param_names != 'Trial')]

        total_iterations = self.design.replications.sum()

        self.manifest = []  # Records what seed was used where

        for row in self.design.itertuples():
            kwargs = {key: getattr(row, key) for key in param_names}

            # Reset model seed generator for next design point
            brng.seed(iseed)

            for rep in range(1, row.replications + 1):
                # Advance model seed for next replication
                model_seed = randomgen.randint(10000000)

                model_key = (
                    row.Trial,
                    rep,
                )
                self.manifest.append((model_key, model_seed))
                job_queue.append(
                    pool.uimap(self._run_single_replication, (model_seed, ),
                               (kwargs, ), (model_key, )))

        with tqdm(total=total_iterations,
                  desc='Total',
                  unit='dp',
                  disable=not self.display_progress) as pbar_total:
            # empty the queue
            results = []
            for task in job_queue:
                for model_vars, agent_vars, stepwise_vars in list(task):
                    results.append((model_vars, agent_vars, stepwise_vars))
                pbar_total.update()

        if self.data_handler:
            return  # Results already stored to database, nothing more to record

        # store the results in batchrunner
        # FUTURE: rework this module to only support external data_handler.
        # Rationale: Best practice to treat each replication atomically. Key
        # benefit of having all replications in memory is to do experiment-wide
        # analysis, and a data analysis module can read in all per-replication
        # files.
        for model_vars, agent_vars, stepwise_vars in results:
            if self.model_reporters:
                for model_key, model_val in model_vars.items():
                    self.model_vars[model_key] = model_val
            if self.agent_reporters:
                for agent_key, reports in agent_vars.items():
                    self.agent_vars[agent_key] = reports
            if self.collect_stepwise_data:
                for stepwise_key, stepwise_val in stepwise_vars.items():
                    self.stepwise_vars[stepwise_key] = stepwise_val