def ACTIONS(self):
    LOGGER.debug("%s starting", self.name)
    if self._rate_encoder.last_update_time != get_current_time():
        self._rate_encoder.update_rates(get_current_time())
    if self.last_schedulable_time > self.corrected_time:
        # At least one more event has to be scheduled.
        next_period = self.corrected_time + self._rate_encoder.update_period
        next_time = min(next_period, self.last_schedulable_time)
        _schedule_output_rate_encoder(self._rate_encoder,
                                      start_t=next_time,
                                      end_t=self._end_t)
    elif self._end_t is None or self._end_t > get_current_time():
        # The rate encoder should go on encoding if the simulation is
        # restarted. We use a special respawn structure for those.
        start = get_current_time() + self._rate_encoder.update_period
        if self._end_t is not None:
            start = min(start, self._end_t)
        RATE_ENC_RESPAWN_DICT[self._rate_encoder] = (start, self._end_t)
    yield sim.hold, self, 0
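
# Note on RATE_ENC_RESPAWN_DICT (an assumption about its shape, inferred only
# from its use above and in run_simulation below): it is a module-level dict
# mapping each output rate encoder to the (start_t, end_t) pair at which its
# polling process should be re-scheduled when the simulation is resumed;
# end_t may be None for open-ended encoders, e.g.
#     RATE_ENC_RESPAWN_DICT[rate_enc] = (12.5, None)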

def run_simulation(end_time=None):
    """Runs the simulation while keeping SimPy and PyNN synchronized at
    event times. Runs until no event is scheduled, unless end_time is
    given, in which case it runs until end_time."""
    global SIMULATION_END_T

    def run_pynn(end_t):
        pynn_now = pynnn.get_current_time()
        pynn_now_round = round(pynn_now, PYNN_TIME_ROUNDING)
        delta_t = round(end_t - pynn_now_round, PYNN_TIME_ROUNDING)
        if pynn_now <= pynn_now_round and delta_t > PYNN_TIME_STEP:
            delta_t = round(delta_t - PYNN_TIME_STEP, PYNN_TIME_ROUNDING)
        if delta_t > 0:  # necessary because run(0) may run PyNN by one timestep
            pynnn.run(delta_t)  # neuralensemble.org/trac/PyNN/ticket/200

    is_not_end = None
    if end_time is None:
        # Would testing len(sim.Globals.allEventTimes()) be faster?
        is_not_end = lambda t: not isinstance(t, sim.Infinity)
    else:
        DummyProcess().start(at=end_time)
        e_t = end_time
        is_not_end = lambda t: t <= e_t
    for e in RATE_ENC_RESPAWN_DICT.iteritems():
        if e[1] is None:
            continue
        renc, start_time, end_time = e[0], e[1][0], e[1][1]
        _schedule_output_rate_encoder(renc, start_t=start_time, end_t=end_time)
        if SIMULATION_END_T < end_time:
            SIMULATION_END_T = end_time
    # Unnecessary, as _schedule_output_rate_encoder performs cleanup:
    RATE_ENC_RESPAWN_DICT.clear()
    t_event_start = sim.peek()
    while is_not_end(t_event_start):
        LOGGER.debug("Progressing to SimPy event at time %s", t_event_start)
        run_pynn(t_event_start)       # run PyNN until the event starts
        sim.step()                    # process the SimPy event
        run_pynn(get_current_time())  # run PyNN until the event ends
        t_event_start = sim.peek()
    if SIMULATION_END_T < get_current_time():
        SIMULATION_END_T = get_current_time()
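
# A minimal usage sketch of run_simulation (an assumption about how the module
# is driven, not part of the original code): set up PyNN, start whatever SimPy
# processes feed the network, then let run_simulation advance both simulators
# in lock-step. InputPresentation is a hypothetical process class used only
# for illustration.
#
#     pynnn.setup(timestep=SIMU_TIMESTEP)
#     presentation = InputPresentation(input_layer, sample, duration=50.0)
#     presentation.start(at=10.0)        # SimPy 2 starts the ACTIONS PEM
#     run_simulation(end_time=200.0)     # both simulators reach t = 200 ms
#     LOGGER.info("simulation ended at t=%s", SIMULATION_END_T)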

def main():
    ## Uninteresting setup: logging, start up the visualisation process, ...
    logfile = make_logfile_name()
    ensure_dir(logfile)
    f_h = logging.FileHandler(logfile)
    f_h.setLevel(SUBDEBUG)
    d_h = logging.StreamHandler()
    d_h.setLevel(INFO)
    utils.configure_loggers(debug_handler=d_h, file_handler=f_h)
    parent_conn, child_conn = multiprocessing.Pipe()
    p = multiprocessing.Process(
        target=visualisation.visualisation_process_f,
        name="display_process",
        args=(child_conn, LOGGER))
    p.start()
    pynnn.setup(timestep=SIMU_TIMESTEP)
    init_logging("logfile", debug=True)
    LOGGER.info("Simulation started with command: %s", sys.argv)

    ## Network setup
    # First population
    p1 = pynnn.Population(100, pynnn.IF_curr_alpha,
                          structure=pynnn.space.Grid2D())
    p1.set({'tau_m': 20, 'v_rest': -65})
    # Second population
    p2 = pynnn.Population(20, pynnn.IF_curr_alpha,
                          cellparams={'tau_m': 15.0, 'cm': 0.9})
    # Projection 1 -> 2
    prj1_2 = pynnn.Projection(
        p1, p2, pynnn.AllToAllConnector(allow_self_connections=False),
        target='excitatory')
    # I may need to make my own PyNN Connector class. Otherwise, this one is
    # neat: the probability of connection decays exponentially with distance.
    # Distance is only calculated using x and y, which lie on a toroidal
    # topology with boundaries at 0 and 500.
    connector = pynnn.DistanceDependentProbabilityConnector(
        "exp(-abs(d))",
        space=pynnn.Space(axes='xy',
                          periodic_boundaries=((0, 500), (0, 500), None)))
    # Alternatively, the powerful connection set algebra (python CSA module)
    # can be used.
    weight_distr = pynnn.RandomDistribution(distribution='gamma',
                                            parameters=[1, 0.1])
    prj1_2.randomizeWeights(weight_distr)

    # This one is in NEST but not in Brian:
    # source = pynnn.NoisyCurrentSource(
    #     mean=100, stdev=50, dt=SIMU_TIMESTEP,
    #     start=10.0, stop=SIMU_DURATION, rng=pynnn.NativeRNG(seed=100))
    source = pynnn.DCSource(start=10.0, stop=SIMU_DURATION, amplitude=100)
    source.inject_into(list(p1.sample(50).all()))

    p1.record(to_file=False)
    p2.record(to_file=False)

    ## Build and send the visualizable network structure
    adapter = pynn_to_visu.PynnToVisuAdapter(LOGGER)
    adapter.add_pynn_population(p1)
    adapter.add_pynn_population(p2)
    adapter.add_pynn_projection(p1, p2, prj1_2.connection_manager)
    adapter.commit_structure()
    parent_conn.send(adapter.output_struct)

    # Number of chunks to run the simulation in:
    n_chunks = SIMU_DURATION // SIMU_TO_VISU_MESSAGE_PERIOD
    last_chunk_duration = SIMU_DURATION % SIMU_TO_VISU_MESSAGE_PERIOD
    # Run the simulator
    for visu_i in xrange(n_chunks):
        pynnn.run(SIMU_TO_VISU_MESSAGE_PERIOD)
        parent_conn.send(adapter.make_activity_update_message())
        LOGGER.debug("real current p1 spike counts: %s",
                     p1.get_spike_counts().values())
    if last_chunk_duration > 0:
        pynnn.run(last_chunk_duration)
        parent_conn.send(adapter.make_activity_update_message())
    # Cleanup
    pynnn.end()
    # Wait for the visualisation process to terminate
    p.join(VISU_PROCESS_JOIN_TIMEOUT)
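
# A sketch of the consumer side of the Pipe protocol used in main() (this
# illustrates the message flow only and is not the project's actual
# visualisation.visualisation_process_f): the first message is the committed
# network structure, and every later message is an activity update produced
# by adapter.make_activity_update_message().
#
#     def visu_consumer_sketch(conn):
#         structure = conn.recv()          # adapter.output_struct, sent once
#         while conn.poll(VISU_PROCESS_JOIN_TIMEOUT):
#             update = conn.recv()         # periodic activity update message
#             # ... redraw the display from `structure` and `update` ...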

def ACTIONS(self):
    LOGGER.debug("%s starting", self.name)
    self.input_layer.apply_input(self.input_sample, get_current_time(),
                                 self.duration)
    yield sim.hold, self, 0
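
# A sketch of the kind of SimPy 2 process this ACTIONS generator belongs to
# (the class name and constructor below are assumptions for illustration; only
# the input_layer, input_sample, and duration attributes and the ACTIONS PEM
# are implied by the code above):
#
#     class InputPresentation(sim.Process):
#         def __init__(self, input_layer, input_sample, duration):
#             sim.Process.__init__(self, name="InputPresentation")
#             self.input_layer = input_layer
#             self.input_sample = input_sample
#             self.duration = duration
#         # ACTIONS (above) applies input_sample to input_layer for `duration`
#         # starting at the simulation time at which the process is activated.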