def connect(self, id_0, id_1, pat, int_0=0, int_1=1, compat_check=True):
    """
    Connect two module instances with a Pattern instance.

    Parameters
    ----------
    id_0, id_1 : str
        Identifiers of the module instances to connect; both must already
        be registered in `self.rank_to_id`.
    pat : Pattern
        Pattern instance describing the connection.
    int_0, int_1 : int
        Identifiers of the pattern interfaces to associate with `id_0` and
        `id_1`, respectively.
    compat_check : bool
        If True, verify that the modules' interfaces are compatible with the
        pattern's interfaces before connecting. Offered as an option because
        the check can be expensive.

    Raises
    ------
    ValueError
        If `pat` is not a Pattern, either module ID is unrecognized, either
        interface identifier is not in the pattern, or (when `compat_check`
        is set) a module interface is incompatible with the pattern.
    """
    if not isinstance(pat, Pattern):
        raise ValueError('pat is not a Pattern instance')
    if id_0 not in self.rank_to_id.values():
        raise ValueError('unrecognized module id %s' % id_0)
    if id_1 not in self.rank_to_id.values():
        raise ValueError('unrecognized module id %s' % id_1)
    if not (int_0 in pat.interface_ids and int_1 in pat.interface_ids):
        raise ValueError('unrecognized pattern interface identifiers')
    self.log_info('connecting modules {0} and {1}'.format(id_0, id_1))

    # Check compatibility of the interfaces exposed by the modules and the
    # pattern; since the manager only contains module classes and not class
    # instances, we need to create Interface instances from the selectors
    # associated with the modules in order to test their compatibility:
    if compat_check:
        self.log_info('checking compatibility of modules {0} and {1} and'
                      ' assigned pattern'.format(id_0, id_1))

        # The Interface construction is identical for both modules, so do
        # it once in a loop instead of duplicating the eight statements
        # for each module:
        mod_ints = []
        for mod_id in (id_0, id_1):
            kwargs = self._kwargs[self.rank_to_id.inv[mod_id]]
            mod_int = Interface(kwargs['sel'])
            mod_int[kwargs['sel']] = 0
            mod_int[kwargs['sel_in'], 'io'] = 'in'
            mod_int[kwargs['sel_out'], 'io'] = 'out'
            mod_int[kwargs['sel_gpot'], 'type'] = 'gpot'
            mod_int[kwargs['sel_spike'], 'type'] = 'spike'
            mod_ints.append(mod_int)

        if not mod_ints[0].is_compatible(0, pat.interface, int_0, True):
            raise ValueError('module %s interface incompatible '
                             'with pattern interface %s' % (id_0, int_0))
        if not mod_ints[1].is_compatible(0, pat.interface, int_1, True):
            raise ValueError('module %s interface incompatible '
                             'with pattern interface %s' % (id_1, int_1))

    # XXX Need to check for fan-in XXX

    # Store the pattern information in the routing table (one entry per
    # direction in which the pattern defines connections):
    self.log_info('updating routing table with pattern')
    if pat.is_connected(0, 1):
        self.routing_table[id_0, id_1] = {'pattern': pat,
                                          'int_0': int_0, 'int_1': int_1}
    if pat.is_connected(1, 0):
        self.routing_table[id_1, id_0] = {'pattern': pat,
                                          'int_0': int_1, 'int_1': int_0}
    self.log_info('connected modules {0} and {1}'.format(id_0, id_1))
def connect(self, id_0, id_1, pat, int_0=0, int_1=1, compat_check=True):
    """
    Connect module `id_0` to module `id_1` using pattern `pat`.

    `int_0` and `int_1` select which of the pattern's interfaces are bound
    to `id_0` and `id_1`, respectively. When `compat_check` is True, the
    interfaces exposed by both modules are verified against the pattern
    before any routing state is modified.
    """
    # Validate all arguments before touching any manager state:
    if not isinstance(pat, Pattern):
        raise ValueError("pat is not a Pattern instance")
    known_ids = self.rank_to_id.values()
    if id_0 not in known_ids:
        raise ValueError("unrecognized module id %s" % id_0)
    if id_1 not in known_ids:
        raise ValueError("unrecognized module id %s" % id_1)
    if int_0 not in pat.interface_ids or int_1 not in pat.interface_ids:
        raise ValueError("unrecognized pattern interface identifiers")
    self.log_info("connecting modules {0} and {1}".format(id_0, id_1))

    # The manager stores module classes plus their constructor keyword
    # arguments rather than instances, so compatibility is checked by
    # rebuilding Interface objects from the stored selectors:
    if compat_check:
        self.log_info("checking compatibility of modules {0} and {1} and"
                      " assigned pattern".format(id_0, id_1))
        kw_a = self._kwargs[self.rank_to_id.inv[id_0]]
        kw_b = self._kwargs[self.rank_to_id.inv[id_1]]

        iface_a = Interface(kw_a["sel"])
        iface_a[kw_a["sel"]] = 0
        iface_b = Interface(kw_b["sel"])
        iface_b[kw_b["sel"]] = 0

        iface_a[kw_a["sel_in"], "io"] = "in"
        iface_a[kw_a["sel_out"], "io"] = "out"
        iface_a[kw_a["sel_gpot"], "type"] = "gpot"
        iface_a[kw_a["sel_spike"], "type"] = "spike"
        iface_b[kw_b["sel_in"], "io"] = "in"
        iface_b[kw_b["sel_out"], "io"] = "out"
        iface_b[kw_b["sel_gpot"], "type"] = "gpot"
        iface_b[kw_b["sel_spike"], "type"] = "spike"

        if not iface_a.is_compatible(0, pat.interface, int_0, True):
            raise ValueError("module %s interface incompatible "
                             "with pattern interface %s" % (id_0, int_0))
        if not iface_b.is_compatible(0, pat.interface, int_1, True):
            raise ValueError("module %s interface incompatible "
                             "with pattern interface %s" % (id_1, int_1))

    # XXX Need to check for fan-in XXX

    # Record the pattern in the routing table for each direction in which
    # it actually defines connections:
    self.log_info("updating routing table with pattern")
    for a, b, src, dst, i_src, i_dst in ((0, 1, id_0, id_1, int_0, int_1),
                                         (1, 0, id_1, id_0, int_1, int_0)):
        if pat.is_connected(a, b):
            self.routing_table[src, dst] = {"pattern": pat,
                                            "int_0": i_src,
                                            "int_1": i_dst}
    self.log_info("connected modules {0} and {1}".format(id_0, id_1))
def connect(self, id_0, id_1, pat, int_0=0, int_1=1, compat_check=True):
    """
    Connect two module instances with a Pattern instance.

    `int_0`/`int_1` select which of the pattern's interfaces are associated
    with modules `id_0`/`id_1`. When `compat_check` is True, the module
    interfaces are verified against the pattern before the routing table
    is updated; raises ValueError on any invalid argument.
    """
    if not isinstance(pat, Pattern):
        raise ValueError('pat is not a Pattern instance')
    if id_0 not in self.rank_to_id.values():
        raise ValueError('unrecognized module id %s' % id_0)
    if id_1 not in self.rank_to_id.values():
        raise ValueError('unrecognized module id %s' % id_1)
    if not (int_0 in pat.interface_ids and int_1 in pat.interface_ids):
        raise ValueError('unrecognized pattern interface identifiers')
    self.log_info('connecting modules {0} and {1}'.format(id_0, id_1))

    # Check compatibility of the interfaces exposed by the modules and the
    # pattern; since the manager only contains module classes and not class
    # instances, we need to create Interface instances from the selectors
    # associated with the modules in order to test their compatibility:
    if compat_check:
        rank_0 = self.rank_to_id.inv[id_0]
        rank_1 = self.rank_to_id.inv[id_1]
        self.log_info('checking compatibility of modules {0} and {1} and'
                      ' assigned pattern'.format(id_0, id_1))

        # Rebuild each module's Interface from the stored constructor
        # keyword arguments (selector of all ports, then io/type attrs):
        mod_int_0 = Interface(self._kwargs[rank_0]['sel'])
        mod_int_0[self._kwargs[rank_0]['sel']] = 0
        mod_int_1 = Interface(self._kwargs[rank_1]['sel'])
        mod_int_1[self._kwargs[rank_1]['sel']] = 0
        mod_int_0[self._kwargs[rank_0]['sel_in'], 'io'] = 'in'
        mod_int_0[self._kwargs[rank_0]['sel_out'], 'io'] = 'out'
        mod_int_0[self._kwargs[rank_0]['sel_gpot'], 'type'] = 'gpot'
        mod_int_0[self._kwargs[rank_0]['sel_spike'], 'type'] = 'spike'
        mod_int_1[self._kwargs[rank_1]['sel_in'], 'io'] = 'in'
        mod_int_1[self._kwargs[rank_1]['sel_out'], 'io'] = 'out'
        mod_int_1[self._kwargs[rank_1]['sel_gpot'], 'type'] = 'gpot'
        mod_int_1[self._kwargs[rank_1]['sel_spike'], 'type'] = 'spike'

        if not mod_int_0.is_compatible(0, pat.interface, int_0, True):
            raise ValueError('module %s interface incompatible '
                             'with pattern interface %s' % (id_0, int_0))
        if not mod_int_1.is_compatible(0, pat.interface, int_1, True):
            raise ValueError('module %s interface incompatible '
                             'with pattern interface %s' % (id_1, int_1))

    # XXX Need to check for fan-in XXX

    # Store the pattern information in the routing table, one entry per
    # connected direction:
    self.log_info('updating routing table with pattern')
    if pat.is_connected(0, 1):
        self.routing_table[id_0, id_1] = {
            'pattern': pat, 'int_0': int_0, 'int_1': int_1
        }
    if pat.is_connected(1, 0):
        self.routing_table[id_1, id_0] = {
            'pattern': pat, 'int_0': int_1, 'int_1': int_0
        }
    self.log_info('connected modules {0} and {1}'.format(id_0, id_1))
class Module(BaseModule):
    """
    Processing module.

    This class repeatedly executes a work method until it receives a quit
    message via its control port.

    Parameters
    ----------
    sel : str, unicode, or sequence
        Path-like selector describing the module's interface of exposed ports.
    sel_in : str, unicode, or sequence
        Selector describing all input ports in the module's interface.
    sel_out : str, unicode, or sequence
        Selector describing all output ports in the module's interface.
    sel_gpot : str, unicode, or sequence
        Selector describing all graded potential ports in the module's
        interface.
    sel_spike : str, unicode, or sequence
        Selector describing all spiking ports in the module's interface.
    data_gpot : numpy.ndarray
        Data array to associate with graded potential ports. Array length
        must equal the number of graded potential ports in the module's
        interface.
    data_spike : numpy.ndarray
        Data array to associate with spiking ports. Array length must equal
        the number of spiking ports in the module's interface.
    columns : list of str
        Interface port attributes. This list must at least contain
        'interface', 'io', and 'type'.
    port_data : int
        Network port for transmitting data.
    port_ctrl : int
        Network port for controlling the module instance.
    id : str
        Module identifier. If no identifier is specified, a unique
        identifier is automatically generated.
    device : int
        GPU device to use.
    debug : bool
        Debug flag. When True, exceptions raised during the work loop are
        not caught.
    time_sync : bool
        Time synchronization flag. When True, debug messages are not emitted
        during module synchronization and the time taken to receive all
        incoming data is computed.

    Notes
    -----
    A module instance connected to other module instances contains a list of
    the connectivity objects that describe incoming connections and a list
    of masks that select for the neurons whose data must be transmitted to
    destination modules.
    """

    def __init__(self, sel, sel_in, sel_out, sel_gpot, sel_spike,
                 data_gpot, data_spike,
                 columns=['interface', 'io', 'type'],
                 port_data=PORT_DATA, port_ctrl=PORT_CTRL,
                 port_time=PORT_TIME,
                 id=None, device=None, debug=False, time_sync=False):
        self.debug = debug
        self.time_sync = time_sync
        self.device = device

        # Require several necessary attribute columns:
        assert 'interface' in columns
        assert 'io' in columns
        assert 'type' in columns

        # Generate a unique ID if none is specified:
        if id is None:
            id = uid()

        # Call super for BaseModule rather than Module because most of the
        # functionality of the former's constructor must be overridden in
        # any case:
        super(BaseModule, self).__init__(port_ctrl, id)

        # Reformat logger name:
        LoggerMixin.__init__(self, 'mod %s' % self.id)

        # Data port; the three network ports must be pairwise distinct:
        if port_data == port_ctrl:
            raise ValueError('data and control ports must differ')
        self.port_data = port_data
        if port_time == port_ctrl or port_time == port_data:
            raise ValueError('time port must differ from data and control ports')
        self.port_time = port_time

        # Initial network connectivity:
        self.net = 'none'

        # Create module interface given the specified ports:
        self.interface = Interface(sel, columns)

        # Set the interface ID to 0
        # we assume that a module only has one interface:
        self.interface[sel, 'interface'] = 0

        # Set port types; input/output selectors and gpot/spike selectors
        # must each partition subsets of `sel` disjointly:
        assert SelectorMethods.is_in(sel_in, sel)
        assert SelectorMethods.is_in(sel_out, sel)
        assert SelectorMethods.are_disjoint(sel_in, sel_out)
        self.interface[sel_in, 'io'] = 'in'
        self.interface[sel_out, 'io'] = 'out'
        assert SelectorMethods.is_in(sel_gpot, sel)
        assert SelectorMethods.is_in(sel_spike, sel)
        assert SelectorMethods.are_disjoint(sel_gpot, sel_spike)
        self.interface[sel_gpot, 'type'] = 'gpot'
        self.interface[sel_spike, 'type'] = 'spike'

        # Set up mapper between port identifiers and their associated data:
        assert len(data_gpot) == len(self.interface.gpot_ports())
        assert len(data_spike) == len(self.interface.spike_ports())
        self.data = {}
        self.data['gpot'] = data_gpot
        self.data['spike'] = data_spike
        self.pm = {}
        self.pm['gpot'] = PortMapper(sel_gpot, self.data['gpot'])
        self.pm['spike'] = PortMapper(sel_spike, self.data['spike'])

        # Patterns connecting this module instance with other module
        # instances. Keyed on the IDs of those modules:
        self.patterns = {}

        # Each entry in pat_ints is a tuple containing the identifiers of
        # which of a pattern's identifiers are connected to the current
        # module (first entry) and the modules to which it is connected
        # (second entry). Keyed on the IDs of those modules:
        self.pat_ints = {}

        # Dict for storing incoming data; each entry (corresponding to each
        # module that sends input to the current module) is a deque
        # containing incoming data, which in turn contains transmitted data
        # arrays. Deques are used here to accommodate situations when
        # multiple data from a single source arrive:
        self._in_data = {}

        # List for storing outgoing data; each entry is a tuple whose first
        # entry is the source or destination module ID and whose second
        # entry is the data to transmit:
        self._out_data = []

        # Dictionaries containing ports of source modules that
        # send output to this module. Must be initialized immediately before
        # an emulation begins running. Keyed on source module ID:
        self._in_port_dict = {}
        self._in_port_dict_ids = {}
        self._in_port_dict['gpot'] = {}
        self._in_port_dict['spike'] = {}

        # Dictionaries containing ports of destination modules that
        # receive input from this module. Must be initialized immediately
        # before an emulation begins running. Keyed on destination module ID:
        self._out_port_dict = {}
        self._out_port_dict_ids = {}
        self._out_port_dict['gpot'] = {}
        self._out_port_dict['spike'] = {}

        self._out_ids = []
        self._in_ids = []

    def _init_gpu(self):
        """
        Initialize GPU device.

        Notes
        -----
        Must be called from within the `run()` method, not from within
        `__init__()`.
        """
        if self.device == None:
            self.log_info('no GPU specified - not initializing ')
        else:

            # Import pycuda.driver here so as to facilitate the
            # subclassing of Module to create pure Python LPUs that don't
            # use GPUs:
            import pycuda.driver as drv
            drv.init()
            try:
                self.gpu_ctx = drv.Device(self.device).make_context()
            except Exception as e:
                # NOTE(review): `e.message` is Python 2-only — confirm the
                # supported Python version:
                self.log_info('_init_gpu exception: ' + e.message)
            else:
                # Pop the context at interpreter exit so it is released:
                atexit.register(self.gpu_ctx.pop)
                self.log_info('GPU initialized')

    @property
    def N_gpot_ports(self):
        """
        Number of exposed graded-potential ports.
        """
        return len(self.interface.gpot_ports())

    @property
    def N_spike_ports(self):
        """
        Number of exposed spiking ports.
        """
        return len(self.interface.spike_ports())

    def _get_in_data(self):
        """
        Get input data from incoming transmission buffer.

        Populate the data arrays associated with a module's ports using
        input data received from other modules.
        """
        if self.net in ['none', 'ctrl']:
            self.log_info('not retrieving from input buffer')
        else:
            self.log_info('retrieving from input buffer')

            # Since fan-in is not permitted, the data from all source modules
            # must necessarily map to different ports; we can therefore write
            # each of the received data to the array associated with the
            # module's ports here without worry of overwriting the data from
            # each source module:
            for in_id in self._in_ids:

                # Check for exceptions so as to not fail on the first
                # emulation step when there is no input data to retrieve:
                try:
                    # The first entry of `data` contains graded potential
                    # values, while the second contains spiking port values
                    # (i.e., 0 or 1):
                    data = self._in_data[in_id].popleft()
                except:
                    self.log_info('no input data from [%s] retrieved' % in_id)
                else:
                    self.log_info('input data from [%s] retrieved' % in_id)

                    # Assign transmitted values directly to port data array;
                    # `None` entries mean the sender had nothing of that type:
                    if len(self._in_port_dict_ids['gpot'][in_id]) and data[0] is not None:
                        self.pm['gpot'].set_by_inds(self._in_port_dict_ids['gpot'][in_id], data[0])
                    if len(self._in_port_dict_ids['spike'][in_id]) and data[1] is not None:
                        self.pm['spike'].set_by_inds(self._in_port_dict_ids['spike'][in_id], data[1])

    def _put_out_data(self):
        """
        Put specified output data in outgoing transmission buffer.

        Stage data from the data arrays associated with a module's ports for
        output to other modules.

        Notes
        -----
        The output spike port selection algorithm could probably be made
        faster.
        """
        if self.net in ['none', 'ctrl']:
            self.log_info('not populating output buffer')
        else:
            self.log_info('populating output buffer')

            # Clear output buffer before populating it:
            self._out_data = []

            # Select data that should be sent to each destination module and
            # append it to the outgoing queue:
            for out_id in self._out_ids:

                # Select port data using list of graded potential ports that
                # can transmit output:
                if len(self._out_port_dict_ids['gpot'][out_id]):
                    gpot_data = \
                        self.pm['gpot'].get_by_inds(self._out_port_dict_ids['gpot'][out_id])
                else:
                    gpot_data = None
                if len(self._out_port_dict_ids['spike'][out_id]):
                    spike_data = \
                        self.pm['spike'].get_by_inds(self._out_port_dict_ids['spike'][out_id])
                else:
                    spike_data = None

                # Attempt to stage the emitted port data for transmission:
                try:
                    self._out_data.append((out_id, (gpot_data, spike_data)))
                except:
                    self.log_info('no output data to [%s] sent' % out_id)
                else:
                    self.log_info('output data to [%s] sent' % out_id)

    def run_step(self):
        """
        Module work method.

        This method should be implemented to do something interesting with
        new input port data in the module's `pm` attribute and update the
        attribute's output port data if necessary. It should not interact
        with any other class attributes.
        """
        self.log_info('running execution step')

    def _init_port_dicts(self):
        """
        Initial dictionaries of source/destination ports in current module.
        """
        # Extract identifiers of source ports in the current module's
        # interface for all modules receiving output from the current module:
        self._out_port_dict['gpot'] = {}
        self._out_port_dict['spike'] = {}
        self._out_port_dict_ids['gpot'] = {}
        self._out_port_dict_ids['spike'] = {}

        self._out_ids = self.out_ids
        for out_id in self._out_ids:
            self.log_info('extracting output ports for %s' % out_id)

            # Get interfaces of pattern connecting the current module to
            # destination module `out_id`; `from_int` is connected to the
            # current module, `to_int` is connected to the other module:
            from_int, to_int = self.pat_ints[out_id]

            # Get ports in interface (`from_int`) connected to the current
            # module that are connected to the other module via the pattern:
            self._out_port_dict['gpot'][out_id] = \
                self.patterns[out_id].src_idx(from_int, to_int, 'gpot', 'gpot')
            self._out_port_dict_ids['gpot'][out_id] = \
                self.pm['gpot'].ports_to_inds(self._out_port_dict['gpot'][out_id])
            self._out_port_dict['spike'][out_id] = \
                self.patterns[out_id].src_idx(from_int, to_int, 'spike', 'spike')
            self._out_port_dict_ids['spike'][out_id] = \
                self.pm['spike'].ports_to_inds(self._out_port_dict['spike'][out_id])

        # Extract identifiers of destination ports in the current module's
        # interface for all modules sending input to the current module:
        self._in_port_dict['gpot'] = {}
        self._in_port_dict['spike'] = {}
        self._in_port_dict_ids['gpot'] = {}
        self._in_port_dict_ids['spike'] = {}

        self._in_ids = self.in_ids
        for in_id in self._in_ids:
            self.log_info('extracting input ports for %s' % in_id)

            # Get interfaces of pattern connecting the current module to
            # source module `out_id`; `to_int` is connected to the current
            # module, `from_int` is connected to the other module:
            to_int, from_int = self.pat_ints[in_id]

            # Get ports in interface (`to_int`) connected to the current
            # module that are connected to the other module via the pattern:
            self._in_port_dict['gpot'][in_id] = \
                self.patterns[in_id].dest_idx(from_int, to_int, 'gpot', 'gpot')
            self._in_port_dict_ids['gpot'][in_id] = \
                self.pm['gpot'].ports_to_inds(self._in_port_dict['gpot'][in_id])
            self._in_port_dict['spike'][in_id] = \
                self.patterns[in_id].dest_idx(from_int, to_int, 'spike', 'spike')
            self._in_port_dict_ids['spike'][in_id] = \
                self.pm['spike'].ports_to_inds(self._in_port_dict['spike'][in_id])

    def pre_run(self, *args, **kwargs):
        """
        Code to run before main module run loop.

        Code in this method will be executed after a module's process has
        been launched and all connectivity objects made available, but before
        the main run loop begins. Initialization routines (such as GPU
        initialization) should be performed in this method.
        """
        pass

    def run(self):
        """
        Body of process.
        """
        # Don't allow keyboard interruption of process:
        self.log_info('starting')
        with IgnoreKeyboardInterrupt():

            # Initialize environment:
            self._init_net()

            # Initialize _out_port_dict and _in_port_dict attributes:
            self._init_port_dicts()

            # Initialize buffer for incoming data. Dict used to store the
            # incoming data keyed by the source module id. Each value is a
            # queue buffering the received data:
            self._in_data = {k: collections.deque() for k in self._in_ids}

            # Perform any pre-emulation operations:
            self.pre_run()

            self.running = True
            self.steps = 0
            if self.time_sync:
                self.sock_time.send(msgpack.packb((self.id, self.steps,
                                                   'start', time.time())))
                self.log_info('sent start time to master')

            # Counter for number of steps between synchronizations:
            steps_since_sync = 0
            while self.steps < self.max_steps:
                self.log_info('execution step: %s/%s' % (self.steps,
                                                         self.max_steps))

                # If the debug flag is set, don't catch exceptions so that
                # errors will lead to visible failures:
                if self.debug:

                    # Run the processing step:
                    self.run_step()

                    # Do post-processing:
                    self.post_run_step()

                    # Synchronize:
                    if steps_since_sync == self.sync_period:
                        self._sync()
                        steps_since_sync = 0
                    else:
                        self.log_info('skipping sync (%s/%s)' % \
                                      (steps_since_sync, self.sync_period))
                        steps_since_sync += 1
                else:

                    # Run the processing step:
                    catch_exception(self.run_step, self.log_info)

                    # Do post processing:
                    catch_exception(self.post_run_step, self.log_info)

                    # Synchronize:
                    if steps_since_sync == self.sync_period:
                        catch_exception(self._sync, self.log_info)
                        steps_since_sync = 0
                    else:
                        self.log_info('skipping sync (%s/%s)' % \
                                      (steps_since_sync, self.sync_period))
                        steps_since_sync += 1

                # Exit run loop when a quit signal has been received:
                if not self.running:
                    self.log_info('run loop stopped')
                    break
                self.steps += 1
            if self.time_sync:
                self.sock_time.send(msgpack.packb((self.id, self.steps,
                                                   'stop', time.time())))
                self.log_info('sent stop time to master')
            self.log_info('maximum number of steps reached')

            # Perform any post-emulation operations:
            self.post_run()

            # Shut down the control handler and inform the manager that the
            # module has shut down:
            self._ctrl_stream_shutdown()
            ack = 'shutdown'
            self.sock_ctrl.send(ack)
            self.log_info('sent to manager: %s' % ack)

            self.log_info('exiting')
# Expose module 2's ports on interface 1 of the pattern. Module-2 *input*
# selectors are assigned 'out' and vice versa — presumably because the
# pattern interface's io attribute is stated from the pattern's point of
# view (a module input is a pattern output); confirm against the Pattern
# class documentation:
pat12.interface[m2_sel_in_gpot] = [1, 'out', 'gpot']
pat12.interface[m2_sel_out_gpot] = [1, 'in', 'gpot']
pat12.interface[m2_sel_in_spike] = [1, 'out', 'spike']
pat12.interface[m2_sel_out_spike] = [1, 'in', 'spike']

# Define one-to-one connections between module ports in both directions:
pat12['/a/out/gpot[0]', '/b/in/gpot[0]'] = 1
pat12['/a/out/gpot[1]', '/b/in/gpot[1]'] = 1
pat12['/b/out/gpot[0]', '/a/in/gpot[0]'] = 1
pat12['/b/out/gpot[1]', '/a/in/gpot[1]'] = 1
pat12['/a/out/spike[0]', '/b/in/spike[0]'] = 1
pat12['/a/out/spike[1]', '/b/in/spike[1]'] = 1
pat12['/b/out/spike[0]', '/a/in/spike[0]'] = 1
pat12['/b/out/spike[1]', '/a/in/spike[1]'] = 1

# Optionally verify module/pattern compatibility before connecting.
# NOTE(review): the selectors are passed spike-before-gpot here — confirm
# this matches the Interface.from_selectors parameter order:
check_compatible = True
if check_compatible:
    m1_int = Interface.from_selectors(m1_sel, m1_sel_in, m1_sel_out,
                                      m1_sel_spike, m1_sel_gpot, m1_sel)
    m2_int = Interface.from_selectors(m2_sel, m2_sel_in, m2_sel_out,
                                      m2_sel_spike, m2_sel_gpot, m2_sel)
    assert m1_int.is_compatible(0, pat12.interface, 0, True)
    assert m2_int.is_compatible(0, pat12.interface, 1, True)

# Register the pattern with the manager (interface 0 -> module 1,
# interface 1 -> module 2):
man.connect(m1_id, m2_id, pat12, 0, 1)

# Start emulation and allow it to run for a little while before shutting
# down. To set the emulation to exit after executing a fixed number of
# steps, start it as follows and remove the sleep statement:
# man.start(500)
man.spawn()
man.start(5)
man.wait()
def connect(self, id_0, id_1, pat, int_0=0, int_1=1, compat_check=True):
    """
    Specify connection between two module instances with a Pattern instance.

    Parameters
    ----------
    id_0, id_1 : str
        Identifiers of module instances to connect.
    pat : Pattern
        Pattern instance.
    int_0, int_1 : int
        Which of the pattern's interfaces to connect to `id_0` and `id_1`,
        respectively.
    compat_check : bool
        Check whether the interfaces of the specified modules are compatible
        with the specified pattern. This option is provided because
        compatibility checking can be expensive.

    Raises
    ------
    ValueError
        If any argument fails validation or (when `compat_check` is set)
        a module interface is incompatible with the pattern.

    Notes
    -----
    Assumes that the constructors of the module types contain a `sel`
    parameter.
    """
    # Validate all arguments before modifying the routing table:
    if not isinstance(pat, Pattern):
        raise ValueError('pat is not a Pattern instance')
    if id_0 not in self.rank_to_id.values():
        raise ValueError('unrecognized module id %s' % id_0)
    if id_1 not in self.rank_to_id.values():
        raise ValueError('unrecognized module id %s' % id_1)
    if not (int_0 in pat.interface_ids and int_1 in pat.interface_ids):
        raise ValueError('unrecognized pattern interface identifiers')
    self.log_info('connecting modules {0} and {1}'.format(id_0, id_1))

    # Check compatibility of the interfaces exposed by the modules and the
    # pattern; since the manager only contains module classes and not class
    # instances, we need to create Interface instances from the selectors
    # associated with the modules in order to test their compatibility:
    if compat_check:
        rank_0 = self.rank_to_id.inv[id_0]
        rank_1 = self.rank_to_id.inv[id_1]
        self.log_info('checking compatibility of modules {0} and {1} and'
                      ' assigned pattern'.format(id_0, id_1))

        # Rebuild each module's Interface from the constructor keyword
        # arguments stored for its rank:
        mod_int_0 = Interface(self._kwargs[rank_0]['sel'])
        mod_int_0[self._kwargs[rank_0]['sel']] = 0
        mod_int_1 = Interface(self._kwargs[rank_1]['sel'])
        mod_int_1[self._kwargs[rank_1]['sel']] = 0
        mod_int_0[self._kwargs[rank_0]['sel_in'], 'io'] = 'in'
        mod_int_0[self._kwargs[rank_0]['sel_out'], 'io'] = 'out'
        mod_int_0[self._kwargs[rank_0]['sel_gpot'], 'type'] = 'gpot'
        mod_int_0[self._kwargs[rank_0]['sel_spike'], 'type'] = 'spike'
        mod_int_1[self._kwargs[rank_1]['sel_in'], 'io'] = 'in'
        mod_int_1[self._kwargs[rank_1]['sel_out'], 'io'] = 'out'
        mod_int_1[self._kwargs[rank_1]['sel_gpot'], 'type'] = 'gpot'
        mod_int_1[self._kwargs[rank_1]['sel_spike'], 'type'] = 'spike'

        if not mod_int_0.is_compatible(0, pat.interface, int_0, True):
            raise ValueError('module %s interface incompatible '
                             'with pattern interface %s' % (id_0, int_0))
        if not mod_int_1.is_compatible(0, pat.interface, int_1, True):
            raise ValueError('module %s interface incompatible '
                             'with pattern interface %s' % (id_1, int_1))

    # XXX Need to check for fan-in XXX

    # Store the pattern information in the routing table, one entry per
    # connected direction:
    self.log_info('updating routing table with pattern')
    if pat.is_connected(0, 1):
        self.routing_table[id_0, id_1] = {'pattern': pat,
                                          'int_0': int_0, 'int_1': int_1}
    if pat.is_connected(1, 0):
        self.routing_table[id_1, id_0] = {'pattern': pat,
                                          'int_0': int_1, 'int_1': int_0}
    self.log_info('connected modules {0} and {1}'.format(id_0, id_1))
def __init__(self, sel, sel_in, sel_out, sel_gpot, sel_spike,
             data_gpot, data_spike,
             columns=['interface', 'io', 'type'],
             ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG, spike_tag=SPIKE_TAG,
             id=None, device=None, routing_table=None, rank_to_id=None,
             debug=False, time_sync=False):
    """
    Initialize an MPI-backed module: validate port selectors, set up GPU
    memory for port data, and register MPI/PyCUDA cleanup handlers.
    """
    super(Module, self).__init__(ctrl_tag)
    self.debug = debug
    self.time_sync = time_sync
    self.device = device

    # MPI message tags for graded-potential and spiking data transfers:
    self._gpot_tag = gpot_tag
    self._spike_tag = spike_tag

    # Require several necessary attribute columns:
    if 'interface' not in columns:
        raise ValueError('interface column required')
    if 'io' not in columns:
        raise ValueError('io column required')
    if 'type' not in columns:
        raise ValueError('type column required')

    # Initialize GPU here so as to be able to initialize a port mapper
    # containing GPU memory:
    self._init_gpu()

    # This is needed to ensure that MPI_Finalize is called before PyCUDA
    # attempts to clean up; see
    # https://groups.google.com/forum/#!topic/mpi4py/by0Rd5q0Ayw
    atexit.register(MPI.Finalize)

    # Manually register the file close method associated with MPIOutput
    # so that it is called by atexit before MPI.Finalize() (if the file is
    # closed after MPI.Finalize() is called, an error will occur).
    # NOTE(review): dict.iteritems() is Python 2-only — confirm the
    # supported Python version:
    for k, v in twiggy.emitters.iteritems():
        if isinstance(v._output, MPIOutput):
            atexit.register(v._output.close)

    # Ensure that the input and output port selectors respectively
    # select mutually exclusive subsets of the set of all ports exposed by
    # the module:
    if not SelectorMethods.is_in(sel_in, sel):
        raise ValueError('input port selector not in selector of all ports')
    if not SelectorMethods.is_in(sel_out, sel):
        raise ValueError('output port selector not in selector of all ports')
    if not SelectorMethods.are_disjoint(sel_in, sel_out):
        raise ValueError('input and output port selectors not disjoint')

    # Ensure that the graded potential and spiking port selectors
    # respectively select mutually exclusive subsets of the set of all
    # ports exposed by the module:
    if not SelectorMethods.is_in(sel_gpot, sel):
        raise ValueError('gpot port selector not in selector of all ports')
    if not SelectorMethods.is_in(sel_spike, sel):
        raise ValueError('spike port selector not in selector of all ports')
    if not SelectorMethods.are_disjoint(sel_gpot, sel_spike):
        raise ValueError('gpot and spike port selectors not disjoint')

    # Save routing table and mapping between MPI ranks and module IDs:
    self.routing_table = routing_table
    self.rank_to_id = rank_to_id

    # Generate a unique ID if none is specified:
    if id is None:
        self.id = uid()
    else:
        # If a unique ID was specified and the routing table is not empty
        # (i.e., there are connections between multiple modules), the id
        # must be a node in the routing table:
        if routing_table is not None and len(routing_table.ids) and \
           not routing_table.has_node(id):
            raise ValueError('routing table must contain specified '
                             'module ID: {}'.format(id))
        self.id = id

    # Reformat logger name:
    LoggerMixin.__init__(self, 'mod %s' % self.id)

    # Create module interface given the specified ports:
    self.interface = Interface(sel, columns)

    # Set the interface ID to 0; we assume that a module only has one
    # interface:
    self.interface[sel, 'interface'] = 0

    # Set the port attributes:
    self.interface[sel_in, 'io'] = 'in'
    self.interface[sel_out, 'io'] = 'out'
    self.interface[sel_gpot, 'type'] = 'gpot'
    self.interface[sel_spike, 'type'] = 'spike'

    # Find the input and output ports:
    self.in_ports = self.interface.in_ports().to_tuples()
    self.out_ports = self.interface.out_ports().to_tuples()

    # Find the graded potential and spiking ports:
    self.gpot_ports = self.interface.gpot_ports().to_tuples()
    self.spike_ports = self.interface.spike_ports().to_tuples()

    # Cache the io/type intersections as tuple lists:
    self.in_gpot_ports = self.interface.in_ports().gpot_ports().to_tuples()
    self.in_spike_ports = self.interface.in_ports().spike_ports().to_tuples()
    self.out_gpot_ports = self.interface.out_ports().gpot_ports().to_tuples()
    self.out_spike_ports = self.interface.out_ports().spike_ports().to_tuples()

    # Set up mapper between port identifiers and their associated data;
    # the data arrays are copied to GPU memory:
    if len(data_gpot) != len(self.gpot_ports):
        raise ValueError('incompatible gpot port data array length')
    if len(data_spike) != len(self.spike_ports):
        raise ValueError('incompatible spike port data array length')
    self.data = {}
    self.data['gpot'] = gpuarray.to_gpu(data_gpot)
    self.data['spike'] = gpuarray.to_gpu(data_spike)
    self.pm = {}
    self.pm['gpot'] = GPUPortMapper(sel_gpot, self.data['gpot'],
                                    make_copy=False)
    self.pm['spike'] = GPUPortMapper(sel_spike, self.data['spike'],
                                     make_copy=False)

    # MPI Request object for resolving asynchronous transfers:
    self.req = MPI.Request()
def __init__(self, selector, data, columns=['interface', 'io', 'type'],
             port_data=PORT_DATA, port_ctrl=PORT_CTRL, id=None, debug=False):
    """
    Initialize the module.

    Parameters
    ----------
    selector : str, unicode, or sequence
        Path-like selector describing the module's interface of exposed ports.
    data : numpy.ndarray
        Data array to associate with ports. Array length must equal the
        number of ports in the module's interface.
    columns : list of str
        Interface port attributes.
    port_data : int
        Network port for transmitting data.
    port_ctrl : int
        Network port for controlling the module instance.
    id : str
        Module identifier. If no identifier is specified, a unique
        identifier is automatically generated.
    debug : bool
        Debug flag; when True, exceptions raised during the work method
        are not suppressed.

    Raises
    ------
    ValueError
        If the data and control ports coincide or the data array length
        does not match the number of interface ports.
    """
    self.debug = debug

    # Generate a unique ID if none is specified:
    if id is None:
        id = uid()

    super(BaseModule, self).__init__(port_ctrl, id)

    # Logging:
    self.logger = twiggy.log.name('module %s' % self.id)

    # Data port must not collide with the control port:
    if port_data == port_ctrl:
        raise ValueError('data and control ports must differ')
    self.port_data = port_data

    # Initial network connectivity:
    self.net = 'none'

    # Create module interface given the specified ports:
    self.interface = Interface(selector, columns)

    # Set the interface ID to 0; we assume that a module only has one
    # interface:
    self.interface[selector, 'interface'] = 0

    # Set up mapper between port identifiers and their associated data.
    # Raise instead of assert so the check survives `python -O`:
    if len(data) != len(self.interface):
        raise ValueError('incompatible port data array length')
    self.data = data
    # PortMapper expects (selector, data); the original passed the arguments
    # in reversed order, inconsistent with the identical call in the other
    # constructor in this file:
    self.pm = PortMapper(selector, self.data)

    # Patterns connecting this module instance with other module instances.
    # Keyed on the IDs of those modules:
    self.patterns = {}

    # Each entry in pat_ints is a tuple containing the identifiers of which
    # of a pattern's interfaces are connected to the current module (first
    # entry) and the modules to which it is connected (second entry).
    # Keyed on the IDs of those modules:
    self.pat_ints = {}

    # Dict for storing incoming data; each entry (corresponding to each
    # module that sends input to the current module) is a deque containing
    # incoming data, which in turn contains transmitted data arrays. Deques
    # are used here to accommodate situations when multiple data from a
    # single source arrive:
    self._in_data = {}

    # List for storing outgoing data; each entry is a tuple whose first
    # entry is the source or destination module ID and whose second entry
    # is the data to transmit:
    self._out_data = []

    # Dictionary containing ports of source modules that send output to
    # this module. Must be initialized immediately before an emulation
    # begins running. Keyed on source module ID:
    self._in_port_dict = {}

    # Dictionary containing ports of destination modules that receive input
    # from this module. Must be initialized immediately before an emulation
    # begins running. Keyed on destination module ID:
    self._out_port_dict = {}

    self._out_ids = []
    self._in_ids = []
def __init__(self, sel, sel_in, sel_out,
             sel_gpot, sel_spike, data_gpot, data_spike,
             columns=['interface', 'io', 'type'],
             ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG, spike_tag=SPIKE_TAG,
             id=None, device=None,
             routing_table=None, rank_to_id=None, pm_all=None,
             debug=False, time_sync=False):
    """
    Initialize the MPI-backed GPU module.

    Validates the port selectors, initializes the GPU, registers MPI
    cleanup handlers, builds the module interface, and creates GPU port
    mappers for the graded potential and spiking port data.

    Raises
    ------
    ValueError
        If required columns are missing, a port selector is not a subset
        of the selector of all ports, selectors that must be disjoint
        overlap, a data array length does not match its port count, or the
        specified module ID is absent from the routing table.
    """

    # Call super for BaseModule rather than Module because most of the
    # functionality of the former's constructor must be overridden in any case:
    super(BaseModule, self).__init__(ctrl_tag)
    self.debug = debug
    self.time_sync = time_sync
    self.device = device

    self._gpot_tag = gpot_tag
    self._spike_tag = spike_tag

    # Require several necessary attribute columns. Raise instead of assert
    # so the checks survive `python -O`, matching the explicit raises used
    # by the other constructor variant in this file:
    if 'interface' not in columns:
        raise ValueError('interface column required')
    if 'io' not in columns:
        raise ValueError('io column required')
    if 'type' not in columns:
        raise ValueError('type column required')

    self._init_gpu()

    # This is needed to ensure that MPI_Finalize is called before PyCUDA
    # attempts to clean up; see
    # https://groups.google.com/forum/#!topic/mpi4py/by0Rd5q0Ayw
    atexit.register(MPI.Finalize)

    # Manually register the file close method associated with MPIOutput
    # so that it is called by atexit before MPI.Finalize() (if the file is
    # closed after MPI.Finalize() is called, an error will occur):
    for k, v in twiggy.emitters.iteritems():
        if isinstance(v._output, MPIOutput):
            atexit.register(v._output.close)

    # Ensure that the input and output port selectors respectively
    # select mutually exclusive subsets of the set of all ports exposed by
    # the module:
    if not SelectorMethods.is_in(sel_in, sel):
        raise ValueError('input port selector not in selector of all ports')
    if not SelectorMethods.is_in(sel_out, sel):
        raise ValueError('output port selector not in selector of all ports')
    if not SelectorMethods.are_disjoint(sel_in, sel_out):
        raise ValueError('input and output port selectors not disjoint')

    # Ensure that the graded potential and spiking port selectors
    # respectively select mutually exclusive subsets of the set of all ports
    # exposed by the module:
    if not SelectorMethods.is_in(sel_gpot, sel):
        raise ValueError('gpot port selector not in selector of all ports')
    if not SelectorMethods.is_in(sel_spike, sel):
        raise ValueError('spike port selector not in selector of all ports')
    if not SelectorMethods.are_disjoint(sel_gpot, sel_spike):
        raise ValueError('gpot and spike port selectors not disjoint')

    # Save routing table and mapping between MPI ranks and module IDs:
    self.routing_table = routing_table
    self.rank_to_id = rank_to_id

    # Save module interface data (stored in a dict of BasePortMapper
    # instances):
    self.pm_all = pm_all

    # Generate a unique ID if none is specified:
    if id is None:
        self.id = uid()
    else:
        # If a unique ID was specified, it must be a node in the routing
        # table:
        if routing_table is not None and not routing_table.has_node(id):
            raise ValueError('routing table must contain specified module ID')
        self.id = id

    # Reformat logger name:
    LoggerMixin.__init__(self, 'mod %s' % self.id)

    # Create module interface given the specified ports:
    self.interface = Interface(sel, columns)

    # Set the interface ID to 0; we assume that a module only has one
    # interface:
    self.interface[sel, 'interface'] = 0

    # Set the port attributes:
    self.interface[sel_in, 'io'] = 'in'
    self.interface[sel_out, 'io'] = 'out'
    self.interface[sel_gpot, 'type'] = 'gpot'
    self.interface[sel_spike, 'type'] = 'spike'

    # Find the input and output ports:
    self.in_ports = self.interface.in_ports().to_tuples()
    self.out_ports = self.interface.out_ports().to_tuples()

    # Find the graded potential and spiking ports:
    self.gpot_ports = self.interface.gpot_ports().to_tuples()
    self.spike_ports = self.interface.spike_ports().to_tuples()

    self.in_gpot_ports = self.interface.in_ports().gpot_ports().to_tuples()
    self.in_spike_ports = self.interface.in_ports().spike_ports().to_tuples()
    self.out_gpot_ports = self.interface.out_ports().gpot_ports().to_tuples()
    self.out_spike_ports = self.interface.out_ports().spike_ports().to_tuples()

    # Set up mapper between port identifiers and their associated data:
    if len(data_gpot) != len(self.gpot_ports):
        raise ValueError('incompatible gpot port data array length')
    if len(data_spike) != len(self.spike_ports):
        raise ValueError('incompatible spike port data array length')
    self.data = {}
    self.data['gpot'] = gpuarray.to_gpu(data_gpot)
    self.data['spike'] = gpuarray.to_gpu(data_spike)
    self.pm = {}
    self.pm['gpot'] = GPUPortMapper(sel_gpot, self.data['gpot'],
                                    make_copy=False)
    self.pm['spike'] = GPUPortMapper(sel_spike, self.data['spike'],
                                     make_copy=False)
class BaseModule(ControlledProcess):
    """
    Processing module.

    This class repeatedly executes a work method until it receives
    a quit message via its control network port.

    Parameters
    ----------
    sel : str, unicode, or sequence
        Path-like selector describing the module's interface of
        exposed ports.
    sel_in : str, unicode, or sequence
        Selector describing all input ports in the module's interface.
    sel_out : str, unicode, or sequence
        Selector describing all output ports in the module's interface.
    data : numpy.ndarray
        Data array to associate with ports. Array length must equal the
        number of ports in a module's interface.
    columns : list of str
        Interface port attributes.
    port_data : int
        Network port for transmitting data.
    port_ctrl : int
        Network port for controlling the module instance.
    id : str
        Module identifier. If no identifier is specified, a unique
        identifier is automatically generated.
    debug : bool
        Debug flag. When True, exceptions raised during the work method
        are not be suppressed.
    time_sync : bool
        Time synchronization flag. When True, debug messages are not emitted
        during module synchronization and the time taken to receive all
        incoming data is computed.

    Attributes
    ----------
    interface : Interface
        Object containing information about a module's ports.
    patterns : dict of Pattern
        Pattern objects connecting the module instance with
        other module instances. Keyed on the ID of the other module
        instances.
    pat_ints : dict of tuple of int
        Interface of each pattern that is connected to the module instance.
        Keyed on the ID of the other module instances.
    pm : plsel.PortMapper
        Map between a module's ports and the contents of the `data` attribute.
    data : numpy.ndarray
        Array of data associated with a module's ports.

    Notes
    -----
    If the network ports specified upon instantiation are None, the module
    instance ignores the network entirely.
    """

    # Define properties to perform validation when connectivity status
    # is set:
    _net = 'none'
    @property
    def net(self):
        """
        Network connectivity.
        """
        return self._net
    @net.setter
    def net(self, value):
        if value not in ['none', 'ctrl', 'in', 'out', 'full']:
            raise ValueError('invalid network connectivity value')
        self.log_info('net status changed: %s -> %s' % (self._net, value))
        self._net = value

    # Define properties to perform validation when the maximum number of
    # execution steps set:
    _max_steps = float('inf')
    @property
    def max_steps(self):
        """
        Maximum number of steps to execute.
        """
        return self._max_steps
    @max_steps.setter
    def max_steps(self, value):
        if value <= 0:
            raise ValueError('invalid maximum number of steps')
        self.log_info('maximum number of steps changed: %s -> %s' % \
                      (self._max_steps, value))
        self._max_steps = value

    def __init__(self, sel, sel_in, sel_out, data,
                 columns=['interface', 'io', 'type'],
                 port_data=PORT_DATA, port_ctrl=PORT_CTRL,
                 port_time=PORT_TIME,
                 id=None, debug=False, time_sync=False):
        self.debug = debug
        self.time_sync = time_sync

        # Require several necessary attribute columns:
        assert 'interface' in columns
        assert 'io' in columns
        assert 'type' in columns

        # Generate a unique ID if none is specified:
        if id is None:
            id = uid()

        super(BaseModule, self).__init__(port_ctrl, id)

        # Reformat logger name:
        LoggerMixin.__init__(self, 'mod %s' % self.id)

        # The three network ports must all be distinct:
        if port_data == port_ctrl:
            raise ValueError('data and control ports must differ')
        self.port_data = port_data
        if port_time == port_ctrl or port_time == port_data:
            raise ValueError('time port must differ from data and control ports')
        self.port_time = port_time

        # Initial network connectivity:
        self.net = 'none'

        # Create module interface given the specified ports:
        self.interface = Interface(sel, columns)

        # Set the interface ID to 0; we assume that a module only has one
        # interface:
        self.interface[sel, 'interface'] = 0

        # Set port I/O status:
        assert SelectorMethods.is_in(sel_in, sel)
        assert SelectorMethods.is_in(sel_out, sel)
        assert SelectorMethods.are_disjoint(sel_in, sel_out)
        self.interface[sel_in, 'io'] = 'in'
        self.interface[sel_out, 'io'] = 'out'

        # Set up mapper between port identifiers and their associated data:
        assert len(data) == len(self.interface)
        self.data = data
        self.pm = PortMapper(sel, self.data)

        # Patterns connecting this module instance with other modules instances.
        # Keyed on the IDs of those modules:
        self.patterns = {}

        # Each entry in pat_ints is a tuple containing the identifiers of which
        # of a pattern's identifiers are connected to the current module (first
        # entry) and the modules to which it is connected (second entry).
        # Keyed on the IDs of those modules:
        self.pat_ints = {}

        # Dict for storing incoming data; each entry (corresponding to each
        # module that sends input to the current module) is a deque containing
        # incoming data, which in turn contains transmitted data arrays. Deques
        # are used here to accommodate situations when multiple data from a
        # single source arrive:
        self._in_data = {}

        # List for storing outgoing data; each entry is a tuple whose first
        # entry is the source or destination module ID and whose second entry is
        # the data to transmit:
        self._out_data = []

        # Dictionary containing ports of source modules that
        # send output to this module. Must be initialized immediately before
        # an emulation begins running. Keyed on source module ID:
        self._in_port_dict = {}

        # Dictionary containing ports of destination modules that
        # receive input from this module. Must be initialized immediately before
        # an emulation begins running. Keyed on destination module ID:
        self._out_port_dict = {}

        self._out_ids = []
        self._in_ids = []

    @property
    def N_ports(self):
        """
        Number of ports exposed by module's interface.
        """
        return len(self.interface.ports())

    @property
    def all_ids(self):
        """
        IDs of modules to which the current module is connected.
        """
        return self.patterns.keys()

    @property
    def in_ids(self):
        """
        IDs of modules that send data to this module.
        """
        return [m for m in self.patterns.keys() \
                if self.patterns[m].is_connected(self.pat_ints[m][1],
                                                 self.pat_ints[m][0])]

    @property
    def out_ids(self):
        """
        IDs of modules that receive data from this module.
        """
        return [m for m in self.patterns.keys() \
                if self.patterns[m].is_connected(self.pat_ints[m][0],
                                                 self.pat_ints[m][1])]

    def connect(self, m, pat, int_0, int_1, compat_check=True):
        """
        Connect the current module instance to another module with a pattern instance.

        Parameters
        ----------
        m : BaseModule
            Module instance to connect.
        pat : Pattern
            Pattern instance.
        int_0, int_1 : int
            Which of the pattern's interface to connect to the current module
            and the specified module, respectively.
        compat_check : bool
            Check whether the interfaces of the current and specified modules
            are compatible with the specified pattern. This option is provided
            because compatibility checking can be expensive.
        """
        assert isinstance(m, BaseModule)
        assert isinstance(pat, Pattern)
        assert int_0 in pat.interface_ids and int_1 in pat.interface_ids
        self.log_info('connecting to %s' % m.id)

        # Check compatibility of the interfaces exposed by the modules and the
        # pattern:
        if compat_check:
            self.log_info('checking compatibility of modules {0} and {1} and'
                          ' assigned pattern'.format(self.id, m.id))
            assert self.interface.is_compatible(0, pat.interface, int_0, True)
            assert m.interface.is_compatible(0, pat.interface, int_1, True)

        # Check that no fan-in from different source modules occurs as a result
        # of the new connection by getting the union of all connected input
        # ports for the interfaces of all existing patterns connected to the
        # current module and ensuring that the input ports from the new pattern
        # don't overlap:
        if self.patterns:
            curr_in_ports = reduce(set.union,
                [set(self.patterns[i].connected_ports(self.pat_ints[i][0]).in_ports(tuples=True)) \
                 for i in self.patterns.keys()])
            assert not curr_in_ports.intersection(pat.connected_ports(int_0).in_ports(tuples=True))

        # The pattern instances associated with the current
        # module are keyed on the IDs of the modules to which they connect:
        self.patterns[m.id] = pat
        self.pat_ints[m.id] = (int_0, int_1)

        # Update internal connectivity based upon contents of connectivity
        # object. When this method is invoked, the module's internal
        # connectivity is always upgraded to at least 'ctrl':
        if self.net == 'none':
            self.net = 'ctrl'
        if pat.is_connected(int_0, int_1):
            old_net = self.net
            if self.net == 'ctrl':
                self.net = 'out'
            elif self.net == 'in':
                self.net = 'full'
            self.log_info('net status changed: %s -> %s' % (old_net, self.net))
        if pat.is_connected(int_1, int_0):
            old_net = self.net
            if self.net == 'ctrl':
                self.net = 'in'
            elif self.net == 'out':
                self.net = 'full'
            self.log_info('net status changed: %s -> %s' % (old_net, self.net))

    def _ctrl_stream_shutdown(self):
        """
        Shut down control port handler's stream and ioloop.
        """
        try:
            self.stream_ctrl.flush()
            self.stream_ctrl.stop_on_recv()
            self.ioloop_ctrl.stop()
        except IOError:
            self.log_info('streams already closed')
        except:
            self.log_info('other error occurred')
        else:
            self.log_info('ctrl stream shut down')

    def _ctrl_handler(self, msg):
        """
        Control port handler.
        """
        self.log_info('recv ctrl message: %s' % str(msg))
        if msg[0] == 'quit':
            self._ctrl_stream_shutdown()

            # Force the module's main loop to exit:
            self.running = False
            ack = 'shutdown'

        # One can define additional messages to be recognized by the control
        # handler:
        # elif msg[0] == 'conn':
        #     self.log_info('conn payload: '+str(msgpack.unpackb(msg[1])))
        #     ack = 'ack'
        else:
            ack = 'ack'
        self.sock_ctrl.send(ack)
        self.log_info('sent to manager: %s' % ack)

    def _init_net(self):
        """
        Initialize network connection.
        """

        # Initialize control port handler:
        self.log_info('initializing ctrl network connection')
        super(BaseModule, self)._init_net()

        # Initialize data port handler:
        if self.net == 'none':
            self.log_info('not initializing data network connection')
        else:

            # Don't allow interrupts to prevent the handler from
            # completely executing each time it is called:
            with IgnoreKeyboardInterrupt():
                self.log_info('initializing data network connection')

                # Use a nonblocking port for the data interface; set
                # the linger period to prevent hanging on unsent
                # messages when shutting down:
                self.sock_data = self.zmq_ctx.socket(zmq.DEALER)
                self.sock_data.setsockopt(zmq.IDENTITY, self.id)
                self.sock_data.setsockopt(zmq.LINGER, LINGER_TIME)
                self.sock_data.connect("tcp://localhost:%i" % self.port_data)
                self.log_info('data network connection initialized')

                # Initialize timing port:
                self.log_info('initializing time port')
                self.sock_time = self.zmq_ctx.socket(zmq.DEALER)
                self.sock_time.setsockopt(zmq.IDENTITY, self.id)
                # Fix: the linger period must be set on the time socket;
                # the original mistakenly set it on sock_data a second time,
                # leaving sock_time with the default (infinite) linger:
                self.sock_time.setsockopt(zmq.LINGER, LINGER_TIME)
                self.sock_time.connect("tcp://localhost:%i" % self.port_time)
                sync_dealer(self.sock_time, self.id)
                self.log_info('time port initialized')

    def _get_in_data(self):
        """
        Get input data from incoming transmission buffer.

        Populate the data array associated with a module's ports using input
        data received from other modules.
        """
        if self.net in ['none', 'ctrl']:
            self.log_info('not retrieving from input buffer')
        else:
            self.log_info('retrieving from input buffer')

            # Since fan-in is not permitted, the data from all source modules
            # must necessarily map to different ports; we can therefore write each
            # of the received data to the array associated with the module's ports
            # here without worry of overwriting the data from each source module:
            for in_id in self._in_ids:

                # Check for exceptions so as to not fail on the first emulation
                # step when there is no input data to retrieve:
                try:
                    self.pm.set_by_inds(self._in_port_dict_ids[in_id],
                                        self._in_data[in_id].popleft())
                except:
                    self.log_info('no input data from [%s] retrieved' % in_id)
                else:
                    self.log_info('input data from [%s] retrieved' % in_id)

    def _put_out_data(self):
        """
        Put output data in outgoing transmission buffer.

        Stage data from the data array associated with a module's ports for
        output to other modules.
        """
        if self.net in ['none', 'ctrl']:
            self.log_info('not populating output buffer')
        else:
            self.log_info('populating output buffer')

            # Clear output buffer before populating it:
            self._out_data = []

            # Select data that should be sent to each destination module and append
            # it to the outgoing queue:
            for out_id in self._out_ids:
                try:
                    data = self.pm.get_by_inds(self._out_port_dict_ids[out_id])
                    self._out_data.append((out_id, data))
                except:
                    self.log_info('no output data to [%s] sent' % out_id)
                else:
                    self.log_info('output data to [%s] sent' % out_id)

    def _sync(self):
        """
        Send output data and receive input data.

        Notes
        -----
        Assumes that the attributes used for input and output already
        exist.

        Each message is a tuple containing a module ID and data; for
        outbound messages, the ID is that of the destination module.
        for inbound messages, the ID is that of the source module.
        Data is serialized before being sent and unserialized when
        received.
        """
        if self.net in ['none', 'ctrl']:
            self.log_info('not synchronizing with network')
        else:
            self.log_info('synchronizing with network')

            # Send outbound data:
            start = time.time()
            self._put_out_data()
            if self.net in ['out', 'full']:

                # Send all data in outbound buffer:
                send_ids = [out_id for out_id in self._out_ids]
                for out_id, data in self._out_data:
                    self.sock_data.send(msgpack.packb((out_id, data)))
                    send_ids.remove(out_id)
                    if not self.time_sync:
                        self.log_info('sent to   %s: %s' % (out_id, str(data)))

                # Send data tuples containing None to those modules for which no
                # actual data was generated to satisfy the barrier condition:
                for out_id in send_ids:
                    self.sock_data.send(msgpack.packb((out_id, None)))
                    if not self.time_sync:
                        self.log_info('sent to   %s: %s' % (out_id, None))

                # All output IDs should be sent data by this point:
                if not self.time_sync:
                    self.log_info('sent data to all output IDs')

            # Receive inbound data:
            if self.net in ['in', 'full']:

                # Wait until inbound data is received from all source modules:
                recv_ids = set(self._in_ids)
                nbytes = 0
                while recv_ids:

                    # Poll to avoid blocking:
                    if self.sock_data.poll(POLL_TIMEOUT):
                        data_packed = self.sock_data.recv()
                        in_id, data = msgpack.unpackb(data_packed)
                        if not self.time_sync:
                            self.log_info('recv from %s: %s' % (in_id, str(data)))

                        # Ignore incoming data containing None:
                        if data is not None:
                            self._in_data[in_id].append(data)

                        # Record number of bytes of transmitted serialized data:
                        nbytes += len(data_packed)

                        # Remove source module ID from set of IDs from which to
                        # expect data:
                        recv_ids.discard(in_id)

                    # Stop the synchronization if a quit message has been received:
                    if not self.running:
                        if not self.time_sync:
                            self.log_info('run loop stopped - stopping sync')
                        break
                if not self.time_sync:
                    self.log_info('recv data from all input IDs')
                self._get_in_data()

            # Transmit synchronization time:
            # NOTE(review): `nbytes` is only bound in the 'in'/'full' branch
            # above; if time_sync is enabled while self.net == 'out', this
            # send would raise NameError — confirm whether that combination
            # can occur in practice.
            stop = time.time()
            if self.time_sync:
                self.log_info('sent timing data to master')
                self.sock_time.send(msgpack.packb((self.id, self.steps, 'sync',
                                                   (start, stop, nbytes))))

    def pre_run(self, *args, **kwargs):
        """
        Code to run before main module run loop.

        Code in this method will be executed after a module's process has been
        launched and all connectivity objects made available, but before the
        main run loop begins.
        """
        self.log_info('performing pre-emulation operations')

    def post_run(self, *args, **kwargs):
        """
        Code to run after main module run loop.

        Code in this method will be executed after a module's main loop has
        terminated.
        """
        self.log_info('performing post-emulation operations')

    def run_step(self):
        """
        Module work method.

        This method should be implemented to do something interesting with new
        input port data in the module's `pm` attribute and update the attribute's
        output port data if necessary. It should not interact with any other
        class attributes.
        """
        self.log_info('running execution step')

    def post_run_step(self):
        """
        Code to run after each execution step.

        This method can be implemented to do something immediately after each
        invocation of `self.run_step()`, e.g., save generated data to a file, etc.
        """
        pass

    def _init_port_dicts(self):
        """
        Initial dictionaries of source/destination ports in current module.
        """

        # Extract identifiers of source ports in the current module's interface
        # for all modules receiving output from the current module:
        self._out_port_dict = {}
        self._out_port_dict_ids = {}
        self._out_ids = self.out_ids
        for out_id in self._out_ids:
            self.log_info('extracting output ports for %s' % out_id)

            # Get interfaces of pattern connecting the current module to
            # destination module `out_id`; `from_int` is connected to the
            # current module, `to_int` is connected to the other module:
            from_int, to_int = self.pat_ints[out_id]

            # Get ports in interface (`from_int`) connected to the current
            # module that are connected to the other module via the pattern:
            self._out_port_dict[out_id] = \
                self.patterns[out_id].src_idx(from_int, to_int)
            self._out_port_dict_ids[out_id] = \
                self.pm.ports_to_inds(self._out_port_dict[out_id])

        # Extract identifiers of destination ports in the current module's
        # interface for all modules sending input to the current module:
        self._in_port_dict = {}
        self._in_port_dict_ids = {}
        self._in_ids = self.in_ids
        for in_id in self._in_ids:
            self.log_info('extracting input ports for %s' % in_id)

            # Get interfaces of pattern connecting the current module to
            # source module `out_id`; `to_int` is connected to the current
            # module, `from_int` is connected to the other module:
            to_int, from_int = self.pat_ints[in_id]

            # Get ports in interface (`to_int`) connected to the current
            # module that are connected to the other module via the pattern:
            self._in_port_dict[in_id] = \
                self.patterns[in_id].dest_idx(from_int, to_int)
            self._in_port_dict_ids[in_id] = \
                self.pm.ports_to_inds(self._in_port_dict[in_id])

    def run(self):
        """
        Body of process.
        """

        # Don't allow keyboard interruption of process:
        self.log_info('starting')
        with IgnoreKeyboardInterrupt():

            # Initialize environment:
            self._init_net()

            # Initialize _out_port_dict and _in_port_dict attributes:
            self._init_port_dicts()

            # Initialize Buffer for incoming data.  Dict used to store the
            # incoming data keyed by the source module id.  Each value is a
            # queue buffering the received data:
            self._in_data = {k: collections.deque() for k in self.in_ids}

            # Perform any pre-emulation operations:
            self.pre_run()

            self.running = True
            self.steps = 0
            if self.time_sync:
                self.sock_time.send(msgpack.packb((self.id, self.steps, 'start',
                                                   time.time())))
                self.log_info('sent start time to master')
            while self.steps < self.max_steps:
                self.log_info('execution step: %s/%s' % (self.steps,
                                                         self.max_steps))

                # If the debug flag is set, don't catch exceptions so that
                # errors will lead to visible failures:
                if self.debug:

                    # Run the processing step:
                    self.run_step()

                    # Do post-processing:
                    self.post_run_step()

                    # Synchronize:
                    self._sync()
                else:

                    # Run the processing step:
                    catch_exception(self.run_step, self.log_info)

                    # Do post processing:
                    catch_exception(self.post_run_step, self.log_info)

                    # Synchronize:
                    catch_exception(self._sync, self.log_info)

                # Exit run loop when a quit message has been received:
                if not self.running:
                    self.log_info('run loop stopped')
                    break

                self.steps += 1
            if self.time_sync:
                self.sock_time.send(msgpack.packb((self.id, self.steps, 'stop',
                                                   time.time())))
                self.log_info('sent stop time to master')
            self.log_info('maximum number of steps reached')

            # Perform any post-emulation operations:
            self.post_run()

            # Shut down the control handler and inform the manager that the
            # module has shut down:
            self._ctrl_stream_shutdown()
            ack = 'shutdown'
            self.sock_ctrl.send(ack)
            self.log_info('sent to manager: %s' % ack)

        self.log_info('exiting')
def __init__(self, sel, sel_in, sel_out, sel_gpot, sel_spike,
             data_gpot, data_spike,
             columns=['interface', 'io', 'type'],
             ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG, spike_tag=SPIKE_TAG,
             id=None, device=None,
             routing_table=None, rank_to_id=None, pm_all=None,
             debug=False, time_sync=False):
    """
    Initialize the MPI-backed GPU module.

    Validates the port selectors, initializes the GPU, registers MPI
    cleanup handlers, builds the module interface from the selectors, and
    creates GPU-resident port mappers for the graded potential and spiking
    port data arrays.
    """

    # Call super for BaseModule rather than Module because most of the
    # functionality of the former's constructor must be overridden in any case:
    super(BaseModule, self).__init__(ctrl_tag)
    self.debug = debug
    self.time_sync = time_sync
    self.device = device

    # MPI message tags used to distinguish gpot and spike data transfers:
    self._gpot_tag = gpot_tag
    self._spike_tag = spike_tag

    # Require several necessary attribute columns.
    # NOTE(review): these (and the selector checks below) use `assert` and
    # are therefore stripped under `python -O`; consider explicit raises:
    assert 'interface' in columns
    assert 'io' in columns
    assert 'type' in columns

    # Must run before any GPU arrays are allocated below:
    self._init_gpu()

    # This is needed to ensure that MPI_Finalize is called before PyCUDA
    # attempts to clean up; see
    # https://groups.google.com/forum/#!topic/mpi4py/by0Rd5q0Ayw
    atexit.register(MPI.Finalize)

    # Manually register the file close method associated with MPIOutput
    # so that it is called by atexit before MPI.Finalize() (if the file is
    # closed after MPI.Finalize() is called, an error will occur):
    for k, v in twiggy.emitters.iteritems():
        if isinstance(v._output, MPIOutput):
            atexit.register(v._output.close)

    # Ensure that the input and output port selectors respectively
    # select mutually exclusive subsets of the set of all ports exposed by
    # the module:
    assert SelectorMethods.is_in(sel_in, sel)
    assert SelectorMethods.is_in(sel_out, sel)
    assert SelectorMethods.are_disjoint(sel_in, sel_out)

    # Ensure that the graded potential and spiking port selectors
    # respectively select mutually exclusive subsets of the set of all ports
    # exposed by the module:
    assert SelectorMethods.is_in(sel_gpot, sel)
    assert SelectorMethods.is_in(sel_spike, sel)
    assert SelectorMethods.are_disjoint(sel_gpot, sel_spike)

    # Save routing table and mapping between MPI ranks and module IDs:
    self.routing_table = routing_table
    self.rank_to_id = rank_to_id

    # Save module interface data (stored in a dict of BasePortMapper instances):
    self.pm_all = pm_all

    # Generate a unique ID if none is specified:
    if id is None:
        self.id = uid()
    else:
        # Save routing table; if a unique ID was specified, it must be a node in
        # the routing table:
        if routing_table is not None and not routing_table.has_node(id):
            raise ValueError('routing table must contain specified module ID')
        self.id = id

    # Reformat logger name:
    LoggerMixin.__init__(self, 'mod %s' % self.id)

    # Create module interface given the specified ports:
    self.interface = Interface(sel, columns)

    # Set the interface ID to 0; we assume that a module only has one interface:
    self.interface[sel, 'interface'] = 0

    # Set the port attributes:
    self.interface[sel_in, 'io'] = 'in'
    self.interface[sel_out, 'io'] = 'out'
    self.interface[sel_gpot, 'type'] = 'gpot'
    self.interface[sel_spike, 'type'] = 'spike'

    # Find the input and output ports:
    self.in_ports = self.interface.in_ports().to_tuples()
    self.out_ports = self.interface.out_ports().to_tuples()

    # Find the graded potential and spiking ports:
    self.gpot_ports = self.interface.gpot_ports().to_tuples()
    self.spike_ports = self.interface.spike_ports().to_tuples()

    # Cache the four I/O-by-type port subsets for use during data transfer:
    self.in_gpot_ports = self.interface.in_ports().gpot_ports().to_tuples()
    self.in_spike_ports = self.interface.in_ports().spike_ports().to_tuples()
    self.out_gpot_ports = self.interface.out_ports().gpot_ports().to_tuples()
    self.out_spike_ports = self.interface.out_ports().spike_ports().to_tuples()

    # Set up mapper between port identifiers and their associated data;
    # each data array must have exactly one entry per port of its type:
    assert len(data_gpot) == len(self.gpot_ports)
    assert len(data_spike) == len(self.spike_ports)
    self.data = {}
    # Copy the host arrays to the GPU; the port mappers below wrap the GPU
    # arrays directly (make_copy=False) so mapper writes update self.data:
    self.data['gpot'] = gpuarray.to_gpu(data_gpot)
    self.data['spike'] = gpuarray.to_gpu(data_spike)
    self.pm = {}
    self.pm['gpot'] = GPUPortMapper(sel_gpot, self.data['gpot'], make_copy=False)
    self.pm['spike'] = GPUPortMapper(sel_spike, self.data['spike'], make_copy=False)
class Module(BaseModule):
    """
    GPU-backed processing module that exchanges port data over MPI.

    Exposes an interface of input/output, graded-potential/spiking ports
    described by path-like selectors; port data is stored in GPU arrays and
    transmitted to/from other modules with nonblocking MPI calls.

    Parameters
    ----------
    sel, sel_in, sel_out, sel_gpot, sel_spike : str, unicode, or sequence
        Selectors describing all ports exposed by the module and the
        input/output and graded-potential/spiking subsets thereof.
    data_gpot, data_spike : numpy.ndarray
        Data arrays associated with the graded potential and spiking ports.
        Lengths must match the respective port counts.
    columns : list of str
        Interface port attributes; must contain 'interface', 'io', 'type'.
    ctrl_tag, gpot_tag, spike_tag : int
        MPI tags for control, graded potential, and spike messages.
    id : str
        Module identifier; generated automatically if unspecified.
    device : int
        GPU device to use.
    routing_table : object
        Table of connectivity between modules (must contain `id` if both
        are specified).
    rank_to_id : bidict
        Mapping between MPI ranks and module IDs.
    pm_all : dict of BasePortMapper
        Port mappers of all modules, keyed by data type and module ID.
    debug, time_sync : bool
        Debugging and synchronization-timing flags.
    """

    def __init__(self, sel, sel_in, sel_out,
                 sel_gpot, sel_spike, data_gpot, data_spike,
                 columns=['interface', 'io', 'type'],
                 ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG, spike_tag=SPIKE_TAG,
                 id=None, device=None,
                 routing_table=None, rank_to_id=None,
                 pm_all=None, debug=False, time_sync=False):

        # Call super for BaseModule rather than Module because most of the
        # functionality of the former's constructor must be overridden in any
        # case:
        super(BaseModule, self).__init__(ctrl_tag)
        self.debug = debug
        self.time_sync = time_sync
        self.device = device

        self._gpot_tag = gpot_tag
        self._spike_tag = spike_tag

        # Require several necessary attribute columns:
        assert 'interface' in columns
        assert 'io' in columns
        assert 'type' in columns

        self._init_gpu()

        # This is needed to ensure that MPI_Finalize is called before PyCUDA
        # attempts to clean up; see
        # https://groups.google.com/forum/#!topic/mpi4py/by0Rd5q0Ayw
        atexit.register(MPI.Finalize)

        # Manually register the file close method associated with MPIOutput
        # so that it is called by atexit before MPI.Finalize() (if the file is
        # closed after MPI.Finalize() is called, an error will occur):
        for k, v in twiggy.emitters.iteritems():
            if isinstance(v._output, MPIOutput):
                atexit.register(v._output.close)

        # Ensure that the input and output port selectors respectively
        # select mutually exclusive subsets of the set of all ports exposed by
        # the module:
        assert SelectorMethods.is_in(sel_in, sel)
        assert SelectorMethods.is_in(sel_out, sel)
        assert SelectorMethods.are_disjoint(sel_in, sel_out)

        # Ensure that the graded potential and spiking port selectors
        # respectively select mutually exclusive subsets of the set of all
        # ports exposed by the module:
        assert SelectorMethods.is_in(sel_gpot, sel)
        assert SelectorMethods.is_in(sel_spike, sel)
        assert SelectorMethods.are_disjoint(sel_gpot, sel_spike)

        # Save routing table and mapping between MPI ranks and module IDs:
        self.routing_table = routing_table
        self.rank_to_id = rank_to_id

        # Save module interface data (stored in a dict of BasePortMapper
        # instances):
        self.pm_all = pm_all

        # Generate a unique ID if none is specified:
        if id is None:
            self.id = uid()
        else:
            # If a unique ID was specified and the routing table is not empty,
            # the ID must be a node in the routing table:
            if routing_table is not None and not routing_table.has_node(id):
                raise ValueError('routing table must contain specified module ID')
            self.id = id

        # Reformat logger name:
        LoggerMixin.__init__(self, 'mod %s' % self.id)

        # Create module interface given the specified ports:
        self.interface = Interface(sel, columns)

        # Set the interface ID to 0; we assume that a module only has one
        # interface:
        self.interface[sel, 'interface'] = 0

        # Set the port attributes:
        self.interface[sel_in, 'io'] = 'in'
        self.interface[sel_out, 'io'] = 'out'
        self.interface[sel_gpot, 'type'] = 'gpot'
        self.interface[sel_spike, 'type'] = 'spike'

        # Find the input and output ports:
        self.in_ports = self.interface.in_ports().to_tuples()
        self.out_ports = self.interface.out_ports().to_tuples()

        # Find the graded potential and spiking ports:
        self.gpot_ports = self.interface.gpot_ports().to_tuples()
        self.spike_ports = self.interface.spike_ports().to_tuples()

        self.in_gpot_ports = self.interface.in_ports().gpot_ports().to_tuples()
        self.in_spike_ports = self.interface.in_ports().spike_ports().to_tuples()
        self.out_gpot_ports = self.interface.out_ports().gpot_ports().to_tuples()
        self.out_spike_ports = self.interface.out_ports().spike_ports().to_tuples()

        # Set up mapper between port identifiers and their associated data:
        assert len(data_gpot) == len(self.gpot_ports)
        assert len(data_spike) == len(self.spike_ports)
        self.data = {}
        self.data['gpot'] = gpuarray.to_gpu(data_gpot)
        self.data['spike'] = gpuarray.to_gpu(data_spike)
        self.pm = {}
        self.pm['gpot'] = GPUPortMapper(sel_gpot, self.data['gpot'], make_copy=False)
        self.pm['spike'] = GPUPortMapper(sel_spike, self.data['spike'], make_copy=False)

    def _init_port_dicts(self):
        """
        Initial dictionaries of source/destination ports in current module.

        For every module sending input to this module, records (a) which of
        this module's ports receive that input (`_in_port_dict*`) and (b)
        which of the source module's ports emit it (`_from_port_dict*`), both
        as port identifiers and as integer indices into the respective port
        mappers.
        """
        # Extract identifiers of source ports in all modules sending input to
        # the current module's ports and of destination ports in the current
        # module's interface for all modules sending input to the current
        # module:
        self._in_port_dict = {}
        self._in_port_dict['gpot'] = {}
        self._in_port_dict['spike'] = {}
        self._in_port_dict_ids = {}
        self._in_port_dict_ids['gpot'] = {}
        self._in_port_dict_ids['spike'] = {}

        self._from_port_dict = {}
        self._from_port_dict['gpot'] = {}
        self._from_port_dict['spike'] = {}
        self._from_port_dict_ids = {}
        self._from_port_dict_ids['gpot'] = {}
        self._from_port_dict_ids['spike'] = {}

        self._in_ids = self.routing_table.src_ids(self.id)
        for in_id in self._in_ids:
            self.log_info('extracting input ports for %s' % in_id)

            # Get interfaces of pattern connecting the current module to
            # source module `in_id`; `int_1` is connected to the current
            # module, `int_0` is connected to the other module:
            pat = self.routing_table[in_id, self.id]['pattern']
            int_0 = self.routing_table[in_id, self.id]['int_0']
            int_1 = self.routing_table[in_id, self.id]['int_1']

            # Get ports in interface (`int_1`) connected to the current
            # module that are connected to the other module via the pattern:
            self._in_port_dict['gpot'][in_id] = \
                pat.dest_idx(int_0, int_1, 'gpot', 'gpot')
            self._in_port_dict_ids['gpot'][in_id] = \
                self.pm['gpot'].ports_to_inds(self._in_port_dict['gpot'][in_id])
            self._in_port_dict['spike'][in_id] = \
                pat.dest_idx(int_0, int_1, 'spike', 'spike')
            self._in_port_dict_ids['spike'][in_id] = \
                self.pm['spike'].ports_to_inds(self._in_port_dict['spike'][in_id])

            # Get ports in interface (`int_0`) connected to the other module
            # that are connected to the current module via the pattern:
            self._from_port_dict['gpot'][in_id] = \
                pat.src_idx(int_0, int_1, 'gpot', 'gpot')
            self._from_port_dict_ids['gpot'][in_id] = \
                self.pm_all['gpot'][in_id].ports_to_inds(self._from_port_dict['gpot'][in_id])
            self._from_port_dict['spike'][in_id] = \
                pat.src_idx(int_0, int_1, 'spike', 'spike')
            # BUG FIX: the spike index list was previously computed from
            # self._from_port_dict['gpot'][in_id], yielding wrong source
            # indices whenever the gpot and spike port sets differ:
            self._from_port_dict_ids['spike'][in_id] = \
                self.pm_all['spike'][in_id].ports_to_inds(self._from_port_dict['spike'][in_id])

    def _init_comm_bufs(self):
        """
        Buffers for receiving data from other modules.

        Notes
        -----
        Must be executed after `_init_port_dicts()`.
        """
        # Buffer interface to and MPI type of this module's port data array:
        self._data_int = {}
        self._data_int['gpot'] = bufint(self.data['gpot'])
        self._data_int['spike'] = bufint(self.data['spike'])
        self._data_mtype = {}
        self._data_mtype['gpot'] = dtype_to_mpi(self.data['gpot'].dtype)
        self._data_mtype['spike'] = dtype_to_mpi(self.data['spike'].dtype)

        # Buffers (and their interfaces and MPI types) for receiving data
        # transmitted from source modules:
        self._in_buf = {}
        self._in_buf['gpot'] = {}
        self._in_buf['spike'] = {}
        self._in_buf_int = {}
        self._in_buf_int['gpot'] = {}
        self._in_buf_int['spike'] = {}
        self._in_buf_mtype = {}
        self._in_buf_mtype['gpot'] = {}
        self._in_buf_mtype['spike'] = {}
        for in_id in self._in_ids:
            # The buffers must be the same size as the port data arrays of the
            # modules from which they receive data:
            self._in_buf['gpot'][in_id] = \
                gpuarray.empty(len(self.pm_all['gpot'][in_id]),
                               self.pm['gpot'].dtype)
            self._in_buf_int['gpot'][in_id] = bufint(self._in_buf['gpot'][in_id])
            # BUG FIX: pass the buffer's dtype to dtype_to_mpi (as done for
            # _data_mtype above), not the GPU array itself:
            self._in_buf_mtype['gpot'][in_id] = \
                dtype_to_mpi(self._in_buf['gpot'][in_id].dtype)

            self._in_buf['spike'][in_id] = \
                gpuarray.empty(len(self.pm_all['spike'][in_id]),
                               self.pm['spike'].dtype)
            self._in_buf_int['spike'][in_id] = bufint(self._in_buf['spike'][in_id])
            self._in_buf_mtype['spike'][in_id] = \
                dtype_to_mpi(self._in_buf['spike'][in_id].dtype)

    def _sync(self):
        """
        Send output data and receive input data.

        Transmits the entire gpot/spike port data arrays to every destination
        module with nonblocking sends, posts nonblocking receives for every
        source module, waits for completion, and then scatters the received
        buffers into this module's port data arrays. When `time_sync` is set,
        timing statistics are sent to the master instead of per-peer logging.
        """
        if self.time_sync:
            start = time.time()
        requests = []

        # Transmit the entire port data array to each destination module:
        dest_ids = self.routing_table.dest_ids(self.id)
        for dest_id in dest_ids:
            dest_rank = self.rank_to_id[:dest_id]
            r = MPI.COMM_WORLD.Isend([self._data_int['gpot'],
                                      self._data_mtype['gpot']],
                                     dest_rank, GPOT_TAG)
            requests.append(r)
            r = MPI.COMM_WORLD.Isend([self._data_int['spike'],
                                      self._data_mtype['spike']],
                                     dest_rank, SPIKE_TAG)
            requests.append(r)
            if not self.time_sync:
                self.log_info('sending to %s' % dest_id)
        if not self.time_sync:
            self.log_info('sent all data from %s' % self.id)

        # For each source module, receive elements and copy them into the
        # current module's port data array:
        src_ids = self.routing_table.src_ids(self.id)
        for src_id in src_ids:
            src_rank = self.rank_to_id[:src_id]
            r = MPI.COMM_WORLD.Irecv([self._in_buf_int['gpot'][src_id],
                                      self._in_buf_mtype['gpot'][src_id]],
                                     source=src_rank, tag=GPOT_TAG)
            requests.append(r)
            r = MPI.COMM_WORLD.Irecv([self._in_buf_int['spike'][src_id],
                                      self._in_buf_mtype['spike'][src_id]],
                                     source=src_rank, tag=SPIKE_TAG)
            requests.append(r)
            if not self.time_sync:
                self.log_info('receiving from %s' % src_id)
        # Waitall is a classmethod; no dummy Request instance is needed:
        MPI.Request.Waitall(requests)
        if not self.time_sync:
            self.log_info('received all data received by %s' % self.id)

        # Copy received elements into the current module's data array:
        n_gpot = 0
        n_spike = 0
        for src_id in src_ids:
            ind_from_gpot = self._from_port_dict_ids['gpot'][src_id]
            ind_in_gpot = self._in_port_dict_ids['gpot'][src_id]
            set_by_inds_from_inds(self.data['gpot'], ind_in_gpot,
                                  self._in_buf['gpot'][src_id], ind_from_gpot)
            n_gpot += len(self._in_buf['gpot'][src_id])
            ind_from_spike = self._from_port_dict_ids['spike'][src_id]
            ind_in_spike = self._in_port_dict_ids['spike'][src_id]
            set_by_inds_from_inds(self.data['spike'], ind_in_spike,
                                  self._in_buf['spike'][src_id], ind_from_spike)
            n_spike += len(self._in_buf['spike'][src_id])

        # Save timing data:
        if self.time_sync:
            stop = time.time()
            #self.log_info('sent timing data to master')
            self.intercomm.isend(['time',
                                  (self.rank, self.steps, start, stop,
                                   n_gpot*self.pm['gpot'].dtype.itemsize+\
                                   n_spike*self.pm['spike'].dtype.itemsize)],
                                 dest=0, tag=self._ctrl_tag)
        else:
            self.log_info('saved all data received by %s' % self.id)
def connect(self, id_0, id_1, pat, int_0=0, int_1=1, compat_check=True):
    """
    Specify connection between two module instances with a Pattern instance.

    Parameters
    ----------
    id_0, id_1 : str
        Identifiers of module instances to connect.
    pat : Pattern
        Pattern instance.
    int_0, int_1 : int
        Which of the pattern's interfaces to connect to `id_0` and `id_1`,
        respectively.
    compat_check : bool
        Check whether the interfaces of the specified modules are compatible
        with the specified pattern. This option is provided because
        compatibility checking can be expensive.

    Raises
    ------
    ValueError
        If `pat` is not a Pattern, either module ID is unknown, the interface
        identifiers are not in the pattern, or (when `compat_check` is set) a
        module interface is incompatible with its assigned pattern interface.

    Notes
    -----
    Assumes that the constructors of the module types contain a `sel`
    parameter.
    """
    if not isinstance(pat, Pattern):
        raise ValueError('pat is not a Pattern instance')
    if id_0 not in self.rank_to_id.values():
        raise ValueError('unrecognized module id %s' % id_0)
    if id_1 not in self.rank_to_id.values():
        raise ValueError('unrecognized module id %s' % id_1)
    if not (int_0 in pat.interface_ids and int_1 in pat.interface_ids):
        raise ValueError('unrecognized pattern interface identifiers')
    self.log_info('connecting modules {0} and {1}'.format(id_0, id_1))

    # Check compatibility of the interfaces exposed by the modules and the
    # pattern; since the manager only contains module classes and not class
    # instances, we need to create Interface instances from the selectors
    # associated with the modules in order to test their compatibility:
    if compat_check:
        rank_0 = self.rank_to_id.inv[id_0]
        rank_1 = self.rank_to_id.inv[id_1]

        self.log_info('checking compatibility of modules {0} and {1} and'
                      ' assigned pattern'.format(id_0, id_1))
        mod_int_0 = Interface(self._kwargs[rank_0]['sel'])
        mod_int_0[self._kwargs[rank_0]['sel']] = 0
        mod_int_1 = Interface(self._kwargs[rank_1]['sel'])
        mod_int_1[self._kwargs[rank_1]['sel']] = 0

        mod_int_0[self._kwargs[rank_0]['sel_in'], 'io'] = 'in'
        mod_int_0[self._kwargs[rank_0]['sel_out'], 'io'] = 'out'
        mod_int_0[self._kwargs[rank_0]['sel_gpot'], 'type'] = 'gpot'
        mod_int_0[self._kwargs[rank_0]['sel_spike'], 'type'] = 'spike'
        mod_int_1[self._kwargs[rank_1]['sel_in'], 'io'] = 'in'
        mod_int_1[self._kwargs[rank_1]['sel_out'], 'io'] = 'out'
        mod_int_1[self._kwargs[rank_1]['sel_gpot'], 'type'] = 'gpot'
        mod_int_1[self._kwargs[rank_1]['sel_spike'], 'type'] = 'spike'

        if not mod_int_0.is_compatible(0, pat.interface, int_0, True):
            raise ValueError('module %s interface incompatible '
                             'with pattern interface %s' % (id_0, int_0))
        if not mod_int_1.is_compatible(0, pat.interface, int_1, True):
            raise ValueError('module %s interface incompatible '
                             'with pattern interface %s' % (id_1, int_1))

    # XXX Need to check for fan-in XXX

    # Store the pattern information in the routing table:
    self.log_info('updating routing table with pattern')
    # BUG FIX: the direction checks previously used the hard-coded interface
    # identifiers (0, 1)/(1, 0); when callers pass swapped `int_0`/`int_1`
    # values, the routing direction would be recorded backwards. Behavior is
    # unchanged for the default int_0=0, int_1=1:
    if pat.is_connected(int_0, int_1):
        self.routing_table[id_0, id_1] = {'pattern': pat,
                                          'int_0': int_0, 'int_1': int_1}
    if pat.is_connected(int_1, int_0):
        self.routing_table[id_1, id_0] = {'pattern': pat,
                                          'int_0': int_1, 'int_1': int_0}

    self.log_info('connected modules {0} and {1}'.format(id_0, id_1))
class Module(BaseModule):
    """
    Processing module.

    This class repeatedly executes a work method until it receives a quit
    message via its control port.

    Parameters
    ----------
    selector : str, unicode, or sequence
        Path-like selector describing the module's interface of exposed ports.
    sel_gpot : str, unicode, or sequence
        Path-like selector describing the graded potential ports in the
        module's interface.
    sel_spike : str, unicode, or sequence
        Path-like selector describing the spiking ports in the module's
        interface.
    data_gpot : numpy.ndarray
        Data array to associate with graded potential ports. Array length must
        equal the number of graded potential ports in the module's interface.
    data_spike : numpy.ndarray
        Data array to associate with spiking ports. Array length must equal
        the number of spiking ports in the module's interface.
    columns : list of str
        Interface port attributes. This list must at least contain
        'interface', 'io', and 'type'.
    port_data : int
        Network port for transmitting data.
    port_ctrl : int
        Network port for controlling the module instance.
    id : str
        Module identifier. If no identifier is specified, a unique identifier
        is automatically generated.
    device : int
        GPU device to use.
    debug : bool
        Debug flag.

    Notes
    -----
    A module instance connected to other module instances contains a list of
    the connectivity objects that describe incoming connects and a list of
    masks that select for the neurons whose data must be transmitted to
    destination modules.
    """

    def __init__(self, selector, sel_gpot, sel_spike, data_gpot, data_spike,
                 columns=['interface', 'io', 'type'],
                 port_data=PORT_DATA, port_ctrl=PORT_CTRL,
                 id=None, device=None, debug=False):
        self.debug = debug
        self.device = device

        # Require several necessary attribute columns:
        assert 'interface' in columns
        assert 'io' in columns
        assert 'type' in columns

        # Generate a unique ID if none is specified:
        if id is None:
            id = uid()

        # Skip BaseModule's constructor deliberately; its port/data handling
        # is replaced below:
        super(BaseModule, self).__init__(port_ctrl, id)

        # Logging:
        self.logger = twiggy.log.name('module %s' % self.id)

        # Data port:
        if port_data == port_ctrl:
            raise ValueError('data and control ports must differ')
        self.port_data = port_data

        # Initial network connectivity:
        self.net = 'none'

        # Create module interface given the specified ports:
        self.interface = Interface(selector, columns)

        # Set the interface ID to 0; we assume that a module only has one
        # interface:
        self.interface[selector, 'interface'] = 0

        # Set port types; the gpot and spike selectors must select mutually
        # exclusive subsets of the set of all exposed ports:
        assert PathLikeSelector.is_in(sel_gpot, selector)
        assert PathLikeSelector.is_in(sel_spike, selector)
        assert PathLikeSelector.are_disjoint(sel_gpot, sel_spike)
        self.interface[sel_gpot, 'type'] = 'gpot'
        self.interface[sel_spike, 'type'] = 'spike'

        # Set up mapper between port identifiers and their associated data:
        assert len(data_gpot) == len(self.interface.gpot_ports())
        assert len(data_spike) == len(self.interface.spike_ports())
        self.data = {}
        self.data['gpot'] = data_gpot
        self.data['spike'] = data_spike
        self.pm = {}
        self.pm['gpot'] = PortMapper(self.data['gpot'], sel_gpot)
        self.pm['spike'] = PortMapper(self.data['spike'], sel_spike)

        # Patterns connecting this module instance with other modules
        # instances. Keyed on the IDs of those modules:
        self.patterns = {}

        # Each entry in pat_ints is a tuple containing the identifiers of
        # which of a pattern's identifiers are connected to the current module
        # (first entry) and the modules to which it is connected (second
        # entry). Keyed on the IDs of those modules:
        self.pat_ints = {}

        # Dict for storing incoming data; each entry (corresponding to each
        # module that sends input to the current module) is a deque containing
        # incoming data, which in turn contains transmitted data arrays.
        # Deques are used here to accommodate situations when multiple data
        # from a single source arrive:
        self._in_data = {}

        # List for storing outgoing data; each entry is a tuple whose first
        # entry is the source or destination module ID and whose second entry
        # is the data to transmit:
        self._out_data = []

        # Dictionaries containing ports of source modules that send output to
        # this module. Must be initialized immediately before an emulation
        # begins running. Keyed on source module ID:
        self._in_port_dict = {}
        self._in_port_dict['gpot'] = {}
        self._in_port_dict['spike'] = {}

        # Dictionaries containing ports of destination modules that receive
        # input from this module. Must be initialized immediately before an
        # emulation begins running. Keyed on destination module ID:
        self._out_port_dict = {}
        self._out_port_dict['gpot'] = {}
        self._out_port_dict['spike'] = {}

    def _init_gpu(self):
        """
        Initialize GPU device.

        Notes
        -----
        Must be called from within the `run()` method, not from within
        `__init__()`.
        """
        if self.device is None:
            self.logger.info('no GPU specified - not initializing ')
        else:
            drv.init()
            try:
                self.gpu_ctx = drv.Device(self.device).make_context()
            except Exception as e:
                # BUG FIX: e.message is deprecated and absent on many
                # exception classes; use str(e) instead:
                self.logger.info('_init_gpu exception: ' + str(e))
            else:
                atexit.register(self.gpu_ctx.pop)
                self.logger.info('GPU initialized')

    @property
    def N_gpot_ports(self):
        """
        Number of exposed graded-potential ports.
        """
        return len(self.interface.gpot_ports())

    @property
    def N_spike_ports(self):
        """
        Number of exposed spiking ports.
        """
        return len(self.interface.spike_ports())

    def _get_in_data(self):
        """
        Get input data from incoming transmission buffer.

        Populate the data arrays associated with a module's ports using input
        data received from other modules.
        """
        self.logger.info('retrieving from input buffer')

        # Since fan-in is not permitted, the data from all source modules
        # must necessarily map to different ports; we can therefore write each
        # of the received data to the array associated with the module's ports
        # here without worry of overwriting the data from each source module:
        for in_id in self.in_ids:
            # Check for exceptions so as to not fail on the first emulation
            # step when there is no input data to retrieve:
            try:
                # The first entry of `data` contains graded potential values,
                # while the second contains integer indices of the current
                # module's ports that should receive transmitted spikes:
                data = self._in_data[in_id].popleft()
            except IndexError:
                self.logger.info('no input data from [%s] retrieved' % in_id)
            else:
                self.logger.info('input data from [%s] retrieved' % in_id)

                # Assign transmitted graded potential values directly to port
                # data array:
                self.pm['gpot'][self._in_port_dict['gpot'][in_id]] = data[0]

                # Clear all input spike port data..
                self.pm['spike'][self._in_port_dict['spike'][in_id]] = 0

                # ..and then set the port data using the transmitted
                # information about source module spikes:
                self.pm['spike'][data[1]] = 1

    def _put_out_data(self):
        """
        Put specified output data in outgoing transmission buffer.

        Stage data from the data arrays associated with a module's ports for
        output to other modules.
        """
        self.logger.info('populating output buffer')

        # Clear output buffer before populating it:
        self._out_data = []

        # Select data that should be sent to each destination module and
        # append it to the outgoing queue:
        for out_id in self.out_ids:
            # Select graded potential data using list of graded potential
            # ports that can transmit output:
            gpot_data = self.pm['gpot'][self._out_port_dict['gpot'][out_id]]

            # Select spiking ports that can transmit output:
            out_spike_ports_all = self._out_port_dict['spike'][out_id]

            # Find those ports that have emitted a spike:
            out_spike_ports_with_spikes = \
                self.pm['spike'].get_ports_nonzero()

            # Compute the intersection of the two sets of spiking ports
            # obtained above to determine which ports the destination module
            # must be informed about:
            out_spike_ports = \
                list(set(out_spike_ports_all).intersection(out_spike_ports_with_spikes))

            # Find the input ports in the destination module's interface to
            # which the spikes emitted by the current module's spiking ports
            # must be sent:
            from_int, to_int = self.pat_ints[out_id]
            spike_data = \
                self.patterns[out_id].dest_idx(from_int, to_int,
                                               'spike', 'spike',
                                               out_spike_ports)

            # NOTE(review): list.append cannot fail here, so the except branch
            # appears vestigial; kept to preserve behavior:
            try:
                # Stage the emitted port data for transmission:
                self._out_data.append((out_id, (gpot_data, spike_data)))
            except:
                self.logger.info('no output data to [%s] sent' % out_id)
            else:
                self.logger.info('output data to [%s] sent' % out_id)

    def run_step(self):
        """
        Module work method.

        This method should be implemented to do something interesting with new
        input port data in the module's `pm` attribute and update the
        attribute's output port data if necessary. It should not interact with
        any other class attributes.
        """
        self.logger.info('running execution step')

    def _init_port_dicts(self):
        """
        Initial dictionaries of source/destination ports in current module.
        """
        # Extract identifiers of source ports in the current module's
        # interface for all modules receiving output from the current module:
        self._out_port_dict['gpot'] = {}
        self._out_port_dict['spike'] = {}
        for out_id in self.out_ids:
            self.logger.info('extracting output ports for %s' % out_id)

            # Get interfaces of pattern connecting the current module to
            # destination module `out_id`; `from_int` is connected to the
            # current module, `to_int` is connected to the other module:
            from_int, to_int = self.pat_ints[out_id]

            # Get ports in interface (`from_int`) connected to the current
            # module that are connected to the other module via the pattern:
            self._out_port_dict['gpot'][out_id] = \
                self.patterns[out_id].src_idx(from_int, to_int, 'gpot', 'gpot')
            self._out_port_dict['spike'][out_id] = \
                self.patterns[out_id].src_idx(from_int, to_int, 'spike', 'spike')

        # Extract identifiers of destination ports in the current module's
        # interface for all modules sending input to the current module:
        self._in_port_dict['gpot'] = {}
        self._in_port_dict['spike'] = {}
        for in_id in self.in_ids:
            self.logger.info('extracting input ports for %s' % in_id)

            # Get interfaces of pattern connecting the current module to
            # source module `in_id`; `to_int` is connected to the current
            # module, `from_int` is connected to the other module:
            to_int, from_int = self.pat_ints[in_id]

            # Get ports in interface (`to_int`) connected to the current
            # module that are connected to the other module via the pattern:
            self._in_port_dict['gpot'][in_id] = \
                self.patterns[in_id].dest_idx(from_int, to_int, 'gpot', 'gpot')
            self._in_port_dict['spike'][in_id] = \
                self.patterns[in_id].dest_idx(from_int, to_int, 'spike', 'spike')

    def pre_run(self, *args, **kwargs):
        """
        Code to run before main module run loop.

        Code in this method will be executed after a module's process has been
        launched and all connectivity objects made available, but before the
        main run loop begins.
        """
        self._init_gpu()

    def run(self):
        """
        Body of process.
        """
        # Don't allow keyboard interruption of process:
        self.logger.info('starting')
        with IgnoreKeyboardInterrupt():
            # Initialize environment:
            self._init_net()

            # Initialize Buffer for incoming data. Dict used to store the
            # incoming data keyed by the source module id. Each value is a
            # queue buffering the received data:
            self._in_data = {k: collections.deque() for k in self.in_ids}

            # Initialize _out_port_dict and _in_port_dict attributes:
            self._init_port_dicts()

            # Perform any pre-emulation operations:
            self.pre_run()

            # NOTE(review): the main loop is always run under cProfile;
            # presumably intentional for this build, but verify before
            # deploying:
            cProfile.runctx('self.main_run()', globals(), locals(),
                            'prof%s.prof' % self.id)

            # Perform any post-emulation operations:
            self.post_run()

            # Shut down the control handler and inform the manager that the
            # module has shut down:
            self._ctrl_stream_shutdown()
            ack = 'shutdown'
            self.sock_ctrl.send(ack)
            self.logger.info('sent to manager: %s' % ack)

        self.logger.info('exiting')

    def main_run(self):
        # Run the processing loop until the maximum step count is reached or a
        # quit message clears self.running:
        self.running = True
        curr_steps = 0
        while curr_steps < self._steps:
            self.logger.info('execution step: %s' % curr_steps)

            # If the debug flag is set, don't catch exceptions so that errors
            # will lead to visible failures:
            if self.debug:
                # Get transmitted input data for processing:
                self._get_in_data()

                # Run the processing step:
                self.run_step()

                # Stage generated output data for transmission to other
                # modules:
                self._put_out_data()

                # Synchronize:
                self._sync()
            else:
                # Get transmitted input data for processing:
                catch_exception(self._get_in_data, self.logger.info)

                # Run the processing step:
                catch_exception(self.run_step, self.logger.info)

                # Stage generated output data for transmission to other
                # modules:
                catch_exception(self._put_out_data, self.logger.info)

                # Synchronize:
                catch_exception(self._sync, self.logger.info)

            # Exit run loop when a quit signal has been received:
            if not self.running:
                self.logger.info('run loop stopped')
                break

            curr_steps += 1
class BaseModule(ControlledProcess): """ Processing module. This class repeatedly executes a work method until it receives a quit message via its control network port. Parameters ---------- selector : str, unicode, or sequence Path-like selector describing the module's interface of exposed ports. data : numpy.ndarray Data array to associate with ports. Array length must equal the number of ports in a module's interface. columns : list of str Interface port attributes. port_data : int Network port for transmitting data. port_ctrl : int Network port for controlling the module instance. id : str Module identifier. If no identifier is specified, a unique identifier is automatically generated. debug : bool Debug flag. When True, exceptions raised during the work method are not be suppressed. Attributes ---------- interface : Interface Object containing information about a module's ports. patterns : dict of Pattern Pattern objects connecting the module instance with other module instances. Keyed on the ID of the other module instances. pat_ints : dict of tuple of int Interface of each pattern that is connected to the module instance. Keyed on the ID of the other module instances. pm : plsel.PortMapper Map between a module's ports and the contents of the `data` attribute. data : numpy.ndarray Array of data associated with a module's ports. Notes ----- If the network ports specified upon instantiation are None, the module instance ignores the network entirely. """ # Define properties to perform validation when connectivity status # is set: _net = 'none' @property def net(self): """ Network connectivity. 
""" return self._net @net.setter def net(self, value): if value not in ['none', 'ctrl', 'in', 'out', 'full']: raise ValueError('invalid network connectivity value') self.logger.info('net status changed: %s -> %s' % (self._net, value)) self._net = value # Define properties to perform validation when the maximum number of # execution steps set: _steps = np.inf @property def steps(self): """ Maximum number of steps to execute. """ return self._steps @steps.setter def steps(self, value): if value <= 0: raise ValueError('invalid maximum number of steps') self.logger.info('maximum number of steps changed: %s -> %s' % (self._steps, value)) self._steps = value def __init__(self, selector, data, columns=['interface', 'io', 'type'], port_data=PORT_DATA, port_ctrl=PORT_CTRL, id=None, debug=False): self.debug = debug # Generate a unique ID if none is specified: if id is None: id = uid() super(BaseModule, self).__init__(port_ctrl, id) # Logging: self.logger = twiggy.log.name('module %s' % self.id) # Data port: if port_data == port_ctrl: raise ValueError('data and control ports must differ') self.port_data = port_data # Initial network connectivity: self.net = 'none' # Create module interface given the specified ports: self.interface = Interface(selector, columns) # Set the interface ID to 0; we assume that a module only has one interface: self.interface[selector, 'interface'] = 0 # Set up mapper between port identifiers and their associated data: assert len(data) == len(self.interface) self.data = data self.pm = PortMapper(self.data, selector) # Patterns connecting this module instance with other modules instances. # Keyed on the IDs of those modules: self.patterns = {} # Each entry in pat_ints is a tuple containing the identifiers of which # of a pattern's identifiers are connected to the current module (first # entry) and the modules to which it is connected (second entry). 
# Keyed on the IDs of those modules: self.pat_ints = {} # Dict for storing incoming data; each entry (corresponding to each # module that sends input to the current module) is a deque containing # incoming data, which in turn contains transmitted data arrays. Deques # are used here to accommodate situations when multiple data from a # single source arrive: self._in_data = {} # List for storing outgoing data; each entry is a tuple whose first # entry is the source or destination module ID and whose second entry is # the data to transmit: self._out_data = [] # Dictionary containing ports of source modules that # send output to this module. Must be initialized immediately before # an emulation begins running. Keyed on source module ID: self._in_port_dict = {} # Dictionary containing ports of destination modules that # receive input from this module. Must be initialized immediately before # an emulation begins running. Keyed on destination module ID: self._out_port_dict = {} self._out_ids = [] self._in_ids = [] @property def N_ports(self): """ Number of ports exposed by module's interface. """ return len(self.interface.ports()) @property def all_ids(self): """ IDs of modules to which the current module is connected. """ return self.patterns.keys() @property def in_ids(self): """ IDs of modules that send data to this module. """ return [m for m in self.patterns.keys() \ if self.patterns[m].is_connected(self.pat_ints[m][1], self.pat_ints[m][0])] @property def out_ids(self): """ IDs of modules that receive data from this module. """ return [m for m in self.patterns.keys() \ if self.patterns[m].is_connected(self.pat_ints[m][0], self.pat_ints[m][1])] def connect(self, m, pat, int_0, int_1): """ Connect the current module instance to another module with a pattern instance. Parameters ---------- m : BaseModule Module instance to connect. pat : Pattern Pattern instance. 
        int_0, int_1 : int
            Which of the pattern's interface to connect to the current module
            and the specified module, respectively.
        """

        assert isinstance(m, BaseModule)
        assert isinstance(pat, Pattern)
        assert int_0 in pat.interface_ids and int_1 in pat.interface_ids
        self.logger.info('connecting to %s' % m.id)

        # Check compatibility of the interfaces exposed by the modules and the
        # pattern:
        assert self.interface.is_compatible(0, pat.interface, int_0)
        assert m.interface.is_compatible(0, pat.interface, int_1)

        # Check that no fan-in from different source modules occurs as a result
        # of the new connection by getting the union of all input ports for the
        # interfaces of all existing patterns connected to the current module
        # and ensuring that the input ports from the new pattern don't overlap:
        if self.patterns:
            curr_in_ports = reduce(set.union,
                [set(self.patterns[i].in_ports(self.pat_ints[i][0]).to_tuples()) \
                 for i in self.patterns.keys()])
            # NOTE(review): the comment above says the new pattern's input
            # ports must NOT overlap with the existing ones, yet this asserts
            # that the intersection is non-empty; it looks like it should be
            # `assert not curr_in_ports.intersection(...)` -- confirm.
            assert curr_in_ports.intersection(pat.in_ports(int_0).to_tuples())

        # The pattern instances associated with the current
        # module are keyed on the IDs of the modules to which they connect:
        self.patterns[m.id] = pat
        self.pat_ints[m.id] = (int_0, int_1)

        # Update internal connectivity based upon contents of connectivity
        # object. When this method is invoked, the module's internal
        # connectivity is always upgraded to at least 'ctrl':
        if self.net == 'none':
            self.net = 'ctrl'
        # Pattern sends data from this module's interface to the other one:
        if pat.is_connected(int_0, int_1):
            old_net = self.net
            if self.net == 'ctrl':
                self.net = 'out'
            elif self.net == 'in':
                self.net = 'full'
            self.logger.info('net status changed: %s -> %s' % (old_net, self.net))
        # Pattern sends data from the other module's interface to this one:
        if pat.is_connected(int_1, int_0):
            old_net = self.net
            if self.net == 'ctrl':
                self.net = 'in'
            elif self.net == 'out':
                self.net = 'full'
            self.logger.info('net status changed: %s -> %s' % (old_net, self.net))

    def _ctrl_stream_shutdown(self):
        """
        Shut down control port handler's stream and ioloop.
        """

        try:
            self.stream_ctrl.flush()
            self.stream_ctrl.stop_on_recv()
            self.ioloop_ctrl.stop()
        except IOError:
            self.logger.info('streams already closed')
        # NOTE(review): bare `except:` also swallows SystemExit/KeyboardInterrupt;
        # consider `except Exception:` here:
        except:
            self.logger.info('other error occurred')
        else:
            self.logger.info('ctrl stream shut down')

    def _ctrl_handler(self, msg):
        """
        Control port handler.

        Parameters
        ----------
        msg : sequence
            Received control message; `msg[0]` is the command name.
        """

        self.logger.info('recv ctrl message: %s' % str(msg))
        if msg[0] == 'quit':
            self._ctrl_stream_shutdown()

            # Force the module's main loop to exit:
            self.running = False
            ack = 'shutdown'

        # One can define additional messages to be recognized by the control
        # handler:
        # elif msg[0] == 'conn':
        #     self.logger.info('conn payload: '+str(msgpack.unpackb(msg[1])))
        #     ack = 'ack'
        else:
            ack = 'ack'
        self.sock_ctrl.send(ack)
        self.logger.info('sent to manager: %s' % ack)

    def _init_net(self):
        """
        Initialize network connection.
        """

        # Initialize control port handler:
        self.logger.info('initializing ctrl network connection')
        super(BaseModule, self)._init_net()

        if self.net == 'none':
            self.logger.info('not initializing data network connection')
        else:

            # Don't allow interrupts to prevent the handler from
            # completely executing each time it is called:
            with IgnoreKeyboardInterrupt():
                self.logger.info('initializing data network connection')

                # Use a nonblocking port for the data interface; set
                # the linger period to prevent hanging on unsent
                # messages when shutting down:
                self.sock_data = self.zmq_ctx.socket(zmq.DEALER)
                self.sock_data.setsockopt(zmq.IDENTITY, self.id)
                self.sock_data.setsockopt(zmq.LINGER, LINGER_TIME)
                self.sock_data.connect("tcp://localhost:%i" % self.port_data)
                self.logger.info('network connection initialized')

                # Set up a poller for detecting incoming data:
                self.data_poller = zmq.Poller()
                self.data_poller.register(self.sock_data, zmq.POLLIN)

    def _get_in_data(self):
        """
        Get input data from incoming transmission buffer.

        Populate the data array associated with a module's ports using input
        data received from other modules.
        """

        self.logger.info('retrieving from input buffer')

        # Since fan-in is not permitted, the data from all source modules
        # must necessarily map to different ports; we can therefore write each
        # of the received data to the array associated with the module's ports
        # here without worry of overwriting the data from each source module:
        for in_id in self._in_ids:

            # Check for exceptions so as to not fail on the first emulation
            # step when there is no input data to retrieve:
            try:
                self.pm[self._in_port_dict[in_id]] = self._in_data[in_id].popleft()
            except:
                self.logger.info('no input data from [%s] retrieved' % in_id)
            else:
                self.logger.info('input data from [%s] retrieved' % in_id)

    def _put_out_data(self):
        """
        Put output data in outgoing transmission buffer.

        Stage data from the data array associated with a module's ports for
        output to other modules.
        """

        self.logger.info('populating output buffer')

        # Clear output buffer before populating it:
        self._out_data = []

        # Select data that should be sent to each destination module and append
        # it to the outgoing queue:
        for out_id in self._out_ids:
            try:
                self._out_data.append((out_id, self.pm[self._out_port_dict[out_id]]))
            except:
                self.logger.info('no output data to [%s] sent' % out_id)
            else:
                self.logger.info('output data to [%s] sent' % out_id)

    def _sync(self):
        """
        Send output data and receive input data.

        Notes
        -----
        Assumes that the attributes used for input and output already exist.

        Each message is a tuple containing a module ID and data; for outbound
        messages, the ID is that of the destination module. for inbound
        messages, the ID is that of the source module. Data is serialized
        before being sent and unserialized when received.
        """

        if self.net in ['none', 'ctrl']:
            self.logger.info('not synchronizing with network')
        else:
            self.logger.info('synchronizing with network')

            # Send outbound data:
            if self.net in ['out', 'full']:

                # Send all data in outbound buffer:
                send_ids = [out_id for out_id in self._out_ids]
                for out_id, data in self._out_data:
                    self.sock_data.send(msgpack.packb((out_id, data)))
                    send_ids.remove(out_id)
                    self.logger.info('sent to %s: %s' % (out_id, str(data)))

                # Send data tuples containing None to those modules for which no
                # actual data was generated to satisfy the barrier condition:
                for out_id in send_ids:
                    self.sock_data.send(msgpack.packb((out_id, None)))
                    self.logger.info('sent to %s: %s' % (out_id, None))

                # All output IDs should be sent data by this point:
                self.logger.info('sent data to all output IDs')

            # Receive inbound data:
            if self.net in ['in', 'full']:
                # Wait until inbound data is received from all source modules
                # (an empty deque is falsy, so `all` is True only when every
                # source has delivered at least one item):
                while not all((q for q in self._in_data.itervalues())):

                    # Use poller to avoid blocking:
                    if is_poll_in(self.sock_data, self.data_poller):
                        in_id, data = msgpack.unpackb(self.sock_data.recv())
                        self.logger.info('recv from %s: %s ' % (in_id, str(data)))

                        # Ignore incoming data containing None:
                        if data is not None:
                            self._in_data[in_id].append(data)

                    # Stop the synchronization if a quit message has been received:
                    if not self.running:
                        self.logger.info('run loop stopped - stopping sync')
                        break
                self.logger.info('recv data from all input IDs')

    def pre_run(self, *args, **kwargs):
        """
        Code to run before main module run loop.

        Code in this method will be executed after a module's process has been
        launched and all connectivity objects made available, but before the
        main run loop begins.
        """

        self.logger.info('performing pre-emulation operations')

    def post_run(self, *args, **kwargs):
        """
        Code to run after main module run loop.

        Code in this method will be executed after a module's main loop has
        terminated.
        """

        self.logger.info('performing post-emulation operations')

    def run_step(self):
        """
        Module work method.

        This method should be implemented to do something interesting with new
        input port data in the module's `pm` attribute and update the attribute's
        output port data if necessary. It should not interact with any other
        class attributes.
        """

        self.logger.info('running execution step')

    def _init_port_dicts(self):
        """
        Initial dictionaries of source/destination ports in current module.
        """

        # Extract identifiers of source ports in the current module's interface
        # for all modules receiving output from the current module:
        self._out_port_dict = {}
        self._out_ids = self.out_ids
        for out_id in self._out_ids:
            self.logger.info('extracting output ports for %s' % out_id)

            # Get interfaces of pattern connecting the current module to
            # destination module `out_id`; `from_int` is connected to the
            # current module, `to_int` is connected to the other module:
            from_int, to_int = self.pat_ints[out_id]

            # Get ports in interface (`from_int`) connected to the current
            # module that are connected to the other module via the pattern:
            self._out_port_dict[out_id] = \
                self.patterns[out_id].src_idx(from_int, to_int)

        # Extract identifiers of destination ports in the current module's
        # interface for all modules sending input to the current module:
        self._in_port_dict = {}
        self._in_ids = self.in_ids
        for in_id in self._in_ids:
            self.logger.info('extracting input ports for %s' % in_id)

            # Get interfaces of pattern connecting the current module to
            # source module `out_id`; `to_int` is connected to the current
            # module, `from_int` is connected to the other module:
            to_int, from_int = self.pat_ints[in_id]

            # Get ports in interface (`to_int`) connected to the current
            # module that are connected to the other module via the pattern:
            self._in_port_dict[in_id] = \
                self.patterns[in_id].dest_idx(from_int, to_int)

    def run(self):
        """
        Body of process.
        """

        # Don't allow keyboard interruption of process:
        self.logger.info('starting')
        with IgnoreKeyboardInterrupt():

            # Initialize environment:
            self._init_net()

            # Initialize _out_port_dict and _in_port_dict attributes:
            self._init_port_dicts()

            # Initialize Buffer for incoming data. Dict used to store the
            # incoming data keyed by the source module id. Each value is a
            # queue buffering the received data:
            self._in_data = {k: collections.deque() for k in self.in_ids}

            # Perform any pre-emulation operations:
            self.pre_run()

            self.running = True
            curr_steps = 0
            while curr_steps < self._steps:
                self.logger.info('execution step: %s' % curr_steps)

                # If the debug flag is set, don't catch exceptions so that
                # errors will lead to visible failures:
                if self.debug:

                    # Get input data:
                    self._get_in_data()

                    # Run the processing step:
                    self.run_step()

                    # Prepare the generated data for output:
                    self._put_out_data()

                    # Synchronize:
                    self._sync()
                else:

                    # Get input data:
                    catch_exception(self._get_in_data, self.logger.info)

                    # Run the processing step:
                    catch_exception(self.run_step, self.logger.info)

                    # Prepare the generated data for output:
                    catch_exception(self._put_out_data, self.logger.info)

                    # Synchronize:
                    catch_exception(self._sync, self.logger.info)

                # Exit run loop when a quit message has been received:
                if not self.running:
                    self.logger.info('run loop stopped')
                    break

                curr_steps += 1

            # Perform any post-emulation operations:
            self.post_run()

            # Shut down the control handler and inform the manager that the
            # module has shut down:
            self._ctrl_stream_shutdown()
            ack = 'shutdown'
            self.sock_ctrl.send(ack)
            self.logger.info('sent to manager: %s' % ack)

        self.logger.info('exiting')
    def __init__(self, sel, sel_in, sel_out,
                 sel_gpot, sel_spike, data_gpot, data_spike,
                 columns=['interface', 'io', 'type'],
                 port_data=PORT_DATA, port_ctrl=PORT_CTRL, port_time=PORT_TIME,
                 id=None, device=None, debug=False, time_sync=False):
        """
        Construct the module and set up its interface, port mappers, and
        internal communication state.

        Parameters
        ----------
        sel, sel_in, sel_out, sel_gpot, sel_spike : str, unicode, or sequence
            Selectors respectively describing all ports, input ports, output
            ports, graded potential ports, and spiking ports in the module's
            interface.
        data_gpot, data_spike : numpy.ndarray
            Data arrays associated with the graded potential and spiking ports.
        columns : list of str
            Interface port attributes; must include 'interface', 'io', 'type'.
        port_data, port_ctrl, port_time : int
            Network ports for data, control, and timing; must all differ.
        id : str
            Module identifier; generated automatically if None.
        device : int
            GPU device to use; may be None.
        debug : bool
            When True, exceptions during the work method are not suppressed.
        time_sync : bool
            When True, synchronization timing data is collected.
        """

        self.debug = debug
        self.time_sync = time_sync
        self.device = device

        # Require several necessary attribute columns:
        assert 'interface' in columns
        assert 'io' in columns
        assert 'type' in columns

        # Generate a unique ID if none is specified:
        if id is None:
            id = uid()

        # Call super for BaseModule rather than Module because most of the
        # functionality of the former's constructor must be overridden in any case:
        super(BaseModule, self).__init__(port_ctrl, id)

        # Reformat logger name:
        LoggerMixin.__init__(self, 'mod %s' % self.id)

        # Data port (must be distinct from the control and time ports):
        if port_data == port_ctrl:
            raise ValueError('data and control ports must differ')
        self.port_data = port_data
        if port_time == port_ctrl or port_time == port_data:
            raise ValueError('time port must differ from data and control ports')
        self.port_time = port_time

        # Initial network connectivity ('none' is upgraded as patterns are
        # connected):
        self.net = 'none'

        # Create module interface given the specified ports:
        self.interface = Interface(sel, columns)

        # Set the interface ID to 0
        # we assume that a module only has one interface:
        self.interface[sel, 'interface'] = 0

        # Set port types; input/output and gpot/spike selectors must each be
        # disjoint subsets of the full port selector:
        assert SelectorMethods.is_in(sel_in, sel)
        assert SelectorMethods.is_in(sel_out, sel)
        assert SelectorMethods.are_disjoint(sel_in, sel_out)
        self.interface[sel_in, 'io'] = 'in'
        self.interface[sel_out, 'io'] = 'out'
        assert SelectorMethods.is_in(sel_gpot, sel)
        assert SelectorMethods.is_in(sel_spike, sel)
        assert SelectorMethods.are_disjoint(sel_gpot, sel_spike)
        self.interface[sel_gpot, 'type'] = 'gpot'
        self.interface[sel_spike, 'type'] = 'spike'

        # Set up mapper between port identifiers and their associated data:
        assert len(data_gpot) == len(self.interface.gpot_ports())
        assert len(data_spike) == len(self.interface.spike_ports())
        self.data = {}
        self.data['gpot'] = data_gpot
        self.data['spike'] = data_spike
        self.pm = {}
        self.pm['gpot'] = PortMapper(sel_gpot, self.data['gpot'])
        self.pm['spike'] = PortMapper(sel_spike, self.data['spike'])

        # Patterns connecting this module instance with other modules instances.
        # Keyed on the IDs of those modules:
        self.patterns = {}

        # Each entry in pat_ints is a tuple containing the identifiers of which
        # of a pattern's identifiers are connected to the current module (first
        # entry) and the modules to which it is connected (second entry).
        # Keyed on the IDs of those modules:
        self.pat_ints = {}

        # Dict for storing incoming data; each entry (corresponding to each
        # module that sends input to the current module) is a deque containing
        # incoming data, which in turn contains transmitted data arrays. Deques
        # are used here to accommodate situations when multiple data from a
        # single source arrive:
        self._in_data = {}

        # List for storing outgoing data; each entry is a tuple whose first
        # entry is the source or destination module ID and whose second entry is
        # the data to transmit:
        self._out_data = []

        # Dictionaries containing ports of source modules that
        # send output to this module. Must be initialized immediately before
        # an emulation begins running. Keyed on source module ID:
        self._in_port_dict = {}
        self._in_port_dict_ids = {}
        self._in_port_dict['gpot'] = {}
        self._in_port_dict['spike'] = {}

        # Dictionaries containing ports of destination modules that
        # receive input from this module. Must be initialized immediately before
        # an emulation begins running. Keyed on destination module ID:
        self._out_port_dict = {}
        self._out_port_dict_ids = {}
        self._out_port_dict['gpot'] = {}
        self._out_port_dict['spike'] = {}

        self._out_ids = []
        self._in_ids = []
class Module(BaseModule):
    """
    GPU-based processing module that exchanges port data over MPI.

    Port data is kept in pycuda GPU arrays; during synchronization the entire
    port data array is transmitted to each destination module and the relevant
    elements of each source module's array are copied out of preallocated
    receive buffers.
    """

    def __init__(self, sel, sel_in, sel_out,
                 sel_gpot, sel_spike, data_gpot, data_spike,
                 columns=['interface', 'io', 'type'],
                 ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG, spike_tag=SPIKE_TAG,
                 id=None, device=None, routing_table=None, rank_to_id=None,
                 pm_all=None, debug=False, time_sync=False):
        """
        See the class docstring of the non-GPU Module for parameter meanings;
        `pm_all` is a dict of BasePortMapper instances keyed by data type and
        module ID describing the interfaces of all modules.
        """

        # Call super for BaseModule rather than Module because most of the
        # functionality of the former's constructor must be overridden in any case:
        super(BaseModule, self).__init__(ctrl_tag)
        self.debug = debug
        self.time_sync = time_sync
        self.device = device

        # MPI tags identifying gpot/spike data messages:
        self._gpot_tag = gpot_tag
        self._spike_tag = spike_tag

        # Require several necessary attribute columns:
        assert 'interface' in columns
        assert 'io' in columns
        assert 'type' in columns

        self._init_gpu()

        # This is needed to ensure that MPI_Finalize is called before PyCUDA
        # attempts to clean up; see
        # https://groups.google.com/forum/#!topic/mpi4py/by0Rd5q0Ayw
        atexit.register(MPI.Finalize)

        # Manually register the file close method associated with MPIOutput
        # so that it is called by atexit before MPI.Finalize() (if the file is
        # closed after MPI.Finalize() is called, an error will occur):
        for k, v in twiggy.emitters.iteritems():
            if isinstance(v._output, MPIOutput):
                atexit.register(v._output.close)

        # Ensure that the input and output port selectors respectively
        # select mutually exclusive subsets of the set of all ports exposed by
        # the module:
        assert SelectorMethods.is_in(sel_in, sel)
        assert SelectorMethods.is_in(sel_out, sel)
        assert SelectorMethods.are_disjoint(sel_in, sel_out)

        # Ensure that the graded potential and spiking port selectors
        # respectively select mutually exclusive subsets of the set of all ports
        # exposed by the module:
        assert SelectorMethods.is_in(sel_gpot, sel)
        assert SelectorMethods.is_in(sel_spike, sel)
        assert SelectorMethods.are_disjoint(sel_gpot, sel_spike)

        # Save routing table and mapping between MPI ranks and module IDs:
        self.routing_table = routing_table
        self.rank_to_id = rank_to_id

        # Save module interface data (stored in a dict of BasePortMapper
        # instances):
        self.pm_all = pm_all

        # Generate a unique ID if none is specified:
        if id is None:
            self.id = uid()
        else:
            # If a unique ID was specified, it must be a node in
            # the routing table:
            if routing_table is not None and not routing_table.has_node(id):
                raise ValueError('routing table must contain specified module ID')
            self.id = id

        # Reformat logger name:
        LoggerMixin.__init__(self, 'mod %s' % self.id)

        # Create module interface given the specified ports:
        self.interface = Interface(sel, columns)

        # Set the interface ID to 0; we assume that a module only has one interface:
        self.interface[sel, 'interface'] = 0

        # Set the port attributes:
        self.interface[sel_in, 'io'] = 'in'
        self.interface[sel_out, 'io'] = 'out'
        self.interface[sel_gpot, 'type'] = 'gpot'
        self.interface[sel_spike, 'type'] = 'spike'

        # Find the input and output ports:
        self.in_ports = self.interface.in_ports().to_tuples()
        self.out_ports = self.interface.out_ports().to_tuples()

        # Find the graded potential and spiking ports:
        self.gpot_ports = self.interface.gpot_ports().to_tuples()
        self.spike_ports = self.interface.spike_ports().to_tuples()

        self.in_gpot_ports = self.interface.in_ports().gpot_ports().to_tuples()
        self.in_spike_ports = self.interface.in_ports().spike_ports().to_tuples()
        self.out_gpot_ports = self.interface.out_ports().gpot_ports().to_tuples()
        self.out_spike_ports = self.interface.out_ports().spike_ports().to_tuples()

        # Set up mapper between port identifiers and their associated data;
        # the data live on the GPU:
        assert len(data_gpot) == len(self.gpot_ports)
        assert len(data_spike) == len(self.spike_ports)
        self.data = {}
        self.data['gpot'] = gpuarray.to_gpu(data_gpot)
        self.data['spike'] = gpuarray.to_gpu(data_spike)
        self.pm = {}
        self.pm['gpot'] = GPUPortMapper(sel_gpot, self.data['gpot'], make_copy=False)
        self.pm['spike'] = GPUPortMapper(sel_spike, self.data['spike'], make_copy=False)

    def _init_port_dicts(self):
        """
        Initial dictionaries of source/destination ports in current module.
        """

        # Extract identifiers of source ports in all modules sending input to
        # the current module's ports and of destination ports in the current
        # module's interface for all modules sending input to the current
        # module:
        self._in_port_dict = {}
        self._in_port_dict['gpot'] = {}
        self._in_port_dict['spike'] = {}
        self._in_port_dict_ids = {}
        self._in_port_dict_ids['gpot'] = {}
        self._in_port_dict_ids['spike'] = {}

        self._from_port_dict = {}
        self._from_port_dict['gpot'] = {}
        self._from_port_dict['spike'] = {}
        self._from_port_dict_ids = {}
        self._from_port_dict_ids['gpot'] = {}
        self._from_port_dict_ids['spike'] = {}

        self._in_ids = self.routing_table.src_ids(self.id)
        for in_id in self._in_ids:
            self.log_info('extracting input ports for %s' % in_id)

            # Get interfaces of pattern connecting the current module to
            # source module `in_id`; `int_1` is connected to the current
            # module, `int_0` is connected to the other module:
            pat = self.routing_table[in_id, self.id]['pattern']
            int_0 = self.routing_table[in_id, self.id]['int_0']
            int_1 = self.routing_table[in_id, self.id]['int_1']

            # Get ports in interface (`int_1`) connected to the current
            # module that are connected to the other module via the pattern:
            self._in_port_dict['gpot'][in_id] = \
                pat.dest_idx(int_0, int_1, 'gpot', 'gpot')
            self._in_port_dict_ids['gpot'][in_id] = \
                self.pm['gpot'].ports_to_inds(self._in_port_dict['gpot'][in_id])
            self._in_port_dict['spike'][in_id] = \
                pat.dest_idx(int_0, int_1, 'spike', 'spike')
            self._in_port_dict_ids['spike'][in_id] = \
                self.pm['spike'].ports_to_inds(self._in_port_dict['spike'][in_id])

            # Get ports in interface (`int_0`) connected to the other module
            # that are connected to the current module via the pattern:
            self._from_port_dict['gpot'][in_id] = \
                pat.src_idx(int_0, int_1, 'gpot', 'gpot')
            self._from_port_dict_ids['gpot'][in_id] = \
                self.pm_all['gpot'][in_id].ports_to_inds(self._from_port_dict['gpot'][in_id])
            self._from_port_dict['spike'][in_id] = \
                pat.src_idx(int_0, int_1, 'spike', 'spike')
            # BUG FIX: the spike index mapping was formerly computed from the
            # 'gpot' port list (copy-paste error); use the 'spike' port list:
            self._from_port_dict_ids['spike'][in_id] = \
                self.pm_all['spike'][in_id].ports_to_inds(self._from_port_dict['spike'][in_id])

    def _init_comm_bufs(self):
        """
        Buffers for receiving data from other modules.

        Notes
        -----
        Must be executed after `_init_port_dicts()`.
        """

        # Buffer interface to and MPI type of this module's port data array:
        self._data_int = {}
        self._data_int['gpot'] = bufint(self.data['gpot'])
        self._data_int['spike'] = bufint(self.data['spike'])
        self._data_mtype = {}
        self._data_mtype['gpot'] = dtype_to_mpi(self.data['gpot'].dtype)
        self._data_mtype['spike'] = dtype_to_mpi(self.data['spike'].dtype)

        # Buffers (and their interfaces and MPI types) for receiving data
        # transmitted from source modules:
        self._in_buf = {}
        self._in_buf['gpot'] = {}
        self._in_buf['spike'] = {}
        self._in_buf_int = {}
        self._in_buf_int['gpot'] = {}
        self._in_buf_int['spike'] = {}
        self._in_buf_mtype = {}
        self._in_buf_mtype['gpot'] = {}
        self._in_buf_mtype['spike'] = {}
        for in_id in self._in_ids:

            # The buffers must be the same size as the port data arrays of the
            # modules from which they received data:
            self._in_buf['gpot'][in_id] = \
                gpuarray.empty(len(self.pm_all['gpot'][in_id]),
                               self.pm['gpot'].dtype)
            self._in_buf_int['gpot'][in_id] = bufint(self._in_buf['gpot'][in_id])
            # BUG FIX: pass the buffer's dtype (not the buffer itself) to
            # dtype_to_mpi, consistent with the _data_mtype entries above:
            self._in_buf_mtype['gpot'][in_id] = \
                dtype_to_mpi(self._in_buf['gpot'][in_id].dtype)

            self._in_buf['spike'][in_id] = \
                gpuarray.empty(len(self.pm_all['spike'][in_id]),
                               self.pm['spike'].dtype)
            self._in_buf_int['spike'][in_id] = bufint(self._in_buf['spike'][in_id])
            self._in_buf_mtype['spike'][in_id] = \
                dtype_to_mpi(self._in_buf['spike'][in_id].dtype)

    def _sync(self):
        """
        Send output data and receive input data.
        """

        if self.time_sync:
            start = time.time()
        req = MPI.Request()
        requests = []

        # Transmit the entire port data array to each destination module:
        dest_ids = self.routing_table.dest_ids(self.id)
        for dest_id in dest_ids:
            dest_rank = self.rank_to_id[:dest_id]
            r = MPI.COMM_WORLD.Isend([self._data_int['gpot'],
                                      self._data_mtype['gpot']],
                                     dest_rank, GPOT_TAG)
            requests.append(r)
            r = MPI.COMM_WORLD.Isend([self._data_int['spike'],
                                      self._data_mtype['spike']],
                                     dest_rank, SPIKE_TAG)
            requests.append(r)
            if not self.time_sync:
                self.log_info('sending to %s' % dest_id)
        if not self.time_sync:
            self.log_info('sent all data from %s' % self.id)

        # For each source module, receive elements and copy them into the
        # current module's port data array:
        src_ids = self.routing_table.src_ids(self.id)
        for src_id in src_ids:
            src_rank = self.rank_to_id[:src_id]
            r = MPI.COMM_WORLD.Irecv([self._in_buf_int['gpot'][src_id],
                                      self._in_buf_mtype['gpot'][src_id]],
                                     source=src_rank, tag=GPOT_TAG)
            requests.append(r)
            r = MPI.COMM_WORLD.Irecv([self._in_buf_int['spike'][src_id],
                                      self._in_buf_mtype['spike'][src_id]],
                                     source=src_rank, tag=SPIKE_TAG)
            requests.append(r)
            if not self.time_sync:
                self.log_info('receiving from %s' % src_id)
        req.Waitall(requests)
        if not self.time_sync:
            self.log_info('received all data received by %s' % self.id)

        # Copy received elements into the current module's data array:
        n_gpot = 0
        n_spike = 0
        for src_id in src_ids:
            ind_from_gpot = self._from_port_dict_ids['gpot'][src_id]
            ind_in_gpot = self._in_port_dict_ids['gpot'][src_id]
            set_by_inds_from_inds(self.data['gpot'], ind_in_gpot,
                                  self._in_buf['gpot'][src_id], ind_from_gpot)
            n_gpot += len(self._in_buf['gpot'][src_id])
            ind_from_spike = self._from_port_dict_ids['spike'][src_id]
            ind_in_spike = self._in_port_dict_ids['spike'][src_id]
            set_by_inds_from_inds(self.data['spike'], ind_in_spike,
                                  self._in_buf['spike'][src_id], ind_from_spike)
            n_spike += len(self._in_buf['spike'][src_id])

        # Save timing data:
        if self.time_sync:
            stop = time.time()
            #self.log_info('sent timing data to master')
            self.intercomm.isend(['time',
                                  (self.rank, self.steps, start, stop,
                                   n_gpot*self.pm['gpot'].dtype.itemsize+\
                                   n_spike*self.pm['spike'].dtype.itemsize)],
                                 dest=0, tag=self._ctrl_tag)
        else:
            self.log_info('saved all data received by %s' % self.id)
class Module(mpi.Worker):
    """
    Processing module.

    This class repeatedly executes a work method until it receives a quit
    message via its control network port.

    Parameters
    ----------
    sel : str, unicode, or sequence
        Path-like selector describing the module's interface of exposed ports.
    sel_in, sel_out, sel_gpot, sel_spike : str, unicode, or sequence
        Selectors respectively describing all input, output, graded potential,
        and spiking ports in the module's interface.
    data_gpot, data_spike : numpy.ndarray
        Data arrays associated with the graded potential and spiking ports in
        the module's interface. Array length must equal the number of ports in
        a module's interface.
    columns : list of str
        Interface port attributes.
    ctrl_tag, gpot_tag, spike_tag : int
        MPI tags that respectively identify messages containing control data,
        graded potential port values, and spiking port values transmitted to
        worker nodes.
    id : str
        Module identifier. If no identifier is specified, a unique identifier
        is automatically generated.
    device : int
        GPU device to use. May be set to None if the module does not perform
        GPU processing.
    routing_table : neurokernel.routing_table.RoutingTable
        Routing table describing data connections between modules. If no
        routing table is specified, the module will be executed in isolation.
    rank_to_id : bidict.bidict
        Mapping between MPI ranks and module object IDs.
    debug : bool
        Debug flag. When True, exceptions raised during the work method are
        not suppressed.
    time_sync : bool
        Time synchronization flag. When True, debug messages are not emitted
        during module synchronization and the time taken to receive all
        incoming data is computed.

    Attributes
    ----------
    interface : Interface
        Object containing information about a module's ports.
    pm : dict
        `pm['gpot']` and `pm['spike']` are instances of neurokernel.pm.PortMapper
        that map a module's ports to the contents of the values in `data`.
    data : dict
        `data['gpot']` and `data['spike']` are arrays of data associated with
        a module's graded potential and spiking ports.
    """

    def __init__(self, sel, sel_in, sel_out,
                 sel_gpot, sel_spike, data_gpot, data_spike,
                 columns=['interface', 'io', 'type'],
                 ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG, spike_tag=SPIKE_TAG,
                 id=None, device=None, routing_table=None, rank_to_id=None,
                 debug=False, time_sync=False):
        super(Module, self).__init__(ctrl_tag)
        self.debug = debug
        self.time_sync = time_sync
        self.device = device

        self._gpot_tag = gpot_tag
        self._spike_tag = spike_tag

        # Require several necessary attribute columns:
        if 'interface' not in columns:
            raise ValueError('interface column required')
        if 'io' not in columns:
            raise ValueError('io column required')
        if 'type' not in columns:
            raise ValueError('type column required')

        # Manually register the file close method associated with MPIOutput
        # so that it is called by atexit before MPI.Finalize() (if the file is
        # closed after MPI.Finalize() is called, an error will occur):
        for k, v in twiggy.emitters.iteritems():
            if isinstance(v._output, MPIOutput):
                atexit.register(v._output.close)

        # Ensure that the input and output port selectors respectively
        # select mutually exclusive subsets of the set of all ports exposed by
        # the module:
        if not SelectorMethods.is_in(sel_in, sel):
            raise ValueError('input port selector not in selector of all ports')
        if not SelectorMethods.is_in(sel_out, sel):
            raise ValueError('output port selector not in selector of all ports')
        if not SelectorMethods.are_disjoint(sel_in, sel_out):
            raise ValueError('input and output port selectors not disjoint')

        # Ensure that the graded potential and spiking port selectors
        # respectively select mutually exclusive subsets of the set of all ports
        # exposed by the module:
        if not SelectorMethods.is_in(sel_gpot, sel):
            raise ValueError('gpot port selector not in selector of all ports')
        if not SelectorMethods.is_in(sel_spike, sel):
            raise ValueError('spike port selector not in selector of all ports')
        if not SelectorMethods.are_disjoint(sel_gpot, sel_spike):
            raise ValueError('gpot and spike port selectors not disjoint')

        # Save routing table and mapping between MPI ranks and module IDs:
        self.routing_table = routing_table
        self.rank_to_id = rank_to_id

        # Generate a unique ID if none is specified:
        if id is None:
            self.id = uid()
        else:
            # If a unique ID was specified and the routing table is not empty
            # (i.e., there are connections between multiple modules),
            # the id must be a node in the table:
            if routing_table is not None and len(routing_table.ids) and \
               not routing_table.has_node(id):
                raise ValueError('routing table must contain specified module ID')
            self.id = id

        # Reformat logger name:
        LoggerMixin.__init__(self, 'mod %s' % self.id)

        # Create module interface given the specified ports:
        self.interface = Interface(sel, columns)

        # Set the interface ID to 0; we assume that a module only has one interface:
        self.interface[sel, 'interface'] = 0

        # Set the port attributes:
        self.interface[sel_in, 'io'] = 'in'
        self.interface[sel_out, 'io'] = 'out'
        self.interface[sel_gpot, 'type'] = 'gpot'
        self.interface[sel_spike, 'type'] = 'spike'

        # Find the input and output ports:
        self.in_ports = self.interface.in_ports().to_tuples()
        self.out_ports = self.interface.out_ports().to_tuples()

        # Find the graded potential and spiking ports:
        self.gpot_ports = self.interface.gpot_ports().to_tuples()
        self.spike_ports = self.interface.spike_ports().to_tuples()

        self.in_gpot_ports = self.interface.in_ports().gpot_ports().to_tuples()
        self.in_spike_ports = self.interface.in_ports().spike_ports().to_tuples()
        self.out_gpot_ports = self.interface.out_ports().gpot_ports().to_tuples()
        self.out_spike_ports = self.interface.out_ports().spike_ports().to_tuples()

        # Set up mapper between port identifiers and their associated data:
        if len(data_gpot) != len(self.gpot_ports):
            raise ValueError('incompatible gpot port data array length')
        if len(data_spike) != len(self.spike_ports):
            raise ValueError('incompatible spike port data array length')
        self.data = {}
        self.data['gpot'] = data_gpot
        self.data['spike'] = data_spike
        self.pm = {}
        self.pm['gpot'] = PortMapper(sel_gpot, self.data['gpot'])
        self.pm['spike'] = PortMapper(sel_spike, self.data['spike'])

    def _init_gpu(self):
        """
        Initialize GPU device.

        Notes
        -----
        Must be called from within the `run()` method, not from within
        `__init__()`.
        """

        if self.device is None:
            self.log_info('no GPU specified - not initializing ')
        else:

            # Import pycuda.driver here so as to facilitate the
            # subclassing of Module to create pure Python LPUs that don't use GPUs:
            import pycuda.driver as drv
            drv.init()
            try:
                self.gpu_ctx = drv.Device(self.device).make_context()
            except Exception as e:
                self.log_info('_init_gpu exception: ' + e.message)
            else:
                atexit.register(self.gpu_ctx.pop)
                self.log_info('GPU initialized')

    def _init_port_dicts(self):
        """
        Initial dictionaries of source/destination ports in current module.
        """

        # Extract identifiers of source ports in the current module's interface
        # for all modules receiving output from the current module:
        self._out_port_dict = {}
        self._out_port_dict['gpot'] = {}
        self._out_port_dict['spike'] = {}
        self._out_port_dict_ids = {}
        self._out_port_dict_ids['gpot'] = {}
        self._out_port_dict_ids['spike'] = {}

        self._out_ids = self.routing_table.dest_ids(self.id)
        self._out_ranks = [self.rank_to_id.inv[i] for i in self._out_ids]
        for out_id in self._out_ids:
            self.log_info('extracting output ports for %s' % out_id)

            # Get interfaces of pattern connecting the current module to
            # destination module `out_id`; `int_0` is connected to the
            # current module, `int_1` is connected to the other module:
            pat = self.routing_table[self.id, out_id]['pattern']
            int_0 = self.routing_table[self.id, out_id]['int_0']
            int_1 = self.routing_table[self.id, out_id]['int_1']

            # Get ports in interface (`int_0`) connected to the current
            # module that are connected to the other module via the pattern:
            self._out_port_dict['gpot'][out_id] = \
                pat.src_idx(int_0, int_1, 'gpot', 'gpot')
            self._out_port_dict_ids['gpot'][out_id] = \
                self.pm['gpot'].ports_to_inds(self._out_port_dict['gpot'][out_id])
            self._out_port_dict['spike'][out_id] = \
                pat.src_idx(int_0, int_1, 'spike', 'spike')
            self._out_port_dict_ids['spike'][out_id] = \
                self.pm['spike'].ports_to_inds(self._out_port_dict['spike'][out_id])

        # Extract identifiers of destination ports in the current module's
        # interface for all modules sending input to the current module:
        self._in_port_dict = {}
        self._in_port_dict['gpot'] = {}
        self._in_port_dict['spike'] = {}
        self._in_port_dict_ids = {}
        self._in_port_dict_ids['gpot'] = {}
        self._in_port_dict_ids['spike'] = {}

        self._in_ids = self.routing_table.src_ids(self.id)
        self._in_ranks = [self.rank_to_id.inv[i] for i in self._in_ids]
        for in_id in self._in_ids:
            self.log_info('extracting input ports for %s' % in_id)

            # Get interfaces of pattern connecting the current module to
            # source module `in_id`; `int_1` is connected to the current
            # module, `int_0` is connected to the other module:
            pat = self.routing_table[in_id, self.id]['pattern']
            int_0 = self.routing_table[in_id, self.id]['int_0']
            int_1 = self.routing_table[in_id, self.id]['int_1']

            # Get ports in interface (`int_1`) connected to the current
            # module that are connected to the other module via the pattern:
            self._in_port_dict['gpot'][in_id] = \
                pat.dest_idx(int_0, int_1, 'gpot', 'gpot')
            self._in_port_dict_ids['gpot'][in_id] = \
                self.pm['gpot'].ports_to_inds(self._in_port_dict['gpot'][in_id])
            self._in_port_dict['spike'][in_id] = \
                pat.dest_idx(int_0, int_1, 'spike', 'spike')
            self._in_port_dict_ids['spike'][in_id] = \
                self.pm['spike'].ports_to_inds(self._in_port_dict['spike'][in_id])

    def _init_data_in(self):
        """
        Buffers for receiving data from other modules.

        Notes
        -----
        Must be executed after `_init_port_dicts()`.
        """

        # Allocate arrays for receiving data transmitted to the module so that
        # they don't have to be reallocated during every execution step
        # synchronization:
        self.data_in = {}
        self.data_in['gpot'] = {}
        self.data_in['spike'] = {}
        for in_id in self._in_ids:
            self.data_in['gpot'][in_id] = \
                np.empty(np.shape(self._in_port_dict['gpot'][in_id]),
                         self.pm['gpot'].dtype)
            self.data_in['spike'][in_id] = \
                np.empty(np.shape(self._in_port_dict['spike'][in_id]),
                         self.pm['spike'].dtype)

    def _sync(self):
        """
        Send output data and receive input data.
        """

        if self.time_sync:
            start = time.time()
        req = MPI.Request()
        requests = []

        # For each destination module, extract elements from the current
        # module's port data array, copy them to a contiguous array, and
        # transmit the latter:
        for dest_id, dest_rank in zip(self._out_ids, self._out_ranks):

            # Get source ports in current module that are connected to the
            # destination module:
            data_gpot = self.pm['gpot'].get_by_inds(self._out_port_dict_ids['gpot'][dest_id])
            data_spike = self.pm['spike'].get_by_inds(self._out_port_dict_ids['spike'][dest_id])
            if not self.time_sync:
                self.log_info('gpot data being sent to %s: %s' % \
                              (dest_id, str(data_gpot)))
                self.log_info('spike data being sent to %s: %s' % \
                              (dest_id, str(data_spike)))
            r = MPI.COMM_WORLD.Isend([data_gpot, dtype_to_mpi(data_gpot.dtype)],
                                     dest_rank, GPOT_TAG)
            requests.append(r)
            r = MPI.COMM_WORLD.Isend([data_spike, dtype_to_mpi(data_spike.dtype)],
                                     dest_rank, SPIKE_TAG)
            requests.append(r)
            if not self.time_sync:
                self.log_info('sending to %s' % dest_id)
        if not self.time_sync:
            self.log_info('sent all data from %s' % self.id)

        # For each source module, receive elements and copy them into the
        # current module's port data array:
        for src_id, src_rank in zip(self._in_ids, self._in_ranks):
            # BUG FIX: the MPI datatypes of the receive operations were
            # formerly derived from `data_gpot`/`data_spike`, leftover locals
            # of the send loop above (undefined when the module has no
            # destination modules); derive them from the receive buffers:
            buf_gpot = self.data_in['gpot'][src_id]
            buf_spike = self.data_in['spike'][src_id]
            r = MPI.COMM_WORLD.Irecv([buf_gpot, dtype_to_mpi(buf_gpot.dtype)],
                                     source=src_rank, tag=GPOT_TAG)
            requests.append(r)
            r = MPI.COMM_WORLD.Irecv([buf_spike, dtype_to_mpi(buf_spike.dtype)],
                                     source=src_rank, tag=SPIKE_TAG)
            requests.append(r)
            if not self.time_sync:
                self.log_info('receiving from %s' % src_id)
        req.Waitall(requests)
        if not self.time_sync:
            self.log_info('received all data received by %s' % self.id)

        # Copy received elements into the current module's data array:
        for src_id in self._in_ids:
            ind_in_gpot = self._in_port_dict_ids['gpot'][src_id]
            self.pm['gpot'].set_by_inds(ind_in_gpot, self.data_in['gpot'][src_id])
            ind_in_spike = self._in_port_dict_ids['spike'][src_id]
            self.pm['spike'].set_by_inds(ind_in_spike, self.data_in['spike'][src_id])

        # Save timing data:
        if self.time_sync:
            stop = time.time()
            n_gpot = 0
            n_spike = 0
            for src_id in self._in_ids:
                n_gpot += len(self.data_in['gpot'][src_id])
                n_spike += len(self.data_in['spike'][src_id])
            self.log_info('sent timing data to master')
            self.intercomm.isend(['sync_time',
                                  (self.rank, self.steps, start, stop,
                                   n_gpot*self.pm['gpot'].dtype.itemsize+\
                                   n_spike*self.pm['spike'].dtype.itemsize)],
                                 dest=0, tag=self._ctrl_tag)
        else:
            self.log_info('saved all data received by %s' % self.id)

    def run_step(self):
        """
        Module work method.

        This method should be implemented to do something interesting with new
        input port data in the module's `pm` attribute and update the attribute's
        output port data if necessary. It should not interact with any other
        class attributes.
        """

        self.log_info('running execution step')

    def pre_run(self):
        """
        Code to run before main loop.

        This method is invoked by the `run()` method before the main loop is
        started.
        """

        self.log_info('running code before body of worker %s' % self.rank)

        # Initialize _out_port_dict and _in_port_dict attributes:
        self._init_port_dicts()

        # Initialize data_in attribute:
        self._init_data_in()

        # Start timing the main loop:
        if self.time_sync:
            self.intercomm.isend(['start_time', (self.rank, time.time())],
                                 dest=0, tag=self._ctrl_tag)
            self.log_info('sent start time to manager')

    def post_run(self):
        """
        Code to run after main loop.

        This method is invoked by the `run()` method after the main loop is
        started.
        """

        self.log_info('running code after body of worker %s' % self.rank)

        # Stop timing the main loop before shutting down the emulation:
        if self.time_sync:
            self.intercomm.isend(['stop_time', (self.rank, time.time())],
                                 dest=0, tag=self._ctrl_tag)
            self.log_info('sent stop time to manager')

        # Send acknowledgment message:
        self.intercomm.isend(['done', self.rank], 0, self._ctrl_tag)
        self.log_info('done message sent to manager')

    def run(self):
        """
        Body of process.
        """

        # Don't allow keyboard interruption of process:
        with IgnoreKeyboardInterrupt():

            # Activate execution loop:
            super(Module, self).run()

    def do_work(self):
        """
        Work method.

        This method is repeatedly executed by the Worker instance after the
        instance receives a 'start' control message and until it receives a
        'stop' control message.
        """

        # If the debug flag is set, don't catch exceptions so that
        # errors will lead to visible failures:
        if self.debug:

            # Run the processing step:
            self.run_step()

            # Synchronize:
            self._sync()
        else:

            # Run the processing step:
            catch_exception(self.run_step, self.log_info)

            # Synchronize:
            catch_exception(self._sync, self.log_info)
class Module(mpi.Worker):
    """
    GPU-based processing module.

    This class repeatedly executes a work method until it receives a quit
    message via its control network port. Port data live in GPU memory
    (pycuda gpuarrays) and inter-module transfers use persistent,
    preallocated communication buffers.

    Parameters
    ----------
    sel : str, unicode, or sequence
        Path-like selector describing the module's interface of exposed ports.
    sel_in, sel_out, sel_gpot, sel_spike : str, unicode, or sequence
        Selectors respectively describing all input, output, graded potential,
        and spiking ports in the module's interface.
    data_gpot, data_spike : numpy.ndarray
        Data arrays associated with the graded potential and spiking ports in
        the module's interface. Array length must equal the number of ports in
        a module's interface.
    columns : list of str
        Interface port attributes. Must contain 'interface', 'io', and 'type'.
    ctrl_tag, gpot_tag, spike_tag : int
        MPI tags that respectively identify messages containing control data,
        graded potential port values, and spiking port values transmitted to
        worker nodes.
    id : str
        Module identifier. If no identifier is specified, a unique identifier
        is automatically generated.
    device : int
        GPU device to use. May be set to None if the module does not perform
        GPU processing.
    routing_table : neurokernel.routing_table.RoutingTable
        Routing table describing data connections between modules. If no
        routing table is specified, the module will be executed in isolation.
    rank_to_id : bidict.bidict
        Mapping between MPI ranks and module object IDs.
    debug : bool
        Debug flag. When True, exceptions raised during the work method are
        not suppressed.
    time_sync : bool
        Time synchronization flag. When True, debug messages are not emitted
        during module synchronization and the time taken to receive all
        incoming data is computed.

    Attributes
    ----------
    interface : Interface
        Object containing information about a module's ports.
    pm : dict
        `pm['gpot']` and `pm['spike']` are instances of
        neurokernel.pm_gpu.PortMapper that map a module's ports to the
        contents of the values in `data`.
    data : dict
        `data['gpot']` and `data['spike']` are arrays of data associated with
        a module's graded potential and spiking ports.
    """

    def __init__(self, sel, sel_in, sel_out,
                 sel_gpot, sel_spike, data_gpot, data_spike,
                 columns=['interface', 'io', 'type'],
                 ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG, spike_tag=SPIKE_TAG,
                 id=None, device=None,
                 routing_table=None, rank_to_id=None,
                 debug=False, time_sync=False):
        super(Module, self).__init__(ctrl_tag)
        self.debug = debug
        self.time_sync = time_sync
        self.device = device

        self._gpot_tag = gpot_tag
        self._spike_tag = spike_tag

        # Require several necessary attribute columns:
        if 'interface' not in columns:
            raise ValueError('interface column required')
        if 'io' not in columns:
            raise ValueError('io column required')
        if 'type' not in columns:
            raise ValueError('type column required')

        # Initialize GPU here so as to be able to initialize a port mapper
        # containing GPU memory:
        self._init_gpu()

        # This is needed to ensure that MPI_Finalize is called before PyCUDA
        # attempts to clean up; see
        # https://groups.google.com/forum/#!topic/mpi4py/by0Rd5q0Ayw
        atexit.register(MPI.Finalize)

        # Manually register the file close method associated with MPIOutput
        # so that it is called by atexit before MPI.Finalize() (if the file is
        # closed after MPI.Finalize() is called, an error will occur):
        for k, v in twiggy.emitters.iteritems():
            if isinstance(v._output, MPIOutput):
                atexit.register(v._output.close)

        # Ensure that the input and output port selectors respectively
        # select mutually exclusive subsets of the set of all ports exposed by
        # the module:
        if not SelectorMethods.is_in(sel_in, sel):
            raise ValueError('input port selector not in selector of all ports')
        if not SelectorMethods.is_in(sel_out, sel):
            raise ValueError('output port selector not in selector of all ports')
        if not SelectorMethods.are_disjoint(sel_in, sel_out):
            raise ValueError('input and output port selectors not disjoint')

        # Ensure that the graded potential and spiking port selectors
        # respectively select mutually exclusive subsets of the set of all
        # ports exposed by the module:
        if not SelectorMethods.is_in(sel_gpot, sel):
            raise ValueError('gpot port selector not in selector of all ports')
        if not SelectorMethods.is_in(sel_spike, sel):
            raise ValueError('spike port selector not in selector of all ports')
        if not SelectorMethods.are_disjoint(sel_gpot, sel_spike):
            raise ValueError('gpot and spike port selectors not disjoint')

        # Save routing table and mapping between MPI ranks and module IDs:
        self.routing_table = routing_table
        self.rank_to_id = rank_to_id

        # Generate a unique ID if none is specified:
        if id is None:
            self.id = uid()
        else:
            # If a unique ID was specified and the routing table is not empty
            # (i.e., there are connections between multiple modules), the id
            # must be a node in the routing table:
            if routing_table is not None and len(routing_table.ids) and \
                    not routing_table.has_node(id):
                raise ValueError('routing table must contain specified '
                                 'module ID: {}'.format(id))
            self.id = id

        # Reformat logger name:
        LoggerMixin.__init__(self, 'mod %s' % self.id)

        # Create module interface given the specified ports:
        self.interface = Interface(sel, columns)

        # Set the interface ID to 0; we assume that a module only has one
        # interface:
        self.interface[sel, 'interface'] = 0

        # Set the port attributes:
        self.interface[sel_in, 'io'] = 'in'
        self.interface[sel_out, 'io'] = 'out'
        self.interface[sel_gpot, 'type'] = 'gpot'
        self.interface[sel_spike, 'type'] = 'spike'

        # Find the input and output ports:
        self.in_ports = self.interface.in_ports().to_tuples()
        self.out_ports = self.interface.out_ports().to_tuples()

        # Find the graded potential and spiking ports:
        self.gpot_ports = self.interface.gpot_ports().to_tuples()
        self.spike_ports = self.interface.spike_ports().to_tuples()

        self.in_gpot_ports = self.interface.in_ports().gpot_ports().to_tuples()
        self.in_spike_ports = self.interface.in_ports().spike_ports().to_tuples()
        self.out_gpot_ports = self.interface.out_ports().gpot_ports().to_tuples()
        self.out_spike_ports = self.interface.out_ports().spike_ports().to_tuples()

        # Set up mapper between port identifiers and their associated data:
        if len(data_gpot) != len(self.gpot_ports):
            raise ValueError('incompatible gpot port data array length')
        if len(data_spike) != len(self.spike_ports):
            raise ValueError('incompatible spike port data array length')
        self.data = {}
        self.data['gpot'] = gpuarray.to_gpu(data_gpot)
        self.data['spike'] = gpuarray.to_gpu(data_spike)
        self.pm = {}
        self.pm['gpot'] = GPUPortMapper(sel_gpot, self.data['gpot'],
                                        make_copy=False)
        self.pm['spike'] = GPUPortMapper(sel_spike, self.data['spike'],
                                         make_copy=False)

        # MPI Request object for resolving asynchronous transfers:
        self.req = MPI.Request()

    def _init_gpu(self):
        """
        Initialize GPU device.

        Creates a PyCUDA context on `self.device` and registers its teardown
        with atexit. If the requested device index is out of range, a valid
        device is picked at random instead.

        Notes
        -----
        Must be executed before any GPUPortMapper instances are constructed
        because the latter allocate GPU memory; `__init__` therefore invokes
        it before creating the port mappers.
        """
        # `is None` rather than `== None`: identity test is the correct
        # comparison against the None singleton:
        if self.device is None:
            self.log_info('no GPU specified - not initializing ')
        else:
            # Import pycuda.driver here so as to facilitate the subclassing of
            # Module to create pure Python LPUs that don't use GPUs:
            import pycuda.driver as drv
            drv.init()
            N_gpu = drv.Device.count()
            if not self.device < N_gpu:
                # Fall back to a randomly chosen valid device rather than
                # failing outright:
                new_device = randint(0, N_gpu - 1)
                self.log_warning("GPU device device %d not in GPU devices %s" %
                                 (self.device, str(range(0, N_gpu))))
                self.log_warning("Setting device = %d" % new_device)
                self.device = new_device

            try:
                self.gpu_ctx = drv.Device(self.device).make_context()
            except Exception as e:
                # str(e) rather than e.message; the latter is deprecated and
                # absent on some exception types:
                self.log_info('_init_gpu exception: ' + str(e))
            else:
                atexit.register(self.gpu_ctx.pop)
                self.log_info('GPU %s initialized' % self.device)

    def _init_port_dicts(self):
        """
        Initialize dictionaries of source/destination ports in current module.

        Populates, per destination/source module and per port type
        ('gpot'/'spike'), the GPU-resident index arrays used to gather output
        values and scatter received values, plus the buffer index arrays and
        buffer lengths needed to support fan-out connections.
        """
        # Extract identifiers of source ports in the current module's
        # interface for all modules receiving output from the current module:
        self._out_port_dict_ids = {}
        self._out_port_dict_ids['gpot'] = {}
        self._out_port_dict_ids['spike'] = {}

        self._out_ids = self.routing_table.dest_ids(self.id)
        self._out_ranks = [self.rank_to_id.inv[i] for i in self._out_ids]
        for out_id in self._out_ids:
            self.log_info('extracting output ports for %s' % out_id)

            # Get interfaces of pattern connecting the current module to
            # destination module `out_id`; `int_0` is connected to the
            # current module, `int_1` is connected to the other module:
            pat = self.routing_table[self.id, out_id]['pattern']
            int_0 = self.routing_table[self.id, out_id]['int_0']
            int_1 = self.routing_table[self.id, out_id]['int_1']

            # Get ports in interface (`int_0`) connected to the current
            # module that are connected to the other module via the pattern:
            self._out_port_dict_ids['gpot'][out_id] = \
                gpuarray.to_gpu(self.pm['gpot'].ports_to_inds(
                    pat.src_idx(int_0, int_1, 'gpot', 'gpot')))
            self._out_port_dict_ids['spike'][out_id] = \
                gpuarray.to_gpu(self.pm['spike'].ports_to_inds(
                    pat.src_idx(int_0, int_1, 'spike', 'spike')))

        # Extract identifiers of destination ports in the current module's
        # interface for all modules sending input to the current module:
        self._in_port_dict_ids = {}
        self._in_port_dict_ids['gpot'] = {}
        self._in_port_dict_ids['spike'] = {}

        # Extract indices corresponding to the entries in the transmitted
        # buffers that must be copied into the input port map data arrays;
        # these are needed to support fan-out:
        self._in_port_dict_buf_ids = {}
        self._in_port_dict_buf_ids['gpot'] = {}
        self._in_port_dict_buf_ids['spike'] = {}

        # Lengths of input buffers:
        self._in_buf_len = {}
        self._in_buf_len['gpot'] = {}
        self._in_buf_len['spike'] = {}

        self._in_ids = self.routing_table.src_ids(self.id)
        self._in_ranks = [self.rank_to_id.inv[i] for i in self._in_ids]
        for in_id in self._in_ids:
            self.log_info('extracting input ports for %s' % in_id)

            # Get interfaces of pattern connecting the current module to
            # source module `in_id`; `int_1` is connected to the current
            # module, `int_0` is connected to the other module:
            pat = self.routing_table[in_id, self.id]['pattern']
            int_0 = self.routing_table[in_id, self.id]['int_0']
            int_1 = self.routing_table[in_id, self.id]['int_1']

            # Get ports in interface (`int_1`) connected to the current
            # module that are connected to the other module via the pattern:
            self._in_port_dict_ids['gpot'][in_id] = \
                gpuarray.to_gpu(self.pm['gpot'].ports_to_inds(
                    pat.dest_idx(int_0, int_1, 'gpot', 'gpot')))
            self._in_port_dict_ids['spike'][in_id] = \
                gpuarray.to_gpu(self.pm['spike'].ports_to_inds(
                    pat.dest_idx(int_0, int_1, 'spike', 'spike')))

            # Get the integer indices associated with the connected source
            # ports in the pattern interface connected to the source module
            # `in_id`; these are needed to copy received buffer contents into
            # the current module's port map data array:
            self._in_port_dict_buf_ids['gpot'][in_id] = \
                np.array(renumber_in_order(
                    BasePortMapper(pat.gpot_ports(int_0).to_tuples()).
                    ports_to_inds(pat.src_idx(int_0, int_1, 'gpot', 'gpot',
                                              duplicates=True))))
            self._in_port_dict_buf_ids['spike'][in_id] = \
                np.array(renumber_in_order(
                    BasePortMapper(pat.spike_ports(int_0).to_tuples()).
                    ports_to_inds(pat.src_idx(int_0, int_1, 'spike', 'spike',
                                              duplicates=True))))

            # The size of the input buffer to the current module must be the
            # same length as the output buffer of module `in_id`:
            self._in_buf_len['gpot'][in_id] = \
                len(pat.src_idx(int_0, int_1, 'gpot', 'gpot'))
            self._in_buf_len['spike'][in_id] = \
                len(pat.src_idx(int_0, int_1, 'spike', 'spike'))

    def _init_comm_bufs(self):
        """
        Buffers for sending/receiving data from other modules.

        Allocates persistent GPU buffers (and their raw buffer interfaces and
        MPI datatypes) for every connected module and port type; entries with
        no connected ports are set to None so that `_sync` can skip them.

        Notes
        -----
        Must be executed after `_init_port_dicts()`.
        """
        # Buffers (and their interfaces and MPI types) for receiving data
        # transmitted from source modules:
        self._in_buf = {}
        self._in_buf['gpot'] = {}
        self._in_buf['spike'] = {}
        self._in_buf_int = {}
        self._in_buf_int['gpot'] = {}
        self._in_buf_int['spike'] = {}
        self._in_buf_mtype = {}
        self._in_buf_mtype['gpot'] = {}
        self._in_buf_mtype['spike'] = {}
        for in_id in self._in_ids:
            n_gpot = self._in_buf_len['gpot'][in_id]
            if n_gpot:
                self._in_buf['gpot'][in_id] = \
                    gpuarray.empty(n_gpot, self.pm['gpot'].dtype)
                self._in_buf_int['gpot'][in_id] = \
                    bufint(self._in_buf['gpot'][in_id])
                self._in_buf_mtype['gpot'][in_id] = \
                    dtype_to_mpi(self._in_buf['gpot'][in_id].dtype)
            else:
                self._in_buf['gpot'][in_id] = None
            n_spike = self._in_buf_len['spike'][in_id]
            if n_spike:
                self._in_buf['spike'][in_id] = \
                    gpuarray.empty(n_spike, self.pm['spike'].dtype)
                self._in_buf_int['spike'][in_id] = \
                    bufint(self._in_buf['spike'][in_id])
                self._in_buf_mtype['spike'][in_id] = \
                    dtype_to_mpi(self._in_buf['spike'][in_id].dtype)
            else:
                self._in_buf['spike'][in_id] = None

        # Buffers (and their interfaces and MPI types) for transmitting data
        # to destination modules:
        self._out_buf = {}
        self._out_buf['gpot'] = {}
        self._out_buf['spike'] = {}
        self._out_buf_int = {}
        self._out_buf_int['gpot'] = {}
        self._out_buf_int['spike'] = {}
        self._out_buf_mtype = {}
        self._out_buf_mtype['gpot'] = {}
        self._out_buf_mtype['spike'] = {}
        for out_id in self._out_ids:
            n_gpot = len(self._out_port_dict_ids['gpot'][out_id])
            if n_gpot:
                self._out_buf['gpot'][out_id] = \
                    gpuarray.empty(n_gpot, self.pm['gpot'].dtype)
                self._out_buf_int['gpot'][out_id] = \
                    bufint(self._out_buf['gpot'][out_id])
                self._out_buf_mtype['gpot'][out_id] = \
                    dtype_to_mpi(self._out_buf['gpot'][out_id].dtype)
            else:
                self._out_buf['gpot'][out_id] = None
            n_spike = len(self._out_port_dict_ids['spike'][out_id])
            if n_spike:
                self._out_buf['spike'][out_id] = \
                    gpuarray.empty(n_spike, self.pm['spike'].dtype)
                self._out_buf_int['spike'][out_id] = \
                    bufint(self._out_buf['spike'][out_id])
                self._out_buf_mtype['spike'][out_id] = \
                    dtype_to_mpi(self._out_buf['spike'][out_id].dtype)
            else:
                self._out_buf['spike'][out_id] = None

    def _sync(self):
        """
        Send output data and receive input data.

        Gathers output port values into the persistent per-destination GPU
        buffers and posts non-blocking sends, posts non-blocking receives into
        the per-source buffers, waits for completion, then scatters the
        received values into the port data arrays. When `time_sync` is set,
        timing/volume statistics are sent to the master instead of emitting
        per-transfer debug log messages.
        """
        if self.time_sync:
            start = time.time()
        requests = []

        # For each destination module, extract elements from the current
        # module's port data array, copy them to a contiguous array, and
        # transmit the latter:
        for dest_id, dest_rank in zip(self._out_ids, self._out_ranks):
            # Copy data into destination buffer:
            if self._out_buf['gpot'][dest_id] is not None:
                set_by_inds(self._out_buf['gpot'][dest_id],
                            self._out_port_dict_ids['gpot'][dest_id],
                            self.data['gpot'], 'src')
                if not self.time_sync:
                    self.log_info('gpot data sent to %s: %s' % \
                                  (dest_id, str(self._out_buf['gpot'][dest_id])))
                r = MPI.COMM_WORLD.Isend([self._out_buf_int['gpot'][dest_id],
                                          self._out_buf_mtype['gpot'][dest_id]],
                                         dest_rank, GPOT_TAG)
                requests.append(r)
            if self._out_buf['spike'][dest_id] is not None:
                set_by_inds(self._out_buf['spike'][dest_id],
                            self._out_port_dict_ids['spike'][dest_id],
                            self.data['spike'], 'src')
                if not self.time_sync:
                    self.log_info('spike data sent to %s: %s' % \
                                  (dest_id, str(self._out_buf['spike'][dest_id])))
                r = MPI.COMM_WORLD.Isend([self._out_buf_int['spike'][dest_id],
                                          self._out_buf_mtype['spike'][dest_id]],
                                         dest_rank, SPIKE_TAG)
                requests.append(r)
            if not self.time_sync:
                self.log_info('sending to %s' % dest_id)
        if not self.time_sync:
            self.log_info('sent all data from %s' % self.id)

        # For each source module, receive elements and copy them into the
        # current module's port data array:
        for src_id, src_rank in zip(self._in_ids, self._in_ranks):
            if self._in_buf['gpot'][src_id] is not None:
                r = MPI.COMM_WORLD.Irecv([self._in_buf_int['gpot'][src_id],
                                          self._in_buf_mtype['gpot'][src_id]],
                                         source=src_rank, tag=GPOT_TAG)
                requests.append(r)
            if self._in_buf['spike'][src_id] is not None:
                r = MPI.COMM_WORLD.Irecv([self._in_buf_int['spike'][src_id],
                                          self._in_buf_mtype['spike'][src_id]],
                                         source=src_rank, tag=SPIKE_TAG)
                requests.append(r)
            if not self.time_sync:
                self.log_info('receiving from %s' % src_id)
        if requests:
            self.req.Waitall(requests)
        if not self.time_sync:
            self.log_info('all data were received by %s' % self.id)

        # Copy received elements into the current module's data array:
        for src_id in self._in_ids:
            if self._in_buf['gpot'][src_id] is not None:
                if not self.time_sync:
                    self.log_info('gpot data received from %s: %s' % \
                                  (src_id, str(self._in_buf['gpot'][src_id])))
                set_by_inds_from_inds(self.data['gpot'],
                                      self._in_port_dict_ids['gpot'][src_id],
                                      self._in_buf['gpot'][src_id],
                                      self._in_port_dict_buf_ids['gpot'][src_id])
            if self._in_buf['spike'][src_id] is not None:
                if not self.time_sync:
                    self.log_info('spike data received from %s: %s' % \
                                  (src_id, str(self._in_buf['spike'][src_id])))
                set_by_inds_from_inds(self.data['spike'],
                                      self._in_port_dict_ids['spike'][src_id],
                                      self._in_buf['spike'][src_id],
                                      self._in_port_dict_buf_ids['spike'][src_id])

        # Save timing data:
        if self.time_sync:
            stop = time.time()
            n_gpot = 0
            n_spike = 0
            for src_id in self._in_ids:
                n_gpot += len(self._in_buf['gpot'][src_id])
                n_spike += len(self._in_buf['spike'][src_id])
            self.log_info('sent timing data to master')
            self.intercomm.isend(['sync_time',
                                  (self.rank, self.steps, start, stop,
                                   n_gpot*self.pm['gpot'].dtype.itemsize +
                                   n_spike*self.pm['spike'].dtype.itemsize)],
                                 dest=0, tag=self._ctrl_tag)
        else:
            self.log_info('saved all data received by %s' % self.id)

    def pre_run(self):
        """
        Code to run before main loop.

        This method is invoked by the `run()` method before the main loop is
        started.
        """
        self.log_info('running code before body of worker %s' % self.rank)

        # Initialize _out_port_dict and _in_port_dict attributes:
        self._init_port_dicts()

        # Initialize transmission buffers:
        self._init_comm_bufs()

        # Start timing the main loop:
        if self.time_sync:
            self.intercomm.isend(['start_time', (self.rank, time.time())],
                                 dest=0, tag=self._ctrl_tag)
            self.log_info('sent start time to manager')

    def post_run(self):
        """
        Code to run after main loop.

        This method is invoked by the `run()` method after the main loop is
        started.
        """
        self.log_info('running code after body of worker %s' % self.rank)

        # Stop timing the main loop before shutting down the emulation:
        if self.time_sync:
            self.intercomm.isend(['stop_time', (self.rank, time.time())],
                                 dest=0, tag=self._ctrl_tag)
            self.log_info('sent stop time to manager')

        # Send acknowledgment message:
        self.intercomm.isend(['done', self.rank], 0, self._ctrl_tag)
        self.log_info('done message sent to manager')

    def run_step(self):
        """
        Module work method.

        This method should be implemented to do something interesting with new
        input port data in the module's `pm` attribute and update the
        attribute's output port data if necessary. It should not interact with
        any other class attributes.
        """
        self.log_info('running execution step')

    def run(self):
        """
        Body of process.
        """
        # Don't allow keyboard interruption of process:
        with IgnoreKeyboardInterrupt():
            # Activate execution loop:
            super(Module, self).run()

    def do_work(self):
        """
        Work method.

        This method is repeatedly executed by the Worker instance after the
        instance receives a 'start' control message and until it receives a
        'stop' control message.
        """
        # If the debug flag is set, don't catch exceptions so that
        # errors will lead to visible failures:
        if self.debug:
            # Run the processing step:
            self.run_step()

            # Synchronize:
            self._sync()
        else:
            # Run the processing step:
            catch_exception(self.run_step, self.log_info)

            # Synchronize:
            catch_exception(self._sync, self.log_info)
    def __init__(self, sel, sel_in, sel_out,
                 sel_gpot, sel_spike, data_gpot, data_spike,
                 columns=['interface', 'io', 'type'],
                 ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG, spike_tag=SPIKE_TAG,
                 id=None, device=None,
                 routing_table=None, rank_to_id=None,
                 debug=False, time_sync=False):
        # NOTE(review): this appears to be the constructor of a further,
        # host-memory Module variant (no GPU initialization; data arrays kept
        # as NumPy arrays and mapped with PortMapper rather than
        # GPUPortMapper) whose enclosing class header lies outside this chunk.
        super(Module, self).__init__(ctrl_tag)
        self.debug = debug
        self.time_sync = time_sync
        self.device = device

        self._gpot_tag = gpot_tag
        self._spike_tag = spike_tag

        # Require several necessary attribute columns:
        if 'interface' not in columns:
            raise ValueError('interface column required')
        if 'io' not in columns:
            raise ValueError('io column required')
        if 'type' not in columns:
            raise ValueError('type column required')

        # Manually register the file close method associated with MPIOutput
        # so that it is called by atexit before MPI.Finalize() (if the file is
        # closed after MPI.Finalize() is called, an error will occur):
        for k, v in twiggy.emitters.iteritems():
            if isinstance(v._output, MPIOutput):
                atexit.register(v._output.close)

        # Ensure that the input and output port selectors respectively
        # select mutually exclusive subsets of the set of all ports exposed by
        # the module:
        if not SelectorMethods.is_in(sel_in, sel):
            raise ValueError('input port selector not in selector of all ports')
        if not SelectorMethods.is_in(sel_out, sel):
            raise ValueError('output port selector not in selector of all ports')
        if not SelectorMethods.are_disjoint(sel_in, sel_out):
            raise ValueError('input and output port selectors not disjoint')

        # Ensure that the graded potential and spiking port selectors
        # respectively select mutually exclusive subsets of the set of all
        # ports exposed by the module:
        if not SelectorMethods.is_in(sel_gpot, sel):
            raise ValueError('gpot port selector not in selector of all ports')
        if not SelectorMethods.is_in(sel_spike, sel):
            raise ValueError('spike port selector not in selector of all ports')
        if not SelectorMethods.are_disjoint(sel_gpot, sel_spike):
            raise ValueError('gpot and spike port selectors not disjoint')

        # Save routing table and mapping between MPI ranks and module IDs:
        self.routing_table = routing_table
        self.rank_to_id = rank_to_id

        # Generate a unique ID if none is specified:
        if id is None:
            self.id = uid()
        else:
            # If a unique ID was specified and the routing table is not empty
            # (i.e., there are connections between multiple modules),
            # the id must be a node in the table:
            if routing_table is not None and len(routing_table.ids) and \
                    not routing_table.has_node(id):
                raise ValueError('routing table must contain specified module ID')
            self.id = id

        # Reformat logger name:
        LoggerMixin.__init__(self, 'mod %s' % self.id)

        # Create module interface given the specified ports:
        self.interface = Interface(sel, columns)

        # Set the interface ID to 0; we assume that a module only has one
        # interface:
        self.interface[sel, 'interface'] = 0

        # Set the port attributes:
        self.interface[sel_in, 'io'] = 'in'
        self.interface[sel_out, 'io'] = 'out'
        self.interface[sel_gpot, 'type'] = 'gpot'
        self.interface[sel_spike, 'type'] = 'spike'

        # Find the input and output ports:
        self.in_ports = self.interface.in_ports().to_tuples()
        self.out_ports = self.interface.out_ports().to_tuples()

        # Find the graded potential and spiking ports:
        self.gpot_ports = self.interface.gpot_ports().to_tuples()
        self.spike_ports = self.interface.spike_ports().to_tuples()

        self.in_gpot_ports = self.interface.in_ports().gpot_ports().to_tuples()
        self.in_spike_ports = self.interface.in_ports().spike_ports().to_tuples()
        self.out_gpot_ports = self.interface.out_ports().gpot_ports().to_tuples()
        self.out_spike_ports = self.interface.out_ports().spike_ports().to_tuples()

        # Set up mapper between port identifiers and their associated data;
        # unlike the GPU variant, the arrays are used directly in host memory:
        if len(data_gpot) != len(self.gpot_ports):
            raise ValueError('incompatible gpot port data array length')
        if len(data_spike) != len(self.spike_ports):
            raise ValueError('incompatible spike port data array length')
        self.data = {}
        self.data['gpot'] = data_gpot
        self.data['spike'] = data_spike
        self.pm = {}
        self.pm['gpot'] = PortMapper(sel_gpot, self.data['gpot'])
        # NOTE(review): the constructor may continue beyond this chunk;
        # annotations cover only the visible portion.
        self.pm['spike'] = PortMapper(sel_spike, self.data['spike'])