Example 1
class Module(BaseModule):
    def __init__(self,
                 sel,
                 sel_in,
                 sel_out,
                 sel_gpot,
                 sel_spike,
                 data_gpot,
                 data_spike,
                 columns=['interface', 'io', 'type'],
                 ctrl_tag=CTRL_TAG,
                 gpot_tag=GPOT_TAG,
                 spike_tag=SPIKE_TAG,
                 id=None,
                 device=None,
                 routing_table=None,
                 rank_to_id=None,
                 pm_all=None,
                 debug=False,
                 time_sync=False):

        # Call super for BaseModule rather than Module because most of the
        # functionality of the former's constructor must be overridden in any case:
        super(BaseModule, self).__init__(ctrl_tag)
        self.debug = debug
        self.time_sync = time_sync
        self.device = device

        self._gpot_tag = gpot_tag
        self._spike_tag = spike_tag

        # Require several necessary attribute columns:
        assert 'interface' in columns
        assert 'io' in columns
        assert 'type' in columns

        self._init_gpu()

        # This is needed to ensure that MPI_Finalize is called before PyCUDA
        # attempts to clean up; see
        # https://groups.google.com/forum/#!topic/mpi4py/by0Rd5q0Ayw
        atexit.register(MPI.Finalize)

        # Manually register the file close method associated with MPIOutput
        # so that it is called by atexit before MPI.Finalize() (if the file is
        # closed after MPI.Finalize() is called, an error will occur):
        for k, v in twiggy.emitters.iteritems():
            if isinstance(v._output, MPIOutput):
                atexit.register(v._output.close)

        # Ensure that the input and output port selectors respectively
        # select mutually exclusive subsets of the set of all ports exposed by
        # the module:
        assert SelectorMethods.is_in(sel_in, sel)
        assert SelectorMethods.is_in(sel_out, sel)
        assert SelectorMethods.are_disjoint(sel_in, sel_out)

        # Ensure that the graded potential and spiking port selectors
        # respectively select mutually exclusive subsets of the set of all ports
        # exposed by the module:
        assert SelectorMethods.is_in(sel_gpot, sel)
        assert SelectorMethods.is_in(sel_spike, sel)
        assert SelectorMethods.are_disjoint(sel_gpot, sel_spike)

        # Save routing table and mapping between MPI ranks and module IDs:
        self.routing_table = routing_table
        self.rank_to_id = rank_to_id

        # Save module interface data (stored in a dict of BasePortMapper instances):
        self.pm_all = pm_all

        # Generate a unique ID if none is specified:
        if id is None:
            self.id = uid()
        else:

            # If a unique ID was specified, it must be a node in the routing
            # table:
            if routing_table is not None and not routing_table.has_node(id):
                raise ValueError(
                    'routing table must contain specified module ID')
            self.id = id

        # Reformat logger name:
        LoggerMixin.__init__(self, 'mod %s' % self.id)

        # Create module interface given the specified ports:
        self.interface = Interface(sel, columns)

        # Set the interface ID to 0; we assume that a module only has one interface:
        self.interface[sel, 'interface'] = 0

        # Set the port attributes:
        self.interface[sel_in, 'io'] = 'in'
        self.interface[sel_out, 'io'] = 'out'
        self.interface[sel_gpot, 'type'] = 'gpot'
        self.interface[sel_spike, 'type'] = 'spike'

        # Find the input and output ports:
        self.in_ports = self.interface.in_ports().to_tuples()
        self.out_ports = self.interface.out_ports().to_tuples()

        # Find the graded potential and spiking ports:
        self.gpot_ports = self.interface.gpot_ports().to_tuples()
        self.spike_ports = self.interface.spike_ports().to_tuples()

        self.in_gpot_ports = self.interface.in_ports().gpot_ports().to_tuples()
        self.in_spike_ports = self.interface.in_ports().spike_ports().to_tuples()
        self.out_gpot_ports = self.interface.out_ports().gpot_ports().to_tuples()
        self.out_spike_ports = self.interface.out_ports().spike_ports().to_tuples()

        # Set up mapper between port identifiers and their associated data:
        assert len(data_gpot) == len(self.gpot_ports)
        assert len(data_spike) == len(self.spike_ports)
        self.data = {}
        self.data['gpot'] = gpuarray.to_gpu(data_gpot)
        self.data['spike'] = gpuarray.to_gpu(data_spike)
        self.pm = {}
        self.pm['gpot'] = GPUPortMapper(sel_gpot,
                                        self.data['gpot'],
                                        make_copy=False)
        self.pm['spike'] = GPUPortMapper(sel_spike,
                                         self.data['spike'],
                                         make_copy=False)

    def _init_port_dicts(self):
        """
        Initialize dictionaries of source/destination ports in the current module.
        """

        # Extract identifiers of source ports in all modules sending input to
        # the current module's ports and of destination ports in the current
        # module's interface for all modules sending input to the current
        # module:
        self._in_port_dict = {}
        self._in_port_dict['gpot'] = {}
        self._in_port_dict['spike'] = {}
        self._in_port_dict_ids = {}
        self._in_port_dict_ids['gpot'] = {}
        self._in_port_dict_ids['spike'] = {}

        self._from_port_dict = {}
        self._from_port_dict['gpot'] = {}
        self._from_port_dict['spike'] = {}
        self._from_port_dict_ids = {}
        self._from_port_dict_ids['gpot'] = {}
        self._from_port_dict_ids['spike'] = {}

        self._in_ids = self.routing_table.src_ids(self.id)
        for in_id in self._in_ids:
            self.log_info('extracting input ports for %s' % in_id)

            # Get interfaces of pattern connecting the current module to
            # source module `in_id`; `int_1` is connected to the current
            # module, `int_0` is connected to the other module:
            pat = self.routing_table[in_id, self.id]['pattern']
            int_0 = self.routing_table[in_id, self.id]['int_0']
            int_1 = self.routing_table[in_id, self.id]['int_1']

            # Get ports in interface (`int_1`) connected to the current
            # module that are connected to the other module via the pattern:
            self._in_port_dict['gpot'][in_id] = \
                    pat.dest_idx(int_0, int_1, 'gpot', 'gpot')
            self._in_port_dict_ids['gpot'][in_id] = \
                    self.pm['gpot'].ports_to_inds(self._in_port_dict['gpot'][in_id])
            self._in_port_dict['spike'][in_id] = \
                    pat.dest_idx(int_0, int_1, 'spike', 'spike')
            self._in_port_dict_ids['spike'][in_id] = \
                    self.pm['spike'].ports_to_inds(self._in_port_dict['spike'][in_id])

            # Get ports in interface (`int_0`) connected to the other module
            # that are connected to the current module via the pattern:
            self._from_port_dict['gpot'][in_id] = \
                    pat.src_idx(int_0, int_1, 'gpot', 'gpot')
            self._from_port_dict_ids['gpot'][in_id] = \
                self.pm_all['gpot'][in_id].ports_to_inds(self._from_port_dict['gpot'][in_id])
            self._from_port_dict['spike'][in_id] = \
                    pat.src_idx(int_0, int_1, 'spike', 'spike')
            self._from_port_dict_ids['spike'][in_id] = \
                self.pm_all['spike'][in_id].ports_to_inds(self._from_port_dict['spike'][in_id])

    def _init_comm_bufs(self):
        """
        Buffers for receiving data from other modules.

        Notes
        -----
        Must be executed after `_init_port_dicts()`.
        """

        # Buffer interface to and MPI type of this module's port data array:
        self._data_int = {}
        self._data_int['gpot'] = bufint(self.data['gpot'])
        self._data_int['spike'] = bufint(self.data['spike'])
        self._data_mtype = {}
        self._data_mtype['gpot'] = dtype_to_mpi(self.data['gpot'].dtype)
        self._data_mtype['spike'] = dtype_to_mpi(self.data['spike'].dtype)

        # Buffers (and their interfaces and MPI types) for receiving data
        # transmitted from source modules:
        self._in_buf = {}
        self._in_buf['gpot'] = {}
        self._in_buf['spike'] = {}
        self._in_buf_int = {}
        self._in_buf_int['gpot'] = {}
        self._in_buf_int['spike'] = {}
        self._in_buf_mtype = {}
        self._in_buf_mtype['gpot'] = {}
        self._in_buf_mtype['spike'] = {}
        for in_id in self._in_ids:

            # Get interfaces of pattern connecting the current module to
            # source module `in_id`; `int_1` is connected to the current
            # module, `int_0` is connected to the other module:
            pat = self.routing_table[in_id, self.id]['pattern']
            int_0 = self.routing_table[in_id, self.id]['int_0']
            int_1 = self.routing_table[in_id, self.id]['int_1']

            # The buffers must be the same size as the port data arrays of the
            # modules from which they receive data:
            self._in_buf['gpot'][in_id] = \
                gpuarray.empty(len(self.pm_all['gpot'][in_id]),
                               self.pm['gpot'].dtype)
            self._in_buf_int['gpot'][in_id] = bufint(
                self._in_buf['gpot'][in_id])
            self._in_buf_mtype['gpot'][in_id] = \
                dtype_to_mpi(self._in_buf['gpot'][in_id].dtype)

            self._in_buf['spike'][in_id] = \
                gpuarray.empty(len(self.pm_all['spike'][in_id]),
                               self.pm['spike'].dtype)
            self._in_buf_int['spike'][in_id] = bufint(
                self._in_buf['spike'][in_id])
            self._in_buf_mtype['spike'][in_id] = \
                dtype_to_mpi(self._in_buf['spike'][in_id].dtype)

    def _sync(self):
        """
        Send output data and receive input data.
        """

        if self.time_sync:
            start = time.time()
        req = MPI.Request()
        requests = []

        # Transmit the entire port data array to each destination module:
        dest_ids = self.routing_table.dest_ids(self.id)
        for dest_id in dest_ids:
            dest_rank = self.rank_to_id[:dest_id]
            r = MPI.COMM_WORLD.Isend(
                [self._data_int['gpot'], self._data_mtype['gpot']], dest_rank,
                GPOT_TAG)
            requests.append(r)
            r = MPI.COMM_WORLD.Isend(
                [self._data_int['spike'], self._data_mtype['spike']],
                dest_rank, SPIKE_TAG)
            requests.append(r)

            if not self.time_sync:
                self.log_info('sending to %s' % dest_id)
        if not self.time_sync:
            self.log_info('sent all data from %s' % self.id)

        # For each source module, receive elements and copy them into the
        # current module's port data array:
        src_ids = self.routing_table.src_ids(self.id)
        for src_id in src_ids:
            src_rank = self.rank_to_id[:src_id]
            r = MPI.COMM_WORLD.Irecv([
                self._in_buf_int['gpot'][src_id],
                self._in_buf_mtype['gpot'][src_id]
            ],
                                     source=src_rank,
                                     tag=GPOT_TAG)
            requests.append(r)
            r = MPI.COMM_WORLD.Irecv([
                self._in_buf_int['spike'][src_id],
                self._in_buf_mtype['spike'][src_id]
            ],
                                     source=src_rank,
                                     tag=SPIKE_TAG)
            requests.append(r)
            if not self.time_sync:
                self.log_info('receiving from %s' % src_id)
        req.Waitall(requests)
        if not self.time_sync:
            self.log_info('received all data sent to %s' % self.id)

        # Copy received elements into the current module's data array:
        n_gpot = 0
        n_spike = 0
        for src_id in src_ids:
            ind_from_gpot = self._from_port_dict_ids['gpot'][src_id]
            ind_in_gpot = self._in_port_dict_ids['gpot'][src_id]
            set_by_inds_from_inds(self.data['gpot'], ind_in_gpot,
                                  self._in_buf['gpot'][src_id], ind_from_gpot)
            n_gpot += len(self._in_buf['gpot'][src_id])
            ind_from_spike = self._from_port_dict_ids['spike'][src_id]
            ind_in_spike = self._in_port_dict_ids['spike'][src_id]
            set_by_inds_from_inds(self.data['spike'], ind_in_spike,
                                  self._in_buf['spike'][src_id],
                                  ind_from_spike)
            n_spike += len(self._in_buf['spike'][src_id])

        # Save timing data:
        if self.time_sync:
            stop = time.time()
            #self.log_info('sent timing data to master')
            self.intercomm.isend(['time', (self.rank, self.steps, start, stop,
                n_gpot*self.pm['gpot'].dtype.itemsize+\
                n_spike*self.pm['spike'].dtype.itemsize)],
                    dest=0, tag=self._ctrl_tag)
        else:
            self.log_info('saved all data received by %s' % self.id)
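
The `_sync()` method above is built on mpi4py's non-blocking point-to-point calls: it posts one `Isend`/`Irecv` request per peer and resolves them all with a single `Waitall`. The standalone sketch below (hypothetical buffers and tag; assumed to be launched as two MPI processes, e.g. `mpiexec -n 2`) shows that exchange pattern in isolation.

# Minimal sketch of the Isend/Irecv/Waitall pattern used by _sync() above.
# Buffer contents and the tag value are made up for illustration.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
peer = 1 - rank                              # the other of the two ranks

send_buf = np.full(4, float(rank))           # data this rank transmits
recv_buf = np.empty(4, dtype=np.float64)     # preallocated receive buffer

requests = [comm.Isend([send_buf, MPI.DOUBLE], dest=peer, tag=7),
            comm.Irecv([recv_buf, MPI.DOUBLE], source=peer, tag=7)]
MPI.Request.Waitall(requests)                # block until both requests complete
print('rank %d received %s' % (rank, recv_buf))
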
Example 2
class Module(mpi.Worker):
    """
    Processing module.

    This class repeatedly executes a work method until it receives a
    quit message via its control network port.

    Parameters
    ----------
    sel : str, unicode, or sequence
        Path-like selector describing the module's interface of
        exposed ports.
    sel_in, sel_out, sel_gpot, sel_spike : str, unicode, or sequence
        Selectors respectively describing all input, output, graded potential,
        and spiking ports in the module's interface.
    data_gpot, data_spike : numpy.ndarray
        Data arrays associated with the graded potential and spiking ports in
        the module's interface. The array lengths must respectively equal the
        number of graded potential and spiking ports in the interface.
    columns : list of str
        Interface port attributes. This list must at least contain
        'interface', 'io', and 'type'.
    ctrl_tag, gpot_tag, spike_tag : int
        MPI tags that respectively identify messages containing control data,
        graded potential port values, and spiking port values transmitted to 
        worker nodes.
    id : str
        Module identifier. If no identifier is specified, a unique
        identifier is automatically generated.
    device : int
        GPU device to use. May be set to None if the module does not perform
        GPU processing.
    routing_table : neurokernel.routing_table.RoutingTable
        Routing table describing data connections between modules. If no routing
        table is specified, the module will be executed in isolation.
    rank_to_id : bidict.bidict
        Mapping between MPI ranks and module object IDs.
    debug : bool
        Debug flag. When True, exceptions raised during the work method are
        not suppressed.
    time_sync : bool
        Time synchronization flag. When True, debug messages are not emitted
        during module synchronization and the time taken to receive all incoming
        data is computed.

    Attributes
    ----------
    interface : Interface
        Object containing information about a module's ports.
    pm : dict
        `pm['gpot']` and `pm['spike']` are instances of neurokernel.pm.PortMapper that
        map a module's ports to the corresponding values in `data`.
    data : dict
        `data['gpot']` and `data['spike']` are arrays of data associated with 
        a module's graded potential and spiking ports.
    """

    def __init__(self, sel, sel_in, sel_out,
                 sel_gpot, sel_spike, data_gpot, data_spike,
                 columns=['interface', 'io', 'type'],
                 ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG, spike_tag=SPIKE_TAG,
                 id=None, device=None,
                 routing_table=None, rank_to_id=None,
                 debug=False, time_sync=False):

        super(Module, self).__init__(ctrl_tag)
        self.debug = debug
        self.time_sync = time_sync
        self.device = device

        self._gpot_tag = gpot_tag
        self._spike_tag = spike_tag

        # Require several necessary attribute columns:
        if 'interface' not in columns:
            raise ValueError('interface column required')
        if 'io' not in columns:
            raise ValueError('io column required')
        if 'type' not in columns:
            raise ValueError('type column required')

        # Manually register the file close method associated with MPIOutput
        # so that it is called by atexit before MPI.Finalize() (if the file is
        # closed after MPI.Finalize() is called, an error will occur):
        for k, v in twiggy.emitters.iteritems():
            if isinstance(v._output, MPIOutput):
                atexit.register(v._output.close)

        # Ensure that the input and output port selectors respectively
        # select mutually exclusive subsets of the set of all ports exposed by
        # the module:
        if not SelectorMethods.is_in(sel_in, sel):
            raise ValueError('input port selector not in selector of all ports')
        if not SelectorMethods.is_in(sel_out, sel):
            raise ValueError('output port selector not in selector of all ports')
        if not SelectorMethods.are_disjoint(sel_in, sel_out):
            raise ValueError('input and output port selectors not disjoint')

        # Ensure that the graded potential and spiking port selectors
        # respectively select mutually exclusive subsets of the set of all ports
        # exposed by the module:
        if not SelectorMethods.is_in(sel_gpot, sel):
            raise ValueError('gpot port selector not in selector of all ports')
        if not SelectorMethods.is_in(sel_spike, sel):
            raise ValueError('spike port selector not in selector of all ports')
        if not SelectorMethods.are_disjoint(sel_gpot, sel_spike):
            raise ValueError('gpot and spike port selectors not disjoint')

        # Save routing table and mapping between MPI ranks and module IDs:
        self.routing_table = routing_table
        self.rank_to_id = rank_to_id

        # Generate a unique ID if none is specified:
        if id is None:
            self.id = uid()
        else:

            # If a unique ID was specified and the routing table is not empty 
            # (i.e., there are connections between multiple modules),
            # the id must be a node in the table:
            if routing_table is not None and len(routing_table.ids) and \
                    not routing_table.has_node(id):
                raise ValueError('routing table must contain specified module ID')
            self.id = id

        # Reformat logger name:
        LoggerMixin.__init__(self, 'mod %s' % self.id)

        # Create module interface given the specified ports:
        self.interface = Interface(sel, columns)

        # Set the interface ID to 0; we assume that a module only has one interface:
        self.interface[sel, 'interface'] = 0

        # Set the port attributes:
        self.interface[sel_in, 'io'] = 'in'
        self.interface[sel_out, 'io'] = 'out'
        self.interface[sel_gpot, 'type'] = 'gpot'
        self.interface[sel_spike, 'type'] = 'spike'

        # Find the input and output ports:
        self.in_ports = self.interface.in_ports().to_tuples()
        self.out_ports = self.interface.out_ports().to_tuples()

        # Find the graded potential and spiking ports:
        self.gpot_ports = self.interface.gpot_ports().to_tuples()
        self.spike_ports = self.interface.spike_ports().to_tuples()

        self.in_gpot_ports = self.interface.in_ports().gpot_ports().to_tuples()
        self.in_spike_ports = self.interface.in_ports().spike_ports().to_tuples()
        self.out_gpot_ports = self.interface.out_ports().gpot_ports().to_tuples()
        self.out_spike_ports = self.interface.out_ports().spike_ports().to_tuples()

        # Set up mapper between port identifiers and their associated data:
        if len(data_gpot) != len(self.gpot_ports):
            raise ValueError('incompatible gpot port data array length')
        if len(data_spike) != len(self.spike_ports):
            raise ValueError('incompatible spike port data array length')
        self.data = {}
        self.data['gpot'] = data_gpot
        self.data['spike'] = data_spike
        self.pm = {}
        self.pm['gpot'] = PortMapper(sel_gpot, self.data['gpot'])
        self.pm['spike'] = PortMapper(sel_spike, self.data['spike'])

    def _init_gpu(self):
        """
        Initialize GPU device.

        Notes
        -----
        Must be called from within the `run()` method, not from within
        `__init__()`.
        """

        if self.device == None:
            self.log_info('no GPU specified - not initializing ')
        else:

            # Import pycuda.driver here so as to facilitate the
            # subclassing of Module to create pure Python LPUs that don't use GPUs:
            import pycuda.driver as drv
            drv.init()
            try:
                self.gpu_ctx = drv.Device(self.device).make_context()
            except Exception as e:
                self.log_info('_init_gpu exception: ' + e.message)
            else:
                atexit.register(self.gpu_ctx.pop)
                self.log_info('GPU initialized')

    def _init_port_dicts(self):
        """
        Initialize dictionaries of source/destination ports in the current module.
        """

        # Extract identifiers of source ports in the current module's interface
        # for all modules receiving output from the current module:
        self._out_port_dict = {}
        self._out_port_dict['gpot'] = {}
        self._out_port_dict['spike'] = {}
        self._out_port_dict_ids = {}
        self._out_port_dict_ids['gpot'] = {}
        self._out_port_dict_ids['spike'] = {}

        self._out_ids = self.routing_table.dest_ids(self.id)
        self._out_ranks = [self.rank_to_id.inv[i] for i in self._out_ids]
        for out_id in self._out_ids:
            self.log_info('extracting output ports for %s' % out_id)

            # Get interfaces of pattern connecting the current module to
            # destination module `out_id`; `int_0` is connected to the
            # current module, `int_1` is connected to the other module:
            pat = self.routing_table[self.id, out_id]['pattern']
            int_0 = self.routing_table[self.id, out_id]['int_0']
            int_1 = self.routing_table[self.id, out_id]['int_1']

            # Get ports in interface (`int_0`) connected to the current
            # module that are connected to the other module via the pattern:
            self._out_port_dict['gpot'][out_id] = \
                    pat.src_idx(int_0, int_1, 'gpot', 'gpot')
            self._out_port_dict_ids['gpot'][out_id] = \
                    self.pm['gpot'].ports_to_inds(self._out_port_dict['gpot'][out_id])
            self._out_port_dict['spike'][out_id] = \
                    pat.src_idx(int_0, int_1, 'spike', 'spike')
            self._out_port_dict_ids['spike'][out_id] = \
                    self.pm['spike'].ports_to_inds(self._out_port_dict['spike'][out_id])
           
        # Extract identifiers of destination ports in the current module's
        # interface for all modules sending input to the current module:
        self._in_port_dict = {}
        self._in_port_dict['gpot'] = {}
        self._in_port_dict['spike'] = {}
        self._in_port_dict_ids = {}
        self._in_port_dict_ids['gpot'] = {}
        self._in_port_dict_ids['spike'] = {}

        self._in_ids = self.routing_table.src_ids(self.id)
        self._in_ranks = [self.rank_to_id.inv[i] for i in self._in_ids]
        for in_id in self._in_ids:
            self.log_info('extracting input ports for %s' % in_id)

            # Get interfaces of pattern connecting the current module to
            # source module `in_id`; `int_1` is connected to the current
            # module, `int_0` is connected to the other module:
            pat = self.routing_table[in_id, self.id]['pattern']
            int_0 = self.routing_table[in_id, self.id]['int_0']
            int_1 = self.routing_table[in_id, self.id]['int_1']

            # Get ports in interface (`int_1`) connected to the current
            # module that are connected to the other module via the pattern:
            self._in_port_dict['gpot'][in_id] = \
                    pat.dest_idx(int_0, int_1, 'gpot', 'gpot')
            self._in_port_dict_ids['gpot'][in_id] = \
                    self.pm['gpot'].ports_to_inds(self._in_port_dict['gpot'][in_id])
            self._in_port_dict['spike'][in_id] = \
                    pat.dest_idx(int_0, int_1, 'spike', 'spike')
            self._in_port_dict_ids['spike'][in_id] = \
                    self.pm['spike'].ports_to_inds(self._in_port_dict['spike'][in_id])

    def _init_data_in(self):
        """
        Buffers for receiving data from other modules.

        Notes
        -----
        Must be executed after `_init_port_dicts()`.
        """

        # Allocate arrays for receiving data transmitted to the module so that
        # they don't have to be reallocated during every execution step
        # synchronization:
        self.data_in = {}
        self.data_in['gpot'] = {}
        self.data_in['spike'] = {}
        for in_id in self._in_ids:
            self.data_in['gpot'][in_id] = \
                np.empty(np.shape(self._in_port_dict['gpot'][in_id]), 
                         self.pm['gpot'].dtype)
            self.data_in['spike'][in_id] = \
                np.empty(np.shape(self._in_port_dict['spike'][in_id]), 
                         self.pm['spike'].dtype)
            
    def _sync(self):
        """
        Send output data and receive input data.
        """

        if self.time_sync:
            start = time.time()
        req = MPI.Request()
        requests = []

        # For each destination module, extract elements from the current
        # module's port data array, copy them to a contiguous array, and
        # transmit the latter:
        for dest_id, dest_rank in zip(self._out_ids, self._out_ranks):

            # Get source ports in current module that are connected to the
            # destination module:
            data_gpot = self.pm['gpot'].get_by_inds(self._out_port_dict_ids['gpot'][dest_id])
            data_spike = self.pm['spike'].get_by_inds(self._out_port_dict_ids['spike'][dest_id])

            if not self.time_sync:
                self.log_info('gpot data being sent to %s: %s' % \
                              (dest_id, str(data_gpot)))
                self.log_info('spike data being sent to %s: %s' % \
                              (dest_id, str(data_spike)))
            r = MPI.COMM_WORLD.Isend([data_gpot,
                                      dtype_to_mpi(data_gpot.dtype)],
                                     dest_rank, GPOT_TAG)
            requests.append(r)
            r = MPI.COMM_WORLD.Isend([data_spike,
                                      dtype_to_mpi(data_spike.dtype)],
                                     dest_rank, SPIKE_TAG)
            requests.append(r)

            if not self.time_sync:
                self.log_info('sending to %s' % dest_id)
        if not self.time_sync:
            self.log_info('sent all data from %s' % self.id)

        # For each source module, receive elements and copy them into the
        # current module's port data array:
        received_gpot = []
        received_spike = []
        ind_in_gpot_list = []
        ind_in_spike_list = []
        for src_id, src_rank in zip(self._in_ids, self._in_ranks):
            r = MPI.COMM_WORLD.Irecv([self.data_in['gpot'][src_id],
                                      dtype_to_mpi(self.data_in['gpot'][src_id].dtype)],
                                     source=src_rank, tag=GPOT_TAG)
            requests.append(r)
            r = MPI.COMM_WORLD.Irecv([self.data_in['spike'][src_id],
                                      dtype_to_mpi(self.data_in['spike'][src_id].dtype)],
                                     source=src_rank, tag=SPIKE_TAG)
            requests.append(r)
            if not self.time_sync:
                self.log_info('receiving from %s' % src_id)
        req.Waitall(requests)
        if not self.time_sync:
            self.log_info('received all data sent to %s' % self.id)

        # Copy received elements into the current module's data array:
        for src_id in self._in_ids:
            ind_in_gpot = self._in_port_dict_ids['gpot'][src_id]
            self.pm['gpot'].set_by_inds(ind_in_gpot, self.data_in['gpot'][src_id])
            ind_in_spike = self._in_port_dict_ids['spike'][src_id]
            self.pm['spike'].set_by_inds(ind_in_spike, self.data_in['spike'][src_id]) 

        # Save timing data:
        if self.time_sync:
            stop = time.time()
            n_gpot = 0
            n_spike = 0
            for src_id in self._in_ids:
                n_gpot += len(self.data_in['gpot'][src_id])
                n_spike += len(self.data_in['spike'][src_id])
            self.log_info('sent timing data to master')
            self.intercomm.isend(['sync_time',
                                  (self.rank, self.steps, start, stop,
                                   n_gpot*self.pm['gpot'].dtype.itemsize+\
                                   n_spike*self.pm['spike'].dtype.itemsize)],
                                 dest=0, tag=self._ctrl_tag)
        else:
            self.log_info('saved all data received by %s' % self.id)

    def run_step(self):
        """
        Module work method.

        This method should be implemented to do something interesting with new
        input port data in the module's `pm` attribute and update the attribute's
        output port data if necessary. It should not interact with any other
        class attributes.
        """

        self.log_info('running execution step')

    def pre_run(self):
        """
        Code to run before main loop.

        This method is invoked by the `run()` method before the main loop is
        started.
        """

        self.log_info('running code before body of worker %s' % self.rank)

        # Initialize _out_port_dict and _in_port_dict attributes:
        self._init_port_dicts()

        # Initialize data_in attribute:
        self._init_data_in()

        # Start timing the main loop:
        if self.time_sync:
            self.intercomm.isend(['start_time', (self.rank, time.time())],
                                 dest=0, tag=self._ctrl_tag)                
            self.log_info('sent start time to manager')

    def post_run(self):
        """
        Code to run after main loop.

        This method is invoked by the `run()` method after the main loop has
        terminated.
        """

        self.log_info('running code after body of worker %s' % self.rank)

        # Stop timing the main loop before shutting down the emulation:
        if self.time_sync:
            self.intercomm.isend(['stop_time', (self.rank, time.time())],
                                 dest=0, tag=self._ctrl_tag)

            self.log_info('sent stop time to manager')

        # Send acknowledgment message:
        self.intercomm.isend(['done', self.rank], 0, self._ctrl_tag)
        self.log_info('done message sent to manager')

    def run(self):
        """
        Body of process.
        """

        # Don't allow keyboard interruption of process:
        with IgnoreKeyboardInterrupt():

            # Activate execution loop:
            super(Module, self).run()

    def do_work(self):
        """
        Work method.

        This method is repeatedly executed by the Worker instance after the
        instance receives a 'start' control message and until it receives a 'stop'
        control message.
        """

        # If the debug flag is set, don't catch exceptions so that
        # errors will lead to visible failures:
        if self.debug:

            # Run the processing step:
            self.run_step()

            # Synchronize:
            self._sync()
        else:

            # Run the processing step:
            catch_exception(self.run_step, self.log_info)

            # Synchronize:
            catch_exception(self._sync, self.log_info)
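
In this variant `_sync()` only ships the port values actually routed to each peer: `get_by_inds()` gathers them into a contiguous array before sending, and `set_by_inds()` scatters received values back into the port data array. Below is a plain-NumPy sketch of that gather/scatter step; the array contents and index values are invented, and the real PortMapper additionally tracks port identifiers.

import numpy as np

data = np.zeros(6)                   # a module's port data array
out_inds = np.array([0, 2, 4])       # indices of ports routed to one peer
in_inds = np.array([1, 3])           # indices of ports fed by another peer

payload = data[out_inds]             # gather: contiguous copy to transmit
received = np.array([10.0, 20.0])    # values that arrived from the peer
data[in_inds] = received             # scatter: write back into the data array
print(data)                          # -> [ 0. 10.  0. 20.  0.  0.]
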
Example 3
class Module(BaseModule):
    """
    Processing module.

    This class repeatedly executes a work method until it receives
    a quit message via its control port.

    Parameters
    ----------
    sel : str, unicode, or sequence
        Path-like selector describing the module's interface of
        exposed ports.
    sel_in : str, unicode, or sequence
        Selector describing all input ports in the module's interface.
    sel_out : str, unicode, or sequence
        Selector describing all output ports in the module's interface.
    sel_gpot : str, unicode, or sequence
        Selector describing all graded potential ports in the module's
        interface.
    sel_spike : str, unicode, or sequence
        Selector describing all spiking ports in the module's interface.
    data_gpot : numpy.ndarray
        Data array to associate with graded potential ports. Array length
        must equal the number of graded potential ports in the module's interface.
    data_spike : numpy.ndarray
        Data array to associate with spiking ports. Array length
        must equal the number of spiking ports in the module's interface.
    columns : list of str
        Interface port attributes. This list must at least contain
        'interface', 'io', and 'type'.
    port_data : int
        Network port for transmitting data.
    port_ctrl : int
        Network port for controlling the module instance.
    port_time : int
        Network port for transmitting timing data.
    id : str
        Module identifier. If no identifier is specified, a unique
        identifier is automatically generated.
    device : int
        GPU device to use.
    debug : bool
        Debug flag.
    time_sync : bool
        Time synchronization flag. When True, debug messages are not emitted during
        module synchronization and the time taken to receive all incoming data is 
        computed.
        
    Notes
    -----
    A module instance connected to other module instances contains a list of the
    connectivity objects that describe incoming connections and a list of
    masks that select for the neurons whose data must be transmitted to
    destination modules.
    """

    def __init__(self, sel, sel_in, sel_out, 
                 sel_gpot, sel_spike,
                 data_gpot, data_spike,
                 columns=['interface', 'io', 'type'],
                 port_data=PORT_DATA, port_ctrl=PORT_CTRL, port_time=PORT_TIME,
                 id=None, device=None, debug=False, time_sync=False):

        self.debug = debug
        self.time_sync = time_sync
        self.device = device

        # Require several necessary attribute columns:
        assert 'interface' in columns
        assert 'io' in columns
        assert 'type' in columns

        # Generate a unique ID if none is specified:
        if id is None:
            id = uid()

        # Call super for BaseModule rather than Module because most of the
        # functionality of the former's constructor must be overridden in any case:
        super(BaseModule, self).__init__(port_ctrl, id)

        # Reformat logger name:
        LoggerMixin.__init__(self, 'mod %s' % self.id)

        # Data port:
        if port_data == port_ctrl:
            raise ValueError('data and control ports must differ')
        self.port_data = port_data
        if port_time == port_ctrl or port_time == port_data:
            raise ValueError('time port must differ from data and control ports')
        self.port_time = port_time

        # Initial network connectivity:
        self.net = 'none'

        # Create module interface given the specified ports:
        self.interface = Interface(sel, columns)

        # Set the interface ID to 0; we assume that a module only has one
        # interface:
        self.interface[sel, 'interface'] = 0

        # Set port types:
        assert SelectorMethods.is_in(sel_in, sel)
        assert SelectorMethods.is_in(sel_out, sel)
        assert SelectorMethods.are_disjoint(sel_in, sel_out)
        self.interface[sel_in, 'io'] = 'in'
        self.interface[sel_out, 'io'] = 'out'
        assert SelectorMethods.is_in(sel_gpot, sel)
        assert SelectorMethods.is_in(sel_spike, sel)
        assert SelectorMethods.are_disjoint(sel_gpot, sel_spike)
        self.interface[sel_gpot, 'type'] = 'gpot'
        self.interface[sel_spike, 'type'] = 'spike'

        # Set up mapper between port identifiers and their associated data:
        assert len(data_gpot) == len(self.interface.gpot_ports())
        assert len(data_spike) == len(self.interface.spike_ports())
        self.data = {}
        self.data['gpot'] = data_gpot
        self.data['spike'] = data_spike
        self.pm = {}
        self.pm['gpot'] = PortMapper(sel_gpot, self.data['gpot'])
        self.pm['spike'] = PortMapper(sel_spike, self.data['spike'])

        # Patterns connecting this module instance with other module instances.
        # Keyed on the IDs of those modules:
        self.patterns = {}

        # Each entry in pat_ints is a tuple containing the identifier of the
        # pattern interface connected to the current module (first entry) and
        # that of the interface connected to the other module (second entry).
        # Keyed on the IDs of those modules:
        self.pat_ints = {}

        # Dict for storing incoming data; each entry (corresponding to a
        # module that sends input to the current module) is a deque of
        # transmitted data arrays. Deques are used here to accommodate
        # situations in which multiple transmissions from a single source
        # arrive before they are consumed:
        self._in_data = {}

        # List for storing outgoing data; each entry is a tuple whose first
        # entry is the destination module ID and whose second entry is
        # the data to transmit:
        self._out_data = []

        # Dictionaries containing ports of source modules that
        # send output to this module. Must be initialized immediately before
        # an emulation begins running. Keyed on source module ID:
        self._in_port_dict = {}
        self._in_port_dict_ids = {}
        self._in_port_dict['gpot'] = {}
        self._in_port_dict['spike'] = {}

        # Dictionaries containing ports of destination modules that
        # receive input from this module. Must be initialized immediately before
        # an emulation begins running. Keyed on destination module ID:
        self._out_port_dict = {}
        self._out_port_dict_ids = {}
        self._out_port_dict['gpot'] = {}
        self._out_port_dict['spike'] = {}

        self._out_ids = []
        self._in_ids = []

    def _init_gpu(self):
        """
        Initialize GPU device.

        Notes
        -----
        Must be called from within the `run()` method, not from within
        `__init__()`.
        """

        if self.device == None:
            self.log_info('no GPU specified - not initializing ')
        else:

            # Import pycuda.driver here so as to facilitate the
            # subclassing of Module to create pure Python LPUs that don't use GPUs:
            import pycuda.driver as drv
            drv.init()
            try:
                self.gpu_ctx = drv.Device(self.device).make_context()
            except Exception as e:
                self.log_info('_init_gpu exception: ' + e.message)
            else:
                atexit.register(self.gpu_ctx.pop)
                self.log_info('GPU initialized')

    @property
    def N_gpot_ports(self):
        """
        Number of exposed graded-potential ports.
        """

        return len(self.interface.gpot_ports())

    @property
    def N_spike_ports(self):
        """
        Number of exposed spiking ports.
        """

        return len(self.interface.spike_ports())

    def _get_in_data(self):
        """
        Get input data from incoming transmission buffer.

        Populate the data arrays associated with a module's ports using input
        data received from other modules.
        """

        if self.net in ['none', 'ctrl']:
            self.log_info('not retrieving from input buffer')
        else:
            self.log_info('retrieving from input buffer')

            # Since fan-in is not permitted, the data from all source modules
            # must necessarily map to different ports; we can therefore write
            # the data received from each source module to the array associated
            # with the module's ports without worrying about overwriting data
            # from another source:
            for in_id in self._in_ids:
                # Check for exceptions so as to not fail on the first emulation
                # step when there is no input data to retrieve:
                try:

                    # The first entry of `data` contains graded potential values,
                    # while the second contains spiking port values (i.e., 0 or
                    # 1):
                    data = self._in_data[in_id].popleft()
                except IndexError:
                    self.log_info('no input data from [%s] retrieved' % in_id)
                else:
                    self.log_info('input data from [%s] retrieved' % in_id)

                    # Assign transmitted values directly to port data array:
                    if len(self._in_port_dict_ids['gpot'][in_id]) and data[0] is not None:
                        self.pm['gpot'].set_by_inds(self._in_port_dict_ids['gpot'][in_id], data[0])
                    if len(self._in_port_dict_ids['spike'][in_id]) and data[1] is not None:
                        self.pm['spike'].set_by_inds(self._in_port_dict_ids['spike'][in_id], data[1])

    def _put_out_data(self):
        """
        Put specified output data in outgoing transmission buffer.

        Stage data from the data arrays associated with a module's ports for
        output to other modules.

        Notes
        -----
        The output spike port selection algorithm could probably be made faster.
        """

        if self.net in ['none', 'ctrl']:
            self.log_info('not populating output buffer')
        else:
            self.log_info('populating output buffer')

            # Clear output buffer before populating it:
            self._out_data = []

            # Select data that should be sent to each destination module and append
            # it to the outgoing queue:
            for out_id in self._out_ids:
                # Select port data using list of graded potential ports that can
                # transmit output:
                if len(self._out_port_dict_ids['gpot'][out_id]):
                    gpot_data = \
                        self.pm['gpot'].get_by_inds(self._out_port_dict_ids['gpot'][out_id])
                else:
                    gpot_data = None
                if len(self._out_port_dict_ids['spike'][out_id]):
                    spike_data = \
                        self.pm['spike'].get_by_inds(self._out_port_dict_ids['spike'][out_id])
                else:
                    spike_data = None

                # Attempt to stage the emitted port data for transmission:            
                try:
                    self._out_data.append((out_id, (gpot_data, spike_data)))
                except:
                    self.log_info('no output data to [%s] sent' % out_id)
                else:
                    self.log_info('output data to [%s] sent' % out_id)
                
    def run_step(self):
        """
        Module work method.
    
        This method should be implemented to do something interesting with new 
        input port data in the module's `pm` attribute and update the attribute's
        output port data if necessary. It should not interact with any other 
        class attributes.
        """

        self.log_info('running execution step')

    def _init_port_dicts(self):
        """
        Initialize dictionaries of source/destination ports in the current module.
        """

        # Extract identifiers of source ports in the current module's interface
        # for all modules receiving output from the current module:
        self._out_port_dict['gpot'] = {}
        self._out_port_dict['spike'] = {}
        self._out_port_dict_ids['gpot'] = {}
        self._out_port_dict_ids['spike'] = {}

        self._out_ids = self.out_ids
        for out_id in self._out_ids:
            self.log_info('extracting output ports for %s' % out_id)

            # Get interfaces of pattern connecting the current module to
            # destination module `out_id`; `from_int` is connected to the
            # current module, `to_int` is connected to the other module:
            from_int, to_int = self.pat_ints[out_id]

            # Get ports in interface (`from_int`) connected to the current
            # module that are connected to the other module via the pattern:
            self._out_port_dict['gpot'][out_id] = \
                self.patterns[out_id].src_idx(from_int, to_int,
                                              'gpot', 'gpot')
            self._out_port_dict_ids['gpot'][out_id] = \
                self.pm['gpot'].ports_to_inds(self._out_port_dict['gpot'][out_id])
            self._out_port_dict['spike'][out_id] = \
                self.patterns[out_id].src_idx(from_int, to_int,
                                              'spike', 'spike')
            self._out_port_dict_ids['spike'][out_id] = \
                self.pm['spike'].ports_to_inds(self._out_port_dict['spike'][out_id])
                                                              
        # Extract identifiers of destination ports in the current module's
        # interface for all modules sending input to the current module:
        self._in_port_dict['gpot'] = {}
        self._in_port_dict['spike'] = {}
        self._in_port_dict_ids['gpot'] = {}
        self._in_port_dict_ids['spike'] = {}

        self._in_ids = self.in_ids
        for in_id in self._in_ids:
            self.log_info('extracting input ports for %s' % in_id)

            # Get interfaces of pattern connecting the current module to
            # source module `in_id`; `to_int` is connected to the current
            # module, `from_int` is connected to the other module:
            to_int, from_int = self.pat_ints[in_id]

            # Get ports in interface (`to_int`) connected to the current
            # module that are connected to the other module via the pattern:
            self._in_port_dict['gpot'][in_id] = \
                self.patterns[in_id].dest_idx(from_int, to_int,
                                              'gpot', 'gpot')
            self._in_port_dict_ids['gpot'][in_id] = \
                self.pm['gpot'].ports_to_inds(self._in_port_dict['gpot'][in_id])
            self._in_port_dict['spike'][in_id] = \
                self.patterns[in_id].dest_idx(from_int, to_int,
                                              'spike', 'spike')
            self._in_port_dict_ids['spike'][in_id] = \
                self.pm['spike'].ports_to_inds(self._in_port_dict['spike'][in_id])

    def pre_run(self, *args, **kwargs):
        """
        Code to run before main module run loop.

        Code in this method will be executed after a module's process has been
        launched and all connectivity objects made available, but before the
        main run loop begins. Initialization routines (such as GPU
        initialization) should be performed in this method.
        """

        pass

    def run(self):
        """
        Body of process.
        """

        # Don't allow keyboard interruption of process:
        self.log_info('starting')
        with IgnoreKeyboardInterrupt():

            # Initialize environment:
            self._init_net()
            
            # Initialize _out_port_dict and _in_port_dict attributes:
            self._init_port_dicts()

            # Initialize buffer for incoming data. A dict is used to store the
            # incoming data keyed by the source module ID; each value is a
            # deque buffering the received data:
            self._in_data = {k: collections.deque() for k in self._in_ids}

            # Perform any pre-emulation operations:
            self.pre_run()

            self.running = True
            self.steps = 0
            if self.time_sync:
                self.sock_time.send(msgpack.packb((self.id, self.steps, 'start',
                                                   time.time())))
                self.log_info('sent start time to master')

            # Counter for number of steps between synchronizations:
            steps_since_sync = 0
            while self.steps < self.max_steps:
                self.log_info('execution step: %s/%s' % (self.steps, self.max_steps))

                # If the debug flag is set, don't catch exceptions so that
                # errors will lead to visible failures:
                if self.debug:

                    # Run the processing step:
                    self.run_step()

                    # Do post-processing:
                    self.post_run_step()

                    # Synchronize:
                    if steps_since_sync == self.sync_period:
                        self._sync()
                        steps_since_sync = 0
                    else:
                        self.log_info('skipping sync (%s/%s)' % \
                                      (steps_since_sync, self.sync_period))
                        steps_since_sync += 1

                else:

                    # Run the processing step:
                    catch_exception(self.run_step, self.log_info)

                    # Do post processing:
                    catch_exception(self.post_run_step, self.log_info)

                    # Synchronize:
                    if steps_since_sync == self.sync_period:
                        catch_exception(self._sync, self.log_info)
                        steps_since_sync = 0
                    else:
                        self.log_info('skipping sync (%s/%s)' % \
                                      (steps_since_sync, self.sync_period))
                        steps_since_sync += 1

                # Exit run loop when a quit signal has been received:
                if not self.running:
                    self.log_info('run loop stopped')
                    break

                self.steps += 1
            if self.time_sync:
                self.sock_time.send(msgpack.packb((self.id, self.steps, 'stop',
                                                   time.time())))
                self.log_info('sent stop time to master')
            self.log_info('maximum number of steps reached')

            # Perform any post-emulation operations:
            self.post_run()

            # Shut down the control handler and inform the manager that the
            # module has shut down:
            self._ctrl_stream_shutdown()
            ack = 'shutdown'
            self.sock_ctrl.send(ack)
            self.log_info('sent to manager: %s' % ack)
                
        self.log_info('exiting')
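
The ZeroMQ-based variant above buffers incoming transmissions in one `collections.deque` per source module (the `_in_data` attribute), so that several messages from the same source can be queued before the module consumes them. A small standalone sketch of that bookkeeping, with invented module IDs and payloads:

import collections

in_ids = ['m0', 'm1']                              # hypothetical source module IDs
in_data = {k: collections.deque() for k in in_ids}

# Two transmissions from 'm0' queue up before they are read:
in_data['m0'].append(([0.5, 0.7], [0, 1]))         # (gpot values, spike values)
in_data['m0'].append(([0.6, 0.8], [1, 0]))

while in_data['m0']:
    gpot, spike = in_data['m0'].popleft()          # consume in arrival order
    print(gpot, spike)
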
Example 4
class Module(mpi.Worker):
    """
    Processing module.

    This class repeatedly executes a work method until it receives a
    quit message via its control network port.

    Parameters
    ----------
    sel : str, unicode, or sequence
        Path-like selector describing the module's interface of
        exposed ports.
    sel_in, sel_out, sel_gpot, sel_spike : str, unicode, or sequence
        Selectors respectively describing all input, output, graded potential,
        and spiking ports in the module's interface.
    data_gpot, data_spike : numpy.ndarray
        Data arrays associated with the graded potential and spiking ports in
        the module's interface. The array lengths must respectively equal the
        number of graded potential and spiking ports in the interface.
    columns : list of str
        Interface port attributes. This list must at least contain
        'interface', 'io', and 'type'.
    ctrl_tag, gpot_tag, spike_tag : int
        MPI tags that respectively identify messages containing control data,
        graded potential port values, and spiking port values transmitted to 
        worker nodes.
    id : str
        Module identifier. If no identifier is specified, a unique
        identifier is automatically generated.
    device : int
        GPU device to use. May be set to None if the module does not perform
        GPU processing.
    routing_table : neurokernel.routing_table.RoutingTable
        Routing table describing data connections between modules. If no routing
        table is specified, the module will be executed in isolation.
    rank_to_id : bidict.bidict
        Mapping between MPI ranks and module object IDs.
    debug : bool
        Debug flag. When True, exceptions raised during the work method are
        not suppressed.
    time_sync : bool
        Time synchronization flag. When True, debug messages are not emitted
        during module synchronization and the time taken to receive all incoming
        data is computed.

    Attributes
    ----------
    interface : Interface
        Object containing information about a module's ports.
    pm : dict
        `pm['gpot']` and `pm['spike']` are instances of neurokernel.pm_gpu.PortMapper that
        map a module's ports to the corresponding values in `data`.
    data : dict
        `data['gpot']` and `data['spike']` are arrays of data associated with 
        a module's graded potential and spiking ports.
    """

    def __init__(self, sel, sel_in, sel_out,
                 sel_gpot, sel_spike, data_gpot, data_spike,
                 columns=['interface', 'io', 'type'],
                 ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG, spike_tag=SPIKE_TAG,
                 id=None, device=None,
                 routing_table=None, rank_to_id=None,
                 debug=False, time_sync=False):

        super(Module, self).__init__(ctrl_tag)
        self.debug = debug
        self.time_sync = time_sync
        self.device = device

        self._gpot_tag = gpot_tag
        self._spike_tag = spike_tag

        # Require several necessary attribute columns:
        if 'interface' not in columns:
            raise ValueError('interface column required')
        if 'io' not in columns:
            raise ValueError('io column required')
        if 'type' not in columns:
            raise ValueError('type column required')

        # Initialize GPU here so as to be able to initialize a port mapper
        # containing GPU memory:
        self._init_gpu()

        # This is needed to ensure that MPI_Finalize is called before PyCUDA
        # attempts to clean up; see
        # https://groups.google.com/forum/#!topic/mpi4py/by0Rd5q0Ayw
        atexit.register(MPI.Finalize)

        # Manually register the file close method associated with MPIOutput
        # so that it is called by atexit before MPI.Finalize() (if the file is
        # closed after MPI.Finalize() is called, an error will occur):
        for k, v in twiggy.emitters.iteritems():
            if isinstance(v._output, MPIOutput):
                atexit.register(v._output.close)

        # Ensure that the input and output port selectors respectively
        # select mutually exclusive subsets of the set of all ports exposed by
        # the module:
        if not SelectorMethods.is_in(sel_in, sel):
            raise ValueError('input port selector not in selector of all ports')
        if not SelectorMethods.is_in(sel_out, sel):
            raise ValueError('output port selector not in selector of all ports')
        if not SelectorMethods.are_disjoint(sel_in, sel_out):
            raise ValueError('input and output port selectors not disjoint')

        # Ensure that the graded potential and spiking port selectors
        # respectively select mutually exclusive subsets of the set of all ports
        # exposed by the module:
        if not SelectorMethods.is_in(sel_gpot, sel):
            raise ValueError('gpot port selector not in selector of all ports')
        if not SelectorMethods.is_in(sel_spike, sel):
            raise ValueError('spike port selector not in selector of all ports')
        if not SelectorMethods.are_disjoint(sel_gpot, sel_spike):
            raise ValueError('gpot and spike port selectors not disjoint')

        # Save routing table and mapping between MPI ranks and module IDs:
        self.routing_table = routing_table
        self.rank_to_id = rank_to_id

        # Generate a unique ID if none is specified:
        if id is None:
            self.id = uid()
        else:

            # If a unique ID was specified and the routing table is not empty
            # (i.e., there are connections between multiple modules), the id
            # must be a node in the routing table:
            if routing_table is not None and len(routing_table.ids) and \
                    not routing_table.has_node(id):
                raise ValueError('routing table must contain specified '
                                 'module ID: {}'.format(id))
            self.id = id

        # Reformat logger name:
        LoggerMixin.__init__(self, 'mod %s' % self.id)

        # Create module interface given the specified ports:
        self.interface = Interface(sel, columns)

        # Set the interface ID to 0; we assume that a module only has one interface:
        self.interface[sel, 'interface'] = 0

        # Set the port attributes:
        self.interface[sel_in, 'io'] = 'in'
        self.interface[sel_out, 'io'] = 'out'
        self.interface[sel_gpot, 'type'] = 'gpot'
        self.interface[sel_spike, 'type'] = 'spike'

        # Find the input and output ports:
        self.in_ports = self.interface.in_ports().to_tuples()
        self.out_ports = self.interface.out_ports().to_tuples()

        # Find the graded potential and spiking ports:
        self.gpot_ports = self.interface.gpot_ports().to_tuples()
        self.spike_ports = self.interface.spike_ports().to_tuples()

        self.in_gpot_ports = self.interface.in_ports().gpot_ports().to_tuples()
        self.in_spike_ports = self.interface.in_ports().spike_ports().to_tuples()
        self.out_gpot_ports = self.interface.out_ports().gpot_ports().to_tuples()
        self.out_spike_ports = self.interface.out_ports().spike_ports().to_tuples()

        # Set up mapper between port identifiers and their associated data:
        if len(data_gpot) != len(self.gpot_ports):
            raise ValueError('incompatible gpot port data array length')
        if len(data_spike) != len(self.spike_ports):
            raise ValueError('incompatible spike port data array length')
        self.data = {}
        self.data['gpot'] = gpuarray.to_gpu(data_gpot)
        self.data['spike'] = gpuarray.to_gpu(data_spike)

        self.pm = {}
        self.pm['gpot'] = GPUPortMapper(sel_gpot, self.data['gpot'], make_copy=False)
        self.pm['spike'] = GPUPortMapper(sel_spike, self.data['spike'], make_copy=False)

        # MPI Request object for resolving asynchronous transfers:
        self.req = MPI.Request()

    def _init_gpu(self):
        """
        Initialize GPU device.

        Notes
        -----
        Must be called from within the `run()` method, not from within
        `__init__()`.
        """

        if self.device is None:
            self.log_info('no GPU specified - not initializing')
        else:

            # Import pycuda.driver here so as to facilitate the
            # subclassing of Module to create pure Python LPUs that don't use GPUs:
            import pycuda.driver as drv
            drv.init()

            N_gpu = drv.Device.count()
            if self.device >= N_gpu:
                new_device = randint(0, N_gpu - 1)
                self.log_warning("GPU device %d not in GPU devices %s" %
                                 (self.device, str(range(0, N_gpu))))
                self.log_warning("Setting device = %d" % new_device)
                self.device = new_device

            try:
                self.gpu_ctx = drv.Device(self.device).make_context()
            except Exception as e:
                self.log_info('_init_gpu exception: ' + e.message)
            else:
                atexit.register(self.gpu_ctx.pop)
                self.log_info('GPU %s initialized' % self.device)

    def _init_port_dicts(self):
        """
        Initialize dictionaries of source/destination ports in the current module.
        """

        # Extract identifiers of source ports in the current module's interface
        # for all modules receiving output from the current module:
        self._out_port_dict_ids = {}
        self._out_port_dict_ids['gpot'] = {}
        self._out_port_dict_ids['spike'] = {}

        self._out_ids = self.routing_table.dest_ids(self.id)
        self._out_ranks = [self.rank_to_id.inv[i] for i in self._out_ids]
        for out_id in self._out_ids:
            self.log_info('extracting output ports for %s' % out_id)

            # Get interfaces of pattern connecting the current module to
            # destination module `out_id`; `int_0` is connected to the
            # current module, `int_1` is connected to the other module:
            pat = self.routing_table[self.id, out_id]['pattern']
            int_0 = self.routing_table[self.id, out_id]['int_0']
            int_1 = self.routing_table[self.id, out_id]['int_1']

            # Get ports in interface (`int_0`) connected to the current
            # module that are connected to the other module via the pattern:
            self._out_port_dict_ids['gpot'][out_id] = \
                gpuarray.to_gpu(self.pm['gpot'].ports_to_inds(pat.src_idx(int_0, int_1, 'gpot', 'gpot')))
            self._out_port_dict_ids['spike'][out_id] = \
                gpuarray.to_gpu(self.pm['spike'].ports_to_inds(pat.src_idx(int_0, int_1, 'spike', 'spike')))

        # Extract identifiers of destination ports in the current module's
        # interface for all modules sending input to the current module:
        self._in_port_dict_ids = {}
        self._in_port_dict_ids['gpot'] = {}
        self._in_port_dict_ids['spike'] = {}

        # Extract indices corresponding to the entries in the transmitted
        # buffers that must be copied into the input port map data arrays; these
        # are needed to support fan-out:
        self._in_port_dict_buf_ids = {}
        self._in_port_dict_buf_ids['gpot'] = {}
        self._in_port_dict_buf_ids['spike'] = {}

        # Lengths of input buffers:
        self._in_buf_len = {} 
        self._in_buf_len['gpot'] = {}
        self._in_buf_len['spike'] = {}

        self._in_ids = self.routing_table.src_ids(self.id)
        self._in_ranks = [self.rank_to_id.inv[i] for i in self._in_ids]
        for in_id in self._in_ids:
            self.log_info('extracting input ports for %s' % in_id)

            # Get interfaces of pattern connecting the current module to
            # source module `in_id`; `int_1` is connected to the current
            # module, `int_0` is connected to the other module:
            pat = self.routing_table[in_id, self.id]['pattern']
            int_0 = self.routing_table[in_id, self.id]['int_0']
            int_1 = self.routing_table[in_id, self.id]['int_1']

            # Get ports in interface (`int_1`) connected to the current
            # module that are connected to the other module via the pattern:
            self._in_port_dict_ids['gpot'][in_id] = \
                gpuarray.to_gpu(self.pm['gpot'].ports_to_inds(pat.dest_idx(int_0, int_1, 'gpot', 'gpot')))
            self._in_port_dict_ids['spike'][in_id] = \
                gpuarray.to_gpu(self.pm['spike'].ports_to_inds(pat.dest_idx(int_0, int_1, 'spike', 'spike')))

            # Get the integer indices associated with the connected source ports
            # in the pattern interface connected to the source module `in_id`;
            # these are needed to copy received buffer contents into the current
            # module's port map data array:
            self._in_port_dict_buf_ids['gpot'][in_id] = \
                np.array(renumber_in_order(BasePortMapper(pat.gpot_ports(int_0).to_tuples()).
                        ports_to_inds(pat.src_idx(int_0, int_1, 'gpot', 'gpot', duplicates=True))))
            self._in_port_dict_buf_ids['spike'][in_id] = \
                np.array(renumber_in_order(BasePortMapper(pat.spike_ports(int_0).to_tuples()).
                        ports_to_inds(pat.src_idx(int_0, int_1, 'spike', 'spike', duplicates=True))))

            # The size of the input buffer to the current module must be the
            # same length as the output buffer of module `in_id`:
            self._in_buf_len['gpot'][in_id] = len(pat.src_idx(int_0, int_1, 'gpot', 'gpot'))
            self._in_buf_len['spike'][in_id] = len(pat.src_idx(int_0, int_1, 'spike', 'spike'))

    def _init_comm_bufs(self):
        """
        Buffers for sending/receiving data from other modules.

        Notes
        -----
        Must be executed after `_init_port_dicts()`.
        """

        # Buffers (and their interfaces and MPI types) for receiving data
        # transmitted from source modules:
        self._in_buf = {}
        self._in_buf['gpot'] = {}
        self._in_buf['spike'] = {}
        self._in_buf_int = {}
        self._in_buf_int['gpot'] = {}
        self._in_buf_int['spike'] = {}
        self._in_buf_mtype = {}
        self._in_buf_mtype['gpot'] = {}
        self._in_buf_mtype['spike'] = {}
        for in_id in self._in_ids:
            n_gpot = self._in_buf_len['gpot'][in_id]
            if n_gpot:
                self._in_buf['gpot'][in_id] = \
                    gpuarray.empty(n_gpot, self.pm['gpot'].dtype)
                self._in_buf_int['gpot'][in_id] = \
                    bufint(self._in_buf['gpot'][in_id])
                self._in_buf_mtype['gpot'][in_id] = \
                    dtype_to_mpi(self._in_buf['gpot'][in_id].dtype)
            else:
                self._in_buf['gpot'][in_id] = None

            n_spike = self._in_buf_len['spike'][in_id]
            if n_spike:
                self._in_buf['spike'][in_id] = \
                    gpuarray.empty(n_spike, self.pm['spike'].dtype)
                self._in_buf_int['spike'][in_id] = \
                    bufint(self._in_buf['spike'][in_id])
                self._in_buf_mtype['spike'][in_id] = \
                    dtype_to_mpi(self._in_buf['spike'][in_id].dtype)
            else:
                self._in_buf['spike'][in_id] = None

        # Buffers (and their interfaces and MPI types) for transmitting data to
        # destination modules:
        self._out_buf = {}
        self._out_buf['gpot'] = {}
        self._out_buf['spike'] = {}
        self._out_buf_int = {}
        self._out_buf_int['gpot'] = {}
        self._out_buf_int['spike'] = {}
        self._out_buf_mtype = {}
        self._out_buf_mtype['gpot'] = {}
        self._out_buf_mtype['spike'] = {}
        for out_id in self._out_ids:
            n_gpot = len(self._out_port_dict_ids['gpot'][out_id])
            if n_gpot:
                self._out_buf['gpot'][out_id] = \
                    gpuarray.empty(n_gpot, self.pm['gpot'].dtype)
                self._out_buf_int['gpot'][out_id] = \
                    bufint(self._out_buf['gpot'][out_id])
                self._out_buf_mtype['gpot'][out_id] = \
                    dtype_to_mpi(self._out_buf['gpot'][out_id].dtype)
            else:
                self._out_buf['gpot'][out_id] = None

            n_spike = len(self._out_port_dict_ids['spike'][out_id])
            if n_spike:
                self._out_buf['spike'][out_id] = \
                    gpuarray.empty(n_spike, self.pm['spike'].dtype)
                self._out_buf_int['spike'][out_id] = \
                    bufint(self._out_buf['spike'][out_id])
                self._out_buf_mtype['spike'][out_id] = \
                    dtype_to_mpi(self._out_buf['spike'][out_id].dtype)
            else:
                self._out_buf['spike'][out_id] = None

    def _sync(self):
        """
        Send output data and receive input data.
        """

        if self.time_sync:
            start = time.time()
        requests = []

        # For each destination module, extract elements from the current
        # module's port data array, copy them to a contiguous array, and
        # transmit the latter:
        for dest_id, dest_rank in zip(self._out_ids, self._out_ranks):

            # Copy data into destination buffer:
            if self._out_buf['gpot'][dest_id] is not None:
                set_by_inds(self._out_buf['gpot'][dest_id],
                            self._out_port_dict_ids['gpot'][dest_id],
                            self.data['gpot'], 'src')
                if not self.time_sync:
                    self.log_info('gpot data sent to %s: %s' % \
                                  (dest_id, str(self._out_buf['gpot'][dest_id])))
                r = MPI.COMM_WORLD.Isend([self._out_buf_int['gpot'][dest_id],
                                          self._out_buf_mtype['gpot'][dest_id]],
                                         dest_rank, GPOT_TAG)
                requests.append(r)
            if self._out_buf['spike'][dest_id] is not None:
                set_by_inds(self._out_buf['spike'][dest_id],
                            self._out_port_dict_ids['spike'][dest_id],
                            self.data['spike'], 'src')
                if not self.time_sync:
                    self.log_info('spike data sent to %s: %s' % \
                                  (dest_id, str(self._out_buf['spike'][dest_id])))
                r = MPI.COMM_WORLD.Isend([self._out_buf_int['spike'][dest_id],
                                          self._out_buf_mtype['spike'][dest_id]],
                                         dest_rank, SPIKE_TAG)
                requests.append(r)
            if not self.time_sync:
                self.log_info('sending to %s' % dest_id)
        if not self.time_sync:
            self.log_info('sent all data from %s' % self.id)

        # For each source module, receive elements and copy them into the
        # current module's port data array:
        for src_id, src_rank in zip(self._in_ids, self._in_ranks):
            if self._in_buf['gpot'][src_id] is not None:
                r = MPI.COMM_WORLD.Irecv([self._in_buf_int['gpot'][src_id],
                                          self._in_buf_mtype['gpot'][src_id]],
                                         source=src_rank, tag=GPOT_TAG)
                requests.append(r)
            if self._in_buf['spike'][src_id] is not None:
                r = MPI.COMM_WORLD.Irecv([self._in_buf_int['spike'][src_id],
                                          self._in_buf_mtype['spike'][src_id]],
                                         source=src_rank, tag=SPIKE_TAG)
                requests.append(r)
            if not self.time_sync:
                self.log_info('receiving from %s' % src_id)
        if requests:
            self.req.Waitall(requests)
        if not self.time_sync:
            self.log_info('all data were received by %s' % self.id)

        # Copy received elements into the current module's data array:
        for src_id in self._in_ids:
            if self._in_buf['gpot'][src_id] is not None:
                if not self.time_sync:
                    self.log_info('gpot data received from %s: %s' % \
                                  (src_id, str(self._in_buf['gpot'][src_id])))
                set_by_inds_from_inds(self.data['gpot'],
                                      self._in_port_dict_ids['gpot'][src_id],
                                      self._in_buf['gpot'][src_id],
                                      self._in_port_dict_buf_ids['gpot'][src_id])
            if self._in_buf['spike'][src_id] is not None:
                if not self.time_sync:
                    self.log_info('spike data received from %s: %s' % \
                                  (src_id, str(self._in_buf['spike'][src_id])))
                set_by_inds_from_inds(self.data['spike'],
                                      self._in_port_dict_ids['spike'][src_id],
                                      self._in_buf['spike'][src_id],
                                      self._in_port_dict_buf_ids['spike'][src_id])

        # Save timing data:
        if self.time_sync:
            stop = time.time()
            n_gpot = 0
            n_spike = 0
            for src_id in self._in_ids:
                n_gpot += len(self._in_buf['gpot'][src_id])
                n_spike += len(self._in_buf['spike'][src_id])
            self.log_info('sent timing data to master')
            self.intercomm.isend(['sync_time',
                                  (self.rank, self.steps, start, stop,
                                   n_gpot*self.pm['gpot'].dtype.itemsize+\
                                   n_spike*self.pm['spike'].dtype.itemsize)],
                                 dest=0, tag=self._ctrl_tag)
        else:
            self.log_info('saved all data received by %s' % self.id)

    def pre_run(self):
        """
        Code to run before main loop.

        This method is invoked by the `run()` method before the main loop is
        started.
        """

        self.log_info('running code before body of worker %s' % self.rank)

        # Initialize _out_port_dict and _in_port_dict attributes:
        self._init_port_dicts()

        # Initialize transmission buffers:
        self._init_comm_bufs()

        # Start timing the main loop:
        if self.time_sync:
            self.intercomm.isend(['start_time', (self.rank, time.time())],
                                 dest=0, tag=self._ctrl_tag)                
            self.log_info('sent start time to manager')

    def post_run(self):
        """
        Code to run after main loop.

        This method is invoked by the `run()` method after the main loop has
        finished.
        """

        self.log_info('running code after body of worker %s' % self.rank)

        # Stop timing the main loop before shutting down the emulation:
        if self.time_sync:
            self.intercomm.isend(['stop_time', (self.rank, time.time())],
                                 dest=0, tag=self._ctrl_tag)

            self.log_info('sent stop time to manager')

        # Send acknowledgment message:
        self.intercomm.isend(['done', self.rank], 0, self._ctrl_tag)
        self.log_info('done message sent to manager')

    def run_step(self):
        """
        Module work method.

        This method should be implemented to do something interesting with new
        input port data in the module's `pm` attribute and update the attribute's
        output port data if necessary. It should not interact with any other
        class attributes.
        """

        self.log_info('running execution step')

    def run(self):
        """
        Body of process.
        """

        # Don't allow keyboard interruption of process:
        with IgnoreKeyboardInterrupt():

            # Activate execution loop:
            super(Module, self).run()

    def do_work(self):
        """
        Work method.

        This method is repeatedly executed by the Worker instance after the
        instance receives a 'start' control message and until it receives a 'stop'
        control message.
        """

        # If the debug flag is set, don't catch exceptions so that
        # errors will lead to visible failures:
        if self.debug:

            # Run the processing step:
            self.run_step()

            # Synchronize:
            self._sync()
        else:

            # Run the processing step:
            catch_exception(self.run_step, self.log_info)

            # Synchronize:
            catch_exception(self._sync, self.log_info)
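
The `run_step` docstring above states that a subclass should consume newly received input port data via `pm` and update output port data in place. Below is a minimal, hypothetical sketch of such a subclass; the class name and constant output values are illustrative only, and selector-style item assignment on `pm['gpot']` is assumed to behave as the port mappers constructed above suggest.

import numpy as np

# Hypothetical sketch: assumes pm['gpot'] supports selector-based item
# assignment as implied by the listing above.
class MyModule(Module):
    """Toy module that drives all of its output gpot ports with a constant."""

    def run_step(self):
        super(MyModule, self).run_step()

        # Inspect the data most recently scattered into the input ports:
        self.log_info('current gpot data: %s' % str(self.data['gpot']))

        # Write a constant value to every output graded potential port; the
        # port mapper was built with make_copy=False, so this updates
        # self.data['gpot'] in place:
        if len(self.out_gpot_ports):
            self.pm['gpot'][self.out_gpot_ports] = \
                np.ones(len(self.out_gpot_ports), self.pm['gpot'].dtype)
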
Example No. 5
0
class Module(BaseModule):
    def __init__(self, sel, sel_in, sel_out,
                 sel_gpot, sel_spike, data_gpot, data_spike,
                 columns=['interface', 'io', 'type'],
                 ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG, spike_tag=SPIKE_TAG,
                 id=None, device=None,
                 routing_table=None, rank_to_id=None, pm_all=None,
                 debug=False, time_sync=False):

        # Call super for BaseModule rather than Module because most of the
        # functionality of the former's constructor must be overridden in any case:
        super(BaseModule, self).__init__(ctrl_tag)
        self.debug = debug
        self.time_sync = time_sync
        self.device = device

        self._gpot_tag = gpot_tag
        self._spike_tag = spike_tag

        # Require several necessary attribute columns:
        assert 'interface' in columns
        assert 'io' in columns
        assert 'type' in columns

        self._init_gpu()

        # This is needed to ensure that MPI_Finalize is called before PyCUDA
        # attempts to clean up; see
        # https://groups.google.com/forum/#!topic/mpi4py/by0Rd5q0Ayw
        atexit.register(MPI.Finalize)

        # Manually register the file close method associated with MPIOutput
        # so that it is called by atexit before MPI.Finalize() (if the file is
        # closed after MPI.Finalize() is called, an error will occur):
        for k, v in twiggy.emitters.iteritems():
            if isinstance(v._output, MPIOutput):
                atexit.register(v._output.close)

        # Ensure that the input and output port selectors respectively
        # select mutually exclusive subsets of the set of all ports exposed by
        # the module:
        assert SelectorMethods.is_in(sel_in, sel)
        assert SelectorMethods.is_in(sel_out, sel)
        assert SelectorMethods.are_disjoint(sel_in, sel_out)

        # Ensure that the graded potential and spiking port selectors
        # respectively select mutually exclusive subsets of the set of all ports
        # exposed by the module:
        assert SelectorMethods.is_in(sel_gpot, sel)
        assert SelectorMethods.is_in(sel_spike, sel)
        assert SelectorMethods.are_disjoint(sel_gpot, sel_spike)

        # Save routing table and mapping between MPI ranks and module IDs:
        self.routing_table = routing_table
        self.rank_to_id = rank_to_id

        # Save module interface data (stored in a dict of BasePortMapper instances):
        self.pm_all = pm_all

        # Generate a unique ID if none is specified:
        if id is None:
            self.id = uid()
        else:

            # If a unique ID was specified, it must be a node in
            # the routing table:
            if routing_table is not None and not routing_table.has_node(id):
                raise ValueError('routing table must contain specified module ID')
            self.id = id

        # Reformat logger name:
        LoggerMixin.__init__(self, 'mod %s' % self.id)

        # Create module interface given the specified ports:
        self.interface = Interface(sel, columns)

        # Set the interface ID to 0; we assume that a module only has one interface:
        self.interface[sel, 'interface'] = 0

        # Set the port attributes:
        self.interface[sel_in, 'io'] = 'in'
        self.interface[sel_out, 'io'] = 'out'
        self.interface[sel_gpot, 'type'] = 'gpot'
        self.interface[sel_spike, 'type'] = 'spike'

        # Find the input and output ports:
        self.in_ports = self.interface.in_ports().to_tuples()
        self.out_ports = self.interface.out_ports().to_tuples()

        # Find the graded potential and spiking ports:
        self.gpot_ports = self.interface.gpot_ports().to_tuples()
        self.spike_ports = self.interface.spike_ports().to_tuples()

        self.in_gpot_ports = self.interface.in_ports().gpot_ports().to_tuples()
        self.in_spike_ports = self.interface.in_ports().spike_ports().to_tuples()
        self.out_gpot_ports = self.interface.out_ports().gpot_ports().to_tuples()
        self.out_spike_ports = self.interface.out_ports().spike_ports().to_tuples()

        # Set up mapper between port identifiers and their associated data:
        assert len(data_gpot) == len(self.gpot_ports)
        assert len(data_spike) == len(self.spike_ports)
        self.data = {}
        self.data['gpot'] = gpuarray.to_gpu(data_gpot)
        self.data['spike'] = gpuarray.to_gpu(data_spike)
        self.pm = {}
        self.pm['gpot'] = GPUPortMapper(sel_gpot, self.data['gpot'], make_copy=False)
        self.pm['spike'] = GPUPortMapper(sel_spike, self.data['spike'], make_copy=False)

    def _init_port_dicts(self):
        """
        Initialize dictionaries of source/destination ports in the current module.
        """

        # Extract identifiers of source ports in all modules sending input to
        # the current module's ports and of destination ports in the current
        # module's interface for all modules sending input to the current
        # module:           
        self._in_port_dict = {}
        self._in_port_dict['gpot'] = {}
        self._in_port_dict['spike'] = {}
        self._in_port_dict_ids = {}
        self._in_port_dict_ids['gpot'] = {}
        self._in_port_dict_ids['spike'] = {}

        self._from_port_dict = {}
        self._from_port_dict['gpot'] = {}
        self._from_port_dict['spike'] = {}
        self._from_port_dict_ids = {}
        self._from_port_dict_ids['gpot'] = {}
        self._from_port_dict_ids['spike'] = {}

        self._in_ids = self.routing_table.src_ids(self.id)
        for in_id in self._in_ids:
            self.log_info('extracting input ports for %s' % in_id)

            # Get interfaces of pattern connecting the current module to
            # source module `in_id`; `int_1` is connected to the current
            # module, `int_0` is connected to the other module:
            pat = self.routing_table[in_id, self.id]['pattern']
            int_0 = self.routing_table[in_id, self.id]['int_0']
            int_1 = self.routing_table[in_id, self.id]['int_1']

            # Get ports in interface (`int_1`) connected to the current
            # module that are connected to the other module via the pattern:
            self._in_port_dict['gpot'][in_id] = \
                    pat.dest_idx(int_0, int_1, 'gpot', 'gpot')
            self._in_port_dict_ids['gpot'][in_id] = \
                    self.pm['gpot'].ports_to_inds(self._in_port_dict['gpot'][in_id])
            self._in_port_dict['spike'][in_id] = \
                    pat.dest_idx(int_0, int_1, 'spike', 'spike')
            self._in_port_dict_ids['spike'][in_id] = \
                    self.pm['spike'].ports_to_inds(self._in_port_dict['spike'][in_id])

            # Get ports in interface (`int_0`) connected to the other module
            # that are connected to the current module via the pattern:
            self._from_port_dict['gpot'][in_id] = \
                    pat.src_idx(int_0, int_1, 'gpot', 'gpot')
            self._from_port_dict_ids['gpot'][in_id] = \
                self.pm_all['gpot'][in_id].ports_to_inds(self._from_port_dict['gpot'][in_id])
            self._from_port_dict['spike'][in_id] = \
                    pat.src_idx(int_0, int_1, 'spike', 'spike')
            self._from_port_dict_ids['spike'][in_id] = \
                self.pm_all['spike'][in_id].ports_to_inds(self._from_port_dict['spike'][in_id])

    def _init_comm_bufs(self):
        """
        Buffers for receiving data from other modules.

        Notes
        -----
        Must be executed after `_init_port_dicts()`.
        """

        # Buffer interface to and MPI type of this module's port data array:
        self._data_int = {}
        self._data_int['gpot'] = bufint(self.data['gpot'])
        self._data_int['spike'] = bufint(self.data['spike'])
        self._data_mtype = {}        
        self._data_mtype['gpot'] = dtype_to_mpi(self.data['gpot'].dtype)
        self._data_mtype['spike'] = dtype_to_mpi(self.data['spike'].dtype)

        # Buffers (and their interfaces and MPI types) for receiving data
        # transmitted from source modules:
        self._in_buf = {}
        self._in_buf['gpot'] = {}
        self._in_buf['spike'] = {}
        self._in_buf_int = {}
        self._in_buf_int['gpot'] = {}
        self._in_buf_int['spike'] = {}        
        self._in_buf_mtype = {}
        self._in_buf_mtype['gpot'] = {}
        self._in_buf_mtype['spike'] = {}        
        for in_id in self._in_ids:

            # Get interfaces of pattern connecting the current module to
            # source module `in_id`; `int_1` is connected to the current
            # module, `int_0` is connected to the other module:
            pat = self.routing_table[in_id, self.id]['pattern']
            int_0 = self.routing_table[in_id, self.id]['int_0']
            int_1 = self.routing_table[in_id, self.id]['int_1']

            # The buffers must be the same size as the port data arrays of the
            # modules from which they received data:
            self._in_buf['gpot'][in_id] = \
                gpuarray.empty(len(self.pm_all['gpot'][in_id]),
                               self.pm['gpot'].dtype)                               
            self._in_buf_int['gpot'][in_id] = bufint(self._in_buf['gpot'][in_id])
            self._in_buf_mtype['gpot'][in_id] = \
                dtype_to_mpi(self._in_buf['gpot'][in_id].dtype)

            self._in_buf['spike'][in_id] = \
                gpuarray.empty(len(self.pm_all['spike'][in_id]),
                               self.pm['spike'].dtype)
            self._in_buf_int['spike'][in_id] = bufint(self._in_buf['spike'][in_id])
            self._in_buf_mtype['spike'][in_id] = \
                dtype_to_mpi(self._in_buf['spike'][in_id].dtype)

    def _sync(self):
        """
        Send output data and receive input data.
        """

        if self.time_sync:
            start = time.time()
        req = MPI.Request()
        requests = []

        # Transmit the entire port data array to each destination module:
        dest_ids = self.routing_table.dest_ids(self.id)
        for dest_id in dest_ids:            
            dest_rank = self.rank_to_id[:dest_id]
            r = MPI.COMM_WORLD.Isend([self._data_int['gpot'],
                                      self._data_mtype['gpot']],
                                     dest_rank, GPOT_TAG)
            requests.append(r)
            r = MPI.COMM_WORLD.Isend([self._data_int['spike'],
                                      self._data_mtype['spike']],
                                     dest_rank, SPIKE_TAG)
            requests.append(r)

            if not self.time_sync:
                self.log_info('sending to %s' % dest_id)
        if not self.time_sync:
            self.log_info('sent all data from %s' % self.id)

        # For each source module, receive elements and copy them into the
        # current module's port data array:
        src_ids = self.routing_table.src_ids(self.id)
        for src_id in src_ids:
            src_rank = self.rank_to_id[:src_id]
            r = MPI.COMM_WORLD.Irecv([self._in_buf_int['gpot'][src_id],
                                      self._in_buf_mtype['gpot'][src_id]],
                                     source=src_rank, tag=GPOT_TAG)
            requests.append(r)
            r = MPI.COMM_WORLD.Irecv([self._in_buf_int['spike'][src_id],
                                      self._in_buf_mtype['spike'][src_id]],
                                     source=src_rank, tag=SPIKE_TAG)
            requests.append(r)
            if not self.time_sync:
                self.log_info('receiving from %s' % src_id)
        req.Waitall(requests)
        if not self.time_sync:
            self.log_info('all data received by %s' % self.id)

        # Copy received elements into the current module's data array:
        n_gpot = 0
        n_spike = 0
        for src_id in src_ids:
            ind_from_gpot = self._from_port_dict_ids['gpot'][src_id]
            ind_in_gpot = self._in_port_dict_ids['gpot'][src_id]
            set_by_inds_from_inds(self.data['gpot'], ind_in_gpot,
                                  self._in_buf['gpot'][src_id], ind_from_gpot)
            n_gpot += len(self._in_buf['gpot'][src_id])
            ind_from_spike = self._from_port_dict_ids['spike'][src_id]
            ind_in_spike = self._in_port_dict_ids['spike'][src_id]
            set_by_inds_from_inds(self.data['spike'], ind_in_spike,
                                  self._in_buf['spike'][src_id], ind_from_spike)
            n_spike += len(self._in_buf['spike'][src_id])

        # Save timing data:
        if self.time_sync:
            stop = time.time()
            #self.log_info('sent timing data to master')
            self.intercomm.isend(['time', (self.rank, self.steps, start, stop,
                n_gpot*self.pm['gpot'].dtype.itemsize+\
                n_spike*self.pm['spike'].dtype.itemsize)],
                    dest=0, tag=self._ctrl_tag)
        else:
            self.log_info('saved all data received by %s' % self.id)
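
Both `_sync` implementations above rely on `set_by_inds_from_inds` to combine a gather from the received buffer with a scatter into the module's port data array. The GPU helper itself is not shown in this listing; the following NumPy sketch illustrates the assumed semantics (the function name and arrays are made up for illustration).

import numpy as np

def set_by_inds_from_inds_np(dest, dest_inds, src, src_inds):
    # Assumed semantics: dest[dest_inds[i]] = src[src_inds[i]] for each i.
    dest[dest_inds] = src[src_inds]

# Toy usage: copy entries 0 and 2 of a received buffer into positions 3 and 1
# of a port data array:
data = np.zeros(4)
buf = np.array([10., 20., 30.])
set_by_inds_from_inds_np(data, np.array([3, 1]), buf, np.array([0, 2]))
# data is now [ 0., 30.,  0., 10.]
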
Example No. 6
0
class Module(BaseModule):
    """
    Processing module.

    This class repeatedly executes a work method until it receives
    a quit message via its control port.

    Parameters
    ----------
    selector : str, unicode, or sequence
        Path-like selector describing the module's interface of 
        exposed ports.
    sel_gpot : str, unicode, or sequence
        Path-like selector describing the graded potential ports in the module's
        interface.
    sel_spike : str, unicode, or sequence
        Path-like selector describing the spiking ports in the module's
        interface.
    data_gpot : numpy.ndarray
        Data array to associate with graded potential ports. Array length
        must equal the number of graded potential ports in the module's interface.
    data_spike : numpy.ndarray
        Data array to associate with spiking ports. Array length
        must equal the number of spiking ports in the module's interface.
    columns : list of str
        Interface port attributes. This list must at least contain
        'interface', 'io', and 'type'.
    port_data : int
        Network port for transmitting data.
    port_ctrl : int
        Network port for controlling the module instance.
    id : str
        Module identifier. If no identifier is specified, a unique
        identifier is automatically generated.
    device : int
        GPU device to use.
    debug : bool
        Debug flag.
        
    Notes
    -----
    A module instance connected to other module instances contains a list of the
    connectivity objects that describe incoming connections and a list of
    masks that select for the neurons whose data must be transmitted to
    destination modules.
    """
    def __init__(self,
                 selector,
                 sel_gpot,
                 sel_spike,
                 data_gpot,
                 data_spike,
                 columns=['interface', 'io', 'type'],
                 port_data=PORT_DATA,
                 port_ctrl=PORT_CTRL,
                 id=None,
                 device=None,
                 debug=False):

        self.debug = debug
        self.device = device

        # Require several necessary attribute columns:
        assert 'interface' in columns
        assert 'io' in columns
        assert 'type' in columns

        # Generate a unique ID if none is specified:
        if id is None:
            id = uid()

        super(BaseModule, self).__init__(port_ctrl, id)

        # Logging:
        self.logger = twiggy.log.name('module %s' % self.id)

        # Data port:
        if port_data == port_ctrl:
            raise ValueError('data and control ports must differ')
        self.port_data = port_data

        # Initial network connectivity:
        self.net = 'none'

        # Create module interface given the specified ports:
        self.interface = Interface(selector, columns)

        # Set the interface ID to 0
        # we assume that a module only has one interface:
        self.interface[selector, 'interface'] = 0

        # Set port types:
        assert PathLikeSelector.is_in(sel_gpot, selector)
        assert PathLikeSelector.is_in(sel_spike, selector)
        assert PathLikeSelector.are_disjoint(sel_gpot, sel_spike)
        self.interface[sel_gpot, 'type'] = 'gpot'
        self.interface[sel_spike, 'type'] = 'spike'

        # Set up mapper between port identifiers and their associated data:
        assert len(data_gpot) == len(self.interface.gpot_ports())
        assert len(data_spike) == len(self.interface.spike_ports())
        self.data = {}
        self.data['gpot'] = data_gpot
        self.data['spike'] = data_spike
        self.pm = {}
        self.pm['gpot'] = PortMapper(self.data['gpot'], sel_gpot)
        self.pm['spike'] = PortMapper(self.data['spike'], sel_spike)

        # Patterns connecting this module instance with other modules instances.
        # Keyed on the IDs of those modules:
        self.patterns = {}

        # Each entry in pat_ints is a tuple containing the identifiers of which
        # of a pattern's identifiers are connected to the current module (first
        # entry) and the modules to which it is connected (second entry).
        # Keyed on the IDs of those modules:
        self.pat_ints = {}

        # Dict for storing incoming data; each entry (corresponding to each
        # module that sends input to the current module) is a deque containing
        # incoming data, which in turn contains transmitted data arrays. Deques
        # are used here to accommodate situations when multiple data from a
        # single source arrive:
        self._in_data = {}

        # List for storing outgoing data; each entry is a tuple whose first
        # entry is the source or destination module ID and whose second entry is
        # the data to transmit:
        self._out_data = []

        # Dictionaries containing ports of source modules that
        # send output to this module. Must be initialized immediately before
        # an emulation begins running. Keyed on source module ID:
        self._in_port_dict = {}
        self._in_port_dict['gpot'] = {}
        self._in_port_dict['spike'] = {}

        # Dictionaries containing ports of destination modules that
        # receive input from this module. Must be initialized immediately before
        # an emulation begins running. Keyed on destination module ID:
        self._out_port_dict = {}
        self._out_port_dict['gpot'] = {}
        self._out_port_dict['spike'] = {}

    def _init_gpu(self):
        """
        Initialize GPU device.

        Notes
        -----
        Must be called from within the `run()` method, not from within
        `__init__()`.

        """

        if self.device is None:
            self.logger.info('no GPU specified - not initializing')
        else:
            drv.init()
            try:
                self.gpu_ctx = drv.Device(self.device).make_context()
            except Exception as e:
                self.logger.info('_init_gpu exception: ' + e.message)
            else:
                atexit.register(self.gpu_ctx.pop)
                self.logger.info('GPU initialized')

    @property
    def N_gpot_ports(self):
        """
        Number of exposed graded-potential ports.
        """

        return len(self.interface.gpot_ports())

    @property
    def N_spike_ports(self):
        """
        Number of exposed spiking ports.
        """

        return len(self.interface.spike_ports())

    def _get_in_data(self):
        """
        Get input data from incoming transmission buffer.

        Populate the data arrays associated with a module's ports using input
        data received from other modules.
        """

        self.logger.info('retrieving from input buffer')

        # Since fan-in is not permitted, the data from all source modules
        # must necessarily map to different ports; we can therefore write each
        # of the received data to the array associated with the module's ports
        # here without worry of overwriting the data from each source module:
        for in_id in self.in_ids:

            # Check for exceptions so as to not fail on the first emulation
            # step when there is no input data to retrieve:
            try:

                # The first entry of `data` contains graded potential values,
                # while the second contains integer indices of the current
                # module's ports that should receive transmitted spikes:

                data = self._in_data[in_id].popleft()
            except IndexError:
                self.logger.info('no input data from [%s] retrieved' % in_id)
            else:
                self.logger.info('input data from [%s] retrieved' % in_id)

                # Assign transmitted graded potential values directly to port
                # data array:
                self.pm['gpot'][self._in_port_dict['gpot'][in_id]] = data[0]

                # Clear all input spike port data..
                self.pm['spike'][self._in_port_dict['spike'][in_id]] = 0

                # ..and then set the port data using the transmitted
                # information about source module spikes:
                self.pm['spike'][data[1]] = 1

    def _put_out_data(self):
        """
        Put specified output data in outgoing transmission buffer.

        Stage data from the data arrays associated with a module's ports for
        output to other modules.
        """

        self.logger.info('populating output buffer')

        # Clear output buffer before populating it:
        self._out_data = []

        # Select data that should be sent to each destination module and append
        # it to the outgoing queue:
        for out_id in self.out_ids:

            # Select graded potential data using list of
            # graded potential ports that can transmit output:
            gpot_data = self.pm['gpot'][self._out_port_dict['gpot'][out_id]]

            # Select spiking ports that can transmit output:
            out_spike_ports_all = self._out_port_dict['spike'][out_id]

            # Find those ports that have emitted a spike:
            out_spike_ports_with_spikes = \
                self.pm['spike'].get_ports_nonzero()

            # Compute the intersection of the two sets of spiking
            # ports obtained above to determine which ports the destination
            # module must be informed about:
            out_spike_ports = \
                list(set(out_spike_ports_all).intersection(out_spike_ports_with_spikes))

            # Find the input ports in the destination module's interface to
            # which the spikes emitted by the current module's spiking ports
            # must be sent:
            from_int, to_int = self.pat_ints[out_id]
            spike_data = \
                self.patterns[out_id].dest_idx(from_int, to_int, 'spike', 'spike',
                                               out_spike_ports)

            try:

                # Stage the emitted port data for transmission:
                self._out_data.append((out_id, (gpot_data, spike_data)))
            except:
                self.logger.info('no output data to [%s] sent' % out_id)
            else:
                self.logger.info('output data to [%s] sent' % out_id)

    def run_step(self):
        """
        Module work method.
    
        This method should be implemented to do something interesting with new 
        input port data in the module's `pm` attribute and update the attribute's
        output port data if necessary. It should not interact with any other 
        class attributes.
        """

        self.logger.info('running execution step')

    def _init_port_dicts(self):
        """
        Initialize dictionaries of source/destination ports in the current module.
        """

        # Extract identifiers of source ports in the current module's interface
        # for all modules receiving output from the current module:
        self._out_port_dict['gpot'] = {}
        self._out_port_dict['spike'] = {}
        for out_id in self.out_ids:
            self.logger.info('extracting output ports for %s' % out_id)

            # Get interfaces of pattern connecting the current module to
            # destination module `out_id`; `from_int` is connected to the
            # current module, `to_int` is connected to the other module:
            from_int, to_int = self.pat_ints[out_id]

            # Get ports in interface (`from_int`) connected to the current
            # module that are connected to the other module via the pattern:
            self._out_port_dict['gpot'][out_id] = \
                self.patterns[out_id].src_idx(from_int, to_int,
                                              'gpot', 'gpot')
            self._out_port_dict['spike'][out_id] = \
                self.patterns[out_id].src_idx(from_int, to_int,
                                              'spike', 'spike')

        # Extract identifiers of destination ports in the current module's
        # interface for all modules sending input to the current module:
        self._in_port_dict['gpot'] = {}
        self._in_port_dict['spike'] = {}
        for in_id in self.in_ids:
            self.logger.info('extracting input ports for %s' % in_id)

            # Get interfaces of pattern connecting the current module to
            # source module `in_id`; `to_int` is connected to the current
            # module, `from_int` is connected to the other module:
            to_int, from_int = self.pat_ints[in_id]

            # Get ports in interface (`to_int`) connected to the current
            # module that are connected to the other module via the pattern:
            self._in_port_dict['gpot'][in_id] = \
                self.patterns[in_id].dest_idx(from_int, to_int,
                                              'gpot', 'gpot')
            self._in_port_dict['spike'][in_id] = \
                self.patterns[in_id].dest_idx(from_int, to_int,
                                              'spike', 'spike')

    def pre_run(self, *args, **kwargs):
        """
        Code to run before main module run loop.

        Code in this method will be executed after a module's process has been
        launched and all connectivity objects made available, but before the
        main run loop begins.
        """

        self._init_gpu()

    def run(self):
        """
        Body of process.
        """

        # Don't allow keyboard interruption of process:
        self.logger.info('starting')
        with IgnoreKeyboardInterrupt():
            # Initialize environment:
            self._init_net()
            # Initialize Buffer for incoming data.  Dict used to store the
            # incoming data keyed by the source module id.  Each value is a
            # queue buffering the received data:
            self._in_data = {k: collections.deque() for k in self.in_ids}

            # Initialize _out_port_dict and _in_port_dict attributes:
            self._init_port_dicts()

            # Perform any pre-emulation operations:
            self.pre_run()

            cProfile.runctx('self.main_run()', globals(), locals(),
                            'prof%s.prof' % self.id)

            # Perform any post-emulation operations:
            self.post_run()

            # Shut down the control handler and inform the manager that the
            # module has shut down:
            self._ctrl_stream_shutdown()
            ack = 'shutdown'
            self.sock_ctrl.send(ack)
            self.logger.info('sent to manager: %s' % ack)

        self.logger.info('exiting')

    def main_run(self):
        self.running = True
        curr_steps = 0
        while curr_steps < self._steps:
            self.logger.info('execution step: %s' % curr_steps)
            # If the debug flag is set, don't catch exceptions so that
            # errors will lead to visible failures:
            if self.debug:

                # Get transmitted input data for processing:
                self._get_in_data()

                # Run the processing step:
                self.run_step()

                # Stage generated output data for transmission to other
                # modules:
                self._put_out_data()

                # Synchronize:
                self._sync()

            else:

                # Get transmitted input data for processing:
                catch_exception(self._get_in_data, self.logger.info)

                # Run the processing step:
                catch_exception(self.run_step, self.logger.info)

                # Stage generated output data for transmission to other
                # modules:
                catch_exception(self._put_out_data, self.logger.info)

                # Synchronize:
                catch_exception(self._sync, self.logger.info)

            # Exit run loop when a quit signal has been received:
            if not self.running:
                self.logger.info('run loop stopped')
                break

            curr_steps += 1
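
In this ZeroMQ-flavored variant, `_put_out_data` stages a tuple of (graded potential values, indices of destination spiking ports) per destination module, and `_get_in_data` applies such tuples on the receive side. A plain NumPy/deque sketch of the receive-side handling, with port-mapper indexing replaced by direct array indexing and array sizes chosen only for illustration:

import collections
import numpy as np

# Stand-ins for the gpot/spike data arrays of a module with three input gpot
# ports and four input spike ports:
gpot_data = np.zeros(3)
spike_data = np.zeros(4, int)

# Incoming messages are buffered in a deque per source module:
in_data = collections.deque()
in_data.append((np.array([0.5, 0.1, 0.9]),   # transmitted gpot values
                np.array([0, 2])))           # indices of ports that spiked

vals, spike_ports = in_data.popleft()
gpot_data[:] = vals           # assign gpot values directly to the data array
spike_data[:] = 0             # clear previous spike states...
spike_data[spike_ports] = 1   # ...then mark the ports that received spikes
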
Example No. 7
0
class Module(BaseModule):
    """
    Processing module.

    This class repeatedly executes a work method until it receives
    a quit message via its control port.

    Parameters
    ----------
    selector : str, unicode, or sequence
        Path-like selector describing the module's interface of 
        exposed ports.
    sel_gpot : str, unicode, or sequence
        Path-like selector describing the graded potential ports in the module's
        interface.
    sel_spike : str, unicode, or sequence
        Path-like selector describing the spiking ports in the module's
        interface.
    data_gpot : numpy.ndarray
        Data array to associate with graded potential ports. Array length
        must equal the number of graded potential ports in the module's interface.
    data_spike : numpy.ndarray
        Data array to associate with spiking ports. Array length
        must equal the number of spiking ports in the module's interface.
    columns : list of str
        Interface port attributes. This list must at least contain
        'interface', 'io', and 'type'.
    port_data : int
        Network port for transmitting data.
    port_ctrl : int
        Network port for controlling the module instance.
    id : str
        Module identifier. If no identifier is specified, a unique
        identifier is automatically generated.
    device : int
        GPU device to use.
    debug : bool
        Debug flag.
        
    Notes
    -----
    A module instance connected to other module instances contains a list of the
    connectivity objects that describe incoming connections and a list of
    masks that select for the neurons whose data must be transmitted to
    destination modules.
    """

    def __init__(self, selector, sel_gpot, sel_spike, data_gpot, data_spike,
                 columns=['interface', 'io', 'type'],
                 port_data=PORT_DATA, port_ctrl=PORT_CTRL,
                 id=None, device=None, debug=False):

        self.debug = debug
        self.device = device
        
        # Require several necessary attribute columns:
        assert 'interface' in columns
        assert 'io' in columns
        assert 'type' in columns

        # Generate a unique ID if none is specified:
        if id is None:
            id = uid()

        super(BaseModule, self).__init__(port_ctrl, id)

        # Logging:
        self.logger = twiggy.log.name('module %s' % self.id)

        # Data port:
        if port_data == port_ctrl:
            raise ValueError('data and control ports must differ')
        self.port_data = port_data

        # Initial network connectivity:
        self.net = 'none'

        # Create module interface given the specified ports:
        self.interface = Interface(selector, columns)

        # Set the interface ID to 0
        # we assume that a module only has one interface:
        self.interface[selector, 'interface'] = 0

        # Set port types:
        assert PathLikeSelector.is_in(sel_gpot, selector)
        assert PathLikeSelector.is_in(sel_spike, selector)
        assert PathLikeSelector.are_disjoint(sel_gpot, sel_spike)
        self.interface[sel_gpot, 'type'] = 'gpot'
        self.interface[sel_spike, 'type'] = 'spike'

        # Set up mapper between port identifiers and their associated data:
        assert len(data_gpot) == len(self.interface.gpot_ports())
        assert len(data_spike) == len(self.interface.spike_ports())
        self.data = {}
        self.data['gpot'] = data_gpot
        self.data['spike'] = data_spike
        self.pm = {}
        self.pm['gpot'] = PortMapper(self.data['gpot'], sel_gpot)
        self.pm['spike'] = PortMapper(self.data['spike'], sel_spike)

        # Patterns connecting this module instance with other modules instances.
        # Keyed on the IDs of those modules:
        self.patterns = {}

        # Each entry in pat_ints is a tuple containing the identifiers of which 
        # of a pattern's identifiers are connected to the current module (first
        # entry) and the modules to which it is connected (second entry).
        # Keyed on the IDs of those modules:
        self.pat_ints = {}

        # Dict for storing incoming data; each entry (corresponding to each
        # module that sends input to the current module) is a deque containing
        # incoming data, which in turn contains transmitted data arrays. Deques
        # are used here to accommodate situations when multiple data from a
        # single source arrive:
        self._in_data = {}

        # List for storing outgoing data; each entry is a tuple whose first
        # entry is the source or destination module ID and whose second entry is
        # the data to transmit:
        self._out_data = []

        # Dictionaries containing ports of source modules that
        # send output to this module. Must be initialized immediately before
        # an emulation begins running. Keyed on source module ID:
        self._in_port_dict = {}
        self._in_port_dict['gpot'] = {}
        self._in_port_dict['spike'] = {}

        # Dictionaries containing ports of destination modules that
        # receive input from this module. Must be initialized immediately before
        # an emulation begins running. Keyed on destination module ID:
        self._out_port_dict = {}
        self._out_port_dict['gpot'] = {}
        self._out_port_dict['spike'] = {}
        
    def _init_gpu(self):
        """
        Initialize GPU device.

        Notes
        -----
        Must be called from within the `run()` method, not from within
        `__init__()`.

        """

        if self.device is None:
            self.logger.info('no GPU specified - not initializing')
        else:
            drv.init()
            try:
                self.gpu_ctx = drv.Device(self.device).make_context()
            except Exception as e:
                self.logger.info('_init_gpu exception: ' + e.message)
            else:
                atexit.register(self.gpu_ctx.pop)
                self.logger.info('GPU initialized')

    @property
    def N_gpot_ports(self):
        """
        Number of exposed graded-potential ports.
        """

        return len(self.interface.gpot_ports())

    @property
    def N_spike_ports(self):
        """
        Number of exposed spiking ports.
        """

        return len(self.interface.spike_ports())

    def _get_in_data(self):
        """
        Get input data from incoming transmission buffer.

        Populate the data arrays associated with a module's ports using input
        data received from other modules.
        """

        self.logger.info('retrieving from input buffer')

        # Since fan-in is not permitted, the data from all source modules
        # must necessarily map to different ports; we can therefore write each
        # of the received data to the array associated with the module's ports
        # here without worry of overwriting the data from each source module:
        for in_id in self.in_ids:

            # Check for exceptions so as to not fail on the first emulation
            # step when there is no input data to retrieve:
            try:
                
                # The first entry of `data` contains graded potential values,
                # while the second contains integer indices of the current
                # module's ports that should receive transmitted spikes:

                data = self._in_data[in_id].popleft()
            except IndexError:
                self.logger.info('no input data from [%s] retrieved' % in_id)
            else:
                self.logger.info('input data from [%s] retrieved' % in_id)

                # Assign transmitted graded potential values directly to port
                # data array:
                self.pm['gpot'][self._in_port_dict['gpot'][in_id]] = data[0]

                # Clear the input spike port data received from this source
                # module...
                self.pm['spike'][self._in_port_dict['spike'][in_id]] = 0

                # ...and then set the port data using the transmitted
                # information about source module spikes:
                self.pm['spike'][data[1]] = 1
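
    # Each `data` object consumed in _get_in_data() is expected to be a
    # 2-tuple; a hypothetical example (assumes numpy imported as np; values
    # and indices are made up):
    #
    #     data = (np.array([0.5, -1.2]),  # graded potential values for this
    #                                     # module's gpot input ports
    #             np.array([3, 7]))       # indices of this module's spike
    #                                     # ports that received a spike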

    def _put_out_data(self):
        """
        Put specified output data in outgoing transmission buffer.

        Stage data from the data arrays associated with a module's ports for
        output to other modules.
        """

        self.logger.info('populating output buffer')

        # Clear output buffer before populating it:
        self._out_data = []

        # Select data that should be sent to each destination module and append
        # it to the outgoing queue:
        for out_id in self.out_ids:

            # Select graded potential data using the list of
            # graded potential ports that can transmit output:
            gpot_data = self.pm['gpot'][self._out_port_dict['gpot'][out_id]]

            # Select spiking ports that can transmit output:
            out_spike_ports_all = self._out_port_dict['spike'][out_id]

            # Find those ports that have emitted a spike:
            out_spike_ports_with_spikes = \
                self.pm['spike'].get_ports_nonzero()

            # Compute the intersection of the two sets of spiking
            # ports obtained above to determine which ports the destination
            # module must be informed about:
            out_spike_ports = \
                list(set(out_spike_ports_all).intersection(out_spike_ports_with_spikes))

            # Find the input ports in the destination module's interface to
            # which the spikes emitted by the current module's spiking ports
            # must be sent:
            from_int, to_int = self.pat_ints[out_id]
            spike_data = \
                self.patterns[out_id].dest_idx(from_int, to_int, 'spike', 'spike',
                                               out_spike_ports)

            try:
                # Stage the emitted port data for transmission:
                self._out_data.append((out_id, (gpot_data, spike_data)))
            except Exception:
                self.logger.info('no output data to [%s] sent' % out_id)
            else:
                self.logger.info('output data to [%s] sent' % out_id)
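
    # The spike-port intersection in _put_out_data() amounts to a plain set
    # operation; for illustration (hypothetical port identifiers):
    #
    #     out_spike_ports_all         = [('a', 0), ('a', 1), ('a', 2)]
    #     out_spike_ports_with_spikes = [('a', 1), ('b', 4)]
    #     list(set(out_spike_ports_all).intersection(out_spike_ports_with_spikes))
    #     # -> [('a', 1)]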

    def run_step(self):
        """
        Module work method.
    
        This method should be implemented to do something interesting with new 
        input port data in the module's `pm` attribute and update the attribute's
        output port data if necessary. It should not interact with any other 
        class attributes.
        """

        self.logger.info('running execution step')
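
    # A minimal sketch of how a subclass might override run_step(); the class
    # name, the attributes `in_gpot_ports`/`out_gpot_ports`, and the transfer
    # function are assumed purely for illustration (numpy imported as np):
    #
    #     class MyModule(Module):
    #         def run_step(self):
    #             super(MyModule, self).run_step()
    #
    #             # Transform the graded potential inputs and write the
    #             # results to the output ports:
    #             in_vals = self.pm['gpot'][self.in_gpot_ports]
    #             self.pm['gpot'][self.out_gpot_ports] = np.tanh(in_vals)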

    def _init_port_dicts(self):
        """
        Initialize dictionaries of source/destination ports in the current module.
        """

        # Extract identifiers of source ports in the current module's interface
        # for all modules receiving output from the current module:
        self._out_port_dict['gpot'] = {}
        self._out_port_dict['spike'] = {}
        for out_id in self.out_ids:
            self.logger.info('extracting output ports for %s' % out_id)

            # Get interfaces of pattern connecting the current module to
            # destination module `out_id`; `from_int` is connected to the
            # current module, `to_int` is connected to the other module:
            from_int, to_int = self.pat_ints[out_id]

            # Get ports in interface (`from_int`) connected to the current
            # module that are connected to the other module via the pattern:
            self._out_port_dict['gpot'][out_id] = \
                self.patterns[out_id].src_idx(from_int, to_int,
                                              'gpot', 'gpot')
            self._out_port_dict['spike'][out_id] = \
                self.patterns[out_id].src_idx(from_int, to_int,
                                              'spike', 'spike')
                                                              
        # Extract identifiers of destination ports in the current module's
        # interface for all modules sending input to the current module:
        self._in_port_dict['gpot'] = {}
        self._in_port_dict['spike'] = {}
        for in_id in self.in_ids:
            self.logger.info('extracting input ports for %s' % in_id)

            # Get interfaces of pattern connecting the current module to
            # source module `in_id`; `to_int` is connected to the current
            # module, `from_int` is connected to the other module:
            to_int, from_int = self.pat_ints[in_id]

            # Get ports in interface (`to_int`) connected to the current
            # module that are connected to the other module via the pattern:
            self._in_port_dict['gpot'][in_id] = \
                self.patterns[in_id].dest_idx(from_int, to_int,
                                              'gpot', 'gpot')
            self._in_port_dict['spike'][in_id] = \
                self.patterns[in_id].dest_idx(from_int, to_int,
                                              'spike', 'spike')
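
        # After initialization, the port dictionaries map peer module IDs to
        # the relevant port indices, e.g. (hypothetical module IDs):
        #
        #     self._out_port_dict == {'gpot':  {'mod_b': <gpot source port indices>},
        #                             'spike': {'mod_b': <spike source port indices>}}
        #     self._in_port_dict  == {'gpot':  {'mod_a': <gpot dest port indices>},
        #                             'spike': {'mod_a': <spike dest port indices>}}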

    def pre_run(self, *args, **kwargs):
        """
        Code to run before main module run loop.

        Code in this method will be executed after a module's process has been
        launched and all connectivity objects made available, but before the
        main run loop begins.
        """
        
        self._init_gpu()
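
    # A subclass will typically extend pre_run() to perform one-time setup
    # that requires the GPU context, e.g. allocating per-port device arrays.
    # A minimal sketch (the class and attribute names are hypothetical;
    # assumes pycuda.gpuarray and numpy are available):
    #
    #     class MyModule(Module):
    #         def pre_run(self, *args, **kwargs):
    #             super(MyModule, self).pre_run(*args, **kwargs)
    #             import numpy as np
    #             import pycuda.gpuarray as gpuarray
    #             self.gpot_gpu = gpuarray.zeros(self.N_gpot_ports, np.double)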

    def run(self):
        """
        Body of process.
        """

        # Don't allow keyboard interruption of process:
        self.logger.info('starting')
        with IgnoreKeyboardInterrupt():
            # Initialize environment:
            self._init_net()
            # Initialize buffer for incoming data. The dict stores incoming
            # data keyed by source module ID; each value is a deque buffering
            # the received data:
            self._in_data = {k: collections.deque() for k in self.in_ids}

            # Initialize _out_port_dict and _in_port_dict attributes:
            self._init_port_dicts()

            # Perform any pre-emulation operations:
            self.pre_run()

            # Run the main loop under cProfile so that per-module profiling
            # results are written to a file named after the module ID:
            cProfile.runctx('self.main_run()', globals(), locals(),
                            'prof%s.prof' % self.id)

            # Perform any post-emulation operations:
            self.post_run()

            # Shut down the control handler and inform the manager that the
            # module has shut down:
            self._ctrl_stream_shutdown()
            ack = 'shutdown'
            self.sock_ctrl.send(ack)
            self.logger.info('sent to manager: %s' % ack)
                
        self.logger.info('exiting')
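
    # The catch_exception() helper used in main_run() below is imported from
    # elsewhere in the package; conceptually it behaves roughly like this
    # sketch (an assumption, not the actual implementation):
    #
    #     import traceback
    #
    #     def catch_exception(func, disp, *args, **kwargs):
    #         try:
    #             return func(*args, **kwargs)
    #         except Exception:
    #             disp(traceback.format_exc())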
        
    def main_run(self):
        """
        Main execution loop.

        Retrieve input data, execute a work step, stage output data, and
        synchronize on each iteration until the specified number of steps has
        run or a quit signal is received.
        """

        self.running = True
        curr_steps = 0
        while curr_steps < self._steps:
            self.logger.info('execution step: %s' % curr_steps)
            # If the debug flag is set, don't catch exceptions so that
            # errors will lead to visible failures:
            if self.debug:

                # Get transmitted input data for processing:
                self._get_in_data()

                # Run the processing step:
                self.run_step()

                # Stage generated output data for transmission to other
                # modules:
                self._put_out_data()

                # Synchronize:
                self._sync()

            else:

                # Get transmitted input data for processing:
                catch_exception(self._get_in_data, self.logger.info)

                # Run the processing step:
                catch_exception(self.run_step, self.logger.info)

                # Stage generated output data for transmission to other
                # modules:
                catch_exception(self._put_out_data, self.logger.info)

                # Synchronize:
                catch_exception(self._sync, self.logger.info)

            # Exit run loop when a quit signal has been received:
            if not self.running:
                self.logger.info('run loop stopped')
                break

            curr_steps += 1