def __init__(self, required_args=None, ctrl_tag=CTRL_TAG):
    """Manager for MPI-based modules.

    Parameters
    ----------
    required_args : iterable of str, optional
        Names of constructor arguments that managed modules must accept.
        Defaults to ['sel', 'sel_in', 'sel_out', 'sel_gpot', 'sel_spike'].
    ctrl_tag : int
        MPI tag used for control messages.
    """

    super(Manager, self).__init__(ctrl_tag)

    # Use a per-call default to avoid the shared mutable default argument
    # pitfall:
    if required_args is None:
        required_args = ["sel", "sel_in", "sel_out", "sel_gpot", "sel_spike"]

    # Required constructor args:
    self.required_args = required_args

    # One-to-one mapping between MPI rank and module ID:
    self.rank_to_id = bidict.bidict()

    # Unique object ID:
    self.id = uid()

    # Set up a dynamic table to contain the routing table:
    self.routing_table = RoutingTable()

    # Number of emulation steps to run:
    self.steps = np.inf

    # Variables for timing run loop:
    self.start_time = 0.0
    self.stop_time = 0.0

    # Variables for computing throughput:
    self.counter = 0
    self.total_sync_time = 0.0
    self.total_sync_nbytes = 0.0
    self.received_data = {}

    # Average step synchronization time:
    self._average_step_sync_time = 0.0

    # Computed throughput (only updated after an emulation run):
    self._average_throughput = 0.0
    self._total_throughput = 0.0
    self.log_info("manager instantiated")
def __init__(self, port_data=PORT_DATA, port_ctrl=PORT_CTRL):
    """Manager that coordinates broker and module processes over ZeroMQ."""

    # Assign this manager a unique identifier and a correspondingly named
    # logger:
    self.id = uid()
    self.logger = twiggy.log.name('manage %s' % self.id)

    # Port numbers used for data and control traffic:
    self.port_data = port_data
    self.port_ctrl = port_ctrl

    # ROUTER socket for exchanging control messages with other topology
    # components; the linger period prevents hanging on unsent messages
    # during shutdown:
    self.zmq_ctx = zmq.Context()
    self.sock_ctrl = self.zmq_ctx.socket(zmq.ROUTER)
    self.sock_ctrl.setsockopt(zmq.LINGER, LINGER_TIME)
    self.sock_ctrl.bind("tcp://*:%i" % self.port_ctrl)

    # Poller used to detect acknowledgements of control messages:
    self.ctrl_poller = zmq.Poller()
    self.ctrl_poller.register(self.sock_ctrl, zmq.POLLIN)

    # Bidirectional maps between object IDs and broker/module process
    # instances (bidicts allow retrieving an ID from its instance too):
    self.brokers = bidict.bidict()
    self.modules = bidict.bidict()

    # Dynamic routing table describing inter-module connections:
    self.routing_table = RoutingTable()

    # Run for an unbounded number of emulation steps by default:
    self.steps = np.inf
def __init__(self, port_data=PORT_DATA, port_ctrl=PORT_CTRL, id=None):
    """Base module process communicating over data and control ports."""

    # Fall back to a freshly generated unique ID when none is given:
    if id is None:
        id = uid()
    super(BaseModule, self).__init__(port_ctrl, id)

    # Logger named after this module's ID:
    self.logger = twiggy.log.name('module %s' % self.id)

    # The data port may not coincide with the control port:
    if port_data == port_ctrl:
        raise ValueError('data and control ports must differ')
    self.port_data = port_data

    # No connectivity configured yet:
    self.net = 'none'

    # Outgoing data buffer; each entry is a tuple of (source or destination
    # module ID, data):
    self._out_data = []

    # Connectivity objects between this module and other modules, keyed by
    # the IDs of those modules:
    self._conn_dict = {}

    # Ports of destination modules that receive input from this module;
    # populated immediately before an emulation begins running:
    self._out_idx_dict = {}
def __init__(self, port_data=PORT_DATA, port_ctrl=PORT_CTRL):
    """Manager that tracks broker, module, and connectivity instances."""

    # Unique identifier for this manager and a logger named after it:
    self.id = uid()
    self.logger = twiggy.log.name('manage %s' % self.id)

    # Port numbers used for data and control traffic:
    self.port_data = port_data
    self.port_ctrl = port_ctrl

    # ROUTER socket for exchanging control messages with other topology
    # components; the linger period keeps shutdown from blocking on unsent
    # messages:
    self.zmq_ctx = zmq.Context()
    self.sock_ctrl = self.zmq_ctx.socket(zmq.ROUTER)
    self.sock_ctrl.setsockopt(zmq.LINGER, LINGER_TIME)
    self.sock_ctrl.bind("tcp://*:%i" % self.port_ctrl)

    # Poller used to detect acknowledgements of control messages:
    self.ctrl_poller = zmq.Poller()
    self.ctrl_poller.register(self.sock_ctrl, zmq.POLLIN)

    # Bidirectional maps for broker, module, and connectivity instances:
    self.brok_dict = bidict.bidict()
    self.mod_dict = bidict.bidict()
    self.conn_dict = bidict.bidict()

    # Dynamic routing table describing inter-module connections:
    self.routing_table = RoutingTable()

    # Run for an unbounded number of emulation steps by default:
    self.steps = np.inf
def __init__(self, port_data=PORT_DATA, port_ctrl=PORT_CTRL,
             port_time=PORT_TIME):
    """Manager that additionally collects timing data from modules."""

    # Unique identifier for this manager:
    self.id = uid()

    # Set logger name:
    LoggerMixin.__init__(self, 'man %s' % self.id)

    # Port numbers used for data, control, and timing traffic:
    self.port_data = port_data
    self.port_ctrl = port_ctrl
    self.port_time = port_time

    # ROUTER socket for exchanging control messages with other topology
    # components; the linger period keeps shutdown from blocking on unsent
    # messages:
    self.zmq_ctx = zmq.Context()
    self.sock_ctrl = self.zmq_ctx.socket(zmq.ROUTER)
    self.sock_ctrl.setsockopt(zmq.LINGER, LINGER_TIME)
    self.sock_ctrl.bind("tcp://*:%i" % self.port_ctrl)

    # Bidirectional maps between object IDs and broker/module process
    # instances (bidicts allow retrieving an ID from its instance too):
    self.brokers = bidict.bidict()
    self.modules = bidict.bidict()

    # Dynamic routing table describing inter-module connections:
    self.routing_table = RoutingTable()

    # Run for an unbounded number of emulation steps by default:
    self.max_steps = float('inf')

    # Process that handles time data:
    self.time_listener = TimeListener(self.port_ctrl, self.port_time)
def _generate_metadata(self):
    """Build the initial (unscheduled, not-run) metadata for this test case."""
    return TestCaseMetadata(
        name=self.obj.name,
        path=self._path,
        uid=uid.uid(self.obj, self._path),
        status=Status.Unscheduled,
        result=Result(Result.NotRun),
        suite_uid=self.parent_suite.metadata.uid)
def _generate_metadata(self):
    """Build the initial (unscheduled, not-run) metadata for this test suite."""
    return TestSuiteMetadata(
        name=self.obj.name,
        tags=self.obj.tags,
        path=self._path,  # TODO Loader supply info?
        uid=uid.uid(self.obj, self._path),  # TODO Requires path
        status=Status.Unscheduled,
        result=Result(Result.NotRun))
def __init__(self, N_A, N_B, N_mult=1, A_id='A', B_id='B'):
    """Connectivity between two port sets A and B.

    Parameters
    ----------
    N_A, N_B : int
        Number of ports exposed by each of the two LPUs; must be nonzero.
    N_mult : int
        Maximum number of connections between any two ports; must be nonzero.
    A_id, B_id : str
        Module IDs; must be non-empty and distinct.

    Raises
    ------
    ValueError
        If any of the above constraints is violated.
    """

    # Unique object ID:
    self.id = uid()

    # Validate with explicit exceptions rather than asserts so the checks
    # are not stripped when Python runs with -O:

    # The number of ports in both of the LPUs must be nonzero:
    if N_A == 0:
        raise ValueError('N_A must be nonzero')
    if N_B == 0:
        raise ValueError('N_B must be nonzero')

    # The maximum number of connections between any two ports must be
    # nonzero:
    if N_mult == 0:
        raise ValueError('N_mult must be nonzero')

    # The module IDs must be non-null and nonidentical:
    if A_id == B_id:
        raise ValueError('module IDs must be nonidentical')
    if len(A_id) == 0 or len(B_id) == 0:
        raise ValueError('module IDs must be non-null')

    self.N_A = N_A
    self.N_B = N_B
    self.N_mult = N_mult
    self.A_id = A_id
    self.B_id = B_id

    # Strings indicating direction between modules connected by instances of
    # the class:
    self._AtoB = '/'.join((A_id, B_id))
    self._BtoA = '/'.join((B_id, A_id))

    # All matrices are stored in this dict:
    self._data = {}

    # Keys corresponding to each connectivity direction are stored in the
    # following lists:
    self._keys_by_dir = {self._AtoB: [],
                         self._BtoA: []}

    # Create connectivity matrices for both directions; the key structure
    # is source module/dest module/connection #/parameter name. Note that
    # the matrices associated with A -> B have the dimensions (N_A, N_B)
    # while those associated with B -> A have the dimensions (N_B, N_A):
    key = self._make_key(self._AtoB, 0, 'conn')
    self._data[key] = self._make_matrix((self.N_A, self.N_B), int)
    self._keys_by_dir[self._AtoB].append(key)

    key = self._make_key(self._BtoA, 0, 'conn')
    self._data[key] = self._make_matrix((self.N_B, self.N_A), int)
    self._keys_by_dir[self._BtoA].append(key)
def __init__(self, port_data=PORT_DATA, port_ctrl=PORT_CTRL,
             routing_table=None):
    """Broker that accumulates and routes data between modules."""

    super(Broker, self).__init__(port_ctrl, uid())

    # Logger named after this broker's ID:
    self.logger = twiggy.log.name('broker %s' % self.id)

    # The data port may not coincide with the control port:
    if port_data == port_ctrl:
        raise ValueError('data and control ports must differ')
    self.port_data = port_data

    # Table describing how data is routed between modules:
    self.routing_table = routing_table

    # Buffer that accumulates data awaiting routing:
    self.data_to_route = []
def __init__(self, port_ctrl, port_time, ids=None):
    """Listener process that collects timing data from modules.

    Parameters
    ----------
    port_ctrl : int
        Control port; must differ from `port_time`.
    port_time : int
        Port on which timing data is received.
    ids : set, optional
        IDs of modules from which to collect timing data. Defaults to an
        empty set.
    """

    super(TimeListener, self).__init__(port_ctrl, uid())

    # Reformat logger name:
    LoggerMixin.__init__(self, 'lis %s' % self.id)

    # Time port:
    if port_time == port_ctrl:
        raise ValueError('time and control ports must differ')
    self.port_time = port_time

    # Create a fresh set per instance to avoid the shared mutable default
    # argument pitfall:
    if ids is None:
        ids = set()

    # IDs of modules from which to collect timing data:
    assert isinstance(ids, set)
    self.ids = ids

    # Queue for returning timing results to parent process:
    self.queue = mp.Queue()
def run(self):
    """Accept TCP connections, wrap them with SSL, and queue them for reading.

    Runs forever. When the reader queue is full, the client receives an
    HTTP 503 response and the connection is closed.
    """
    self.Sock = socket(AF_INET, SOCK_STREAM)
    # Allow the listen port to be rebound quickly after a restart:
    self.Sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    self.Sock.bind(('', self.Port))
    self.Sock.listen(10)
    while True:
        # accept() always returns a (socket, address) pair, so no
        # placeholder pre-assignment is needed:
        csock, caddr = self.Sock.accept()
        cid = uid()
        self.debug("%s connection accepted from %s:%s" % (cid, caddr[0], caddr[1]))
        wrapped_sock, ssl_info = self.Server.wrap_socket(csock)
        handle = RequestHandle(cid, wrapped_sock, caddr, ssl_info)
        try:
            self.Reader.add(handle)
        except RuntimeError:
            # Fixed format string: the original "% reader queue is full"
            # had a bare "%" with no conversion and raised ValueError here:
            self.debug("%s reader queue is full" % (cid,))
            wrapped_sock.sendall(b"HTTP/1.1 503 Server queue is full\n\n")
            wrapped_sock.close()
def __init__(self, required_args=None, ctrl_tag=CTRL_TAG):
    """Manager for MPI-based modules.

    Parameters
    ----------
    required_args : iterable of str, optional
        Names of constructor arguments that managed modules must accept.
        Defaults to ['sel', 'sel_in', 'sel_out', 'sel_gpot', 'sel_spike'].
    ctrl_tag : int
        MPI tag used for control messages.
    """

    super(Manager, self).__init__(ctrl_tag)

    # Use a per-call default to avoid the shared mutable default argument
    # pitfall:
    if required_args is None:
        required_args = ['sel', 'sel_in', 'sel_out', 'sel_gpot', 'sel_spike']

    # Required constructor args:
    self.required_args = required_args

    # One-to-one mapping between MPI rank and module ID:
    self.rank_to_id = bidict.bidict()

    # Unique object ID:
    self.id = uid()

    # Set up a dynamic table to contain the routing table:
    self.routing_table = RoutingTable()

    # Number of emulation steps to run:
    self.steps = np.inf

    # Variables for timing run loop:
    self.start_time = 0.0
    self.stop_time = 0.0

    # Variables for computing throughput:
    self.counter = 0
    self.total_sync_time = 0.0
    self.total_sync_nbytes = 0.0
    self.received_data = {}

    # Average step synchronization time:
    self._average_step_sync_time = 0.0

    # Computed throughput (only updated after an emulation run):
    self._average_throughput = 0.0
    self._total_throughput = 0.0
    self.log_info('manager instantiated')
def __init__(self, sel, sel_in, sel_out, sel_gpot, sel_spike,
             data_gpot, data_spike,
             columns=None,
             ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG, spike_tag=SPIKE_TAG,
             id=None, device=None,
             routing_table=None, rank_to_id=None,
             debug=False, time_sync=False):
    """Module with separate graded potential and spiking port data.

    `columns` defaults to ['interface', 'io', 'type']; a fresh list is
    created per call to avoid the shared mutable default argument pitfall.
    """

    super(Module, self).__init__(ctrl_tag)
    self.debug = debug
    self.time_sync = time_sync
    self.device = device

    self._gpot_tag = gpot_tag
    self._spike_tag = spike_tag

    # Avoid the shared mutable default argument pitfall:
    if columns is None:
        columns = ['interface', 'io', 'type']

    # Require several necessary attribute columns:
    if 'interface' not in columns:
        raise ValueError('interface column required')
    if 'io' not in columns:
        raise ValueError('io column required')
    if 'type' not in columns:
        raise ValueError('type column required')

    # Manually register the file close method associated with MPIOutput
    # so that it is called by atexit before MPI.Finalize() (if the file is
    # closed after MPI.Finalize() is called, an error will occur):
    for k, v in twiggy.emitters.iteritems():
        if isinstance(v._output, MPIOutput):
            atexit.register(v._output.close)

    # Ensure that the input and output port selectors respectively
    # select mutually exclusive subsets of the set of all ports exposed by
    # the module:
    if not SelectorMethods.is_in(sel_in, sel):
        raise ValueError('input port selector not in selector of all ports')
    if not SelectorMethods.is_in(sel_out, sel):
        raise ValueError('output port selector not in selector of all ports')
    if not SelectorMethods.are_disjoint(sel_in, sel_out):
        raise ValueError('input and output port selectors not disjoint')

    # Ensure that the graded potential and spiking port selectors
    # respectively select mutually exclusive subsets of the set of all ports
    # exposed by the module:
    if not SelectorMethods.is_in(sel_gpot, sel):
        raise ValueError('gpot port selector not in selector of all ports')
    if not SelectorMethods.is_in(sel_spike, sel):
        raise ValueError('spike port selector not in selector of all ports')
    if not SelectorMethods.are_disjoint(sel_gpot, sel_spike):
        raise ValueError('gpot and spike port selectors not disjoint')

    # Save routing table and mapping between MPI ranks and module IDs:
    self.routing_table = routing_table
    self.rank_to_id = rank_to_id

    # Generate a unique ID if none is specified:
    if id is None:
        self.id = uid()
    else:
        # If a unique ID was specified and the routing table is not empty
        # (i.e., there are connections between multiple modules),
        # the id must be a node in the table:
        if routing_table is not None and len(routing_table.ids) and \
           not routing_table.has_node(id):
            raise ValueError('routing table must contain specified module ID')
        self.id = id

    # Reformat logger name:
    LoggerMixin.__init__(self, 'mod %s' % self.id)

    # Create module interface given the specified ports:
    self.interface = Interface(sel, columns)

    # Set the interface ID to 0; we assume that a module only has one interface:
    self.interface[sel, 'interface'] = 0

    # Set the port attributes:
    self.interface[sel_in, 'io'] = 'in'
    self.interface[sel_out, 'io'] = 'out'
    self.interface[sel_gpot, 'type'] = 'gpot'
    self.interface[sel_spike, 'type'] = 'spike'

    # Find the input and output ports:
    self.in_ports = self.interface.in_ports().to_tuples()
    self.out_ports = self.interface.out_ports().to_tuples()

    # Find the graded potential and spiking ports:
    self.gpot_ports = self.interface.gpot_ports().to_tuples()
    self.spike_ports = self.interface.spike_ports().to_tuples()

    self.in_gpot_ports = self.interface.in_ports().gpot_ports().to_tuples()
    self.in_spike_ports = self.interface.in_ports().spike_ports().to_tuples()
    self.out_gpot_ports = self.interface.out_ports().gpot_ports().to_tuples()
    self.out_spike_ports = self.interface.out_ports().spike_ports().to_tuples()

    # Set up mapper between port identifiers and their associated data:
    if len(data_gpot) != len(self.gpot_ports):
        raise ValueError('incompatible gpot port data array length')
    if len(data_spike) != len(self.spike_ports):
        raise ValueError('incompatible spike port data array length')
    self.data = {}
    self.data['gpot'] = data_gpot
    self.data['spike'] = data_spike
    self.pm = {}
    self.pm['gpot'] = PortMapper(sel_gpot, self.data['gpot'])
    self.pm['spike'] = PortMapper(sel_spike, self.data['spike'])
def __init__(self, sel, sel_in, sel_out, sel_gpot, sel_spike,
             data_gpot, data_spike,
             columns=None,
             ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG, spike_tag=SPIKE_TAG,
             id=None, device=None,
             routing_table=None, rank_to_id=None, pm_all=None,
             debug=False, time_sync=False):
    """GPU-backed module whose port data is stored in GPU memory.

    `columns` defaults to ['interface', 'io', 'type']; a fresh list is
    created per call to avoid the shared mutable default argument pitfall.
    """

    # Avoid the shared mutable default argument pitfall:
    if columns is None:
        columns = ['interface', 'io', 'type']

    # Call super for BaseModule rather than Module because most of the
    # functionality of the former's constructor must be overridden in any case:
    super(BaseModule, self).__init__(ctrl_tag)
    self.debug = debug
    self.time_sync = time_sync
    self.device = device

    self._gpot_tag = gpot_tag
    self._spike_tag = spike_tag

    # Require several necessary attribute columns:
    assert 'interface' in columns
    assert 'io' in columns
    assert 'type' in columns

    self._init_gpu()

    # This is needed to ensure that MPI_Finalize is called before PyCUDA
    # attempts to clean up; see
    # https://groups.google.com/forum/#!topic/mpi4py/by0Rd5q0Ayw
    atexit.register(MPI.Finalize)

    # Manually register the file close method associated with MPIOutput
    # so that it is called by atexit before MPI.Finalize() (if the file is
    # closed after MPI.Finalize() is called, an error will occur):
    for k, v in twiggy.emitters.iteritems():
        if isinstance(v._output, MPIOutput):
            atexit.register(v._output.close)

    # Ensure that the input and output port selectors respectively
    # select mutually exclusive subsets of the set of all ports exposed by
    # the module:
    assert SelectorMethods.is_in(sel_in, sel)
    assert SelectorMethods.is_in(sel_out, sel)
    assert SelectorMethods.are_disjoint(sel_in, sel_out)

    # Ensure that the graded potential and spiking port selectors
    # respectively select mutually exclusive subsets of the set of all ports
    # exposed by the module:
    assert SelectorMethods.is_in(sel_gpot, sel)
    assert SelectorMethods.is_in(sel_spike, sel)
    assert SelectorMethods.are_disjoint(sel_gpot, sel_spike)

    # Save routing table and mapping between MPI ranks and module IDs:
    self.routing_table = routing_table
    self.rank_to_id = rank_to_id

    # Save module interface data (stored in a dict of BasePortMapper instances):
    self.pm_all = pm_all

    # Generate a unique ID if none is specified:
    if id is None:
        self.id = uid()
    else:
        # Save routing table; if a unique ID was specified, it must be a node in
        # the routing table:
        if routing_table is not None and not routing_table.has_node(id):
            raise ValueError(
                'routing table must contain specified module ID')
        self.id = id

    # Reformat logger name:
    LoggerMixin.__init__(self, 'mod %s' % self.id)

    # Create module interface given the specified ports:
    self.interface = Interface(sel, columns)

    # Set the interface ID to 0; we assume that a module only has one interface:
    self.interface[sel, 'interface'] = 0

    # Set the port attributes:
    self.interface[sel_in, 'io'] = 'in'
    self.interface[sel_out, 'io'] = 'out'
    self.interface[sel_gpot, 'type'] = 'gpot'
    self.interface[sel_spike, 'type'] = 'spike'

    # Find the input and output ports:
    self.in_ports = self.interface.in_ports().to_tuples()
    self.out_ports = self.interface.out_ports().to_tuples()

    # Find the graded potential and spiking ports:
    self.gpot_ports = self.interface.gpot_ports().to_tuples()
    self.spike_ports = self.interface.spike_ports().to_tuples()

    self.in_gpot_ports = self.interface.in_ports().gpot_ports().to_tuples()
    self.in_spike_ports = self.interface.in_ports().spike_ports().to_tuples()
    self.out_gpot_ports = self.interface.out_ports().gpot_ports().to_tuples()
    self.out_spike_ports = self.interface.out_ports().spike_ports().to_tuples()

    # Set up mapper between port identifiers and their associated data:
    assert len(data_gpot) == len(self.gpot_ports)
    assert len(data_spike) == len(self.spike_ports)
    self.data = {}
    self.data['gpot'] = gpuarray.to_gpu(data_gpot)
    self.data['spike'] = gpuarray.to_gpu(data_spike)
    self.pm = {}
    self.pm['gpot'] = GPUPortMapper(sel_gpot, self.data['gpot'], make_copy=False)
    self.pm['spike'] = GPUPortMapper(sel_spike, self.data['spike'], make_copy=False)
def __init__(self, sel, sel_in, sel_out, sel_gpot, sel_spike,
             data_gpot, data_spike,
             columns=None,
             port_data=PORT_DATA, port_ctrl=PORT_CTRL, port_time=PORT_TIME,
             id=None, device=None, debug=False, time_sync=False):
    """Module communicating over data, control, and time ports.

    `columns` defaults to ['interface', 'io', 'type']; a fresh list is
    created per call to avoid the shared mutable default argument pitfall.
    """

    self.debug = debug
    self.time_sync = time_sync
    self.device = device

    # Avoid the shared mutable default argument pitfall:
    if columns is None:
        columns = ['interface', 'io', 'type']

    # Require several necessary attribute columns:
    assert 'interface' in columns
    assert 'io' in columns
    assert 'type' in columns

    # Generate a unique ID if none is specified:
    if id is None:
        id = uid()

    # Call super for BaseModule rather than Module because most of the
    # functionality of the former's constructor must be overridden in any case:
    super(BaseModule, self).__init__(port_ctrl, id)

    # Reformat logger name:
    LoggerMixin.__init__(self, 'mod %s' % self.id)

    # Data port:
    if port_data == port_ctrl:
        raise ValueError('data and control ports must differ')
    self.port_data = port_data
    if port_time == port_ctrl or port_time == port_data:
        raise ValueError('time port must differ from data and control ports')
    self.port_time = port_time

    # Initial network connectivity:
    self.net = 'none'

    # Create module interface given the specified ports:
    self.interface = Interface(sel, columns)

    # Set the interface ID to 0
    # we assume that a module only has one interface:
    self.interface[sel, 'interface'] = 0

    # Set port types:
    assert SelectorMethods.is_in(sel_in, sel)
    assert SelectorMethods.is_in(sel_out, sel)
    assert SelectorMethods.are_disjoint(sel_in, sel_out)
    self.interface[sel_in, 'io'] = 'in'
    self.interface[sel_out, 'io'] = 'out'
    assert SelectorMethods.is_in(sel_gpot, sel)
    assert SelectorMethods.is_in(sel_spike, sel)
    assert SelectorMethods.are_disjoint(sel_gpot, sel_spike)
    self.interface[sel_gpot, 'type'] = 'gpot'
    self.interface[sel_spike, 'type'] = 'spike'

    # Set up mapper between port identifiers and their associated data:
    assert len(data_gpot) == len(self.interface.gpot_ports())
    assert len(data_spike) == len(self.interface.spike_ports())
    self.data = {}
    self.data['gpot'] = data_gpot
    self.data['spike'] = data_spike
    self.pm = {}
    self.pm['gpot'] = PortMapper(sel_gpot, self.data['gpot'])
    self.pm['spike'] = PortMapper(sel_spike, self.data['spike'])

    # Patterns connecting this module instance with other modules instances.
    # Keyed on the IDs of those modules:
    self.patterns = {}

    # Each entry in pat_ints is a tuple containing the identifiers of which
    # of a pattern's identifiers are connected to the current module (first
    # entry) and the modules to which it is connected (second entry).
    # Keyed on the IDs of those modules:
    self.pat_ints = {}

    # Dict for storing incoming data; each entry (corresponding to each
    # module that sends input to the current module) is a deque containing
    # incoming data, which in turn contains transmitted data arrays. Deques
    # are used here to accommodate situations when multiple data from a
    # single source arrive:
    self._in_data = {}

    # List for storing outgoing data; each entry is a tuple whose first
    # entry is the source or destination module ID and whose second entry is
    # the data to transmit:
    self._out_data = []

    # Dictionaries containing ports of source modules that
    # send output to this module. Must be initialized immediately before
    # an emulation begins running. Keyed on source module ID:
    self._in_port_dict = {}
    self._in_port_dict_ids = {}
    self._in_port_dict['gpot'] = {}
    self._in_port_dict['spike'] = {}

    # Dictionaries containing ports of destination modules that
    # receive input from this module. Must be initialized immediately before
    # an emulation begins running. Keyed on destination module ID:
    self._out_port_dict = {}
    self._out_port_dict_ids = {}
    self._out_port_dict['gpot'] = {}
    self._out_port_dict['spike'] = {}

    self._out_ids = []
    self._in_ids = []
def __init__(self, sel, sel_in, sel_out, sel_gpot, sel_spike,
             data_gpot, data_spike,
             columns=None,
             ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG, spike_tag=SPIKE_TAG,
             id=None, device=None,
             routing_table=None, rank_to_id=None,
             debug=False, time_sync=False):
    """GPU-backed module whose port data is stored in GPU memory.

    `columns` defaults to ['interface', 'io', 'type']; a fresh list is
    created per call to avoid the shared mutable default argument pitfall.
    """

    super(Module, self).__init__(ctrl_tag)
    self.debug = debug
    self.time_sync = time_sync
    self.device = device

    self._gpot_tag = gpot_tag
    self._spike_tag = spike_tag

    # Avoid the shared mutable default argument pitfall:
    if columns is None:
        columns = ['interface', 'io', 'type']

    # Require several necessary attribute columns:
    if 'interface' not in columns:
        raise ValueError('interface column required')
    if 'io' not in columns:
        raise ValueError('io column required')
    if 'type' not in columns:
        raise ValueError('type column required')

    # Initialize GPU here so as to be able to initialize a port mapper
    # containing GPU memory:
    self._init_gpu()

    # This is needed to ensure that MPI_Finalize is called before PyCUDA
    # attempts to clean up; see
    # https://groups.google.com/forum/#!topic/mpi4py/by0Rd5q0Ayw
    atexit.register(MPI.Finalize)

    # Manually register the file close method associated with MPIOutput
    # so that it is called by atexit before MPI.Finalize() (if the file is
    # closed after MPI.Finalize() is called, an error will occur):
    for k, v in twiggy.emitters.iteritems():
        if isinstance(v._output, MPIOutput):
            atexit.register(v._output.close)

    # Ensure that the input and output port selectors respectively
    # select mutually exclusive subsets of the set of all ports exposed by
    # the module:
    if not SelectorMethods.is_in(sel_in, sel):
        raise ValueError('input port selector not in selector of all ports')
    if not SelectorMethods.is_in(sel_out, sel):
        raise ValueError('output port selector not in selector of all ports')
    if not SelectorMethods.are_disjoint(sel_in, sel_out):
        raise ValueError('input and output port selectors not disjoint')

    # Ensure that the graded potential and spiking port selectors
    # respectively select mutually exclusive subsets of the set of all ports
    # exposed by the module:
    if not SelectorMethods.is_in(sel_gpot, sel):
        raise ValueError('gpot port selector not in selector of all ports')
    if not SelectorMethods.is_in(sel_spike, sel):
        raise ValueError('spike port selector not in selector of all ports')
    if not SelectorMethods.are_disjoint(sel_gpot, sel_spike):
        raise ValueError('gpot and spike port selectors not disjoint')

    # Save routing table and mapping between MPI ranks and module IDs:
    self.routing_table = routing_table
    self.rank_to_id = rank_to_id

    # Generate a unique ID if none is specified:
    if id is None:
        self.id = uid()
    else:
        # If a unique ID was specified and the routing table is not empty
        # (i.e., there are connections between multiple modules), the id
        # must be a node in the routing table:
        if routing_table is not None and len(routing_table.ids) and \
           not routing_table.has_node(id):
            raise ValueError('routing table must contain specified '
                             'module ID: {}'.format(id))
        self.id = id

    # Reformat logger name:
    LoggerMixin.__init__(self, 'mod %s' % self.id)

    # Create module interface given the specified ports:
    self.interface = Interface(sel, columns)

    # Set the interface ID to 0; we assume that a module only has one interface:
    self.interface[sel, 'interface'] = 0

    # Set the port attributes:
    self.interface[sel_in, 'io'] = 'in'
    self.interface[sel_out, 'io'] = 'out'
    self.interface[sel_gpot, 'type'] = 'gpot'
    self.interface[sel_spike, 'type'] = 'spike'

    # Find the input and output ports:
    self.in_ports = self.interface.in_ports().to_tuples()
    self.out_ports = self.interface.out_ports().to_tuples()

    # Find the graded potential and spiking ports:
    self.gpot_ports = self.interface.gpot_ports().to_tuples()
    self.spike_ports = self.interface.spike_ports().to_tuples()

    self.in_gpot_ports = self.interface.in_ports().gpot_ports().to_tuples()
    self.in_spike_ports = self.interface.in_ports().spike_ports().to_tuples()
    self.out_gpot_ports = self.interface.out_ports().gpot_ports().to_tuples()
    self.out_spike_ports = self.interface.out_ports().spike_ports().to_tuples()

    # Set up mapper between port identifiers and their associated data:
    if len(data_gpot) != len(self.gpot_ports):
        raise ValueError('incompatible gpot port data array length')
    if len(data_spike) != len(self.spike_ports):
        raise ValueError('incompatible spike port data array length')
    self.data = {}
    self.data['gpot'] = gpuarray.to_gpu(data_gpot)
    self.data['spike'] = gpuarray.to_gpu(data_spike)
    self.pm = {}
    self.pm['gpot'] = GPUPortMapper(sel_gpot, self.data['gpot'], make_copy=False)
    self.pm['spike'] = GPUPortMapper(sel_spike, self.data['spike'], make_copy=False)

    # MPI Request object for resolving asynchronous transfers:
    self.req = MPI.Request()
def __init__(self, selector, data,
             columns=None,
             port_data=PORT_DATA, port_ctrl=PORT_CTRL,
             id=None, debug=False):
    """Base module with a single port mapper over all of its ports.

    `columns` defaults to ['interface', 'io', 'type']; a fresh list is
    created per call to avoid the shared mutable default argument pitfall.
    """

    self.debug = debug

    # Avoid the shared mutable default argument pitfall:
    if columns is None:
        columns = ['interface', 'io', 'type']

    # Generate a unique ID if none is specified:
    if id is None:
        id = uid()

    super(BaseModule, self).__init__(port_ctrl, id)

    # Logging:
    self.logger = twiggy.log.name('module %s' % self.id)

    # Data port:
    if port_data == port_ctrl:
        raise ValueError('data and control ports must differ')
    self.port_data = port_data

    # Initial network connectivity:
    self.net = 'none'

    # Create module interface given the specified ports:
    self.interface = Interface(selector, columns)

    # Set the interface ID to 0; we assume that a module only has one interface:
    self.interface[selector, 'interface'] = 0

    # Set up mapper between port identifiers and their associated data:
    assert len(data) == len(self.interface)
    self.data = data
    self.pm = PortMapper(self.data, selector)

    # Patterns connecting this module instance with other modules instances.
    # Keyed on the IDs of those modules:
    self.patterns = {}

    # Each entry in pat_ints is a tuple containing the identifiers of which
    # of a pattern's identifiers are connected to the current module (first
    # entry) and the modules to which it is connected (second entry).
    # Keyed on the IDs of those modules:
    self.pat_ints = {}

    # Dict for storing incoming data; each entry (corresponding to each
    # module that sends input to the current module) is a deque containing
    # incoming data, which in turn contains transmitted data arrays. Deques
    # are used here to accommodate situations when multiple data from a
    # single source arrive:
    self._in_data = {}

    # List for storing outgoing data; each entry is a tuple whose first
    # entry is the source or destination module ID and whose second entry is
    # the data to transmit:
    self._out_data = []

    # Dictionary containing ports of source modules that
    # send output to this module. Must be initialized immediately before
    # an emulation begins running. Keyed on source module ID:
    self._in_port_dict = {}

    # Dictionary containing ports of destination modules that
    # receive input from this module. Must be initialized immediately before
    # an emulation begins running. Keyed on destination module ID:
    self._out_port_dict = {}

    self._out_ids = []
    self._in_ids = []
def __init__(self, sel, sel_in, sel_out, sel_gpot, sel_spike,
             data_gpot, data_spike,
             columns=None,
             ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG, spike_tag=SPIKE_TAG,
             id=None, device=None,
             routing_table=None, rank_to_id=None, pm_all=None,
             debug=False, time_sync=False):
    """GPU-backed module whose port data is stored in GPU memory.

    `columns` defaults to ['interface', 'io', 'type']; a fresh list is
    created per call to avoid the shared mutable default argument pitfall.
    """

    # Avoid the shared mutable default argument pitfall:
    if columns is None:
        columns = ['interface', 'io', 'type']

    # Call super for BaseModule rather than Module because most of the
    # functionality of the former's constructor must be overridden in any case:
    super(BaseModule, self).__init__(ctrl_tag)
    self.debug = debug
    self.time_sync = time_sync
    self.device = device

    self._gpot_tag = gpot_tag
    self._spike_tag = spike_tag

    # Require several necessary attribute columns:
    assert 'interface' in columns
    assert 'io' in columns
    assert 'type' in columns

    self._init_gpu()

    # This is needed to ensure that MPI_Finalize is called before PyCUDA
    # attempts to clean up; see
    # https://groups.google.com/forum/#!topic/mpi4py/by0Rd5q0Ayw
    atexit.register(MPI.Finalize)

    # Manually register the file close method associated with MPIOutput
    # so that it is called by atexit before MPI.Finalize() (if the file is
    # closed after MPI.Finalize() is called, an error will occur):
    for k, v in twiggy.emitters.iteritems():
        if isinstance(v._output, MPIOutput):
            atexit.register(v._output.close)

    # Ensure that the input and output port selectors respectively
    # select mutually exclusive subsets of the set of all ports exposed by
    # the module:
    assert SelectorMethods.is_in(sel_in, sel)
    assert SelectorMethods.is_in(sel_out, sel)
    assert SelectorMethods.are_disjoint(sel_in, sel_out)

    # Ensure that the graded potential and spiking port selectors
    # respectively select mutually exclusive subsets of the set of all ports
    # exposed by the module:
    assert SelectorMethods.is_in(sel_gpot, sel)
    assert SelectorMethods.is_in(sel_spike, sel)
    assert SelectorMethods.are_disjoint(sel_gpot, sel_spike)

    # Save routing table and mapping between MPI ranks and module IDs:
    self.routing_table = routing_table
    self.rank_to_id = rank_to_id

    # Save module interface data (stored in a dict of BasePortMapper instances):
    self.pm_all = pm_all

    # Generate a unique ID if none is specified:
    if id is None:
        self.id = uid()
    else:
        # Save routing table; if a unique ID was specified, it must be a node in
        # the routing table:
        if routing_table is not None and not routing_table.has_node(id):
            raise ValueError('routing table must contain specified module ID')
        self.id = id

    # Reformat logger name:
    LoggerMixin.__init__(self, 'mod %s' % self.id)

    # Create module interface given the specified ports:
    self.interface = Interface(sel, columns)

    # Set the interface ID to 0; we assume that a module only has one interface:
    self.interface[sel, 'interface'] = 0

    # Set the port attributes:
    self.interface[sel_in, 'io'] = 'in'
    self.interface[sel_out, 'io'] = 'out'
    self.interface[sel_gpot, 'type'] = 'gpot'
    self.interface[sel_spike, 'type'] = 'spike'

    # Find the input and output ports:
    self.in_ports = self.interface.in_ports().to_tuples()
    self.out_ports = self.interface.out_ports().to_tuples()

    # Find the graded potential and spiking ports:
    self.gpot_ports = self.interface.gpot_ports().to_tuples()
    self.spike_ports = self.interface.spike_ports().to_tuples()

    self.in_gpot_ports = self.interface.in_ports().gpot_ports().to_tuples()
    self.in_spike_ports = self.interface.in_ports().spike_ports().to_tuples()
    self.out_gpot_ports = self.interface.out_ports().gpot_ports().to_tuples()
    self.out_spike_ports = self.interface.out_ports().spike_ports().to_tuples()

    # Set up mapper between port identifiers and their associated data:
    assert len(data_gpot) == len(self.gpot_ports)
    assert len(data_spike) == len(self.spike_ports)
    self.data = {}
    self.data['gpot'] = gpuarray.to_gpu(data_gpot)
    self.data['spike'] = gpuarray.to_gpu(data_spike)
    self.pm = {}
    self.pm['gpot'] = GPUPortMapper(sel_gpot, self.data['gpot'], make_copy=False)
    self.pm['spike'] = GPUPortMapper(sel_spike, self.data['spike'], make_copy=False)