Example #1
File: LPU.py Project: yiyin/neurokernel
    def __init__(self, dt, n_dict, s_dict, input_file=None, output_file=None,
                 device=0, ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG,
                 spike_tag=SPIKE_TAG, rank_to_id=None, routing_table=None,
                 id=None, debug=False, columns=['io', 'type', 'interface'],
                 cuda_verbose=False, time_sync=False):

        LoggerMixin.__init__(self, 'mod {}'.format(id))
        self.log_info('Test')

        assert('io' in columns)
        assert('type' in columns)
        assert('interface' in columns)
        self.LPU_id = id
        self.dt = dt
        self.debug = debug
        self.device = device
        if cuda_verbose:
            self.compile_options = ['--ptxas-options=-v']
        else:
            self.compile_options = []

        # Handle file I/O:
        self.output_file = output_file
        self.output = True if output_file else False
        self.input_file = input_file
        self.input_eof = False if input_file else True

        # Load neurons and synapse data:
        self._load_neurons()
        self._load_synapses()

        # Set default one time import:
        self._one_time_import = 10

        # Save neuron data in the form
        # [('Model0', {'attrib0': [..], 'attrib1': [..]}), ('Model1', ...)]
        self.n_list = n_dict.items()

        # List of booleans indicating whether first neuron of each model is a
        # spiking model:
        n_model_is_spk = [ n['spiking'][0] for _, n in self.n_list ]

        # Number of neurons of each model:
        n_model_num = [ len(n['id']) for _, n in self.n_list ]

        # Concatenate lists of integers corresponding to neuron positions in LPU
        # graph for all of the models into a single list:
        n_id = np.array(sum( [ n['id'] for _, n in self.n_list ], []),
                        dtype=np.int32)

        # Concatenate lists of common attributes in model dictionaries into
        # single lists:
        n_is_spk = np.array(sum( [ n['spiking'] for _, n in self.n_list ], []))
        n_is_pub = np.array(sum( [ n['public'] for _, n in self.n_list ], []))
        n_has_in = np.array(sum( [ n['extern'] for _, n in self.n_list ], []))

        # Get selectors and positions of input ports:
        try:
            sel_in_gpot = self.extract_in_gpot(n_dict)
            in_ports_ids_gpot = np.array(n_dict[PORT_IN_GPOT]['id'])
            self.ports_in_gpot_mem_ind = zip(*self.n_list)[0].index(PORT_IN_GPOT)
        except KeyError:
            sel_in_gpot = ''
            in_ports_ids_gpot = np.array([], dtype=np.int32)
            self.ports_in_gpot_mem_ind = None

        try:
            sel_in_spk = self.extract_in_spk(n_dict)
            in_ports_ids_spk = np.array(n_dict[PORT_IN_SPK]['id'],
                                        dtype=np.int32)
            self.ports_in_spk_mem_ind = zip(*self.n_list)[0].index(PORT_IN_SPK)
        except KeyError:
            sel_in_spk = ''
            in_ports_ids_spk = np.array([], dtype=np.int32)
            self.ports_in_spk_mem_ind = None

        sel_in = ','.join(filter(None, [sel_in_gpot, sel_in_spk]))

        # Get selectors and positions of output neurons:
        sel_out_gpot = self.extract_out_gpot(n_dict)
        sel_out_spk = self.extract_out_spk(n_dict)
        self.out_ports_ids_gpot = np.array([id for _, n in self.n_list for id, pub, spk in
                                            zip(n['id'], n['public'], n['spiking'])
                                            if pub and not spk], dtype=np.int32)
        self.out_ports_ids_spk = np.array([id for _, n in self.n_list for id, pub, spk in
                                           zip(n['id'], n['public'], n['spiking'])
                                           if pub and spk], dtype=np.int32)

        sel_out = ','.join(filter(None, [sel_out_gpot, sel_out_spk]))
        sel_gpot = ','.join(filter(None, [sel_in_gpot, sel_out_gpot]))
        sel_spk = ','.join(filter(None, [sel_in_spk, sel_out_spk]))
        sel = ','.join(filter(None, [sel_gpot, sel_spk]))

        self.sel_in_spk = sel_in_spk
        self.sel_out_spk = sel_out_spk
        self.sel_in_gpot = sel_in_gpot
        self.sel_out_gpot = sel_out_gpot

        # The following code creates a mapping for each neuron from its "id" to
        # its position on the gpu array. The gpu array is arranged as follows,
        #
        #    | gpot_neuron | spike_neuron |
        #
        # For example, suppose the id's of the gpot neurons and of the spike
        # neurons are 1,4,5 and 2,0,3, respectively. We allocate gpu memory
        # for each neuron as follows,
        #
        #    | 1 4 5 | 2 0 3 |
        #
        # To get the position of each neuron, we simply use numpy.argsort:
        #
        # >>> x = [1,4,5,2,0,3]
        # >>> y = numpy.argsort( x )
        # >>> y
        # [4,0,3,5,1,2]
        #
        # The i-th value of the returned array is the position of the
        # i-th smallest element in the original array, which here is the
        # id i itself. In other words, if x[j] = i, then y[i] = j.

        num_gpot_neurons = np.where( n_model_is_spk, 0, n_model_num)
        num_spike_neurons = np.where( n_model_is_spk, n_model_num, 0)

        # Total numbers of gpot and spiking neurons:
        self.total_num_gpot_neurons = sum( num_gpot_neurons )
        self.total_num_spike_neurons = sum( num_spike_neurons )

        gpot_idx = n_id[ ~n_is_spk ]
        spike_idx = n_id[ n_is_spk ]
        self.order = np.argsort(
            np.concatenate((gpot_idx, spike_idx))).astype(np.int32)
        self.gpot_order = np.argsort(gpot_idx).astype(np.int32)
        self.spike_order = np.argsort(spike_idx).astype(np.int32)
        self.spike_shift = self.total_num_gpot_neurons
        in_id = n_id[n_has_in]
        in_id.sort()
        pub_spk_id = n_id[ n_is_pub & n_is_spk ]
        pub_spk_id.sort()
        pub_gpot_id = n_id[ n_is_pub & ~n_is_spk ]
        pub_gpot_id.sort()
        self.input_neuron_list = self.order[in_id]
        public_spike_list = self.order[pub_spk_id]
        public_gpot_list = self.order[pub_gpot_id]
        self.num_public_gpot = len( public_gpot_list )
        self.num_public_spike = len( public_spike_list )
        self.num_input = len( self.input_neuron_list )
        in_ports_ids_gpot = self.order[in_ports_ids_gpot]
        in_ports_ids_spk = self.order[in_ports_ids_spk]
        self.out_ports_ids_gpot = self.order[self.out_ports_ids_gpot]
        self.out_ports_ids_spk = self.order[self.out_ports_ids_spk]

        gpot_delay_steps = 0
        spike_delay_steps = 0

        cond_pre = []
        cond_post = []
        I_pre = []
        I_post = []
        reverse = []

        count = 0

        self.s_dict = s_dict
        self.s_list = self.s_dict.items()
        self.nid_max = np.max(n_id) + 1
        num_synapses = [ len(s['id']) for _, s in self.s_list ]
        for (_, s) in self.s_list:
            shift = self.spike_shift if s['class'][0] <= 1 else 0
            s['pre'] = [ self.order[int(nid)] - shift for nid in s['pre'] ]
            # Why don't we need the shift for the post neuron?
            # For synapses whose post-synaptic site is another synapse, we set
            # its post-id to be max_neuron_id + synapse_id. By doing so, we
            # won't confuse synapse ID's with neurons ID's.
            s_neu_post = [self.order[int(nid)] for nid in s['post'] if 'synapse' not in str(nid)]
            s_syn_post = [int(nid[8:])+self.nid_max for nid in s['post'] if 'synapse' in str(nid)]
            s['post'] = s_neu_post + s_syn_post

            order = np.argsort(s['post']).astype(np.int32)
            for k, v in s.items():
                s[k] = np.asarray(v)[order]

            # NOTE: a computational model of a synapse could be either
            # conductance-based or non-conductance-based under the same set of
            # ODEs. If the EPSC comes directly from one of the state variables,
            # the model is non-conductance-based. Otherwise, if the calculation
            # of the EPSC involves a reverse potential, the model is recognized
            # as conductance-based.

            idx = np.where(s['conductance'])[0]
            if len(idx) > 0:
                 cond_post.extend(s['post'][idx])
                 reverse.extend(s['reverse'][idx])
                 cond_pre.extend(range(count, count+len(idx)))
                 count += len(idx)
                 if 'delay' in s:
                     max_del = np.max( s['delay'][idx] )
                     gpot_delay_steps = max_del if max_del > gpot_delay_steps \
                                            else gpot_delay_steps

            idx = np.where(~s['conductance'])[0]
            if len(idx) > 0:
                 I_post.extend(s['post'][idx])
                 I_pre.extend(range(count, count+len(idx)))
                 count += len(idx)
                 if 'delay' in s:
                     max_del = np.max( s['delay'][idx] )
                     spike_delay_steps = max_del if max_del > spike_delay_steps \
                                         else spike_delay_steps

        self.total_synapses = int(np.sum(num_synapses))
        I_post.extend(self.input_neuron_list)
        I_pre.extend(range(self.total_synapses, self.total_synapses + \
                          len(self.input_neuron_list)))

        cond_post = np.asarray(cond_post, dtype=np.int32)
        cond_pre = np.asarray(cond_pre, dtype = np.int32)
        reverse = np.asarray(reverse, dtype=np.double)

        order1 = np.argsort(cond_post, kind='mergesort')
        cond_post = cond_post[order1]
        cond_pre = cond_pre[order1]
        reverse = reverse[order1]


        I_post = np.asarray(I_post, dtype=np.int32)
        I_pre = np.asarray(I_pre, dtype=np.int32)

        order1 = np.argsort(I_post, kind='mergesort')
        I_post = I_post[order1]
        I_pre = I_pre[order1]

        self.idx_start_gpot = np.concatenate(
            (np.asarray([0,], dtype=np.int32),
             np.cumsum(num_gpot_neurons, dtype=np.int32)))
        self.idx_start_spike = np.concatenate(
            (np.asarray([0,], dtype=np.int32),
             np.cumsum(num_spike_neurons, dtype=np.int32)))
        self.idx_start_synapse = np.concatenate(
            (np.asarray([0,], dtype=np.int32),
             np.cumsum(num_synapses, dtype=np.int32)))


        for i, (t, n) in enumerate(self.n_list):
            if n['spiking'][0]:
                idx = np.where(
                    (cond_post >= self.idx_start_spike[i] + self.spike_shift)&
                    (cond_post < self.idx_start_spike[i+1] + self.spike_shift) )
                n['cond_post'] = cond_post[idx] - self.idx_start_spike[i] - self.spike_shift
                n['cond_pre'] = cond_pre[idx]
                n['reverse'] = reverse[idx]
                idx = np.where(
                    (I_post >= self.idx_start_spike[i] + self.spike_shift)&
                    (I_post < self.idx_start_spike[i+1] + self.spike_shift) )
                n['I_post'] = I_post[idx] - self.idx_start_spike[i] - self.spike_shift
                n['I_pre'] = I_pre[idx]
            else:
                idx = np.where( (cond_post >= self.idx_start_gpot[i])&
                                (cond_post < self.idx_start_gpot[i+1]) )
                n['cond_post'] = cond_post[idx] - self.idx_start_gpot[i]
                n['cond_pre'] = cond_pre[idx]
                n['reverse'] = reverse[idx]
                idx =  np.where( (I_post >= self.idx_start_gpot[i])&
                                 (I_post < self.idx_start_gpot[i+1]) )
                n['I_post'] = I_post[idx] - self.idx_start_gpot[i]
                n['I_pre'] = I_pre[idx]

            n['num_dendrites_cond'] = Counter(n['cond_post'])
            n['num_dendrites_I'] = Counter(n['I_post'])

        if len(self.s_list) > 0:
            s_id = np.concatenate([s['id'] for _, s in self.s_list]).astype(np.int32)
        else:
            s_id = np.empty(0, dtype = np.int32)
        
        s_order = np.arange(self.total_synapses)[s_id]
        idx = np.where(cond_post >= self.nid_max)[0]
        cond_post_syn = s_order[cond_post[idx] - self.nid_max]
        cond_post_syn_offset = idx[0] if len(idx) > 0 else 0
        idx = np.where(I_post >= self.nid_max)[0]
        I_post_syn = s_order[I_post[idx] - self.nid_max]
        I_post_syn_offset = idx[0] if len(idx) > 0 else 0
        for i, (t, s) in enumerate(self.s_list):
            idx = np.where(
                (cond_post_syn >= self.idx_start_synapse[i]) &
                (cond_post_syn < self.idx_start_synapse[i+1]))[0]
            s['cond_post'] = cond_post[idx+cond_post_syn_offset] - self.nid_max
            s['cond_pre'] = cond_pre[idx+cond_post_syn_offset]
            s['reverse'] = reverse[idx+cond_post_syn_offset]
            # NOTE: after this point, s['reverse'] is no longer the reverse
            # potential associated with the current synapse class, but the
            # reverse potential of other synapses projecting to the current one.
            # Its purpose is exactly the same as n['reverse'].
            # Not sure if this is good though, since it obviously creates some
            # degree of confusion.
            idx = np.where(
                (I_post_syn >= self.idx_start_synapse[i]) &
                (I_post_syn < self.idx_start_synapse[i+1]))[0]
            s['I_post'] = I_post[idx+I_post_syn_offset] - self.nid_max
            s['I_pre'] = I_pre[idx+I_post_syn_offset]

            s['num_dendrites_cond'] = Counter(s['cond_post'])
            s['num_dendrites_I'] = Counter(s['I_post'])

        self.gpot_delay_steps = int(round(gpot_delay_steps*1e-3/self.dt)) + 1
        self.spike_delay_steps = int(round(spike_delay_steps*1e-3/self.dt)) + 1

        data_gpot = np.zeros(self.num_public_gpot + len(in_ports_ids_gpot),
                             np.double)
        data_spike = np.zeros(self.num_public_spike + len(in_ports_ids_spk),
                              np.int32)
        super(LPU, self).__init__(sel=sel, sel_in=sel_in, sel_out=sel_out,
                                  sel_gpot=sel_gpot, sel_spike=sel_spk,
                                  data_gpot=data_gpot, data_spike=data_spike,
                                  columns=columns, ctrl_tag=ctrl_tag, gpot_tag=gpot_tag,
                                  spike_tag=spike_tag, id=self.LPU_id,
                                  rank_to_id=rank_to_id, routing_table=routing_table,
                                  device=device, debug=debug, time_sync=time_sync)

        self.sel_in_gpot_ids = np.array(self.pm['gpot'].ports_to_inds(self.sel_in_gpot),
                                        dtype=np.int32)
        self.sel_out_gpot_ids = np.array(self.pm['gpot'].ports_to_inds(self.sel_out_gpot),
                                        dtype=np.int32)
        self.sel_in_spk_ids = np.array(self.pm['spike'].ports_to_inds(self.sel_in_spk),
                                        dtype=np.int32)
        self.sel_out_spk_ids = np.array(self.pm['spike'].ports_to_inds(self.sel_out_spk),
                                        dtype=np.int32)
Example #2
File: LPU.py Project: yiyin/neurokernel
    def __init__(
        self,
        dt,
        n_dict,
        s_dict,
        input_file=None,
        output_file=None,
        device=0,
        ctrl_tag=CTRL_TAG,
        gpot_tag=GPOT_TAG,
        spike_tag=SPIKE_TAG,
        rank_to_id=None,
        routing_table=None,
        id=None,
        debug=False,
        columns=["io", "type", "interface"],
        cuda_verbose=False,
        time_sync=False,
    ):

        LoggerMixin.__init__(self, "mod {}".format(id))
        self.log_info("Test")

        assert "io" in columns
        assert "type" in columns
        assert "interface" in columns
        self.LPU_id = id
        self.dt = dt
        self.debug = debug
        self.device = device
        if cuda_verbose:
            self.compile_options = ["--ptxas-options=-v"]
        else:
            self.compile_options = []

        # Handle file I/O:
        self.output_file = output_file
        self.output = True if output_file else False
        self.input_file = input_file
        self.input_eof = False if input_file else True

        # Load neurons and synapse data:
        self._load_neurons()
        self._load_synapses()

        # Set default one time import:
        self._one_time_import = 10

        # Save neuron data in the form
        # [('Model0', {'attrib0': [..], 'attrib1': [..]}), ('Model1', ...)]
        self.n_list = n_dict.items()

        # List of booleans indicating whether first neuron of each model is a
        # spiking model:
        n_model_is_spk = [n["spiking"][0] for _, n in self.n_list]

        # Number of neurons of each model:
        n_model_num = [len(n["id"]) for _, n in self.n_list]

        # Concatenate lists of integers corresponding to neuron positions in LPU
        # graph for all of the models into a single list:
        n_id = np.array(sum([n["id"] for _, n in self.n_list], []), dtype=np.int32)

        # Concatenate lists of common attributes in model dictionaries into
        # single lists:
        n_is_spk = np.array(sum([n["spiking"] for _, n in self.n_list], []))
        n_is_pub = np.array(sum([n["public"] for _, n in self.n_list], []))
        n_has_in = np.array(sum([n["extern"] for _, n in self.n_list], []))

        # Get selectors and positions of input ports:
        try:
            sel_in_gpot = self.extract_in_gpot(n_dict)
            in_ports_ids_gpot = np.array(n_dict[PORT_IN_GPOT]["id"])
            self.ports_in_gpot_mem_ind = zip(*self.n_list)[0].index(PORT_IN_GPOT)
        except KeyError:
            sel_in_gpot = ""
            in_ports_ids_gpot = np.array([], dtype=np.int32)
            self.ports_in_gpot_mem_ind = None

        try:
            sel_in_spk = self.extract_in_spk(n_dict)
            in_ports_ids_spk = np.array(n_dict[PORT_IN_SPK]["id"], dtype=np.int32)
            self.ports_in_spk_mem_ind = zip(*self.n_list)[0].index(PORT_IN_SPK)
        except KeyError:
            sel_in_spk = ""
            in_ports_ids_spk = np.array([], dtype=np.int32)
            self.ports_in_spk_mem_ind = None

        sel_in = ",".join(filter(None, [sel_in_gpot, sel_in_spk]))

        # Get selectors and positions of output neurons:
        sel_out_gpot = self.extract_out_gpot(n_dict)
        sel_out_spk = self.extract_out_spk(n_dict)
        self.out_ports_ids_gpot = np.array(
            [id for _, n in self.n_list for id, pub, spk in zip(n["id"], n["public"], n["spiking"]) if pub and not spk],
            dtype=np.int32,
        )
        self.out_ports_ids_spk = np.array(
            [id for _, n in self.n_list for id, pub, spk in zip(n["id"], n["public"], n["spiking"]) if pub and spk],
            dtype=np.int32,
        )

        sel_out = ",".join(filter(None, [sel_out_gpot, sel_out_spk]))
        sel_gpot = ",".join(filter(None, [sel_in_gpot, sel_out_gpot]))
        sel_spk = ",".join(filter(None, [sel_in_spk, sel_out_spk]))
        sel = ",".join(filter(None, [sel_gpot, sel_spk]))

        self.sel_in_spk = sel_in_spk
        self.sel_out_spk = sel_out_spk
        self.sel_in_gpot = sel_in_gpot
        self.sel_out_gpot = sel_out_gpot

        # The following code creates a mapping for each neuron from its "id" to
        # its position on the gpu array. The gpu array is arranged as follows,
        #
        #    | gpot_neuron | spike_neuron |
        #
        # For example, suppose the id's of the gpot neurons and of the spike
        # neurons are 1,4,5 and 2,0,3, respectively. We allocate gpu memory
        # for each neuron as follows,
        #
        #    | 1 4 5 | 2 0 3 |
        #
        # To get the position of each neuron, we simply use numpy.argsort:
        #
        # >>> x = [1,4,5,2,0,3]
        # >>> y = numpy.argsort( x )
        # >>> y
        # [4,0,3,5,1,2]
        #
        # The i-th value of the returned array is the position of the
        # i-th smallest element in the original array, which here is the
        # id i itself. In other words, if x[j] = i, then y[i] = j.

        num_gpot_neurons = np.where(n_model_is_spk, 0, n_model_num)
        num_spike_neurons = np.where(n_model_is_spk, n_model_num, 0)

        # Total numbers of gpot and spiking neurons:
        self.total_num_gpot_neurons = sum(num_gpot_neurons)
        self.total_num_spike_neurons = sum(num_spike_neurons)

        gpot_idx = n_id[~n_is_spk]
        spike_idx = n_id[n_is_spk]
        self.order = np.argsort(np.concatenate((gpot_idx, spike_idx))).astype(np.int32)
        self.gpot_order = np.argsort(gpot_idx).astype(np.int32)
        self.spike_order = np.argsort(spike_idx).astype(np.int32)
        self.spike_shift = self.total_num_gpot_neurons
        in_id = n_id[n_has_in]
        in_id.sort()
        pub_spk_id = n_id[n_is_pub & n_is_spk]
        pub_spk_id.sort()
        pub_gpot_id = n_id[n_is_pub & ~n_is_spk]
        pub_gpot_id.sort()
        self.input_neuron_list = self.order[in_id]
        public_spike_list = self.order[pub_spk_id]
        public_gpot_list = self.order[pub_gpot_id]
        self.num_public_gpot = len(public_gpot_list)
        self.num_public_spike = len(public_spike_list)
        self.num_input = len(self.input_neuron_list)
        in_ports_ids_gpot = self.order[in_ports_ids_gpot]
        in_ports_ids_spk = self.order[in_ports_ids_spk]
        self.out_ports_ids_gpot = self.order[self.out_ports_ids_gpot]
        self.out_ports_ids_spk = self.order[self.out_ports_ids_spk]

        gpot_delay_steps = 0
        spike_delay_steps = 0

        cond_pre = []
        cond_post = []
        I_pre = []
        I_post = []
        reverse = []

        count = 0

        self.s_dict = s_dict
        self.s_list = self.s_dict.items()
        self.nid_max = np.max(n_id) + 1
        num_synapses = [len(s["id"]) for _, s in self.s_list]
        for (_, s) in self.s_list:
            shift = self.spike_shift if s["class"][0] <= 1 else 0
            s["pre"] = [self.order[int(nid)] - shift for nid in s["pre"]]
            # Why don't we need the shift for the post neuron?
            # For synapses whose post-synaptic site is another synapse, we set
            # its post-id to be max_neuron_id + synapse_id. By doing so, we
            # won't confuse synapse ID's with neurons ID's.
            s_neu_post = [self.order[int(nid)] for nid in s["post"] if "synapse" not in str(nid)]
            s_syn_post = [int(nid[8:]) + self.nid_max for nid in s["post"] if "synapse" in str(nid)]
            s["post"] = s_neu_post + s_syn_post

            order = np.argsort(s["post"]).astype(np.int32)
            for k, v in s.items():
                s[k] = np.asarray(v)[order]

            # NOTE: a computational model of a synapse could be either
            # conductance-based or non-conductance-based under the same set of
            # ODEs. If the EPSC comes directly from one of the state variables,
            # the model is non-conductance-based. Otherwise, if the calculation
            # of the EPSC involves a reverse potential, the model is recognized
            # as conductance-based.

            idx = np.where(s["conductance"])[0]
            if len(idx) > 0:
                cond_post.extend(s["post"][idx])
                reverse.extend(s["reverse"][idx])
                cond_pre.extend(range(count, count + len(idx)))
                count += len(idx)
                if "delay" in s:
                    max_del = np.max(s["delay"][idx])
                    gpot_delay_steps = max_del if max_del > gpot_delay_steps else gpot_delay_steps

            idx = np.where(~s["conductance"])[0]
            if len(idx) > 0:
                I_post.extend(s["post"][idx])
                I_pre.extend(range(count, count + len(idx)))
                count += len(idx)
                if "delay" in s:
                    max_del = np.max(s["delay"][idx])
                    spike_delay_steps = max_del if max_del > spike_delay_steps else spike_delay_steps

        self.total_synapses = int(np.sum(num_synapses))
        I_post.extend(self.input_neuron_list)
        I_pre.extend(range(self.total_synapses, self.total_synapses + len(self.input_neuron_list)))

        cond_post = np.asarray(cond_post, dtype=np.int32)
        cond_pre = np.asarray(cond_pre, dtype=np.int32)
        reverse = np.asarray(reverse, dtype=np.double)

        order1 = np.argsort(cond_post, kind="mergesort")
        cond_post = cond_post[order1]
        cond_pre = cond_pre[order1]
        reverse = reverse[order1]

        I_post = np.asarray(I_post, dtype=np.int32)
        I_pre = np.asarray(I_pre, dtype=np.int32)

        order1 = np.argsort(I_post, kind="mergesort")
        I_post = I_post[order1]
        I_pre = I_pre[order1]

        self.idx_start_gpot = np.concatenate(
            (np.asarray([0], dtype=np.int32), np.cumsum(num_gpot_neurons, dtype=np.int32))
        )
        self.idx_start_spike = np.concatenate(
            (np.asarray([0], dtype=np.int32), np.cumsum(num_spike_neurons, dtype=np.int32))
        )
        self.idx_start_synapse = np.concatenate(
            (np.asarray([0], dtype=np.int32), np.cumsum(num_synapses, dtype=np.int32))
        )

        for i, (t, n) in enumerate(self.n_list):
            if n["spiking"][0]:
                idx = np.where(
                    (cond_post >= self.idx_start_spike[i] + self.spike_shift)
                    & (cond_post < self.idx_start_spike[i + 1] + self.spike_shift)
                )
                n["cond_post"] = cond_post[idx] - self.idx_start_spike[i] - self.spike_shift
                n["cond_pre"] = cond_pre[idx]
                n["reverse"] = reverse[idx]
                idx = np.where(
                    (I_post >= self.idx_start_spike[i] + self.spike_shift)
                    & (I_post < self.idx_start_spike[i + 1] + self.spike_shift)
                )
                n["I_post"] = I_post[idx] - self.idx_start_spike[i] - self.spike_shift
                n["I_pre"] = I_pre[idx]
            else:
                idx = np.where((cond_post >= self.idx_start_gpot[i]) & (cond_post < self.idx_start_gpot[i + 1]))
                n["cond_post"] = cond_post[idx] - self.idx_start_gpot[i]
                n["cond_pre"] = cond_pre[idx]
                n["reverse"] = reverse[idx]
                idx = np.where((I_post >= self.idx_start_gpot[i]) & (I_post < self.idx_start_gpot[i + 1]))
                n["I_post"] = I_post[idx] - self.idx_start_gpot[i]
                n["I_pre"] = I_pre[idx]

            n["num_dendrites_cond"] = Counter(n["cond_post"])
            n["num_dendrites_I"] = Counter(n["I_post"])

        if len(self.s_list) > 0:
            s_id = np.concatenate([s["id"] for _, s in self.s_list]).astype(np.int32)
        else:
            s_id = np.empty(0, dtype=np.int32)

        s_order = np.arange(self.total_synapses)[s_id]
        idx = np.where(cond_post >= self.nid_max)[0]
        cond_post_syn = s_order[cond_post[idx] - self.nid_max]
        cond_post_syn_offset = idx[0] if len(idx) > 0 else 0
        idx = np.where(I_post >= self.nid_max)[0]
        I_post_syn = s_order[I_post[idx] - self.nid_max]
        I_post_syn_offset = idx[0] if len(idx) > 0 else 0
        for i, (t, s) in enumerate(self.s_list):
            idx = np.where(
                (cond_post_syn >= self.idx_start_synapse[i]) & (cond_post_syn < self.idx_start_synapse[i + 1])
            )[0]
            s["cond_post"] = cond_post[idx + cond_post_syn_offset] - self.nid_max
            s["cond_pre"] = cond_pre[idx + cond_post_syn_offset]
            s["reverse"] = reverse[idx + cond_post_syn_offset]
            # NOTE: after this point, s['reverse'] is no longer the reverse
            # potential associated with the current synapse class, but the
            # reverse potential of other synapses projecting to the current one.
            # Its purpose is exactly the same as n['reverse'].
            # Not sure if this is good though, since it obviously creates some
            # degree of confusion.
            idx = np.where((I_post_syn >= self.idx_start_synapse[i]) & (I_post_syn < self.idx_start_synapse[i + 1]))[0]
            s["I_post"] = I_post[idx + I_post_syn_offset] - self.nid_max
            s["I_pre"] = I_pre[idx + I_post_syn_offset]

            s["num_dendrites_cond"] = Counter(s["cond_post"])
            s["num_dendrites_I"] = Counter(s["I_post"])

        self.gpot_delay_steps = int(round(gpot_delay_steps * 1e-3 / self.dt)) + 1
        self.spike_delay_steps = int(round(spike_delay_steps * 1e-3 / self.dt)) + 1

        data_gpot = np.zeros(self.num_public_gpot + len(in_ports_ids_gpot), np.double)
        data_spike = np.zeros(self.num_public_spike + len(in_ports_ids_spk), np.int32)
        super(LPU, self).__init__(
            sel=sel,
            sel_in=sel_in,
            sel_out=sel_out,
            sel_gpot=sel_gpot,
            sel_spike=sel_spk,
            data_gpot=data_gpot,
            data_spike=data_spike,
            columns=columns,
            ctrl_tag=ctrl_tag,
            gpot_tag=gpot_tag,
            spike_tag=spike_tag,
            id=self.LPU_id,
            rank_to_id=rank_to_id,
            routing_table=routing_table,
            device=device,
            debug=debug,
            time_sync=time_sync,
        )

        self.sel_in_gpot_ids = np.array(self.pm["gpot"].ports_to_inds(self.sel_in_gpot), dtype=np.int32)
        self.sel_out_gpot_ids = np.array(self.pm["gpot"].ports_to_inds(self.sel_out_gpot), dtype=np.int32)
        self.sel_in_spk_ids = np.array(self.pm["spike"].ports_to_inds(self.sel_in_spk), dtype=np.int32)
        self.sel_out_spk_ids = np.array(self.pm["spike"].ports_to_inds(self.sel_out_spk), dtype=np.int32)
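The idx_start_* arrays built in both versions are plain prefix sums: entry i is the first GPU slot belonging to model i and entry i+1 is one past its last slot, which is exactly how the per-model np.where windows later in the constructor slice cond_post and I_post. A short sketch with invented model sizes:

import numpy as np

# Hypothetical neuron counts for three gpot models:
num_gpot_neurons = np.array([3, 0, 2], dtype=np.int32)

idx_start_gpot = np.concatenate(
    (np.asarray([0], dtype=np.int32),
     np.cumsum(num_gpot_neurons, dtype=np.int32)))
# idx_start_gpot == [0 3 3 5]

# Model i occupies the half-open slot range
# [idx_start_gpot[i], idx_start_gpot[i+1]):
lo, hi = idx_start_gpot[2], idx_start_gpot[3]
assert lo == 3 and hi == 5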
Example #3
    def __init__(self,
                 dt,
                 n_dict,
                 s_dict,
                 input_file=None,
                 output_file=None,
                 device=0,
                 ctrl_tag=CTRL_TAG,
                 gpot_tag=GPOT_TAG,
                 spike_tag=SPIKE_TAG,
                 rank_to_id=None,
                 routing_table=None,
                 id=None,
                 debug=False,
                 columns=['io', 'type', 'interface'],
                 cuda_verbose=False,
                 time_sync=False):

        LoggerMixin.__init__(self, 'mod {}'.format(id))
        self.log_info('Test')

        assert ('io' in columns)
        assert ('type' in columns)
        assert ('interface' in columns)
        self.LPU_id = id
        self.dt = dt
        self.debug = debug
        self.device = device
        if cuda_verbose:
            self.compile_options = ['--ptxas-options=-v']
        else:
            self.compile_options = []

        # Handle file I/O:
        self.output_file = output_file
        self.output = True if output_file else False
        self.input_file = input_file
        self.input_eof = False if input_file else True

        # Load neurons and synapse data:
        self._load_neurons()
        self._load_synapses()

        # Set default one time import:
        self._one_time_import = 10

        # Save neuron data in the form
        # [('Model0', {'attrib0': [..], 'attrib1': [..]}), ('Model1', ...)]
        self.n_list = n_dict.items()

        # List of booleans indicating whether first neuron of each model is a
        # spiking model:
        n_model_is_spk = [n['spiking'][0] for _, n in self.n_list]

        # Number of neurons of each model:
        n_model_num = [len(n['id']) for _, n in self.n_list]

        # Concatenate lists of integers corresponding to neuron positions in LPU
        # graph for all of the models into a single list:
        n_id = np.array(sum([n['id'] for _, n in self.n_list], []),
                        dtype=np.int32)

        # Concatenate lists of common attributes in model dictionaries into
        # single lists:
        n_is_spk = np.array(sum([n['spiking'] for _, n in self.n_list], []))
        n_is_pub = np.array(sum([n['public'] for _, n in self.n_list], []))
        n_has_in = np.array(sum([n['extern'] for _, n in self.n_list], []))

        # Get selectors and positions of input ports:
        try:
            sel_in_gpot = self.extract_in_gpot(n_dict)
            in_ports_ids_gpot = np.array(n_dict[PORT_IN_GPOT]['id'])
            self.ports_in_gpot_mem_ind = zip(
                *self.n_list)[0].index(PORT_IN_GPOT)
        except KeyError:
            sel_in_gpot = ''
            in_ports_ids_gpot = np.array([], dtype=np.int32)
            self.ports_in_gpot_mem_ind = None

        try:
            sel_in_spk = self.extract_in_spk(n_dict)
            in_ports_ids_spk = np.array(n_dict[PORT_IN_SPK]['id'],
                                        dtype=np.int32)
            self.ports_in_spk_mem_ind = zip(*self.n_list)[0].index(PORT_IN_SPK)
        except KeyError:
            sel_in_spk = ''
            in_ports_ids_spk = np.array([], dtype=np.int32)
            self.ports_in_spk_mem_ind = None

        sel_in = ','.join(filter(None, [sel_in_gpot, sel_in_spk]))

        # Get selectors and positions of output neurons:
        sel_out_gpot = self.extract_out_gpot(n_dict)
        sel_out_spk = self.extract_out_spk(n_dict)
        self.out_ports_ids_gpot = np.array([
            id for _, n in self.n_list
            for id, pub, spk in zip(n['id'], n['public'], n['spiking'])
            if pub and not spk
        ],
                                           dtype=np.int32)
        self.out_ports_ids_spk = np.array([
            id for _, n in self.n_list
            for id, pub, spk in zip(n['id'], n['public'], n['spiking'])
            if pub and spk
        ],
                                          dtype=np.int32)

        sel_out = ','.join(filter(None, [sel_out_gpot, sel_out_spk]))
        sel_gpot = ','.join(filter(None, [sel_in_gpot, sel_out_gpot]))
        sel_spk = ','.join(filter(None, [sel_in_spk, sel_out_spk]))
        sel = ','.join(filter(None, [sel_gpot, sel_spk]))

        self.sel_in_spk = sel_in_spk
        self.sel_out_spk = sel_out_spk
        self.sel_in_gpot = sel_in_gpot
        self.sel_out_gpot = sel_out_gpot

        # Lists of numbers of neurons of gpot and spiking model types:
        num_gpot_neurons = np.where(n_model_is_spk, 0, n_model_num)
        num_spike_neurons = np.where(n_model_is_spk, n_model_num, 0)

        # Total numbers of gpot and spiking neurons:
        self.my_num_gpot_neurons = sum(num_gpot_neurons)
        self.my_num_spike_neurons = sum(num_spike_neurons)

        gpot_idx = n_id[~n_is_spk]
        spike_idx = n_id[n_is_spk]
        self.order = np.argsort(np.concatenate(
            (gpot_idx, spike_idx))).astype(np.int32)
        self.gpot_order = np.argsort(gpot_idx).astype(np.int32)
        self.spike_order = np.argsort(spike_idx).astype(np.int32)
        self.spike_shift = self.my_num_gpot_neurons
        in_id = n_id[n_has_in]
        in_id.sort()
        pub_spk_id = n_id[n_is_pub & n_is_spk]
        pub_spk_id.sort()
        pub_gpot_id = n_id[n_is_pub & ~n_is_spk]
        pub_gpot_id.sort()
        self.input_neuron_list = self.order[in_id]
        public_spike_list = self.order[pub_spk_id]
        public_gpot_list = self.order[pub_gpot_id]
        self.num_public_gpot = len(public_gpot_list)
        self.num_public_spike = len(public_spike_list)
        self.num_input = len(self.input_neuron_list)
        in_ports_ids_gpot = self.order[in_ports_ids_gpot]
        in_ports_ids_spk = self.order[in_ports_ids_spk]
        self.out_ports_ids_gpot = self.order[self.out_ports_ids_gpot]
        self.out_ports_ids_spk = self.order[self.out_ports_ids_spk]

        # Map presynaptic and postsynaptic neuron ids to GPU positions:
        self.s_dict = s_dict
        if s_dict:
            for s in self.s_dict.itervalues():
                shift = self.spike_shift \
                    if s['class'][0] == 0 or s['class'][0] == 1 else 0
                s['pre'] = [
                    self.order[int(neu_id)] - shift for neu_id in s['pre']
                ]
                s['post'] = [self.order[int(neu_id)] for neu_id in s['post']]

        gpot_delay_steps = 0
        spike_delay_steps = 0

        spike_shift = self.spike_shift

        cond_pre = []
        cond_post = []
        I_pre = []
        I_post = []
        reverse = []

        count = 0

        self.s_list = self.s_dict.items()
        num_synapses = [len(s['id']) for _, s in self.s_list]
        for (_, s) in self.s_list:
            order = np.argsort(s['post']).astype(np.int32)
            for k, v in s.items():
                s[k] = np.asarray(v)[order]

            if s['conductance'][0]:
                cond_post.extend(s['post'])
                reverse.extend(s['reverse'])
                cond_pre.extend(range(count, count + len(s['post'])))
                count += len(s['post'])
                if 'delay' in s:
                    max_del = np.max(s['delay'])
                    gpot_delay_steps = max_del if max_del > gpot_delay_steps \
                                       else gpot_delay_steps
            else:
                I_post.extend(s['post'])
                I_pre.extend(range(count, count + len(s['post'])))
                count += len(s['post'])
                if 'delay' in s:
                    max_del = np.max(s['delay'])
                    spike_delay_steps = max_del if max_del > spike_delay_steps \
                                        else spike_delay_steps

        self.total_synapses = int(np.sum(num_synapses))
        I_post.extend(self.input_neuron_list)
        I_pre.extend(range(self.total_synapses, self.total_synapses + \
                          len(self.input_neuron_list)))

        cond_post = np.asarray(cond_post, dtype=np.int32)
        cond_pre = np.asarray(cond_pre, dtype=np.int32)
        reverse = np.asarray(reverse, dtype=np.double)

        order1 = np.argsort(cond_post, kind='mergesort')
        cond_post = cond_post[order1]
        cond_pre = cond_pre[order1]
        reverse = reverse[order1]

        I_post = np.asarray(I_post, dtype=np.int32)
        I_pre = np.asarray(I_pre, dtype=np.int32)

        order1 = np.argsort(I_post, kind='mergesort')
        I_post = I_post[order1]
        I_pre = I_pre[order1]

        self.idx_start_gpot = np.concatenate(
            (np.asarray([0], dtype=np.int32),
             np.cumsum(num_gpot_neurons, dtype=np.int32)))
        self.idx_start_spike = np.concatenate(
            (np.asarray([0], dtype=np.int32),
             np.cumsum(num_spike_neurons, dtype=np.int32)))
        self.idx_start_synapse = np.concatenate(
            (np.asarray([0], dtype=np.int32),
             np.cumsum(num_synapses, dtype=np.int32)))

        for i, (t, n) in enumerate(self.n_list):
            if n['spiking'][0]:
                idx = np.where(
                    (cond_post >= self.idx_start_spike[i] + spike_shift)
                    & (cond_post < self.idx_start_spike[i + 1] + spike_shift))
                n['cond_post'] = cond_post[idx] - self.idx_start_spike[
                    i] - spike_shift
                n['cond_pre'] = cond_pre[idx]
                n['reverse'] = reverse[idx]
                idx = np.where(
                    (I_post >= self.idx_start_spike[i] + spike_shift)
                    & (I_post < self.idx_start_spike[i + 1] + spike_shift))
                n['I_post'] = I_post[idx] - self.idx_start_spike[
                    i] - spike_shift
                n['I_pre'] = I_pre[idx]
            else:
                idx = np.where((cond_post >= self.idx_start_gpot[i])
                               & (cond_post < self.idx_start_gpot[i + 1]))
                n['cond_post'] = cond_post[idx] - self.idx_start_gpot[i]
                n['cond_pre'] = cond_pre[idx]
                n['reverse'] = reverse[idx]
                idx = np.where((I_post >= self.idx_start_gpot[i])
                               & (I_post < self.idx_start_gpot[i + 1]))
                n['I_post'] = I_post[idx] - self.idx_start_gpot[i]
                n['I_pre'] = I_pre[idx]

            n['num_dendrites_cond'] = Counter(n['cond_post'])
            n['num_dendrites_I'] = Counter(n['I_post'])

        self.gpot_delay_steps = int(round(
            gpot_delay_steps * 1e-3 / self.dt)) + 1
        self.spike_delay_steps = int(round(
            spike_delay_steps * 1e-3 / self.dt)) + 1

        data_gpot = np.zeros(self.num_public_gpot + len(in_ports_ids_gpot),
                             np.double)
        data_spike = np.zeros(self.num_public_spike + len(in_ports_ids_spk),
                              np.int32)
        super(LPU, self).__init__(sel=sel,
                                  sel_in=sel_in,
                                  sel_out=sel_out,
                                  sel_gpot=sel_gpot,
                                  sel_spike=sel_spk,
                                  data_gpot=data_gpot,
                                  data_spike=data_spike,
                                  columns=columns,
                                  ctrl_tag=ctrl_tag,
                                  gpot_tag=gpot_tag,
                                  spike_tag=spike_tag,
                                  id=self.LPU_id,
                                  rank_to_id=rank_to_id,
                                  routing_table=routing_table,
                                  device=device,
                                  debug=debug,
                                  time_sync=time_sync)

        self.sel_in_gpot_ids = np.array(self.pm['gpot'].ports_to_inds(
            self.sel_in_gpot),
                                        dtype=np.int32)
        self.sel_out_gpot_ids = np.array(self.pm['gpot'].ports_to_inds(
            self.sel_out_gpot),
                                         dtype=np.int32)
        self.sel_in_spk_ids = np.array(self.pm['spike'].ports_to_inds(
            self.sel_in_spk),
                                       dtype=np.int32)
        self.sel_out_spk_ids = np.array(self.pm['spike'].ports_to_inds(
            self.sel_out_spk),
                                        dtype=np.int32)
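This older version of the constructor classifies each synapse model as a whole (s['conductance'][0]) rather than per synapse, but the grouping step is the same: a stable mergesort argsort on the postsynaptic index makes all synapses targeting the same neuron contiguous while preserving their original order. A stripped-down sketch with invented arrays:

import numpy as np

# Hypothetical postsynaptic targets of five current-based synapses:
I_post = np.asarray([2, 0, 2, 1, 0], dtype=np.int32)
I_pre = np.arange(len(I_post), dtype=np.int32)  # synapse indices 0..4

# Stable sort keeps the original synapse order within each target:
order1 = np.argsort(I_post, kind='mergesort')
I_post = I_post[order1]  # [0 0 1 2 2]
I_pre = I_pre[order1]    # [1 4 3 0 2]

# Synapses onto neuron 2 are now a contiguous block:
assert list(I_pre[I_post == 2]) == [0, 2]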
Example #4
    def __init__(self, dt, n_dict, s_dict, input_file=None, output_file=None,
                 device=0, ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG,
                 spike_tag=SPIKE_TAG, rank_to_id=None, routing_table=None,
                 id=None, debug=False, columns=['io', 'type', 'interface'],
                 cuda_verbose=False, time_sync=False):

        LoggerMixin.__init__(self, 'mod {}'.format(id))
        self.log_info('Test')

        assert('io' in columns)
        assert('type' in columns)
        assert('interface' in columns)
        self.LPU_id = id
        self.dt = dt
        self.debug = debug
        self.device = device
        if cuda_verbose:
            self.compile_options = ['--ptxas-options=-v']
        else:
            self.compile_options = []

        # Handle file I/O:
        self.output_file = output_file
        self.output = True if output_file else False
        self.input_file = input_file
        self.input_eof = False if input_file else True

        # Load neurons and synapse data:
        self._load_neurons()
        self._load_synapses()

        # Set default one time import:
        self._one_time_import = 10

        # Save neuron data in the form
        # [('Model0', {'attrib0': [..], 'attrib1': [..]}), ('Model1', ...)]
        self.n_list = n_dict.items()

        # List of booleans indicating whether first neuron of each model is a
        # spiking model:
        n_model_is_spk = [ n['spiking'][0] for _, n in self.n_list ]

        # Number of neurons of each model:
        n_model_num = [ len(n['id']) for _, n in self.n_list ]

        # Concatenate lists of integers corresponding to neuron positions in LPU
        # graph for all of the models into a single list:
        n_id = np.array(sum( [ n['id'] for _, n in self.n_list ], []), 
                        dtype=np.int32)

        # Concatenate lists of common attributes in model dictionaries into
        # single lists:
        n_is_spk = np.array(sum( [ n['spiking'] for _, n in self.n_list ], []))
        n_is_pub = np.array(sum( [ n['public'] for _, n in self.n_list ], []))
        n_has_in = np.array(sum( [ n['extern'] for _, n in self.n_list ], []))

        # Get selectors and positions of input ports:
        try:
            sel_in_gpot = self.extract_in_gpot(n_dict)
            in_ports_ids_gpot = np.array(n_dict[PORT_IN_GPOT]['id'])
            self.ports_in_gpot_mem_ind = zip(*self.n_list)[0].index(PORT_IN_GPOT)
        except KeyError:
            sel_in_gpot = ''
            in_ports_ids_gpot = np.array([], dtype=np.int32)
            self.ports_in_gpot_mem_ind = None

        try:
            sel_in_spk = self.extract_in_spk(n_dict)
            in_ports_ids_spk = np.array(n_dict[PORT_IN_SPK]['id'], 
                                        dtype=np.int32)
            self.ports_in_spk_mem_ind = zip(*self.n_list)[0].index(PORT_IN_SPK)
        except KeyError:
            sel_in_spk = ''
            in_ports_ids_spk = np.array([], dtype=np.int32)
            self.ports_in_spk_mem_ind = None
            
        sel_in = ','.join(filter(None, [sel_in_gpot, sel_in_spk]))
        
        # Get selectors and positions of output neurons:
        sel_out_gpot = self.extract_out_gpot(n_dict)
        sel_out_spk = self.extract_out_spk(n_dict)
        self.out_ports_ids_gpot = np.array([id for _, n in self.n_list for id, pub, spk in
                                            zip(n['id'], n['public'], n['spiking'])
                                            if pub and not spk], dtype=np.int32)
        self.out_ports_ids_spk = np.array([id for _, n in self.n_list for id, pub, spk in
                                           zip(n['id'], n['public'], n['spiking'])
                                           if pub and spk], dtype=np.int32)

        sel_out = ','.join(filter(None, [sel_out_gpot, sel_out_spk]))
        sel_gpot = ','.join(filter(None, [sel_in_gpot, sel_out_gpot]))
        sel_spk = ','.join(filter(None, [sel_in_spk, sel_out_spk]))
        sel = ','.join(filter(None, [sel_gpot, sel_spk]))

        self.sel_in_spk = sel_in_spk
        self.sel_out_spk = sel_out_spk
        self.sel_in_gpot = sel_in_gpot
        self.sel_out_gpot = sel_out_gpot
        
        # Lists of numbers of neurons of gpot and spiking model types:
        num_gpot_neurons = np.where(n_model_is_spk, 0, n_model_num)
        num_spike_neurons = np.where(n_model_is_spk, n_model_num, 0)

        # Total numbers of gpot and spiking neurons:
        self.my_num_gpot_neurons = sum(num_gpot_neurons)
        self.my_num_spike_neurons = sum(num_spike_neurons)

        gpot_idx = n_id[~n_is_spk]
        spike_idx = n_id[n_is_spk]
        self.order = np.argsort(
            np.concatenate((gpot_idx, spike_idx))).astype(np.int32)
        self.gpot_order = np.argsort(gpot_idx).astype(np.int32)
        self.spike_order = np.argsort(spike_idx).astype(np.int32)
        self.spike_shift = self.my_num_gpot_neurons
        in_id = n_id[n_has_in]
        in_id.sort()
        pub_spk_id = n_id[ n_is_pub & n_is_spk ]
        pub_spk_id.sort()
        pub_gpot_id = n_id[ n_is_pub & ~n_is_spk ]
        pub_gpot_id.sort()
        self.input_neuron_list = self.order[in_id]
        public_spike_list = self.order[pub_spk_id]
        public_gpot_list = self.order[pub_gpot_id]
        self.num_public_gpot = len( public_gpot_list )
        self.num_public_spike = len( public_spike_list )
        self.num_input = len( self.input_neuron_list )
        in_ports_ids_gpot = self.order[in_ports_ids_gpot]
        in_ports_ids_spk = self.order[in_ports_ids_spk]
        self.out_ports_ids_gpot = self.order[self.out_ports_ids_gpot]
        self.out_ports_ids_spk = self.order[self.out_ports_ids_spk]

        # Map presynaptic and postsynaptic neuron ids to GPU positions:
        self.s_dict = s_dict
        if s_dict:
            for s in self.s_dict.itervalues():
                shift = self.spike_shift \
                    if s['class'][0] == 0 or s['class'][0] == 1 else 0
                s['pre'] = [self.order[int(neu_id)] - shift 
                            for neu_id in s['pre'] ]
                s['post'] = [self.order[int(neu_id)] 
                             for neu_id in s['post'] ]

        gpot_delay_steps = 0
        spike_delay_steps = 0

        spike_shift = self.spike_shift

        cond_pre = []
        cond_post = []
        I_pre = []
        I_post = []
        reverse = []

        count = 0

        self.s_list = self.s_dict.items()
        num_synapses = [ len(s['id']) for _, s in self.s_list ]
        for (_, s) in self.s_list:
            order = np.argsort(s['post']).astype(np.int32)
            for k, v in s.items():
                s[k] = np.asarray(v)[order]

            if s['conductance'][0]:
                cond_post.extend(s['post'])
                reverse.extend(s['reverse'])
                cond_pre.extend(range(count, count+len(s['post'])))
                count += len(s['post'])
                if 'delay' in s:
                    max_del = np.max( s['delay'] )
                    gpot_delay_steps = max_del if max_del > gpot_delay_steps \
                                       else gpot_delay_steps
            else:
                I_post.extend(s['post'])
                I_pre.extend(range(count, count+len(s['post'])))
                count += len(s['post'])
                if 'delay' in s:
                    max_del = np.max( s['delay'] )
                    spike_delay_steps = max_del if max_del > spike_delay_steps \
                                        else spike_delay_steps
        
        self.total_synapses = int(np.sum(num_synapses))
        I_post.extend(self.input_neuron_list)
        I_pre.extend(range(self.total_synapses, self.total_synapses + \
                          len(self.input_neuron_list)))

        cond_post = np.asarray(cond_post, dtype=np.int32)
        cond_pre = np.asarray(cond_pre, dtype = np.int32)
        reverse = np.asarray(reverse, dtype=np.double)

        order1 = np.argsort(cond_post, kind='mergesort')
        cond_post = cond_post[order1]
        cond_pre = cond_pre[order1]
        reverse = reverse[order1]


        I_post = np.asarray(I_post, dtype=np.int32)
        I_pre = np.asarray(I_pre, dtype=np.int32)

        order1 = np.argsort(I_post, kind='mergesort')
        I_post = I_post[order1]
        I_pre = I_pre[order1]

        self.idx_start_gpot = np.concatenate(
            (np.asarray([0,], dtype=np.int32),
             np.cumsum(num_gpot_neurons, dtype=np.int32)))
        self.idx_start_spike = np.concatenate(
            (np.asarray([0,], dtype=np.int32),
             np.cumsum(num_spike_neurons, dtype=np.int32)))
        self.idx_start_synapse = np.concatenate(
            (np.asarray([0,], dtype=np.int32),
             np.cumsum(num_synapses, dtype=np.int32)))


        for i, (t, n) in enumerate(self.n_list):
            if n['spiking'][0]:
                idx = np.where(
                    (cond_post >= self.idx_start_spike[i] + spike_shift)&
                    (cond_post < self.idx_start_spike[i+1] + spike_shift) )
                n['cond_post'] = cond_post[idx] - self.idx_start_spike[i] - spike_shift
                n['cond_pre'] = cond_pre[idx]
                n['reverse'] = reverse[idx]
                idx = np.where(
                    (I_post >= self.idx_start_spike[i] + spike_shift)&
                    (I_post < self.idx_start_spike[i+1] + spike_shift) )
                n['I_post'] = I_post[idx] - self.idx_start_spike[i] - spike_shift
                n['I_pre'] = I_pre[idx]
            else:
                idx = np.where( (cond_post >= self.idx_start_gpot[i])&
                                (cond_post < self.idx_start_gpot[i+1]) )
                n['cond_post'] = cond_post[idx] - self.idx_start_gpot[i]
                n['cond_pre'] = cond_pre[idx]
                n['reverse'] = reverse[idx]
                idx =  np.where( (I_post >= self.idx_start_gpot[i])&
                                 (I_post < self.idx_start_gpot[i+1]) )
                n['I_post'] = I_post[idx] - self.idx_start_gpot[i]
                n['I_pre'] = I_pre[idx]

            n['num_dendrites_cond'] = Counter(n['cond_post'])
            n['num_dendrites_I'] = Counter(n['I_post'])

        self.gpot_delay_steps = int(round(gpot_delay_steps*1e-3/self.dt)) + 1
        self.spike_delay_steps = int(round(spike_delay_steps*1e-3/self.dt)) + 1

        data_gpot = np.zeros(self.num_public_gpot + len(in_ports_ids_gpot),
                             np.double)
        data_spike = np.zeros(self.num_public_spike + len(in_ports_ids_spk),
                              np.int32)
        super(LPU, self).__init__(sel=sel, sel_in=sel_in, sel_out=sel_out,
                                  sel_gpot=sel_gpot, sel_spike=sel_spk,
                                  data_gpot=data_gpot, data_spike=data_spike,
                                  columns=columns, ctrl_tag=ctrl_tag, gpot_tag=gpot_tag,
                                  spike_tag=spike_tag, id=self.LPU_id,
                                  rank_to_id=rank_to_id, routing_table=routing_table,
                                  device=device, debug=debug, time_sync=time_sync)

        self.sel_in_gpot_ids = np.array(self.pm['gpot'].ports_to_inds(self.sel_in_gpot),
                                        dtype=np.int32)
        self.sel_out_gpot_ids = np.array(self.pm['gpot'].ports_to_inds(self.sel_out_gpot),
                                        dtype=np.int32)
        self.sel_in_spk_ids = np.array(self.pm['spike'].ports_to_inds(self.sel_in_spk),
                                        dtype=np.int32)
        self.sel_out_spk_ids = np.array(self.pm['spike'].ports_to_inds(self.sel_out_spk),
                                        dtype=np.int32)
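All four versions convert the maximum synaptic delay from milliseconds to integration steps via int(round(delay_ms * 1e-3 / dt)) + 1, the extra step accounting for the current time point. A quick check of that arithmetic under an assumed dt:

# Assume dt = 1e-4 s (0.1 ms) and a maximum synaptic delay of 2 ms:
dt = 1e-4
spike_delay_steps = 2.0  # milliseconds, as stored in s['delay']

n_steps = int(round(spike_delay_steps * 1e-3 / dt)) + 1
assert n_steps == 21     # 20 delay steps plus the current step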
Example #5
def mpi_run_manager(man, steps, targets=None, delete_tempfile=True, log=False,
                    log_screen=False, log_file_name='neurokernel.log'):
    """
    Run the manager with mpiexec.
    
    Implemented as a fix for 'import neurokernel.mpi_relaunch', which does not
    work in notebooks. Serializes the manager to a temporary file, then passes
    a function to mpi_run, which loads the manager in an mpiexec process and
    runs it using the common set of commands:
        man.spawn()
        man.start(steps={Number of steps})
        man.wait()
    Returns the stdout from the manager along with a string indicating whether
    or not the manager ran properly.
    
    Parameters
    ----------
    man : neurokernel.core_gpu.Manager or neurokernel.core.Manager
        The Neurokernel manager to be run.
    steps : int
        Number of steps to run the manager for.
    targets : list
        Dependencies of the manager, such as child classes of the Module class
        from neurokernel.core_gpu or neurokernel.core.
    delete_tempfile : bool
        Whether or not to delete temporary file once the manager is executed.
    log : bool
        Whether or not to connect to logger for manager if logger exists.
    log_screen : bool
        Whether or not to send log messages to the screen.
    log_file_name : str
        File to send log messages to.
    
    Returns
    -------
    output : str
        The stdout from the manager run with mpiexec cast to a string.
    
    """

    l = LoggerMixin("mpi_run_manager()",log_on=log)

    # Write a function that loads and runs the Manager
    func_code  = "\ndef MPI_Function():"
    func_code += "\n    import dill"
    func_code += "\n    f = open(\"%s\",\"rb\")"
    func_code += "\n    man = dill.load(f)"
    func_code += "\n    man.spawn()" 
    func_code += "\n    man.start(steps=%i)"
    func_code += "\n    man.wait()"

    try:
        # Store the Manager in a temporary file
        temp = tempfile.NamedTemporaryFile(delete = delete_tempfile)
        dill.dump(man, temp)
        temp.flush()
        
        # Run the function using mpiexec
        out = mpi_run(func_code % (temp.name,steps), targets, 
                      delete_tempfile=delete_tempfile, log=log,
                      log_screen=log_screen, log_file_name=log_file_name)

    except Exception as e:
        l.log_error(str(e))
        raise

    finally:    
        # Closing the temp file closes and deletes it
        temp.close()
    
    # Return the output
    return str(out)
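A minimal usage sketch for mpi_run_manager, assuming a Neurokernel manager has already been populated (building one with modules and connectivity patterns is project-specific and elided here):

# Hypothetical usage; per the docstring, man may be a core_gpu or core Manager.
from neurokernel.core_gpu import Manager

man = Manager()
# ... add Module subclasses and connectivity patterns to man here ...

out = mpi_run_manager(man, steps=100, targets=None, log=True)
print(out)  # stdout of the mpiexec run, including the MPI_RUN_SUCCESS/FAILURE marker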
Example #6
def mpi_run(func, targets=None, delete_tempfile=True, log=False,
            log_screen=False, log_file_name='neurokernel.log'):
    """
    Run a function with mpiexec.
    
    Implemented as a fix to 'import neurokernel.mpi_relaunch', which does not
    work within notebooks. Writes the source code for a function to a temporary
    file and then runs the temporary file using mpiexec. Returns the stdout from
    the function along with a string indicating whether or not the function
    executed properly.

    Parameters
    ----------
    func : function or str
        Function to be executed with mpiexec. All imports and variables used
        must be imported or defined within the function. func can either be a callable
        function or code that represents a valid function.
    targets : list
        Dependencies of the manager, such as child classes of the Module class
        from neurokernel.core_gpu or neurokernel.core.
    delete_tempfile : bool
        Whether or not to delete temporary file once func is executed.
    log : bool
        Whether or not to connect to func's logger, if one exists.
    log_screen : bool
        Whether or not to send log messages to the screen.
    log_file_name : str
        File to send log messages to.
    
    Returns
    -------
    output : str
        The stdout from the function run with mpiexec cast to a string.

    Usage
    -----
    Does not seem to work with Open MPI version 2 (see the workaround below).
    func should not import neurokernel.mpi_relaunch.
    All modules and variables used must be imported or defined within func.
    Returns the stdout from the function run under
    'mpiexec -np 1 python {tmp_file_name}'.
    """

    l = LoggerMixin("mpi_run()",log_on=log)
    
    if callable(func):
        func_text = inspect.getsource(func)
        # Make a feeble attempt at fixing indentation. This works for a nested
        # function that takes no arguments, but not for a method that expects
        # (self) or for a class.
        func_text = "\n" + re.sub(r"(^\s+)def ","def ",func_text) + "\n"
        func_name = func.__name__
    else: 
        func_text = "\n" + func + "\n"
        func_name = re.search(r'def *(.*)\(\):', func_text).group(1)

    target_text  = "\n"
    
    if targets:
        for t in targets:
            target_text +=  "\n" + inspect.getsource(t) + "\n"

    main_code  = "\n"
    main_code += "\nif __name__ == \"__main__\":"
    main_code += "\n   import neurokernel.mpi as mpi"
    main_code += "\n   from neurokernel.mixins import LoggerMixin"
    main_code += "\n   from mpi4py import MPI"

    if log:
        main_code += "\n   mpi.setup_logger(screen=%s, file_name=\"%s\"," % (log_screen, log_file_name)
        main_code += "\n                    mpi_comm=MPI.COMM_WORLD, multiline=True)"

    main_code += "\n   l = LoggerMixin(\"%s\",%s)" % (func_name,str(log))
    main_code += "\n   try:"
    main_code += "\n      %s()" % func_name
    main_code += "\n      print(\"MPI_RUN_SUCCESS: %s\")" % func_name
    main_code += "\n      l.log_info(\"MPI_RUN_SUCCESS: %s\")" % func_name
    main_code += "\n   except Exception as e:"
    main_code += "\n      print(\"MPI_RUN_FAILURE: %s\")" % func_name
    main_code += "\n      l.log_error(\"MPI_RUN_FAILURE: %s\")" % func_name
    main_code += "\n      print(e)"
    main_code += "\n"

    try:
        from mpi4py import MPI
        #Write code for the function to a temp file
        temp = tempfile.NamedTemporaryFile(delete=delete_tempfile)
        temp.write(target_text)
        temp.write(func_text)
        temp.write(main_code)
        temp.flush()
        
        #Execute the code
        #There's a bug in Open MPI v2 that prevents running this with mpiexec. Running 'from mpi4py import MPI' 
        #does a basic mpi_relaunch which will work for the notebook code, but you give up some of the features 
        #of mpiexec.
        if MPI.Get_library_version().startswith("Open MPI v2"):
            command = ["python", temp.name]
        else:
            command = ["mpiexec", "-np", "1", "python", temp.name]

        env = os.environ.copy()
        l.log_info("Calling: " + " ".join(command))
        out = subprocess.check_output(command, env = env)

    except Exception as e:
        l.log_error(str(e))
        raise

    finally:    
        #Closing the temp file closes and deletes it
        temp.close()
    
    #Return the output
    if "MPI_RUN_FAILURE" in out:
        raise RuntimeError(out)

    return str(out)
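
For reference, a short sketch of passing a self-contained callable to mpi_run;
hello_mpi is a made-up example, and per the Usage notes every import it needs
lives inside its own body:

    def hello_mpi():
        # Imports must be local: only this function's source text is copied
        # into the temporary script that mpiexec executes.
        from mpi4py import MPI
        print("rank %d says hello" % MPI.COMM_WORLD.Get_rank())

    out = mpi_run(hello_mpi, log=False)
    print(out)  # includes 'MPI_RUN_SUCCESS: hello_mpi' on success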
Example #7
    def __init__(self, sel, sel_in, sel_out,
                 sel_gpot, sel_spike, data_gpot, data_spike,
                 columns=['interface', 'io', 'type'],
                 ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG, spike_tag=SPIKE_TAG,
                 id=None, device=None,
                 routing_table=None, rank_to_id=None,
                 debug=False, time_sync=False, print_timing=False):

        super(Module, self).__init__(ctrl_tag)
        self.debug = debug
        self.time_sync = time_sync
        self.device = device
        self.print_timing = print_timing

        self._gpot_tag = gpot_tag
        self._spike_tag = spike_tag

        # Require several necessary attribute columns:
        if 'interface' not in columns:
            raise ValueError('interface column required')
        if 'io' not in columns:
            raise ValueError('io column required')
        if 'type' not in columns:
            raise ValueError('type column required')

        # Initialize GPU here so as to be able to initialize a port mapper
        # containing GPU memory:
        self._init_gpu()

        # This is needed to ensure that MPI_Finalize is called before PyCUDA
        # attempts to clean up; see
        # https://groups.google.com/forum/#!topic/mpi4py/by0Rd5q0Ayw
        atexit.register(MPI.Finalize)

        # Manually register the file close method associated with MPIOutput
        # so that it is called by atexit before MPI.Finalize() (if the file is
        # closed after MPI.Finalize() is called, an error will occur):
        for k, v in iteritems(twiggy.emitters):
            if isinstance(v._output, MPIOutput):
                atexit.register(v._output.close)

        # Ensure that the input and output port selectors respectively
        # select mutually exclusive subsets of the set of all ports exposed by
        # the module:
        if not SelectorMethods.is_in(sel_in, sel):
            raise ValueError('input port selector not in selector of all ports')
        if not SelectorMethods.is_in(sel_out, sel):
            raise ValueError('output port selector not in selector of all ports')
        if not SelectorMethods.are_disjoint(sel_in, sel_out):
            raise ValueError('input and output port selectors not disjoint')

        # The graded potential and spiking port selectors should likewise
        # select mutually exclusive subsets of the set of all ports exposed by
        # the module; these checks are currently disabled:
        #assert(SelectorMethods.is_in(sel_gpot, sel))
        #assert(SelectorMethods.is_in(sel_spike, sel))
        #assert(SelectorMethods.are_disjoint(sel_gpot, sel_spike))

        # Save routing table and mapping between MPI ranks and module IDs:
        self.routing_table = routing_table
        self.rank_to_id = rank_to_id

        # Generate a unique ID if none is specified:
        if id is None:
            self.id = uid()
        else:

            # If a unique ID was specified and the routing table is not empty
            # (i.e., there are connections between multiple modules), the id
            # must be a node in the routing table:
            if routing_table is not None and len(routing_table.ids) and \
                    not routing_table.has_node(id):
                raise ValueError('routing table must contain specified '
                                 'module ID: {}'.format(id))
            self.id = id

        # Reformat logger name:
        LoggerMixin.__init__(self, 'mod %s' % self.id)

        if self.print_timing:
            start = time.time()

        # Create module interface given the specified ports:
        self.interface = Interface(sel, columns)
        # Set the interface ID to 0; we assume that a module only has one interface:
        self.interface[sel, 'interface'] = 0

        # Set the port attributes:
        if len(sel_in):
            self.interface[sel_in, 'io'] = 'in'
        if len(sel_out):
            self.interface[sel_out, 'io'] = 'out'
        if len(sel_gpot):
            self.interface[sel_gpot, 'type'] = 'gpot'
        if len(sel_spike):
            self.interface[sel_spike, 'type'] = 'spike'

        if self.print_timing:
            self.log_info('Elapsed time for setting up interface: {:.3f} seconds'.format(time.time()-start))
            start = time.time()

        # Find the graded potential and spiking ports:
        self.gpot_ports = self.interface.gpot_ports()
        self.spike_ports = self.interface.spike_ports()

        if len(self.gpot_ports):
            self.in_gpot_ports = self.gpot_ports.in_ports(tuples=True)
            self.out_gpot_ports = self.gpot_ports.out_ports(tuples=True)
        else:
            self.in_gpot_ports = []
            self.out_gpot_ports = []
        if len(self.spike_ports):
            self.in_spike_ports = self.spike_ports.in_ports(tuples=True)
            self.out_spike_ports = self.spike_ports.out_ports(tuples=True)
        else:
            self.in_spike_ports = []
            self.out_spike_ports = []

        if self.print_timing:
            self.log_info('Elapsed time for extracting ports: {:.3f} seconds'.format(time.time()-start))
            start = time.time()

        # Set up mapper between port identifiers and their associated data:
        if len(data_gpot) != len(self.gpot_ports):
            raise ValueError('incompatible gpot port data array length')
        if len(data_spike) != len(self.spike_ports):
            raise ValueError('incompatible spike port data array length')
        self.data = {}
        self.data['gpot'] = gpuarray.to_gpu(data_gpot)
        self.data['spike'] = gpuarray.to_gpu(data_spike)

        self.pm = {}
        self.pm['gpot'] = GPUPortMapper(sel_gpot, self.data['gpot'], make_copy=False)
        self.pm['spike'] = GPUPortMapper(sel_spike, self.data['spike'], make_copy=False)
        if self.print_timing:
            cuda.Context.synchronize()
            self.log_info('Elapsed time for creating array and PortMapper: {:.3f} seconds'.format(time.time()-start))
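
To make the selector checks above concrete, here is a small sketch using the
same SelectorMethods calls the constructor relies on; the selector strings are
made up, and the import path assumes SelectorMethods lives in neurokernel.plsel:

    from neurokernel.plsel import SelectorMethods

    sel_in = '/mod/in[0:2]'
    sel_out = '/mod/out[0:2]'
    sel = ','.join([sel_in, sel_out])

    # Mirrors the constructor's validation: both port subsets must lie within
    # the full selector and must not overlap.
    assert SelectorMethods.is_in(sel_in, sel)
    assert SelectorMethods.is_in(sel_out, sel)
    assert SelectorMethods.are_disjoint(sel_in, sel_out)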
Example #8
File: LPU.py  Project: fersal01/neurokernel
    def __init__(self,
                 dt,
                 n_dict,
                 s_dict,
                 input_file=None,
                 output_file=None,
                 device=0,
                 port_ctrl=base.PORT_CTRL,
                 port_data=base.PORT_DATA,
                 port_time=base.PORT_TIME,
                 id=None,
                 debug=False,
                 columns=['io', 'type', 'interface'],
                 cuda_verbose=False,
                 time_sync=False):
        assert ('io' in columns)
        assert ('type' in columns)
        assert ('interface' in columns)
        self.LPU_id = id
        self.dt = dt
        self.debug = debug
        self.device = device
        if cuda_verbose:
            self.compile_options = ['--ptxas-options=-v']
        else:
            self.compile_options = []

        LoggerMixin.__init__(self, 'mod %s' % self.LPU_id)

        # handle file I/O
        self.output_file = output_file
        self.output = True if output_file else False
        self.input_file = input_file
        self.input_eof = False if input_file else True

        # load neurons and synapse definition
        self._load_neurons()
        self._load_synapses()

        # set default one time import
        self._one_time_import = 10
        # Save neuron data in the form
        # [('Model0', {'attrib0': [..], 'attrib1': [..]}), ('Model1', ...)]:
        self.n_list = n_dict.items()
        # Whether the first neuron of each model is a spiking model:
        n_model_is_spk = [n['spiking'][0] for _, n in self.n_list]
        # Number of neurons of each model:
        n_model_num = [len(n['id']) for _, n in self.n_list]
        # concatenation of lists of neurons
        # n['id'] is a list of integers that correspond to the positions
        # of the neuron model n['model'] in LPU graph
        n_id = np.array(sum([n['id'] for _, n in self.n_list], []),
                        dtype=np.int32)
        # concatenates lists of common attributes in model dictionaries
        n_is_spk = np.array(sum([n['spiking'] for _, n in self.n_list], []))
        n_is_pub = np.array(sum([n['public'] for _, n in self.n_list], []))
        n_has_in = np.array(sum([n['extern'] for _, n in self.n_list], []))

        try:
            sel_in_gpot = ','.join(
                filter(None, n_dict[PORT_IN_GPOT]['selector']))
            in_ports_ids_gpot = np.array(n_dict[PORT_IN_GPOT]['id'])
            self.ports_in_gpot_mem_ind = zip(*self.n_list)[0] \
                                            .index(PORT_IN_GPOT)
        except KeyError:
            sel_in_gpot = ''
            in_ports_ids_gpot = np.array([], dtype=np.int32)
            self.ports_in_gpot_mem_ind = None

        try:
            sel_in_spk = ','.join(filter(None,
                                         n_dict[PORT_IN_SPK]['selector']))
            in_ports_ids_spk = np.array(n_dict[PORT_IN_SPK]['id'],
                                        dtype=np.int32)
            self.ports_in_spk_mem_ind = zip(*self.n_list)[0].index(PORT_IN_SPK)
        except KeyError:
            sel_in_spk = ''
            in_ports_ids_spk = np.array([], dtype=np.int32)
            self.ports_in_spk_mem_ind = None

        sel_in = ','.join(filter(None, [sel_in_gpot, sel_in_spk]))

        sel_out_gpot = ','.join(
            filter(None, [
                sel for _, n in self.n_list
                for sel, pub, spk in zip(n['selector'], n['public'],
                                         n['spiking']) if pub and not spk
            ]))
        sel_out_spk = ','.join(
            filter(None, [
                sel for _, n in self.n_list for sel, pub, spk in zip(
                    n['selector'], n['public'], n['spiking']) if pub and spk
            ]))
        self.out_ports_ids_gpot = np.array([
            id for _, n in self.n_list
            for id, pub, spk in zip(n['id'], n['public'], n['spiking'])
            if pub and not spk
        ],
                                           dtype=np.int32)
        self.out_ports_ids_spk = np.array([
            id for _, n in self.n_list
            for id, pub, spk in zip(n['id'], n['public'], n['spiking'])
            if pub and spk
        ],
                                          dtype=np.int32)

        sel_out = ','.join(filter(None, [sel_out_gpot, sel_out_spk]))

        sel_gpot = ','.join(filter(None, [sel_in_gpot, sel_out_gpot]))
        sel_spk = ','.join(filter(None, [sel_in_spk, sel_out_spk]))

        sel = ','.join(filter(None, [sel_gpot, sel_spk]))

        self.sel_in_spk = sel_in_spk
        self.sel_out_spk = sel_out_spk
        self.sel_in_gpot = sel_in_gpot
        self.sel_out_gpot = sel_out_gpot

        # Count the graded potential and spiking neurons contributed by each model:
        num_gpot_neurons = np.where(n_model_is_spk, 0, n_model_num)
        num_spike_neurons = np.where(n_model_is_spk, n_model_num, 0)
        self.my_num_gpot_neurons = sum(num_gpot_neurons)
        self.my_num_spike_neurons = sum(num_spike_neurons)
        gpot_idx = n_id[~n_is_spk]
        spike_idx = n_id[n_is_spk]
        self.order = np.argsort(np.concatenate(
            (gpot_idx, spike_idx))).astype(np.int32)
        self.gpot_order = np.argsort(gpot_idx).astype(np.int32)
        self.spike_order = np.argsort(spike_idx).astype(np.int32)
        self.spike_shift = self.my_num_gpot_neurons
        in_id = n_id[n_has_in]
        in_id.sort()
        pub_spk_id = n_id[n_is_pub & n_is_spk]
        pub_spk_id.sort()
        pub_gpot_id = n_id[n_is_pub & ~n_is_spk]
        pub_gpot_id.sort()
        self.input_neuron_list = self.order[in_id]
        public_spike_list = self.order[pub_spk_id]
        public_gpot_list = self.order[pub_gpot_id]
        self.num_public_gpot = len(public_gpot_list)
        self.num_public_spike = len(public_spike_list)
        self.num_input = len(self.input_neuron_list)
        in_ports_ids_gpot = self.order[in_ports_ids_gpot]
        in_ports_ids_spk = self.order[in_ports_ids_spk]
        self.out_ports_ids_gpot = self.order[self.out_ports_ids_gpot]
        self.out_ports_ids_spk = self.order[self.out_ports_ids_spk]

        # Remap synapse endpoints from neuron IDs to memory positions; for
        # synapse classes with spiking inputs (class 0 or 1), make the
        # pre-synaptic indices relative to the start of the spiking block:
        self.s_dict = s_dict
        if s_dict:
            for s in self.s_dict.itervalues():
                shift = self.spike_shift \
                    if s['class'][0] == 0 or s['class'][0] == 1 else 0
                s['pre'] = [
                    self.order[int(neu_id)] - shift for neu_id in s['pre']
                ]
                s['post'] = [self.order[int(neu_id)] for neu_id in s['post']]

        gpot_delay_steps = 0
        spike_delay_steps = 0

        spike_shift = self.spike_shift

        cond_pre = []
        cond_post = []
        I_pre = []
        I_post = []
        reverse = []

        count = 0

        self.s_list = self.s_dict.items()
        num_synapses = [len(s['id']) for _, s in self.s_list]
        for (_, s) in self.s_list:
            order = np.argsort(s['post']).astype(np.int32)
            for k, v in s.items():
                s[k] = np.asarray(v)[order]

            if s['conductance'][0]:
                cond_post.extend(s['post'])
                reverse.extend(s['reverse'])
                cond_pre.extend(range(count, count + len(s['post'])))
                count += len(s['post'])
                if 'delay' in s:
                    max_del = np.max(s['delay'])
                    gpot_delay_steps = max_del if max_del > gpot_delay_steps \
                                       else gpot_delay_steps
            else:
                I_post.extend(s['post'])
                I_pre.extend(range(count, count + len(s['post'])))
                count += len(s['post'])
                if 'delay' in s:
                    max_del = np.max(s['delay'])
                    spike_delay_steps = max_del if max_del > spike_delay_steps \
                                        else spike_delay_steps

        self.total_synapses = int(np.sum(num_synapses))
        I_post.extend(self.input_neuron_list)
        I_pre.extend(range(self.total_synapses, self.total_synapses + \
                          len(self.input_neuron_list)))

        cond_post = np.asarray(cond_post, dtype=np.int32)
        cond_pre = np.asarray(cond_pre, dtype=np.int32)
        reverse = np.asarray(reverse, dtype=np.double)

        order1 = np.argsort(cond_post, kind='mergesort')
        cond_post = cond_post[order1]
        cond_pre = cond_pre[order1]
        reverse = reverse[order1]

        I_post = np.asarray(I_post, dtype=np.int32)
        I_pre = np.asarray(I_pre, dtype=np.int32)

        order1 = np.argsort(I_post, kind='mergesort')
        I_post = I_post[order1]
        I_pre = I_pre[order1]

        self.idx_start_gpot = np.concatenate((np.asarray([
            0,
        ], dtype=np.int32), np.cumsum(num_gpot_neurons, dtype=np.int32)))
        self.idx_start_spike = np.concatenate(
            (np.asarray([
                0,
            ], dtype=np.int32), np.cumsum(num_spike_neurons, dtype=np.int32)))
        self.idx_start_synapse = np.concatenate(
            (np.asarray([
                0,
            ], dtype=np.int32), np.cumsum(num_synapses, dtype=np.int32)))

        for i, (t, n) in enumerate(self.n_list):
            if n['spiking'][0]:
                idx = np.where(
                    (cond_post >= self.idx_start_spike[i] + spike_shift)
                    & (cond_post < self.idx_start_spike[i + 1] + spike_shift))
                n['cond_post'] = cond_post[idx] - self.idx_start_spike[
                    i] - spike_shift
                n['cond_pre'] = cond_pre[idx]
                n['reverse'] = reverse[idx]
                idx = np.where(
                    (I_post >= self.idx_start_spike[i] + spike_shift)
                    & (I_post < self.idx_start_spike[i + 1] + spike_shift))
                n['I_post'] = I_post[idx] - self.idx_start_spike[
                    i] - spike_shift
                n['I_pre'] = I_pre[idx]
            else:
                idx = np.where((cond_post >= self.idx_start_gpot[i])
                               & (cond_post < self.idx_start_gpot[i + 1]))
                n['cond_post'] = cond_post[idx] - self.idx_start_gpot[i]
                n['cond_pre'] = cond_pre[idx]
                n['reverse'] = reverse[idx]
                idx = np.where((I_post >= self.idx_start_gpot[i])
                               & (I_post < self.idx_start_gpot[i + 1]))
                n['I_post'] = I_post[idx] - self.idx_start_gpot[i]
                n['I_pre'] = I_pre[idx]

            n['num_dendrites_cond'] = Counter(n['cond_post'])
            n['num_dendrites_I'] = Counter(n['I_post'])

        self.gpot_delay_steps = int(round(
            gpot_delay_steps * 1e-3 / self.dt)) + 1
        self.spike_delay_steps = int(round(
            spike_delay_steps * 1e-3 / self.dt)) + 1

        data_gpot = np.zeros(self.num_public_gpot + len(in_ports_ids_gpot),
                             np.double)
        data_spike = np.zeros(self.num_public_spike + len(in_ports_ids_spk),
                              np.bool)
        super(LPU, self).__init__(sel, sel_in, sel_out, sel_gpot, sel_spk,
                                  data_gpot, data_spike, columns, port_data,
                                  port_ctrl, port_time, self.LPU_id, device,
                                  debug, time_sync)

        self.sel_in_gpot_ids = self.pm['gpot'].ports_to_inds(self.sel_in_gpot)
        self.sel_out_gpot_ids = self.pm['gpot'].ports_to_inds(
            self.sel_out_gpot)
        self.sel_in_spk_ids = self.pm['spike'].ports_to_inds(self.sel_in_spk)
        self.sel_out_spk_ids = self.pm['spike'].ports_to_inds(self.sel_out_spk)
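
A note on the delay bookkeeping in the example above (and in the next one):
synaptic delays are specified in milliseconds and converted to integer
simulation steps via int(round(delay * 1e-3 / dt)) + 1, with dt in seconds.
A quick hypothetical check of that arithmetic:

    dt = 1e-4              # simulation step of 0.1 ms, in seconds
    max_delay_ms = 2.0     # largest 'delay' value found across all synapses
    steps = int(round(max_delay_ms * 1e-3 / dt)) + 1
    assert steps == 21     # 2 ms / 0.1 ms per step, plus one extra slot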
Example #9
    def __init__(self, dt, n_dict, s_dict, input_file=None, output_file=None,
                 device=0, ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG,
                 spike_tag=SPIKE_TAG, rank_to_id=None, routing_table=None,
                 id=None, debug=False, columns=['io', 'type', 'interface'],
                 cuda_verbose=False, time_sync=False):

        LoggerMixin.__init__(self, 'mod {}'.format(id))

        assert('io' in columns)
        assert('type' in columns)
        assert('interface' in columns)
        self.LPU_id = id
        self.dt = dt
        self.debug = debug
        self.device = device
        if cuda_verbose:
            self.compile_options = ['--ptxas-options=-v']
        else:
            self.compile_options = []

        # Handle file I/O:
        self.output_file = output_file
        self.output = True if output_file else False
        self.input_file = input_file
        self.input_eof = False if input_file else True

        # Load neurons and synapse classes:
        self._load_neurons()
        self._load_synapses()

        # Set default one time import for reading from input files:
        self._one_time_import = 10

        # Save neuron data in the form
        # [('Model0', {'attrib0': [..], 'attrib1': [..]}), ('Model1', ...)]
        self.n_list = n_dict.items()

        # List of booleans indicating whether first neuron of each model is a
        # spiking model:
        n_model_is_spk = [n['spiking'][0] for _, n in self.n_list]

        # Number of neurons of each model:
        n_model_num = [len(n['id']) for _, n in self.n_list]

        # Concatenate lists of integers corresponding to neuron positions in LPU
        # graph for all of the models into a single list:
        n_id = np.array(sum([n['id'] for _, n in self.n_list], []),
                        dtype=np.int32)

        # Concatenate lists of common attributes in model dictionaries into
        # single lists:
        n_is_spk = np.array(sum([n['spiking'] for _, n in self.n_list], []))
        n_is_pub = np.array(sum([n['public'] for _, n in self.n_list], []))
        n_has_in = np.array(sum([n['extern'] for _, n in self.n_list], []))

        # Get selectors and positions of input ports:
        try:
            sel_in_gpot = self.extract_in_gpot(n_dict)
            num_in_ports_gpot = len(n_dict[PORT_IN_GPOT]['id'])
            self.ports_in_gpot_mem_ind = zip(*self.n_list)[0].index(PORT_IN_GPOT)
        except KeyError:
            sel_in_gpot = ''
            num_in_ports_gpot = 0
            self.ports_in_gpot_mem_ind = None

        try:
            sel_in_spk = self.extract_in_spk(n_dict)
            num_in_ports_spk = len(n_dict[PORT_IN_SPK]['id'])
            self.ports_in_spk_mem_ind = zip(*self.n_list)[0].index(PORT_IN_SPK)
        except KeyError:
            sel_in_spk = ''
            num_in_ports_spk = 0
            self.ports_in_spk_mem_ind = None

        sel_in = ','.join(filter(None, [sel_in_gpot, sel_in_spk]))

        # Get selectors of output neurons:
        sel_out_gpot = self.extract_out_gpot(n_dict)
        sel_out_spk = self.extract_out_spk(n_dict)

        sel_out = ','.join(filter(None, [sel_out_gpot, sel_out_spk]))
        sel_gpot = ','.join(filter(None, [sel_in_gpot, sel_out_gpot]))
        sel_spk = ','.join(filter(None, [sel_in_spk, sel_out_spk]))
        sel = ','.join(filter(None, [sel_gpot, sel_spk]))

        self.sel_in_spk = sel_in_spk
        self.sel_out_spk = sel_out_spk
        self.sel_in_gpot = sel_in_gpot
        self.sel_out_gpot = sel_out_gpot

        # Get IDs of output neurons:
        self.out_ports_ids_gpot = np.array([id for _, n in self.n_list for id, pub, spk in
                                            zip(n['id'], n['public'], n['spiking'])
                                            if pub and not spk], dtype=np.int32)
        self.out_ports_ids_spk = np.array([id for _, n in self.n_list for id, pub, spk in
                                           zip(n['id'], n['public'], n['spiking'])
                                           if pub and spk], dtype=np.int32)

        # The following code creates a mapping for each neuron from its ID to
        # the position of the memory entry associated with it in a GPU
        # array. The array is organized as follows:
        #
        #    | gpot neurons | spiking neurons |
        #
        # For example, suppose the IDs of the gpot neurons and of the spike
        # neurons are 1, 4, 5 and 2, 0, 3, respectively. We allocate GPU memory
        # for each neuron as follows:
        #
        #    | 1 4 5 | 2 0 3 |
        #
        # To get the position of the array entry associated with each neuron, 
        # we simply use numpy.argsort:
        #
        # >>> x = [1,4,5,2,0,3]
        # >>> y = numpy.argsort(x)
        # >>> y
        # [4,0,3,5,1,2]
        #
        # The i-th entry of the returned array is the position in x of the
        # i-th smallest element; since the neuron IDs are consecutive integers
        # starting from 0, the i-th smallest element is i itself.
        # In other words, if x[i] == j then y[j] == i.
        #
        # Note also that the sort indices obtained by numpy.argsort are the
        # same as those obtained by consecutive enumeration of the array of 
        # neuron IDs.

        # Count total number of graded potential and spiking neurons:
        num_gpot_neurons = np.where(n_model_is_spk, 0, n_model_num)
        num_spike_neurons = np.where(n_model_is_spk, n_model_num, 0)
        self.total_num_gpot_neurons = sum(num_gpot_neurons)
        self.total_num_spike_neurons = sum(num_spike_neurons)

        # Find sort order of neurons in combined list of graded potential
        # followed by spiking neuron IDs:
        gpot_idx = n_id[~n_is_spk]
        spike_idx = n_id[n_is_spk]
        idx = np.concatenate((gpot_idx, spike_idx))
        order = np.argsort(
            np.concatenate((gpot_idx, spike_idx))).astype(np.int32)
        self.order_dict = {} # maps neuron IDs to indices
        for i, id in enumerate(idx):
            self.order_dict[id] = i

        # Find sort order of neurons within list of
        # graded potential neuron IDs only:
        gpot_order = np.argsort(gpot_idx).astype(np.int32)
        self.gpot_order_l = gpot_order
        self.gpot_order_dict = {} # maps neuron IDs to indices
        for i, id in enumerate(gpot_idx):
            self.gpot_order_dict[id] = i

        # Find sort order of neurons within list of
        # spiking neurons only:
        spike_order = np.argsort(spike_idx).astype(np.int32)
        self.spike_order_l = spike_order
        self.spike_order_dict = {} # maps neuron IDs to indices
        for i, id in enumerate(spike_idx):
            self.spike_order_dict[id] = i

        # Offset into concatenated list of neurons where group of spiking
        # neurons starts:
        self.spike_shift = self.total_num_gpot_neurons

        # Sorted list of neurons accepting external input:
        in_id = n_id[n_has_in]
        in_id.sort()
        self.input_neuron_list = self.order(in_id)

        self.num_public_gpot = len(self.out_ports_ids_gpot)
        self.num_public_spike = len(self.out_ports_ids_spk)
        self.num_input = len(self.input_neuron_list)

        # Replace self.out_ports_ids* with the indices into the original lists
        # of gpot and spiking neuron IDs that can be used to access the array
        # elements in memory that correspond to each neuron:
        self.out_ports_ids_gpot = self.gpot_order(self.out_ports_ids_gpot)
        self.out_ports_ids_spk = self.spike_order(self.out_ports_ids_spk)

        gpot_delay_steps = 0
        spike_delay_steps = 0

        cond_pre = []
        cond_post = []
        I_pre = []
        I_post = []
        reverse = []

        count = 0

        self.s_dict = s_dict
        self.s_list = self.s_dict.items()
        self.nid_max = np.max(n_id) + 1
        num_synapses = [len(s['id']) for _, s in self.s_list]
        for (_, s) in self.s_list:
            cls = s['class'][0]
            s['pre'] = [self.spike_order(int(nid)) if cls <= 1 else self.gpot_order(int(nid))
                        for nid in s['pre']]

            # Why don't we need a shift for the post neuron?
            # For synapses whose post-synaptic site is another synapse, we set
            # its post-id to max_neuron_id + synapse_id. By doing so, we
            # won't confuse synapse IDs with neuron IDs.
            s_neu_post = [self.order(int(nid)) for nid in s['post'] if 'synapse' not in str(nid)]
            s_syn_post = [int(nid[8:])+self.nid_max for nid in s['post'] if 'synapse' in str(nid)]
            s['post'] = s_neu_post+s_syn_post

            order = np.argsort(s['post']).astype(np.int32)
            for k, v in s.items():
                s[k] = np.asarray(v)[order]

            # The same set of ODEs may be used to describe conductance-based
            # and non-conductance-based versions of a synapse model.
            # If the EPSC comes directly from one of the state variables,
            # the model is non-conductance-based. If the calculation of the
            # EPSC involves a reverse potential, the model is
            # conductance-based.
            idx = np.where(s['conductance'])[0]
            if len(idx) > 0:
                cond_post.extend(s['post'][idx])
                reverse.extend(s['reverse'][idx])
                cond_pre.extend(range(count, count + len(idx)))
                count += len(idx)

                # Track the maximum conductance-based synapse delay (ms):
                if 'delay' in s:
                    max_del = np.max(s['delay'][idx])
                    gpot_delay_steps = max_del if max_del > gpot_delay_steps \
                                       else gpot_delay_steps

            idx = np.where(~s['conductance'])[0]
            if len(idx) > 0:
                I_post.extend(s['post'][idx])
                I_pre.extend(range(count, count + len(idx)))
                # Advance by len(idx) rather than len(s['post']) so the running
                # offset stays consistent if a class mixes conductance-based
                # and non-conductance-based synapses:
                count += len(idx)

                # Track the maximum non-conductance-based synapse delay (ms):
                if 'delay' in s:
                    max_del = np.max(s['delay'][idx])
                    spike_delay_steps = max_del if max_del > spike_delay_steps \
                                        else spike_delay_steps

        self.total_synapses = int(np.sum(num_synapses))
        I_post.extend(self.input_neuron_list)
        I_pre.extend(range(self.total_synapses, self.total_synapses + \
                          len(self.input_neuron_list)))

        cond_post = np.asarray(cond_post, dtype=np.int32)
        cond_pre = np.asarray(cond_pre, dtype=np.int32)
        reverse = np.asarray(reverse, dtype=np.double)

        order1 = np.argsort(cond_post, kind='mergesort')
        cond_post = cond_post[order1]
        cond_pre = cond_pre[order1]
        reverse = reverse[order1]

        I_post = np.asarray(I_post, dtype=np.int32)
        I_pre = np.asarray(I_pre, dtype=np.int32)

        order1 = np.argsort(I_post, kind='mergesort')
        I_post = I_post[order1]
        I_pre = I_pre[order1]

        self.idx_start_gpot = np.concatenate(
            (np.asarray([0,], dtype=np.int32),
             np.cumsum(num_gpot_neurons, dtype=np.int32)))
        self.idx_start_spike = np.concatenate(
            (np.asarray([0,], dtype=np.int32),
             np.cumsum(num_spike_neurons, dtype=np.int32)))
        self.idx_start_synapse = np.concatenate(
            (np.asarray([0,], dtype=np.int32),
             np.cumsum(num_synapses, dtype=np.int32)))

        for i, (t, n) in enumerate(self.n_list):
            if n['spiking'][0]:
                idx = np.where(
                    (cond_post >= self.idx_start_spike[i] + self.spike_shift)&
                    (cond_post < self.idx_start_spike[i+1] + self.spike_shift) )
                n['cond_post'] = cond_post[idx] - self.idx_start_spike[i] - self.spike_shift
                n['cond_pre'] = cond_pre[idx]

                # Save reverse potential from synapses in neuron data dict:
                n['reverse'] = reverse[idx]
                idx = np.where(
                    (I_post >= self.idx_start_spike[i] + self.spike_shift)&
                    (I_post < self.idx_start_spike[i+1] + self.spike_shift) )
                n['I_post'] = I_post[idx] - self.idx_start_spike[i] - self.spike_shift
                n['I_pre'] = I_pre[idx]
            else:
                idx = np.where( (cond_post >= self.idx_start_gpot[i])&
                                (cond_post < self.idx_start_gpot[i+1]) )
                n['cond_post'] = cond_post[idx] - self.idx_start_gpot[i]
                n['cond_pre'] = cond_pre[idx]

                # Save reverse potential from synapses in neuron data dict:
                n['reverse'] = reverse[idx]
                idx =  np.where( (I_post >= self.idx_start_gpot[i])&
                                 (I_post < self.idx_start_gpot[i+1]) )
                n['I_post'] = I_post[idx] - self.idx_start_gpot[i]
                n['I_pre'] = I_pre[idx]

            n['num_dendrites_cond'] = Counter(n['cond_post'])
            n['num_dendrites_I'] = Counter(n['I_post'])

        if len(self.s_list) > 0:
            s_id = np.concatenate([s['id'] for _, s in self.s_list]).astype(np.int32)
        else:
            s_id = np.empty(0, dtype=np.int32)

        s_order = np.arange(self.total_synapses)[s_id]
        idx = np.where(cond_post >= self.nid_max)[0]
        cond_post_syn = s_order[cond_post[idx] - self.nid_max]
        cond_post_syn_offset = idx[0] if len(idx) > 0 else 0
        idx = np.where(I_post >= self.nid_max)[0]
        I_post_syn = s_order[I_post[idx] - self.nid_max]
        I_post_syn_offset = idx[0] if len(idx) > 0 else 0
        for i, (t, s) in enumerate(self.s_list):
            idx = np.where(
                (cond_post_syn >= self.idx_start_synapse[i]) &
                (cond_post_syn < self.idx_start_synapse[i+1]))[0]
            s['cond_post'] = cond_post[idx+cond_post_syn_offset] - self.nid_max
            s['cond_pre'] = cond_pre[idx+cond_post_syn_offset]
            s['reverse'] = reverse[idx+cond_post_syn_offset]

            # NOTE: after this point, s['reverse'] is no longer the reverse
            # potential associated with the current synapse class, but the
            # reverse potential of other synapses projecting to the current one.
            # Its purpose is exactly the same as n['reverse'].
            # Not sure if this is good though, since it obviously creates some
            # degree of confusion.
            idx = np.where(
                (I_post_syn >= self.idx_start_synapse[i]) &
                (I_post_syn < self.idx_start_synapse[i+1]))[0]
            s['I_post'] = I_post[idx+I_post_syn_offset] - self.nid_max
            s['I_pre'] = I_pre[idx+I_post_syn_offset]

            s['num_dendrites_cond'] = Counter(s['cond_post'])
            s['num_dendrites_I'] = Counter(s['I_post'])

        self.gpot_delay_steps = int(round(gpot_delay_steps*1e-3/self.dt)) + 1
        self.spike_delay_steps = int(round(spike_delay_steps*1e-3/self.dt)) + 1

        data_gpot = np.zeros(self.num_public_gpot + num_in_ports_gpot,
                             np.double)
        data_spike = np.zeros(self.num_public_spike + num_in_ports_spk,
                              np.int32)
        super(LPU, self).__init__(sel=sel, sel_in=sel_in, sel_out=sel_out,
                                  sel_gpot=sel_gpot, sel_spike=sel_spk,
                                  data_gpot=data_gpot, data_spike=data_spike,
                                  columns=columns, ctrl_tag=ctrl_tag, gpot_tag=gpot_tag,
                                  spike_tag=spike_tag, id=self.LPU_id,
                                  rank_to_id=rank_to_id, routing_table=routing_table,
                                  device=device, debug=debug, time_sync=time_sync)

        # Integer indices in port map data arrays corresponding to input/output
        # gpot/spiking ports:
        self.sel_in_gpot_ids = np.array(self.pm['gpot'].ports_to_inds(self.sel_in_gpot),
                                        dtype=np.int32)
        self.sel_out_gpot_ids = np.array(self.pm['gpot'].ports_to_inds(self.sel_out_gpot),
                                         dtype=np.int32)
        self.sel_in_spk_ids = np.array(self.pm['spike'].ports_to_inds(self.sel_in_spk),
                                       dtype=np.int32)
        self.sel_out_spk_ids = np.array(self.pm['spike'].ports_to_inds(self.sel_out_spk),
                                        dtype=np.int32)
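
Finally, the sel_*_ids arrays computed above are plain integer indices into the
port mappers' backing GPU arrays. A hedged sketch of that mapping with a made-up
selector, assuming GPUPortMapper is importable from neurokernel.pm_gpu:

    import numpy as np
    import pycuda.autoinit  # creates a CUDA context
    import pycuda.gpuarray as gpuarray
    from neurokernel.pm_gpu import GPUPortMapper

    sel_gpot = '/lpu/out/gpot[0:3]'   # hypothetical selector for three ports
    data = gpuarray.to_gpu(np.zeros(3, np.double))
    pm = GPUPortMapper(sel_gpot, data, make_copy=False)

    # Indices of a sub-selector's ports within the mapper's data array:
    inds = np.array(pm.ports_to_inds('/lpu/out/gpot[0:2]'), dtype=np.int32)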