Example #1
0
def validate_block(block):
    # -- Compartment
    validate_compartment(block.compartment)

    # -- Axons
    OUT_AXONS_MAX = d(b'NDA5Ng==', int)
    n_axons = sum(a.axon_slots() for a in block.axons)
    if n_axons > OUT_AXONS_MAX:
        raise BuildError("Output axons (%d) exceeded max (%d)" %
                         (n_axons, OUT_AXONS_MAX))

    for axon in block.axons:
        validate_axon(axon)

    # -- Synapses
    IN_AXONS_MAX = d(b'NDA5Ng==', int)
    n_axons = sum(s.n_axons for s in block.synapses)
    if n_axons > IN_AXONS_MAX:
        raise BuildError("Input axons (%d) exceeded max (%d)" %
                         (n_axons, IN_AXONS_MAX))

    MAX_SYNAPSE_BITS = d(b'MTA0ODU3Ng==', int)
    synapse_bits = sum(s.bits() for s in block.synapses)
    if synapse_bits > MAX_SYNAPSE_BITS:
        raise BuildError("Total synapse bits (%d) exceeded max (%d)" %
                         (synapse_bits, MAX_SYNAPSE_BITS))

    for synapse in block.synapses:
        validate_synapse(synapse)

    # -- Probes
    for probe in block.probes:
        validate_probe(probe)
Example #2
0
    def atom_bits_extra(self):
        """Number of extra bits needed for the atom for incoming pop16 spikes."""
        if self.pop_type == 16:
            atom_bits = self.atom_bits()
            assert atom_bits <= d(b"OQ==", int), "Too many atom bits"
            return max(atom_bits - d(b"NQ==", int), 0)
        else:
            return 0  # meaningless if pop_type != 16
Example #3
0
    def add_spikes_to_generator(cls, t, spikes, basic_spike_generator):
        methods = {
            0: getattr(basic_spike_generator, d(b"YWRkU3Bpa2U=")),
            16: getattr(basic_spike_generator, d(b"YWRkUG9wMTZTcGlrZQ==")),
            32: getattr(basic_spike_generator, d(b"YWRkUG9wMzJTcGlrZQ==")),
        }
        time = d(b"dGltZQ==")
        chip_id = d(b"Y2hpcElk")
        core_id = d(b"Y29yZUlk")
        axon_id = d(b"YXhvbklk")
        atom = d(b"c3JjQXRvbQ==")
        atom_bits_extra = d(b"YXRvbUJpdHM=")

        for spike in spikes:
            axon_type = int(spike["axon_type"])
            kwargs = {
                time: t,
                chip_id: spike["chip_id"],
                core_id: spike["core_id"],
                axon_id: spike["axon_id"],
            }
            if axon_type == 0:
                assert spike[
                    "atom"] == 0, "Atom must be zero for discrete spikes"
            else:
                kwargs[atom] = spike["atom"]
                if axon_type == 16:
                    kwargs[atom_bits_extra] = spike["atom_bits_extra"]

            methods[axon_type](**kwargs)
Example #4
0
class CompartmentConfig(Config):
    DECAY_U_MAX = d(b"NDA5NQ==", int)
    DECAY_V_MAX = d(b"NDA5NQ==", int)
    REFRACT_DELAY_MAX = d(b"NjM=", int)

    params = ("decay_u", "decay_v", "refract_delay", "enable_noise")

    def __init__(self, decay_v, decay_u, refract_delay, enable_noise):
        super(CompartmentConfig, self).__init__()
        self.decay_v = decay_v
        self.decay_u = decay_u
        self.refract_delay = refract_delay
        self.enable_noise = enable_noise
Example #5
0
class CompartmentConfig(Config):
    DECAY_U_MAX = d(b'NDA5NQ==', int)
    DECAY_V_MAX = d(b'NDA5NQ==', int)
    REFRACT_DELAY_MAX = d(b'NjM=', int)

    params = ('decay_u', 'decay_v', 'refract_delay', 'enable_noise')

    def __init__(self, decay_v, decay_u, refract_delay, enable_noise):
        super(CompartmentConfig, self).__init__()
        self.decay_v = decay_v
        self.decay_u = decay_u
        self.refract_delay = refract_delay
        self.enable_noise = enable_noise
Example #6
0
def validate_synapse(synapse):
    validate_synapse_cfg(synapse.synapse_cfg)
    if synapse.axon_compartment_bases is not None:
        min_base = d(b'LTE=', int)
        max_base = d(b'MjU2', int)
        assert all(
            min_base <= b < max_base for b in synapse.axon_compartment_bases
        ), ("compartment base must be >= %d and < %d (-1 indicating unused)" %
            (min_base, max_base))
    if synapse.pop_type == 16:
        if synapse.axon_compartment_bases is not None:
            assert all(b % d(b'NA==', int) == 0
                       for b in synapse.axon_compartment_bases)
Example #7
0
def discretize_weights(synapse_cfg,
                       w,
                       dtype=np.int32,
                       lossy_shift=True,
                       check_result=True):
    """Takes weights and returns their quantized values with weight_exp.

    The actual weight to be put on the chip is this returned value
    divided by the ``scale`` attribute.

    Parameters
    ----------
    w : float ndarray
        Weights to be discretized, in the range -255 to 255.
    dtype : np.dtype, optional (Default: np.int32)
        Data type for discretized weights.
    lossy_shift : bool, optional (Default: True)
        Whether to mimic the two-part weight shift that currently happens
        on the chip, which can lose information for small weight_exp.
    check_result : bool, optional (Default: True)
        Whether to check that the discretized weights fall in
        the valid range for weights on the chip (-256 to 255).
    """
    s = synapse_cfg.shift_bits
    m = 2**(d(b'OA==', int) - s) - 1

    w = np.round(w / 2.**s).clip(-m, m).astype(dtype)
    s2 = s + synapse_cfg.weight_exp

    if lossy_shift:
        if s2 < 0:
            warnings.warn("Lost %d extra bits in weight rounding" % (-s2, ))

            # Round before `s2` right shift. Just shifting would floor
            # everything resulting in weights biased towards being smaller.
            w = (np.round(w * 2.**s2) / 2**s2).clip(-m, m).astype(dtype)

        shift(w, s2, out=w)
        np.left_shift(w, d(b'Ng==', int), out=w)
    else:
        shift(w, d(b'Ng==', int) + s2, out=w)

    if check_result:
        ws = w // synapse_cfg.scale
        assert (np.all(ws <= d(b'MjU1', int))
                and np.all(ws >= d(b'LTI1Ng==', int)))

    return w
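As a quick illustration of the round-and-clip stage described in the docstring, here is a self-contained sketch. The `shift_bits` and 8-bit weight-magnitude values are assumed stand-ins (the real constants above are obfuscated), and `sketch_discretize` is a hypothetical helper, not part of the library.

import numpy as np

def sketch_discretize(w, shift_bits=2, weight_mag_bits=8):
    # Mirror the first step of discretize_weights: divide by 2**shift_bits,
    # round to the nearest integer, then clip to the representable magnitude.
    m = 2 ** (weight_mag_bits - shift_bits) - 1
    return np.round(np.asarray(w) / 2.0 ** shift_bits).clip(-m, m).astype(np.int32)

print(sketch_discretize([5.0, -130.0, 300.0]))
# -> 1, -32, 63 (300 exceeds the 6-bit magnitude and is clipped to 63)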
Example #8
0
def scale_pes_errors(error, scale=1.0):
    """Scale PES errors based on a scaling factor, round and clip."""
    error = scale * error
    error = np.round(error).astype(np.int32)
    max_err = d(b"MTI3", int)
    q = error > max_err
    if np.any(q):
        warnings.warn(
            "Received PES error greater than chip max (%0.2e). "
            "Consider changing `Model.pes_error_scale`." % (max_err / scale,)
        )
        logger.debug(
            "PES error %0.2e > %0.2e (chip max)", np.max(error) / scale, max_err / scale
        )
        error[q] = max_err
    q = error < -max_err
    if np.any(q):
        warnings.warn(
            "Received PES error less than chip min (%0.2e). "
            "Consider changing `Model.pes_error_scale`." % (-max_err / scale,)
        )
        logger.debug(
            "PES error %0.2e < %0.2e (chip min)",
            np.min(error) / scale,
            -max_err / scale,
        )
        error[q] = -max_err
    return error
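A self-contained sketch of the scale, round, and clip behaviour above, with the chip maximum written as an assumed constant rather than the obfuscated value; the warning and logging branches are omitted.

import numpy as np

def sketch_scale_pes_errors(error, scale=1.0, max_err=127):  # max_err is assumed
    # Scale, round to int32, then clip symmetrically to the assumed chip range.
    error = np.round(scale * np.asarray(error)).astype(np.int32)
    return np.clip(error, -max_err, max_err)

print(sketch_scale_pes_errors([0.4, -3.2, 2000.0], scale=0.1))
# large errors are clipped to the assumed chip max of 127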
Example #9
0
    def set_learning(
            self, learning_rate=1., tracing_tau=2, tracing_mag=1.0, wgt_exp=4):
        assert tracing_tau == int(tracing_tau), "tracing_tau must be integer"

        self.learning = True
        self.tracing_tau = int(tracing_tau)
        self.tracing_mag = tracing_mag
        # stdp_cfg hard-coded for now (see hardware.builder)
        self.format(learning_cfg=d(b'MQ==', int), stdp_cfg=d(b'MA==', int))

        self.train_epoch = 2
        self.learn_epoch_k = 1
        self.learn_epoch = self.train_epoch * 2**self.learn_epoch_k

        self.learning_rate = learning_rate * self.learn_epoch
        self.learning_wgt_exp = wgt_exp
Example #10
0
    def _chip2host_snips(self, probes_receivers):
        count = self.nengo_io_c2h_count
        data = self.nengo_io_c2h.read(count)
        time_step, data = data[0], np.array(data[1:], dtype=np.int32)
        snip_range = self.nengo_io_snip_range

        for probe in self._snip_probe_data:
            assert probe.use_snip
            x = data[snip_range[probe]]
            assert x.ndim == 1
            if probe.key == 'spiked':
                assert isinstance(probe.target, LoihiBlock)
                refract_delays = probe.target.compartment.refract_delay

                # Loihi uses the voltage value to indicate where we
                # are in the refractory period. We want to find neurons
                # starting their refractory period.
                x = (x == refract_delays * d(b'MTI4', int))

            if probe.weights is not None:
                x = np.dot(x, probe.weights)

            receiver = probes_receivers.get(probe, None)
            if receiver is not None:
                # chip->host
                receiver.receive(self.model.dt * time_step, x)
            else:
                # onchip probes
                self._snip_probe_data[probe].append(x)

        self._chip2host_sent_steps += 1
Example #11
0
def validate_synapse(synapse):
    validate_synapse_cfg(synapse.synapse_cfg)
    if synapse.axon_compartment_bases is not None:
        min_base = d(b"LTE=", int)
        max_base = d(b"MjU2", int)
        assert all(
            min_base <= b < max_base for b in synapse.axon_compartment_bases
        ), ("compartment base must be >= %d and < %d (-1 indicating unused)" %
            (min_base, max_base))
    if synapse.pop_type == 16:
        if synapse.axon_compartment_bases is not None:
            assert all(
                b % 4 == 0 for b in synapse.axon_compartment_bases
                if b >= 0), (
                    "Pop16 axons must have all compartment bases modulo 4: %s"
                    % synapse.axon_compartment_bases)
Example #12
0
    def block_to_chip(self, block, chip):
        if block.compartment.n_compartments > d(b'MTAyNA==', int):
            raise ValidationError("Segment does not fit on one chip",
                                  "n_neurons")

        core = chip.new_core()
        core.add_block(block)

        compartment_cfgs, compartment_cfg_idxs = core_compartment_cfgs(core)
        for cfg in compartment_cfgs:
            core.add_compartment_cfg(cfg)
        core.compartment_cfg_idxs = compartment_cfg_idxs

        vth_cfgs, vth_cfg_idxs = core_vth_cfgs(core)
        for cfg in vth_cfgs:
            core.add_vth_cfg(cfg)
        core.vth_cfg_idxs = vth_cfg_idxs

        for synapse in block.synapses:
            core.add_synapse(synapse)

        stdp_pre_cfgs, stdp_pre_cfg_idxs = core_stdp_pre_cfgs(core)
        for stdp_pre_cfg in stdp_pre_cfgs:
            core.add_stdp_pre_cfg(stdp_pre_cfg)
        core.stdp_pre_cfg_idxs = stdp_pre_cfg_idxs

        core.stdp_pre_cfg_idx = None  # hardware.builder will set
        core.stdp_cfg_idx = None  # hardware.builder will set
Example #13
0
    def prepare_for_probe(self, block, pinfo, target_idx):
        chip_idx = pinfo.chip_idx[target_idx]
        core_id = pinfo.core_id[target_idx]
        compartment_idxs = pinfo.compartment_idxs[target_idx]

        self.cores.add(core_id)

        key = pinfo.key
        if key == "spike":
            refract_delay = block.compartment.refract_delay[0]
            assert np.all(block.compartment.refract_delay == refract_delay)
            key = refract_delay * d(b"MTI4", int)

        n_comps = len(compartment_idxs)
        logger.info("Probe target has %d compartments", n_comps)
        comp0 = compartment_idxs[0]
        comp_diff = np.diff(compartment_idxs)
        is_ranged_comps = np.all(
            comp_diff == comp_diff[0] if len(comp_diff) > 0 else False
        )
        is_packed_spikes = is_ranged_comps and (pinfo.key == "spike")
        n_packed_spikes = n_comps if is_packed_spikes else 0

        output_len = ceil_div(n_comps, 32) if is_packed_spikes else n_comps
        output_slice = slice(self.last_output, self.last_output + output_len)
        pinfo.snip_range.append((chip_idx, output_slice, n_packed_spikes))

        offset = self.output_offset + self.last_output
        if is_ranged_comps:
            self.probes.append((offset, key, core_id, comp0, comp_diff[0], n_comps))
        else:
            for i, comp in enumerate(compartment_idxs):
                self.probes.append((offset + i, key, core_id, comp, 0, 1))
        self.last_output += output_len
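`ceil_div` is not shown in this snippet; a minimal sketch of the assumed helper, and of why it appears with 32 (packed spike bits per output word):

def ceil_div(a, b):
    # Integer division rounding up (assumed helper, used above for 32-bit packing).
    return -(-a // b)

assert ceil_div(32, 32) == 1
assert ceil_div(33, 32) == 2  # 33 packed spike bits need two 32-bit output words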
Example #14
0
    def run_steps(self, steps, blocking=True):
        if self.use_snips and self.nengo_io_h2c is None:
            self.create_io_snip()

        # NOTE: we need to call connect() after snips are created
        self.connect()
        d_get(self.nxsdk_board, b'cnVu')(steps, **{
            d(b'YVN5bmM='): not blocking
        })
Example #15
0
    def run_steps(self, steps, blocking=True):
        assert self.connected, "Interface is not built"

        # start the board running the desired number of steps
        d_get(self.nxsdk_board, b"cnVu")(steps, **{d(b"YVN5bmM="): not blocking})

        # connect snips
        if self.use_snips and not self.snips.connected:
            self.snips.connect(self.nxsdk_board)
Example #16
0
    def set_population_weights(
        self, weights, indices, axon_to_weight_map, compartment_bases, pop_type=None
    ):
        """Set population weights on this Synapse."""
        self.axon_to_weight_map = axon_to_weight_map
        self.axon_compartment_bases = compartment_bases
        self.pop_type = 16 if pop_type is None else pop_type

        self._set_weights_indices(weights, indices=indices, compression=d(b"MA==", int))
Example #17
0
    def set_learning(
        self, learning_rate=1.0, tracing_tau=2, tracing_mag=1.0, wgt_exp=4
    ):
        """Set the learning parameters for this Synapse."""
        assert tracing_tau == int(tracing_tau), "tracing_tau must be integer"

        self.learning = True
        self.tracing_tau = int(tracing_tau)
        self.tracing_mag = tracing_mag
        # stdp_cfg hard-coded for now (see hardware.builder)
        self.format(learning_cfg=d(b"MQ==", int), stdp_cfg=d(b"MA==", int))

        self.train_epoch = 2
        self.learn_epoch_k = 1
        self.learn_epoch = self.train_epoch * 2 ** self.learn_epoch_k

        self.learning_rate = learning_rate * self.learn_epoch
        self.learning_wgt_exp = wgt_exp
Example #18
0
    def set_full_weights(self, weights):
        weights = np.array(weights, copy=False, dtype=np.float32)
        assert weights.ndim in (1, 2)

        if weights.ndim == 1:
            indices = np.arange(weights.size)
        else:
            indices = None

        self._set_weights_indices(weights, indices=indices)
        assert len(self.weights) == self.n_axons, (
            "Full weights must provide a weight vector for each axon")

        self.format(
            compression=d(b'Mw==', int),
            idx_bits=self.idx_bits(),
            fanout_type=d(b'MQ==', int),
            n_synapses=d(b'NjM=', int),
            weight_bits=d(b'Nw==', int),
        )
Example #19
0
    def _set_weights_indices(
        self,
        weights,
        indices=None,
        weight_dtype=np.float32,
        compression=d(b"MA==", int),
    ):
        weights = [
            np.array(w, copy=False, dtype=weight_dtype, ndmin=2) for w in weights
        ]
        assert all(
            w.ndim == 2 for w in weights
        ), "Each axon's weights must be shape (n_populations, n_compartments)"
        assert all(
            w.shape[0] == weights[0].shape[0] for w in weights
        ), "All axon weights must have the same number of populations"
        self.weights = weights

        if indices is None:
            indices = [
                np.zeros((w.shape[0], 1), dtype=np.int32)
                + np.arange(w.shape[1], dtype=np.int32)
                for w in self.weights
            ]
        indices = [np.array(i, copy=False, dtype=np.int32, ndmin=2) for i in indices]
        assert all(
            i.ndim == 2 for i in indices
        ), "Each axon's indices must be shape (n_populations, n_compartments)"
        assert all(
            i.shape == w.shape for i, w in zip(indices, weights)
        ), "Indices shapes must match weights shapes"
        assert len(weights) == len(indices)
        self.indices = indices

        self.format(
            compression=compression,
            idx_bits=self.idx_bits(),
            fanout_type=d(b"MQ==", int),
            n_synapses=d(b"NjM=", int),
            weight_bits=d(b"Nw==", int),
        )
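A sketch of the data layout these assertions enforce, with made-up numbers: `weights` and `indices` are per-axon lists, and each element has shape (n_populations, n_compartments). The commented call shows how a hypothetical `synapse` object would pass them in.

import numpy as np

weights = [
    np.array([[1.0, 0.5, -0.25]], dtype=np.float32),  # axon 0: 1 population, 3 compartments
    np.array([[0.0, 2.0, 1.0]], dtype=np.float32),    # axon 1
]
indices = [
    np.array([[0, 1, 2]], dtype=np.int32),  # compartments targeted by axon 0
    np.array([[3, 4, 5]], dtype=np.int32),  # compartments targeted by axon 1
]
# synapse._set_weights_indices(weights, indices=indices)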
Example #20
0
    def set_population_weights(
        self,
        weights,
        indices,
        axon_to_weight_map,
        compartment_bases,
        pop_type=None
    ):
        self._set_weights_indices(weights, indices)
        self.axon_to_weight_map = axon_to_weight_map
        self.axon_compartment_bases = compartment_bases
        self.pop_type = 16 if pop_type is None else pop_type

        idx_bits = self.idx_bits()
        self.format(
            compression=d(b'MA==', int),
            idx_bits=idx_bits,
            fanout_type=d(b'MQ==', int),
            n_synapses=d(b'NjM=', int),
            weight_bits=d(b'Nw==', int),
        )
Example #21
0
def decay_int(x, decay, bits=None, offset=0, out=None):
    """Decay integer values using a decay constant.

    The decayed value is given by::

        sign(x) * floor(abs(x) * (2**bits - offset - decay) / 2**bits)
    """
    if out is None:
        out = np.zeros_like(x)
    if bits is None:
        bits = d(b'MTI=', int)
    r = (2**bits - offset - np.asarray(decay)).astype(np.int64)
    np.right_shift(np.abs(x) * r, bits, out=out)
    return np.sign(x) * out
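A worked check of the docstring formula, assuming numpy is imported as `np` and passing bits=12 explicitly rather than relying on the obfuscated default:

# decay_int(4096, decay=256, bits=12, offset=0)
#   = sign(4096) * floor(4096 * (2**12 - 0 - 256) / 2**12)
#   = floor(4096 * 3840 / 4096) = 3840
assert decay_int(np.array([4096]), decay=256, bits=12)[0] == 3840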
Example #22
0
def validate_core(core):
    # TODO: check these numbers are correct
    assert len(core.compartment_cfgs) <= d(b'MzI=', int)
    assert len(core.vth_cfgs) <= d(b'MTY=', int)
    assert len(core.synapse_cfgs) <= d(b'MTY=', int)
    assert len(core.stdp_pre_cfgs) <= d(b'Mw==', int)

    for cfg in core.compartment_cfgs:
        validate_compartment_cfg(cfg)
    for cfg in core.vth_cfgs:
        validate_vth_cfg(cfg, core=core)
    for cfg in core.synapse_cfgs:
        if cfg is not None:
            validate_synapse_cfg(cfg)
    for cfg in core.stdp_pre_cfgs:
        validate_trace_cfg(cfg)

    for synapse in core.synapse_axons:
        cfg = core.get_synapse_cfg(synapse)
        idxbits = cfg.real_idx_bits
        for i in synapse.indices:
            assert np.all(i >= 0)
            assert np.all(i < 2**idxbits)
Example #23
0
    def set_weights(self, weights):
        """Set dense or sparse weights on this Synapse."""
        if isinstance(weights, scipy.sparse.spmatrix):
            csr = weights.tocsr()
            weights_by_row, idxs_by_row = [], []
            for i in range(weights.shape[0]):
                i0, i1 = csr.indptr[i : i + 2]
                weights_by_row.append(csr.data[i0:i1])
                idxs_by_row.append(csr.indices[i0:i1])

            weights = weights_by_row
            indices = idxs_by_row
        else:
            weights = np.array(weights, copy=False, dtype=np.float32)
            assert weights.ndim == 2
            indices = None

        assert len(weights) == self.n_axons, "Must provide weights for each axon"
        self._set_weights_indices(weights, indices=indices, compression=d(b"Mw==", int))
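The CSR row-slicing idiom above, shown standalone with a tiny matrix (scipy assumed importable): each row yields its nonzero weights and their column indices.

import numpy as np
import scipy.sparse

mat = scipy.sparse.csr_matrix(np.array([[0.0, 1.5, 0.0], [2.0, 0.0, 3.0]]))
for i in range(mat.shape[0]):
    i0, i1 = mat.indptr[i : i + 2]
    print(i, mat.data[i0:i1], mat.indices[i0:i1])
# row 0 -> weights [1.5], columns [1]; row 1 -> weights [2. 3.], columns [0 2]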
Example #24
0
def build_block(nxsdk_core, core, block, compartment_idxs, ax_range):
    assert block.compartment.scale_u is False
    assert block.compartment.scale_v is False

    logger.debug("Building %s on core.id=%d", block, nxsdk_core.id)

    for i, bias in enumerate(block.compartment.bias):
        bman, bexp = bias_to_manexp(bias)
        icomp = core.compartment_cfg_idxs[block][i]
        ivth = core.vth_cfg_idxs[block][i]

        ii = compartment_idxs[i]
        d_func(d_get(nxsdk_core, b'Y3hDZmc=')[ii],
               b'Y29uZmlndXJl',
               kwargs={
                   b'Ymlhcw==': bman,
                   b'Ymlhc0V4cA==': bexp,
                   b'dnRoUHJvZmlsZQ==': ivth,
                   b'Y3hQcm9maWxl': icomp,
               })

        phasex = d(b'cGhhc2UlZA==') % (ii % 4, )
        d_get(
            d_get(nxsdk_core, b'Y3hNZXRhU3RhdGU=')[ii // 4],
            b'Y29uZmlndXJl')(**{
                phasex: 2
            })

    logger.debug("- Building %d synapses", len(block.synapses))
    for synapse in block.synapses:
        build_synapse(nxsdk_core, core, block, synapse, compartment_idxs)

    logger.debug("- Building %d axons", len(block.axons))
    pop_id_map = {}
    for axon in block.axons:
        build_axons(nxsdk_core, core, block, axon, compartment_idxs,
                    pop_id_map)

    logger.debug("- Building %d probes", len(block.probes))
    for probe in block.probes:
        build_probe(nxsdk_core, core, block, probe, compartment_idxs)
Example #25
0
def test_interface_connection_errors(Simulator, monkeypatch):
    with nengo.Network() as net:
        nengo.Ensemble(2, 1)

    # test opening closed interface error
    sim = Simulator(net)
    interface = sim.sims["loihi"]
    interface.close()
    with pytest.raises(SimulationError, match="cannot be reopened"):
        with interface:
            pass
    sim.close()

    # test failed connection error
    def start(*args, **kwargs):
        raise Exception("Mock failure to connect")

    monkeypatch.setattr(NxsdkBoard, d(b"c3RhcnQ="), start)

    with pytest.raises(SimulationError, match="Mock failure to connect"):
        with Simulator(net):
            pass
Example #26
0
    def bits_per_axon(self, n_weights):
        """For an axon with n weights, compute the weight memory bits used"""
        bits_per_weight = self.real_weight_bits + self.delay_bits + self.tag_bits
        if self.compression == d(b"MA==", int):
            bits_per_weight += self.real_idx_bits
        elif self.compression == d(b"Mw==", int):
            pass
        else:
            raise NotImplementedError("Compression %s" % (self.compression,))

        synapse_idx_bits = d(b"NA==", int)
        n_synapses_bits = d(b"Ng==", int)
        bits = 0
        synapses_per_block = self.n_synapses + 1
        for i in range(0, n_weights, synapses_per_block):
            n = min(n_weights - i, synapses_per_block)
            bits_i = n * bits_per_weight + synapse_idx_bits + n_synapses_bits
            # round up to nearest memory unit
            bits_i = -d(b"NjQ=", int) * (-bits_i // d(b"NjQ=", int))
            bits += bits_i

        return bits
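The "round up to nearest memory unit" line above uses a negative floor-division idiom; a standalone sketch with an assumed 64-bit unit:

def round_up_to_unit(bits, unit=64):  # unit is assumed; equals ceil(bits / unit) * unit
    return -unit * (-bits // unit)

assert round_up_to_unit(1) == 64
assert round_up_to_unit(64) == 64
assert round_up_to_unit(65) == 128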
Example #27
0
class Compartment:
    """Stores information for configuring Loihi compartments.

    The information stored here will be associated with some block,
    and all compartments will share certain information.
    While compartments are usually thought of as neurons, we use compartments
    to implement Nengo ensembles, nodes, and connections through special
    decode neurons.

    Before `.discretize_compartment` has been called, most attributes in
    this class are floating-point values. Calling `.discretize_compartment`
    converts them to integer values in place for use on Loihi.

    Attributes
    ----------
    bias : (n,) ndarray
        Compartment biases.
    enable_noise : (n,) ndarray
        Whether to enable noise for each compartment.
    decay_u : (n,) ndarray
        Input (synapse) decay constant for each compartment.
    decay_v : (n,) ndarray
        Voltage decay constant for each compartment.
    label : string
        A label for the block (for debugging purposes).
    n_compartments : int
        The number of compartments in the block.
    noise_at_membrane : {0, 1}
        Inject noise into current (0) or voltage (1).
    noise_exp : float or int
        Exponent for noise generation. Floating point values are base 10
        in units of current or voltage. Integer values are in base 2.
    noise_offset : float or int
        Offset for noise generation.
    refract_delay : (n,) ndarray
        Compartment refractory delays, in time steps.
    scale_u : bool
        Scale input (U) by decay_u so that the integral of U is
        the same before and after filtering.
    scale_v : bool
        Scale voltage (V) by decay_v so that the integral of V is
        the same before and after filtering.
    tau_s : float or None
        Time constant used to set decay_u. None if decay_u has not been set.
    vmax : float or int (range [2**9 - 1, 2**23 - 1])
        Maximum voltage for all compartments, in the same units as ``vth``.
    vmin : float or int (range [-2**23 + 1, 0])
        Minimum voltage for all compartments, in the same units as ``vth``.
    vth : (n,) ndarray
        Compartment voltage thresholds.
    """
    # threshold at which U/V scaling is allowed
    DECAY_SCALE_TH = 0.5 / d(b'NDA5Ng==', int)  # half of decay scaling unit

    def __init__(self, n_compartments, label=None):
        self.n_compartments = n_compartments
        self.label = label

        # parameters specific to compartments/block
        self.decay_u = np.ones(n_compartments, dtype=np.float32)
        # ^ default to no filter
        self.decay_v = np.zeros(n_compartments, dtype=np.float32)
        # ^ default to integration
        self.tau_s = None
        self.scale_u = True
        self.scale_v = False

        self.refract_delay = np.zeros(n_compartments, dtype=np.int32)
        self.vth = np.zeros(n_compartments, dtype=np.float32)
        self.bias = np.zeros(n_compartments, dtype=np.float32)
        self.enable_noise = np.zeros(n_compartments, dtype=bool)

        # parameters common to core
        self.vmin = 0
        self.vmax = np.inf
        self.noise_offset = 0
        self.noise_exp = 0
        self.noise_at_membrane = 0

    def __str__(self):
        return "%s(%s)" % (
            type(self).__name__, self.label if self.label else '')

    def configure_default_filter(self, tau_s, dt=0.001):
        """Set the default Lowpass synaptic input filter for compartments.

        Parameters
        ----------
        tau_s : float
            `nengo.Lowpass` synapse time constant for filtering.
        dt : float
            Simulator time step.
        """
        if self.tau_s is None:  # don't overwrite a non-default filter
            self._configure_filter(tau_s, dt=dt)

    def configure_filter(self, tau_s, dt=0.001):
        """Set Lowpass synaptic input filter for compartments.

        Parameters
        ----------
        tau_s : float
            `nengo.Lowpass` synapse time constant for filtering.
        dt : float
            Simulator time step.
        """
        if self.tau_s is not None and tau_s < self.tau_s:
            warnings.warn("tau_s is already set to %g, which is larger than "
                          "%g. Using %g." % (self.tau_s, tau_s, self.tau_s))
            return
        elif self.tau_s is not None and tau_s > self.tau_s:
            warnings.warn(
                "tau_s is currently %g, which is smaller than %g. Overwriting "
                "tau_s with %g." % (self.tau_s, tau_s, tau_s))
        self._configure_filter(tau_s, dt=dt)
        self.tau_s = tau_s

    def _configure_filter(self, tau_s, dt):
        decay_u = 1 if tau_s == 0 else -np.expm1(-dt/np.asarray(tau_s))
        self.decay_u[:] = decay_u
        self.scale_u = decay_u > self.DECAY_SCALE_TH
        if not self.scale_u:
            raise BuildError(
                "Current (U) scaling is required. Perhaps a synapse time "
                "constant is too large in your model.")

    def configure_lif(self, tau_rc=0.02, tau_ref=0.001, vth=1, dt=0.001,
                      min_voltage=0):
        """Configure these compartments as individual LIF neurons.

        Parameters
        ----------
        tau_rc : float
            Membrane time constant (in seconds) of the neurons.
        tau_ref : float
            Refractory period (in seconds) of the neurons.
        vth : float
            Voltage firing threshold of the neurons.
        dt : float
            Simulator time step length (in seconds).
        min_voltage : float
            The minimum voltage for the neurons.
        """

        self.decay_v[:] = -np.expm1(-dt / np.asarray(tau_rc))
        self.refract_delay[:] = np.round(tau_ref / dt) + 1
        self.vth[:] = vth
        self.vmin = min_voltage
        self.vmax = np.inf
        self.scale_v = np.all(self.decay_v > self.DECAY_SCALE_TH)
        if not self.scale_v:
            raise BuildError(
                "Voltage (V) scaling is required with LIF neurons. Perhaps "
                "the neuron tau_rc time constant is too large.")

    def configure_nonspiking(self, tau_ref=0.0, vth=1, dt=0.001):
        """Configure these compartments as individual non-spiking neurons.

        Parameters
        ----------
        tau_ref : float
            Refractory period (in seconds) of the neurons.
        vth : float
            Voltage firing threshold of the neurons.
        dt : float
            Simulator time step length (in seconds).
        """

        self.decay_v[:] = 1.
        self.refract_delay[:] = 1
        self.vth[:] = vth
        self.vmin = 0
        self.vmax = np.inf
        self.scale_v = False

    def configure_relu(self, tau_ref=0.0, vth=1, dt=0.001):
        """Configure these compartments as individual Rectified Linear neurons.

        These are also known as non-leaky integrate-and-fire neurons. The
        voltage is the integral of the input current.

        Parameters
        ----------
        tau_ref : float
            Refractory period (in seconds) of the neurons.
        vth : float
            Voltage firing threshold of the neurons.
        dt : float
            Simulator time step length (in seconds).
        """

        self.decay_v[:] = 0.
        self.refract_delay[:] = np.round(tau_ref / dt) + 1
        self.vth[:] = vth
        self.vmin = 0
        self.vmax = np.inf
        self.scale_v = False
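A minimal usage sketch of the class above, with illustrative values only (it assumes the module's dependencies, such as numpy and the `d` helper, are available): build a few LIF compartments, give them a default lowpass input filter, and set biases. A BuildError would be raised if the time constants were too large for the decay scaling thresholds.

comp = Compartment(n_compartments=4, label="example")
comp.configure_lif(tau_rc=0.02, tau_ref=0.002, vth=1, dt=0.001)
comp.configure_default_filter(tau_s=0.005, dt=0.001)  # sets decay_u from tau_s
comp.bias[:] = 0.5  # constant input current for every compartment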
Example #28
0
    def axon_bits(self):
        if self.pop_type == 16:
            return d(b'MTA=', int) - self.atom_bits_extra()
        else:
            return d(b'MTI=', int)
Example #29
0
    def atom_bits_extra(self):
        atom_bits = self.atom_bits()
        assert atom_bits <= d(b'OQ==', int), "Too many atom bits"
        return max(atom_bits - d(b'NQ==', int), 0)
Example #30
0
    def idxs_per_synapse(self):
        return d(b'Mg==', int) if self.learning else d(b'MQ==', int)