Example No. 1
    def test_Constructor(self):
        import numpy
        import pymarocco
        import random

        marocco = pymarocco.PyMarocco()
        marocco.calib_backend = pymarocco.PyMarocco.CalibBackend.Default

        pynn.setup(marocco=marocco)

        N = 10
        model = pynn.IF_cond_exp
        selector = numpy.array(
            [random.choice([True, False]) for x in range(0, N)])
        pop = pynn.Population(N, model)
        pv = pynn.PopulationView(pop, selector)

        self.assertEqual(len(pv), len(numpy.where(selector == True)[0]))

        # now a selection with wrong size is given
        wrong_selector = numpy.array(
            [random.choice([True, False]) for x in range(0, 2 * N)])
        with self.assertRaises(RuntimeError):
            pv = pynn.PopulationView(pop, wrong_selector)

        pynn.run(100)
Example No. 2
    def test(self):

        wafer = 99999  # a wafer for which no redman data is available
        hicann = 82
        neuron_number = 12

        marocco = PyMarocco()
        marocco.neuron_placement.default_neuron_size(4)
        marocco.backend = PyMarocco.Without
        marocco.default_wafer = C.Wafer(wafer)

        used_hicann = C.HICANNGlobal(C.HICANNOnWafer(Enum(hicann)),
                                     C.Wafer(wafer))

        used_hicann  # prevent pep8 warning of unused variable

        pynn.setup(marocco=marocco)

        pop = pynn.Population(1, pynn.IF_cond_exp)
        topleft = C.NeuronOnWafer(C.NeuronOnHICANN(X(neuron_number), Y(0)),
                                  C.HICANNOnWafer(Enum(hicann)))
        logical_neuron = LogicalNeuron.rectangular(topleft, size=4)
        marocco.manual_placement.on_neuron(pop, logical_neuron)

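        # the mapping is expected to fail, since no redman (defect) data is
        # available for this wafer (see the comment on `wafer` above)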
        with self.assertRaises(RuntimeError):
            pynn.run(0)
            pynn.end()
    def test_access_queue(self):
        """use queue handling"""
        class myPlacer(placer):
            def loop(self):
                print("reversing populations")

                # b = self.m_queue.access  # use this or the following
                print((dir(self.m_queue.value()[0])))
                b = self.m_queue.value()
                print((dir(b)))

        marocco = self.marocco
        user_strat = myPlacer()

        marocco.neuron_placement.default_placement_strategy(user_strat)

        pynn.setup(marocco=marocco)

        self.network()

        result = self.load_results()
        hicann = C.HICANNOnWafer(Enum(42))
        nb = C.NeuronBlockOnHICANN(Enum(4))
        for pop in self.pops:
            for nrn in pop:
                placement_item, = result.placement.find(nrn)
                logical_neuron = placement_item.logical_neuron()
                for denmem in logical_neuron:
                    # all pops shall be on different NBs
                    self.assertFalse(nb == denmem.toNeuronBlockOnHICANN()
                                     and hicann == denmem.toHICANNOnWafer())
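            # remember the block and HICANN of this population's last denmem,
            # so the next population is compared against its predecessor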
            nb = denmem.toNeuronBlockOnHICANN()
            hicann = denmem.toHICANNOnWafer()
Example No. 4
    def test_get_denmems(self):
        pop_size = 2

        for neuron_size in [4, 8, 12, 16, 32]:
            self.marocco.neuron_placement.default_neuron_size(neuron_size)

            pynn.setup(marocco=self.marocco)

            target = pynn.Population(pop_size, pynn.IF_cond_exp, {})

            populations = [target]
            for i in range(3):
                p1 = pynn.Population(pop_size, pynn.SpikeSourceArray,
                                     {'spike_times': [1.]})
                p2 = pynn.Population(pop_size, pynn.IF_cond_exp, {})
                pynn.Projection(p1, target,
                                pynn.OneToOneConnector(weights=0.004))
                pynn.Projection(p2, target,
                                pynn.OneToOneConnector(weights=0.004))

                populations.append(p2)

            pynn.run(0)
            pynn.end()

            mapstats = self.marocco.getStats()

            results = Marocco.from_file(self.marocco.persist)
            for pop in populations:
                for nrn in range(pop_size):
                    for item in results.placement.find(pop[nrn]):
                        self.assertFalse(item.logical_neuron().is_external())
                        self.assertEqual(neuron_size,
                                         item.logical_neuron().size())
Example No. 5
    def test_popview_on_hicann(self, size):
        pynn.setup(marocco=self.marocco)
        neuron_size = 4
        self.marocco.neuron_placement.default_neuron_size(neuron_size)
        hicann = C.HICANNOnWafer(Enum(122))
        hicann_1 = C.HICANNOnWafer(Enum(123))
        hicann_2 = C.HICANNOnWafer(Enum(124))
        hicann_3 = C.HICANNOnWafer(Enum(125))
        pop = pynn.Population(size, pynn.IF_cond_exp, {})
        pop_1 = pynn.Population(size, pynn.IF_cond_exp, {})
        pop_view = pynn.PopulationView(pop, list(range(0, size, 2)))
        pop_view_1 = pynn.PopulationView(pop, list(range(1, size, 2)))
        pop_1_view = pynn.PopulationView(pop_1, list(range(1, size // 2)))
        pop_1_view_1 = pynn.PopulationView(
            pop_1, list(range(size - 2, size // 2, -1)))
        pop_auto_placement = pynn.PopulationView(pop_1, [0, size // 2, size - 1])
        self.marocco.manual_placement.on_hicann(pop_view, hicann)
        self.marocco.manual_placement.on_hicann(pop_view_1, hicann_1)
        self.marocco.manual_placement.on_hicann(pop_1_view, hicann_2)
        self.marocco.manual_placement.on_hicann(pop_1_view_1, hicann_3)

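        # each view places size//2 neurons of neuron_size denmems each on a
        # single HICANN; if that exceeds the HICANN's denmem count, the
        # mapping has to fail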
        if neuron_size * size//2 > C.NeuronOnHICANN.enum_type.size:
            with self.assertRaises(RuntimeError):
                pynn.run(0)
                pynn.end()
            return

        pynn.run(0)
        pynn.end()

        results = self.load_results()

        for nrn in pop_view:
            placement_item, = results.placement.find(nrn)
            logical_neuron = placement_item.logical_neuron()
            for denmem in logical_neuron:
                self.assertEqual(hicann, denmem.toHICANNOnWafer())

        for nrn in pop_view_1:
            placement_item, = results.placement.find(nrn)
            logical_neuron = placement_item.logical_neuron()
            for denmem in logical_neuron:
                self.assertEqual(hicann_1, denmem.toHICANNOnWafer())

        for nrn in pop_1_view:
            placement_item, = results.placement.find(nrn)
            logical_neuron = placement_item.logical_neuron()
            for denmem in logical_neuron:
                self.assertEqual(hicann_2, denmem.toHICANNOnWafer())

        for nrn in pop_1_view_1:
            placement_item, = results.placement.find(nrn)
            logical_neuron = placement_item.logical_neuron()
            for denmem in logical_neuron:
                self.assertEqual(hicann_3, denmem.toHICANNOnWafer())

        for nrn in pop_auto_placement:
            placement_item, = results.placement.find(nrn)
            logical_neuron = placement_item.logical_neuron()
            for denmem in logical_neuron:
                self.assertIsNotNone(denmem.toHICANNOnWafer())
Example No. 6
    def test_popview_on_neuron(self):
        pynn.setup(marocco=self.marocco)

        pop = pynn.Population(4, pynn.IF_cond_exp, {})
        neuron_block = C.NeuronBlockOnWafer(C.NeuronBlockOnHICANN(3))
        neuron_block_1 = C.NeuronBlockOnWafer(C.NeuronBlockOnHICANN(2))
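        # assemble 2x2 logical neurons denmem by denmem: each .add() call
        # appends two denmems of one row, starting at the given column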
        logical_neuron = (LogicalNeuron.on(neuron_block)
                          .add(C.NeuronOnNeuronBlock(X(3), Y(0)), 2)
                          .add(C.NeuronOnNeuronBlock(X(3), Y(1)), 2)
                          .done())
        logical_neuron_1 = (LogicalNeuron.on(neuron_block_1)
                          .add(C.NeuronOnNeuronBlock(X(4), Y(0)), 2)
                          .add(C.NeuronOnNeuronBlock(X(4), Y(1)), 2)
                          .done())

        popview = pynn.PopulationView(pop, [0])
        popview_1 = pynn.PopulationView(pop, [2])
        popview_auto_placement = pynn.PopulationView(pop, [1, 3])
        self.marocco.manual_placement.on_neuron(popview, logical_neuron)
        self.marocco.manual_placement.on_neuron(popview_1, logical_neuron_1)

        pynn.run(0)
        pynn.end()

        results = self.load_results()

        placement_item, = results.placement.find(popview[0])
        self.assertEqual(logical_neuron, placement_item.logical_neuron())
        placement_item, = results.placement.find(popview_1[0])
        self.assertEqual(logical_neuron_1, placement_item.logical_neuron())
        for nrn in popview_auto_placement:
            placement_item, = results.placement.find(nrn)
            self.assertIsNotNone(placement_item.logical_neuron())
Example No. 7
    def test(self):

        import pyhmf as pynn
        from pymarocco import PyMarocco
        import pylogging
        import pyhalbe
        pyhalbe.Debug.change_loglevel(2)
        pylogging.set_loglevel(pylogging.get("marocco"),
                               pylogging.LogLevel.TRACE)
        pylogging.set_loglevel(pylogging.get("sthal"),
                               pylogging.LogLevel.DEBUG)

        marocco = PyMarocco()
        marocco.neuron_placement.default_neuron_size(4)

        pynn.setup(marocco=marocco)

        neuron1 = pynn.Population(1, pynn.IF_cond_exp)

        inh = pynn.Population(1, pynn.SpikeSourceArray, {'spike_times': [0]})
        exc = pynn.Population(1, pynn.SpikeSourceArray, {'spike_times': [0]})
        exc_2 = pynn.Population(1, pynn.SpikeSourceArray, {'spike_times': [0]})
        exc_3 = pynn.Population(1, pynn.SpikeSourceArray, {'spike_times': [0]})

        c_exc = pynn.FixedProbabilityConnector(p_connect=1.0, weights=1)

        proj1 = pynn.Projection(inh, neuron1, c_exc, target='excitatory')
        proj2 = pynn.Projection(exc, neuron1, c_exc, target='excitatory')
        proj3 = pynn.Projection(exc_2, neuron1, c_exc, target='excitatory')
        proj4 = pynn.Projection(exc_3, neuron1, c_exc, target='inhibitory')

        pynn.run(10000)
        pynn.end()
Example No. 8
    def __init__(self,
                 marocco,
                 linearsize,
                 dimension,
                 kbiasneurons,
                 nbiasneurons,
                 nsources,
                 ksources,
                 duplicates,
                 sourcerate,
                 model=pynn.IF_cond_exp):
        # size of the edge of the lattice
        self.linearsize = linearsize
        # dimension of the lattice
        self.dimension = dimension
        # number of bias neurons projecting onto each network neuron
        # (might be needed for scaling)
        self.kbiasneurons = kbiasneurons
        # total number of bias neurons, must be an integer multiple of k
        # should help synapse loss
        self.nbiasneurons = nbiasneurons
        # size of the noise network
        self.nsources = nsources
        # number of excitatory and inhibitory sources projecting onto each
        # neuron
        self.ksources = ksources
        # number of connections between neighboring neurons
        self.duplicates = duplicates
        self.model = model
        self.marocco = marocco

        pynn.setup(marocco=self.marocco)
Example No. 9
    def test_with_size(self):
        pynn.setup(marocco=self.marocco)
        default_size = 4
        self.marocco.neuron_placement.default_neuron_size(default_size)

        sizes = {}

        pop = pynn.Population(1, pynn.IF_cond_exp, {})
        sizes[pop] = default_size

        pop = pynn.Population(1, pynn.IF_cond_exp, {})
        self.marocco.manual_placement.with_size(pop, 2)
        sizes[pop] = 2

        pop = pynn.Population(1, pynn.IF_cond_exp, {})
        self.marocco.manual_placement.with_size(pop, 6)
        sizes[pop] = 6

        pynn.run(0)
        pynn.end()

        results = self.load_results()

        for pop, size in sizes.items():
            placement_item, = results.placement.find(pop[0])
            logical_neuron = placement_item.logical_neuron()
            self.assertEqual(size, logical_neuron.size())
Example No. 10
    def test_on_neuron_block(self, size):
        pynn.setup(marocco=self.marocco)
        neuron_size = 4
        self.marocco.neuron_placement.default_neuron_size(neuron_size)

        neuron_block = C.NeuronBlockOnWafer(C.NeuronBlockOnHICANN(3))
        pop = pynn.Population(size, pynn.IF_cond_exp, {})
        self.marocco.manual_placement.on_neuron_block(pop, neuron_block)

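        # the population needs neuron_size denmems per neuron; if it does not
        # fit into a single neuron block, the mapping is expected to fail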
        if neuron_size * size > C.NeuronOnNeuronBlock.enum_type.size:
            with self.assertRaises(RuntimeError):
                pynn.run(0)
                pynn.end()
            return

        pynn.run(0)
        pynn.end()

        results = self.load_results()

        for nrn in pop:
            placement_item, = results.placement.find(nrn)
            logical_neuron = placement_item.logical_neuron()
            self.assertEqual(neuron_size, logical_neuron.size())
            for denmem in logical_neuron:
                self.assertEqual(neuron_block, denmem.toNeuronBlockOnWafer())
Example No. 11
    def test_binomial_distribution(self):
        import pyhmf as pynn
        pynn.setup()

        rng = pynn.NativeRNG()
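        # with success probability 1.0 all 100 trials succeed,
        # so a binomial draw must always return 100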
        self.assertEqual(rng.next(
            distribution="binomial", parameters=[100, 1.]), 100)
Example No. 12
    def test_on_neurons(self, pop_size):
        pynn.setup(marocco=self.marocco)

        pop = pynn.Population(pop_size, pynn.IF_cond_exp, {})
        neuron_block = C.NeuronBlockOnWafer(C.NeuronBlockOnHICANN(3))
        logical_neurons = [
            (LogicalNeuron.on(neuron_block)
             .add(C.NeuronOnNeuronBlock(X(3), Y(0)), 2)
             .add(C.NeuronOnNeuronBlock(X(3), Y(1)), 2)
             .done()),
            (LogicalNeuron.on(neuron_block)
             .add(C.NeuronOnNeuronBlock(X(11), Y(0)), 2)
             .add(C.NeuronOnNeuronBlock(X(11), Y(1)), 2)
             .done()),
        ]
        self.marocco.manual_placement.on_neuron(pop, logical_neurons)

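        # exactly one logical neuron has to be given per bio neuron;
        # any size mismatch should be rejected during mapping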
        if pop_size != len(logical_neurons):
            with self.assertRaises(RuntimeError):
                pynn.run(0)
                pynn.end()
            return

        pynn.run(0)
        pynn.end()

        results = self.load_results()

        for nrn, logical_neuron in zip(pop, logical_neurons):
            placement_item, = results.placement.find(nrn)
            self.assertEqual(logical_neuron, placement_item.logical_neuron())
Example No. 13
    def __init__(self, N, prob, marocco, model=pynn.EIF_cond_exp_isfa_ista):
        self.N = N
        self.prob = prob
        self.model = model
        self.marocco = marocco

        pynn.setup(marocco=self.marocco)
Example No. 14
    def random_network(self):
        pynn.setup(marocco=self.marocco)

        NUM_POPS = random.randint(10, 100)
        POP_SIZE = random.randint(1, 100)
        PROJ_PROB = 0.2

        pops = [ pynn.Population(POP_SIZE, pynn.EIF_cond_exp_isfa_ista) for x in
                range(NUM_POPS) ]

        connector = pynn.AllToAllConnector(
                allow_self_connections=True,
                weights=1.)

        for src in pops:
            for trg in pops:
                target_type = 'inhibitory' if random.random() < 0.2 else 'excitatory'
                if random.random() < PROJ_PROB:
                    pynn.Projection(src, trg, connector, target=target_type)

        pynn.run(1)
        pynn.end()

        stats = self.marocco.getStats()
        print("python synapse loss: ", stats.getSynapseLoss())
Example No. 15
def build_network(num_pops, pop_size, marocco):
    from pymarocco import PyMarocco
    import pyhmf as pynn

    logging.info("num_pops: %d, pop_size: %d, total size: %d" %
                 (num_pops, pop_size, num_pops * pop_size))

    pynn.setup(marocco=marocco)

    pops = [
        pynn.Population(pop_size, pynn.EIF_cond_exp_isfa_ista)
        for x in range(num_pops)
    ]

    for idx, pop in enumerate(pops):
        connector = pynn.AllToAllConnector(allow_self_connections=True,
                                           weights=1.)

        # build ring like network topology
        pynn.Projection(pop,
                        pops[(idx + 1) % len(pops)],
                        connector,
                        target='excitatory')

        # add poisson stimulus
        source = pynn.Population(1, pynn.SpikeSourcePoisson, {'rate': 2})

        pynn.Projection(source, pop, connector, target='excitatory')

    pynn.run(1)
    pynn.end()

    stats = marocco.getStats()
    loss = float(stats.getSynapseLoss()) / stats.getSynapses()
    return (num_pops, pop_size, loss)
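
A hypothetical way to call build_network, reusing configuration seen in the other snippets; the backend and continue_despite_synapse_loss settings are assumptions for illustration, not part of the original example:

import logging

from pymarocco import PyMarocco

marocco = PyMarocco()
marocco.backend = PyMarocco.Without           # map only, do not talk to hardware
marocco.continue_despite_synapse_loss = True  # assumption: tolerate loss instead of aborting
num_pops, pop_size, loss = build_network(num_pops=10, pop_size=20, marocco=marocco)
logging.info("synapse loss fraction: %f", loss)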
Example No. 16
    def test_hw_merging_spl1_should_merge_some(self):
        """
        some DNCs shall be merged, but not all, because of syndriver
        requirements on each NB. 2 neurons will be placed (same HICANN).
        A fully connected network is built.
        This results in 8*2 = 16 synapses being routed to each neuron.
        With neuron size 4 and chain length 3 -> 12 synapses can be realised
        on each neuron. As a result at maximum 12 synapses shall be on the
        same L1Route. The merger tries to merge them and will fail, then spit
        it and merge 8 to each merger [3,5].

        The result is a better L1 utilisation compared to one-to-one mapping,
        2 instead of 8 routes, while staying within hardware constrains,
        compared to merge all (16 synapses requiring 4 drivers, 1 driver will
        be lost).
        """
        pynn.setup(marocco=self.marocco)
        neuron_size = 4
        self.marocco.neuron_placement.default_neuron_size(neuron_size)
        self.marocco.merger_routing.strategy(
            self.marocco.merger_routing.minimize_as_possible)
        # restrict to 3 drivers, so that this test is hardware agnostic
        self.marocco.synapse_routing.driver_chain_length(3)

        hicann = C.HICANNOnWafer(Enum(123))
        pops = []
        # All neuron blocks are occupied.
        for nb in range(C.NeuronBlockOnHICANN.end):
            pop = pynn.Population(2, pynn.IF_cond_exp, {})
            self.marocco.manual_placement.on_neuron_block(
                pop, C.NeuronBlockOnWafer(C.NeuronBlockOnHICANN(nb), hicann))
            pops.append(pop)

        for p in pops:
            for other_p in pops:
                pynn.Projection(p, other_p, pynn.AllToAllConnector(weights=1.))

        pynn.run(0)
        pynn.end()

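        # expected DNC merger per population: per the docstring, the 16 inputs
        # are split 8/8 onto DNC mergers 3 and 5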
        merged_dncs = [3, 3, 3, 3, 5, 5, 5, 5]

        results = self.load_results()

        for pop in pops:
            nrn = pop[0]
            placement_item, = results.placement.find(nrn)
            logical_neuron = placement_item.logical_neuron()
            self.assertEqual(neuron_size, logical_neuron.size())
            for denmem in logical_neuron:
                self.assertEqual(hicann, denmem.toHICANNOnWafer())
            address = placement_item.address()

            # some DNCs shall be merged.
            dnc = C.DNCMergerOnHICANN(merged_dncs[pop.euter_id()])
            self.assertEqual(hicann, address.toHICANNOnWafer())
            self.assertEqual(dnc, address.toDNCMergerOnHICANN())
            self.assertEqual(C.DNCMergerOnWafer(dnc, hicann),
                             address.toDNCMergerOnWafer())
Example No. 17
    def test_distribution_clip(self):
        import pyhmf as pynn
        pynn.setup()

        r = pynn.NativeRNG(0)
        d = pynn.RandomDistribution("uniform", [0.0, 25.0], r, (12.5, 12.5), "clip")
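        # clipping a uniform(0, 25) distribution to the degenerate boundaries
        # (12.5, 12.5) pins every sample to exactly 12.5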

        assert_array_equal(d.next(100), numpy.ones(100) * 12.5)
Example No. 18
    def __init__(self, N, K, L, marocco, model=pynn.EIF_cond_exp_isfa_ista):
        self.N = N
        self.K = K
        self.L = L
        self.model = model
        self.marocco = marocco

        pynn.setup(marocco=self.marocco)
Example No. 19
    def __init__(self, num_layers, conn_prob, neurons_per_layer, marocco, model=pynn.EIF_cond_exp_isfa_ista):
        self.neurons_per_layer = neurons_per_layer
        self.num_layers = num_layers 
        self.conn_prob = conn_prob
        self.model = model
        self.marocco = marocco

        pynn.setup(marocco=self.marocco)
    def test_creation(self, strategy):
        self.marocco.neuron_placement.default_placement_strategy(strategy)

        pynn.setup(marocco=self.marocco)

        self.network()

        self.assertTrue(1 == 1)
Example No. 21
    def __init__(self, Nvisible, Nhidden, marocco,
                 model=pynn.EIF_cond_exp_isfa_ista):
        self.Nvisible = Nvisible
        self.Nhidden = Nhidden
        self.model = model
        self.marocco = marocco

        pynn.setup(marocco=self.marocco)
Example No. 22
    def test_min_spl1_should_allow_external_input_on_same_chip(self):
        """
        Even when the rightmost neuron block / DNC merger is not reserved for external input, it
        should be possible to place external input on the same chip.
        """
        pynn.setup(marocco=self.marocco)
        neuron_size = 4
        self.marocco.neuron_placement.default_neuron_size(neuron_size)
        self.marocco.merger_routing.strategy(
            self.marocco.merger_routing.minimize_number_of_sending_repeaters)
        # Do not reserve rightmost neuron block / DNC merger for external input.
        self.marocco.neuron_placement.restrict_rightmost_neuron_blocks(False)

        hicann = C.HICANNOnWafer(C.Enum(123))
        pops = []
        # All but the first neuron block are occupied.
        for nb in range(1, C.NeuronBlockOnHICANN.end):
            pop = pynn.Population(1, pynn.IF_cond_exp, {})
            self.marocco.manual_placement.on_neuron_block(
                pop, C.NeuronBlockOnWafer(C.NeuronBlockOnHICANN(nb), hicann))
            pops.append(pop)
        in_pop = pynn.Population(1, pynn.SpikeSourceArray, {})
        self.marocco.manual_placement.on_hicann(in_pop, hicann)

        pynn.run(0)
        pynn.end()

        results = self.load_results()

        for pop in pops:
            nrn = pop[0]
            placement_item, = results.placement.find(nrn)
            logical_neuron = placement_item.logical_neuron()
            self.assertEqual(neuron_size, logical_neuron.size())
            for denmem in logical_neuron:
                self.assertEqual(hicann, denmem.toHICANNOnWafer())
            address = placement_item.address()

            # All used neuron blocks should be connected to a single DNC merger.
            dnc = C.DNCMergerOnHICANN(3)
            self.assertEqual(hicann, address.toHICANNOnWafer())
            self.assertEqual(dnc, address.toDNCMergerOnHICANN())
            self.assertEqual(C.DNCMergerOnWafer(dnc, hicann),
                             address.toDNCMergerOnWafer())

        nrn = in_pop[0]
        placement_item, = results.placement.find(nrn)
        logical_neuron = placement_item.logical_neuron()
        self.assertTrue(logical_neuron.is_external())
        address = placement_item.address()

        # External input should be on the leftmost DNC merger, since all other
        # mergers do not have direct access to a background generator.
        dnc = C.DNCMergerOnHICANN(0)
        self.assertEqual(hicann, address.toHICANNOnWafer())
        self.assertEqual(dnc, address.toDNCMergerOnHICANN())
        self.assertEqual(C.DNCMergerOnWafer(dnc, hicann),
                         address.toDNCMergerOnWafer())
Example No. 23
    def test_L1_detour_at_side_switch_usage(self):
        """
                                  [155]
                                   191
            [223]  224  225 x226x {227}

            test detour and predecessor settings at the edge of a wafer

        """

        pylogging.set_loglevel(pylogging.get("marocco"),
                               pylogging.LogLevel.TRACE)
        pylogging.set_loglevel(pylogging.get("Calibtic"),
                               pylogging.LogLevel.ERROR)

        self.marocco.persist = ''  # or add test suite TestWithRuntime?

        runtime = Runtime(self.marocco.default_wafer)
        pynn.setup(marocco=self.marocco, marocco_runtime=runtime)

        settings = pysthal.Settings.get()

        settings.synapse_switches.max_switches_per_column_per_side = 1
        settings.crossbar_switches.max_switches_per_row = 1

        source = pynn.Population(1, pynn.IF_cond_exp, {})
        target1 = pynn.Population(1, pynn.IF_cond_exp, {})
        target2 = pynn.Population(1, pynn.IF_cond_exp, {})

        proj = pynn.Projection(
            source, target1, pynn.AllToAllConnector(weights=1.))
        proj = pynn.Projection(
            source, target2, pynn.AllToAllConnector(weights=1.))

        source_hicann = C.HICANNOnWafer(Enum(227))
        target1_hicann = C.HICANNOnWafer(Enum(155))
        target2_hicann = C.HICANNOnWafer(Enum(225))

        self.marocco.manual_placement.on_hicann(source, source_hicann)
        self.marocco.manual_placement.on_hicann(target1, target1_hicann)
        self.marocco.manual_placement.on_hicann(target2, target2_hicann)

        disabled_hicanns = [226, 263]
        wafer = self.marocco.default_wafer
        self.marocco.defects.set(pyredman.Wafer(runtime.wafer().index()))
        for hicann in C.iter_all(C.HICANNOnWafer):
            if hicann.toEnum().value() in disabled_hicanns:
                self.marocco.defects.wafer().hicanns().disable(C.HICANNGlobal(hicann, wafer))

        pynn.run(0)
        pynn.end()

        for hicann in runtime.wafer().getAllocatedHicannCoordinates():
            h = runtime.wafer()[hicann]
            print(hicann, h.check())
            self.assertEqual(h.check(), "")
Example No. 24
    def test_projections(self):
        pynn.setup(marocco=self.marocco)

        target = pynn.Population(1, pynn.IF_cond_exp, {})
        pop_a = pynn.Population(2, pynn.SpikeSourceArray,
                                {'spike_times': [1.]})
        pop_b = pynn.Population(1, pynn.SpikeSourceArray,
                                {'spike_times': [2.]})
        pop_ab = pynn.Assembly()
        pop_ab += pop_a
        pop_ab += pop_b

        con = pynn.AllToAllConnector(weights=0.004)
        proj_a = pynn.Projection(pop_a, target, con)
        proj_b = pynn.Projection(pop_b, target, con)
        proj_ab = pynn.Projection(pop_ab, target, con)

        pynn.run(0)
        pynn.end()

        results = self.load_results()
        synapses = results.synapse_routing.synapses()

        items_a = synapses.find(proj_a)
        self.assertEqual(2, len(items_a))

        items_b = synapses.find(proj_b)
        self.assertEqual(1, len(items_b))

        items_ab = synapses.find(proj_ab)
        self.assertEqual(3, len(items_ab))

        def to_hw_synapses(items):
            hw_synapses = set()
            for item in items:
                synapse = item.hardware_synapse()
                if synapse:
                    hw_synapses.add(synapse)
            return hw_synapses

        hw_a = to_hw_synapses(items_a)
        hw_b = to_hw_synapses(items_b)
        hw_ab = to_hw_synapses(items_ab)
        self.assertTrue(hw_a.isdisjoint(hw_b))
        self.assertTrue(hw_a.isdisjoint(hw_ab))
        self.assertTrue(hw_b.isdisjoint(hw_ab))

        for source_neuron in pop_a:
            items = synapses.find(proj_ab, source_neuron, target[0])
            self.assertEqual(1, len(items))
            self.assertTrue(hw_ab.issuperset(to_hw_synapses(items)))

        for source_neuron in pop_b:
            items = synapses.find(proj_ab, source_neuron, target[0])
            self.assertEqual(1, len(items))
            self.assertTrue(hw_ab.issuperset(to_hw_synapses(items)))
Example No. 25
    def test_issue1565(self):
        # although there is only 1 synapse column per neuron (of size 2), a 2nd synapse is used
        self.marocco.neuron_placement.default_neuron_size(2)
        con = pynn.FixedProbabilityConnector(p_connect=1.0, weights=0.004)

        pynn.setup(marocco=self.marocco)
        pop1 = pynn.Population(10, pynn.IF_cond_exp, {})
        ipu1 = pynn.Population(2, pynn.SpikeSourceArray, {'spike_times': []})
        pro1 = pynn.Projection(ipu1, pop1, con, target='excitatory')
        pynn.run(0)
Example No. 26
    def setUp(self):
        super(MaroccoFixture, self).setUp()
        self.marocco = PyMarocco()
        self.marocco.backend = PyMarocco.Without
        self.marocco.calib_backend = PyMarocco.CalibBackend.Default
        self.marocco.defects.backend = Defects.Backend.Without
        self.marocco.merger_routing.strategy(
            self.marocco.merger_routing.minimize_number_of_sending_repeaters)

        sim.setup(marocco=self.marocco)
Example No. 27
    def test_min_spl1_is_nongreedy_when_pops_are_placed_to_nbs(self, nbs):
        """
        See above.  Instead of a single population placed to the HICANN, populations are placed to
        specific neuron blocks.
        """
        pynn.setup(marocco=self.marocco)
        neuron_size = 4
        self.marocco.neuron_placement.default_neuron_size(neuron_size)
        self.marocco.merger_routing.strategy(
            self.marocco.merger_routing.minimize_number_of_sending_repeaters)
        self.marocco.neuron_placement.restrict_rightmost_neuron_blocks(True)

        hicann = C.HICANNOnWafer(C.Enum(123))
        pops = []
        for nb in nbs:
            pop = pynn.Population(1, pynn.IF_cond_exp, {})
            self.marocco.manual_placement.on_neuron_block(
                pop, C.NeuronBlockOnWafer(C.NeuronBlockOnHICANN(nb), hicann))
            pops.append(pop)
        in_pop = pynn.Population(1, pynn.SpikeSourceArray, {})
        self.marocco.manual_placement.on_hicann(in_pop, hicann)

        pynn.run(0)
        pynn.end()

        results = self.load_results()

        for pop in pops:
            nrn = pop[0]
            placement_item, = results.placement.find(nrn)
            logical_neuron = placement_item.logical_neuron()
            self.assertEqual(neuron_size, logical_neuron.size())
            for denmem in logical_neuron:
                self.assertEqual(hicann, denmem.toHICANNOnWafer())
            address = placement_item.address()

            # All used neuron blocks should still be connected to a single DNC merger.
            dnc = C.DNCMergerOnHICANN(3)
            self.assertEqual(hicann, address.toHICANNOnWafer())
            self.assertEqual(dnc, address.toDNCMergerOnHICANN())
            self.assertEqual(C.DNCMergerOnWafer(dnc, hicann),
                             address.toDNCMergerOnWafer())

        nrn = in_pop[0]
        placement_item, = results.placement.find(nrn)
        logical_neuron = placement_item.logical_neuron()
        self.assertTrue(logical_neuron.is_external())
        address = placement_item.address()

        # External input should be on the rightmost DNC merger since that is tried first.
        dnc = C.DNCMergerOnHICANN(7)
        self.assertEqual(hicann, address.toHICANNOnWafer())
        self.assertEqual(dnc, address.toDNCMergerOnHICANN())
        self.assertEqual(C.DNCMergerOnWafer(dnc, hicann),
                         address.toDNCMergerOnWafer())
Example No. 28
    def test_TwoNeuron(self):
        if True:
            pynn.setup(marocco=self.marocco)

            # create neuron with v_rest below v_thresh
            source = pynn.Population(1, pynn.EIF_cond_exp_isfa_ista, {
                'v_rest': -50.,
                'v_thresh': -60.,
                'v_reset': -70.6,
            })

            N = 8  # number of target populations

            p = [
                pynn.Population(1, pynn.EIF_cond_exp_isfa_ista)
                for i in range(N)
            ]

            # place source on HICANN 0
            source_hicann = self.chip(0)
            self.marocco.manual_placement.on_hicann(source, source_hicann)

            # place targets on all HICANNs on same reticle but random neurons
            nrns = self.shuffle(255)
            for ii, pop in enumerate(p):
                hicann = HICANNGlobal(X(int(source_hicann.x()) + ii % 4),
                                      Y(int(source_hicann.y()) + ii // 4))
                self.marocco.manual_placement.on_hicann(pop, hicann)
                print(pop, hicann)

            connector = pynn.AllToAllConnector(allow_self_connections=True,
                                               weights=1.)

            store = []
            # connect source to targets
            for trg in p:
                proj = pynn.Projection(source,
                                       trg,
                                       connector,
                                       target='excitatory')
                weights = copy.deepcopy(proj.getWeights())
                store.append((proj, weights))

            # start simulation
            pynn.run(10)  # in ms
            pynn.end()

            # make sure we have no synapse loss
            self.assertEqual(0, self.marocco.stats.getSynapseLoss())

            # assert weights are the same (at least as long as we don't send
            # back the transformed digital weights)
            for proj, weights in store:
                self.assertEqual(self.marocco.stats.getWeights(proj), weights)
    def test_loop_modularity_nb(self):
        """tests to override the loop hook with NB handling"""
        class myPlacer(placer):
            def initialise(self):
                b = sorted(
                    self.m_neuron_blocks.access,
                    key=lambda nb: int(nb.toHICANNOnWafer().toEnum().value()))
                self.m_neuron_blocks.access = b

            def loop(self):
                print("removing the last NB")

                b = self.m_neuron_blocks.access  # use this or the following
                b = self.m_neuron_blocks.value()

                b = sorted(
                    b,
                    key=lambda nb: int(nb.toHICANNOnWafer().toEnum().value()))

                c = []
                for i in range(len(b) - 1):
                    c.append(b[i])

                # use access to set full vector
                self.m_neuron_blocks.access = c

                # or use the value() to access single elements
                for i in range(len(self.m_neuron_blocks.value())):
                    del (self.m_neuron_blocks.value()[0])
                for nb in c:
                    self.m_neuron_blocks.value().append(nb)

        marocco = self.marocco
        user_strat = myPlacer()

        marocco.neuron_placement.default_placement_strategy(user_strat)

        pynn.setup(marocco=marocco)

        self.network()

        result = self.load_results()
        hicann = C.HICANNOnWafer(Enum(42))
        nb = C.NeuronBlockOnHICANN(Enum(4))
        for pop in self.pops:
            for nrn in pop:
                placement_item, = result.placement.find(nrn)
                logical_neuron = placement_item.logical_neuron()
                for denmem in logical_neuron:
                    # all pops must be on different NBs
                    self.assertFalse(nb == denmem.toNeuronBlockOnHICANN()
                                     and hicann == denmem.toHICANNOnWafer())
            nb = denmem.toNeuronBlockOnHICANN()
            hicann = denmem.toHICANNOnWafer()
    def test_external_sources_projections(self, params):
        """
        An external source has multiple projections, so it would have to be
        split if it were not of size 1; unfortunately, users need to live
        with that.
        """
        nprojections = params[0]
        nsources = params[1]
        print((nprojections, nsources))
        pylogging.set_loglevel(pylogging.get("marocco"),
                               pylogging.LogLevel.TRACE)
        pylogging.set_loglevel(pylogging.get("Calibtic"),
                               pylogging.LogLevel.ERROR)

        pynn.setup(marocco=self.marocco)
        self.marocco.neuron_placement.default_neuron_size(4)

        # ensure a limited synapse driver chain length.
        self.marocco.synapse_routing.driver_chain_length(3)

        # we expect synapse loss, but we don't care, as the source can't be split.
        # we want this test not to throw exceptions.
        self.marocco.continue_despite_synapse_loss = True

        target = pynn.Population(1, pynn.IF_cond_exp, {})
        hicann = C.HICANNOnWafer(Enum(100))
        self.marocco.manual_placement.on_hicann(target, hicann)

        exsource = pynn.Population(nsources, pynn.SpikeSourcePoisson,
                                   {'rate': 1.})
        for i in range(nprojections):
            proj = pynn.Projection(exsource, target,
                                   pynn.AllToAllConnector(weights=1.))

        # access to proj so flake8 keeps silent
        proj.size

        pynn.run(0)
        pynn.end()

        results = self.load_results()
        synapses = results.synapse_routing.synapses()
        placement = results.placement

        for dnc in C.iter_all(C.DNCMergerOnWafer):
            PonDNC = placement.find(dnc)  # PopulationOnDNC
            if PonDNC:
                ## if driver requirements exceeded, only one source should be
                ## placed on the DNC, but synapse loss is still expected
                if (nprojections > 4):  # this number is just guessed
                    self.assertTrue(len(PonDNC) <= 1)
                else:
                    self.assertTrue(len(PonDNC) <= 12)
def main():
    """
    create small network with synapse loss.  The synapse loss happens due to a
    maximum syndriver chain length of 5 and only 4 denmems per neuron.  After
    mapping, the synapse loss per projection is evaluated and plotted for one
    projection.  The sum of lost synapses per projection is compared to the
    overall synapse loss returned by the mapping stats.
    """
    marocco = PyMarocco()
    marocco.neuron_placement.default_neuron_size(4)
    marocco.synapse_routing.driver_chain_length(5)
    marocco.continue_despite_synapse_loss = True
    marocco.calib_backend = PyMarocco.CalibBackend.Default
    marocco.neuron_placement.skip_hicanns_without_neuron_blacklisting(False)

    pynn.setup(marocco=marocco)

    neuron = pynn.Population(50, pynn.IF_cond_exp)
    source = pynn.Population(50, pynn.SpikeSourcePoisson, {'rate' : 2})

    connector = pynn.FixedProbabilityConnector(
            allow_self_connections=True,
            p_connect=0.5,
            weights=0.00425)
    proj_stim = pynn.Projection(source, neuron, connector, target="excitatory")
    proj_rec = pynn.Projection(neuron, neuron, connector, target="excitatory")

    pynn.run(1)

    print(marocco.stats)

    total_syns = 0
    lost_syns = 0
    for proj in [proj_stim, proj_rec]:
        l,t = projectionwise_synapse_loss(proj, marocco)
        total_syns += t
        lost_syns += l

    assert total_syns == marocco.stats.getSynapses()
    assert lost_syns == marocco.stats.getSynapseLoss()

    plot_projectionwise_synapse_loss(proj_stim, marocco)
    pynn.end()
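
projectionwise_synapse_loss and plot_projectionwise_synapse_loss are not shown in this listing. A minimal sketch of the former, under the assumption that proj.getWeights() and marocco.stats.getWeights(proj) return comparable arrays in which missing synapses are marked as NaN:

import numpy as np


def projectionwise_synapse_loss(proj, marocco):
    """Return (lost, total) synapse counts for one projection (sketch only)."""
    bio = np.array(proj.getWeights())                  # weights requested in the bio network
    mapped = np.array(marocco.stats.getWeights(proj))  # weights realised after mapping
    wanted = ~np.isnan(bio)
    lost = int(np.count_nonzero(wanted & np.isnan(mapped)))
    return lost, int(np.count_nonzero(wanted))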

# Mapping config
marocco = PyMarocco()
marocco.backend = PyMarocco.ESS # choose Executable System Specification instead of real hardware
marocco.calib_backend = PyMarocco.CalibBackend.Default
marocco.defects.backend = Defects.Backend.None
marocco.neuron_placement.skip_hicanns_without_neuron_blacklisting(False)
marocco.hicann_configurator = pysthal.HICANNConfigurator()
marocco.experiment_time_offset = 5.e-7 # can be low for ESS, as no repeater locking required
marocco.neuron_placement.default_neuron_size(4) # default number of hardware neuron circuits per pyNN neuron
marocco.persist = "nmpm1_adex_neuron_ess.bin"
marocco.param_trafo.use_big_capacitors = False

# set-up the simulator
pynn.setup(marocco=marocco)

neuron_count = 1 # size of the Population we will create

# Set the neuron model class
neuron_model = pynn.EIF_cond_exp_isfa_ista # an Adaptive Exponential I&F Neuron

neuron_parameters = {
 'a'          : 4.0,    # adaptation variable a in nS
 'b'          : 0.0805, # adaptation variable b in pA
 'cm'         : 0.281,  # membrane capacitance in nF
 'delta_T'    : 1.0,    # delta_T from the AdEx model in mV, determines the sharpness of spike initiation
 'e_rev_E'    : 0.0,    # excitatory reversal potential in mV
 'e_rev_I'    : -80.0,  # inhibitory reversal potential in mV
 'i_offset'   : 0.0,    # offset current
 'tau_m'      : 9.3667, # membrane time constant
}

neuron_parameters = {
    'cm': 0.2,
    'v_reset': -70.,
    'v_rest': -20.,
    'v_thresh': -10,
    'e_rev_I': -100.,
    'e_rev_E': 60.,
    'tau_m': 20.,
    'tau_refrac': 0.1,
    'tau_syn_E': 5.,
    'tau_syn_I': 5.,
}

marocco = PyMarocco()
marocco.default_wafer = C.Wafer(int(os.environ.get("WAFER", 33)))
runtime = Runtime(marocco.default_wafer)
pynn.setup(marocco=marocco, marocco_runtime=runtime)

#  ——— set up network ——————————————————————————————————————————————————————————

pop = pynn.Population(1, pynn.IF_cond_exp, neuron_parameters)

pop.record()
pop.record_v()

hicann = C.HICANNOnWafer(C.Enum(297))
marocco.manual_placement.on_hicann(pop, hicann)

connector = pynn.AllToAllConnector(weights=1)

duration = 1500.0
def run_mapping(calib_dir, output_dir, wafer, hicann, skip_neurons, params):
    """
    :type hicann: HICANNOnWafer
    :param params: dictionary containing neuron parameters
    :param skip_neurons: number of non-functional dummy neurons to insert
    """

    from pymarocco import PyMarocco
    from pymarocco.results import Marocco
    from pymarocco.coordinates import BioNeuron
    import pyhmf as pynn
    import pysthal

    logger = setup_logger()

    marocco = PyMarocco()
    marocco.neuron_placement.default_neuron_size(
        utils.get_nested(params, "neuron.size", default=4))
    marocco.neuron_placement.restrict_rightmost_neuron_blocks(True)
    marocco.neuron_placement.minimize_number_of_sending_repeaters(False)
    marocco.backend = PyMarocco.None
    marocco.calib_backend = PyMarocco.XML
    marocco.calib_path = calib_dir
    marocco.param_trafo.use_big_capacitors = False
    marocco.persist = os.path.join(output_dir, "marocco.xml.gz")
    marocco.wafer_cfg = os.path.join(output_dir, "wafer_cfg.bin")
    marocco.default_wafer = wafer

    # FIXME: remove?
    marocco.param_trafo.alpha_v = 1000.0
    marocco.param_trafo.shift_v = 0.0

    pynn.setup(marocco=marocco)

    synaptic_input = {}
    for input_type, input_params in params["synaptic_input"].iteritems():
        if not utils.get_nested(input_params, "enabled", default=True):
            logger.info(
                "skipping disabled {!r} synaptic input".format(input_type))
            continue

        spike_times = utils.get_nested(
            input_params, "spike_times", default=None)
        if spike_times:
            start = spike_times["start"]
            stop = spike_times["stop"]
            step = spike_times["step"]
            spike_times = np.arange(start, stop, step)
            input_pop_model = pynn.SpikeSourceArray
            input_pop_params = {"spike_times": spike_times}
        else:
            raise NotImplementedError(
                "unknown config for {!r} synaptic input".format(input_type))

        logger.info(
            ("{!r} synaptic input will come from "
             "{} with parameters {!r}").format(
                input_type, input_pop_model.__name__, input_pop_params))
        synaptic_input[input_type] = pynn.Population(
            1, input_pop_model, input_pop_params)

    neuron_params = utils.get_nested(params, "neuron.parameters")
    neuron_model = getattr(pynn, utils.get_nested(
        params, "neuron.model", default="IF_cond_exp"))

    logger.info(
        "target population is {} neuron with parameters {!r}".format(
            neuron_model.__name__, neuron_params))

    # Force marocco to give us a different neuron by inserting
    # `Neuron_Number - 1` dummy neurons.
    populations = []
    for ii in range(0, skip_neurons + 1):
        populations.append(pynn.Population(
            1, neuron_model, neuron_params))
        marocco.manual_placement.on_hicann(populations[-1], hicann)
    target_pop = populations[-1]

    for input_type, input_pop in synaptic_input.iteritems():
        multiplicity = utils.get_nested(
            params, "synaptic_input", input_type, "multiplicity",
            default=1)
        assert multiplicity >= 1
        weight = utils.get_nested(
            params, "synaptic_input", input_type, "weight")
        con = pynn.AllToAllConnector(weights=weight)
        logger.info(
            ("connecting {!r} synaptic input "
             "to target population with weight {} "
             "via {} projections").format(
                 input_type, weight, multiplicity))
        for _ in xrange(multiplicity):
            pynn.Projection(input_pop, target_pop, con, target=input_type)

    pynn.run(params["duration"])
    pynn.end()

    wafer_cfg = pysthal.Wafer()
    wafer_cfg.load(marocco.wafer_cfg)
    results = Marocco.from_file(marocco.persist)
    return (BioNeuron(target_pop[0]), results, wafer_cfg)
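
A hypothetical invocation of run_mapping; the params layout is inferred from the utils.get_nested calls above (assuming dotted keys resolve against nested dicts), C stands for the coordinate module used in the other snippets, and all concrete paths and values are made up for illustration:

params = {
    "duration": 1000.0,
    "neuron": {
        "size": 4,
        "model": "IF_cond_exp",
        "parameters": {"tau_m": 20.0, "v_thresh": -10.0},
    },
    "synaptic_input": {
        "excitatory": {
            "weight": 0.004,
            "multiplicity": 1,
            "spike_times": {"start": 10.0, "stop": 500.0, "step": 10.0},
        },
    },
}

bio_neuron, results, wafer_cfg = run_mapping(
    calib_dir="/path/to/calib-xml",
    output_dir="/tmp/mapping-out",
    wafer=C.Wafer(33),
    hicann=C.HICANNOnWafer(C.Enum(297)),
    skip_neurons=0,
    params=params)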