Example #1
    def read_plastic_synaptic_data(
            self, post_vertex_slice, n_synapse_types, pp_size, pp_data,
            fp_size, fp_data):
        # pylint: disable=too-many-arguments
        n_rows = len(fp_size)

        n_synapse_type_bits = get_n_bits(n_synapse_types)
        n_neuron_id_bits = get_n_bits(post_vertex_slice.n_atoms)
        neuron_id_mask = (1 << n_neuron_id_bits) - 1

        data_fixed = numpy.concatenate([
            fp_data[i].view(dtype="uint16")[0:fp_size[i]]
            for i in range(n_rows)])
        pp_without_headers = [
            row.view(dtype="uint8")[self._n_header_bytes:] for row in pp_data]
        synapse_structure = self.__timing_dependence.synaptic_structure
        n_half_words = synapse_structure.get_n_half_words_per_connection()
        half_word = synapse_structure.get_weight_half_word()
        pp_half_words = numpy.concatenate([
            pp[:size * n_half_words * 2].view("uint16")[
                half_word::n_half_words]
            for pp, size in zip(pp_without_headers, fp_size)])

        connections = numpy.zeros(
            data_fixed.size, dtype=self.NUMPY_CONNECTORS_DTYPE)
        connections["source"] = numpy.concatenate(
            [numpy.repeat(i, fp_size[i]) for i in range(len(fp_size))])
        connections["target"] = (
            (data_fixed & neuron_id_mask) + post_vertex_slice.lo_atom)
        connections["weight"] = pp_half_words
        connections["delay"] = (data_fixed >> (
            n_neuron_id_bits + n_synapse_type_bits)) & 0xF
        connections["delay"][connections["delay"] == 0] = 16
        return connections
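
The half-word layout decoded above is | 4-bit delay | synapse type | neuron id |, with a stored delay of 0 read back as 16 (the on-chip field wraps modulo 16). A minimal standalone sketch of the decode; get_n_bits here is a stand-in assumed to behave like the real helper (ceiling of log2):

    import math

    def get_n_bits(n_values):
        # assumed behaviour of the real helper
        if n_values == 0:
            return 0
        if n_values == 1:
            return 1
        return int(math.ceil(math.log2(n_values)))

    n_neuron_id_bits = get_n_bits(100)     # 100 neurons per core -> 7 bits
    n_synapse_type_bits = get_n_bits(2)    # excitatory/inhibitory -> 1 bit
    neuron_id_mask = (1 << n_neuron_id_bits) - 1

    # half-word for target neuron 42, synapse type 1, stored delay 0
    half_word = (1 << n_neuron_id_bits) | 42
    target = half_word & neuron_id_mask                               # 42
    delay = (half_word >> (n_neuron_id_bits + n_synapse_type_bits)) & 0xF
    delay = delay or 16                                               # 0 reads as 16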
Example #2
    def get_static_synaptic_data(self, connections, connection_row_indices,
                                 n_rows, post_vertex_slice, n_synapse_types):
        # pylint: disable=too-many-arguments
        n_neuron_id_bits = get_n_bits(post_vertex_slice.n_atoms)
        neuron_id_mask = (1 << n_neuron_id_bits) - 1
        n_synapse_type_bits = get_n_bits(n_synapse_types)

        fixed_fixed = (
            ((numpy.rint(numpy.abs(connections["weight"])).astype("uint32")
              & 0xFFFF) << 16) |
            ((connections["delay"].astype("uint32") & 0xF) <<
             (n_neuron_id_bits + n_synapse_type_bits)) |
            (connections["synapse_type"].astype("uint32") << n_neuron_id_bits)
            | ((connections["target"] - post_vertex_slice.lo_atom)
               & neuron_id_mask))
        fixed_fixed_rows = self.convert_per_connection_data_to_rows(
            connection_row_indices, n_rows,
            fixed_fixed.view(dtype="uint8").reshape((-1, 4)))
        ff_size = self.get_n_items(fixed_fixed_rows, 4)
        if self.__pad_to_length is not None:
            # Pad the data
            fixed_fixed_rows = self._pad_row(fixed_fixed_rows, 4)
        ff_data = [fixed_row.view("uint32") for fixed_row in fixed_fixed_rows]

        return ff_data, ff_size
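
A round-trip sketch of the 32-bit static word assembled above, assuming the layout the masks imply: | 16-bit weight | 4-bit delay | synapse type | neuron id | (example bit widths only):

    import numpy

    n_neuron_id_bits, n_synapse_type_bits = 8, 1
    neuron_id_mask = (1 << n_neuron_id_bits) - 1
    weight, delay, syn_type, neuron_id = 1000, 3, 1, 200

    word = numpy.uint32(
        ((weight & 0xFFFF) << 16) |
        ((delay & 0xF) << (n_neuron_id_bits + n_synapse_type_bits)) |
        (syn_type << n_neuron_id_bits) |
        (neuron_id & neuron_id_mask))

    assert (word >> 16) & 0xFFFF == weight
    assert (word >> (n_neuron_id_bits + n_synapse_type_bits)) & 0xF == delay
    assert word & neuron_id_mask == neuron_id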
Example #3
    @property  # callers read self.__get_max_delay without calling it
    def __get_max_delay(self):
        if self.__max_delay is not None:
            return self.__max_delay

        # Find the maximum delay from incoming synapses
        app_vertex = self._governed_app_vertex
        max_delay_ms = 0
        for proj in app_vertex.incoming_projections:
            s_info = proj._synapse_information
            proj_max_delay = s_info.synapse_dynamics.get_delay_maximum(
                s_info.connector, s_info)
            max_delay_ms = max(max_delay_ms, proj_max_delay)
        max_delay_steps = math.ceil(max_delay_ms / machine_time_step_ms())
        max_delay_bits = get_n_bits(max_delay_steps)

        # Find the maximum possible delay
        n_atom_bits = get_n_bits(
            min(app_vertex.get_max_atoms_per_core(), app_vertex.n_atoms))
        n_synapse_bits = get_n_bits(
            app_vertex.neuron_impl.get_n_synapse_types())
        n_delay_bits = MAX_RING_BUFFER_BITS - (n_atom_bits + n_synapse_bits)

        # Pick the smallest between the two, so that not too many bits are used
        final_n_delay_bits = min(n_delay_bits, max_delay_bits)
        self.__max_delay = 2**final_n_delay_bits
        if self.__allow_delay_extension is None:
            self.__allow_delay_extension = max_delay_bits > final_n_delay_bits
        return self.__max_delay
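
Worked through with example numbers, so that the two limits are concrete (a sketch; the ring-buffer budget below is illustrative, not the library constant):

    import math

    MAX_RING_BUFFER_BITS = 14                  # illustrative budget
    n_atom_bits = math.ceil(math.log2(256))    # 256 neurons per core -> 8
    n_synapse_bits = math.ceil(math.log2(2))   # 2 synapse types -> 1
    n_delay_bits = MAX_RING_BUFFER_BITS - (n_atom_bits + n_synapse_bits)  # 5

    max_delay_steps = 100                      # longest requested delay
    max_delay_bits = math.ceil(math.log2(max_delay_steps))               # 7

    final_n_delay_bits = min(n_delay_bits, max_delay_bits)               # 5
    max_delay = 2 ** final_n_delay_bits                                  # 32 steps
    allow_delay_extension = max_delay_bits > final_n_delay_bits          # True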
Example #4
    def read_plastic_synaptic_data(
            self, post_vertex_slice, n_synapse_types, pp_size, pp_data,
            fp_size, fp_data):
        # pylint: disable=too-many-arguments
        n_rows = len(fp_size)

        n_synapse_type_bits = get_n_bits(n_synapse_types)
        n_neuron_id_bits = get_n_bits(post_vertex_slice.n_atoms)
        neuron_id_mask = (1 << n_neuron_id_bits) - 1

        data_fixed = numpy.concatenate([
            fp_data[i].view(dtype="uint16")[0:fp_size[i]]
            for i in range(n_rows)])
        pp_without_headers = [
            row.view(dtype="uint8")[self._n_header_bytes:] for row in pp_data]
        synapse_structure = self._timing_dependence.synaptic_structure
        n_half_words = synapse_structure.get_n_half_words_per_connection()
        half_word = synapse_structure.get_weight_half_word()
        pp_half_words = numpy.concatenate([
            pp[:size * n_half_words * 2].view("uint16")[
                half_word::n_half_words]
            for pp, size in zip(pp_without_headers, fp_size)])

        connections = numpy.zeros(
            data_fixed.size, dtype=self.NUMPY_CONNECTORS_DTYPE)
        connections["source"] = numpy.concatenate(
            [numpy.repeat(i, fp_size[i]) for i in range(len(fp_size))])
        connections["target"] = (
            (data_fixed & neuron_id_mask) + post_vertex_slice.lo_atom)
        connections["weight"] = pp_half_words
        connections["delay"] = (data_fixed >> (
            n_neuron_id_bits + n_synapse_type_bits)) & 0xF
        connections["delay"][connections["delay"] == 0] = 16
        return connections
Example #5
    def _write_synapse_parameters(self, spec, ring_buffer_shifts):
        """ Write the synapse parameters data region

        :param ~data_specification.DataSpecificationGenerator spec:
            The data specification to write to
        :param list(int) ring_buffer_shifts:
            The shifts to apply to convert ring buffer values to S1615 values
        """
        # Reserve space
        spec.reserve_memory_region(
            region=self._synapse_regions.synapse_params,
            size=self._app_vertex.get_synapse_params_size(),
            label='SynapseParams',
            reference=self._synapse_references.synapse_params)

        # Get values
        n_neurons = self._vertex_slice.n_atoms
        n_synapse_types = self._app_vertex.neuron_impl.get_n_synapse_types()
        max_delay = self._app_vertex.splitter.max_support_delay()

        # Write synapse parameters
        spec.switch_write_focus(self._synapse_regions.synapse_params)
        spec.write_value(n_neurons)
        spec.write_value(n_synapse_types)
        spec.write_value(get_n_bits(n_neurons))
        spec.write_value(get_n_bits(n_synapse_types))
        spec.write_value(get_n_bits(max_delay))
        spec.write_value(int(self._app_vertex.drop_late_spikes))
        spec.write_value(self._app_vertex.incoming_spike_buffer_size)
        spec.write_array(ring_buffer_shifts)
Example #6
    def get_static_synaptic_data(
            self, connections, connection_row_indices, n_rows,
            post_vertex_slice, n_synapse_types):
        # pylint: disable=too-many-arguments
        n_neuron_id_bits = get_n_bits(post_vertex_slice.n_atoms)
        neuron_id_mask = (1 << n_neuron_id_bits) - 1
        n_synapse_type_bits = get_n_bits(n_synapse_types)

        fixed_fixed = (
            ((numpy.rint(numpy.abs(connections["weight"])).astype("uint32") &
              0xFFFF) << 16) |
            ((connections["delay"].astype("uint32") & 0xF) <<
             (n_neuron_id_bits + n_synapse_type_bits)) |
            (connections["synapse_type"].astype(
                "uint32") << n_neuron_id_bits) |
            ((connections["target"] - post_vertex_slice.lo_atom) &
             neuron_id_mask))
        fixed_fixed_rows = self.convert_per_connection_data_to_rows(
            connection_row_indices, n_rows,
            fixed_fixed.view(dtype="uint8").reshape((-1, 4)))
        ff_size = self.get_n_items(fixed_fixed_rows, 4)
        if self._pad_to_length is not None:
            # Pad the data
            fixed_fixed_rows = self._pad_row(fixed_fixed_rows, 4)
        ff_data = [fixed_row.view("uint32") for fixed_row in fixed_fixed_rows]

        return ff_data, ff_size
Example #7
    def get_plastic_synaptic_data(self, connections, connection_row_indices,
                                  n_rows, post_vertex_slice, n_synapse_types,
                                  max_n_synapses):
        # pylint: disable=too-many-arguments
        n_synapse_type_bits = get_n_bits(n_synapse_types)
        n_neuron_id_bits = get_n_bits(post_vertex_slice.n_atoms)
        neuron_id_mask = (1 << n_neuron_id_bits) - 1

        # Get the fixed data
        fixed_plastic = (
            (connections["delay"].astype("uint16") <<
             (n_neuron_id_bits + n_synapse_type_bits)) |
            (connections["synapse_type"].astype("uint16") << n_neuron_id_bits)
            | ((connections["target"].astype("uint16") -
                post_vertex_slice.lo_atom) & neuron_id_mask))
        fixed_plastic_rows = self.convert_per_connection_data_to_rows(
            connection_row_indices, n_rows,
            fixed_plastic.view(dtype="uint8").reshape((-1, 2)), max_n_synapses)
        fp_size = self.get_n_items(fixed_plastic_rows, BYTES_PER_SHORT)
        if self.__pad_to_length is not None:
            # Pad the data
            fixed_plastic_rows = self._pad_row(fixed_plastic_rows,
                                               BYTES_PER_SHORT)
        fp_data = self.get_words(fixed_plastic_rows)

        # Get the plastic data by inserting the weight into the half-word
        # specified by the synapse structure
        synapse_structure = self.__timing_dependence.synaptic_structure
        n_half_words = synapse_structure.get_n_half_words_per_connection()
        half_word = synapse_structure.get_weight_half_word()
        plastic_plastic = numpy.zeros(len(connections) * n_half_words,
                                      dtype="uint16")
        plastic_plastic[half_word::n_half_words] = \
            numpy.rint(numpy.abs(connections["weight"])).astype("uint16")

        # Convert the plastic data into groups of bytes per connection and
        # then into rows
        plastic_plastic = plastic_plastic.view(dtype="uint8").reshape(
            (-1, n_half_words * BYTES_PER_SHORT))
        plastic_plastic_row_data = self.convert_per_connection_data_to_rows(
            connection_row_indices, n_rows, plastic_plastic, max_n_synapses)

        # pp_size = fp_size in words => fp_size * no_bytes / 4 (bytes)
        if self.__pad_to_length is not None:
            # Pad the data
            plastic_plastic_row_data = self._pad_row(
                plastic_plastic_row_data, n_half_words * BYTES_PER_SHORT)
        plastic_headers = numpy.zeros((n_rows, self._n_header_bytes),
                                      dtype="uint8")
        plastic_plastic_rows = [
            numpy.concatenate(
                (plastic_headers[i], plastic_plastic_row_data[i]))
            for i in range(n_rows)
        ]
        pp_size = self.get_n_items(plastic_plastic_rows, BYTES_PER_WORD)
        pp_data = self.get_words(plastic_plastic_rows)

        return fp_data, pp_data, fp_size, pp_size
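
The strided assignment above drops one weight half-word per connection into the slot named by the synapse structure, leaving the other plastic half-words zeroed. A small numpy sketch with two half-words per connection and the weight in slot 0 (assumed example values):

    import numpy

    weights = numpy.array([10, 20, 30], dtype="uint16")
    n_half_words, half_word = 2, 0

    plastic_plastic = numpy.zeros(len(weights) * n_half_words, dtype="uint16")
    plastic_plastic[half_word::n_half_words] = weights
    # -> [10, 0, 20, 0, 30, 0]

    # regroup as one row of bytes per connection, as the code above does
    per_connection = plastic_plastic.view("uint8").reshape((-1, n_half_words * 2))
    assert per_connection.shape == (3, 4)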
Example #8
    def __write_synapse_expander_data_spec(self, spec, generator_data,
                                           weight_scales):
        """ Write the data spec for the synapse expander

        :param ~.DataSpecificationGenerator spec:
            The specification to write to
        :param list(GeneratorData) generator_data: The data to be written
        :param weight_scales: scaling of weights on each synapse
        :type weight_scales: list(int or float)
        """
        if not generator_data:
            if self.__connection_builder_ref is not None:
                # If there is a reference, we still need a region to create
                spec.reserve_memory_region(
                    region=self.__connection_builder_region,
                    size=4,
                    label="ConnectorBuilderRegion",
                    reference=self.__connection_builder_ref)
            return

        n_bytes = (SYNAPSES_BASE_GENERATOR_SDRAM_USAGE_IN_BYTES +
                   (self.__n_synapse_types * DataType.U3232.size))
        for data in generator_data:
            n_bytes += data.size

        spec.reserve_memory_region(region=self.__connection_builder_region,
                                   size=n_bytes,
                                   label="ConnectorBuilderRegion",
                                   reference=self.__connection_builder_ref)
        spec.switch_write_focus(self.__connection_builder_region)

        spec.write_value(self.__synaptic_matrix_region)
        spec.write_value(len(generator_data))
        spec.write_value(self.__post_vertex_slice.lo_atom)
        spec.write_value(self.__post_vertex_slice.n_atoms)
        spec.write_value(self.__n_synapse_types)
        spec.write_value(get_n_bits(self.__n_synapse_types))
        n_neuron_id_bits = get_n_bits(self.__post_vertex_slice.n_atoms)
        spec.write_value(n_neuron_id_bits)
        for w in weight_scales:
            # if the weights are high enough and the population size large
            # enough, then weight_scales < 1 will result in a zero scale
            # if converted to an int, so we use U3232 here instead (as there
            # can be scales larger than U1616.max in conductance-based models)
            dtype = DataType.U3232
            spec.write_value(data=min(w, dtype.max), data_type=dtype)

        items = list()
        for data in generator_data:
            items.extend(data.gen_data)
        spec.write_array(numpy.concatenate(items))
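
The comment on the loop above is the reason for U3232: truncating a sub-unit weight scale to an integer zeroes it, while a 32.32 fixed-point value keeps the fraction and still has headroom above U1616.max. A sketch of just that encoding (the shift mirrors what the U3232 name implies, not the library's writer):

    weight_scale = 0.25
    as_int = int(weight_scale)                         # 0: all weights lost
    FRACTIONAL_BITS = 32                               # the ".32" in U3232
    as_u3232 = round(weight_scale * (1 << FRACTIONAL_BITS))
    assert as_u3232 / (1 << FRACTIONAL_BITS) == 0.25   # scale preserved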
Example #9
    def generate_data_specification(self, spec, placement, routing_info,
                                    data_n_time_steps):
        """
        :param spec: The data specification to write to (injected)
        :param placement: (injected)
        :param routing_info: (injected)
        :param data_n_time_steps: (injected)
        """
        # pylint: disable=arguments-differ
        rec_regions = self._app_vertex.neuron_recorder.get_region_sizes(
            self.vertex_slice, data_n_time_steps)
        self._write_common_data_spec(spec, rec_regions)

        self._write_neuron_data_spec(spec, routing_info,
                                     self.__ring_buffer_shifts)

        # Write information about SDRAM
        n_neurons = self._vertex_slice.n_atoms
        n_synapse_types = self._app_vertex.neuron_impl.get_n_synapse_types()
        spec.reserve_memory_region(region=self.REGIONS.SDRAM_EDGE_PARAMS.value,
                                   size=SDRAM_PARAMS_SIZE,
                                   label="SDRAM Params")
        spec.switch_write_focus(self.REGIONS.SDRAM_EDGE_PARAMS.value)
        spec.write_value(
            self.__sdram_partition.get_sdram_base_address_for(self))
        spec.write_value(self.n_bytes_for_transfer)
        spec.write_value(n_neurons)
        spec.write_value(n_synapse_types)
        spec.write_value(len(self.__sdram_partition.pre_vertices))
        spec.write_value(get_n_bits(n_neurons))

        # End the writing of this specification:
        spec.end_specification()
Example #10
    @property  # read as self.n_bytes_for_transfer in the previous example
    def n_bytes_for_transfer(self):
        n_bytes = (2**get_n_bits(self.n_target_neurons) *
                   self.n_target_synapse_types * self.N_BYTES_PER_INPUT)
        # May need to add some padding if not a round number of words
        extra_bytes = n_bytes % BYTES_PER_WORD
        if extra_bytes:
            n_bytes += BYTES_PER_WORD - extra_bytes
        return n_bytes
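
The rounding here is plain word alignment on top of a power-of-two neuron count. A sketch of the same arithmetic (values assumed):

    BYTES_PER_WORD = 4

    def pad_to_word(n_bytes):
        extra = n_bytes % BYTES_PER_WORD
        return n_bytes + (BYTES_PER_WORD - extra if extra else 0)

    assert pad_to_word(512) == 512    # 128 ids * 2 types * 2 bytes: already round
    assert pad_to_word(510) == 512    # odd sizes pad up to a whole word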
Example #11
    def read_static_synaptic_data(self, post_vertex_slice, n_synapse_types,
                                  ff_size, ff_data):

        n_synapse_type_bits = get_n_bits(n_synapse_types)
        n_neuron_id_bits = get_n_bits(post_vertex_slice.n_atoms)
        neuron_id_mask = (1 << n_neuron_id_bits) - 1

        data = numpy.concatenate(ff_data)
        connections = numpy.zeros(data.size, dtype=self.NUMPY_CONNECTORS_DTYPE)
        connections["source"] = numpy.concatenate(
            [numpy.repeat(i, ff_size[i]) for i in range(len(ff_size))])
        connections["target"] = ((data & neuron_id_mask) +
                                 post_vertex_slice.lo_atom)
        connections["weight"] = (data >> 16) & 0xFFFF
        connections["delay"] = (data & 0xFFFF) >> (n_neuron_id_bits +
                                                   n_synapse_type_bits)

        return connections
Example #12
    def read_static_synaptic_data(
            self, post_vertex_slice, n_synapse_types, ff_size, ff_data):

        n_synapse_type_bits = get_n_bits(n_synapse_types)
        n_neuron_id_bits = get_n_bits(post_vertex_slice.n_atoms)
        neuron_id_mask = (1 << n_neuron_id_bits) - 1

        data = numpy.concatenate(ff_data)
        connections = numpy.zeros(data.size, dtype=self.NUMPY_CONNECTORS_DTYPE)
        connections["source"] = numpy.concatenate(
            [numpy.repeat(i, ff_size[i]) for i in range(len(ff_size))])
        connections["target"] = (
            (data & neuron_id_mask) + post_vertex_slice.lo_atom)
        connections["weight"] = (data >> 16) & 0xFFFF
        connections["delay"] = (data >> (n_neuron_id_bits +
                                         n_synapse_type_bits)) & 0xF
        connections["delay"][connections["delay"] == 0] = 16

        return connections
Example #13
    def _write_neuron_parameters(self, spec, ring_buffer_shifts):
        """ Write the neuron parameters region

        :param ~data_specification.DataSpecificationGenerator spec:
            The data specification to write to
        :param list(int) ring_buffer_shifts:
            The shifts to apply to convert ring buffer values to S1615 values
        """
        self._app_vertex.set_has_run()

        # pylint: disable=too-many-arguments
        n_atoms = self._vertex_slice.n_atoms
        spec.comment("\nWriting Neuron Parameters for {} Neurons:\n".format(
            n_atoms))

        # Reserve and switch to the memory region
        params_size = self._app_vertex.get_sdram_usage_for_neuron_params(
            self._vertex_slice)
        spec.reserve_memory_region(
            region=self._neuron_regions.neuron_params, size=params_size,
            label='NeuronParams')
        spec.switch_write_focus(self._neuron_regions.neuron_params)

        # store the tdma data here for this slice.
        data = self._app_vertex.generate_tdma_data_specification_data(
            self._slice_index)
        spec.write_array(data)

        # Write whether the key is to be used, and then the key, or 0 if it
        # isn't to be used
        if self._key is None:
            spec.write_value(data=0)
            spec.write_value(data=0)
        else:
            spec.write_value(data=1)
            spec.write_value(data=self._key)

        # Write the number of neurons in the block:
        spec.write_value(data=n_atoms)
        spec.write_value(data=2**get_n_bits(n_atoms))

        # Write the ring buffer data
        # This is only the synapse types that need a ring buffer i.e. not
        # those stored in synapse dynamics
        n_synapse_types = self._app_vertex.neuron_impl.get_n_synapse_types()
        spec.write_value(n_synapse_types)
        spec.write_array(ring_buffer_shifts)

        # Write the neuron parameters
        neuron_data = self._app_vertex.neuron_impl.get_data(
            self._app_vertex.parameters, self._app_vertex.state_variables,
            self._vertex_slice)
        spec.write_array(neuron_data)
Example #14
    def get_plastic_synaptic_data(
            self, connections, connection_row_indices, n_rows,
            post_vertex_slice, n_synapse_types):
        # pylint: disable=too-many-arguments
        n_synapse_type_bits = get_n_bits(n_synapse_types)
        n_neuron_id_bits = get_n_bits(post_vertex_slice.n_atoms)
        neuron_id_mask = (1 << n_neuron_id_bits) - 1

        dendritic_delays = (
            connections["delay"] * self._dendritic_delay_fraction)
        axonal_delays = (
            connections["delay"] * (1.0 - self._dendritic_delay_fraction))

        # Get the fixed data
        fixed_plastic = (
            ((dendritic_delays.astype("uint16") & 0xF) <<
             (n_neuron_id_bits + n_synapse_type_bits)) |
            ((axonal_delays.astype("uint16") & 0xF) <<
             (4 + n_neuron_id_bits + n_synapse_type_bits)) |
            (connections["synapse_type"].astype("uint16")
             << n_neuron_id_bits) |
            ((connections["target"].astype("uint16") -
              post_vertex_slice.lo_atom) & neuron_id_mask))
        fixed_plastic_rows = self.convert_per_connection_data_to_rows(
            connection_row_indices, n_rows,
            fixed_plastic.view(dtype="uint8").reshape((-1, 2)))
        fp_size = self.get_n_items(fixed_plastic_rows, 2)
        if self._pad_to_length is not None:
            # Pad the data
            fixed_plastic_rows = self._pad_row(fixed_plastic_rows, 2)
        fp_data = self.get_words(fixed_plastic_rows)

        # Get the plastic data by inserting the weight into the half-word
        # specified by the synapse structure
        synapse_structure = self._timing_dependence.synaptic_structure
        n_half_words = synapse_structure.get_n_half_words_per_connection()
        half_word = synapse_structure.get_weight_half_word()
        plastic_plastic = numpy.zeros(
            len(connections) * n_half_words, dtype="uint16")
        plastic_plastic[half_word::n_half_words] = \
            numpy.rint(numpy.abs(connections["weight"])).astype("uint16")

        # Convert the plastic data into groups of bytes per connection and
        # then into rows
        plastic_plastic = plastic_plastic.view(dtype="uint8").reshape(
            (-1, n_half_words * 2))
        plastic_plastic_row_data = self.convert_per_connection_data_to_rows(
            connection_row_indices, n_rows, plastic_plastic)

        # pp_size = fp_size in words => fp_size * no_bytes / 4 (bytes)
        if self._pad_to_length is not None:
            # Pad the data
            plastic_plastic_row_data = self._pad_row(
                plastic_plastic_row_data, n_half_words * 2)
        plastic_headers = numpy.zeros(
            (n_rows, self._n_header_bytes), dtype="uint8")
        plastic_plastic_rows = [
            numpy.concatenate((
                plastic_headers[i], plastic_plastic_row_data[i]))
            for i in range(n_rows)]
        pp_size = self.get_n_items(plastic_plastic_rows, 4)
        pp_data = self.get_words(plastic_plastic_rows)

        return fp_data, pp_data, fp_size, pp_size
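
Here the fixed-plastic entry carries two 4-bit delays, the axonal nibble packed directly above the dendritic one. A sketch of the split and pack with a 0.5 dendritic fraction (bit widths assumed):

    n_neuron_id_bits, n_synapse_type_bits = 7, 1
    shift = n_neuron_id_bits + n_synapse_type_bits       # 8

    delay, dendritic_fraction = 6, 0.5
    dendritic = int(delay * dendritic_fraction)          # 3
    axonal = int(delay * (1.0 - dendritic_fraction))     # 3

    entry = ((dendritic & 0xF) << shift) | ((axonal & 0xF) << (4 + shift))
    assert (entry >> shift) & 0xF == dendritic
    assert (entry >> (4 + shift)) & 0xF == axonal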
Example #15
    def create_machine_vertices(self, resource_tracker, machine_graph):
        app_vertex = self._governed_app_vertex
        label = app_vertex.label
        constraints = get_remaining_constraints(app_vertex)

        # Structural plasticity can only be run on a single synapse core
        if (isinstance(app_vertex.synapse_dynamics,
                       AbstractSynapseDynamicsStructural)
                and self.__n_synapse_vertices != 1):
            raise SynapticConfigurationException(
                "The current implementation of structural plasticity can only"
                " be run on a single synapse core.  Please ensure the number"
                " of synapse cores is set to 1")

        # Do some checks to make sure everything is likely to fit
        atoms_per_core = min(app_vertex.get_max_atoms_per_core(),
                             app_vertex.n_atoms)
        n_synapse_types = app_vertex.neuron_impl.get_n_synapse_types()
        if (get_n_bits(atoms_per_core) + get_n_bits(n_synapse_types) +
                get_n_bits(self.__get_max_delay)) > MAX_RING_BUFFER_BITS:
            raise SynapticConfigurationException(
                "The combination of the number of neurons per core ({}), "
                "the number of synapse types ({}), and the maximum delay per "
                "core ({}) will require too much DTCM.  Please reduce one or "
                "more of these values.".format(atoms_per_core, n_synapse_types,
                                               self.__get_max_delay))

        self.__neuron_vertices = list()
        self.__synapse_vertices = list()
        self.__synapse_verts_by_neuron = defaultdict(list)

        incoming_direct_poisson = self.__handle_poisson_sources(
            label, machine_graph)

        # Work out the ring buffer shifts based on all incoming things
        rb_shifts = app_vertex.get_ring_buffer_shifts(
            app_vertex.incoming_projections)
        weight_scales = app_vertex.get_weight_scales(rb_shifts)

        # Get resources for synapses
        independent_synapse_sdram = self.__independent_synapse_sdram()
        proj_dependent_sdram = self.__proj_dependent_synapse_sdram(
            app_vertex.incoming_projections)

        for index, vertex_slice in enumerate(self.__get_fixed_slices()):

            # Find the maximum number of cores on any chip available
            max_crs = resource_tracker.get_maximum_cores_available_on_a_chip()
            if max_crs < (self.__n_synapse_vertices + 1):
                raise ConfigurationException(
                    "No chips remaining with enough cores for"
                    f" {self.__n_synapse_vertices} synapse cores and a neuron"
                    " core")
            max_crs -= self.__n_synapse_vertices + 1

            # Create the neuron vertex for the slice
            neuron_vertex, neuron_resources = self.__add_neuron_core(
                vertex_slice, label, index, rb_shifts, weight_scales,
                machine_graph, constraints)

            # Keep track of synapse vertices for each neuron vertex and
            # resources used by each core (neuron core is added later)
            synapse_vertices = list()
            self.__synapse_verts_by_neuron[neuron_vertex] = synapse_vertices
            all_resources = []

            # Add the first vertex
            synapse_references, syn_label = self.__add_lead_synapse_core(
                vertex_slice, independent_synapse_sdram, proj_dependent_sdram,
                label, rb_shifts, weight_scales, all_resources, machine_graph,
                synapse_vertices, neuron_vertex, constraints)

            # Do the remaining synapse cores
            for i in range(1, self.__n_synapse_vertices):
                self.__add_shared_synapse_core(syn_label, i, vertex_slice,
                                               synapse_references,
                                               all_resources, machine_graph,
                                               synapse_vertices, neuron_vertex,
                                               constraints)

            # Add resources for Poisson vertices up to core limit
            poisson_vertices = incoming_direct_poisson[vertex_slice]
            remaining_poisson_vertices = list()
            added_poisson_vertices = list()
            for poisson_vertex, poisson_edge in poisson_vertices:
                if max_crs <= 0:
                    remaining_poisson_vertices.append(poisson_vertex)
                    self.__add_poisson_multicast(poisson_vertex,
                                                 synapse_vertices,
                                                 machine_graph, poisson_edge)
                else:
                    all_resources.append(
                        (poisson_vertex.resources_required, []))
                    added_poisson_vertices.append(poisson_vertex)
                    max_crs -= 1

            if remaining_poisson_vertices:
                logger.warning(
                    f"Vertex {label} is using multicast for"
                    f" {len(remaining_poisson_vertices)} one-to-one Poisson"
                    " sources as not enough cores exist to put them on the"
                    " same chip")

            # Create an SDRAM edge partition
            sdram_label = "SDRAM {} Synapses-->Neurons:{}-{}".format(
                label, vertex_slice.lo_atom, vertex_slice.hi_atom)
            source_vertices = added_poisson_vertices + synapse_vertices
            sdram_partition = SourceSegmentedSDRAMMachinePartition(
                SYNAPSE_SDRAM_PARTITION_ID, sdram_label, source_vertices)
            machine_graph.add_outgoing_edge_partition(sdram_partition)
            neuron_vertex.set_sdram_partition(sdram_partition)

            # Add SDRAM edges for synapse vertices
            for source_vertex in source_vertices:
                edge_label = "SDRAM {}-->{}".format(source_vertex.label,
                                                    neuron_vertex.label)
                machine_graph.add_edge(
                    SDRAMMachineEdge(source_vertex, neuron_vertex, edge_label),
                    SYNAPSE_SDRAM_PARTITION_ID)
                source_vertex.set_sdram_partition(sdram_partition)

            # Add SDRAM edge requirements to the neuron SDRAM, as the resource
            # tracker will otherwise try to add another core for it
            extra_sdram = MultiRegionSDRAM()
            extra_sdram.merge(neuron_resources.sdram)
            extra_sdram.add_cost(
                len(extra_sdram.regions) + 1,
                sdram_partition.total_sdram_requirements())
            neuron_resources_plus = ResourceContainer(
                sdram=extra_sdram,
                dtcm=neuron_resources.dtcm,
                cpu_cycles=neuron_resources.cpu_cycles,
                iptags=neuron_resources.iptags,
                reverse_iptags=neuron_resources.reverse_iptags)
            all_resources.append((neuron_resources_plus, constraints))

            # Allocate all the resources to ensure they all fit
            resource_tracker.allocate_constrained_group_resources(
                all_resources)

        return True
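
The guard near the top of this method is the same bit budget that __get_max_delay works from. With example numbers (the budget is illustrative, not the library constant):

    import math

    def get_n_bits(n):
        # assumed behaviour of the real helper
        return 0 if n == 0 else max(1, math.ceil(math.log2(n)))

    MAX_RING_BUFFER_BITS = 14
    atoms_per_core, n_synapse_types, max_delay = 256, 2, 32
    total = (get_n_bits(atoms_per_core) + get_n_bits(n_synapse_types) +
             get_n_bits(max_delay))            # 8 + 1 + 5 = 14: just fits
    assert total <= MAX_RING_BUFFER_BITS       # 512 atoms per core would not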