Code example #1
0
    def add_database_socket_address(database_notify_host,
                                    database_notify_port_num,
                                    database_ack_port_num):
        """ Register a socket address for database-ready notifications.

        Any value not supplied is filled in from the [Database] section of
        the configuration; a wildcard notify host ("0.0.0.0") is rewritten
        to "localhost".

        :param database_notify_host:
            Host to talk to tell that the database (and application) is ready.
        :type database_notify_host: str or None
        :param database_notify_port_num:
            Port to talk to tell that the database (and application) is ready.
        :type database_notify_port_num: int or None
        :param database_ack_port_num:
            Port on which to listen for an acknowledgement that the
            simulation should start.
        :type database_ack_port_num: int or None
        """
        # Fill in configuration defaults for anything the caller omitted
        if database_notify_port_num is None:
            database_notify_port_num = get_config_int(
                "Database", "notify_port")
        if database_notify_host is None:
            database_notify_host = get_config_str(
                "Database", "notify_hostname")
        elif database_notify_host == "0.0.0.0":
            # A wildcard bind address cannot be connected to; use loopback
            database_notify_host = "localhost"
        if database_ack_port_num is None:
            database_ack_port_num = get_config_int("Database", "listen_port")

        # Build the socket address used by the notification interface and
        # register it with the external-device plugin manager.
        SpynnakerExternalDevicePluginManager.add_socket_address(
            SocketAddress(
                listen_port=database_ack_port_num,
                notify_host_name=database_notify_host,
                notify_port_no=database_notify_port_num))
Code example #2
0
    def __init__(self, arms=default_parameters['arms'],
                 reward_delay=default_parameters['reward_delay'],
                 reward_based=default_parameters['reward_based'],
                 rate_on=default_parameters['rate_on'],
                 rate_off=default_parameters['rate_off'],
                 stochastic=default_parameters['stochastic'],
                 constant_input=default_parameters['constant_input'],
                 constraints=default_parameters['constraints'],
                 label=default_parameters['label'],
                 incoming_spike_buffer_size=default_parameters[
                     'incoming_spike_buffer_size'],
                 simulation_duration_ms=default_parameters['duration'],
                 rand_seed=default_parameters['random_seed']):
        """ Create a Bandit application vertex backed by a single
            BanditMachineVertex covering all of its neurons.
        """
        # **NOTE** n_neurons currently ignored - width and height will be
        # specified as additional parameters, forcing their product to be
        # duplicated in n_neurons seems pointless

        self._label = label

        # Pass in variables
        self._arms = arms

        # One neuron per arm
        self._no_arms = len(arms)
        self._n_neurons = self._no_arms
        self._rand_seed = rand_seed

        self._reward_delay = reward_delay
        self._reward_based = reward_based

        self._rate_on = rate_on
        self._rate_off = rate_off
        self._stochastic = stochastic
        self._constant_input = constant_input

        # used to define size of recording region:
        # 4 bytes per (simulated) second of runtime
        self._recording_size = int((simulation_duration_ms / 1000.) * 4)

        resources_required = (
            self.BANDIT_REGION_BYTES + self.BASE_ARMS_REGION_BYTES +
            self._recording_size)

        vertex_slice = Slice(0, self._n_neurons - 1)

        # Superclasses
        super(Bandit, self).__init__(
            BanditMachineVertex(
                vertex_slice, resources_required, constraints, label, self,
                arms, reward_delay, reward_based, rate_on, rate_off,
                stochastic, constant_input, incoming_spike_buffer_size,
                simulation_duration_ms, rand_seed),
            label=label, constraints=constraints)

        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        SimplePopulationSettable.__init__(self)
        AbstractChangableAfterRun.__init__(self)
        AbstractAcceptsIncomingSynapses.__init__(self)
        self._change_requires_mapping = True
        # Fix: previously _incoming_spike_buffer_size was only assigned when
        # the argument was None, so reading the attribute after passing an
        # explicit buffer size raised AttributeError.
        if incoming_spike_buffer_size is None:
            self._incoming_spike_buffer_size = get_config_int(
                "Simulation", "incoming_spike_buffer_size")
        else:
            self._incoming_spike_buffer_size = incoming_spike_buffer_size
Code example #3
0
    def _write_sdram_edge_spec(self, spec):
        """ Write the SDRAM-edge parameters region of the specification.

        :param DataSpecificationGenerator spec:
            The generator of the specification to write
        """
        send_size = self.__sdram_partition.get_sdram_size_of_region_for(self)
        region = self.REGIONS.SDRAM_EDGE_PARAMS.value
        spec.reserve_memory_region(
            region=region, size=SDRAM_PARAMS_SIZE, label="SDRAM Params")
        spec.switch_write_focus(region)
        # Parameters written in order: base address of our SDRAM region,
        # size of the send region, configured transfer overhead (clocks).
        spec.write_value(
            self.__sdram_partition.get_sdram_base_address_for(self))
        spec.write_value(send_size)
        spec.write_value(get_config_int(
            "Simulation", "transfer_overhead_clocks"))
Code example #4
0
    def _make_chip(self, chip_info, machine):
        """ Creates a chip from a ChipSummaryInfo structure.

        :param ChipSummaryInfo chip_info:
            The ChipSummaryInfo structure to create the chip from
        :return: The created chip
        :rtype: ~spinn_machine.Chip
        """
        x, y = chip_info.x, chip_info.y

        # Collect the set of unusable cores, starting from any explicitly
        # ignored ones; core 0 is never scanned here.
        n_cores = min(chip_info.n_cores, Machine.max_cores_per_chip())
        down_cores = self._ignore_cores_map.get((x, y), None)
        core_states = chip_info.core_states
        for core in range(1, n_cores):
            state = core_states[core]
            if state != CPUState.IDLE:
                self._report_ignore(
                    "Not using core {}, {}, {} in state {}", x, y, core,
                    state)
                if down_cores is None:
                    down_cores = set()
                down_cores.add(core)

        # Router for this chip
        router = self._make_router(chip_info, machine)

        # SDRAM for this chip, clamped to the configured maximum (if any)
        sdram_size = chip_info.largest_free_sdram_block
        max_sdram_size = get_config_int(
            "Machine", "max_sdram_allowed_per_chip")
        if max_sdram_size is not None and sdram_size > max_sdram_size:
            sdram_size = max_sdram_size

        return Chip(
            x=x,
            y=y,
            n_processors=n_cores,
            router=router,
            sdram=SDRAM(size=sdram_size),
            ip_address=chip_info.ethernet_ip_address,
            nearest_ethernet_x=chip_info.nearest_ethernet_x,
            nearest_ethernet_y=chip_info.nearest_ethernet_y,
            down_cores=down_cores,
            parent_link=chip_info.parent_link)
Code example #5
0
    def __init__(self,
                 rate_on=default_parameters['rate_on'],
                 rate_off=default_parameters['rate_off'],
                 pop_size=default_parameters['pop_size'],
                 prob_command=default_parameters['prob_command'],
                 prob_in_change=default_parameters['prob_in_change'],
                 time_period=default_parameters['time_period'],
                 stochastic=default_parameters['stochastic'],
                 reward=default_parameters['reward'],
                 constraints=default_parameters['constraints'],
                 label=default_parameters['label'],
                 incoming_spike_buffer_size=default_parameters[
                     'incoming_spike_buffer_size'],
                 simulation_duration_ms=default_parameters['duration'],
                 rand_seed=default_parameters['random_seed']):
        """ Create a Recall application vertex backed by a single
            RecallMachineVertex covering all of its neurons.
        """
        # **NOTE** n_neurons currently ignored - width and height will be
        # specified as additional parameters, forcing their product to be
        # duplicated in n_neurons seems pointless

        self._label = label

        # Pass in variables
        self._rate_on = rate_on
        self._rate_off = rate_off
        self._stochastic = stochastic
        self._reward = reward
        self._pop_size = pop_size
        self._prob_command = prob_command
        self._prob_in_change = prob_in_change

        # Four sub-populations of pop_size neurons each
        self._n_neurons = pop_size * 4
        self._rand_seed = rand_seed

        self._time_period = time_period

        # used to define size of recording region:
        # 4 bytes per (simulated) second of runtime
        self._recording_size = int((simulation_duration_ms / 1000.) * 4)

        # technically as using OneAppOneMachine this is not necessary?
        resources_required = (
            self.RECALL_REGION_BYTES + self.DATA_REGION_BYTES +
            self._recording_size)

        vertex_slice = Slice(0, self._n_neurons - 1)

        # Superclasses
        super(Recall, self).__init__(
            RecallMachineVertex(
                vertex_slice, resources_required, constraints, label, self,
                rate_on, rate_off, pop_size, prob_command, prob_in_change,
                time_period, stochastic, reward, incoming_spike_buffer_size,
                simulation_duration_ms, rand_seed),
            label=label, constraints=constraints)

        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        SimplePopulationSettable.__init__(self)
        AbstractChangableAfterRun.__init__(self)
        AbstractAcceptsIncomingSynapses.__init__(self)
        self._change_requires_mapping = True
        # Fix: previously _incoming_spike_buffer_size was only assigned when
        # the argument was None, so reading the attribute after passing an
        # explicit buffer size raised AttributeError.
        if incoming_spike_buffer_size is None:
            self._incoming_spike_buffer_size = get_config_int(
                "Simulation", "incoming_spike_buffer_size")
        else:
            self._incoming_spike_buffer_size = incoming_spike_buffer_size
Code example #6
0
File: logic.py  Project: SpiNNakerManchester/SpiNNGym
    def __init__(self,
                 truth_table,
                 input_sequence,
                 rate_on=default_parameters['rate_on'],
                 rate_off=default_parameters['rate_off'],
                 score_delay=default_parameters['score_delay'],
                 stochastic=default_parameters['stochastic'],
                 constraints=default_parameters['constraints'],
                 label=default_parameters['label'],
                 incoming_spike_buffer_size=default_parameters[
                     'incoming_spike_buffer_size'],
                 simulation_duration_ms=default_parameters['duration'],
                 rand_seed=default_parameters['random_seed']):
        """ Create a Logic application vertex backed by a single
            LogicMachineVertex covering all of its neurons.
        """
        # **NOTE** n_neurons currently ignored - width and height will be
        # specified as additional parameters, forcing their product to be
        # duplicated in n_neurons seems pointless

        self._label = label

        # Pass in variables
        self._truth_table = truth_table
        self._rate_on = rate_on
        self._rate_off = rate_off
        self._stochastic = stochastic
        self._input_sequence = input_sequence
        self._no_inputs = len(input_sequence)
        # The truth table must have one entry per combination of inputs,
        # i.e. len(truth_table) == 2 ** no_inputs
        if self._no_inputs != numpy.log2(len(self._truth_table)):
            # NOTE(review): this raises and immediately swallows the error,
            # printing it and continuing construction with an inconsistent
            # table - it likely should propagate; confirm before changing.
            try:
                raise Bad_Table('table and input sequence are not compatible')
            except Bad_Table as e:
                print("ERROR: ", e)

        self._n_neurons = self._no_inputs
        self._rand_seed = rand_seed

        self._score_delay = score_delay

        # used to define size of recording region:
        # 4 bytes per (simulated) second of runtime
        self._recording_size = int((simulation_duration_ms / 1000.) * 4)

        # (static) resources required
        # technically as using OneAppOneMachine this is not necessary?
        resources_required = (self.LOGIC_REGION_BYTES +
                              self.BASE_DATA_REGION_BYTES +
                              self._recording_size)

        vertex_slice = Slice(0, self._n_neurons - 1)

        # Superclasses
        super(Logic, self).__init__(
            LogicMachineVertex(
                vertex_slice, resources_required, constraints, label, self,
                truth_table, input_sequence, rate_on, rate_off, score_delay,
                stochastic, incoming_spike_buffer_size,
                simulation_duration_ms, rand_seed),
            label=label, constraints=constraints)

        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        SimplePopulationSettable.__init__(self)
        AbstractChangableAfterRun.__init__(self)
        AbstractAcceptsIncomingSynapses.__init__(self)
        self._change_requires_mapping = True
        # Fix: previously _incoming_spike_buffer_size was only assigned when
        # the argument was None, so reading the attribute after passing an
        # explicit buffer size raised AttributeError.
        if incoming_spike_buffer_size is None:
            self._incoming_spike_buffer_size = get_config_int(
                "Simulation", "incoming_spike_buffer_size")
        else:
            self._incoming_spike_buffer_size = incoming_spike_buffer_size
Code example #7
0
    def activate_live_output_for(population,
                                 database_notify_host=None,
                                 database_notify_port_num=None,
                                 database_ack_port_num=None,
                                 port=None,
                                 host=None,
                                 tag=None,
                                 strip_sdp=True,
                                 use_prefix=False,
                                 key_prefix=None,
                                 prefix_type=None,
                                 message_type=EIEIOType.KEY_32_BIT,
                                 right_shift=0,
                                 payload_as_time_stamps=True,
                                 notify=True,
                                 use_payload_prefix=True,
                                 payload_prefix=None,
                                 payload_right_shift=0,
                                 number_of_packets_sent_per_time_step=0):
        """ Output the spikes from a given population from SpiNNaker as they\
            occur in the simulation.

        :param ~spynnaker.pyNN.models.populations.Population population:
            The population to activate the live output for
        :param str database_notify_host:
            The hostname for the device which is listening to the database
            notification.
        :param int database_ack_port_num:
            The port number to which an external device will acknowledge that
            they have finished reading the database and are ready for it to
            start execution
        :param int database_notify_port_num:
            The port number to which an external device will receive the
            database is ready command
        :param key_prefix: the prefix to be applied to the key
        :type key_prefix: int or None
        :param ~spinnman.messages.eieio.EIEIOPrefix prefix_type:
            if the prefix type is 32 bit or 16 bit
        :param ~spinnman.messages.eieio.EIEIOType message_type:
            If the message is a EIEIO command message, or an EIEIO data
            message with 16 bit or 32 bit keys.
        :param bool payload_as_time_stamps:
        :param int right_shift:
        :param bool use_payload_prefix:
        :param bool notify:
            Whether to register the database socket address (see below)
        :param payload_prefix:
        :type payload_prefix: int or None
        :param int payload_right_shift:
        :param int number_of_packets_sent_per_time_step:
        :param int port:
            The UDP port to which the live spikes will be sent. If not
            specified, the port will be taken from the "live_spike_port"
            parameter in the "Recording" section of the sPyNNaker
            configuration file.
        :param str host:
            The host name or IP address to which the live spikes will be
            sent. If not specified, the host will be taken from the
            "live_spike_host" parameter in the "Recording" section of the
            sPyNNaker configuration file.
        :param int tag:
            The IP tag to be used for the spikes. If not specified, one will
            be automatically assigned
        :param bool strip_sdp:
            Determines if the SDP headers will be stripped from the
            transmitted packet.
        :param bool use_prefix:
            Determines if the spike packet will contain a common prefix for
            the spikes
        """
        # pylint: disable=too-many-arguments, too-many-locals, protected-access
        # get default params if none set
        if port is None:
            port = get_config_int("Recording", "live_spike_port")
        if host is None:
            host = get_config_str("Recording", "live_spike_host")

        # add new edge and vertex if required to SpiNNaker graph;
        # the gatherer is always attached on the spike partition
        SpynnakerExternalDevicePluginManager.update_live_packet_gather_tracker(
            population._vertex,
            "LiveSpikeReceiver",
            port,
            host,
            tag,
            strip_sdp,
            use_prefix,
            key_prefix,
            prefix_type,
            message_type,
            right_shift,
            payload_as_time_stamps,
            use_payload_prefix,
            payload_prefix,
            payload_right_shift,
            number_of_packets_sent_per_time_step,
            partition_ids=[SPIKE_PARTITION_ID])

        # optionally register where the database-ready notification goes
        if notify:
            SpynnakerExternalDevicePluginManager.add_database_socket_address(
                database_notify_host, database_notify_port_num,
                database_ack_port_num)
Code example #8
0
    def __init__(self,
                 constraints=default_parameters['constraints'],
                 encoding=default_parameters['encoding'],
                 time_increment=default_parameters['time_increment'],
                 pole_length=default_parameters['pole_length'],
                 pole_angle=default_parameters['pole_angle'],
                 pole2_length=default_parameters['pole2_length'],
                 pole2_angle=default_parameters['pole2_angle'],
                 reward_based=default_parameters['reward_based'],
                 force_increments=default_parameters['force_increments'],
                 max_firing_rate=default_parameters['max_firing_rate'],
                 number_of_bins=default_parameters['number_of_bins'],
                 central=default_parameters['central'],
                 rand_seed=default_parameters['rand_seed'],
                 bin_overlap=default_parameters['bin_overlap'],
                 tau_force=default_parameters['tau_force'],
                 label=default_parameters['label'],
                 incoming_spike_buffer_size=default_parameters[
                     'incoming_spike_buffer_size'],
                 simulation_duration_ms=default_parameters['duration']):
        """ Create a DoublePendulum application vertex backed by a single
            DoublePendulumMachineVertex covering all of its neurons.
        """
        # **NOTE** n_neurons currently ignored - width and height will be
        # specified as additional parameters, forcing their product to be
        # duplicated in n_neurons seems pointless

        self._label = label

        self._encoding = encoding

        # Pass in variables
        self._pole_length = pole_length
        self._pole_angle = pole_angle
        self._pole2_length = pole2_length
        self._pole2_angle = pole2_angle

        self._force_increments = force_increments
        # for rate based it's only 1 neuron per metric
        # (position, angle, velocity of both)
        self._n_neurons = 6 * number_of_bins

        self._time_increment = time_increment
        self._reward_based = reward_based

        self._max_firing_rate = max_firing_rate
        self._number_of_bins = number_of_bins
        self._central = central
        self._rand_seed = rand_seed
        self._bin_overlap = bin_overlap
        self._tau_force = tau_force

        # used to define size of recording region:
        # 4 bytes per (simulated) second of runtime
        self._recording_size = int((simulation_duration_ms / 1000.) * 4)

        # technically as using OneAppOneMachine this is not necessary?
        resources_required = (self.PENDULUM_REGION_BYTES +
                              self.BASE_DATA_REGION_BYTES +
                              self._recording_size)

        vertex_slice = Slice(0, self._n_neurons - 1)

        # Superclasses
        super(DoublePendulum, self).__init__(
            DoublePendulumMachineVertex(
                vertex_slice, resources_required, constraints, label, self,
                encoding, time_increment, pole_length, pole_angle,
                pole2_length, pole2_angle, reward_based, force_increments,
                max_firing_rate, number_of_bins, central, bin_overlap,
                tau_force, incoming_spike_buffer_size,
                simulation_duration_ms, rand_seed),
            label=label, constraints=constraints)

        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        SimplePopulationSettable.__init__(self)
        AbstractChangableAfterRun.__init__(self)
        AbstractAcceptsIncomingSynapses.__init__(self)
        self._change_requires_mapping = True
        # Fix: previously _incoming_spike_buffer_size was only assigned when
        # the argument was None, so reading the attribute after passing an
        # explicit buffer size raised AttributeError.
        if incoming_spike_buffer_size is None:
            self._incoming_spike_buffer_size = get_config_int(
                "Simulation", "incoming_spike_buffer_size")
        else:
            self._incoming_spike_buffer_size = incoming_spike_buffer_size
Code example #9
0
    def __init__(self,
                 n_neurons,
                 constraints,
                 label,
                 seed,
                 max_atoms_per_core,
                 model,
                 rate=None,
                 start=None,
                 duration=None,
                 rates=None,
                 starts=None,
                 durations=None,
                 max_rate=None,
                 splitter=None):
        """
        :param int n_neurons:
        :param constraints:
        :type constraints:
            iterable(~pacman.model.constraints.AbstractConstraint)
        :param str label:
        :param float seed:
        :param int max_atoms_per_core:
        :param ~spynnaker.pyNN.models.spike_source.SpikeSourcePoisson model:
        :param iterable(float) rate:
            single rate per neuron; mutually exclusive with ``rates``
        :param iterable(int) start:
            single start per neuron; mutually exclusive with ``starts``
        :param iterable(int) duration:
            single duration per neuron; mutually exclusive with ``durations``
        :param rates: variable rates, one list for all neurons or one per
            neuron; mutually exclusive with ``rate``
        :param starts: start times matching ``rates``
        :param durations: durations matching ``rates``
        :param max_rate: maximum rate used for sizing; computed from the
            rates if not given
        :param splitter:
        :type splitter:
            ~pacman.model.partitioner_splitters.abstract_splitters.AbstractSplitterCommon
        """
        # pylint: disable=too-many-arguments
        super().__init__(label, constraints, max_atoms_per_core, splitter)

        # atoms params
        self.__n_atoms = self.round_n_atoms(n_neurons, "n_neurons")
        self.__model_name = "SpikeSourcePoisson"
        self.__model = model
        self.__seed = seed
        self.__kiss_seed = dict()
        self.__n_subvertices = 0
        self.__n_data_specs = 0

        # check for changes parameters
        self.__change_requires_mapping = True

        # Prepare for recording, and to get spikes
        self.__spike_recorder = MultiSpikeRecorder()

        # Check for disallowed pairs of parameters
        if (rates is not None) and (rate is not None):
            raise Exception("Exactly one of rate and rates can be specified")
        if (starts is not None) and (start is not None):
            raise Exception("Exactly one of start and starts can be specified")
        if (durations is not None) and (duration is not None):
            raise Exception(
                "Exactly one of duration and durations can be specified")
        if rate is None and rates is None:
            raise Exception("One of rate or rates must be specified")

        # Normalise the parameters: everything becomes either a single numpy
        # array (same values for all neurons) or a list of numpy arrays
        # (one per neuron)
        self.__is_variable_rate = rates is not None
        if rates is None:
            if hasattr(rate, "__len__"):
                # Single rate per neuron for whole simulation
                rates = [numpy.array([r]) for r in rate]
            else:
                # Single rate for all neurons for whole simulation
                rates = numpy.array([rate])
        elif hasattr(rates[0], "__len__"):
            # Convert each list to numpy array
            rates = [numpy.array(r) for r in rates]
        else:
            rates = numpy.array(rates)
        if starts is None and start is not None:
            # Fix: removed an unreachable "elif start is None" branch here -
            # the enclosing condition already guarantees start is not None
            if hasattr(start, "__len__"):
                starts = [numpy.array([s]) for s in start]
            else:
                starts = numpy.array([start])
        elif starts is not None and hasattr(starts[0], "__len__"):
            starts = [numpy.array(s) for s in starts]
        elif starts is not None:
            starts = numpy.array(starts)
        if durations is None and duration is not None:
            if hasattr(duration, "__len__"):
                durations = [numpy.array([d]) for d in duration]
            else:
                durations = numpy.array([duration])
        elif durations is not None and hasattr(durations[0], "__len__"):
            durations = [numpy.array(d) for d in durations]
        elif durations is not None:
            durations = numpy.array(durations)
        else:
            # No duration given at all: run for the whole simulation
            if hasattr(rates[0], "__len__"):
                durations = [
                    numpy.array([None for r in _rate]) for _rate in rates
                ]
            else:
                durations = numpy.array([None for _rate in rates])

        # Check that there is either one list for all neurons,
        # or one per neuron
        if hasattr(rates[0], "__len__") and len(rates) != n_neurons:
            raise Exception(
                "Must specify one rate for all neurons or one per neuron")
        if (starts is not None and hasattr(starts[0], "__len__")
                and len(starts) != n_neurons):
            raise Exception(
                "Must specify one start for all neurons or one per neuron")
        if (durations is not None and hasattr(durations[0], "__len__")
                and len(durations) != n_neurons):
            raise Exception(
                "Must specify one duration for all neurons or one per neuron")

        # Check that for each rate there is a start and duration if needed
        # TODO: Could be more efficient for case where parameters are not one
        #       per neuron
        for i in range(n_neurons):
            rate_set = rates
            if hasattr(rates[0], "__len__"):
                rate_set = rates[i]
            if not hasattr(rate_set, "__len__"):
                raise Exception("Multiple rates must be a list")
            if starts is None and len(rate_set) > 1:
                raise Exception("When multiple rates are specified,"
                                " each must have a start")
            elif starts is not None:
                start_set = starts
                if hasattr(starts[0], "__len__"):
                    start_set = starts[i]
                if len(start_set) != len(rate_set):
                    raise Exception("Each rate must have a start")
                if any(s is None for s in start_set):
                    raise Exception("Start must not be None")
            if durations is not None:
                duration_set = durations
                if hasattr(durations[0], "__len__"):
                    duration_set = durations[i]
                if len(duration_set) != len(rate_set):
                    raise Exception("Each rate must have its own duration")

        # Initial countdown to next spike is zero everywhere
        if hasattr(rates[0], "__len__"):
            time_to_spike = [
                numpy.array([0 for _ in range(len(rates[i]))])
                for i in range(len(rates))
            ]
        else:
            time_to_spike = numpy.array([0 for _ in range(len(rates))])

        self.__data = SpynnakerRangeDictionary(n_neurons)
        self.__data["rates"] = SpynnakerRangedList(
            n_neurons,
            rates,
            use_list_as_value=not hasattr(rates[0], "__len__"))
        # NOTE(review): starts can still be None here (when neither start nor
        # starts was given), in which case starts[0] raises TypeError -
        # presumably callers always supply a start; confirm.
        self.__data["starts"] = SpynnakerRangedList(
            n_neurons,
            starts,
            use_list_as_value=not hasattr(starts[0], "__len__"))
        self.__data["durations"] = SpynnakerRangedList(
            n_neurons,
            durations,
            use_list_as_value=not hasattr(durations[0], "__len__"))
        self.__data["time_to_spike"] = SpynnakerRangedList(
            n_neurons,
            time_to_spike,
            use_list_as_value=not hasattr(time_to_spike[0], "__len__"))
        self.__rng = numpy.random.RandomState(seed)
        self.__rate_change = numpy.zeros(n_neurons)

        self.__n_profile_samples = get_config_int("Reports",
                                                  "n_profile_samples")

        # Fix: removed a duplicate re-creation of self.__spike_recorder here;
        # it is already created above and nothing used it in between.

        # Work out the maximum rate for buffer sizing
        all_rates = list(_flatten(self.__data["rates"]))
        self.__max_rate = max_rate
        if max_rate is None and len(all_rates):
            self.__max_rate = numpy.amax(all_rates)
        elif max_rate is None:
            self.__max_rate = 0

        total_rate = numpy.sum(all_rates)
        self.__max_spikes = 0
        if total_rate > 0:
            # Note we have to do this per rate, as the whole array is not numpy
            max_rates = numpy.array(
                [numpy.max(r) for r in self.__data["rates"]])
            # 99.9...th percentile of the Poisson distribution per neuron
            self.__max_spikes = numpy.sum(
                scipy.stats.poisson.ppf(1.0 - (1.0 / max_rates), max_rates))

        # Keep track of how many outgoing projections exist
        self.__outgoing_projections = list()
0
    def __init__(self,
                 x_factor=X_FACTOR,
                 y_factor=Y_FACTOR,
                 width=WIDTH_PIXELS,
                 height=HEIGHT_PIXELS,
                 colour_bits=COLOUR_BITS,
                 constraints=None,
                 label="Breakout",
                 incoming_spike_buffer_size=None,
                 simulation_duration_ms=MAX_SIM_DURATION,
                 bricking=1,
                 random_seed=rand_seed):
        """ Create a Breakout application vertex backed by a single
            BreakoutMachineVertex covering all of its neurons.
        """
        # **NOTE** n_neurons currently ignored - width and height will be
        # specified as additional parameters, forcing their product to be
        # duplicated in n_neurons seems pointless

        self._label = label
        self._x_factor = x_factor
        self._y_factor = y_factor
        self._width = width / x_factor
        self._height = height / y_factor
        self._colour_bits = colour_bits
        # Bits needed to index a pixel coordinate in each dimension
        self._width_bits = numpy.uint32(numpy.ceil(numpy.log2(self._width)))
        self._height_bits = numpy.uint32(numpy.ceil(numpy.log2(self._height)))

        # One neuron per (x, y, colour) combination
        self._n_neurons = int(1 << (self._width_bits + self._height_bits +
                                    self._colour_bits))
        self._bricking = bricking
        self._rand_seed = random_seed

        # Define size of recording region
        # NOTE(review): uses /10000. where the other environments use /1000.;
        # confirm this factor is intended.
        self._recording_size = int((simulation_duration_ms / 10000.) * 4)

        # (static) resources required
        # technically as using OneAppOneMachine this is not necessary?
        resources_required = (self.BREAKOUT_REGION_BYTES +
                              self.PARAM_REGION_BYTES + self._recording_size)

        vertex_slice = Slice(0, self._n_neurons - 1)

        # Superclasses
        super(Breakout, self).__init__(
            BreakoutMachineVertex(
                vertex_slice, resources_required, constraints, self._label,
                self, x_factor, y_factor, width, height, colour_bits,
                incoming_spike_buffer_size, simulation_duration_ms, bricking,
                random_seed),
            label=label, constraints=constraints)
        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        SimplePopulationSettable.__init__(self)
        AbstractChangableAfterRun.__init__(self)
        AbstractAcceptsIncomingSynapses.__init__(self)
        self._change_requires_mapping = True
        # Fix: previously _incoming_spike_buffer_size was only assigned when
        # the argument was None, so reading the attribute after passing an
        # explicit buffer size raised AttributeError.
        if incoming_spike_buffer_size is None:
            self._incoming_spike_buffer_size = get_config_int(
                "Simulation", "incoming_spike_buffer_size")
        else:
            self._incoming_spike_buffer_size = incoming_spike_buffer_size