def get_spikes(self, label, buffer_manager, region,
               placements, graph_mapper, application_vertex,
               base_key_function, machine_time_step):
        """ Read back the spikes recorded for an application vertex.

        Collects the recorded spike buffers from every machine vertex,
        decodes them via ``self._process_spike_data`` and returns a single
        array of (neuron_id, time) rows ordered by neuron id then time.
        Cores whose buffers were lost are reported with a warning.
        """
        # pylint: disable=too-many-arguments
        ms_per_tick = machine_time_step / 1000.0
        collected = []
        cores_without_data = []
        machine_vertices = graph_mapper.get_machine_vertices(
            application_vertex)
        progress = ProgressBar(
            machine_vertices, "Getting spikes for {}".format(label))
        for machine_vertex in progress.over(machine_vertices):
            placement = placements.get_placement_of_vertex(machine_vertex)
            atoms_slice = graph_mapper.get_slice(machine_vertex)

            # Pull the raw recording buffer for this core
            raw_data, is_missing = buffer_manager.get_data_by_placement(
                placement, region)
            if is_missing:
                cores_without_data.append(placement)
            self._process_spike_data(
                atoms_slice, raw_data, ms_per_tick,
                base_key_function(machine_vertex), collected)

        if cores_without_data:
            logger.warning(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}", label, region,
                recording_utils.make_missing_string(cores_without_data))
        if not collected:
            return numpy.empty(shape=(0, 2))
        stacked = numpy.vstack(collected)
        # lexsort: primary key neuron id (column 0), secondary key time
        return stacked[numpy.lexsort((stacked[:, 1], stacked[:, 0]))]
# --- Example #2 (snippet separator from the scraped page; vote count: 0) ---
    def get_spikes(self, label, buffer_manager, region, placements,
                   graph_mapper, application_vertex, base_key_function,
                   machine_time_step):
        """ Fetch and decode the recorded spikes for a population.

        Each machine vertex's buffer is read and decoded with
        ``self._process_spike_data``; missing buffers are logged. The
        result is one numpy array of (neuron_id, time) pairs, sorted by
        neuron id and then by time.
        """
        # pylint: disable=too-many-arguments
        ms_per_tick = machine_time_step / 1000.0
        decoded = []
        lost_placements = []
        machine_vertices = graph_mapper.get_machine_vertices(
            application_vertex)
        progress = ProgressBar(
            machine_vertices, "Getting spikes for {}".format(label))
        for machine_vertex in progress.over(machine_vertices):
            placement = placements.get_placement_of_vertex(machine_vertex)
            atoms_slice = graph_mapper.get_slice(machine_vertex)

            # Read this core's recording buffer; note if it was truncated
            raw_data, is_missing = buffer_manager.get_data_by_placement(
                placement, region)
            if is_missing:
                lost_placements.append(placement)
            self._process_spike_data(
                atoms_slice, raw_data, ms_per_tick,
                base_key_function(machine_vertex), decoded)

        if lost_placements:
            logger.warning(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}", label, region,
                recording_utils.make_missing_string(lost_placements))
        if not decoded:
            return numpy.empty(shape=(0, 2))
        stacked = numpy.vstack(decoded)
        # Order rows by neuron id (primary) then spike time (secondary)
        return stacked[numpy.lexsort((stacked[:, 1], stacked[:, 0]))]
# --- Example #3 (snippet separator from the scraped page; vote count: 0) ---
    def get_spikes(self, label, buffer_manager, region, placements,
                   application_vertex, base_key_function):
        """ Get the recorded spikes from the object

        :param str label:
        :param buffer_manager: the buffer manager object
        :type buffer_manager:
            ~spinn_front_end_common.interface.buffer_management.BufferManager
        :param int region:
        :param ~pacman.model.placements.Placements placements:
            the placements object
        :param application_vertex:
        :type application_vertex:
            ~pacman.model.graphs.application.ApplicationVertex
        :param base_key_function:
        :type base_key_function:
            callable(~pacman.model.graphs.machine.MachineVertex,int)
        :return: A numpy array of 2-element arrays of (neuron_id, time)
            ordered by time, one element per event
        :rtype: ~numpy.ndarray(tuple(int,int))
        """
        # pylint: disable=too-many-arguments
        decoded = []
        lost_placements = []
        machine_vertices = application_vertex.machine_vertices
        progress = ProgressBar(
            machine_vertices, "Getting spikes for {}".format(label))
        for machine_vertex in progress.over(machine_vertices):
            placement = placements.get_placement_of_vertex(machine_vertex)
            atoms_slice = machine_vertex.vertex_slice

            # Count the send-buffer entries for this vertex: each entry is
            # either a sequence of times (counted by length) or a single
            # integer time (counted as 1).
            send_times = machine_vertex.send_buffer_times
            n_buffer_times = 0
            if send_times is not None:
                n_buffer_times = sum(
                    len(entry) if hasattr(entry, "__len__") else 1
                    for entry in send_times)

            # Only cores that were given spikes to send have recorded data
            if n_buffer_times > 0:
                raw_data, is_missing = \
                    buffer_manager.get_data_by_placement(placement, region)
                if is_missing:
                    lost_placements.append(placement)
                self._process_spike_data(
                    atoms_slice, raw_data,
                    base_key_function(machine_vertex), decoded)

        if lost_placements:
            logger.warning(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}", label, region,
                recording_utils.make_missing_string(lost_placements))
        if not decoded:
            return numpy.empty(shape=(0, 2))
        stacked = numpy.vstack(decoded)
        # Order rows by neuron id (primary) then spike time (secondary)
        return stacked[numpy.lexsort((stacked[:, 1], stacked[:, 0]))]
# --- Example #4 (snippet separator from the scraped page; vote count: 0) ---
    def get_spikes(self, label, buffer_manager, region, placements,
                   application_vertex, machine_time_step):
        """
        :param str label:
        :param buffer_manager: the buffer manager object
        :type buffer_manager: \
            ~spinn_front_end_common.interface.buffer_management.BufferManager
        :param int region:
        :param ~pacman.model.placements.Placements placements:
        :param application_vertex:
        :type application_vertex: \
            ~pacman.model.graphs.application.ApplicationVertex
        :param int machine_time_step: microseconds
        :return: A numpy array of 2-element arrays of (neuron_id, time)\
            ordered by time, one element per event
        :rtype: ~numpy.ndarray(tuple(int,int))
        """
        # pylint: disable=too-many-arguments
        ms_per_tick = machine_time_step / MICRO_TO_MILLISECOND_CONVERSION
        ids_per_core = []
        times_per_core = []
        lost_placements = []

        machine_vertices = application_vertex.machine_vertices
        progress = ProgressBar(
            machine_vertices, "Getting spikes for {}".format(label))
        for machine_vertex in progress.over(machine_vertices):
            placement = placements.get_placement_of_vertex(machine_vertex)
            atoms_slice = machine_vertex.vertex_slice

            # Read this core's recorded spike bitfields from the buffers
            raw_data, is_missing = \
                buffer_manager.get_data_by_placement(placement, region)
            if is_missing:
                lost_placements.append(placement)
            # One bit per atom, rounded up to whole words per timestep
            words_per_step = int(
                math.ceil(atoms_slice.n_atoms / BITS_PER_WORD))
            self._process_spike_data(
                atoms_slice, ms_per_tick, words_per_step,
                raw_data, ids_per_core, times_per_core)

        if lost_placements:
            logger.warning(
                "Population {} is missing spike data in region {} from the "
                "following cores: {}", label, region,
                recording_utils.make_missing_string(lost_placements))

        if not ids_per_core:
            return numpy.zeros((0, 2))

        all_ids = numpy.hstack(ids_per_core)
        all_times = numpy.hstack(times_per_core)
        # Pair ids with times as rows, then sort by id (primary), time
        result = numpy.dstack((all_ids, all_times))[0]
        return result[numpy.lexsort((all_times, all_ids))]
    def get_spikes(
            self, label, buffer_manager, region,
            placements, graph_mapper, application_vertex, machine_time_step):
        """ Read back the spikes recorded as per-timestep bitfields.

        Decodes each machine vertex's buffer with
        ``self._process_spike_data`` (one bit per atom, padded to whole
        32-bit words per timestep) and returns (neuron_id, time) rows
        sorted by neuron id and then time. Missing buffers are logged.
        """
        # pylint: disable=too-many-arguments
        ms_per_tick = machine_time_step / 1000.0
        ids_per_core = []
        times_per_core = []
        lost_placements = []

        machine_vertices = graph_mapper.get_machine_vertices(
            application_vertex)
        progress = ProgressBar(
            machine_vertices, "Getting spikes for {}".format(label))
        for machine_vertex in progress.over(machine_vertices):
            placement = placements.get_placement_of_vertex(machine_vertex)
            atoms_slice = graph_mapper.get_slice(machine_vertex)

            # Read this core's recorded spike bitfields from the buffers
            raw_data, is_missing = \
                buffer_manager.get_data_by_placement(placement, region)
            if is_missing:
                lost_placements.append(placement)
            # One bit per atom, rounded up to whole 32-bit words per step
            words_per_step = int(math.ceil(atoms_slice.n_atoms / 32.0))
            self._process_spike_data(
                atoms_slice, ms_per_tick, words_per_step,
                raw_data, ids_per_core, times_per_core)

        if lost_placements:
            logger.warning(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}", label, region,
                recording_utils.make_missing_string(lost_placements))

        if not ids_per_core:
            return numpy.zeros((0, 2))

        all_ids = numpy.hstack(ids_per_core)
        all_times = numpy.hstack(times_per_core)
        # Pair ids with times as rows, then sort by id (primary), time
        result = numpy.dstack((all_ids, all_times))[0]
        return result[numpy.lexsort((all_times, all_ids))]
# --- Example #6 (snippet separator from the scraped page; vote count: 0) ---
    def get_spikes(
            self, label, buffer_manager, region,
            placements, graph_mapper, application_vertex, machine_time_step):
        """ Read back recorded spikes via the buffer manager's region API.

        Uses ``get_data_for_vertex`` and reads the whole recorded region
        for each machine vertex, decoding with ``self._process_spike_data``
        (one bit per atom, padded to whole 32-bit words per timestep).
        Returns (neuron_id, time) rows sorted by neuron id then time;
        missing buffers are logged.
        """
        # pylint: disable=too-many-arguments
        ms_per_tick = machine_time_step / 1000.0
        ids_per_core = []
        times_per_core = []
        lost_placements = []

        machine_vertices = graph_mapper.get_machine_vertices(
            application_vertex)
        progress = ProgressBar(
            machine_vertices, "Getting spikes for {}".format(label))
        for machine_vertex in progress.over(machine_vertices):
            placement = placements.get_placement_of_vertex(machine_vertex)
            atoms_slice = graph_mapper.get_slice(machine_vertex)

            # Fetch the recorded region for this core; read_all() yields
            # the raw bytes to decode
            recorded_region, is_missing = \
                buffer_manager.get_data_for_vertex(placement, region)
            if is_missing:
                lost_placements.append(placement)
            # One bit per atom, rounded up to whole 32-bit words per step
            words_per_step = int(math.ceil(atoms_slice.n_atoms / 32.0))
            self._process_spike_data(
                atoms_slice, ms_per_tick, words_per_step,
                recorded_region.read_all(), ids_per_core, times_per_core)

        if lost_placements:
            logger.warning(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}", label, region,
                recording_utils.make_missing_string(lost_placements))

        if not ids_per_core:
            return numpy.zeros((0, 2))

        all_ids = numpy.hstack(ids_per_core)
        all_times = numpy.hstack(times_per_core)
        # Pair ids with times as rows, then sort by id (primary), time
        result = numpy.dstack((all_ids, all_times))[0]
        return result[numpy.lexsort((all_times, all_ids))]