    def _read_in_master_pop_table(self, p, controller, master_pop_table_region,
                                  MASTER_POPULATION_TABLE_SIZE):
        '''
        Looks up the master pop table base address for a given processor on
        the machine, returning it along with the core's app data base address.
        '''
        # Get the App Data base address for the core
        # (location where this cores memory starts in
        # sdram and region table)
        app_data_base_address_offset = getAppDataBaseAddressOffset(p)
        app_data_base_address = \
            self._read_and_convert(app_data_base_address_offset,
                                   scamp.TYPE_WORD, 4, "<I", controller)

        # Get the memory address of the master pop table region
        master_region_base_address_address = \
            getRegionBaseAddressOffset(app_data_base_address,
                                       master_pop_table_region)

        master_region_base_address_offset = \
            self._read_and_convert(master_region_base_address_address,
                                   scamp.TYPE_WORD, 4, "<I", controller)

        master_region_base_address = \
            master_region_base_address_offset + app_data_base_address

        # Read in the master pop table and store it in RAM for future use
        logger.debug("Reading {} ({}) bytes starting at {} + "
                     "4".format(MASTER_POPULATION_TABLE_SIZE,
                                hex(MASTER_POPULATION_TABLE_SIZE),
                                hex(master_region_base_address)))

        return master_region_base_address, app_data_base_address
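
    # A usage sketch (hypothetical caller): the returned addresses can be fed
    # straight back into the controller to pull the table itself, mirroring
    # the debug message above, e.g.
    #
    #     base, app_base = self._read_in_master_pop_table(
    #         p, controller, master_pop_table_region,
    #         MASTER_POPULATION_TABLE_SIZE)
    #     table_data = controller.txrx.memory_calls.read_mem(
    #         base + 4, scamp.TYPE_WORD, MASTER_POPULATION_TABLE_SIZE)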
def retrieve_region_data(txrx, x, y, p, region_id, region_size):
    """Get the data from the given processor and region.

    :param txrx: transceiver to use when communicating with the board
    :param region_id: id of the region to retrieve
    :param region_size: size of the region (in words)
    :returns: a string containing data from the region
    """
    # Get the application pointer table to get the address for the region
    txrx.select(x, y)
    app_data_base_offset = memory_utils.getAppDataBaseAddressOffset(p)
    _app_data_table = txrx.memory_calls.read_mem(app_data_base_offset,
                                                 scamp.TYPE_WORD, 4)
    app_data_table = struct.unpack('<I', _app_data_table)[0]

    # Get the position of the desired region
    region_base_offset = memory_utils.getRegionBaseAddressOffset(
        app_data_table, region_id)
    _region_base = txrx.memory_calls.read_mem(region_base_offset,
                                              scamp.TYPE_WORD, 4)
    region_address = struct.unpack('<I', _region_base)[0] + app_data_table

    # Read the region
    data = txrx.memory_calls.read_mem(region_address, scamp.TYPE_WORD,
                                      region_size * 4)
    return data
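
# A usage sketch (values hypothetical): dump 256 words of region 2 on core
# (0, 0, 1) and unpack them as little-endian unsigned words, e.g.
#
#     raw = retrieve_region_data(controller.txrx, 0, 0, 1, 2, 256)
#     words = struct.unpack("<256I", raw)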
    def _getSpikes(self, controller, compatible_output, spikeRecordingRegion,
                   subVertexOutSpikeBytesFunction, runtime):
        """
        Return a 2-column numpy array containing cell ids and spike times for 
        recorded cells.   This is read directly from the memory for the board.
        """

        logger.info("Getting spikes for %s" % (self.label))

        spikes = numpy.zeros((0, 2))

        # Find all the sub-vertices that this population exists on
        for subvertex in self.subvertices:
            (x, y, p) = subvertex.placement.processor.get_coordinates()
            logger.debug(
                "Reading spikes from chip %d, %d, core %d, lo_atom %d" %
                (x, y, p, subvertex.lo_atom))
            controller.txrx.select(x, y)

            # Get the App Data for the core
            appDataBaseAddressOffset = getAppDataBaseAddressOffset(p)
            appDataBaseAddressBuf = controller.txrx.memory_calls.read_mem(
                appDataBaseAddressOffset, scamp.TYPE_WORD, 4)
            appDataBaseAddress = struct.unpack("<I", appDataBaseAddressBuf)[0]

            # Get the position of the spike buffer
            spikeRegionBaseAddressOffset = getRegionBaseAddressOffset(
                appDataBaseAddress, spikeRecordingRegion)
            spikeRegionBaseAddressBuf = controller.txrx.memory_calls.read_mem(
                spikeRegionBaseAddressOffset, scamp.TYPE_WORD, 4)
            spikeRegionBaseAddress = struct.unpack(
                "<I", spikeRegionBaseAddressBuf)[0]
            spikeRegionBaseAddress += appDataBaseAddress

            # Read the spike data size
            numberOfBytesWrittenBuf = controller.txrx.memory_calls.read_mem(
                spikeRegionBaseAddress, scamp.TYPE_WORD, 4)
            numberOfBytesWritten = struct.unpack_from(
                "<I", numberOfBytesWrittenBuf)[0]

            # Check that the number of spike bytes written is no larger than
            # the size of the memory region allocated for spikes
            outSpikeBytes = subVertexOutSpikeBytesFunction(subvertex)
            machine_time_step = conf.config.getint("Machine",
                                                   "machineTimeStep")
            no_machine_time_steps = int((runtime * 1000.0) / machine_time_step)
            size_of_region = self.get_recording_region_size(
                outSpikeBytes, no_machine_time_steps)
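
            # Worked example of the check below: runtime = 1000 (ms) with a
            # 1000 (us) machine timestep gives
            # int((1000 * 1000.0) / 1000) = 1000 recorded timesteps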

            if numberOfBytesWritten > size_of_region:
                raise exceptions.MemReadException(
                    "the amount of memory written was "
                    "larger than was allocated for it")

            # Read the spikes
            logger.debug("Reading %d (%s) bytes starting at %s + 4" %
                         (numberOfBytesWritten, hex(numberOfBytesWritten),
                          hex(spikeRegionBaseAddress)))
            spikeData = controller.txrx.memory_calls.read_mem(
                spikeRegionBaseAddress + 4, scamp.TYPE_WORD,
                numberOfBytesWritten)

            # Work out how many timesteps were written; each timestep is one
            # bit-vector of outSpikeBytes bytes (computed above)
            numberOfTimeStepsWritten = numberOfBytesWritten // outSpikeBytes

            logger.debug("Processing %d timesteps" % numberOfTimeStepsWritten)

            # Loop through ticks
            for tick in range(0, numberOfTimeStepsWritten):

                # Convert tick to ms
                time = tick * (controller.dao.machineTimeStep / 1000.0)

                # Get offset into file data that the bit vector representing
                # the state at this tick begins at
                vectorOffset = (tick * outSpikeBytes)

                # Loop through the words that make up this vector
                for neuronWordIndex in range(0, outSpikeBytes, 4):

                    # Unpack the word containing the spikingness of 32 neurons
                    spikeVectorWord = struct.unpack_from(
                        "<I", spikeData, vectorOffset + neuronWordIndex)[0]

                    if spikeVectorWord != 0:
                        # Loop through each bit in this word
                        for neuronBitIndex in range(0, 32):

                            # If the bit is set
                            neuronBitMask = (1 << neuronBitIndex)
                            if (spikeVectorWord & neuronBitMask) != 0:

                                # Calculate neuron ID
                                neuronID = ((neuronWordIndex * 8) +
                                            neuronBitIndex + subvertex.lo_atom)
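                                # e.g. bit 5 of the second word
                                # (neuronWordIndex == 4) gives neuron
                                # (4 * 8) + 5 + lo_atom = 37 + lo_atom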

                                # Add spike time and neuron ID to returned lists
                                spikes = numpy.append(spikes,
                                                      [[time, neuronID]], 0)

        if len(spikes) > 0:

            logger.debug("Arranging spikes as per output spec")

            if compatible_output:

                # Swap the columns to neuronID : time, matching the output
                # format of the old code
                spikes[:, [0, 1]] = spikes[:, [1, 0]]

                # Sort by neuron ID and not by time
                spikeIndex = numpy.lexsort((spikes[:, 1], spikes[:, 0]))
                spikes = spikes[spikeIndex]
                return spikes

            # If not compatible output, return sorted by spike time
            spikeIndex = numpy.lexsort((spikes[:, 1], spikes[:, 0]))
            spikes = spikes[spikeIndex]
            return spikes
        print("No spikes recorded")
        return spikes
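
    # A vectorized sketch of the per-bit decode above (same layout
    # assumption: little-endian words, bit b of word w in a tick's vector
    # maps to neuron w * 32 + b + lo_atom; msPerTick is hypothetical).
    # Illustrative only, not a drop-in replacement:
    #
    #     words = numpy.frombuffer(spikeData, dtype="<u4").reshape(
    #         numberOfTimeStepsWritten, outSpikeBytes // 4)
    #     for t, w in zip(*numpy.nonzero(words)):
    #         for b in numpy.nonzero(
    #                 (words[t, w] >> numpy.arange(32)) & 1)[0]:
    #             spikes = numpy.append(
    #                 spikes, [[t * msPerTick, w * 32 + b + lo_atom]], 0)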
    def get_neuron_parameter(self, region, compatible_output, controller):
        if not controller.dao.has_ran:
            raise exceptions.PacmanException(
                "The simulation has not yet run, therefore the requested "
                "parameter cannot be retrieved")
        value = numpy.zeros((0, 3))

        # Find all the sub-vertices that this population exists on
        for subvertex in self.subvertices:
            (x, y, p) = subvertex.placement.processor.get_coordinates()
            controller.txrx.select(x, y)

            # Get the App Data for the core
            appDataBaseAddressOffset = getAppDataBaseAddressOffset(p)
            appDataBaseAddressBuf = controller.txrx.memory_calls.read_mem(
                appDataBaseAddressOffset, scamp.TYPE_WORD, 4)
            appDataBaseAddress = struct.unpack("<I", appDataBaseAddressBuf)[0]

            # Get the position of the value buffer
            vRegionBaseAddressOffset = getRegionBaseAddressOffset(
                appDataBaseAddress, region)
            vRegionBaseAddressBuf = controller.txrx.memory_calls.read_mem(
                vRegionBaseAddressOffset, scamp.TYPE_WORD, 4)
            vRegionBaseAddress = struct.unpack(
                "<I", vRegionBaseAddressBuf)[0]
            vRegionBaseAddress += appDataBaseAddress

            # Read the size
            numberOfBytesWrittenBuf = controller.txrx.memory_calls.read_mem(
                vRegionBaseAddress, scamp.TYPE_WORD, 4)
            numberOfBytesWritten = struct.unpack_from(
                "<I", numberOfBytesWrittenBuf)[0]

            # Read the values
            logger.debug("Reading %d (%s) bytes starting at %s" %
                         (numberOfBytesWritten, hex(numberOfBytesWritten),
                          hex(vRegionBaseAddress + 4)))
            vData = controller.txrx.memory_calls.read_mem(
                vRegionBaseAddress + 4, scamp.TYPE_WORD,
                numberOfBytesWritten)
            bytesPerTimeStep = subvertex.n_atoms * 4
            numberOfTimeStepsWritten = \
                numberOfBytesWritten // bytesPerTimeStep
            msPerTimestep = controller.dao.machineTimeStep / 1000.0

            logger.debug("Processing %d timesteps" % numberOfTimeStepsWritten)

            # Standard fixed-point 'accum' type scaling
            size = len(vData) // 4
            scale = numpy.zeros(size, dtype=numpy.float)
            scale.fill(float(0x7FFF))
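
            # Worked example of the scaling: a raw on-board word of
            # 0x00004000 (16384) divided by 0x7FFF (32767) gives ~0.50002,
            # i.e. the fixed-point 'accum' value 0.5 (0x7FFF approximates
            # 2 ** 15)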
         
            # Add an array for time and neuron id
            time = numpy.array([int(i / subvertex.n_atoms) * msPerTimestep
                                for i in range(size)], dtype=numpy.float)
            neuronId = numpy.array([int(i % subvertex.n_atoms) +
                                    subvertex.lo_atom for i in range(size)],
                                   dtype=numpy.uint32)

            # Get the values
            tempValue = numpy.frombuffer(vData, dtype="<i4")
            tempValue = numpy.divide(tempValue, scale)
            tempArray = numpy.dstack((time, neuronId, tempValue))
            tempArray = numpy.reshape(tempArray, newshape=(-1, 3))

            value = numpy.append(value, tempArray, axis=0)

     logger.debug("Arranging parameter output")
     
     if compatible_output == True:
         
         # Change the order to be neuronID : time (don't know why - this
         # is how it was done in the old code, so I am doing it here too)
         value[:,[0,1,2]] = value[:,[1,0,2]]
         
         # Sort by neuron ID and not by time 
         vIndex = numpy.lexsort((value[:,2], value[:,1], value[:,0]))
         value = value[vIndex]
         return value
     
     # If not compatible output, we will sort by time (as NEST seems to do)
     vIndex = numpy.lexsort((value[:,2], value[:,1], value[:,0]))
     value = value[vIndex]
     return value
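
    # A usage sketch (region name hypothetical): a get_v()-style accessor
    # could delegate to this method, e.g.
    #
    #     def get_v(self, controller, compatible_output=False):
    #         return self.get_neuron_parameter(
    #             REGIONS.POTENTIAL_HISTORY, compatible_output, controller)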