def generateDataSpec(self, processor, subvertex, dao):
        #check that all keys for a subedge are the same when masked
        self.check_sub_edge_key_mask_consistancy(self.edge_map, self._app_mask)

        # Create new DataSpec for this processor:
        spec = data_spec_gen.DataSpec(processor=processor, dao=dao)
        spec.initialise(self.core_app_identifier,
                        dao)  # User specified identifier

        spec.comment("\n*** Spec for multi case source ***\n\n")

        # Load the expected executable to the list of load targets for this core
        # and the load addresses:
        x, y, p = processor.get_coordinates()
        executable_target = None
        file_path = os.path.join(dao.get_common_binaries_directory(),
                                 'multicast_source.aplx')
        executable_target = lib_map.ExecutableTarget(file_path, x, y, p)
        memory_write_targets = list()

        simulationTimeInTicks = INFINITE_SIMULATION
        if dao.run_time is not None:
            simulationTimeInTicks = int(
                (dao.run_time * 1000.0) / dao.machineTimeStep)
        user1Addr = 0xe5007000 + 128 * p + 116  # User1 location reserved for core p
        memory_write_targets.append(
            lib_map.MemWriteTarget(x, y, p, user1Addr, simulationTimeInTicks))

        #reserve regions
        self.reserve_memory_regions(spec, self.memory_requirements)

        #write system region
        spec.switchWriteFocus(region=self.SYSTEM_REGION)
        spec.write(data=0xBEEF0000)
        spec.write(data=0)
        spec.write(data=0)
        spec.write(data=0)

        #write commands to memory
        spec.switchWriteFocus(region=self.COMMANDS)
        for write_command in self.writes:
            spec.write(data=write_command)

        # End-of-Spec:
        spec.endSpec()
        spec.closeSpecFile()
        load_targets = list()

        # Return list of executables, load files:
        return executable_target, load_targets, memory_write_targets
    def generateDataSpec(self, processor, subvertex, dao):
        """
        Model-specific construction of the data blocks necessary to build a
        single Application Monitor on one core.

        :param processor: the processor (chip x, y and core p) to build for
        :param subvertex: the subvertex mapped onto this core
        :param dao: data access object holding run-time configuration
        :return: tuple of (executable target, load targets, memory write
                 targets)
        """
        # Create new DataSpec for this processor:
        spec = data_spec_gen.DataSpec(processor, dao)
        spec.initialise(self.core_app_identifier,
                        dao)  # User specified identifier

        spec.comment("\n*** Spec for External Spike Source Instance ***\n\n")

        # Register the expected executable for this core:
        x, y, p = processor.get_coordinates()
        executableTarget = lib_map.ExecutableTarget(
            os.path.join(dao.get_common_binaries_directory(),
                         'external_spike_source.aplx'), x, y, p)

        # Size of the setup region to reserve in SDRAM: a single word of
        # info with flags, etc. (plus the lengths of each of the output
        # buffer regions in bytes).
        setupSz = 16

        # Construct the data images needed for this vertex:
        self.reserveMemoryRegions(spec, setupSz)
        self.writeSetupInfo(spec, subvertex)

        # End-of-Spec:
        spec.endSpec()
        spec.closeSpecFile()

        # One memory write is required: the simulation duration (in ticks)
        # goes into the User1 register reserved for core p.
        memoryWriteTargets = list()
        simulationTimeInTicks = INFINITE_SIMULATION
        if dao.run_time is not None:
            simulationTimeInTicks = int(
                (dao.run_time * 1000.0) / dao.machineTimeStep)
        user1Addr = 0xe5007000 + 128 * p + 116  # User1 location reserved for core p
        memoryWriteTargets.append(
            lib_map.MemWriteTarget(x, y, p, user1Addr, simulationTimeInTicks))
        loadTargets = list()

        # Return list of executables, load files:
        return executableTarget, loadTargets, memoryWriteTargets
    def generateDataSpec(self, processor, subvertex, dao):
        """
        Model-specific construction of the data blocks necessary to build a
        single Delay Extension Block on one core.

        :param processor: the processor (chip x, y and core p) to build for
        :param subvertex: the subvertex mapped onto this core
        :param dao: data access object holding run-time configuration
        :return: tuple of (executable target, load targets, memory write
                 targets)
        """
        # Create new DataSpec for this processor:
        spec = data_spec_gen.DataSpec(processor, dao)
        spec.initialise(self.core_app_identifier, dao)  # User specified identifier

        spec.comment("\n*** Spec for Delay Extension Instance ***\n\n")

        # Register the expected executable for this core:
        x, y, p = processor.get_coordinates()
        executableTarget = lib_map.ExecutableTarget(
            os.path.join(dao.get_common_binaries_directory(),
                         'delay_extension.aplx'), x, y, p)

        # ###################################################################
        # Reserve SDRAM space for memory areas.

        # Size (in bytes) of the delay-parameters region: a fixed header plus
        # one bitfield block (one bit per atom, rounded up to whole 32-bit
        # words) per delay block.
        delay_params_header_words = 3
        n_atoms = subvertex.hi_atom - subvertex.lo_atom + 1
        block_len_words = int(ceil(n_atoms / 32.0))
        num_delay_blocks, delay_blocks = self.get_delay_blocks(subvertex)
        delay_params_sz = 4 * (delay_params_header_words
                               + (num_delay_blocks * block_len_words))

        # Spike history is not recorded for delay extensions.
        spikeHistoryRegionSz = 0

        # Reserve memory:
        spec.comment("\nReserving memory space for data regions:\n\n")

        spec.reserveMemRegion(region=REGIONS.SYSTEM,
                              size=SETUP_SZ,
                              label='setup')

        spec.reserveMemRegion(region=REGIONS.DELAY_PARAMS,
                              size=delay_params_sz,
                              label='delay_params')

        self.writeSetupInfo(spec, subvertex, spikeHistoryRegionSz)

        self.writeDelayParameters(spec, processor, subvertex, num_delay_blocks,
                                  delay_blocks)

        # End-of-Spec:
        spec.endSpec()
        spec.closeSpecFile()

        # One memory write is required: the simulation duration (in ticks)
        # goes into the User1 register reserved for core p.
        memoryWriteTargets = list()

        simulationTimeInTicks = INFINITE_SIMULATION
        if dao.run_time is not None:
            simulationTimeInTicks = int(
                (dao.run_time * 1000.0) / dao.machineTimeStep)
        user1Addr = 0xe5007000 + 128 * p + 116  # User1 location reserved for core p
        memoryWriteTargets.append(lib_map.MemWriteTarget(x, y, p, user1Addr,
                                                         simulationTimeInTicks))

        loadTargets = list()

        # Return list of executables, load files:
        return executableTarget, loadTargets, memoryWriteTargets
# Exemplo n.º 4
# 0
    def generateDataSpec(self, processor, subvertex, dao):
        """
        Model-specific construction of the data blocks necessary to build a
        single SpikeSourcePoisson on one core.

        :param processor: the processor (chip x, y and core p) to build for
        :param subvertex: the subvertex mapped onto this core
        :param dao: data access object holding run-time configuration
        :return: tuple of (executable target, load targets, memory write
                 targets)
        """
        # Get simulation information:
        machineTimeStep = dao.machineTimeStep

        # Create new DataSpec for this processor:
        spec = data_spec_gen.DataSpec(processor, dao)
        spec.initialise(self.core_app_identifier,
                        dao)  # User-specified identifier

        spec.comment("\n*** Spec for SpikeSourcePoisson Instance ***\n\n")

        # Register the expected executable for this core:
        x, y, p = processor.get_coordinates()
        executableTarget = lib_map.ExecutableTarget(
            os.path.join(dao.get_common_binaries_directory(),
                         'spike_source_poisson.aplx'), x, y, p)

        # Number of simulation time steps for buffer sizing.
        # NOTE(review): unlike the User1 write below, this does not guard
        # against dao.run_time being None -- confirm callers always set a
        # run time before reaching here.
        no_machine_time_steps = int(
            (dao.run_time * 1000.0) / dao.machineTimeStep)

        # Sizes of the parameter and spike-history regions for the atoms on
        # this core:
        poissonParamsSz = self.getParamsBytes(subvertex.lo_atom,
                                              subvertex.hi_atom)
        spikeHistBuffSz = self.getSpikeBufferSize(subvertex.lo_atom,
                                                  subvertex.hi_atom,
                                                  no_machine_time_steps)

        # Reserve SDRAM space for memory areas:
        self.reserveMemoryRegions(spec, SETUP_SZ, poissonParamsSz,
                                  spikeHistBuffSz)

        # Write region 1 (system information on buffer size, etc);
        self.writeSetupInfo(spec, subvertex, spikeHistBuffSz)

        self.writePoissonParameters(spec, machineTimeStep, processor,
                                    subvertex.n_atoms)

        # End-of-Spec:
        spec.endSpec()
        spec.closeSpecFile()

        # One memory write is required: the simulation duration (in ticks)
        # goes into the User1 register reserved for core p.
        memoryWriteTargets = list()

        simulationTimeInTicks = INFINITE_SIMULATION
        if dao.run_time is not None:
            simulationTimeInTicks = int(
                (dao.run_time * 1000.0) / dao.machineTimeStep)
        user1Addr = 0xe5007000 + 128 * p + 116  # User1 location reserved for core p
        memoryWriteTargets.append(
            lib_map.MemWriteTarget(x, y, p, user1Addr, simulationTimeInTicks))
        loadTargets = list()

        # Return list of executables, load files:
        return executableTarget, loadTargets, memoryWriteTargets
# Exemplo n.º 5
# 0
    def generateDataSpec(self, processor, subvertex, dao):
        """
        Construct the data specification for the robot motor control vertex
        on one core.

        :param processor: the processor (chip x, y and core p) to build for
        :param subvertex: the subvertex mapped onto this core
        :param dao: data access object holding run-time configuration
        :return: tuple of (executable target, load targets, memory write
                 targets)
        """
        # Create new DataSpec for this processor:
        spec = data_spec_gen.DataSpec(processor=processor, dao=dao)
        spec.initialise(self.core_app_identifier,
                        dao)  # User specified identifier

        spec.comment("\n*** Spec for robot motor control ***\n\n")

        # Register the expected executable for this core:
        x, y, p = processor.get_coordinates()
        file_path = os.path.join(dao.get_common_binaries_directory(),
                                 'myorobot_motor_control.aplx')
        executable_target = lib_map.ExecutableTarget(file_path, x, y, p)

        # One memory write is required: the simulation duration (in ticks)
        # goes into the User1 register reserved for core p.
        memory_write_targets = list()
        simulationTimeInTicks = INFINITE_SIMULATION
        if dao.run_time is not None:
            simulationTimeInTicks = int(
                (dao.run_time * 1000.0) / dao.machineTimeStep)
        user1Addr = 0xe5007000 + 128 * p + 116  # User1 location reserved for core p
        memory_write_targets.append(
            lib_map.MemWriteTarget(x, y, p, user1Addr, simulationTimeInTicks))

        # Reserve regions.
        self.reserve_memory_regions(spec)

        # Write the system region: magic word followed by three reserved words.
        spec.switchWriteFocus(region=self.SYSTEM_REGION)
        spec.write(data=0xBEEF0000)
        spec.write(data=0)
        spec.write(data=0)
        spec.write(data=0)

        # Locate the routing key of the outgoing edge.
        # NOTE(review): if no subedge matches self.out_going_edge, edge_key
        # stays None and the write below fails with a TypeError -- confirm a
        # matching subedge is always present.
        edge_key = None
        for subedge in subvertex.out_subedges:
            if subedge.edge == self.out_going_edge:
                edge_key = subedge.key

        # Write the parameters region: key, output scaling, sampling and
        # kernel parameters, threshold and neuron count.
        spec.switchWriteFocus(region=self.PARAMS)
        spec.write(data=edge_key | self.myoID)
        spec.write(data=spec.doubleToS1615(self.output_scale), sizeof='s1615')
        spec.write(data=self.sample_time)
        spec.write(data=spec.doubleToS1615(self.decay_factor), sizeof='s1615')
        spec.write(data=spec.doubleToS1615(self.kernel_amplitude),
                   sizeof='s1615')
        spec.write(data=self.threshold)
        spec.write(data=self.n_neurons)

        # End-of-Spec:
        spec.endSpec()
        spec.closeSpecFile()

        # No additional files need to be loaded for this core.
        load_targets = list()

        # Return list of executables, load files:
        return executable_target, load_targets, memory_write_targets
    def generateDataSpec(self, processor, subvertex, dao):
        """
        Model-specific construction of the data blocks necessary to build a
        group of IF_curr_exp neurons resident on a single core.

        :param processor: the processor (chip x, y and core p) to build for
        :param subvertex: the subvertex mapped onto this core
        :param dao: data access object holding run-time configuration
        :return: tuple of (executable target, load targets, memory write
                 targets)
        """
        # Get simulation information:
        machineTimeStep = dao.machineTimeStep

        # Create new DataSpec for this processor:
        spec = data_spec_gen.DataSpec(processor, dao)
        spec.initialise(self.core_app_identifier, dao)  # User-specified identifier

        spec.comment("\n*** Spec for block of %s neurons ***\n" % (self.model_name))

        # Register the executable for this core.
        # TODO - AMEND FOR BINARY SEARCH PATH IF DESIRED
        x, y, p = processor.get_coordinates()

        # Split binary name into title and extension; if we have an STDP
        # mechanism, add its executable suffix to the title, then rebuild
        # the full executable path.
        binaryTitle, binaryExtension = os.path.splitext(self._binary)
        if self._stdp_mechanism is not None:
            binaryTitle = binaryTitle + "_" + \
                self._stdp_mechanism.get_vertex_executable_suffix()
        binaryName = os.path.join(dao.get_common_binaries_directory(),
                                  binaryTitle + binaryExtension)

        executableTarget = lib_map.ExecutableTarget(binaryName, x, y, p)

        # Calculate the number of time steps for buffer sizing.
        # NOTE(review): unlike the User1 write below, this does not guard
        # against dao.run_time being None -- confirm callers always set a
        # run time before reaching here.
        no_machine_time_steps = int((dao.run_time * 1000.0) / machineTimeStep)

        # Calculate the size of the tables to be reserved in SDRAM:
        neuronParamsSz = self.getNeuronParamsSize(subvertex.lo_atom,
                subvertex.hi_atom)
        synapseParamsSz = self.getSynapseParameterSize(subvertex.lo_atom,
                subvertex.hi_atom)
        allSynBlockSz = self.getExactSynapticBlockMemorySize(subvertex)
        spikeHistBuffSz = self.getSpikeBufferSize(subvertex.lo_atom,
                subvertex.hi_atom, no_machine_time_steps)
        potentialHistBuffSz = self.getVBufferSize(subvertex.lo_atom,
                subvertex.hi_atom, no_machine_time_steps)
        gsynHistBuffSz = self.getGSynBufferSize(subvertex.lo_atom,
                subvertex.hi_atom, no_machine_time_steps)
        stdpRegionSz = self.getSTDPParameterSize(subvertex.lo_atom,
                subvertex.hi_atom, self.in_edges)

        # Declare random number generators and distributions:
        self.writeRandomDistributionDeclarations(spec, dao)

        # Construct the data images needed for the Neuron:
        self.reserveMemoryRegions(spec, SETUP_SIZE, neuronParamsSz,
                synapseParamsSz, SynapticManager.ROW_LEN_TABLE_SIZE,
                SynapticManager.MASTER_POPULATION_TABLE_SIZE, allSynBlockSz,
                spikeHistBuffSz, potentialHistBuffSz, gsynHistBuffSz,
                stdpRegionSz)

        self.writeSetupInfo(spec, subvertex, spikeHistBuffSz,
                potentialHistBuffSz, gsynHistBuffSz)

        # Scaling applied to synaptic weights so they fit the ring buffer's
        # fixed-point representation.
        ring_buffer_shift = self.get_ring_buffer_to_input_left_shift(subvertex)
        weight_scale = self.get_weight_scale(ring_buffer_shift)
        logger.debug("Weight scale is {}".format(weight_scale))

        self.writeNeuronParameters(spec, machineTimeStep, processor, subvertex,
                ring_buffer_shift)

        self.writeSynapseParameters(spec, machineTimeStep, subvertex)

        self.writeSTDPParameters(spec, machineTimeStep, subvertex,
                                 weight_scale, REGIONS.STDP_PARAMS)

        self.writeRowLengthTranslationTable(spec, REGIONS.ROW_LEN_TRANSLATION)

        self.writeSynapticMatrixAndMasterPopulationTable(spec, subvertex,
                                                         allSynBlockSz,
                                                         weight_scale,
                                                         REGIONS.MASTER_POP_TABLE,
                                                         REGIONS.SYNAPTIC_MATRIX)

        # Release sublists now that the synaptic data has been written.
        for subedge in subvertex.in_subedges:
            subedge.free_sublist()

        # End the writing of this specification:
        spec.endSpec()
        spec.closeSpecFile()

        # One memory write is required: the simulation duration (in ticks)
        # goes into the User1 register reserved for core p.
        memoryWriteTargets = list()
        simulationTimeInTicks = INFINITE_SIMULATION
        if dao.run_time is not None:
            simulationTimeInTicks = int((dao.run_time * 1000.0)
                    / dao.machineTimeStep)
        user1Addr = 0xe5007000 + 128 * p + 116  # User1 location reserved for core p
        memoryWriteTargets.append(lib_map.MemWriteTarget(x, y, p, user1Addr,
                                                         simulationTimeInTicks))
        loadTargets = list()

        # Return list of target cores, executables, files to load and
        # memory writes to perform:
        return executableTarget, loadTargets, memoryWriteTargets
    def generateDataSpec(self, processor, subvertex, dao):
        """
        Model-specific construction of the data blocks necessary to build a
        single SpikeSource Array on one core.

        Returns a tuple of (executable target, load targets, memory write
        targets).
        """
        # Create new DataSpec for this processor:
        spec = data_spec_gen.DataSpec(processor, dao)
        spec.initialise(self.core_app_identifier, dao)               # User specified identifier

        spec.comment("\n*** Spec for SpikeSourceArray Instance ***\n\n")

        # Load the expected executable to the list of load targets for this core
        # and the load addresses:
        x, y, p = processor.get_coordinates()
        executableTarget = lib_map.ExecutableTarget(dao.get_common_binaries_directory()
                         + os.sep + 'spike_source_array.aplx',x, y, p)
        
        # ###################################################################
        # Reserve SDRAM space for memory areas:

        spec.comment("\nReserving memory space for spike data region:\n\n")

        # NOTE(review): this does not guard against dao.run_time being None
        # (the User1 write below does) -- confirm callers always set a run
        # time before reaching here.
        machineTimeStep = dao.machineTimeStep # usec per simulation tick
        no_machine_time_steps = int((dao.run_time * 1000.0) / machineTimeStep)
        numNeurons, tableEntries, spikeBlocks, spikeRegionSize  =  \
                    self.processSpikeArrayInfo(subvertex, machineTimeStep)
        # An empty spike region still needs a non-zero (one word) reservation.
        if spikeRegionSize == 0:
            spikeRegionSize = 4

        # Calculate memory requirements:
        blockIndexRegionSize = self.getBlockIndexBytes(len(tableEntries))
        spikeHistoryRegionSz = self.getSpikeBufferSize(subvertex.lo_atom, 
                subvertex.hi_atom, no_machine_time_steps)

        # Create the data regions for the spike source array:
        self.reserveMemoryRegions(spec, SETUP_SZ, blockIndexRegionSize,
                                  spikeRegionSize, spikeHistoryRegionSz)
        self.writeSetupInfo(spec, subvertex, spikeHistoryRegionSz)
        self.writeBlockIndexRegion(spec, subvertex, numNeurons, tableEntries)
        self.writeSpikeDataRegion(spec, numNeurons, spikeBlocks)
    
        # End-of-Spec:
        spec.endSpec()
        spec.closeSpecFile() 

        # One memory write is required: the simulation duration (in ticks)
        # goes into the User1 register reserved for core p.
        memoryWriteTargets = list()

        simulationTimeInTicks = INFINITE_SIMULATION
        if dao.run_time is not None:
            simulationTimeInTicks = int((dao.run_time * 1000.0) 
                    /  dao.machineTimeStep)
        user1Addr = 0xe5007000 + 128 * p + 116 # User1 location reserved for core p
        memoryWriteTargets.append(lib_map.MemWriteTarget(x, y, p, user1Addr,
                                                         simulationTimeInTicks))
        loadTargets        = list()

        # Return list of executables, load files:
        return  executableTarget, loadTargets, memoryWriteTargets