def generateDataSpec(self, processor, subvertex, dao):
        """
        Construct the data specification for a multicast source core:
        validates sub-edge keys, writes the system and command regions,
        and returns the targets needed to load and start the core.

        :param processor: the processor (core) this spec is generated for
        :param subvertex: the subvertex placed on this core (unused here)
        :param dao: data access object holding machine/run configuration
        :returns: (executable_target, load_targets, memory_write_targets)
        """
        # Check that all keys for a subedge are the same when masked
        self.check_sub_edge_key_mask_consistancy(self.edge_map, self._app_mask)

        # Create new DataSpec for this processor:
        spec = data_spec_gen.DataSpec(processor=processor, dao=dao)
        spec.initialise(self.core_app_identifier,
                        dao)  # User specified identifier

        spec.comment("\n*** Spec for multi case source ***\n\n")

        # Load the expected executable to the list of load targets for this
        # core and the load addresses (dead pre-assignment of
        # executable_target removed — it was immediately overwritten):
        x, y, p = processor.get_coordinates()
        file_path = os.path.join(dao.get_common_binaries_directory(),
                                 'multicast_source.aplx')
        executable_target = lib_map.ExecutableTarget(file_path, x, y, p)
        memory_write_targets = list()

        # Run forever unless a finite run time was configured; the tick
        # count is written to the User1 word reserved for core p
        simulationTimeInTicks = INFINITE_SIMULATION
        if dao.run_time is not None:
            simulationTimeInTicks = int(
                (dao.run_time * 1000.0) / dao.machineTimeStep)
        user1Addr = 0xe5007000 + 128 * p + 116  # User1 location reserved for core p
        memory_write_targets.append(
            lib_map.MemWriteTarget(x, y, p, user1Addr, simulationTimeInTicks))

        # Reserve regions
        self.reserve_memory_regions(spec, self.memory_requirements)

        # Write system region (magic word followed by three reserved words)
        spec.switchWriteFocus(region=self.SYSTEM_REGION)
        spec.write(data=0xBEEF0000)
        spec.write(data=0)
        spec.write(data=0)
        spec.write(data=0)

        # Write the pre-built command words to the command region
        spec.switchWriteFocus(region=self.COMMANDS)
        for write_command in self.writes:
            spec.write(data=write_command)

        # End-of-Spec:
        spec.endSpec()
        spec.closeSpecFile()
        load_targets = list()

        # Return list of executables, load files:
        return executable_target, load_targets, memory_write_targets
    def generateDataSpec(self, processor, subvertex, dao):
        """
        Build the data specification blocks for a single Application
        Monitor (external spike source) instance on one core.
        """
        # Open a fresh DataSpec for this core and tag it with the
        # user-specified application identifier.
        spec = data_spec_gen.DataSpec(processor, dao)
        spec.initialise(self.core_app_identifier, dao)

        spec.comment("\n*** Spec for External Spike Source Instance ***\n\n")

        # Record which executable must be loaded onto this core.
        x, y, p = processor.get_coordinates()
        binary_path = (dao.get_common_binaries_directory() + os.sep +
                       'external_spike_source.aplx')
        executable_target = lib_map.ExecutableTarget(binary_path, x, y, p)

        # Setup region: a single word of info with flags, etc.
        # (16 bytes reserved).
        setup_size = 16

        # Reserve the regions and fill in the setup information.
        self.reserveMemoryRegions(spec, setup_size)
        self.writeSetupInfo(spec, subvertex)

        # Close out the specification file.
        spec.endSpec()
        spec.closeSpecFile()

        # Single memory write: the simulation duration in ticks goes
        # into the User1 word reserved for core p.
        ticks = INFINITE_SIMULATION
        if dao.run_time is not None:
            ticks = int((dao.run_time * 1000.0) / dao.machineTimeStep)
        user1_address = 0xe5007000 + 128 * p + 116
        memory_write_targets = [
            lib_map.MemWriteTarget(x, y, p, user1_address, ticks)]

        # Nothing extra to load for this spec.
        return executable_target, [], memory_write_targets
    def generateDataSpec(self, processor, subvertex, dao):
        """
        Model-specific construction of the data blocks necessary to build a
        single Delay Extension Block on one core.
        """
        # New data specification, tagged with the application identifier.
        spec = data_spec_gen.DataSpec(processor, dao)
        spec.initialise(self.core_app_identifier, dao)

        spec.comment("\n*** Spec for Delay Extension Instance ***\n\n")

        # Executable that must be loaded onto this core.
        x, y, p = processor.get_coordinates()
        binary_path = (dao.get_common_binaries_directory() + os.sep +
                       'delay_extension.aplx')
        executable_target = lib_map.ExecutableTarget(binary_path, x, y, p)

        # -------------------------------------------------------------
        # Size of the delay-parameter region: a 3-word header plus one
        # bit-field block per delay stage (one bit per atom, rounded up
        # to whole 32-bit words), all converted to bytes.
        header_words = 3
        atom_count = subvertex.hi_atom - subvertex.lo_atom + 1
        words_per_block = int(ceil(atom_count / 32.0))
        num_delay_blocks, delay_blocks = self.get_delay_blocks(subvertex)
        delay_params_bytes = 4 * (
            header_words + num_delay_blocks * words_per_block)

        spike_history_bytes = 0

        # Reserve memory:
        spec.comment("\nReserving memory space for data regions:\n\n")
        spec.reserveMemRegion(region=REGIONS.SYSTEM,
                              size=SETUP_SZ,
                              label='setup')
        spec.reserveMemRegion(region=REGIONS.DELAY_PARAMS,
                              size=delay_params_bytes,
                              label='delay_params')

        self.writeSetupInfo(spec, subvertex, spike_history_bytes)
        self.writeDelayParameters(spec, processor, subvertex,
                                  num_delay_blocks, delay_blocks)

        # Close out the specification.
        spec.endSpec()
        spec.closeSpecFile()

        # Only one memory write is needed: the run duration in ticks,
        # stored at the User1 word reserved for core p.
        ticks = INFINITE_SIMULATION
        if dao.run_time is not None:
            ticks = int((dao.run_time * 1000.0) / dao.machineTimeStep)
        user1_address = 0xe5007000 + 128 * p + 116
        memory_write_targets = [
            lib_map.MemWriteTarget(x, y, p, user1_address, ticks)]

        # No files to load for this spec.
        return executable_target, [], memory_write_targets
# Example 4  (scrape-artifact separator in the original listing; kept as a comment so the file stays parseable)
    def generateDataSpec(self, processor, subvertex, dao):
        """
        Model-specific construction of the data blocks necessary to build a
        single SpikeSourcePoisson on one core.

        :param processor: processor (core) the spec is generated for
        :param subvertex: placed subvertex supplying the atom range
        :param dao: data access object with machine/run configuration
        :returns: (executableTarget, loadTargets, memoryWriteTargets)
        """
        # Get simulation information:
        machineTimeStep = dao.machineTimeStep
        # Create new DataSpec for this processor:
        spec = data_spec_gen.DataSpec(processor, dao)
        spec.initialise(self.core_app_identifier,
                        dao)  # User-specified identifier

        spec.comment("\n*** Spec for SpikeSourcePoisson Instance ***\n\n")

        # Load the expected executable to the list of load targets for this
        # core and the load addresses (coordinates fetched once; the
        # original fetched them twice):
        x, y, p = processor.get_coordinates()
        executableTarget = \
            lib_map.ExecutableTarget(dao.get_common_binaries_directory() +
                                     os.sep + 'spike_source_poisson.aplx',
                                     x, y, p)

        # Sizes of the parameter and spike-history regions for the atoms
        # on this core.
        # NOTE(review): this assumes dao.run_time is not None here, unlike
        # the guarded computation below — a run with no run_time would raise.
        no_machine_time_steps = int(
            (dao.run_time * 1000.0) / dao.machineTimeStep)
        poissonParamsSz = self.getParamsBytes(subvertex.lo_atom,
                                              subvertex.hi_atom)
        spikeHistBuffSz = self.getSpikeBufferSize(subvertex.lo_atom,
                                                  subvertex.hi_atom,
                                                  no_machine_time_steps)

        # Reserve SDRAM space for memory areas:
        self.reserveMemoryRegions(spec, SETUP_SZ, poissonParamsSz,
                                  spikeHistBuffSz)

        # Write region 1 (system information on buffer size, etc);
        self.writeSetupInfo(spec, subvertex, spikeHistBuffSz)

        self.writePoissonParameters(spec, machineTimeStep, processor,
                                    subvertex.n_atoms)

        # End-of-Spec:
        spec.endSpec()
        spec.closeSpecFile()

        # Only one memory write: the simulation duration in ticks, stored
        # at the User1 word reserved for core p.
        memoryWriteTargets = list()
        simulationTimeInTicks = INFINITE_SIMULATION
        if dao.run_time is not None:
            simulationTimeInTicks = int(
                (dao.run_time * 1000.0) / dao.machineTimeStep)
        user1Addr = 0xe5007000 + 128 * p + 116  # User1 location reserved for core p
        memoryWriteTargets.append(
            lib_map.MemWriteTarget(x, y, p, user1Addr, simulationTimeInTicks))
        loadTargets = list()

        # Return list of executables, load files:
        return executableTarget, loadTargets, memoryWriteTargets
# Example 5  (scrape-artifact separator in the original listing; kept as a comment so the file stays parseable)
    def generateDataSpec(self, processor, subvertex, dao):
        """
        Construct the data specification for a robot (myorobot) motor
        control core: writes the system region and the control parameters,
        and returns the load/write targets for the core.

        :param processor: the processor (core) the spec is generated for
        :param subvertex: placed subvertex; its outgoing subedges supply
            the routing key
        :param dao: data access object with machine/run configuration
        :returns: (executable_target, load_targets, memory_write_targets)
        :raises Exception: if no outgoing subedge matches
            ``self.out_going_edge`` (no routing key to write)
        """
        # Create new DataSpec for this processor:
        spec = data_spec_gen.DataSpec(processor=processor, dao=dao)
        spec.initialise(self.core_app_identifier,
                        dao)  # User specified identifier

        spec.comment("\n*** Spec for robot motor control ***\n\n")

        # Load the expected executable to the list of load targets for this
        # core and the load addresses.  (A populationIdentity local that was
        # only referenced by commented-out code has been removed.)
        x, y, p = processor.get_coordinates()

        file_path = os.path.join(dao.get_common_binaries_directory(),
                                 'myorobot_motor_control.aplx')
        executable_target = lib_map.ExecutableTarget(file_path, x, y, p)
        memory_write_targets = list()

        # Simulation length in ticks is written to the User1 word of core p
        simulationTimeInTicks = INFINITE_SIMULATION
        if dao.run_time is not None:
            simulationTimeInTicks = int(
                (dao.run_time * 1000.0) / dao.machineTimeStep)
        user1Addr = 0xe5007000 + 128 * p + 116  # User1 location reserved for core p
        memory_write_targets.append(
            lib_map.MemWriteTarget(x, y, p, user1Addr, simulationTimeInTicks))

        # Reserve regions
        self.reserve_memory_regions(spec)

        # Write system info (magic word plus three reserved words)
        spec.switchWriteFocus(region=self.SYSTEM_REGION)
        spec.write(data=0xBEEF0000)
        spec.write(data=0)
        spec.write(data=0)
        spec.write(data=0)

        # Locate the correct outgoing subedge to obtain our routing key
        edge_key = None
        for subedge in subvertex.out_subedges:
            if subedge.edge == self.out_going_edge:
                edge_key = subedge.key
        if edge_key is None:
            # Previously this fell through and crashed with an opaque
            # TypeError on the OR below; fail with a clear message instead.
            raise Exception(
                "No outgoing subedge matching out_going_edge was found, "
                "so no routing key is available for the motor control core")

        # Write params to memory
        spec.switchWriteFocus(region=self.PARAMS)
        spec.write(data=edge_key | self.myoID)
        spec.write(data=spec.doubleToS1615(self.output_scale), sizeof='s1615')
        spec.write(data=self.sample_time)
        spec.write(data=spec.doubleToS1615(self.decay_factor), sizeof='s1615')
        spec.write(data=spec.doubleToS1615(self.kernel_amplitude),
                   sizeof='s1615')
        spec.write(data=self.threshold)
        spec.write(data=self.n_neurons)

        # End-of-Spec:
        spec.endSpec()
        spec.closeSpecFile()
        load_targets = list()

        # Return list of executables, load files:
        return executable_target, load_targets, memory_write_targets
def generate_output_raw(dao):
    """
    The nitty gritty.
    Generates the load targets and executable targets comprising the
    simulation and, where individual memory locations are to be written,
    generates memory-write targets.

    *Side effects*:
        writes data structures for the load targets to files in the
        binaries directories; populates the DAO with executable, load
        and memory-write targets

    :returns:
        Nothing
    """
    executable_targets = list()
    load_targets = list()
    mem_write_targets = list()
    chipsUsed = set()
    progress_bar = ProgressBar(len(dao.placements))

    # If we have host-side Spec Execution, execute all Data Specs now:
    try:
        dao.useHostBasedSpecExecutor = \
            conf.config.getboolean("SpecExecution", "specExecOnHost")
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        raise Exception("SpecExecutor could not find config information"
                        " indicating where Spec Execution should occur.")

    chips = None
    if dao.useHostBasedSpecExecutor:
        # One Chip record per physical chip, keyed by "x:y"
        chips = dict()
        for placement in dao.placements:
            (x, y, p) = placement.processor.get_coordinates()
            key = "{}:{}".format(x, y)
            if key not in chips:
                chips[key] = data_spec_executor.Chip(x, y)

    for placement in dao.placements:
        if not placement.subvertex.vertex.is_virtual():

            start_addr = None
            if dao.useHostBasedSpecExecutor:
                dao.spec_executor = data_spec_executor.SpecExecutor()

                (x, y, p) = placement.processor.get_coordinates()
                key = "{}:{}".format(x, y)
                chip = chips[key]

                # Data for this core is appended after whatever is already
                # allocated in the chip's SDRAM
                start_addr = chip.sdram_used + \
                    data_spec_constants.SDRAM_BASE_ADDR
                dao.spec_executor.setup(chip)

            subvertex = placement.subvertex

            myExecTargets, myLoadTargets, myMemWriteTargets = \
                subvertex.generateDataSpec(placement.processor, dao)

            # Add this core to the list of targets
            if myExecTargets is not None:
                executable_targets.append(myExecTargets)

            # Append the new dataSpec file to the list of load targets:
            if myLoadTargets is not None and len(myLoadTargets) > 0:
                load_targets.extend(myLoadTargets)

            # Add required memory writes to the list of writes:
            if myMemWriteTargets is not None and len(myMemWriteTargets) > 0:
                mem_write_targets.extend(myMemWriteTargets)

            x, y, p = placement.processor.get_coordinates()
            chipsUsed.add((x, y))

            if dao.useHostBasedSpecExecutor:
                # hostname is only needed on this branch; coordinates were
                # already fetched above
                hostname = dao.machine.hostname
                f_out = os.path.join(
                    dao.get_binaries_directory(),
                    "%s_appData_%d_%d_%d.dat" % (hostname, x, y, p))
                dao.spec_executor.finish(f_out)

                # TODO: Bring the following in line / neaten
                # ----------------------------------------------
                # Keep information on the memory region locations
                # for later report generation:
                index = "%d %d %d" % (x, y, p)
                dao.memMaps[index] = [
                    [i, s.wr_ptr_aligned, s.wr_ptr_offset, s.size,
                     s.memory, s.unfilled]
                    if s is not None else [i, 0, 0, 0, [], False]
                    for (i, s) in enumerate(dao.spec_executor.memory_slots)
                ]

                # Add the files produced by the Spec Executor to the
                # list of files to load:
                load_targets.append(
                    lib_map.LoadTarget(f_out, x, y, p, start_addr))
                mem_write_targets.append(
                    lib_map.MemWriteTarget(x, y, p, 0xe5007000 + 128 * p + 112,
                                           start_addr))
        progress_bar.update()

    # populate the DAO with executable, load and memory writing requirements
    dao.set_executable_targets(executable_targets)
    dao.set_load_targets(load_targets)
    dao.set_mem_write_targets(mem_write_targets)

    # Generate core map and routing table binaries for each chip
    for coord in dao.machine.get_coords_of_all_chips():
        x, y = coord['x'], coord['y']
        chip = dao.machine.get_chip(x, y)
        routeCount = get_route_count(chip)
        if (routeCount > 0 or (x, y) in chipsUsed) and not chip.is_virtual():
            fileName = generate_routing_table(chip, routeCount, dao)
            if (conf.config.getboolean("Reports", "reportsEnabled")
                    and conf.config.getboolean("Reports", "writeRouterReports")
                    and conf.config.getboolean("Reports",
                                               "writeRouterDatReport")):
                reports.generate_router_report(fileName, chip, dao)

            # Place in the list of targets to load at 119.5MB depth in SDRAM
            if not chip.virtual:
                load_targets.insert(
                    0,
                    lib_map.LoadTarget(
                        fileName, chip.x, chip.y, 0,
                        data_spec_constants.ROUTING_TABLE_ADDRESS))
    progress_bar.end()
    def generateDataSpec( self, processor, subvertex, dao ):
        """
        Model-specific construction of the data blocks necessary to build a group
        of IF_curr_exp neurons resident on a single core.

        :param processor: the processor (core) the spec is generated for
        :param subvertex: placed subvertex giving the atom range on this core
        :param dao: data access object with machine and run configuration
        :returns: (executableTarget, loadTargets, memoryWriteTargets)
        """
        # Get simulation information:
        machineTimeStep  = dao.machineTimeStep
        
        # Create new DataSpec for this processor:
        spec = data_spec_gen.DataSpec(processor, dao)
        spec.initialise(self.core_app_identifier, dao)  # User-specified identifier

        spec.comment("\n*** Spec for block of %s neurons ***\n" % (self.model_name))

        # Load the executable to the list of load targets for this core
        # and the load addresses:
        # TODO - AMEND FOR BINARY SEARCH PATH IF DESIRED
        x, y, p = processor.get_coordinates()
        
        # Split binary name into title and extension
        binaryTitle, binaryExtension = os.path.splitext(self._binary)

        # If we have an STDP mechanism, append its executable suffix to the
        # title so the STDP-enabled binary variant is loaded instead
        if self._stdp_mechanism is not None:
            binaryTitle = binaryTitle + "_" + self._stdp_mechanism.get_vertex_executable_suffix()

        # Rebuild executable name
        binaryName = os.path.join(dao.get_common_binaries_directory(), binaryTitle + binaryExtension)

        executableTarget = lib_map.ExecutableTarget(
            binaryName,
            x, y, p
        )
        
        # Calculate the number of time steps
        # NOTE(review): assumes dao.run_time is not None here, unlike the
        # guarded computation near the end of this method — confirm callers
        # always set a run time before reaching this model
        no_machine_time_steps = int((dao.run_time * 1000.0) / machineTimeStep)
        
        # (coordinates re-fetched; same values as above)
        x,y,p = processor.get_coordinates()
        
        # Calculate the size of the tables to be reserved in SDRAM:
        neuronParamsSz = self.getNeuronParamsSize(subvertex.lo_atom, 
                subvertex.hi_atom)
        synapseParamsSz = self.getSynapseParameterSize(subvertex.lo_atom, 
                subvertex.hi_atom)
        allSynBlockSz     = self.getExactSynapticBlockMemorySize(subvertex)
        spikeHistBuffSz = self.getSpikeBufferSize(subvertex.lo_atom, 
                subvertex.hi_atom, no_machine_time_steps)
        potentialHistBuffSz = self.getVBufferSize(subvertex.lo_atom, 
                subvertex.hi_atom, no_machine_time_steps)
        gsynHistBuffSz = self.getGSynBufferSize(subvertex.lo_atom, 
                subvertex.hi_atom, no_machine_time_steps)
        stdpRegionSz = self.getSTDPParameterSize(subvertex.lo_atom, 
                subvertex.hi_atom, self.in_edges)
        
        # Declare random number generators and distributions:
        self.writeRandomDistributionDeclarations(spec, dao)

        # Construct the data images needed for the Neuron:
        self.reserveMemoryRegions(spec, SETUP_SIZE, neuronParamsSz, 
                synapseParamsSz, SynapticManager.ROW_LEN_TABLE_SIZE,
                SynapticManager.MASTER_POPULATION_TABLE_SIZE, allSynBlockSz,
                spikeHistBuffSz, potentialHistBuffSz, gsynHistBuffSz,
                stdpRegionSz)

        self.writeSetupInfo(spec, subvertex, spikeHistBuffSz, 
                potentialHistBuffSz, gsynHistBuffSz)

        # Shift/scale mapping synaptic weights into the ring-buffer
        # fixed-point representation
        ring_buffer_shift = self.get_ring_buffer_to_input_left_shift(subvertex)
        weight_scale = self.get_weight_scale(ring_buffer_shift)
        logger.debug("Weight scale is {}".format(weight_scale))
        
        self.writeNeuronParameters(spec, machineTimeStep, processor, subvertex,
                ring_buffer_shift)

        self.writeSynapseParameters(spec, machineTimeStep, subvertex)
        
        self.writeSTDPParameters(spec, machineTimeStep, subvertex,
                                 weight_scale, REGIONS.STDP_PARAMS)

        self.writeRowLengthTranslationTable(spec, REGIONS.ROW_LEN_TRANSLATION)
        
        self.writeSynapticMatrixAndMasterPopulationTable(spec, subvertex, 
                                                         allSynBlockSz,
                                                         weight_scale,
                                                         REGIONS.MASTER_POP_TABLE,
                                                         REGIONS.SYNAPTIC_MATRIX)
        
        # Release the sublists now the synaptic data has been written
        for subedge in subvertex.in_subedges:
            subedge.free_sublist()

        # End the writing of this specification:
        spec.endSpec()
        spec.closeSpecFile() 

        # Only one memory write is required: the simulation duration in
        # ticks, stored at the User1 word reserved for core p
        memoryWriteTargets = list()
        simulationTimeInTicks = INFINITE_SIMULATION
        if dao.run_time is not None:
            simulationTimeInTicks = int((dao.run_time * 1000.0) 
                    /  dao.machineTimeStep)
        user1Addr = 0xe5007000 + 128 * p + 116 # User1 location reserved for core p
        memoryWriteTargets.append(lib_map.MemWriteTarget(x, y, p, user1Addr,
                                                         simulationTimeInTicks))
        loadTargets = list()

        # Return list of target cores, executables, files to load and 
        # memory writes to perform:
        return  executableTarget, loadTargets, memoryWriteTargets
    def generateDataSpec(self, processor, subvertex, dao):
        """
        Model-specific construction of the data blocks necessary to build a
        single SpikeSource Array on one core.
        """
        # Fresh data specification for this core, tagged with the
        # application identifier.
        spec = data_spec_gen.DataSpec(processor, dao)
        spec.initialise(self.core_app_identifier, dao)

        spec.comment("\n*** Spec for SpikeSourceArray Instance ***\n\n")

        # Record the executable this core must run.
        x, y, p = processor.get_coordinates()
        binary_path = (dao.get_common_binaries_directory() + os.sep +
                       'spike_source_array.aplx')
        executable_target = lib_map.ExecutableTarget(binary_path, x, y, p)

        # -------------------------------------------------------------
        # Reserve SDRAM space for memory areas:
        spec.comment("\nReserving memory space for spike data region:\n\n")

        time_step_usec = dao.machineTimeStep  # usec per simulation tick
        total_ticks = int((dao.run_time * 1000.0) / time_step_usec)
        (neuron_count, table_entries,
         spike_blocks, spike_region_bytes) = self.processSpikeArrayInfo(
            subvertex, time_step_usec)
        if spike_region_bytes == 0:
            # Always reserve at least one word.
            spike_region_bytes = 4

        # Memory requirements for the index and history regions:
        block_index_bytes = self.getBlockIndexBytes(len(table_entries))
        spike_history_bytes = self.getSpikeBufferSize(
            subvertex.lo_atom, subvertex.hi_atom, total_ticks)

        # Create and populate the data regions for the spike source array:
        self.reserveMemoryRegions(spec, SETUP_SZ, block_index_bytes,
                                  spike_region_bytes, spike_history_bytes)
        self.writeSetupInfo(spec, subvertex, spike_history_bytes)
        self.writeBlockIndexRegion(spec, subvertex, neuron_count,
                                   table_entries)
        self.writeSpikeDataRegion(spec, neuron_count, spike_blocks)

        # Finish the specification.
        spec.endSpec()
        spec.closeSpecFile()

        # The only memory write needed is the simulation duration in
        # ticks, placed in the User1 word reserved for core p.
        ticks = INFINITE_SIMULATION
        if dao.run_time is not None:
            ticks = int((dao.run_time * 1000.0) / dao.machineTimeStep)
        user1_address = 0xe5007000 + 128 * p + 116
        memory_write_targets = [
            lib_map.MemWriteTarget(x, y, p, user1_address, ticks)]

        # No separate files to load:
        return executable_target, [], memory_write_targets