Example #1
def run(simtime):
    """ 
    Commits the DB and runs the simulation.
    Switches on what to run are in the pyNN.spiNNaker section of pacman.cfg:
    """
    #    global original_pynn_script_directory
    pacman.original_pynn_script_directory = os.getcwd()

    print "[ pyNN ] : Running simulation - connection  with the DB will be now committed"
    db_run.set_runtime(simtime)
    db_run.close_connection()  # FIXME why do I need to close and reopen?

    if pacman.pacman_configuration.getboolean('pyNN.spiNNaker', 'run_pacman'):
        print "\n[ pyNN ] : Running pacman from", os.path.dirname(
            pacman.PACMAN_DIR)
        os.chdir(pacman.PACMAN_DIR)
        #        os.system('./pacman.sh %s' % db_run.db_abs_path)
        #        os.system('./pacman %s' % db_run.db_abs_path)
        # FIXME FIXME FIXME
        db = pacman.load_db(db_run.db_abs_path)
        db.clean_part_db()  # cleans the part_* tables
        pacman.run_pacman(db)

    if pacman.pacman_configuration.getboolean('pyNN.spiNNaker',
                                              'run_simulation'):
        board_address = pacman.pacman_configuration.get(
            'board', 'default_board_address')
        if not pacman.pacman_configuration.getboolean('pyNN.spiNNaker',
                                                      'run_pacman'):
            print "[ pyNN ] : cannot run the simulation before pacman; change your %s file" % pacman_cfg_filename
            quit(1)

        if pacman.pacman_configuration.getboolean('pyNN.spiNNaker',
                                                  'run_app_dump'):
            print "[ pyNN ] : ...running app_dump server and save results to %s" % TMP_RASTER_FILE
            # TODO better
            os.chdir(pacman.PACMAN_DIR)
            os.chdir(os.pardir)
            os.chdir('tools')
            os.system('python ./spike_receiver.py %s %s noplot &' %
                      (db_run.db_abs_path, TMP_RASTER_FILE))
            os.system('sleep 1')

        print "\n[ pyNN ] : Loading simulation on board %s (%s/tools/run.sh %s)\n" % (
            board_address, os.path.dirname(pacman.PACMAN_DIR), board_address)

        os.chdir(pacman.PACMAN_DIR)
        os.chdir(os.pardir)
        os.chdir('tools')
        #        os.system('./run.sh %s > /dev/null' % board_address)
        os.system('./run.sh %s' % board_address)
        print "[ pyNN ] : ... done ... waiting for simulation on board %s on to finish..." % (
            board_address)
        pacman.wait_for_simulation(
        )  # will wait for the simulation to finish it run_simulation is set to true in pacman.cfg
        print "[ pyNN ] : ...done!\n"
        os.system('sleep 2')
        os.chdir(pacman.original_pynn_script_directory)

    os.chdir(pacman.original_pynn_script_directory)
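
The run() function above is driven entirely by switches in pacman.cfg. Below is a minimal, hedged sketch of how such a configuration could look and how the flags are read: the section and option names come from the getboolean()/get() calls above, the option values and board address are hypothetical, and pacman.pacman_configuration is assumed to behave like a standard ConfigParser instance.

# Sketch only: sample pacman.cfg content and a plain standard-library reader.
# The option values and the board address are illustrative assumptions.
import ConfigParser                 # 'configparser' in Python 3
from StringIO import StringIO

SAMPLE_CFG = """\
[pyNN.spiNNaker]
run_pacman = true
run_simulation = true
run_app_dump = true

[board]
default_board_address = 192.168.240.1
"""

cfg = ConfigParser.SafeConfigParser()
cfg.readfp(StringIO(SAMPLE_CFG))
print cfg.getboolean('pyNN.spiNNaker', 'run_pacman')       # True
print cfg.get('board', 'default_board_address')            # 192.168.240.1
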
Example #2
def run(simtime):
    """ 
    Commits the DB and runs the simulation.
    Switches on what to run are in the pyNN.spiNNaker section of pacman.cfg:
    """
#    global original_pynn_script_directory
    pacman.original_pynn_script_directory = os.getcwd()
    
    print "[ pyNN ] : Running simulation - connection  with the DB will be now committed"
    db_run.set_runtime(simtime)
    db_run.close_connection()   # FIXME why do I need to close and reopen?
    
    
    if pacman.pacman_configuration.getboolean('pyNN.spiNNaker', 'run_pacman'):
        print "\n[ pyNN ] : Running pacman from", os.path.dirname(pacman.PACMAN_DIR)
        os.chdir(pacman.PACMAN_DIR)
#        os.system('./pacman.sh %s' % db_run.db_abs_path)
#        os.system('./pacman %s' % db_run.db_abs_path)        
        # FIXME FIXME FIXME
        db = pacman.load_db(db_run.db_abs_path)
        db.clean_part_db()              # cleans the part_* tables
        pacman.run_pacman(db)
        
    print "[ pyNN ] : Building network : %f seconds" % (time.time()-t0)
        

    if pacman.pacman_configuration.getboolean('pyNN.spiNNaker', 'run_simulation'):
        board_address = pacman.pacman_configuration.get('board', 'default_board_address')
        if not pacman.pacman_configuration.getboolean('pyNN.spiNNaker', 'run_pacman'):
            print "[ pyNN ] : cannot run the simulation before pacman; change your %s file" % pacman_cfg_filename
            quit(1)

        if pacman.pacman_configuration.getboolean('pyNN.spiNNaker', 'run_app_dump'):
            print "[ pyNN ] : ...running app_dump server and save results to %s"    % TMP_RASTER_FILE
            # TODO better
            os.chdir(pacman.PACMAN_DIR)
            os.chdir(os.pardir)
            os.chdir('tools')
            os.system('python ./spike_receiver.py %s %s noplot &' % (db_run.db_abs_path, TMP_RASTER_FILE))
            os.system('sleep 1')

            
        print "\n[ pyNN ] : Loading simulation on board %s (%s/tools/run.sh %s)\n" % (board_address, os.path.dirname(pacman.PACMAN_DIR), board_address)

        os.chdir(pacman.PACMAN_DIR)
        os.chdir(os.pardir)        
        os.chdir('tools')
#        os.system('./run.sh %s > /dev/null' % board_address)
        os.system('./run.sh %s' % board_address)
        print "[ pyNN ] : ... done ... waiting for simulation on board %s on to finish..." % (board_address)
        pacman.wait_for_simulation()    # will wait for the simulation to finish it run_simulation is set to true in pacman.cfg
        print "[ pyNN ] : ...done!\n"
        os.system('sleep 2')
        global complete_spike_list
        complete_spike_list = numpy.loadtxt(TMP_RASTER_FILE)

        os.chdir(pacman.original_pynn_script_directory)

    os.chdir(pacman.original_pynn_script_directory)
Example #3
    def plot(self, filename, plotting=False):
        db = pacman.load_db(self.db_address)

        # Find a global base neuron ID for each population
        populations = db.get_populations_size_type_method()
        base = 0
        for pop in populations:
            pop['base_id'] = base
            base += pop['size']

        # Fetch the routing key map and index it according to chip/processor coordinates
        key_map = db.get_routing_key_map()
        max_x = max(pop['x'] for pop in key_map) + 1
        max_y = max(pop['y'] for pop in key_map) + 1
        max_p = max(pop['p'] for pop in key_map) + 1
        map_temp = [[[[] for p in range(max_p)] for y in range(max_y)]
                    for x in range(max_x)]
        for pop in key_map:
            map_temp[pop['x']][pop['y']][pop['p']].append(pop)

        key_map = map_temp

        # Translate the routing keys into sequential global neuron IDs
        spikes = list()
        for (key, time) in self.routing_keys:
            chip_x = key >> 24 & 0xFF
            chip_y = key >> 16 & 0xFF
            processor = key >> 11 & 0x1F
            neuron_id = key & 0x7FF
            for pop in key_map[chip_x][chip_y][processor]:
                if pop['start_id'] <= neuron_id <= pop['end_id']:
                    base_id = populations[pop['population_id'] - 1][
                        'base_id']  # population_id is 1-indexed in the DB; the populations list is 0-indexed
                    neuron_id = base_id + neuron_id - pop['start_id'] + pop[
                        'offset']  # TODO 'offset' should be called 'part_population_offset'
                    spikes.append((neuron_id, time))
                    break

        # Save the spikes in neurotools format
        self.write_neurotools_spike_file(filename, spikes)

        # Plot the spikes!
        if plotting:
            pylab.scatter([time for (neuron_id, time) in spikes],
                          [neuron_id for (neuron_id, time) in spikes],
                          c='green',
                          s=1)

            # Draw lines showing different populations
            for pop in populations:
                pylab.axhline(y=pop['base_id'], color='red', alpha=0.25)
                pylab.annotate(pop['label'], (0, pop['base_id']),
                               color='red',
                               alpha=0.25)

            # Set the plot axis and show
            #pylab.axis([0, 1000, 0, 100]) #TODO better mechanism for setting axis
            pylab.show()
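
The bit shifts in plot() above assume a fixed routing-key layout: bits 31..24 carry the chip x coordinate, bits 23..16 the chip y coordinate, bits 15..11 the processor, and bits 10..0 the neuron ID. Below is a small self-contained sketch of that decoding; the example key is hypothetical and built with the same layout.

# Sketch only: decode a routing key using the layout assumed by plot() above.
def decode_routing_key(key):
    return {'chip_x': key >> 24 & 0xFF,       # bits 31..24
            'chip_y': key >> 16 & 0xFF,       # bits 23..16
            'processor': key >> 11 & 0x1F,    # bits 15..11
            'neuron_id': key & 0x7FF}         # bits 10..0

key = (1 << 24) | (2 << 16) | (3 << 11) | 42  # hypothetical: chip (1, 2), processor 3, neuron 42
assert decode_routing_key(key) == {'chip_x': 1, 'chip_y': 2,
                                   'processor': 3, 'neuron_id': 42}
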
Example #4
def callback(db_path, chunk, i):
    
    db = pacman.load_db(db_path)
    image_map = db.get_image_map()
        
    for c in chunk:
        if INFO:
            print "[ synapse writer ] : [ Process %d ] : evaluating chip %d %d" % (i, c[0], c[1])
        compute_sdram_file(c[0], c[1], db, image_map, process_id=i)
    
    db.close_connection(commit=False)
Example #5
def callback(db_path, chunk, i):

    db = pacman.load_db(db_path)
    image_map = db.get_image_map()

    for c in chunk:
        if INFO:
            print "[ synapse writer ] : [ Process %d ] : evaluating chip %d %d" % (
                i, c[0], c[1])
        compute_sdram_file(c[0], c[1], db, image_map, process_id=i)

    db.close_connection(commit=False)
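
The callback() above processes one chunk of (x, y) chips per worker process. Below is a hedged sketch of how such chunks might be built and dispatched; the chunking helper, the process count and the multiprocessing dispatch are illustrative assumptions, not taken from the surrounding code.

# Sketch only: split the chip list into per-process chunks and run callback() in parallel.
from multiprocessing import Process

def split_into_chunks(items, n_chunks):
    # round-robin split of a list into n_chunks sub-lists
    return [items[i::n_chunks] for i in range(n_chunks)]

def run_callbacks(db_path, chip_list, n_processes=4):
    processes = []
    for i, chunk in enumerate(split_into_chunks(chip_list, n_processes)):
        p = Process(target=callback, args=(db_path, chunk, i))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
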
Example #6
    def plot(self, filename, plotting=False):
        db = pacman.load_db(self.db_address)

        # Find a global base neuron ID for each population
        populations = db.get_populations_size_type_method()
        base = 0
        for pop in populations:
            pop['base_id'] = base
            base += pop['size']

        # Fetch the routing key map and index it according to chip/processor coordinates
        key_map = db.get_routing_key_map()
        max_x = max(pop['x'] for pop in key_map) + 1
        max_y = max(pop['y'] for pop in key_map) + 1
        max_p = max(pop['p'] for pop in key_map) + 1
        map_temp = [[[[] for p in range(max_p)] for y in range(max_y)] for x in range(max_x)]
        for pop in key_map:
            map_temp[pop['x']][pop['y']][pop['p']].append(pop)

        key_map = map_temp

        # Translate the routing keys into sequential global neuron IDs
        spikes = list()
        for (key, time) in self.routing_keys:
            chip_x = key >> 24 & 0xFF
            chip_y = key >> 16 & 0xFF
            processor = key >> 11 & 0x1F
            neuron_id = key & 0x7FF
            for pop in key_map[chip_x][chip_y][processor]:
                if pop['start_id'] <= neuron_id <= pop['end_id']:
                    base_id = populations[pop['population_id'] - 1]['base_id'] # population_id is 1-indexed in the DB; the populations list is 0-indexed
                    neuron_id = base_id + neuron_id - pop['start_id'] + pop['offset'] # TODO 'offset' should be called 'part_population_offset'
                    spikes.append((neuron_id, time))
                    break

        # Save the spikes in neurotools format
        self.write_neurotools_spike_file(filename, spikes)
        
        # Plot the spikes!
        if plotting:    
            pylab.scatter([time for (neuron_id, time) in spikes], [neuron_id for (neuron_id, time) in spikes], c='green', s=1)

            # Draw lines showing different populations
            for pop in populations:
                pylab.axhline(y=pop['base_id'], color='red', alpha=0.25)
                pylab.annotate(pop['label'], (0, pop['base_id']), color='red', alpha=0.25)


            # Set the plot axis and show
            #pylab.axis([0, 1000, 0, 100]) #TODO better mechanism for setting axis
            pylab.show()
Example #7
def run(db_run, runtime):
    """ 
    Commits the DB and runs the simulation.
    Switches on what to run are in the nengo_pacman_interface section of pacman.cfg:
    """
#    global original_pynn_script_directory
    original_script_directory = os.getcwd()
    
    print "[ nengo_pacman_interface ] : Running simulation - connection  with the DB will be now committed"
    db_run.set_runtime(runtime)
    db_run.close_connection()   # FIXME why do I need to close and reopen?
    
    
    if pacman.pacman_configuration.getboolean('nengo_interface', 'run_pacman'):
        print "\n[ nengo_pacman_interface ] : Running pacman from", os.path.dirname(pacman.PACMAN_DIR)
        os.chdir(pacman.PACMAN_DIR)
#        os.system('./pacman.sh %s' % db_run.db_abs_path)
#        os.system('./pacman %s' % db_run.db_abs_path)        
        # FIXME FIXME FIXME
        db = pacman.load_db(db_run.db_abs_path)
        db.clean_part_db()              # cleans the part_* tables
#        pacman.run_pacman(db)
        pacman.run_pacman(db, simulator='nengo')

        print "[ nengo_pacman_interface ] closing db...."
        db.close_connection()       # will close the connection to the db and commit the transaction    


    if pacman.pacman_configuration.getboolean('nengo_interface', 'run_simulation'):
        board_address = pacman.pacman_configuration.get('board', 'default_board_address')
        if not pacman.pacman_configuration.getboolean('nengo_interface', 'run_pacman'):
            print "[ nengo_pacman_interface ] : cannot run the simulation before pacman; change your %s file" % pacman_cfg_filename
            quit(1)
            
        print "\n[ nengo_pacman_interface ] : Running simulation on board %s (%s/tools/run.sh %s)\n" % (board_address, os.path.dirname(pacman.PACMAN_DIR), board_address)

        os.chdir(pacman.PACMAN_DIR)
        os.chdir(os.pardir)        
        os.chdir('tools')
        os.system('./run.sh %s' % board_address)
Example #8
    memory_pointer = BASE
    
    for c in used_cores:
        out_file_string += compute_sdram_entries(db, c, memory_pointer, process_id=process_id)
        memory_pointer = BASE + len(out_file_string)    # the next core's block starts right after the data written so far
    
    # If there is something to write, write the file
    if len(out_file_string) > 0:
        with open(filename, 'w+') as f:
            f.write(out_file_string)


if __name__ == '__main__':
    if DEBUG:
        print "\n----- creating SDRAM files"
    db = pacman.load_db(sys.argv[1])       # IMPORTS THE DB (it will also load the model library by default)
    global n_synapses
    n_synapses = 0
    print("Loading DB: %g" %timer.elapsedTime())
    
    image_map = db.get_image_map()

    chip_map = [(c['x'], c['y']) for c in image_map]
    chip_map = list(set(chip_map))  # removing duplicates http://love-python.blogspot.co.uk/2008/09/remove-duplicate-items-from-list-using.html

    
    for c in chip_map:
        x = c[0]
        y = c[1]    
        filename = './binaries/SDRAM_%d_%d.dat' % (x, y)
#            filename = '/tmp/SDRAM_%d_%d.dat' % (x, y)
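
The loop at the top of this example keeps a running SDRAM pointer: each core's entries are appended to out_file_string and the next call to compute_sdram_entries() is told where its block will start (BASE plus everything written so far). Below is a tiny self-contained illustration of that bookkeeping; BASE and the entry generator are hypothetical stand-ins, not the real values or compute_sdram_entries().

# Sketch only: illustrate the memory_pointer bookkeeping with fake entries.
BASE = 0x70000000                      # hypothetical SDRAM base address

def fake_sdram_entries(core, pointer):
    # stand-in for compute_sdram_entries(): one text record per core,
    # tagged with the address where its block starts
    return 'core %d @ 0x%08x\n' % (core, pointer)

out_file_string = ''
memory_pointer = BASE
for core in (1, 2, 3):
    out_file_string += fake_sdram_entries(core, memory_pointer)
    memory_pointer = BASE + len(out_file_string)   # next block starts right after what was written
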
Example #9
                'destination': 1 << 3,
                'mask': 0xFFFF0000,
                'key': r_key_mgmt
            })

    for i in range(max_router + 1):
        filename = '%s/routingtbl_%d_0.dat' % (pacman.BINARIES_DIRECTORY, i)
        write_routing_file(filename, routers[i])


def patch_router_for_output_board(db):
    routes = read_routing_file(monitoring_chip_rt_file)
    for r in routes:
        if r['destination'] & (1 << (OFFSET_CORE_ROUTE + MONITORING_CORE)):
            r['destination'] = r['destination'] | LINK_W
            if DEBUG:
                print '[ routing_patcher ] : routing key %s WEST' % hex(
                    r['key'])

    write_routing_file(monitoring_chip_rt_file, routes)  # write back once, after all routes have been patched (see the sketch after this example)


if __name__ == '__main__':
    print "\n----- patching router"
    db = pacman.load_db(sys.argv[1])  # IMPORTS THE DB (it will also load the model library by default)
#    routes = patch_router_for_robot(db)
#    patch_router_for_sensors(db)
# writeback routing file
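
patch_router_for_output_board() above treats each route's destination as a bit mask: low bits select inter-chip links and higher bits, starting at OFFSET_CORE_ROUTE, select cores. Below is a hedged, self-contained sketch of that patching step; the constant values are assumptions chosen for illustration, not taken from the code above.

# Sketch only: if a route targets the monitoring core, also send it out of the West link.
OFFSET_CORE_ROUTE = 6          # assumed: core bits start after the link bits
MONITORING_CORE = 1            # assumed monitoring core number
LINK_W = 1 << 3                # assumed bit selecting the West link

route = {'destination': 1 << (OFFSET_CORE_ROUTE + MONITORING_CORE),
         'mask': 0xFFFF0000,
         'key': 0xFEFE0000}    # hypothetical entry in the shape used above

if route['destination'] & (1 << (OFFSET_CORE_ROUTE + MONITORING_CORE)):
    route['destination'] |= LINK_W     # keep the core bit, add the West link

assert route['destination'] & LINK_W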