Example 1
    def partition_raw(self, machine, vertices, no_machine_time_steps,
                      machine_time_step_us, placer):
        '''
        entry method for partitioning
        '''
        placements = list()
        subvertices = list()

        # sort vertices by the cardinality of their placement constraints
        sort = lambda vertex: vertex.constraints.placement_cardinality
        vertices = sorted(vertices, key=sort, reverse=True)
        
        n_atoms = 0
        for vertex in vertices:
            n_atoms += vertex.atoms
        self.progress = ProgressBar(n_atoms)
        self.partitioned = dict()

        # Partition one vertex at a time
        for vertex in vertices:
            # only partition real vertices; virtual ones are handled separately
            if vertex.is_virtual():
                self.partition_virtual_vertexes(vertex, placements,
                                                subvertices, placer)
            elif len(vertex.subvertices) == 0:
                self.partition_vertexes(vertex, no_machine_time_steps,
                                        machine_time_step_us, subvertices,
                                        placer, placements)
            else:
                print "Vertex {} is already partitioned!".format(vertex.label)
        self.progress.end()
    
        subedges = self.generate_sub_edges(subvertices)
        
        return subvertices, subedges, placements
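All of these examples use the same progress-reporting pattern: build a ProgressBar with the total amount of work, call update() once per completed unit (optionally passing a count, as Example 8 does with whole atom ranges), and call end() when finished. A minimal sketch of just that pattern, where items and do_work are placeholder names:

progress = ProgressBar(len(items))   # total number of work units
for item in items:
    do_work(item)                    # placeholder for the real work
    progress.update()                # advance the bar by one unit
progress.end()                       # close the bar once everything is done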
Example 2
    def filterSubEdges(self, dao):
        """Go through the newly created list of sub-edges and call a
        model-specific function on each one; this allows the application to
        prune sub-edges that are not really needed.
        """
        logger.info("* Running pre-routing sub-edge pruning *")
        new_subedges = list()
        progress = ProgressBar(len(dao.subedges))
        for subedge in dao.subedges:
            if subedge.edge.filterSubEdge(subedge):
                subedge.pruneable = True
            else:
                new_subedges.append(subedge)
            progress.update()
        dao.subedges = new_subedges

        progress.end()
Example 3
    def filterSubEdges(self, dao):
        """Go through the newly created list of sub-edges and call a model
        specific function on each one, this allows the application to prune
        sub-edges that are not really needed.
        """
        logger.info("* Running pre-routing sub-edge pruning *")
        new_subedges = list()
        progress = ProgressBar(len(dao.subedges))
        for subedge in dao.subedges:
            if subedge.edge.filterSubEdge(subedge):
                subedge.pruneable = True
            else:
                new_subedges.append(subedge)
            progress.update()
        dao.subedges = new_subedges

        progress.end()
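The pruning decision itself is delegated to the edge model: filterSubEdge(subedge) returns True when the sub-edge can be dropped. The sketch below shows what such a model-specific filter might look like; MyProjectionEdge and its connectivity set are invented for illustration, and the presubvertex attribute and lo_atom/hi_atom ranges are assumed from the other examples rather than taken from a real pacman103 edge class.

class MyProjectionEdge(object):
    def __init__(self, connectivity):
        # connectivity: set of (pre_atom, post_atom) pairs that are connected
        self.connectivity = connectivity

    def filterSubEdge(self, subedge):
        pre = subedge.presubvertex
        post = subedge.postsubvertex
        for (i, j) in self.connectivity:
            if (pre.lo_atom <= i <= pre.hi_atom
                    and post.lo_atom <= j <= post.hi_atom):
                return False   # at least one connection: keep this sub-edge
        return True            # no connections in range: sub-edge is prunable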
Example 4
    def route_raw(machine,
                  sub_vertexes,
                  k=1,
                  l=0,
                  m=0,
                  BW_PER_ROUTE_ENTRY=0.01,
                  MAX_BW=250,
                  dao=None):
        """
        Modified by peter.choy 06.08.13

        Generates a list of routings for subedges in a machine...

        For the purposes of this algorithm, we are viewing the chips as 'nodes' on a graph.
        The weight of the arcs is a function of the number of routing table entries,
        the arc length, and the remaining bandwidth along the arc's corresponding
        connection.

        The inner function of the algorithm is a slightly modified Dijkstra's algorithm.
        The outer loop iterates over the sub-vertices and routes each of their
        unpruned outgoing sub-edges. Repeating Dijkstra's algorithm for each source
        in this way is greedy, and so the iterative process may or may not be necessary.

        :param `pacman103.lib.lib_machine.Machine` machine:
            machine from which to allocate processors.
        :param list sub_vertexes:
            list of :py:class:`pacman103.lib.graph.Subvertex` instances whose
            outgoing subedges are to be routed across the machine.
        :param k:
            constant that is added to arc weights to represent path length
        :param l:
            constant controlling contribution of Q to total arc weights
        :param m:
            constant controlling contribution of T to total arc weights
        :returns:
            list of :py:class:`pacman103.lib.lib_map.Routing` instances.
        """

        #print("")
        #print("Starting stopwatch...")
        #start_time = clock()

        #print("")
        #print("Initialising routing data structures...")
        routings = []

        nodes_info = DijkstraRouting.initiate_node_info(machine, MAX_BW)
        dijkstra_tables = DijkstraRouting.initiate_dijkstra_tables(machine)
        #print("")
        #print("Starting routing...")
        #print("")

        DijkstraRouting.update_weights(nodes_info, machine, l, m, k, MAX_BW)

        run_num = 0
        pruned_sub_edges = 0
        edge_considered = 0
        # each subvertex represents a core on the board
        progress = ProgressBar(len(sub_vertexes))
        for subVertex in sub_vertexes:
            subedges = subVertex.out_subedges

            # locate all destination and source coords
            dest_processors = []
            subedges_to_route = list()
            xs, ys, ps = subVertex.placement.processor.get_coordinates()
            for subedge in subedges:
                if not subedge.pruneable:
                    dest_processors.append(
                        subedge.postsubvertex.placement.processor)
                    subedges_to_route.append(subedge)
                else:
                    pruned_sub_edges += 1

            if (len(dest_processors) != 0):

                # Update the weights according to the changes in routing
                # tables and available bandwidth
                #logger.debug("updating weights")
                DijkstraRouting.update_weights(nodes_info, machine, l, m, k,
                                               MAX_BW)

                # Reset the temporary storage of lowest cost
                DijkstraRouting.reset_tables(dijkstra_tables)

                # SD Commented this out 20/1/14, to avoid extraneous printing
                # AS integrated this into logger.debug
                #logger.debug("***************************************************")
                #logger.debug("Source node is ({}, {})".format(xs, ys))
                #logger.debug("Destination nodes are(")
                #for processor in dest_processors:
                #    xd, yd, pd = processor.get_coordinates()
                #    logger.debug("({}, {}, {})".format(xd, yd, pd))
                #logger.debug("")

                # Set the first activated node in this run as the source
                xa, ya = xs, ys
                dijkstra_tables["{}:{}".format(xa, ya)]["activated?"] = True
                dijkstra_tables["{}:{}".format(xa, ya)]["lowest cost"] = 0
                # The cost at the source node is zero for obvious reasons.
                # Note that it is NOT 'None'

                DijkstraRouting.\
                    properate_costs_till_reached_destinations(dijkstra_tables,
                                                              nodes_info,
                                                              xa, ya,
                                                              dest_processors,
                                                              xs, ys)

                #logger.debug("Reached destination from source, retracing.")
                #helpful output data
                if conf.config.getboolean( "Routing", "generate_graphs" ) and \
                    dao is not None: # AM
                    output_folder = dao.get_reports_directory("routing_graphs")
                    router_utility = util.RouterUtil(
                        new_output_folder=output_folder)
                    router_utility.output_routing_weight(
                        router_utility,
                        dijkstra_tables,
                        machine,
                        graph_label="routing weights",
                        routing_file_name="routingWeights" +
                        str(edge_considered))
                    edge_considered += 1

                for subedge in subedges_to_route:
                    key, mask, = subedge.key, subedge.mask
                    key_mask_combo = subedge.key_mask_combo
                    dest = subedge.postsubvertex
                    xd, yd, pd = dest.placement.processor.get_coordinates()
                    routing = lib_map.Routing(subedge)
                    xt, yt = DijkstraRouting.retrace_back_to_source(
                        xd, yd, machine, nodes_info, dijkstra_tables, key,
                        mask, key_mask_combo, routing, pd, BW_PER_ROUTE_ENTRY)
                    # SD Commented out 20/1/14 to remove extraneous printing.
                    # DijkstraRouting.printRoute(xt, yt, xs, ys, dijkstra_tables, routing)

                    subedge.routing = routing

                    routings.append(routing)

                    run_num += 1

                # SD Commented this out 20/1/14, to avoid extraneous printing
                # AS modified to debug format
                #print ""
                #logger.debug("Route number {} completed from ({}, {}) "
                #             "to ({}, {})".format(run_num, xs, ys, xd, yd))
                #logger.debug("route took {}".format(DijkstraRouting.printRoute(xd, yd, xs, ys, dijkstra_tables, routing)))
                #logger.debug("*********************************"
                #             "**********************************")

                #if (run_num % 5000) == 0:
                #    logger.debug("{} routes done, please wait"
                #                 "...".format(run_num))

            progress.update()

        #finish_time = clock()

        #elapsed_time = finish_time - start_time

        #print("")
        #print("Routing finished! Created %d routes in %s seconds.") % (run_num, elapsed_time)
        #print("")
        #logger.debug("gained benefit from {} pruned routings".format(pruned_sub_edges))
        progress.end()
        return routings
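The docstring above describes each arc weight as a combination of a per-hop constant k, a routing-table term whose contribution is controlled by l, and a bandwidth term controlled by m, but update_weights itself is not shown. The function below is only a rough sketch of a weight with that shape; the names occupancy and free_bandwidth and the exact formula are assumptions, not the actual pacman103 implementation.

def arc_weight(k, l, m, occupancy, free_bandwidth, max_bw):
    # k: fixed cost per hop (path length)
    # second term grows with routing-table occupancy on the target node
    # third term grows as the remaining bandwidth on the link shrinks
    t = (max_bw - free_bandwidth) / float(max_bw)
    return k + l * occupancy + m * t

With the defaults k=1, l=0 and m=0 the weight reduces to a constant cost per hop, i.e. plain shortest-path routing.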
Example 5
        struct_file = txrx.checkfile("sark-130.struct")
        config_file = txrx.checkfile("spin{}.conf".format(5))
        boot.boot(hostname, boot_file, config_file, struct_file)
        time.sleep(2.0)

load_targets_path = os.path.join(directory, "pickled_load_targets")
mem_write_targets_path = os.path.join(directory, "pickled_mem_write_targets")
load_targets_file = open(load_targets_path, "rb")
load_targets = pickle.load(load_targets_file)
load_targets_file.close()
mem_write_targets_file = open(mem_write_targets_path, "rb")
mem_write_targets = pickle.load(mem_write_targets_file)
mem_write_targets_file.close()

print("Loading data", file=sys.stderr)
load_progress = ProgressBar(len(load_targets))
for target in load_targets:
    txrx.select(target.x, target.y, target.p)
    filename = os.path.basename(target.filename)
    filename = os.path.join(directory, filename)
    txrx.memory_calls.write_mem_from_file(target.address, scamp.TYPE_WORD,
                                          filename)
    load_progress.update()
load_progress.end()

print("Writing memory", file=sys.stderr)
mem_progress = ProgressBar(len(mem_write_targets))
for target in mem_write_targets:
    txrx.select(target.x, target.y, target.p)
    txrx.memory_calls.write_mem(target.address, scamp.TYPE_WORD,
                                struct.pack("I", target.data))
    mem_progress.update()
mem_progress.end()
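For context, the two pickle files read above would have been written earlier in the tool chain. A hypothetical producer is sketched below; dump_targets and its argument names are invented, and only the file names and the use of pickle come from the script itself.

import os
import pickle

def dump_targets(directory, load_targets, mem_write_targets):
    # Serialise the load and memory-write targets so the loader script
    # above can replay them later.
    with open(os.path.join(directory, "pickled_load_targets"), "wb") as f:
        pickle.dump(load_targets, f)
    with open(os.path.join(directory, "pickled_mem_write_targets"), "wb") as f:
        pickle.dump(mem_write_targets, f)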
Example 6
def generate_output_raw(dao):
    """
    The nitty gritty.
    Generates the load targets and executable targets that comprise the
    simulation and, where individual memory locations are to be written,
    generates memWrites.

    This is now largely finished. Data structures are generated for edges
    and the data structure generation for vertices is merely a prototype.

    *Side effects*:
        writes data structures for the load targets to files in the binaries directories

    :returns:
        Nothing       
    """

    executable_targets, load_targets, mem_write_targets = list(), list(), list()
    chipsUsed = set()
    progress_bar = ProgressBar(len(dao.placements))

    # If we have host-side Spec Execution, execute all Data Specs now:
    try:
        dao.useHostBasedSpecExecutor = \
            conf.config.getboolean( "SpecExecution", "specExecOnHost" )
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        raise Exception("SpecExecutor could not find config information"
                        " indicating where Spec Execution should occur.")

    chips = None
    if dao.useHostBasedSpecExecutor == True:
        chips = dict()
        for placement in dao.placements:
            (x, y, p) = placement.processor.get_coordinates()
            key = "{}:{}".format(x, y)
            if not key in chips:
                chips[key] = data_spec_executor.Chip(x, y)

    for placement in dao.placements:
        if not placement.subvertex.vertex.is_virtual():

            start_addr = None
            if dao.useHostBasedSpecExecutor == True:
                dao.spec_executor = data_spec_executor.SpecExecutor()

                (x, y, p) = placement.processor.get_coordinates()
                key = "{}:{}".format(x, y)
                chip = chips[key]

                start_addr = chip.sdram_used + \
                    data_spec_constants.SDRAM_BASE_ADDR
                dao.spec_executor.setup(chip)

            subvertex = placement.subvertex

            myExecTargets, myLoadTargets, myMemWriteTargets = \
                 subvertex.generateDataSpec(placement.processor, dao)

            # Add this core to the list of targets
            if myExecTargets is not None:
                executable_targets.append(myExecTargets)

            # Append the new dataSpec file to the list of load targets:
            if myLoadTargets is not None and len(myLoadTargets) > 0:
                load_targets.extend(myLoadTargets)

            # Add required memory writes to the list of writes:
            if myMemWriteTargets is not None and len(myMemWriteTargets) > 0:
                mem_write_targets.extend(myMemWriteTargets)

            x, y, p = placement.processor.get_coordinates()
            chipsUsed.add((x, y))

            hostname = dao.machine.hostname

            if dao.useHostBasedSpecExecutor == True:
                (x, y, p) = placement.processor.get_coordinates()
                f_out = os.path.join(
                    dao.get_binaries_directory(),
                    "%s_appData_%d_%d_%d.dat" % (hostname, x, y, p))
                dao.spec_executor.finish(f_out)

                # TODO: Bring the following in line / neaten
                # ----------------------------------------------
                # Keep information on the memory region locations
                # for later report generation:
                index = "%d %d %d" % (x, y, p)
                dao.memMaps[index] = [
                    [i, s.wr_ptr_aligned, s.wr_ptr_offset, s.size, \
                                               s.memory, s.unfilled] \
                    if s is not None else [i, 0, 0, 0, [], False]
                        for (i, s) in enumerate(dao.spec_executor.memory_slots)
                ]

                # Add the files produced by the Spec Executor to the
                # list of files to load:
                load_targets.append(
                    lib_map.LoadTarget(f_out, x, y, p, start_addr))
                mem_write_targets.append(
                    lib_map.MemWriteTarget(x, y, p, 0xe5007000 + 128 * p + 112,
                                           start_addr))
        progress_bar.update()

    # populate the DAO with executable, load and memory writing requirements
    dao.set_executable_targets(executable_targets)
    dao.set_load_targets(load_targets)
    dao.set_mem_write_targets(mem_write_targets)

    # Generate core map and routing table binaries for each chip
    for coord in dao.machine.get_coords_of_all_chips():
        x, y = coord['x'], coord['y']
        chip = dao.machine.get_chip(x, y)
        routeCount = get_route_count(chip)
        if (routeCount > 0 or (x, y) in chipsUsed) and not chip.is_virtual():
            fileName = generate_routing_table(chip, routeCount, dao)
            if (conf.config.getboolean("Reports", "reportsEnabled")
                    and conf.config.getboolean("Reports", "writeRouterReports")
                    and conf.config.getboolean("Reports",
                                               "writeRouterDatReport")):
                reports.generate_router_report(fileName, chip, dao)

            # Place in the list of targets to load at 119.5MB depth in the SDRAM
            if not chip.virtual:
                load_targets.insert(
                    0,
                    lib_map.LoadTarget(
                        fileName, chip.x, chip.y, 0,
                        data_spec_constants.ROUTING_TABLE_ADDRESS))
    progress_bar.end()
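The dao.memMaps entry built above packs each memory region into a positional list inside a fairly dense comprehension. The helper below restates the same construction with named steps; build_mem_map is purely illustrative and not part of the real API, but the field order ([region index, wr_ptr_aligned, wr_ptr_offset, size, memory, unfilled]) is taken directly from the comprehension.

def build_mem_map(memory_slots):
    mem_map = []
    for i, s in enumerate(memory_slots):
        if s is None:
            # empty slot: zeroed placeholder record
            mem_map.append([i, 0, 0, 0, [], False])
        else:
            mem_map.append([i, s.wr_ptr_aligned, s.wr_ptr_offset,
                            s.size, s.memory, s.unfilled])
    return mem_map

dao.memMaps[index] = build_mem_map(dao.spec_executor.memory_slots) would then produce the same structure as the comprehension.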
Example 7
    def route_raw(machine, sub_vertexes, k=1, l=0, m=0,
                  BW_PER_ROUTE_ENTRY=0.01, MAX_BW=250, dao=None):
        """
        Modified by peter.choy 06.08.13

        Generates a list of routings for subedges in a machine...

        For the purposes of this algorithm, we are viewing the chips as 'nodes' on a graph.
        The weight of the arcs is a function of the number of routing table entries,
        the arc length, and the remaining bandwidth along the arc's corresponding
        connection.

        The inner function of the algorithm is a slightly modified Dijkstra's algorithm.
        The outer loop iterates over the sub-vertices and routes each of their
        unpruned outgoing sub-edges. Repeating Dijkstra's algorithm for each source
        in this way is greedy, and so the iterative process may or may not be necessary.

        :param `pacman103.lib.lib_machine.Machine` machine:
            machine from which to allocate processors.
        :param list sub_vertexes:
            list of :py:class:`pacman103.lib.graph.Subvertex` instances whose
            outgoing subedges are to be routed across the machine.
        :param k:
            constant that is added to arc weights to represent path length
        :param l:
            constant controlling contribution of Q to total arc weights
        :param m:
            constant controlling contribution of T to total arc weights
        :returns:
            list of :py:class:`pacman103.lib.lib_map.Routing` instances.
        """

        #print("")
        #print("Starting stopwatch...")
        #start_time = clock()

        #print("")
        #print("Initialising routing data structures...")
        routings = []

        nodes_info = DijkstraRouting.initiate_node_info(machine, MAX_BW)
        dijkstra_tables = DijkstraRouting.initiate_dijkstra_tables(machine)
        #print("")
        #print("Starting routing...")
        #print("")

        DijkstraRouting.update_weights(nodes_info, machine, l, m, k, MAX_BW)

        run_num = 0
        pruned_sub_edges = 0
        edge_considered = 0
        # each subvertex represents a core on the board
        progress = ProgressBar(len(sub_vertexes))
        for subVertex in sub_vertexes:
            subedges = subVertex.out_subedges
            
            # locate all destination and source coords
            dest_processors = []
            subedges_to_route = list()
            xs, ys, ps = subVertex.placement.processor.get_coordinates()
            for subedge in subedges:
                if not subedge.pruneable:
                    dest_processors.append(
                            subedge.postsubvertex.placement.processor)
                    subedges_to_route.append(subedge)
                else:
                    pruned_sub_edges += 1
            
            if len(dest_processors) != 0:

                # Update the weights according to the changes in routing
                # tables and available bandwidth
                #logger.debug("updating weights")
                DijkstraRouting.update_weights(nodes_info, machine,
                                                   l, m, k, MAX_BW)

                # Reset the temporary storage of lowest cost
                DijkstraRouting.reset_tables(dijkstra_tables)
                

                # SD Commented this out 20/1/14, to avoid extraneous printing
                # AS integrated this into logger.debug
                #logger.debug("***************************************************")
                #logger.debug("Source node is ({}, {})".format(xs, ys))
                #logger.debug("Destination nodes are(")
                #for processor in dest_processors:
                #    xd, yd, pd = processor.get_coordinates()
                #    logger.debug("({}, {}, {})".format(xd, yd, pd))
                #logger.debug("")

                # Set the first activated node in this run as the source
                xa, ya = xs, ys
                dijkstra_tables["{}:{}".format(xa, ya)]["activated?"] = True
                dijkstra_tables["{}:{}".format(xa, ya)]["lowest cost"] = 0
                # The cost at the source node is zero for obvious reasons.
                # Note that it is NOT 'None'

                DijkstraRouting.\
                    properate_costs_till_reached_destinations(dijkstra_tables,
                                                              nodes_info,
                                                              xa, ya,
                                                              dest_processors,
                                                              xs, ys)

                #logger.debug("Reached destination from source, retracing.")
                #helpful output data
                if conf.config.getboolean( "Routing", "generate_graphs" ) and \
                    dao is not None: # AM
                    output_folder = dao.get_reports_directory("routing_graphs")
                    router_utility = util.RouterUtil(new_output_folder=output_folder)
                    router_utility.output_routing_weight(router_utility,
                                                         dijkstra_tables, machine,
                                                         graph_label="routing weights",
                                                         routing_file_name="routingWeights" + str(edge_considered))
                    edge_considered += 1

                for subedge in subedges_to_route:
                    key, mask, = subedge.key, subedge.mask
                    key_mask_combo = subedge.key_mask_combo
                    dest = subedge.postsubvertex
                    xd, yd, pd = dest.placement.processor.get_coordinates()
                    routing = lib_map.Routing(subedge)
                    xt, yt = DijkstraRouting.retrace_back_to_source(xd, yd, machine,
                                                                    nodes_info,
                                                                    dijkstra_tables,
                                                                    key, mask,
                                                                    key_mask_combo,
                                                                    routing,
                                                                    pd,
                                                                    BW_PER_ROUTE_ENTRY)
                    # SD Commented out 20/1/14 to remove extraneous printing.
                    # DijkstraRouting.printRoute(xt, yt, xs, ys, dijkstra_tables, routing)

                    subedge.routing = routing

                    routings.append(routing)

                    run_num += 1
                

                # SD Commented this out 20/1/14, to avoid extraneous printing
                # AS modified to debug format
                #print ""
                #logger.debug("Route number {} completed from ({}, {}) "
                #             "to ({}, {})".format(run_num, xs, ys, xd, yd))
                #logger.debug("route took {}".format(DijkstraRouting.printRoute(xd, yd, xs, ys, dijkstra_tables, routing)))
                #logger.debug("*********************************"
                #             "**********************************")



                #if (run_num % 5000) == 0:
                #    logger.debug("{} routes done, please wait"
                #                 "...".format(run_num))
            
            progress.update()

        #finish_time = clock()

        #elapsed_time = finish_time - start_time

        #print("")
        #print("Routing finished! Created %d routes in %s seconds.") % (run_num, elapsed_time)
        #print("")
        #logger.debug("gained benefit from {} pruned routings".format(pruned_sub_edges))
        progress.end()
        return routings
Example 8
class PartitionAndPlacePartitioner(AbstractPartitioner):
    
    def __init__(self, dao):
        
        self.dao = dao
        #locate the chosen placer
        placer_algorithms_list = {
            name.replace("Placer", ""): placer
            for (name, placer) in inspect.getmembers(placer_algorithms,
                                                     inspect.isclass)
        }
        
        try:
            placer_class = placer_algorithms_list[
                        conf.config.get("Placer", "algorithm")]
            self.placer = placer_class(dao)
        except KeyError as e:
            raise ValueError("Invalid partitioner algorithm specified. "
                              " I don't know '%s'." % e)
            
        self.progress = None

    def partition(self):
        """
        Loads machine and vertex objects from the datastore, calls
        :py:func:`pacman103.core.mapper.partition_raw` to generate subvertices
         and subedges, and stores them in the datastore.

        :param `pacman103.core.dao` dao:
            datastore containing machine and vertex objects.
        """
        # locate correct placer

        logger.info("* Running Partitioner and Placer as one *")
        
        # Load the machine and vertices objects from the dao
        machine = self.dao.get_machine()
        vertices = self.dao.get_vertices()
        
        #calculate time steps
        no_machine_time_steps = None
        if self.dao.run_time is not None:
            no_machine_time_steps = \
                int((self.dao.run_time * 1000.0) / self.dao.machineTimeStep)
        machine_time_step_us = self.dao.machineTimeStep

        # Partition the vertices into subvertices and consequent subedges
        subvertices, subedges, placements = \
            self.partition_raw(machine, vertices, no_machine_time_steps,
                               machine_time_step_us, self.placer)

        # Store the results in the dao
        self.dao.set_subvertices(subvertices)
        self.dao.set_subedges(subedges)
        self.dao.set_placements(placements)
        
        #update dao so that controller only calls the next stack aspect
        self.dao.done_partitioner = True
        self.dao.done_placer = True
        
    def find_max_ratio(self, resources, max_resources):
        '''
        helper method for finding the maximum usage ratio for a set of resources
        '''
        cpu_ratio = (float(resources.clock_ticks) 
                / float(max_resources.clock_ticks))
        dtcm_ratio = (float(resources.dtcm) / float(max_resources.dtcm))
        sdram_ratio = (float(resources.sdram) / float(max_resources.sdram)) 
        return max((cpu_ratio, dtcm_ratio, sdram_ratio))

    def partition_raw(self, machine, vertices, no_machine_time_steps,
                      machine_time_step_us, placer):
        '''
        entry method for partitioning
        '''
        placements = list()
        subvertices = list()

        # sort vertices by the cardinality of their placement constraints
        sort = lambda vertex: vertex.constraints.placement_cardinality
        vertices = sorted(vertices, key=sort, reverse=True)
        
        n_atoms = 0
        for vertex in vertices:
            n_atoms += vertex.atoms
        self.progress = ProgressBar(n_atoms)
        self.partitioned = dict()

        # Partition one vertex at a time
        for vertex in vertices:
            # only partition real vertices; virtual ones are handled separately
            if vertex.is_virtual():
                self.partition_virtual_vertexes(vertex, placements,
                                                subvertices, placer)
            elif len(vertex.subvertices) == 0:
                self.partition_vertexes(vertex, no_machine_time_steps,
                                        machine_time_step_us, subvertices,
                                        placer, placements)
            else:
                print "Vertex {} is already partitioned!".format(vertex.label)
        self.progress.end()
    
        subedges = self.generate_sub_edges(subvertices)
        
        return subvertices, subedges, placements
    
    def get_max_atoms_per_core(self, vertices):
        
        max_atoms_per_core = 0
        for v in vertices:
            max_for_vertex = v.get_maximum_atoms_per_core()
                
            # If there is no maximum, the maximum is the number of atoms
            if max_for_vertex is None:
                max_for_vertex = v.atoms
    
            # Override the maximum with any custom maximum
            if v.custom_max_atoms_per_core is not None:
                max_for_vertex = v.custom_max_atoms_per_core
            
            max_atoms_per_core = max(max_atoms_per_core, max_for_vertex)
        return max_atoms_per_core

    def partition_vertexes(self, vertex, no_machine_time_steps,
                           machine_time_step_us, subvertices, placer,
                           placements):
        '''
        partitions a normal (non-virtual) vertex
        '''
        
        vertices = list()
        vertices.append(vertex)
        extra_vertices = vertex.get_partition_dependent_vertices()
        if extra_vertices is not None:
            for v in extra_vertices:
                if v.atoms != vertex.atoms:
                    raise Exception("A vertex and its partition-dependent"
                            + " vertices must have the same number of atoms")
                vertices.append(v)
                    
        
        # Prepare for partitioning, getting information
        partition_data_objects = [v.get_partition_data_object() 
                for v in vertices]
        max_atoms_per_core = self.get_max_atoms_per_core(vertices)
        
        self.partition_by_atoms(vertices, placer, vertex.atoms, 
                max_atoms_per_core, no_machine_time_steps, machine_time_step_us,
                partition_data_objects, subvertices, placements)

    def partition_by_atoms(self, vertices, placer, n_atoms, 
            max_atoms_per_core, no_machine_time_steps, machine_time_step_us,
            partition_data_objects, subvertices, placements):
        '''
        partitions a vertex into subvertices based on how many atoms
        can be fitted on each subvertex
        '''
        n_atoms_placed = 0
        while n_atoms_placed < n_atoms:
            
            #logger.debug("Maximum available resources for "
            #             "partitioning: {}".format(resources))

            lo_atom = n_atoms_placed
            hi_atom = lo_atom + max_atoms_per_core - 1
            if hi_atom >= n_atoms:
                hi_atom = n_atoms - 1

            # Scale down the number of atoms to fit the available resources
            used_placements, hi_atom = self.scale_down_resources( 
                    lo_atom, hi_atom, vertices,
                    no_machine_time_steps, machine_time_step_us,
                    partition_data_objects, placer,
                    max_atoms_per_core)

            # Update where we are
            n_atoms_placed = hi_atom + 1
            
            # Create the subvertices and placements
            for (vertex, _, x, y, p, used_resources, _) in used_placements:
                            
                subvertex = graph.Subvertex(vertex, lo_atom, hi_atom, 
                        used_resources)
                processor = self.dao.machine.get_processor(x, y, p)
                placement = lib_map.Placement(subvertex, processor)
                
                subvertices.append(subvertex)
                placements.append(placement)
            
            no_atoms_this_placement = (hi_atom - lo_atom) + 1
            self.progress.update(no_atoms_this_placement)

    def scale_down_resources(self, lo_atom, hi_atom, vertices, 
            no_machine_time_steps, machine_time_step_us, 
            partition_data_objects, placer, max_atoms_per_core):
        '''
        reduces the number of atoms on a core so that it fits within the
        resources available
        '''
        
        # Find the number of atoms that will fit in each vertex given the
        # resources available
        used_placements = list()
        min_hi_atom = hi_atom
        for i in range(len(vertices)):
            vertex = vertices[i]
            partition_data_object = partition_data_objects[i]
            
            resources = placer.get_maximum_resources(vertex.constraints)
            used_resources = vertex.get_resources_for_atoms(lo_atom, hi_atom,
                no_machine_time_steps, machine_time_step_us, 
                partition_data_object)
            ratio = self.find_max_ratio(used_resources, resources)
            
            while ratio > 1.0 and hi_atom >= lo_atom:
    
                # Scale the resources by the ratio
                old_n_atoms = (hi_atom - lo_atom) + 1
                new_n_atoms = int(float(old_n_atoms) / ratio)
    
                # Avoid looping
                if old_n_atoms == new_n_atoms:
                    new_n_atoms -= 1
                else:
                    # Subtract a tenth of the difference between the old
                    # and new
                    new_n_atoms -= int((old_n_atoms - new_n_atoms) / 10.0)
    
                # Find the new resource usage
                hi_atom = lo_atom + new_n_atoms - 1
                used_resources = \
                    vertex.get_resources_for_atoms(lo_atom, hi_atom,
                                                   no_machine_time_steps,
                                                   machine_time_step_us,
                                                   partition_data_object)
                ratio = self.find_max_ratio(used_resources, resources)
              
            # If we couldn't partition, raise an exception
            if hi_atom < lo_atom:
                raise Exception("Vertex {} could not be partitioned".format(
                        vertex.label))
                
            # Try to scale up until just below the resource usage
            used_resources, hi_atom = self.scale_up_resource_usage(
                    used_resources, hi_atom, lo_atom, 
                    max_atoms_per_core, vertex, no_machine_time_steps, 
                    machine_time_step_us, partition_data_object, resources, 
                    ratio)
            
            # If this hi_atom is smaller than the current minimum, update the
            # other placements to use (hopefully) fewer resources
            if hi_atom < min_hi_atom:
                min_hi_atom = hi_atom
                new_used_placements = list()
                for (v, part_obj, x, y, p, v_resources, resources) in used_placements:
                    placer.unplace_subvertex(x, y, p, v_resources)
                    new_resources = v.get_resources_for_atoms(lo_atom, 
                            min_hi_atom, no_machine_time_steps,
                            machine_time_step_us, part_obj)
                    (new_x, new_y, new_p) = placer.place_subvertex(
                            new_resources, v.constraints)
                    new_used_placements.append((v, part_obj, new_x, new_y,
                            new_p, new_resources, resources))
                used_placements = new_used_placements
                
            # Place the vertex
            x, y, p = placer.place_subvertex(used_resources, 
                    vertex.constraints)
            used_placements.append((vertex, partition_data_object, x, y, p, 
                    used_resources, resources))
            
        return used_placements, min_hi_atom


    def scale_up_resource_usage(self, used_resources, hi_atom, lo_atom, 
                        max_atoms_per_core, vertex, no_machine_time_steps,
                        machine_time_step_us, partition_data_object, resources,
                        ratio):
        '''
        tries to push as many atoms into a subvertex as it can,
        given the resource estimates
        '''
        
        previous_used_resources = used_resources
        previous_hi_atom = hi_atom
        while ((ratio < 1.0) and ((hi_atom + 1) < vertex.atoms)
                and ((hi_atom - lo_atom + 2) < max_atoms_per_core)):

            #logger.debug("Scaling up - Current subvertex from"
            #    " %d to %d of %d, ratio = %f, resources = %s" % (lo_atom,
            #             hi_atom, no_atoms, ratio, used_resources))

            previous_hi_atom = hi_atom
            hi_atom += 1

            # Find the new resource usage
            previous_used_resources = used_resources
            used_resources = \
                vertex.get_resources_for_atoms(lo_atom, hi_atom,
                                               no_machine_time_steps,
                                               machine_time_step_us,
                                               partition_data_object)
            ratio = self.find_max_ratio(used_resources, resources)
        return previous_used_resources, previous_hi_atom


    def partition_virtual_vertexes(self, vertex, placements, subvertices,
                                   placer):
        '''
        handle the partitioning of virtual vertices
        '''
        # ask the vertex how many subvertices to split itself into
        number_of_sub_verts = vertex.split_into_subvertex_count()
        number_per_subvert = vertex.atoms / number_of_sub_verts
        for subvert_count in range(number_of_sub_verts):
            
            #create a subvert
            start = (subvert_count * number_per_subvert)
            end = start + number_per_subvert - 1
            subvertex = graph.Subvertex(vertex, start, end, 0)
            subvertices.append(subvertex)
            
            # Update the constraint to reflect changes if there is
            # more than one subvertex
            if vertex.constraints.p is not None:
                start_constraint = \
                    lib_map.VertexConstraints(vertex.constraints.x,
                        vertex.constraints.y,
                        vertex.constraints.p + subvert_count)
            else:
                start_constraint = vertex.constraints
            
            # Place the subvertex
            chip = self.dao.machine.get_chip(vertex.constraints.x,
                    vertex.constraints.y)
            x, y, p = placer.place_virtual_subvertex(start_constraint,
                    chip.get_processors())
            processor = self.dao.machine.get_processor(x, y, p)
            placement = lib_map.Placement(subvertex, processor)
            placements.append(placement)
        self.progress.update(vertex.atoms)
            
    # goes through the vertices and generates subedges for all outgoing edges
    def generate_sub_edges(self, subvertices):
        '''
        Partition edges according to vertex partitioning
        '''
        subedges = list()
        for src_sv in subvertices:
            # For each out edge of the parent vertex...
            for edge in src_sv.vertex.out_edges:
                # ... and create and store a new subedge for each postsubvertex
                for dst_sv in edge.postvertex.subvertices:
                    #logger.debug(
                    #        "Creating subedge between {} ({}-{}) and {} ({}-{})"
                    #        .format(src_sv.vertex.label, src_sv.lo_atom, 
                    #                src_sv.hi_atom, dst_sv.vertex.label,
                    #                dst_sv.lo_atom, dst_sv.hi_atom))
                    subedge = edge.create_subedge(src_sv, dst_sv)
                    subedges.append(subedge)
                    
        return subedges
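scale_down_resources above shrinks a candidate atom range until its worst resource ratio (CPU, DTCM or SDRAM, as computed by find_max_ratio) is at most 1.0, and scale_up_resource_usage then pushes the range back up while the ratio stays below 1.0. The toy function below illustrates just the shrink step; the linear cost_per_atom * n_atoms model and the budget argument stand in for get_resources_for_atoms and the placer's maximum resources, so the numbers are illustrative only.

def shrink_to_fit(lo_atom, hi_atom, cost_per_atom, budget):
    n_atoms = hi_atom - lo_atom + 1
    ratio = (cost_per_atom * n_atoms) / float(budget)
    while ratio > 1.0 and hi_atom >= lo_atom:
        old_n_atoms = hi_atom - lo_atom + 1
        new_n_atoms = int(old_n_atoms / ratio)
        if old_n_atoms == new_n_atoms:
            new_n_atoms -= 1      # guarantee progress to avoid looping
        else:
            # back off an extra tenth of the reduction, as the real code does
            new_n_atoms -= int((old_n_atoms - new_n_atoms) / 10.0)
        hi_atom = lo_atom + new_n_atoms - 1
        ratio = (cost_per_atom * (hi_atom - lo_atom + 1)) / float(budget)
    return hi_atom

For example, shrink_to_fit(0, 255, cost_per_atom=2.0, budget=100.0) returns hi_atom 29 (30 atoms, cost 60), after which the scale-up pass would grow the range again toward the budget.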
Example 9
def generate_output_raw(dao):
    """
    The nitty gritty.
    Generates the load targets and executable targets that comprise the
    simulation and, where individual memory locations are to be written,
    generates memWrites.

    This is now largely finished. Data structures are generated for edges
    and the data structure generation for vertices is merely a prototype.

    *Side effects*:
        writes data structures for the load targets to files in the binaries directories

    :returns:
        Nothing       
    """

    executable_targets, load_targets, mem_write_targets = list(), list(), list()
    chipsUsed = set()
    progress_bar = ProgressBar(len(dao.placements))

    # If we have host-side Spec Execution, execute all Data Specs now:
    try:
        dao.useHostBasedSpecExecutor = \
            conf.config.getboolean( "SpecExecution", "specExecOnHost" )
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        raise Exception( "SpecExecutor could not find config information"
                         " indicating where Spec Execution should occur." )
    
    chips = None
    if dao.useHostBasedSpecExecutor == True:
        chips = dict()
        for placement in dao.placements:
            (x, y, p) = placement.processor.get_coordinates()
            key = "{}:{}".format(x, y)
            if not key in chips:
                chips[key] = data_spec_executor.Chip(x, y)

    for placement in dao.placements:
        if not placement.subvertex.vertex.is_virtual():
            
            start_addr = None
            if dao.useHostBasedSpecExecutor == True:
                dao.spec_executor = data_spec_executor.SpecExecutor()
                
                (x, y, p) = placement.processor.get_coordinates()
                key = "{}:{}".format(x, y)
                chip = chips[key]

                start_addr = chip.sdram_used + \
                    data_spec_constants.SDRAM_BASE_ADDR
                dao.spec_executor.setup(chip)
            
            subvertex = placement.subvertex

            myExecTargets, myLoadTargets, myMemWriteTargets = \
                 subvertex.generateDataSpec(placement.processor, dao)

            # Add this core to the list of targets
            if myExecTargets is not None:
                executable_targets.append(myExecTargets)
            
            # Append the new dataSpec file to the list of load targets:
            if myLoadTargets is not None and len(myLoadTargets) > 0:
                load_targets.extend(myLoadTargets)
            
            # Add required memory writes to the list of writes:
            if myMemWriteTargets is not None and len(myMemWriteTargets) > 0:
                mem_write_targets.extend(myMemWriteTargets)
            
            x, y, p = placement.processor.get_coordinates()
            chipsUsed.add((x, y))
            
            hostname = dao.machine.hostname
            
            if dao.useHostBasedSpecExecutor == True:
                (x, y, p) = placement.processor.get_coordinates()
                f_out = os.path.join(
                    dao.get_binaries_directory(),
                    "%s_appData_%d_%d_%d.dat" % (hostname, x, y, p)
                )
                dao.spec_executor.finish(f_out)

                # TODO: Bring the following in line / neaten
                # ----------------------------------------------
                # Keep information on the memory region locations
                # for later report generation:
                index = "%d %d %d" % (x, y, p)
                dao.memMaps[index] = [
                    [i, s.wr_ptr_aligned, s.wr_ptr_offset, s.size, \
                                               s.memory, s.unfilled] \
                    if s is not None else [i, 0, 0, 0, [], False]
                        for (i, s) in enumerate(dao.spec_executor.memory_slots)
                ]

                # Add the files produced by the Spec Executor to the
                # list of files to load:
                load_targets.append(lib_map.LoadTarget(
                    f_out, x, y, p, start_addr))
                mem_write_targets.append(lib_map.MemWriteTarget(
                    x, y, p, 0xe5007000 + 128*p + 112, start_addr))
        progress_bar.update()

    # populate the DAO with executable, load and memory writing requirements
    dao.set_executable_targets(executable_targets)
    dao.set_load_targets(load_targets)
    dao.set_mem_write_targets(mem_write_targets)

    # Generate core map and routing table binaries for each chip
    for coord in dao.machine.get_coords_of_all_chips():
        x, y = coord['x'], coord['y']
        chip = dao.machine.get_chip(x, y)
        routeCount = get_route_count(chip)
        if (routeCount > 0 or (x, y) in chipsUsed) and not chip.is_virtual():
            fileName = generate_routing_table(chip, routeCount, dao)
            if (conf.config.getboolean("Reports", "reportsEnabled") and
                conf.config.getboolean("Reports", "writeRouterReports") and
                conf.config.getboolean("Reports", "writeRouterDatReport")):
                reports.generate_router_report(fileName, chip, dao)
    
            # Place in the list of targets to load at 119.5MB depth in the SDRAM
            if not chip.virtual:
                load_targets.insert(
                    0, lib_map.LoadTarget(
                        fileName, chip.x, chip.y, 0,
                        data_spec_constants.ROUTING_TABLE_ADDRESS
                    )
                )
    progress_bar.end()