    def test_many_subvertices(self):
        subvertices = list()
        # 51 atoms per core, 17 usable cores on each of 20 chips
        for i in range(20 * 17):
            subvertices.append(PartitionedVertex(
                0, 50, get_resources_used_by_atoms(0, 50, []),
                "Subvertex " + str(i)))

        self.graph = PartitionableGraph("Graph", subvertices)
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices(subvertices)
        self.bp = RadialPlacer(self.machine, self.graph)
        self.subgraph = PartitionedGraph(subvertices=subvertices)
        placements = self.bp.place(self.subgraph, self.graph_mapper)
        unordered_info = list()
        for placement in placements.placements:
            unordered_info.append(
                (placement.subvertex.label.split(" ")[0],
                 "{:<4}".format(placement.subvertex.label.split(" ")[1]),
                 placement.subvertex.n_atoms, 'x: ',
                 placement.x, 'y: ', placement.y, 'p: ', placement.p))

        from operator import itemgetter
        # items 4, 6 and 8 of each tuple are the x, y and p coordinates
        sorted_info = sorted(unordered_info, key=itemgetter(4, 6, 8))
        from pprint import pprint as pp
        pp(sorted_info)

        pp("{}".format("=" * 50))
        # sort again, this time by the numeric index embedded in the label
        sorted_info = sorted(unordered_info, key=lambda x: int(x[1]))
        pp(sorted_info)
    def test_partition_with_empty_graph(self):
        """
        test that the partitioner can work with an empty graph
        :return:
        """
        self.setup()
        self.graph = PartitionableGraph()
        subgraph, mapper = self.bp.partition(self.graph, self.machine)
        self.assertEqual(len(subgraph.subvertices), 0)

    def test_partition_on_target_size_vertex_than_has_to_be_split(self):
        """
        test that fixed-size partitioning produces the correct number of
        subvertices
        :return:
        """
        self.setup()
        large_vertex = TestVertex(1000, "Large vertex")
        large_vertex.add_constraint(PartitionerMaximumSizeConstraint(10))
        self.graph = PartitionableGraph(
            "Graph with large vertex", [large_vertex], [])
        subgraph, mapper = self.bp.partition(self.graph, self.machine)
        # 1000 atoms with at most 10 atoms per core -> 100 subvertices
        self.assertEqual(len(subgraph.subvertices), 100)

    def test_partition_on_large_vertex_than_has_to_be_split(self):
        """
        test that partitioning one large vertex splits it into several
        smaller ones
        :return:
        """
        self.setup()
        large_vertex = TestVertex(300, "Large vertex")
        self.graph = PartitionableGraph(
            "Graph with large vertex", [large_vertex], [])
        subgraph, mapper = self.bp.partition(self.graph, self.machine)
        self.assertEqual(large_vertex._model_based_max_atoms_per_core, 256)
        self.assertGreater(len(subgraph.subvertices), 1)

    def test_place_subvertex_too_big_with_vertex(self):
        large_vertex = TestVertex(500, "Large vertex 500")
        large_subvertex = large_vertex.create_subvertex(
            0, 499, get_resources_used_by_atoms(0, 499, []))
        self.graph.add_vertex(large_vertex)
        self.graph = PartitionableGraph("Graph", [large_vertex])
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices([large_subvertex], large_vertex)
        self.bp = BasicPlacer(self.machine, self.graph)
        self.subgraph = PartitionedGraph(subvertices=[large_subvertex])
        with self.assertRaises(PacmanPlaceException):
            placements = self.bp.place(self.subgraph, self.graph_mapper)
    def test_create_new_graph(self):
        vert1 = TestVertex(10, "New AbstractConstrainedVertex 1", 256)
        vert2 = TestVertex(5, "New AbstractConstrainedVertex 2", 256)
        vert3 = TestVertex(3, "New AbstractConstrainedVertex 3", 256)
        edge1 = MultiCastPartitionableEdge(vert1, vert2, None, "First edge")
        edge2 = MultiCastPartitionableEdge(vert2, vert1, None, "Second edge")
        edge3 = MultiCastPartitionableEdge(vert1, vert3, None, "Third edge")
        verts = [vert1, vert2, vert3]
        edges = [edge1, edge2, edge3]
        graph = PartitionableGraph("Graph", verts, edges)
        for i in range(3):
            self.assertEqual(graph.vertices[i], verts[i])
            self.assertEqual(graph.edges[i], edges[i])

        oev = graph.outgoing_edges_from_vertex(vert1)
        self.assertNotIn(edge2, oev,
                         "edge2 is in outgoing_edges_from vert1")
        iev = graph.incoming_edges_to_vertex(vert1)
        self.assertNotIn(edge1, iev,
                         "edge1 is in incoming_edges_to vert1")
        self.assertNotIn(edge3, iev,
                         "edge3 is in incoming_edges_to vert1")
    def test_fill_machine(self):
        subvertices = list()
        # 50 atoms per core, 17 usable cores on each of 99 chips
        for i in range(99 * 17):
            subvertices.append(PartitionedTestVertex(
                0, 50, get_resources_used_by_atoms(0, 50, []),
                "PartitionedVertex " + str(i)))

        self.graph = PartitionableGraph("Graph", subvertices)
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices(subvertices)
        self.bp = BasicPlacer(self.machine, self.graph)
        self.subgraph = PartitionedGraph(subvertices=subvertices)
        placements = self.bp.place(self.subgraph, self.graph_mapper)
    def test_too_many_subvertices(self):
        subvertices = list()
        # 50 atoms per core, 17 usable cores on each of 100 chips
        for i in range(100 * 17):
            subvertices.append(PartitionedVertex(
                0, 50, get_resources_used_by_atoms(0, 50, []),
                "Subvertex " + str(i)))

        self.graph = PartitionableGraph("Graph", subvertices)
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices(subvertices)
        self.bp = RadialPlacer(self.machine, self.graph)
        self.subgraph = PartitionedGraph(subvertices=subvertices)
        with self.assertRaises(PacmanPlaceException):
            placements = self.bp.place(self.subgraph, self.graph_mapper)
    def test_many_subvertices(self):
        subvertices = list()
        # 50 atoms per core, 17 usable cores on each of 20 chips
        for i in range(20 * 17):
            subvertices.append(PartitionedTestVertex(
                0, 50, get_resources_used_by_atoms(0, 50, []),
                "PartitionedVertex " + str(i)))

        self.graph = PartitionableGraph("Graph", subvertices)
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices(subvertices)
        self.bp = BasicPlacer(self.machine, self.graph)
        self.subgraph = PartitionedGraph(subvertices=subvertices)
        placements = self.bp.place(self.subgraph, self.graph_mapper)
        for placement in placements.placements:
            print placement.subvertex.label, placement.subvertex.n_atoms, \
                'x:', placement.x, 'y:', placement.y, 'p:', placement.p
    def setup(self):
        """
        setup for all basic partitioner tests
        :return:
        """
        self.vert1 = TestVertex(10, "New AbstractConstrainedVertex 1")
        self.vert2 = TestVertex(5, "New AbstractConstrainedVertex 2")
        self.vert3 = TestVertex(3, "New AbstractConstrainedVertex 3")
        self.edge1 = MultiCastPartitionableEdge(self.vert1, self.vert2, 
                                                None, "First edge")
        self.edge2 = MultiCastPartitionableEdge(self.vert2, self.vert1,
                                                None, "Second edge")
        self.edge3 = MultiCastPartitionableEdge(self.vert1, self.vert3,
                                                None, "Third edge")
        self.verts = [self.vert1, self.vert2, self.vert3]
        self.edges = [self.edge1, self.edge2, self.edge3]
        self.graph = PartitionableGraph("Graph", self.verts, self.edges)

        flops = 1000
        (e, ne, n, w, sw, s) = range(6)

        processors = list()
        for i in range(18):
            processors.append(Processor(i, flops))

        _sdram = SDRAM(128 * (2**20))

        links = list()

        links.append(Link(0, 0, 0, 1, 1, n, n))
        links.append(Link(0, 1, 1, 1, 0, s, s))
        links.append(Link(1, 1, 2, 0, 0, e, e))
        links.append(Link(1, 0, 3, 0, 1, w, w))
        r = Router(links, False, 100, 1024)

        ip = "192.162.240.253"
        chips = list()
        for x in range(5):
            for y in range(5):
                chips.append(Chip(x, y, processors, r, _sdram, 0, 0, ip))

        self.machine = Machine(chips)
        self.bp = BasicPartitioner()
    def test_operation_with_same_size_as_vertex_constraint_large_vertices(self):
        """
        test that the partition and place partitioner can handle same size as
        constraints on a vertex which has to be split over many cores
        :return:
        """
        self.setup()
        constrained_vertex = TestVertex(300, "Constrained")
        new_large_vertex = TestVertex(300, "Non constrained")
        constrained_vertex.add_constraint(
            PartitionerSameSizeAsVertexConstraint(new_large_vertex))
        self.graph = PartitionableGraph(
            "New graph", [new_large_vertex, constrained_vertex])
        partitioner = PartitionAndPlacePartitioner()
        subgraph, graph_to_sub_graph_mapper = \
            partitioner.partition(self.graph, self.machine)
        self.assertEqual(len(subgraph.subvertices), 6)

    def test_operation_same_size_as_vertex_constraint_different_order(self):
        """
        test that the partition and place partitioner can handle same size as
        constraints on a vertex which has to be split over many cores where
        the order in which the vertices are added is different.
        :return:
        """
        self.setup()
        constrained_vertex = TestVertex(300, "Constrained")
        new_large_vertex = TestVertex(300, "Non constrained")
        constrained_vertex.add_constraint(
            PartitionerSameSizeAsVertexConstraint(new_large_vertex))
        self.graph = PartitionableGraph("New graph",
                                        [constrained_vertex, new_large_vertex])
        partitioner = PartitionAndPlacePartitioner()
        subgraph, graph_to_sub_graph_mapper = \
            partitioner.partition(self.graph, self.machine)
        # split in 256 each, so 4 partitioned vertices
        self.assertEqual(len(subgraph.subvertices), 4)
    def test_partition_with_barely_sufficient_space(self):
        """
        test that partitioning will work when close to filling the machine
        :return:
        """
        self.setup()
        flops = 1000
        (e, ne, n, w, sw, s) = range(6)

        processors = list()
        for i in range(18):
            processors.append(Processor(i, flops))

        _sdram = SDRAM(2**12)

        links = list()

        links.append(Link(0, 0, 0, 1, 1, n, n))
        links.append(Link(0, 1, 1, 1, 0, s, s))
        links.append(Link(1, 1, 2, 0, 0, e, e))
        links.append(Link(1, 0, 3, 0, 1, w, w))
        r = Router(links, False, 100, 1024)

        ip = "192.162.240.253"
        chips = list()
        for x in range(5):
            for y in range(5):
                chips.append(Chip(x, y, processors, r, _sdram, 0, 0, ip))

        self.machine = Machine(chips)
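        # the machine above deliberately has very little SDRAM per chip
        # (2**12 bytes), so the 450 single-atom subvertices created below
        # only just fit on it -- hence "barely sufficient space"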
        singular_vertex = TestVertex(450, "Large vertex", max_atoms_per_core=1)
        self.assertEqual(singular_vertex._model_based_max_atoms_per_core, 1)
        self.graph = PartitionableGraph(
            "Graph with large vertex", [singular_vertex], [])
        subgraph, mapper = self.bp.partition(self.graph, self.machine)
        self.assertEqual(singular_vertex._model_based_max_atoms_per_core, 1)
        self.assertEqual(len(subgraph.subvertices), 450)
    def test_partition_with_insufficient_space(self):
        """
        test that if theres not enough space, the test the partitioner will
         raise an error
        :return:
        """
        self.setup()
        flops = 1000
        (e, ne, n, w, sw, s) = range(6)

        processors = list()
        for i in range(18):
            processors.append(Processor(i, flops))

        _sdram = SDRAM(2**11)

        links = list()

        links.append(Link(0, 0, 0, 1, 1, n, n))
        links.append(Link(0, 1, 1, 1, 0, s, s))
        links.append(Link(1, 1, 2, 0, 0, e, e))
        links.append(Link(1, 0, 3, 0, 1, w, w))
        r = Router(links, False, 100, 1024)

        ip = "192.162.240.253"
        chips = list()
        for x in range(5):
            for y in range(5):
                chips.append(Chip(x, y, processors, r, _sdram, 0, 0, ip))

        self.machine = Machine(chips)
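        # with only 2**11 bytes of SDRAM per chip the machine cannot hold the
        # 3000 single-atom subvertices requested below, so partition() is
        # expected to raise a PacmanValueError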
        large_vertex = TestVertex(3000, "Large vertex", max_atoms_per_core=1)
        self.assertEqual(large_vertex._model_based_max_atoms_per_core, 1)
        self.graph = PartitionableGraph(
            "Graph with large vertex", [large_vertex], [])
        self.assertRaises(PacmanValueError, self.bp.partition,
                          self.graph, self.machine)
    def setUp(self):
        ########################################################################
        # Setting up vertices, edges and graph                                 #
        ########################################################################
        self.vert1 = TestVertex(100, "New AbstractConstrainedTestVertex 1")
        self.vert2 = TestVertex(5, "New AbstractConstrainedTestVertex 2")
        self.vert3 = TestVertex(3, "New AbstractConstrainedTestVertex 3")
        self.edge1 = MultiCastPartitionableEdge(self.vert1, self.vert2, 
                                                "First edge")
        self.edge2 = MultiCastPartitionableEdge(self.vert2, self.vert1, 
                                                "Second edge")
        self.edge3 = MultiCastPartitionableEdge(self.vert1, self.vert3, 
                                                "Third edge")
        self.verts = [self.vert1, self.vert2, self.vert3]
        self.edges = [self.edge1, self.edge2, self.edge3]
        self.graph = PartitionableGraph("Graph", self.verts, self.edges)

        ########################################################################
        # Setting up machine                                                   #
        ########################################################################
        flops = 1000
        (e, ne, n, w, sw, s) = range(6)

        processors = list()
        for i in range(18):
            processors.append(Processor(i, flops))

        _sdram = SDRAM(128 * (2**20))

        ip = "192.168.240.253"
        chips = list()
        for x in range(10):
            for y in range(10):
                links = list()

                links.append(Link(x, y, 0, (x + 1) % 10, y, n, n))
                links.append(Link(x, y, 1, (x + 1) % 10, (y + 1) % 10, s, s))
                links.append(Link(x, y, 2, x, (y + 1) % 10, n, n))
                links.append(Link(x, y, 3, (x - 1) % 10, y, s, s))
                links.append(Link(x, y, 4, (x - 1) % 10, (y - 1) % 10, n, n))
                links.append(Link(x, y, 5, x, (y - 1) % 10, s, s))

                r = Router(links, False, 100, 1024)
                chips.append(Chip(x, y, processors, r, _sdram, 0, 0, ip))

        self.machine = Machine(chips)
        ########################################################################
        # Setting up subgraph and graph_mapper                                 #
        ########################################################################
        self.subvertices = list()
        self.subvertex1 = PartitionedVertex(
            0, 1, self.vert1.get_resources_used_by_atoms(0, 1, []),
            "First subvertex")
        self.subvertex2 = PartitionedVertex(
            1, 5, get_resources_used_by_atoms(1, 5, []), "Second subvertex")
        self.subvertex3 = PartitionedVertex(
            5, 10, get_resources_used_by_atoms(5, 10, []), "Third subvertex")
        self.subvertex4 = PartitionedVertex(
            10, 100, get_resources_used_by_atoms(10, 100, []),
            "Fourth subvertex")
        self.subvertices.append(self.subvertex1)
        self.subvertices.append(self.subvertex2)
        self.subvertices.append(self.subvertex3)
        self.subvertices.append(self.subvertex4)
        self.subedges = list()
        self.subgraph = PartitionedGraph("Subgraph", self.subvertices,
                                         self.subedges)
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices(self.subvertices)
class Spinnaker(object):

    def __init__(self, host_name=None, timestep=None, min_delay=None,
                 max_delay=None, graph_label=None,
                 database_socket_addresses=None):

        self._hostname = host_name

        # update graph label if needed
        if graph_label is None:
            graph_label = "Application_graph"

        # delays parameters
        self._min_supported_delay = None
        self._max_supported_delay = None

        # pacman objects
        self._partitionable_graph = PartitionableGraph(label=graph_label)
        self._partitioned_graph = None
        self._graph_mapper = None
        self._placements = None
        self._router_tables = None
        self._routing_infos = None
        self._tags = None
        self._machine = None
        self._txrx = None
        self._has_ran = False
        self._reports_states = None
        self._app_id = None
        self._runtime = None

        # database objects
        self._database_socket_addresses = set()
        if database_socket_addresses is not None:
            # update() adds the addresses in place; union() would only return
            # a new set and discard it
            self._database_socket_addresses.update(database_socket_addresses)

        self._database_interface = None
        self._create_database = None

        # Determine default executable folder location
        # and add this default to end of list of search paths
        executable_finder.add_path(os.path.dirname(model_binaries.__file__))

        # population holders
        self._populations = list()
        self._multi_cast_vertex = None
        self._edge_count = 0
        # specific utility vertexes
        self._live_spike_recorder = dict()

        # holder for number of times the timer event should execute for the
        # simulation
        self._no_machine_time_steps = None
        self._machine_time_step = None

        # state that's needed the first time around
        if self._app_id is None:
            self._app_id = config.getint("Machine", "appID")

            if config.getboolean("Reports", "reportsEnabled"):
                self._reports_states = ReportState(
                    config.getboolean("Reports", "writePartitionerReports"),
                    config.getboolean("Reports",
                                      "writePlacerReportWithPartitionable"),
                    config.getboolean("Reports",
                                      "writePlacerReportWithoutPartitionable"),
                    config.getboolean("Reports", "writeRouterReports"),
                    config.getboolean("Reports", "writeRouterInfoReport"),
                    config.getboolean("Reports", "writeTextSpecs"),
                    config.getboolean("Reports", "writeReloadSteps"),
                    config.getboolean("Reports", "writeTransceiverReport"),
                    config.getboolean("Reports", "outputTimesForSections"),
                    config.getboolean("Reports", "writeTagAllocationReports"))

            # set up reports default folder
            self._report_default_directory, this_run_time_string = \
                helpful_functions.set_up_report_specifics(
                    default_report_file_path=config.get(
                        "Reports", "defaultReportFilePath"),
                    max_reports_kept=config.getint(
                        "Reports", "max_reports_kept"),
                    app_id=self._app_id)

            # set up application report folder
            self._app_data_runtime_folder = \
                helpful_functions.set_up_output_application_data_specifics(
                    max_application_binaries_kept=config.getint(
                        "Reports", "max_application_binaries_kept"),
                    where_to_write_application_data_files=config.get(
                        "Reports", "defaultApplicationDataFilePath"),
                    app_id=self._app_id,
                    this_run_time_string=this_run_time_string)

        self._spikes_per_second = float(config.getfloat(
            "Simulation", "spikes_per_second"))
        self._ring_buffer_sigma = float(config.getfloat(
            "Simulation", "ring_buffer_sigma"))

        # set up machine targeted data
        self._set_up_machine_specifics(timestep, min_delay, max_delay,
                                       host_name)

        logger.info("Setting time scale factor to {}."
                    .format(self._time_scale_factor))

        logger.info("Setting appID to %d." % self._app_id)

        # get the machine time step
        logger.info("Setting machine time step to {} micro-seconds."
                    .format(self._machine_time_step))

    def _set_up_machine_specifics(self, timestep, min_delay, max_delay,
                                  hostname):
        self._machine_time_step = config.getint("Machine", "machineTimeStep")

        # deal with params allowed via the setup options
        if timestep is not None:

            # convert into milliseconds from microseconds
            timestep *= 1000
            self._machine_time_step = timestep

        if min_delay is not None and float(min_delay * 1000) < 1.0 * timestep:
            raise common_exceptions.ConfigurationException(
                "Pacman does not support min delays below {} ms with the "
                "current machine time step"
                .format(constants.MIN_SUPPORTED_DELAY * timestep))

        natively_supported_delay_for_models = \
            constants.MAX_SUPPORTED_DELAY_TICS
        delay_extension_max_supported_delay = \
            constants.MAX_DELAY_BLOCKS \
            * constants.MAX_TIMER_TICS_SUPPORTED_PER_BLOCK

        max_delay_tics_supported = \
            natively_supported_delay_for_models + \
            delay_extension_max_supported_delay

        if max_delay is not None\
           and float(max_delay * 1000) > max_delay_tics_supported * timestep:
            raise common_exceptions.ConfigurationException(
                "Pacman does not support max delays above {} ms with the "
                "current machine time step".format(0.144 * timestep))
        if min_delay is not None:
            self._min_supported_delay = min_delay
        else:
            self._min_supported_delay = timestep / 1000.0

        if max_delay is not None:
            self._max_supported_delay = max_delay
        else:
            self._max_supported_delay = (max_delay_tics_supported *
                                         (timestep / 1000.0))
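        # Worked example (the constant values are assumptions for
        # illustration, not taken from this file): with
        # MAX_SUPPORTED_DELAY_TICS = 16, MAX_DELAY_BLOCKS = 8 and
        # MAX_TIMER_TICS_SUPPORTED_PER_BLOCK = 16,
        #     max_delay_tics_supported = 16 + 8 * 16 = 144 ticks,
        # so with a 1000 us (1 ms) machine time step the largest supported
        # delay is 144 ms -- the "0.144 * timestep" figure in the error
        # message above.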

        if (config.has_option("Machine", "timeScaleFactor") and
                config.get("Machine", "timeScaleFactor") != "None"):
            self._time_scale_factor = \
                config.getint("Machine", "timeScaleFactor")
            if timestep * self._time_scale_factor < 1000:
                logger.warn("the combination of machine time step and the "
                            "machine time scale factor results in a real "
                            "timer tick that is currently not reliably "
                            "supported by the spinnaker machine.")
        else:
            self._time_scale_factor = max(1,
                                          math.ceil(1000.0 / float(timestep)))
            if self._time_scale_factor > 1:
                logger.warn("A timestep was entered that has forced pacman103 "
                            "to automatically slow the simulation down from "
                            "real time by a factor of {}. To remove this "
                            "automatic behaviour, please enter a "
                            "timescaleFactor value in your .pacman.cfg"
                            .format(self._time_scale_factor))
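        # Worked example: a 200 us machine time step gives a time scale
        # factor of ceil(1000.0 / 200) = 5, i.e. each timer tick is stretched
        # to 5 * 200 us = 1 ms of wall-clock time and the simulation runs
        # five times slower than real time.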

        if hostname is not None:
            self._hostname = hostname
            logger.warn("The machine name from PYNN setup is overriding the "
                        "machine name defined in the spynnaker.cfg file")
        elif config.has_option("Machine", "machineName"):
            self._hostname = config.get("Machine", "machineName")
        else:
            raise Exception("A SpiNNaker machine must be specified in "
                            "spynnaker.cfg.")
        use_virtual_board = config.getboolean("Machine", "virtual_board")
        if self._hostname == 'None' and not use_virtual_board:
            raise Exception("A SpiNNaker machine must be specified in "
                            "spynnaker.cfg.")

    def run(self, run_time):
        """ Run the mapping pipeline and then the simulation itself.

        :param run_time: the run duration, in milliseconds
        :return: None
        """

        # calculate number of machine time steps
        self._calculate_number_of_machine_time_steps(run_time)

        self._runtime = run_time

        xml_paths = self._create_xml_paths()

        inputs = self._create_pacman_executor_inputs()
        required_outputs = self._create_pacman_executor_outputs()
        algorithms = self._create_algorithm_list(
            config.get("Mode", "mode") == "Debug")

        pacman_executor = helpful_functions.do_mapping(
            inputs, algorithms, required_outputs, xml_paths,
            config.getboolean("Reports", "outputTimesForSections"))

        # gather provenance data from the executor itself if needed
        if config.get("Reports", "writeProvanceData"):
            pacman_executor_file_path = os.path.join(
                pacman_executor.get_item("ProvenanceFilePath"),
                "PACMAN_provancence_data.xml")
            pacman_executor.write_provenance_data_in_xml(
                pacman_executor_file_path,
                pacman_executor.get_item("MemoryTransciever"))

        # sort out outputs data
        self._txrx = pacman_executor.get_item("MemoryTransciever")
        self._placements = pacman_executor.get_item("MemoryPlacements")
        self._router_tables = pacman_executor.get_item("MemoryRoutingTables")
        self._routing_infos = pacman_executor.get_item("MemoryRoutingInfos")
        self._tags = pacman_executor.get_item("MemoryTags")
        self._graph_mapper = pacman_executor.get_item("MemoryGraphMapper")
        self._partitioned_graph = pacman_executor.get_item(
            "MemoryPartitionedGraph")
        self._machine = pacman_executor.get_item("MemoryMachine")
        self._database_interface = pacman_executor.get_item(
            "DatabaseInterface")
        self._has_ran = pacman_executor.get_item("RanToken")

    @staticmethod
    def _create_xml_paths():
        # add the extra xml files from the config file
        xml_paths = config.get("Mapping", "extra_xmls_paths")
        if xml_paths == "None":
            xml_paths = list()
        else:
            xml_paths = xml_paths.split(",")

        # add extra xml paths for pynn algorithms
        xml_paths.append(
            os.path.join(os.path.dirname(overridden_pacman_functions.__file__),
                         "algorithms_metadata.xml"))
        xml_paths.append(os.path.join(os.path.dirname(
            pacman_algorithm_reports.__file__), "reports_metadata.xml"))
        return xml_paths

    def _create_algorithm_list(self, in_debug_mode):
        algorithms = ""
        algorithms += (config.get("Mapping", "algorithms") + "," +
                       config.get("Mapping", "interface_algorithms"))

        # if using virtual machine, add to list of algorithms the virtual
        # machine generator, otherwise add the standard machine generator
        if config.getboolean("Machine", "virtual_board"):
            algorithms += ",FrontEndCommonVirtualMachineInterfacer"
        else:
            algorithms += ",FrontEndCommonMachineInterfacer"
            algorithms += ",FrontEndCommonApplicationRunner"

            # if going to write provenance data after the run add the two
            # provenance gatherers
            if config.get("Reports", "writeProvanceData"):
                algorithms += ",FrontEndCommonProvenanceGatherer"

            # if the end user wants reload script, add the reload script
            # creator to the list
            if config.getboolean("Reports", "writeReloadSteps"):
                algorithms += ",FrontEndCommonReloadScriptCreator"

        if config.getboolean("Reports", "writeMemoryMapReport"):
            algorithms += ",FrontEndCommonMemoryMapReport"

        if config.getboolean("Reports", "writeNetworkSpecificationReport"):
            algorithms += \
                ",FrontEndCommonNetworkSpecificationPartitionableReport"

        # define mapping between output types and reports
        if self._reports_states is not None \
                and self._reports_states.tag_allocation_report:
            algorithms += ",TagReport"
        if self._reports_states is not None \
                and self._reports_states.routing_info_report:
            algorithms += ",routingInfoReports"
        if self._reports_states is not None \
                and self._reports_states.router_report:
            algorithms += ",RouterReports"
        if self._reports_states is not None \
                and self._reports_states.partitioner_report:
            algorithms += ",PartitionerReport"
        if (self._reports_states is not None and
                self._reports_states.placer_report_with_partitionable_graph):
            algorithms += ",PlacerReportWithPartitionableGraph"
        if (self._reports_states is not None and
                self._reports_states
                .placer_report_without_partitionable_graph):
            algorithms += ",PlacerReportWithoutPartitionableGraph"

        # add debug algorithms if needed
        if in_debug_mode:
            algorithms += ",ValidRoutesChecker"

        return algorithms
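        # Illustrative note (hypothetical config values): with a physical
        # board, reports disabled and debug mode off, the string returned
        # above looks roughly like
        #   "<Mapping:algorithms>,<Mapping:interface_algorithms>,"
        #   "FrontEndCommonMachineInterfacer,FrontEndCommonApplicationRunner"
        # i.e. a plain comma-separated list that the pacman executor later
        # resolves into algorithm classes by name.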

    @staticmethod
    def _create_pacman_executor_outputs():

        # explicitly define what outputs spynnaker expects
        required_outputs = list()
        if config.getboolean("Machine", "virtual_board"):
            required_outputs.extend([
                "MemoryPlacements", "MemoryRoutingTables",
                "MemoryRoutingInfos", "MemoryTags", "MemoryPartitionedGraph",
                "MemoryGraphMapper"])
        else:
            required_outputs.append("RanToken")

        # if front end wants reload script, add requires reload token
        if config.getboolean("Reports", "writeReloadSteps"):
            required_outputs.append("ReloadToken")
        return required_outputs

    def _create_pacman_executor_inputs(self):

        # make a folder for the json files to be stored in
        json_folder = os.path.join(
            self._report_default_directory, "json_files")
        if not os.path.exists(json_folder):
            os.mkdir(json_folder)

        # file path to store any provenance data to
        provenance_file_path = os.path.join(self._report_default_directory,
                                            "provance_data")
        if not os.path.exists(provenance_file_path):
            os.mkdir(provenance_file_path)

        # translate config "None" to None
        width = config.get("Machine", "width")
        height = config.get("Machine", "height")
        if width == "None":
            width = None
        else:
            width = int(width)
        if height == "None":
            height = None
        else:
            height = int(height)

        number_of_boards = config.get("Machine", "number_of_boards")
        if number_of_boards == "None":
            number_of_boards = None

        scamp_socket_addresses = config.get(
            "Machine", "scamp_connections_data")
        if scamp_socket_addresses == "None":
            scamp_socket_addresses = None

        boot_port_num = config.get("Machine", "boot_connection_port_num")
        if boot_port_num == "None":
            boot_port_num = None
        else:
            boot_port_num = int(boot_port_num)

        inputs = list()
        inputs.append({'type': "MemoryPartitionableGraph",
                       'value': self._partitionable_graph})
        inputs.append({'type': 'ReportFolder',
                       'value': self._report_default_directory})
        inputs.append({'type': "ApplicationDataFolder",
                       'value': self._app_data_runtime_folder})
        inputs.append({'type': 'IPAddress', 'value': self._hostname})

        # basic input stuff
        inputs.append({'type': "BMPDetails",
                       'value': config.get("Machine", "bmp_names")})
        inputs.append({'type': "DownedChipsDetails",
                       'value': config.get("Machine", "down_chips")})
        inputs.append({'type': "DownedCoresDetails",
                       'value': config.get("Machine", "down_cores")})
        inputs.append({'type': "BoardVersion",
                       'value': config.getint("Machine", "version")})
        inputs.append({'type': "NumberOfBoards", 'value': number_of_boards})
        inputs.append({'type': "MachineWidth", 'value': width})
        inputs.append({'type': "MachineHeight", 'value': height})
        inputs.append({'type': "AutoDetectBMPFlag",
                       'value': config.getboolean("Machine",
                                                  "auto_detect_bmp")})
        inputs.append({'type': "EnableReinjectionFlag",
                       'value': config.getboolean("Machine",
                                                  "enable_reinjection")})
        inputs.append({'type': "ScampConnectionData",
                       'value': scamp_socket_addresses})
        inputs.append({'type': "BootPortNum", 'value': boot_port_num})
        inputs.append({'type': "APPID", 'value': self._app_id})
        inputs.append({'type': "RunTime", 'value': self._runtime})
        inputs.append({'type': "TimeScaleFactor",
                       'value': self._time_scale_factor})
        inputs.append({'type': "MachineTimeStep",
                       'value': self._machine_time_step})
        inputs.append({'type': "DatabaseSocketAddresses",
                       'value': self._database_socket_addresses})
        inputs.append({'type': "DatabaseWaitOnConfirmationFlag",
                       'value': config.getboolean("Database",
                                                  "wait_on_confirmation")})
        inputs.append({'type': "WriteCheckerFlag",
                       'value': config.getboolean("Mode", "verify_writes")})
        inputs.append({'type': "WriteTextSpecsFlag",
                       'value': config.getboolean("Reports",
                                                  "writeTextSpecs")})
        inputs.append({'type': "ExecutableFinder", 'value': executable_finder})
        inputs.append({'type': "MachineHasWrapAroundsFlag",
                       'value': config.getboolean("Machine",
                                                  "requires_wrap_arounds")})
        inputs.append({'type': "ReportStates", 'value': self._reports_states})
        inputs.append({'type': "UserCreateDatabaseFlag",
                       'value': config.get("Database", "create_database")})
        inputs.append({'type': "ExecuteMapping",
                       'value': config.getboolean(
                           "Database",
                           "create_routing_info_to_neuron_id_mapping")})
        inputs.append({'type': "DatabaseSocketAddresses",
                       'value': self._database_socket_addresses})
        inputs.append({'type': "SendStartNotifications",
                       'value': config.getboolean("Database",
                                                  "send_start_notification")})
        inputs.append({'type': "ProvenanceFilePath",
                       'value': provenance_file_path})

        # add paths for each file based version
        inputs.append({'type': "FileCoreAllocationsFilePath",
                       'value': os.path.join(
                           json_folder, "core_allocations.json")})
        inputs.append({'type': "FileSDRAMAllocationsFilePath",
                       'value': os.path.join(
                           json_folder, "sdram_allocations.json")})
        inputs.append({'type': "FileMachineFilePath",
                       'value': os.path.join(
                           json_folder, "machine.json")})
        inputs.append({'type': "FilePartitionedGraphFilePath",
                       'value': os.path.join(
                           json_folder, "partitioned_graph.json")})
        inputs.append({'type': "FilePlacementFilePath",
                       'value': os.path.join(
                           json_folder, "placements.json")})
        inputs.append({'type': "FileRouingPathsFilePath",
                       'value': os.path.join(
                           json_folder, "routing_paths.json")})
        inputs.append({'type': "FileConstraintsFilePath",
                       'value': os.path.join(
                           json_folder, "constraints.json")})
        return inputs

    def _calculate_number_of_machine_time_steps(self, run_time):
        if run_time is not None:
            self._no_machine_time_steps =\
                int((run_time * 1000.0) / self._machine_time_step)
            ceiled_machine_time_steps = \
                math.ceil((run_time * 1000.0) / self._machine_time_step)
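            # Worked example: run_time = 100.5 ms with a 1000 us machine time
            # step gives int(100500.0 / 1000) = 100 but ceil(...) = 101, so
            # the warning below fires and 101 time steps are used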
            if self._no_machine_time_steps != ceiled_machine_time_steps:
                logger.warn(
                    "The runtime and machine time step combination result in "
                    "a fractional number of machine time steps")
                self._no_machine_time_steps = int(ceiled_machine_time_steps)
            for vertex in self._partitionable_graph.vertices:
                if isinstance(vertex, AbstractDataSpecableVertex):
                    vertex.set_no_machine_time_steps(
                        self._no_machine_time_steps)
        else:
            self._no_machine_time_steps = None
            logger.warn("You have set a runtime that will never end, this may"
                        "cause the neural models to fail to partition "
                        "correctly")
            for vertex in self._partitionable_graph.vertices:
                if ((isinstance(vertex, AbstractSpikeRecordable) and
                        vertex.is_recording_spikes()) or
                        (isinstance(vertex, AbstractVRecordable) and
                            vertex.is_recording_v()) or
                        (isinstance(vertex, AbstractGSynRecordable) and
                            vertex.is_recording_gsyn())):
                    raise common_exceptions.ConfigurationException(
                        "recording a population when set to infinite runtime "
                        "is not currently supportable in this tool chain. "
                        "Watch this space.")

    @property
    def app_id(self):
        """

        :return:
        """
        return self._app_id

    @property
    def has_ran(self):
        """

        :return:
        """
        return self._has_ran

    @property
    def machine_time_step(self):
        """

        :return:
        """
        return self._machine_time_step

    @property
    def no_machine_time_steps(self):
        """

        :return:
        """
        return self._no_machine_time_steps

    @property
    def timescale_factor(self):
        """

        :return:
        """
        return self._time_scale_factor

    @property
    def spikes_per_second(self):
        """

        :return:
        """
        return self._spikes_per_second

    @property
    def ring_buffer_sigma(self):
        """

        :return:
        """
        return self._ring_buffer_sigma

    @property
    def get_multi_cast_source(self):
        """

        :return:
        """
        return self._multi_cast_vertex

    @property
    def partitioned_graph(self):
        """

        :return:
        """
        return self._partitioned_graph

    @property
    def partitionable_graph(self):
        """

        :return:
        """
        return self._partitionable_graph

    @property
    def placements(self):
        """

        :return:
        """
        return self._placements

    @property
    def transceiver(self):
        """

        :return:
        """
        return self._txrx

    @property
    def graph_mapper(self):
        """

        :return:
        """
        return self._graph_mapper

    @property
    def routing_infos(self):
        """

        :return:
        """
        return self._routing_infos

    @property
    def min_supported_delay(self):
        """ The minimum supported delay based in milliseconds
        :return:
        """
        return self._min_supported_delay

    @property
    def max_supported_delay(self):
        """ The maximum supported delay based in milliseconds
        :return:
        """
        return self._max_supported_delay

    def set_app_id(self, value):
        """

        :param value:
        :return:
        """
        self._app_id = value

    def get_current_time(self):
        """

        :return:
        """
        if self._has_ran:
            return float(self._runtime)
        return 0.0

    def __repr__(self):
        return "Spinnaker object for machine {}".format(self._hostname)

    def add_vertex(self, vertex_to_add):
        """

        :param vertex_to_add:
        :return:
        """
        if isinstance(vertex_to_add, CommandSender):
            self._multi_cast_vertex = vertex_to_add

        self._partitionable_graph.add_vertex(vertex_to_add)

        if isinstance(vertex_to_add, AbstractSendMeMulticastCommandsVertex):
            if self._multi_cast_vertex is None:
                self._multi_cast_vertex = CommandSender(
                    self._machine_time_step, self._time_scale_factor)
                self.add_vertex(self._multi_cast_vertex)
            edge = MultiCastPartitionableEdge(
                self._multi_cast_vertex, vertex_to_add)
            self._multi_cast_vertex.add_commands(vertex_to_add.commands, edge)
            self.add_edge(edge)

        # add any dependent edges and vertices if needed
        if isinstance(vertex_to_add,
                      AbstractVertexWithEdgeToDependentVertices):
            for dependant_vertex in vertex_to_add.dependent_vertices:
                self.add_vertex(dependant_vertex)
                dependant_edge = MultiCastPartitionableEdge(
                    pre_vertex=vertex_to_add, post_vertex=dependant_vertex)
                self.add_edge(
                    dependant_edge,
                    vertex_to_add.edge_partition_identifier_for_dependent_edge)

    def add_edge(self, edge_to_add, partition_identifier=None):
        """

        :param edge_to_add:
        :param partition_identifier: the partition identifier for the outgoing\
                    edge partition
        :return:
        """
        self._partitionable_graph.add_edge(edge_to_add, partition_identifier)

    def create_population(self, size, cellclass, cellparams, structure, label):
        """

        :param size:
        :param cellclass:
        :param cellparams:
        :param structure:
        :param label:
        :return:
        """
        return Population(
            size=size, cellclass=cellclass, cellparams=cellparams,
            structure=structure, label=label, spinnaker=self)

    def _add_population(self, population):
        """ Called by each population to add itself to the list
        """
        self._populations.append(population)

    def create_projection(
            self, presynaptic_population, postsynaptic_population, connector,
            source, target, synapse_dynamics, label, rng):
        """

        :param presynaptic_population:
        :param postsynaptic_population:
        :param connector:
        :param source:
        :param target:
        :param synapse_dynamics:
        :param label:
        :param rng:
        :return:
        """
        if label is None:
            label = "Projection {}".format(self._edge_count)
            self._edge_count += 1
        return Projection(
            presynaptic_population=presynaptic_population, label=label,
            postsynaptic_population=postsynaptic_population, rng=rng,
            connector=connector, source=source, target=target,
            synapse_dynamics=synapse_dynamics, spinnaker_control=self,
            machine_time_step=self._machine_time_step,
            timescale_factor=self._time_scale_factor,
            user_max_delay=self.max_supported_delay)

    def stop(self, turn_off_machine=None, clear_routing_tables=None,
             clear_tags=None):
        """
        :param turn_off_machine: decides if the machine should be powered down\
            after running the execution. Note that this powers down all boards\
            connected to the BMP connections given to the transceiver
        :type turn_off_machine: bool
        :param clear_routing_tables: informs the tool chain if it\
            should turn off the clearing of the routing tables
        :type clear_routing_tables: bool
        :param clear_tags: informs the tool chain if it should clear the tags\
            off the machine at stop
        :type clear_tags: boolean
        :return: None
        """
        for population in self._populations:
            population._end()

        # if not a virtual machine, then shut down stuff on the board
        if not config.getboolean("Machine", "virtual_board"):

            if turn_off_machine is None:
                turn_off_machine = \
                    config.getboolean("Machine", "turn_off_machine")

            if clear_routing_tables is None:
                clear_routing_tables = config.getboolean(
                    "Machine", "clear_routing_tables")

            if clear_tags is None:
                clear_tags = config.getboolean("Machine", "clear_tags")

            # if stopping on the machine, clear the IP tags and reverse IP
            # tags
            if clear_tags:
                for ip_tag in self._tags.ip_tags:
                    self._txrx.clear_ip_tag(
                        ip_tag.tag, board_address=ip_tag.board_address)
                for reverse_ip_tag in self._tags.reverse_ip_tags:
                    self._txrx.clear_ip_tag(
                        reverse_ip_tag.tag,
                        board_address=reverse_ip_tag.board_address)

            # if clearing routing table entries, clear
            if clear_routing_tables:
                for router_table in self._router_tables.routing_tables:
                    if not self._machine.get_chip_at(router_table.x,
                                                     router_table.y).virtual:
                        self._txrx.clear_multicast_routes(router_table.x,
                                                          router_table.y)

            # execute app stop
            # self._txrx.stop_application(self._app_id)
            if self._create_database:
                self._database_interface.stop()

            # stop the transceiver
            if turn_off_machine:
                logger.info("Turning off machine")
            self._txrx.close(power_off_machine=turn_off_machine)

    def _add_socket_address(self, socket_address):
        """

        :param socket_address:
        :return:
        """
        self._database_socket_addresses.add(socket_address)
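
# Illustrative usage sketch (hypothetical, not part of the original source):
# assuming a valid spynnaker.cfg with a machineName entry and a PyNN cell
# class such as IF_curr_exp on the import path, a front end would drive the
# class above roughly as follows:
#
#     spinnaker = Spinnaker(timestep=1.0, min_delay=1.0, max_delay=144.0)
#     pop = spinnaker.create_population(100, IF_curr_exp, {}, None, "pop_1")
#     spinnaker.run(1000)                      # run for 1000 ms
#     spinnaker.stop(turn_off_machine=False)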
class TestBasicPlacer(unittest.TestCase):
    """
    test for basic placement algorithm
    """
    def setUp(self):
        ########################################################################
        # Setting up vertices, edges and graph                                 #
        ########################################################################
        self.vert1 = TestVertex(100, "New AbstractConstrainedTestVertex 1")
        self.vert2 = TestVertex(5, "New AbstractConstrainedTestVertex 2")
        self.vert3 = TestVertex(3, "New AbstractConstrainedTestVertex 3")
        self.edge1 = MultiCastPartitionableEdge(self.vert1, self.vert2, 
                                                "First edge")
        self.edge2 = MultiCastPartitionableEdge(self.vert2, self.vert1, 
                                                "Second edge")
        self.edge3 = MultiCastPartitionableEdge(self.vert1, self.vert3, 
                                                "Third edge")
        self.verts = [self.vert1, self.vert2, self.vert3]
        self.edges = [self.edge1, self.edge2, self.edge3]
        self.graph = PartitionableGraph("Graph", self.verts, self.edges)

        ########################################################################
        # Setting up machine                                                   #
        ########################################################################
        flops = 1000
        (e, ne, n, w, sw, s) = range(6)

        processors = list()
        for i in range(18):
            processors.append(Processor(i, flops))

        _sdram = SDRAM(128 * (2**20))

        ip = "192.168.240.253"
        chips = list()
        for x in range(10):
            for y in range(10):
                links = list()

                links.append(Link(x, y, 0, (x + 1) % 10, y, n, n))
                links.append(Link(x, y, 1, (x + 1) % 10, (y + 1) % 10, s, s))
                links.append(Link(x, y, 2, x, (y + 1) % 10, n, n))
                links.append(Link(x, y, 3, (x - 1) % 10, y, s, s))
                links.append(Link(x, y, 4, (x - 1) % 10, (y - 1) % 10, n, n))
                links.append(Link(x, y, 5, x, (y - 1) % 10, s, s))

                r = Router(links, False, 100, 1024)
                chips.append(Chip(x, y, processors, r, _sdram, 0, 0, ip))

        self.machine = Machine(chips)
        ########################################################################
        # Setting up subgraph and graph_mapper                                 #
        ########################################################################
        self.subvertices = list()
        self.subvertex1 = PartitionedVertex(
            0, 1, self.vert1.get_resources_used_by_atoms(0, 1, []),
            "First subvertex")
        self.subvertex2 = PartitionedVertex(
            1, 5, get_resources_used_by_atoms(1, 5, []), "Second subvertex")
        self.subvertex3 = PartitionedVertex(
            5, 10, get_resources_used_by_atoms(5, 10, []), "Third subvertex")
        self.subvertex4 = PartitionedVertex(
            10, 100, get_resources_used_by_atoms(10, 100, []),
            "Fourth subvertex")
        self.subvertices.append(self.subvertex1)
        self.subvertices.append(self.subvertex2)
        self.subvertices.append(self.subvertex3)
        self.subvertices.append(self.subvertex4)
        self.subedges = list()
        self.subgraph = PartitionedGraph("Subgraph", self.subvertices,
                                         self.subedges)
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices(self.subvertices)

    @unittest.skip("demonstrating skipping")
    def test_new_basic_placer(self):
        self.bp = BasicPlacer(self.machine, self.graph)
        self.assertEqual(self.bp._machine, self.machine)
        self.assertEqual(self.bp._graph, self.graph)

    @unittest.skip("demonstrating skipping")
    def test_place_where_subvertices_dont_have_vertex(self):
        self.bp = BasicPlacer(self.machine, self.graph)
        placements = self.bp.place(self.subgraph, self.graph_mapper)
        for placement in placements.placements:
            print placement.subvertex.label, placement.subvertex.n_atoms, \
                'x:', placement.x, 'y:', placement.y, 'p:', placement.p

    @unittest.skip("demonstrating skipping")
    def test_place_where_subvertices_have_vertices(self):
        self.bp = BasicPlacer(self.machine, self.graph)
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices(self.subvertices, self.vert1)
        placements = self.bp.place(self.subgraph, self.graph_mapper)
        for placement in placements.placements:
            print placement.subvertex.label, placement.subvertex.n_atoms, \
                'x:', placement.x, 'y:', placement.y, 'p:', placement.p

    @unittest.skip("demonstrating skipping")
    def test_place_subvertex_too_big_with_vertex(self):
        large_vertex = TestVertex(500, "Large vertex 500")
        large_subvertex = large_vertex.create_subvertex(
            0, 499, get_resources_used_by_atoms(0, 499, []))
        self.graph.add_vertex(large_vertex)
        self.graph = PartitionableGraph("Graph",[large_vertex])
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices([large_subvertex], large_vertex)
        self.bp = BasicPlacer(self.machine, self.graph)
        self.subgraph = PartitionedGraph(subvertices=[large_subvertex])
        with self.assertRaises(PacmanPlaceException):
            placements = self.bp.place(self.subgraph, self.graph_mapper)

    @unittest.skip("demonstrating skipping")
    def test_try_to_place(self):
        self.assertEqual(True, False, "Test not implemented yet")

    @unittest.skip("demonstrating skipping")
    def test_deal_with_constraint_placement_subvertices_dont_have_vertex(self):
        self.bp = BasicPlacer(self.machine, self.graph)
        self.subvertex1.add_constraint(PlacerChipAndCoreConstraint(8, 3, 2))
        self.assertIsInstance(self.subvertex1.constraints[0], PlacerChipAndCoreConstraint)
        self.subvertex2.add_constraint(PlacerChipAndCoreConstraint(3, 5, 7))
        self.subvertex3.add_constraint(PlacerChipAndCoreConstraint(2, 4, 6))
        self.subvertex4.add_constraint(PlacerChipAndCoreConstraint(6, 4, 16))
        self.subvertices = list()
        self.subvertices.append(self.subvertex1)
        self.subvertices.append(self.subvertex2)
        self.subvertices.append(self.subvertex3)
        self.subvertices.append(self.subvertex4)
        self.subedges = list()
        self.subgraph = PartitionedGraph("Subgraph", self.subvertices,
                                         self.subedges)
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices(self.subvertices)
        placements = self.bp.place(self.subgraph, self.graph_mapper)
        for placement in placements.placements:
            print placement.subvertex.label, placement.subvertex.n_atoms, \
                'x:', placement.x, 'y:', placement.y, 'p:', placement.p

    @unittest.skip("demonstrating skipping")
    def test_deal_with_constraint_placement_subvertices_have_vertices(self):
        self.bp = BasicPlacer(self.machine, self.graph)
        self.subvertex1.add_constraint(PlacerChipAndCoreConstraint(1, 5, 2))
        self.assertIsInstance(self.subvertex1.constraints[0], PlacerChipAndCoreConstraint)
        self.subvertex2.add_constraint(PlacerChipAndCoreConstraint(3, 5, 7))
        self.subvertex3.add_constraint(PlacerChipAndCoreConstraint(2, 4, 6))
        self.subvertex4.add_constraint(PlacerChipAndCoreConstraint(6, 7, 16))
        self.subvertices = list()
        self.subvertices.append(self.subvertex1)
        self.subvertices.append(self.subvertex2)
        self.subvertices.append(self.subvertex3)
        self.subvertices.append(self.subvertex4)
        self.subedges = list()
        self.subgraph = PartitionedGraph("Subgraph", self.subvertices,
                                         self.subedges)
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices(self.subvertices, self.vert1)
        placements = self.bp.place(self.subgraph, self.graph_mapper)
        for placement in placements.placements:
            print placement.subvertex.label, placement.subvertex.n_atoms, \
                'x:', placement.x, 'y:', placement.y, 'p:', placement.p

    @unittest.skip("demonstrating skipping")
    def test_unsupported_non_placer_constraint(self):
        self.assertEqual(True, False, "Test not implemented yet")

    @unittest.skip("demonstrating skipping")
    def test_unsupported_placer_constraint(self):
        self.assertEqual(True, False, "Test not implemented yet")

    @unittest.skip("demonstrating skipping")
    def test_unsupported_placer_constraints(self):
        self.assertEqual(True, False, "Test not implemented yet")

    @unittest.skip("demonstrating skipping")
    def test_many_subvertices(self):
        subvertices = list()
        # 50 atoms per core, 17 usable cores on each of 20 chips
        for i in range(20 * 17):
            subvertices.append(PartitionedTestVertex(
                0, 50, get_resources_used_by_atoms(0, 50, []),
                "PartitionedVertex " + str(i)))

        self.graph = PartitionableGraph("Graph",subvertices)
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices(subvertices)
        self.bp = BasicPlacer(self.machine, self.graph)
        self.subgraph = PartitionedGraph(subvertices=subvertices)
        placements = self.bp.place(self.subgraph, self.graph_mapper)
        for placement in placements.placements:
            print placement.subvertex.label, placement.subvertex.n_atoms, \
                'x:', placement.x, 'y:', placement.y, 'p:', placement.p

    @unittest.skip("demonstrating skipping")
    def test_too_many_subvertices(self):
        subvertices = list()
        for i in range(100 * 17): #51 atoms per each processor on 100 chips
            subvertices.append(PartitionedTestVertex(
                0, 50, get_resources_used_by_atoms(0, 50, []),
                "PartitionedVertex " + str(i)))

        self.graph = PartitionableGraph("Graph",subvertices)
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices(subvertices)
        self.bp = BasicPlacer(self.machine, self.graph)
        self.subgraph = PartitionedGraph(subvertices=subvertices)
        with self.assertRaises(PacmanPlaceException):
            placements = self.bp.place(self.subgraph, self.graph_mapper)

    @unittest.skip("demonstrating skipping")
    def test_fill_machine(self):
        subvertices = list()
        for i in range(99 * 17): #51 atoms per each processor on 99 chips
            subvertices.append(PartitionedTestVertex(
                0, 50, get_resources_used_by_atoms(0, 50, []),
                "PartitionedVertex " + str(i)))

        self.graph = PartitionableGraph("Graph",subvertices)
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices(subvertices)
        self.bp = BasicPlacer(self.machine, self.graph)
        self.subgraph = PartitionedGraph(subvertices=subvertices)
        placements = self.bp.place(self.subgraph, self.graph_mapper)
Example #18
class Spinnaker(object):

    def __init__(self, host_name=None, timestep=None, min_delay=None,
                 max_delay=None, graph_label=None,
                 database_socket_addresses=None):

        self._hostname = host_name

        # update graph label if needed
        if graph_label is None:
            graph_label = "Application_graph"

        # delays parameters
        self._min_supported_delay = None
        self._max_supported_delay = None

        # pacman objects
        self._partitionable_graph = PartitionableGraph(label=graph_label)
        self._partitioned_graph = None
        self._graph_mapper = None
        self._placements = None
        self._router_tables = None
        self._routing_infos = None
        self._tags = None
        self._machine = None
        self._txrx = None
        self._reports_states = None
        self._app_id = None
        self._buffer_manager = None

        # database objects
        self._database_socket_addresses = set()
        if database_socket_addresses is not None:
            self._database_socket_addresses.update(database_socket_addresses)
        self._database_interface = None
        self._create_database = None
        self._database_file_path = None

        # Determine default executable folder location
        # and add this default to end of list of search paths
        executable_finder.add_path(os.path.dirname(model_binaries.__file__))

        # population holders
        self._populations = list()
        self._projections = list()
        self._multi_cast_vertex = None
        self._edge_count = 0
        self._live_spike_recorder = dict()

        # holder for the executable targets (which we will need for reset and
        # pause-and-resume functionality)
        self._executable_targets = None

        # holders for data needed for reset when nothing changes in the
        # application graph
        self._processor_to_app_data_base_address_mapper = None
        self._placement_to_app_data_file_paths = None

        # holder for timing related values
        self._has_ran = False
        self._has_reset_last = False
        self._current_run_ms = 0
        self._no_machine_time_steps = None
        self._machine_time_step = None
        self._no_sync_changes = 0

        # state that's needed the first time around
        if self._app_id is None:
            self._app_id = config.getint("Machine", "appID")

            if config.getboolean("Reports", "reportsEnabled"):
                self._reports_states = ReportState(
                    config.getboolean("Reports", "writePartitionerReports"),
                    config.getboolean("Reports",
                                      "writePlacerReportWithPartitionable"),
                    config.getboolean("Reports",
                                      "writePlacerReportWithoutPartitionable"),
                    config.getboolean("Reports", "writeRouterReports"),
                    config.getboolean("Reports", "writeRouterInfoReport"),
                    config.getboolean("Reports", "writeTextSpecs"),
                    config.getboolean("Reports", "writeReloadSteps"),
                    config.getboolean("Reports", "writeTransceiverReport"),
                    config.getboolean("Reports", "outputTimesForSections"),
                    config.getboolean("Reports", "writeTagAllocationReports"))

            # set up reports default folder
            self._report_default_directory, this_run_time_string = \
                helpful_functions.set_up_report_specifics(
                    default_report_file_path=config.get(
                        "Reports", "defaultReportFilePath"),
                    max_reports_kept=config.getint(
                        "Reports", "max_reports_kept"),
                    app_id=self._app_id)

            # set up application report folder
            self._app_data_runtime_folder = \
                helpful_functions.set_up_output_application_data_specifics(
                    max_application_binaries_kept=config.getint(
                        "Reports", "max_application_binaries_kept"),
                    where_to_write_application_data_files=config.get(
                        "Reports", "defaultApplicationDataFilePath"),
                    app_id=self._app_id,
                    this_run_time_string=this_run_time_string)

        self._spikes_per_second = float(config.getfloat(
            "Simulation", "spikes_per_second"))
        self._ring_buffer_sigma = float(config.getfloat(
            "Simulation", "ring_buffer_sigma"))

        # set up machine targeted data
        self._set_up_machine_specifics(timestep, min_delay, max_delay,
                                       host_name)

        logger.info("Setting time scale factor to {}."
                    .format(self._time_scale_factor))

        logger.info("Setting appID to %d." % self._app_id)

        # get the machine time step
        logger.info("Setting machine time step to {} micro-seconds."
                    .format(self._machine_time_step))

    def _set_up_machine_specifics(self, timestep, min_delay, max_delay,
                                  hostname):
        self._machine_time_step = config.getint("Machine", "machineTimeStep")

        # deal with params allowed via the setup options
        if timestep is not None:

            # convert from milliseconds into microseconds
            timestep *= 1000
            self._machine_time_step = timestep

        if min_delay is not None and float(min_delay * 1000) < 1.0 * timestep:
            raise common_exceptions.ConfigurationException(
                "Pacman does not support min delays below {} ms with the "
                "current machine time step"
                .format(constants.MIN_SUPPORTED_DELAY * timestep))

        natively_supported_delay_for_models = \
            constants.MAX_SUPPORTED_DELAY_TICS
        delay_extension_max_supported_delay = \
            constants.MAX_DELAY_BLOCKS \
            * constants.MAX_TIMER_TICS_SUPPORTED_PER_BLOCK

        max_delay_tics_supported = \
            natively_supported_delay_for_models + \
            delay_extension_max_supported_delay
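        # illustrative note (assuming the usual default constants: 16 delay
        # tics supported natively plus 8 blocks of 16 tics from the delay
        # extension, i.e. 144 tics in total): with a 1000 us machine time
        # step this corresponds to a maximum delay of 144 ms, which is also
        # where the 0.144 * timestep figure in the error message below
        # comes from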

        if max_delay is not None\
           and float(max_delay * 1000) > max_delay_tics_supported * timestep:
            raise common_exceptions.ConfigurationException(
                "Pacman does not support max delays above {} ms with the "
                "current machine time step".format(0.144 * timestep))
        if min_delay is not None:
            self._min_supported_delay = min_delay
        else:
            self._min_supported_delay = timestep / 1000.0

        if max_delay is not None:
            self._max_supported_delay = max_delay
        else:
            self._max_supported_delay = (max_delay_tics_supported *
                                         (timestep / 1000.0))

        if (config.has_option("Machine", "timeScaleFactor") and
                config.get("Machine", "timeScaleFactor") != "None"):
            self._time_scale_factor = \
                config.getint("Machine", "timeScaleFactor")
            if timestep * self._time_scale_factor < 1000:
                logger.warn("the combination of machine time step and the "
                            "machine time scale factor results in a real "
                            "timer tick that is currently not reliably "
                            "supported by the spinnaker machine.")
        else:
            self._time_scale_factor = max(1,
                                          math.ceil(1000.0 / float(timestep)))
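            # worked example (illustrative): a 100 us machine time step gives
            # max(1, ceil(1000.0 / 100)) = 10, i.e. the simulation runs 10
            # times slower than real time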
            if self._time_scale_factor > 1:
                logger.warn("A timestep was entered that has forced pacman103 "
                            "to automatically slow the simulation down from "
                            "real time by a factor of {}. To remove this "
                            "automatic behaviour, please enter a "
                            "timescaleFactor value in your .pacman.cfg"
                            .format(self._time_scale_factor))

        if hostname is not None:
            self._hostname = hostname
            logger.warn("The machine name from PYNN setup is overriding the "
                        "machine name defined in the spynnaker.cfg file")
        elif config.has_option("Machine", "machineName"):
            self._hostname = config.get("Machine", "machineName")
        else:
            raise Exception("A SpiNNaker machine must be specified in "
                            "spynnaker.cfg.")
        use_virtual_board = config.getboolean("Machine", "virtual_board")
        if self._hostname == 'None' and not use_virtual_board:
            raise Exception("A SpiNNaker machine must be specified in "
                            "spynnaker.cfg.")

    def run(self, run_time):
        """

        :param run_time:
        :return:
        """
        logger.info("Starting execution process")

        # calculate number of machine time steps
        total_run_time = self._calculate_number_of_machine_time_steps(run_time)

        # Calculate the first machine time step to start from and set this
        # where necessary
        first_machine_time_step = int(math.ceil(
            (self._current_run_ms * 1000.0) / self._machine_time_step))
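        # worked example (illustrative): after 100 ms of prior run time at a
        # 1000 us machine time step, ceil((100 * 1000.0) / 1000) = 100, so
        # such vertices resume from machine time step 100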
        for vertex in self._partitionable_graph.vertices:
            if isinstance(vertex, AbstractHasFirstMachineTimeStep):
                vertex.set_first_machine_time_step(first_machine_time_step)

        # get inputs
        inputs, application_graph_changed = \
            self._create_pacman_executor_inputs(run_time)

        if application_graph_changed and self._has_ran:
            raise common_exceptions.ConfigurationException(
                "Changes to the application graph are not currently supported;"
                " please instead call p.reset(), p.end(), add changes and then"
                " call p.setup()")

        # if the application graph has changed and you've already run, kill
        # the old application running on the machine
        if application_graph_changed and self._has_ran:
            self._txrx.stop_application(self._app_id)

        # get outputs
        required_outputs = self._create_pacman_executor_outputs(
            requires_reset=False,
            application_graph_changed=application_graph_changed)

        # algorithms listing
        algorithms = self._create_algorithm_list(
            config.get("Mode", "mode") == "Debug", application_graph_changed,
            executing_reset=False)

        # xml paths to the algorithms metadata
        xml_paths = self._create_xml_paths()

        # run pacman executor
        pacman_exeuctor = helpful_functions.do_mapping(
            inputs, algorithms, required_outputs, xml_paths,
            config.getboolean("Reports", "outputTimesForSections"))

        # gather provenance data from the executor itself if needed
        if (config.get("Reports", "writeProvanceData") and
                not config.getboolean("Machine", "virtual_board")):
            pacman_executor_file_path = os.path.join(
                pacman_exeuctor.get_item("ProvenanceFilePath"),
                "PACMAN_provancence_data.xml")
            pacman_exeuctor.write_provenance_data_in_xml(
                pacman_executor_file_path,
                pacman_exeuctor.get_item("MemoryTransciever"))

        # sort out outputs data
        if application_graph_changed:
            self._update_data_structures_from_pacman_exeuctor(pacman_exeuctor)
        else:
            self._no_sync_changes = pacman_exeuctor.get_item("NoSyncChanges")
            self._has_ran = pacman_exeuctor.get_item("RanToken")

        # update the current run time to include this run
        self._current_run_ms = total_run_time

        # switch the reset last flag, as now the last thing to run is a run
        self._has_reset_last = False

    def reset(self):
        """ Code that puts the simulation back at time zero
        :return:
        """

        logger.info("Starting reset progress")

        inputs, application_graph_changed = \
            self._create_pacman_executor_inputs(
                this_run_time=0, is_resetting=True)

        if self._has_ran and application_graph_changed:
            raise common_exceptions.ConfigurationException(
                "Resetting the simulation after changing the model"
                " is not supported")

        algorithms = self._create_algorithm_list(
            config.get("Mode", "mode") == "Debug", application_graph_changed,
            executing_reset=True)
        xml_paths = self._create_xml_paths()
        required_outputs = self._create_pacman_executor_outputs(
            requires_reset=True,
            application_graph_changed=application_graph_changed)

        # rewind the buffers managed by the buffer manager, so the simulation
        # starts from the beginning again, and clear any buffered output
        self._buffer_manager.reset()

        # reset the count of how many milliseconds the application has run
        # for over multiple calls to run
        self._current_run_ms = 0

        # reset the sync-change counter, as loading the binaries again resets
        # the sync state to 0
        self._no_sync_changes = 0

        # set the has-ran flag back to False, so the tools behave as if
        # nothing has run yet
        self._has_ran = False

        # sets the reset last flag to true, so that when run occurs, the tools
        # know to update the vertices which need to know a reset has occurred
        self._has_reset_last = True

        # reset the n_machine_time_steps from each vertex
        for vertex in self.partitionable_graph.vertices:
            vertex.set_no_machine_time_steps(0)

        # execute reset functionality
        helpful_functions.do_mapping(
            inputs, algorithms, required_outputs, xml_paths,
            config.getboolean("Reports", "outputTimesForSections"))

        # if graph has changed kill all old objects as they will need to be
        # rebuilt at next run
        if application_graph_changed:
            self._placements = self._router_tables = self._routing_infos = \
                self._tags = self._graph_mapper = self._partitioned_graph = \
                self._database_interface = self._executable_targets = \
                self._placement_to_app_data_file_paths = \
                self._processor_to_app_data_base_address_mapper = None

    def _update_data_structures_from_pacman_exeuctor(self, pacman_exeuctor):
        """ Updates all the spinnaker local data structures that it needs from\
            the pacman executor
        :param pacman_exeuctor: the pacman executor required to extract data\
                structures from.
        :return:
        """
        if not config.getboolean("Machine", "virtual_board"):
            self._txrx = pacman_exeuctor.get_item("MemoryTransciever")
            self._has_ran = pacman_exeuctor.get_item("RanToken")
            self._executable_targets = \
                pacman_exeuctor.get_item("ExecutableTargets")
            self._buffer_manager = pacman_exeuctor.get_item("BufferManager")
            self._processor_to_app_data_base_address_mapper = \
                pacman_exeuctor.get_item("ProcessorToAppDataBaseAddress")
            self._placement_to_app_data_file_paths = \
                pacman_exeuctor.get_item("PlacementToAppDataFilePaths")

        self._placements = pacman_exeuctor.get_item("MemoryPlacements")
        self._router_tables = \
            pacman_exeuctor.get_item("MemoryRoutingTables")
        self._routing_infos = \
            pacman_exeuctor.get_item("MemoryRoutingInfos")
        self._tags = pacman_exeuctor.get_item("MemoryTags")
        self._graph_mapper = pacman_exeuctor.get_item("MemoryGraphMapper")
        self._partitioned_graph = \
            pacman_exeuctor.get_item("MemoryPartitionedGraph")
        self._machine = pacman_exeuctor.get_item("MemoryMachine")
        self._database_interface = \
            pacman_exeuctor.get_item("DatabaseInterface")
        self._database_file_path = \
            pacman_exeuctor.get_item("DatabaseFilePath")
        self._no_sync_changes = pacman_exeuctor.get_item("NoSyncChanges")

    @staticmethod
    def _create_xml_paths():

        # add the extra xml files from the config file
        xml_paths = config.get("Mapping", "extra_xmls_paths")
        if xml_paths == "None":
            xml_paths = list()
        else:
            xml_paths = xml_paths.split(",")

        # add extra xml paths for pynn algorithms
        xml_paths.append(
            os.path.join(os.path.dirname(overridden_pacman_functions.__file__),
                         "algorithms_metadata.xml"))
        xml_paths.append(os.path.join(os.path.dirname(
            pacman_algorithm_reports.__file__), "reports_metadata.xml"))
        return xml_paths

    def _create_algorithm_list(
            self, in_debug_mode, application_graph_changed, executing_reset):
        algorithms = list()

        # if you've not run before, add the buffer manager
        if (application_graph_changed and
                not config.getboolean("Machine", "virtual_board")):
            algorithms.append("FrontEndCommonBufferManagerCreater")

        # if a reset is required, the binaries need to be cleaned up
        # (unless nothing has run yet)
        if executing_reset and self._has_ran:

            # kill binaries
            # TODO: when SARK 1.34 appears, this only needs to send a signal
            algorithms.append("FrontEndCommonApplicationExiter")

        # if the application graph has changed, need to go through mapping
        if application_graph_changed and not executing_reset:

            # if the system has run before, kill the apps and run mapping
            # add debug algorithms if needed
            if in_debug_mode:
                algorithms.append("ValidRoutesChecker")

            algorithm_names = \
                config.get("Mapping", "algorithms")

            algorithm_strings = algorithm_names.split(",")
            for algorithm_string in algorithm_strings:
                split_string = algorithm_string.split(":")
                if len(split_string) == 1:
                    algorithms.append(split_string[0])
                else:
                    raise common_exceptions.ConfigurationException(
                        "The tool chain expects the config parameter to be a "
                        "comma-separated list of elements, where each element "
                        "is either algorithm_name:algorithm_config_file_path, "
                        "or just algorithm_name if it is an algorithm internal "
                        "to pacman. Please rectify this and try again")

            # if using virtual machine, add to list of algorithms the virtual
            # machine generator, otherwise add the standard machine generator
            if config.getboolean("Machine", "virtual_board"):
                algorithms.append("FrontEndCommonVirtualMachineInterfacer")
            else:
                # protect against the situation where the system has already
                # got a transceiver (overriding does not lose sockets)
                if self._txrx is not None:
                    self._txrx.close()
                    self._txrx = None

                algorithms.append("FrontEndCommonMachineInterfacer")
                algorithms.append("FrontEndCommonApplicationRunner")
                algorithms.append("FrontEndCommonNotificationProtocol")
                algorithms.append(
                    "FrontEndCommonPartitionableGraphApplicationDataLoader")
                algorithms.append("FrontEndCommonPartitionableGraphHost"
                                  "ExecuteDataSpecification")
                algorithms.append("FrontEndCommomLoadExecutableImages")
                algorithms.append("FrontEndCommonRoutingTableLoader")
                algorithms.append("FrontEndCommonTagsLoader")
                algorithms.append("FrontEndCommomPartitionableGraphData"
                                  "SpecificationWriter")

                # if the end user wants reload script, add the reload script
                # creator to the list (reload script currently only supported
                # for the original run)
                if (not self._has_ran and
                        config.getboolean("Reports", "writeReloadSteps")):
                    algorithms.append("FrontEndCommonReloadScriptCreator")
                elif (self.has_ran and
                        config.getboolean("Reports", "writeReloadSteps")):
                    logger.warn(
                        "The reload script cannot handle multi-runs, nor can "
                        "it handle resets, therefore it will only contain the "
                        "initial run")

            if (config.getboolean("Reports", "writeMemoryMapReport") and
                    not config.getboolean("Machine", "virtual_board")):
                algorithms.append("FrontEndCommonMemoryMapReport")

            if config.getboolean("Reports", "writeNetworkSpecificationReport"):
                algorithms.append(
                    "FrontEndCommonNetworkSpecificationPartitionableReport")

            # if going to write provenance data after the run add the two
            # provenance gatherers
            if (config.get("Reports", "writeProvanceData") and
                    not config.getboolean("Machine", "virtual_board")):
                algorithms.append("FrontEndCommonProvenanceGatherer")

            # define mapping between output types and reports
            if self._reports_states is not None \
                    and self._reports_states.tag_allocation_report:
                algorithms.append("TagReport")
            if self._reports_states is not None \
                    and self._reports_states.routing_info_report:
                algorithms.append("routingInfoReports")
            if self._reports_states is not None \
                    and self._reports_states.router_report:
                algorithms.append("RouterReports")
            if self._reports_states is not None \
                    and self._reports_states.partitioner_report:
                algorithms.append("PartitionerReport")
            if (self._reports_states is not None and
                    self._reports_states.
                    placer_report_with_partitionable_graph):
                algorithms.append("PlacerReportWithPartitionableGraph")
            if (self._reports_states is not None and
                    self._reports_states.
                    placer_report_without_partitionable_graph):
                algorithms.append("PlacerReportWithoutPartitionableGraph")
        else:

            # add function for extracting all the recorded data from
            # recorded populations
            if self._has_ran and not executing_reset:
                algorithms.append("SpyNNakerRecordingExtractor")

                # add functions for updating the models
                algorithms.append("FrontEndCommonRuntimeUpdater")
            if not self._has_ran and not executing_reset:
                algorithms.append(
                    "FrontEndCommonPartitionableGraphApplicationDataLoader")
                algorithms.append("FrontEndCommomLoadExecutableImages")
            if not executing_reset:
                algorithms.append("FrontEndCommonNotificationProtocol")

                # add functions for setting off the models again
                algorithms.append("FrontEndCommonApplicationRunner")

                # if going to write provenance data after the run add the two
                # provenance gatherers
                if config.get("Reports", "writeProvanceData"):
                    algorithms.append("FrontEndCommonProvenanceGatherer")
        return algorithms

    def _create_pacman_executor_outputs(
            self, requires_reset, application_graph_changed):

        # explicitly define what outputs spynnaker expects
        required_outputs = list()
        if config.getboolean("Machine", "virtual_board"):
            if application_graph_changed:
                required_outputs.extend([
                    "MemoryPlacements", "MemoryRoutingTables",
                    "MemoryRoutingInfos", "MemoryTags",
                    "MemoryPartitionedGraph", "MemoryGraphMapper"])
        else:
            if not requires_reset:
                required_outputs.append("RanToken")

        # if front end wants reload script, add requires reload token
        if (config.getboolean("Reports", "writeReloadSteps") and
                not self._has_ran and application_graph_changed and
                not config.getboolean("Machine", "virtual_board")):
            required_outputs.append("ReloadToken")
        return required_outputs

    def _create_pacman_executor_inputs(
            self, this_run_time, is_resetting=False):

        application_graph_changed = \
            self._detect_if_graph_has_changed(not is_resetting)
        inputs = list()

        # file path to store any provenance data to
        provenance_file_path = \
            os.path.join(self._report_default_directory, "provance_data")
        if not os.path.exists(provenance_file_path):
            os.mkdir(provenance_file_path)

        # all modes need the NoSyncChanges
        if application_graph_changed:
            self._no_sync_changes = 0
        inputs.append(
            {'type': "NoSyncChanges", 'value': self._no_sync_changes})

        # support resetting the machine during start up
        if (config.getboolean("Machine", "reset_machine_on_startup") and
                not self._has_ran and not is_resetting):
            inputs.append(
                {"type": "ResetMachineOnStartupFlag", 'value': True})
        else:
            inputs.append(
                {"type": "ResetMachineOnStartupFlag", 'value': False})

        # support runtime updater
        if self._has_ran and not is_resetting:
            no_machine_time_steps =\
                int((this_run_time * 1000.0) /
                    self._machine_time_step)
            inputs.append({'type': "RunTimeMachineTimeSteps",
                           'value': no_machine_time_steps})

        # FrontEndCommonPartitionableGraphApplicationDataLoader after a
        # reset and no changes
        if not self._has_ran and not application_graph_changed:
            inputs.append({
                'type': "ProcessorToAppDataBaseAddress",
                'value': self._processor_to_app_data_base_address_mapper})
            inputs.append({"type": "PlacementToAppDataFilePaths",
                           'value': self._placement_to_app_data_file_paths})
            inputs.append({'type': "WriteCheckerFlag",
                           'value': config.getboolean(
                               "Mode", "verify_writes")})

        # support resetting when there are changes in the application graph
        # (only need to exit)
        if application_graph_changed and is_resetting:
            inputs.append({"type": "MemoryTransciever", 'value': self._txrx})
            inputs.append({'type': "ExecutableTargets",
                           'value': self._executable_targets})
            inputs.append({'type': "MemoryPlacements",
                           'value': self._placements})
            inputs.append({'type': "MemoryGraphMapper",
                           'value': self._graph_mapper})
            inputs.append({'type': "APPID", 'value': self._app_id})
            inputs.append({'type': "RanToken", 'value': self._has_ran})

        elif application_graph_changed and not is_resetting:

            # make a folder for the json files to be stored in
            json_folder = os.path.join(
                self._report_default_directory, "json_files")
            if not os.path.exists(json_folder):
                os.mkdir(json_folder)

            # translate config "None" to None
            width = config.get("Machine", "width")
            height = config.get("Machine", "height")
            if width == "None":
                width = None
            else:
                width = int(width)
            if height == "None":
                height = None
            else:
                height = int(height)

            number_of_boards = config.get("Machine", "number_of_boards")
            if number_of_boards == "None":
                number_of_boards = None

            scamp_socket_addresses = config.get("Machine",
                                                "scamp_connections_data")
            if scamp_socket_addresses == "None":
                scamp_socket_addresses = None

            boot_port_num = config.get("Machine", "boot_connection_port_num")
            if boot_port_num == "None":
                boot_port_num = None
            else:
                boot_port_num = int(boot_port_num)

            inputs.append({'type': "MemoryPartitionableGraph",
                           'value': self._partitionable_graph})
            inputs.append({'type': 'ReportFolder',
                           'value': self._report_default_directory})
            inputs.append({'type': "ApplicationDataFolder",
                           'value': self._app_data_runtime_folder})
            inputs.append({'type': 'IPAddress', 'value': self._hostname})

            # basic input stuff
            inputs.append({'type': "BMPDetails",
                           'value': config.get("Machine", "bmp_names")})
            inputs.append({'type': "DownedChipsDetails",
                           'value': config.get("Machine", "down_chips")})
            inputs.append({'type': "DownedCoresDetails",
                           'value': config.get("Machine", "down_cores")})
            inputs.append({'type': "BoardVersion",
                           'value': config.getint("Machine", "version")})
            inputs.append({'type': "NumberOfBoards",
                           'value': number_of_boards})
            inputs.append({'type': "MachineWidth", 'value': width})
            inputs.append({'type': "MachineHeight", 'value': height})
            inputs.append({'type': "AutoDetectBMPFlag",
                           'value': config.getboolean("Machine",
                                                      "auto_detect_bmp")})
            inputs.append({'type': "EnableReinjectionFlag",
                           'value': config.getboolean("Machine",
                                                      "enable_reinjection")})
            inputs.append({'type': "ScampConnectionData",
                           'value': scamp_socket_addresses})
            inputs.append({'type': "BootPortNum", 'value': boot_port_num})
            inputs.append({'type': "APPID", 'value': self._app_id})
            inputs.append({'type': "RunTime", 'value': this_run_time})
            inputs.append({'type': "TimeScaleFactor",
                           'value': self._time_scale_factor})
            inputs.append({'type': "MachineTimeStep",
                           'value': self._machine_time_step})
            inputs.append({'type': "DatabaseSocketAddresses",
                           'value': self._database_socket_addresses})
            inputs.append({'type': "DatabaseWaitOnConfirmationFlag",
                           'value': config.getboolean(
                               "Database", "wait_on_confirmation")})
            inputs.append({'type': "WriteCheckerFlag",
                           'value': config.getboolean(
                               "Mode", "verify_writes")})
            inputs.append({'type': "WriteTextSpecsFlag",
                           'value': config.getboolean(
                               "Reports", "writeTextSpecs")})
            inputs.append({'type': "ExecutableFinder",
                           'value': executable_finder})
            inputs.append({'type': "MachineHasWrapAroundsFlag",
                           'value': config.getboolean(
                               "Machine", "requires_wrap_arounds")})
            inputs.append({'type': "ReportStates",
                           'value': self._reports_states})
            inputs.append({'type': "UserCreateDatabaseFlag",
                           'value': config.get("Database", "create_database")})
            inputs.append({'type': "ExecuteMapping",
                           'value': config.getboolean(
                               "Database",
                               "create_routing_info_to_neuron_id_mapping")})
            inputs.append({'type': "DatabaseSocketAddresses",
                           'value': self._database_socket_addresses})
            inputs.append({'type': "SendStartNotifications",
                           'value': config.getboolean(
                               "Database", "send_start_notification")})
            inputs.append({'type': "ProvenanceFilePath",
                           'value': provenance_file_path})

            # add paths for each file based version
            inputs.append({'type': "FileCoreAllocationsFilePath",
                           'value': os.path.join(
                               json_folder, "core_allocations.json")})
            inputs.append({'type': "FileSDRAMAllocationsFilePath",
                           'value': os.path.join(
                               json_folder, "sdram_allocations.json")})
            inputs.append({'type': "FileMachineFilePath",
                           'value': os.path.join(
                               json_folder, "machine.json")})
            inputs.append({'type': "FilePartitionedGraphFilePath",
                           'value': os.path.join(
                               json_folder, "partitioned_graph.json")})
            inputs.append({'type': "FilePlacementFilePath",
                           'value': os.path.join(
                               json_folder, "placements.json")})
            inputs.append({'type': "FileRouingPathsFilePath",
                           'value': os.path.join(
                               json_folder, "routing_paths.json")})
            inputs.append({'type': "FileConstraintsFilePath",
                           'value': os.path.join(
                               json_folder, "constraints.json")})

            if self._has_ran:
                logger.warn(
                    "The network has changed, and therefore mapping will be"
                    " done again.  Any recorded data will be erased.")
        else:
            # mapping does not need to be executed, therefore add
            # the data elements needed for the application runner and
            # runtime re-setter
            inputs.append({"type": "BufferManager",
                           "value": self._buffer_manager})
            inputs.append({'type': "DatabaseWaitOnConfirmationFlag",
                           'value': config.getboolean(
                               "Database", "wait_on_confirmation")})
            inputs.append({'type': "SendStartNotifications",
                           'value': config.getboolean(
                               "Database", "send_start_notification")})
            inputs.append({'type': "DatabaseInterface",
                           'value': self._database_interface})
            inputs.append({"type": "DatabaseSocketAddresses",
                           'value': self._database_socket_addresses})
            inputs.append({'type': "DatabaseFilePath",
                           'value': self._database_file_path})
            inputs.append({'type': "ExecutableTargets",
                           'value': self._executable_targets})
            inputs.append({'type': "APPID", 'value': self._app_id})
            inputs.append({"type": "MemoryTransciever", 'value': self._txrx})
            inputs.append({"type": "RunTime",
                           'value': this_run_time})
            inputs.append({'type': "TimeScaleFactor",
                           'value': self._time_scale_factor})
            inputs.append({'type': "LoadedReverseIPTagsToken",
                           'value': True})
            inputs.append({'type': "LoadedIPTagsToken", 'value': True})
            inputs.append({'type': "LoadedRoutingTablesToken",
                           'value': True})
            inputs.append({'type': "LoadBinariesToken", 'value': True})
            inputs.append({'type': "LoadedApplicationDataToken",
                           'value': True})
            inputs.append({'type': "MemoryPlacements",
                           'value': self._placements})
            inputs.append({'type': "MemoryGraphMapper",
                           'value': self._graph_mapper})
            inputs.append({'type': "MemoryPartitionableGraph",
                           'value': self._partitionable_graph})
            inputs.append({'type': "MemoryExtendedMachine",
                           'value': self._machine})
            inputs.append({'type': "MemoryRoutingTables",
                           'value': self._router_tables})
            inputs.append({'type': "ProvenanceFilePath",
                           'value': provenance_file_path})
            inputs.append({'type': "RanToken", 'value': self._has_ran})

        return inputs, application_graph_changed

    def _calculate_number_of_machine_time_steps(self, next_run_time):
        total_run_time = next_run_time
        if next_run_time is not None:
            total_run_time += self._current_run_ms
            machine_time_steps = (
                (total_run_time * 1000.0) / self._machine_time_step)
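            # e.g. (illustrative) a 10.5 ms total run time at a 1000 us
            # machine time step gives 10.5 machine time steps, which triggers
            # the warning below and is then rounded up to 11 whole steps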
            if machine_time_steps != int(machine_time_steps):
                logger.warn(
                    "The runtime and machine time step combination result in "
                    "a fractional number of machine time steps")
            self._no_machine_time_steps = int(math.ceil(machine_time_steps))
        else:
            self._no_machine_time_steps = None
            for vertex in self._partitionable_graph.vertices:
                if ((isinstance(vertex, AbstractSpikeRecordable) and
                        vertex.is_recording_spikes()) or
                        (isinstance(vertex, AbstractVRecordable) and
                            vertex.is_recording_v()) or
                        (isinstance(vertex, AbstractGSynRecordable) and
                            vertex.is_recording_gsyn())):
                    raise common_exceptions.ConfigurationException(
                        "recording a population when set to infinite runtime "
                        "is not currently supported")
        for vertex in self._partitionable_graph.vertices:
            if isinstance(vertex, AbstractDataSpecableVertex):
                vertex.set_no_machine_time_steps(self._no_machine_time_steps)
        return total_run_time

    def _detect_if_graph_has_changed(self, reset_flags=True):
        """ Iterates though the graph and looks changes
        """
        changed = False
        for population in self._populations:
            if population.requires_mapping:
                changed = True
            if reset_flags:
                population.mark_no_changes()

        for projection in self._projections:
            if projection.requires_mapping:
                changed = True
            if reset_flags:
                projection.mark_no_changes()

        return changed

    @property
    def app_id(self):
        """

        :return:
        """
        return self._app_id

    @property
    def has_ran(self):
        """

        :return:
        """
        return self._has_ran

    @property
    def machine_time_step(self):
        """

        :return:
        """
        return self._machine_time_step

    @property
    def no_machine_time_steps(self):
        """

        :return:
        """
        return self._no_machine_time_steps

    @property
    def timescale_factor(self):
        """

        :return:
        """
        return self._time_scale_factor

    @property
    def spikes_per_second(self):
        """

        :return:
        """
        return self._spikes_per_second

    @property
    def ring_buffer_sigma(self):
        """

        :return:
        """
        return self._ring_buffer_sigma

    @property
    def get_multi_cast_source(self):
        """

        :return:
        """
        return self._multi_cast_vertex

    @property
    def partitioned_graph(self):
        """

        :return:
        """
        return self._partitioned_graph

    @property
    def partitionable_graph(self):
        """

        :return:
        """
        return self._partitionable_graph

    @property
    def placements(self):
        """

        :return:
        """
        return self._placements

    @property
    def transceiver(self):
        """

        :return:
        """
        return self._txrx

    @property
    def graph_mapper(self):
        """

        :return:
        """
        return self._graph_mapper

    @property
    def routing_infos(self):
        """

        :return:
        """
        return self._routing_infos

    @property
    def min_supported_delay(self):
        """
        the minimum supported delay, in milliseconds
        :return:
        """
        return self._min_supported_delay

    @property
    def max_supported_delay(self):
        """
        the maximum supported delay, in milliseconds
        :return:
        """
        return self._max_supported_delay

    @property
    def buffer_manager(self):
        return self._buffer_manager

    def set_app_id(self, value):
        """

        :param value:
        :return:
        """
        self._app_id = value

    def get_current_time(self):
        """

        :return:
        """
        if self._has_ran:
            return float(self._current_run_ms)
        return 0.0

    def __repr__(self):
        return "Spinnaker object for machine {}".format(self._hostname)

    def add_vertex(self, vertex_to_add):
        """

        :param vertex_to_add:
        :return:
        """
        if isinstance(vertex_to_add, CommandSender):
            self._multi_cast_vertex = vertex_to_add

        self._partitionable_graph.add_vertex(vertex_to_add)

        if isinstance(vertex_to_add, AbstractSendMeMulticastCommandsVertex):
            if self._multi_cast_vertex is None:
                self._multi_cast_vertex = CommandSender(
                    self._machine_time_step, self._time_scale_factor)
                self.add_vertex(self._multi_cast_vertex)
            edge = MultiCastPartitionableEdge(
                self._multi_cast_vertex, vertex_to_add)
            self._multi_cast_vertex.add_commands(vertex_to_add.commands, edge)
            self.add_edge(edge)

        # add any dependent edges and vertices if needed
        if isinstance(vertex_to_add,
                      AbstractVertexWithEdgeToDependentVertices):
            for dependant_vertex in vertex_to_add.dependent_vertices:
                self.add_vertex(dependant_vertex)
                dependant_edge = MultiCastPartitionableEdge(
                    pre_vertex=vertex_to_add, post_vertex=dependant_vertex)
                self.add_edge(
                    dependant_edge,
                    vertex_to_add.edge_partition_identifier_for_dependent_edge)

    def add_edge(self, edge_to_add, partition_identifier=None):
        """

        :param edge_to_add:
        :param partition_identifier: the partition identifier for the outgoing\
                    edge partition
        :return:
        """
        self._partitionable_graph.add_edge(edge_to_add, partition_identifier)

    def create_population(self, size, cellclass, cellparams, structure, label):
        """

        :param size:
        :param cellclass:
        :param cellparams:
        :param structure:
        :param label:
        :return:
        """
        return Population(
            size=size, cellclass=cellclass, cellparams=cellparams,
            structure=structure, label=label, spinnaker=self)

    def _add_population(self, population):
        """ Called by each population to add itself to the list
        """
        self._populations.append(population)

    def _add_projection(self, projection):
        """ called by each projection to add itself to the list
        :param projection:
        :return:
        """
        self._projections.append(projection)

    def create_projection(
            self, presynaptic_population, postsynaptic_population, connector,
            source, target, synapse_dynamics, label, rng):
        """

        :param presynaptic_population:
        :param postsynaptic_population:
        :param connector:
        :param source:
        :param target:
        :param synapse_dynamics:
        :param label:
        :param rng:
        :return:
        """
        if label is None:
            label = "Projection {}".format(self._edge_count)
            self._edge_count += 1
        return Projection(
            presynaptic_population=presynaptic_population, label=label,
            postsynaptic_population=postsynaptic_population, rng=rng,
            connector=connector, source=source, target=target,
            synapse_dynamics=synapse_dynamics, spinnaker_control=self,
            machine_time_step=self._machine_time_step,
            timescale_factor=self._time_scale_factor,
            user_max_delay=self.max_supported_delay)

    def stop(self, turn_off_machine=None, clear_routing_tables=None,
             clear_tags=None):
        """
        :param turn_off_machine: decides if the machine should be powered down\
            after running the execution. Note that this powers down all boards\
            connected to the BMP connections given to the transceiver
        :type turn_off_machine: bool
        :param clear_routing_tables: informs the tool chain if it\
            should turn off the clearing of the routing tables
        :type clear_routing_tables: bool
        :param clear_tags: informs the tool chain if it should clear the tags\
            off the machine at stop
        :type clear_tags: boolean
        :return: None
        """
        for population in self._populations:
            population._end()

        # if not a virtual machine, then shut down stuff on the board
        if not config.getboolean("Machine", "virtual_board"):

            if turn_off_machine is None:
                turn_off_machine = \
                    config.getboolean("Machine", "turn_off_machine")

            if clear_routing_tables is None:
                clear_routing_tables = config.getboolean(
                    "Machine", "clear_routing_tables")

            if clear_tags is None:
                clear_tags = config.getboolean("Machine", "clear_tags")

            # if stopping on the machine, clear the IP tags and reverse IP tags
            if clear_tags:
                for ip_tag in self._tags.ip_tags:
                    self._txrx.clear_ip_tag(
                        ip_tag.tag, board_address=ip_tag.board_address)
                for reverse_ip_tag in self._tags.reverse_ip_tags:
                    self._txrx.clear_ip_tag(
                        reverse_ip_tag.tag,
                        board_address=reverse_ip_tag.board_address)

            # if requested, clear the routing table entries
            if clear_routing_tables:
                for router_table in self._router_tables.routing_tables:
                    if not self._machine.get_chip_at(router_table.x,
                                                     router_table.y).virtual:
                        self._txrx.clear_multicast_routes(router_table.x,
                                                          router_table.y)

            # clear values
            self._no_sync_changes = 0

            # app stop command
            self._txrx.stop_application(self._app_id)

            if self._create_database:
                self._database_interface.stop()

            self._buffer_manager.stop()

            # stop the transceiver
            if turn_off_machine:
                logger.info("Turning off machine")
            self._txrx.close(power_off_machine=turn_off_machine)

    def _add_socket_address(self, socket_address):
        """

        :param socket_address:
        :return:
        """
        self._database_socket_addresses.add(socket_address)
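

# A minimal usage sketch (illustrative only, not part of the original source;
# it assumes this front end is importable as shown, that a valid spynnaker.cfg
# provides a machineName or virtual_board setting, and that cellclass /
# cellparams are defined elsewhere):
#
#     spinnaker = Spinnaker(host_name="192.168.240.1", timestep=1.0,
#                           graph_label="example_graph")
#     population = spinnaker.create_population(
#         100, cellclass, cellparams, structure=None, label="pop_1")
#     spinnaker.run(1000)                  # run for 1000 ms of simulation time
#     print spinnaker.get_current_time()   # 1000.0 once the run has completed
#     spinnaker.stop()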
Example #19
class TestBasicPartitioner(unittest.TestCase):
    """
    tests for the basic partitioning algorithm
    """

    def setup(self):
        """
        setup for all basic partitioner tests
        :return:
        """
        self.vert1 = TestVertex(10, "New AbstractConstrainedVertex 1")
        self.vert2 = TestVertex(5, "New AbstractConstrainedVertex 2")
        self.vert3 = TestVertex(3, "New AbstractConstrainedVertex 3")
        self.edge1 = MultiCastPartitionableEdge(self.vert1, self.vert2, 
                                                None, "First edge")
        self.edge2 = MultiCastPartitionableEdge(self.vert2, self.vert1,
                                                None, "Second edge")
        self.edge3 = MultiCastPartitionableEdge(self.vert1, self.vert3,
                                                None, "Third edge")
        self.verts = [self.vert1, self.vert2, self.vert3]
        self.edges = [self.edge1, self.edge2, self.edge3]
        self.graph = PartitionableGraph("Graph", self.verts, self.edges)

        flops = 1000
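        # link direction indices for the six SpiNNaker chip-to-chip links
        # (east, north-east, north, west, south-west, south)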
        (e, ne, n, w, sw, s) = range(6)

        processors = list()
        for i in range(18):
            processors.append(Processor(i, flops))

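        # 128 * 2**20 bytes = 128 MiB, the nominal amount of SDRAM on a
        # SpiNNaker chip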
        _sdram = SDRAM(128 * (2**20))

        links = list()
        links.append(Link(0, 0, 0, 1, 1, n, n))
        links.append(Link(0, 1, 1, 1, 0, s, s))
        links.append(Link(1, 1, 2, 0, 0, e, e))
        links.append(Link(1, 0, 3, 0, 1, w, w))
        r = Router(links, False, 100, 1024)

        ip = "192.162.240.253"
        chips = list()
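        # build a 5 x 5 grid of identical chips, giving 25 chips of
        # 18 processors each for the partitioner to work with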
        for x in range(5):
            for y in range(5):
                chips.append(Chip(x, y, processors, r, _sdram, 0, 0, ip))

        self.machine = Machine(chips)
        self.bp = BasicPartitioner()

    def test_new_basic_partitioner(self):
        """
        test that the basic partitioner can only handle
        PartitionerMaximumSizeConstraints
        :return:
        """
        self.setup()
        self.assertEqual(self.bp._supported_constraints[0],
                         PartitionerMaximumSizeConstraint)

    def test_partition_with_no_additional_constraints(self):
        """
        test partitioning of a graph with no additional constraints
        :return:
        """
        self.setup()
        subgraph, mapper = self.bp.partition(self.graph, self.machine)
        self.assertEqual(len(subgraph.subvertices), 3)
        vert_sizes = []
        for vert in self.verts:
            vert_sizes.append(vert.n_atoms)
        self.assertEqual(len(subgraph.subedges), 3)
        for subvert in subgraph.subvertices:
            self.assertIn(mapper.get_subvertex_slice(subvert).n_atoms,
                          vert_sizes)

    def test_partition_with_no_additional_constraints_extra_edge(self):
        """
        test that basic partitioning works with an extra edge in the graph
        :return:
        """
        self.setup()
        self.graph.add_edge(MultiCastPartitionableEdge(self.vert3, self.vert1,
                                                       None, "Extra edge"))
        subgraph, mapper = self.bp.partition(self.graph, self.machine)
        self.assertEqual(len(subgraph.subvertices), 3)
        self.assertEqual(len(subgraph.subedges), 4)

    def test_partition_on_large_vertex_than_has_to_be_split(self):
        """
        test that partitioning one large vertex can split it into two
        smaller ones
        :return:
        """
        self.setup()
        large_vertex = TestVertex(300, "Large vertex")
        self.graph = PartitionableGraph(
            "Graph with large vertex", [large_vertex], [])
        subgraph, mapper = self.bp.partition(self.graph, self.machine)
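        # the default model-based limit is 256 atoms per core, so a 300-atom
        # vertex must be split into at least two subvertices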
        self.assertEqual(large_vertex._model_based_max_atoms_per_core, 256)
        self.assertGreater(len(subgraph.subvertices), 1)

    def test_partition_on_very_large_vertex_than_has_to_be_split(self):
        """
        test that partitioning one very large vertex can split it into
        multiple smaller ones
        :return:
        """
        self.setup()
        large_vertex = TestVertex(500, "Large vertex")
        self.assertEqual(large_vertex._model_based_max_atoms_per_core, 256)
        self.graph = PartitionableGraph(
            "Graph with large vertex", [large_vertex], [])
        subgraph, mapper = self.bp.partition(self.graph, self.machine)
        self.assertEqual(large_vertex._model_based_max_atoms_per_core, 256)
        self.assertGreater(len(subgraph.subvertices), 1)

    def test_partition_on_target_size_vertex_than_has_to_be_split(self):
        """
        test that a fixed maximum-size constraint produces the correct
        number of subvertices
        :return:
        """
        self.setup()
        large_vertex = TestVertex(1000, "Large vertex")
        large_vertex.add_constraint(PartitionerMaximumSizeConstraint(10))
        self.graph = PartitionableGraph(
            "Graph with large vertex", [large_vertex], [])
        subgraph, mapper = self.bp.partition(self.graph, self.machine)
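        # 1000 atoms with at most 10 atoms per core should give exactly
        # 100 subvertices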
        self.assertEqual(len(subgraph.subvertices), 100)

    def test_partition_with_barely_sufficient_space(self):
        """
        test that partitioning will work when close to filling the machine
        :return:
        """
        self.setup()
        flops = 1000
        (e, ne, n, w, sw, s) = range(6)

        processors = list()
        for i in range(18):
            processors.append(Processor(i, flops))

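        # 2**12 bytes = 4 KiB of SDRAM per chip, deliberately small so the
        # machine is only just big enough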
        _sdram = SDRAM(2**12)

        links = list()
        links.append(Link(0, 0, 0, 1, 1, n, n))
        links.append(Link(0, 1, 1, 1, 0, s, s))
        links.append(Link(1, 1, 2, 0, 0, e, e))
        links.append(Link(1, 0, 3, 0, 1, w, w))
        r = Router(links, False, 100, 1024)

        ip = "192.162.240.253"
        chips = list()
        for x in range(5):
            for y in range(5):
                chips.append(Chip(x, y, processors, r, _sdram, 0, 0, ip))

        self.machine = Machine(chips)
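        # 25 chips x 18 processors = 450 cores, so a 450-atom vertex at one
        # atom per core should exactly fill the machine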
        singular_vertex = TestVertex(450, "Large vertex", max_atoms_per_core=1)
        self.assertEqual(singular_vertex._model_based_max_atoms_per_core, 1)
        self.graph = PartitionableGraph(
            "Graph with large vertex", [singular_vertex], [])
        subgraph, mapper = self.bp.partition(self.graph, self.machine)
        self.assertEqual(singular_vertex._model_based_max_atoms_per_core, 1)
        self.assertEqual(len(subgraph.subvertices), 450)

    def test_partition_with_insufficient_space(self):
        """
        test that the partitioner raises an error when there is not enough
        space on the machine
        :return:
        """
        self.setup()
        flops = 1000
        (e, ne, n, w, sw, s) = range(6)

        processors = list()
        for i in range(18):
            processors.append(Processor(i, flops))

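        # 2**11 bytes = 2 KiB of SDRAM per chip, deliberately scarce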
        _sdram = SDRAM(2**11)

        links = list()
        links.append(Link(0, 0, 0, 1, 1, n, n))
        links.append(Link(0, 1, 1, 1, 0, s, s))
        links.append(Link(1, 1, 2, 0, 0, e, e))
        links.append(Link(1, 0, 3, 0, 1, w, w))
        r = Router(links, False, 100, 1024)

        ip = "192.162.240.253"
        chips = list()
        for x in range(5):
            for y in range(5):
                chips.append(Chip(x, y, processors, r, _sdram, 0, 0, ip))

        self.machine = Machine(chips)
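        # 3000 single-atom subvertices cannot fit on the 450 available cores
        # (25 chips x 18 processors), so partitioning is expected to fail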
        large_vertex = TestVertex(3000, "Large vertex", max_atoms_per_core=1)
        self.assertEqual(large_vertex._model_based_max_atoms_per_core, 1)
        self.graph = PartitionableGraph(
            "Graph with large vertex", [large_vertex], [])
        self.assertRaises(PacmanValueError, self.bp.partition,
                          self.graph, self.machine)

    def test_partition_with_less_sdram_than_default(self):
        """
        test that the partitioner works when the machine has less SDRAM
        available than the default
        :return:
        """
        self.setup()
        flops = 1000
        (e, ne, n, w, sw, s) = range(6)

        processors = list()
        for i in range(18):
            processors.append(Processor(i, flops))

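        # 128 * 2**19 bytes = 64 MiB, half the default per-chip SDRAM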
        _sdram = SDRAM(128 * (2**19))

        links = list()
        links.append(Link(0, 0, 0, 1, 1, n, n))
        links.append(Link(0, 1, 1, 1, 0, s, s))
        links.append(Link(1, 1, 2, 0, 0, e, e))
        links.append(Link(1, 0, 3, 0, 1, w, w))
        r = Router(links, False, 100, 1024)

        ip = "192.162.240.253"
        chips = list()
        for x in range(5):
            for y in range(5):
                chips.append(Chip(x, y, processors, r, _sdram, 0, 0, ip))

        self.machine = Machine(chips)
        self.bp.partition(self.graph, self.machine)

    def test_partition_with_more_sdram_than_default(self):
        """
        test that the partitioner works when the machine has more SDRAM
        available than the default
        :return:
        """
        self.setup()
        flops = 1000
        (e, ne, n, w, sw, s) = range(6)

        processors = list()
        for i in range(18):
            processors.append(Processor(i, flops))

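        # 128 * 2**21 bytes = 256 MiB, double the default per-chip SDRAM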
        _sdram = SDRAM(128 * (2**21))

        links = list()
        links.append(Link(0, 0, 0, 1, 1, n, n))
        links.append(Link(0, 1, 1, 1, 0, s, s))
        links.append(Link(1, 1, 2, 0, 0, e, e))
        links.append(Link(1, 0, 3, 0, 1, w, w))
        r = Router(links, False, 100, 1024)

        ip = "192.162.240.253"
        chips = list()
        for x in range(5):
            for y in range(5):
                chips.append(Chip(x, y, processors, r, _sdram, 0, 0, ip))

        self.machine = Machine(chips)
        subgraph, mapper = self.bp.partition(self.graph, self.machine)

    def test_partition_with_unsupported_constraints(self):
        """
        test that the partitioner raises an error when a vertex has an
        unrecognised constraint
        :return:
        """
        self.setup()
        constrained_vertex = TestVertex(13, "Constrained")
        constrained_vertex.add_constraint(
            NewPartitionerConstraint("Mock constraint"))
        graph = PartitionableGraph("Graph", [constrained_vertex], None)
        partitioner = BasicPartitioner()
        self.assertRaises(PacmanInvalidParameterException,
                          partitioner.partition, graph, self.machine)

    def test_partition_with_empty_graph(self):
        """
        test that the partitioner can work with an empty graph
        :return:
        """
        self.setup()
        self.graph = PartitionableGraph()
        subgraph, mapper = self.bp.partition(self.graph, self.machine)
        self.assertEqual(len(subgraph.subvertices), 0)