Example #1
0
 def test_new_graph(self):
     """
     Tests that after building a machine graph, all partitioned vertices
     and partitioned edges are in existence.
     """
     vertices = list()
     edges = list()
     # Build ten machine vertices with empty resource requirements
     for i in range(10):
         vertices.append(
             SimpleMachineVertex(ResourceContainer(), "V{}".format(i)))
     # Using SameAtomsAsVertexConstraint is expected to raise
     # NotImplementedError (presumably deprecated/unsupported -- confirm
     # against the constraint class); execution of the with-block stops
     # at the first raising statement.
     with self.assertRaises(NotImplementedError):
         vertices[1].add_constraint(SameAtomsAsVertexConstraint(
             vertices[4]))
         vertices[4].add_constraint(SameAtomsAsVertexConstraint(
             vertices[1]))
     # Edges fanning out from vertex 0 to vertices 1..5
     for i in range(5):
         edges.append(MachineEdge(vertices[0], vertices[(i + 1)]))
     # ... and from vertex 5 to vertices 6..9 and back to vertex 0
     # (the modulo wraps the final target index, 10, around to 0)
     for i in range(5, 10):
         edges.append(MachineEdge(vertices[5], vertices[(i + 1) % 10]))
     graph = MachineGraph("foo")
     graph.add_vertices(vertices)
     # One multicast partition per pre-vertex, both with identifier
     # "bar", so all edges above can be added under that identifier
     graph.add_outgoing_edge_partition(
         MulticastEdgePartition(identifier="bar", pre_vertex=vertices[0]))
     graph.add_outgoing_edge_partition(
         MulticastEdgePartition(identifier="bar", pre_vertex=vertices[5]))
     graph.add_edges(edges, "bar")
     # Helper presumably serialises and rebuilds the graph, then compares
     self.graph_there_and_back(graph)
 def test_same_atoms_as_vertex_constraint(self):
     """
     Exercise SameAtomsAsVertexConstraint's accessor, equality, repr and
     hashing behaviour; the constraint is expected to raise
     NotImplementedError, at which point the with-block stops.
     """
     with self.assertRaises(NotImplementedError):
         vertex_a = SimpleMachineVertex(None, "v1")
         vertex_b = SimpleMachineVertex(None, "v2")
         constraint_a = SameAtomsAsVertexConstraint(vertex_a)
         # Accessor, equality and string form
         self.assertEqual(constraint_a.vertex, vertex_a)
         self.assertEqual(constraint_a, SameAtomsAsVertexConstraint(vertex_a))
         self.assertEqual(
             str(constraint_a), 'SameAtomsAsVertexConstraint(vertex=v1)')
         constraint_b = SameAtomsAsVertexConstraint(vertex_b)
         self.assertNotEqual(constraint_a, constraint_b)
         self.assertNotEqual(constraint_a, "1.2.3.4")
         # Distinct constraints must behave as distinct dict keys
         lookup = {constraint_a: 1, constraint_b: 2}
         self.assertEqual(len(lookup), 2)
Example #3
0
    def __init__(self,
                 n_neurons,
                 delay_per_stage,
                 source_vertex,
                 machine_time_step,
                 timescale_factor,
                 constraints=None,
                 label="DelayExtension"):
        """
        :param n_neurons: the number of neurons (atoms) in this vertex
        :param delay_per_stage: the delay per stage
        :param source_vertex: where messages are coming from
        :param machine_time_step: how long is the machine time step
            (accepted but not stored by this implementation)
        :param timescale_factor: what slowdown factor has been applied
            (accepted but not stored by this implementation)
        :param constraints: the vertex constraints
        :param label: the vertex label
        """
        # pylint: disable=too-many-arguments
        # NOTE(review): 256 looks like a max-atoms-per-core value --
        # confirm against the superclass constructor signature
        super(DelayExtensionVertex, self).__init__(label, constraints, 256)

        self._source_vertex = source_vertex
        # No delay stages until projections require them
        self._n_delay_stages = 0
        self._delay_per_stage = delay_per_stage
        # Per-key lists of delay generator data
        self._delay_generator_data = defaultdict(list)

        # atom store
        self._n_atoms = n_neurons

        # Dictionary of vertex_slice -> delay block for data specification
        self._delay_blocks = dict()

        # Keep this vertex's atom count tied to its source vertex
        self.add_constraint(SameAtomsAsVertexConstraint(source_vertex))
Example #4
0
def constraint_from_json(json_dict, graph=None):
    """ Rebuild a constraint object from its JSON (dict) description.

    :param json_dict: description of the constraint; must contain a
        "class" entry naming the constraint type, plus whatever
        type-specific fields that constraint needs
    :param graph: graph used by vertex_lookup to resolve vertex
        identifiers back into vertex objects; only needed for the
        vertex-based constraints
    :return: the deserialised constraint object
    :raises NotImplementedError: if the class is unknown, or if a
        FixedKeyAndMaskConstraint carries a key_list_function (a
        function cannot be rebuilt from JSON)
    """
    constraint_class = json_dict["class"]
    if constraint_class == "BoardConstraint":
        return BoardConstraint(json_dict["board_address"])
    if constraint_class == "ChipAndCoreConstraint":
        # The processor id "p" is optional; default to None when absent
        return ChipAndCoreConstraint(
            json_dict["x"], json_dict["y"], json_dict.get("p"))
    if constraint_class == "ContiguousKeyRangeContraint":
        # NOTE: "Contraint" spelling matches the actual class name
        return ContiguousKeyRangeContraint()
    if constraint_class == "FixedKeyAndMaskConstraint":
        if "key_list_function" in json_dict:
            raise NotImplementedError("key_list_function {}".format(
                json_dict["key_list_function"]))
        return FixedKeyAndMaskConstraint(
            key_masks_from_json(json_dict["keys_and_masks"]))
    if constraint_class == "FixedMaskConstraint":
        return FixedMaskConstraint(json_dict["mask"])
    if constraint_class == "FixedVertexAtomsConstraint":
        return FixedVertexAtomsConstraint(json_dict["size"])
    if constraint_class == "MaxVertexAtomsConstraint":
        return MaxVertexAtomsConstraint(json_dict["size"])
    if constraint_class == "RadialPlacementFromChipConstraint":
        return RadialPlacementFromChipConstraint(
            json_dict["x"], json_dict["y"])
    if constraint_class == "SameChipAsConstraint":
        return SameChipAsConstraint(vertex_lookup(json_dict["vertex"], graph))
    if constraint_class == "SameAtomsAsVertexConstraint":
        return SameAtomsAsVertexConstraint(
            vertex_lookup(json_dict["vertex"], graph))
    raise NotImplementedError("constraint {}".format(json_dict["class"]))
    def _add_delay_extension(self, pre_synaptic_population,
                             post_synaptic_population,
                             max_delay_for_projection, max_delay_per_neuron,
                             machine_time_step, timescale_factor):
        """ Instantiate delay extension component.

        :param pre_synaptic_population: population the delayed spikes
            originate from; also caches the shared delay vertex
        :param post_synaptic_population: population the delayed spikes
            are delivered to
        :param max_delay_for_projection: the largest delay this
            projection must support
        :param max_delay_per_neuron: the delay each extension stage
            can add per neuron
        :param machine_time_step: passed through to DelayExtensionVertex
        :param timescale_factor: passed through to DelayExtensionVertex
        :return: the (newly created or reused) delayed application edge
        """
        # pylint: disable=too-many-arguments

        # Create a delay extension vertex to do the extra delays,
        # reusing the one cached on the pre-synaptic population if any
        delay_vertex = pre_synaptic_population._internal_delay_vertex
        pre_vertex = pre_synaptic_population._get_vertex
        if delay_vertex is None:
            delay_name = "{}_delayed".format(pre_vertex.label)
            delay_vertex = DelayExtensionVertex(pre_vertex.n_atoms,
                                                max_delay_per_neuron,
                                                pre_vertex,
                                                machine_time_step,
                                                timescale_factor,
                                                label=delay_name)
            pre_synaptic_population._internal_delay_vertex = delay_vertex
            # Keep the source and delay vertices at the same atom count
            pre_vertex.add_constraint(
                SameAtomsAsVertexConstraint(delay_vertex))
            self._spinnaker_control.add_application_vertex(delay_vertex)

            # Add the edge carrying spikes into the delay extension
            delay_afferent_edge = DelayAfferentApplicationEdge(
                pre_vertex,
                delay_vertex,
                label="{}_to_DelayExtension".format(pre_vertex.label))
            self._spinnaker_control.add_application_edge(
                delay_afferent_edge, constants.SPIKE_PARTITION_ID)

        # Ensure that the delay extension knows how many states it will
        # support: the number of per-neuron stages needed to cover the
        # projection's maximum delay, computed by ceiling division; only
        # ever grows, never shrinks
        n_stages = int(
            math.ceil(
                float(max_delay_for_projection - max_delay_per_neuron) /
                float(max_delay_per_neuron)))
        if n_stages > delay_vertex.n_delay_stages:
            delay_vertex.n_delay_stages = n_stages

        # Create the delay edge if there isn't one already
        post_vertex = post_synaptic_population._get_vertex
        delay_edge = self._find_existing_edge(delay_vertex, post_vertex)
        if delay_edge is None:
            delay_edge = DelayedApplicationEdge(
                delay_vertex,
                post_vertex,
                self._synapse_information,
                label="{}_delayed_to_{}".format(pre_vertex.label,
                                                post_vertex.label))
            self._spinnaker_control.add_application_edge(
                delay_edge, constants.SPIKE_PARTITION_ID)
        else:
            # Existing edge: just attach this projection's synapse info
            delay_edge.add_synapse_information(self._synapse_information)
        return delay_edge
Example #6
0
 def test_operation_with_same_size_as_vertex_constraint_chain(self):
     """ Test that a chain of same-size constraints works even when the
         order of vertices is not correct for the chain.
     """
     with self.assertRaises(NotImplementedError):
         app_graph = ApplicationGraph("Test")
         first = SimpleTestVertex(10, "Vertex_1", 5)
         first.splitter_object = SplitterSliceLegacy()
         second = SimpleTestVertex(10, "Vertex_2", 4)
         third = SimpleTestVertex(10, "Vertex_3", 2)
         # Constraint chain: third sized like second, second like first
         third.add_constraint(SameAtomsAsVertexConstraint(second))
         second.add_constraint(SameAtomsAsVertexConstraint(first))
         second.splitter_object = SplitterSliceLegacy()
         third.splitter_object = SplitterSliceLegacy()
         app_graph.add_vertices([first, second, third])
         machine = virtual_machine(width=2, height=2)
         splitter_partitioner(app_graph, machine, plan_n_time_steps=None)
         # All three vertices must have been split into the same number
         # of machine vertices
         machine_verts_1 = list(first.machine_vertices)
         machine_verts_2 = list(second.machine_vertices)
         machine_verts_3 = list(third.machine_vertices)
         self.assertEqual(len(machine_verts_1), len(machine_verts_2))
         self.assertEqual(len(machine_verts_2), len(machine_verts_3))
Example #7
0
 def test_operation_with_same_size_as_vertex_constraint_exception(self):
     """
     Test that a same-atoms-as constraint between vertices of different
     sizes causes a partitioning error.
     """
     with self.assertRaises(NotImplementedError):
         constrained = SimpleTestVertex(100, "Constrained")
         constrained.add_constraint(
             SameAtomsAsVertexConstraint(self.vert2))
         constrained.splitter_object = SplitterSliceLegacy()
         self.graph.add_vertex(constrained)
         # Partitioning should fail because the constrained sizes differ
         self.assertRaises(PacmanPartitionException, splitter_partitioner,
                           self.graph, self.machine, 1000,
                           PreAllocatedResourceContainer())
Example #8
0
 def test_operation_with_same_size_as_vertex_constraint(self):
     """
     Test that the partition-and-place partitioner can handle a
     same-size-as constraint on a vertex that fits onto one core.
     """
     with self.assertRaises(NotImplementedError):
         small_vertex = SimpleTestVertex(5, "Constrained")
         small_vertex.add_constraint(
             SameAtomsAsVertexConstraint(self.vert2))
         small_vertex.splitter_object = SplitterSliceLegacy()
         self.graph.add_vertex(small_vertex)
         machine_graph, _ = splitter_partitioner(
             self.graph,
             self.machine,
             plan_n_time_steps=100,
             pre_allocated_resources=PreAllocatedResourceContainer())
         # Four machine vertices expected after partitioning
         self.assertEqual(len(list(machine_graph.vertices)), 4)
Example #9
0
 def test_operation_with_same_size_as_vertex_constraint_large_vertices(
         self):
     """
     test that the partition and place partitioner can handle same size as
     constraints on a vertex which has to be split over many cores
     """
     with self.assertRaises(NotImplementedError):
         # Two 300-atom vertices, one constrained to match the other's
         # atom split
         constrained_vertex = SimpleTestVertex(300, "Constrained")
         new_large_vertex = SimpleTestVertex(300, "Non constrained")
         constrained_vertex.add_constraint(
             SameAtomsAsVertexConstraint(new_large_vertex))
         new_large_vertex.splitter_object = SplitterSliceLegacy()
         constrained_vertex.splitter_object = SplitterSliceLegacy()
         self.graph.add_vertices([new_large_vertex, constrained_vertex])
         graph, _ = splitter_partitioner(
             self.graph,
             self.machine,
             plan_n_time_steps=100,
             pre_allocated_resources=PreAllocatedResourceContainer())
         # Seven machine vertices expected in the partitioned graph
         self.assertEqual(len(list(graph.vertices)), 7)
Example #10
0
    def __init__(self, n_neurons, delay_per_stage, source_vertex,
                 machine_time_step, timescale_factor, constraints=None,
                 label="DelayExtension"):
        """Creates a new DelayExtension Object.

        :param n_neurons: the number of neurons (atoms) in this vertex
        :param delay_per_stage: the delay added per stage
        :param source_vertex: the vertex the delayed spikes come from
        :param machine_time_step: the machine time step (accepted but
            not stored by this implementation)
        :param timescale_factor: the slowdown factor (accepted but not
            stored by this implementation)
        :param constraints: any constraints to place on the vertex
        :param label: the label of the vertex
        """
        # pylint: disable=too-many-arguments
        # NOTE(review): 256 looks like a max-atoms-per-core value --
        # confirm against the superclass constructor signature
        super(DelayExtensionVertex, self).__init__(label, constraints, 256)

        self._source_vertex = source_vertex
        # No delay stages until projections require them
        self._n_delay_stages = 0
        self._delay_per_stage = delay_per_stage

        # atom store
        self._n_atoms = n_neurons

        # Dictionary of vertex_slice -> delay block for data specification
        self._delay_blocks = dict()

        # Keep this vertex's atom count tied to its source vertex
        self.add_constraint(
            SameAtomsAsVertexConstraint(source_vertex))
Example #11
0
    def __init__(self,
                 n_neurons,
                 delay_per_stage,
                 source_vertex,
                 machine_time_step,
                 time_scale_factor,
                 constraints=None,
                 label="DelayExtension"):
        """
        :param int n_neurons: the number of neurons
        :param int delay_per_stage: the delay per stage
        :param ~pacman.model.graphs.application.ApplicationVertex \
                source_vertex:
            where messages are coming from
        :param int machine_time_step: how long is the machine time step
        :param int time_scale_factor: what slowdown factor has been applied
        :param iterable(~pacman.model.constraints.AbstractConstraint) \
                constraints:
            the vertex constraints
        :param str label: the vertex label
        """
        # pylint: disable=too-many-arguments
        # NOTE(review): 256 looks like a max-atoms-per-core value --
        # confirm against the superclass constructor signature
        super(DelayExtensionVertex, self).__init__(label, constraints, 256)

        self.__source_vertex = source_vertex
        # No delay stages until projections require them
        self.__n_delay_stages = 0
        self.__delay_per_stage = delay_per_stage
        # Per-key lists of delay generator data
        self.__delay_generator_data = defaultdict(list)
        self.__machine_time_step = machine_time_step
        self.__time_scale_factor = time_scale_factor
        # Counters for created machine vertices and written data specs
        self.__n_subvertices = 0
        self.__n_data_specs = 0

        # atom store
        self.__n_atoms = n_neurons

        # Dictionary of vertex_slice -> delay block for data specification
        self.__delay_blocks = dict()

        # Keep this vertex's atom count tied to its source vertex
        self.add_constraint(SameAtomsAsVertexConstraint(source_vertex))
Example #12
0
 def test_operation_same_size_as_vertex_constraint_different_order(self):
     """
     Test that the partition-and-place partitioner can handle a
     same-size-as constraint on a vertex which has to be split over many
     cores, where the vertices are added in the opposite order.
     """
     with self.assertRaises(NotImplementedError):
         self.setup()
         constrained = SimpleTestVertex(300, "Constrained")
         unconstrained = SimpleTestVertex(300, "Non constrained")
         # Constrained vertex must split the same way as the other one,
         # even though it is added to the graph first
         constrained.add_constraint(
             SameAtomsAsVertexConstraint(unconstrained))
         constrained.splitter_object = SplitterSliceLegacy()
         unconstrained.splitter_object = SplitterSliceLegacy()
         self.graph.add_vertices([constrained, unconstrained])
         machine_graph, _ = self.bp(
             self.graph,
             self.machine,
             plan_n_time_steps=100,
             pre_allocated_resources=PreAllocatedResourceContainer())
         # Seven vertices expected in the resulting graph
         self.assertEqual(len(list(machine_graph.vertices)), 7)
Example #13
0
 def test_same_atoms_as_vertex_constraint(self):
     """
     Run a SameAtomsAsVertexConstraint through the there-and-back
     helper; the constraint is expected to raise NotImplementedError.
     """
     with self.assertRaises(NotImplementedError):
         vertex = SimpleMachineVertex(None, "v1")
         constraint = SameAtomsAsVertexConstraint(vertex)
         self.constraint_there_and_back(constraint)