Example 1
    def get_resources_used_by_atoms(self, vertex_slice, n_machine_time_steps,
                                    machine_time_step):

        # build the resources as currently known
        container = ResourceContainer(
            sdram=SDRAMResource(self.get_sdram_usage_for_atoms(vertex_slice)),
            dtcm=DTCMResource(self.get_dtcm_usage_for_atoms()),
            cpu_cycles=CPUCyclesPerTickResource(
                self.get_cpu_usage_for_atoms()))

        recording_sizes = recording_utilities.get_recording_region_sizes(
            [
                self._spike_recorder.get_sdram_usage_in_bytes(
                    vertex_slice.n_atoms,
                    self._max_spikes_per_ts(vertex_slice, n_machine_time_steps,
                                            machine_time_step),
                    self._N_POPULATION_RECORDING_REGIONS)
            ], n_machine_time_steps, self._minimum_buffer_sdram,
            self._maximum_sdram_for_buffering,
            self._using_auto_pause_and_resume)
        container.extend(
            recording_utilities.get_recording_resources(
                recording_sizes, self._receive_buffer_host,
                self._receive_buffer_port))
        return container
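
The pattern in Example 1, building a base ResourceContainer and then folding
the recording requirements in with extend(), recurs throughout these examples.
A minimal sketch of the accumulation, assuming extend() sums the resource
values and appends tag lists as the examples imply; the byte figures are
illustrative only:

    # Minimal sketch, assuming extend() sums resource values.
    base = ResourceContainer(sdram=SDRAMResource(1000))
    extra = ResourceContainer(sdram=SDRAMResource(500))
    base.extend(extra)
    assert base.sdram.get_value() == 1500  # 1000 + 500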
Example 2
    def _do_test(self, placer):
        machine = VirtualMachine(width=8, height=8)
        graph = MachineGraph("Test")

        vertices = [
            SimpleMachineVertex(ResourceContainer(), label="v{}".format(i))
            for i in range(100)
        ]
        for vertex in vertices:
            graph.add_vertex(vertex)

        same_vertices = [
            SimpleMachineVertex(ResourceContainer(), label="same{}".format(i))
            for i in range(10)
        ]
        random.seed(12345)
        sdram_edges = list()
        for vertex in same_vertices:
            graph.add_vertex(vertex)
            for _i in range(0, random.randint(1, 5)):
                sdram_edge = MachineEdge(
                    vertex, vertices[random.randint(0, 99)],
                    traffic_type=EdgeTrafficType.SDRAM)
                sdram_edges.append(sdram_edge)
                graph.add_edge(sdram_edge, "Test")

        placements = placer(graph, machine)
        for edge in sdram_edges:
            pre_place = placements.get_placement_of_vertex(edge.pre_vertex)
            post_place = placements.get_placement_of_vertex(edge.post_vertex)
            self.assertEqual(pre_place.x, post_place.x)
            self.assertEqual(pre_place.y, post_place.y)
Example 3
    def test_resource_container(self):
        """
        Tests that creating multiple resource containers does not cause issues.
        """
        sdram = SDRAMResource(128 * (2**20))
        dtcm = DTCMResource(128 * (2**20) + 1)
        cpu = CPUCyclesPerTickResource(128 * (2**20) + 2)

        container = ResourceContainer(dtcm, sdram, cpu)
        self.assertEqual(container.sdram.get_value(), 128 * (2**20))
        self.assertEqual(container.dtcm.get_value(), 128 * (2**20) + 1)
        self.assertEqual(container.cpu_cycles.get_value(), 128 * (2**20) + 2)

        sdram = SDRAMResource(128 * (2**19))
        dtcm = DTCMResource(128 * (2**19) + 1)
        cpu = CPUCyclesPerTickResource(128 * (2**19) + 2)

        container = ResourceContainer(dtcm, sdram, cpu)
        self.assertEqual(container.sdram.get_value(), 128 * (2**19))
        self.assertEqual(container.dtcm.get_value(), 128 * (2**19) + 1)
        self.assertEqual(container.cpu_cycles.get_value(), 128 * (2**19) + 2)

        sdram = SDRAMResource(128 * (2**21))
        dtcm = DTCMResource(128 * (2**21) + 1)
        cpu = CPUCyclesPerTickResource(128 * (2**21) + 2)

        container = ResourceContainer(dtcm, sdram, cpu)
        self.assertEqual(container.sdram.get_value(), 128 * (2**21))
        self.assertEqual(container.dtcm.get_value(), 128 * (2**21) + 1)
        self.assertEqual(container.cpu_cycles.get_value(), 128 * (2**21) + 2)
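
Example 3 relies on ResourceContainer's positional argument order: dtcm first,
then sdram, then cpu_cycles, as its assertions confirm. The keyword form used
in Example 1 makes the same construction explicit; a sketch of the equivalent:

    # Equivalent to ResourceContainer(dtcm, sdram, cpu) above, but explicit.
    container = ResourceContainer(
        dtcm=DTCMResource(128 * (2**20) + 1),
        sdram=SDRAMResource(128 * (2**20)),
        cpu_cycles=CPUCyclesPerTickResource(128 * (2**20) + 2))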
Example 4
 def test_tags_resources(self):
     t1 = IPtagResource("1", 2, True)  # Minimal args
     r1 = ResourceContainer(iptags=[t1])
     self.resource_there_and_back(r1)
     t2 = IPtagResource("1.2.3.4", 2, False, 4, 5)
     r2 = ResourceContainer(reverse_iptags=[t2])
     self.resource_there_and_back(r2)
Example 5
def create_requirement_collections(vertices, machine_graph):
    """ Get a collection of requirements that includes SDRAM edge resources
    """

    # Collect requirements for all but the last vertex, holding the last
    # vertex back so the SDRAM edge total can be added to it below
    required_resources = list()
    to_add_partitions = set()
    last_resources = None
    last_constraints = None
    for vertex in vertices:
        if last_resources is not None:
            required_resources.append([last_resources, last_constraints])
        last_resources = vertex.resources_required
        last_constraints = vertex.constraints
        to_add_partitions.update(
            machine_graph.get_sdram_edge_partitions_starting_at_vertex(vertex))

    # Add up all the SDRAM edge requirements
    total_sdram = 0
    for partition in to_add_partitions:
        total_sdram += partition.total_sdram_requirements()

    # Add the SDRAM requirements to the final requirements
    resources = ResourceContainer(sdram=ConstantSDRAM(total_sdram))
    resources.extend(last_resources)
    required_resources.append([resources, last_constraints])

    return required_resources
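
The loop in create_requirement_collections appends each vertex's requirements
one iteration late, holding the most recent vertex back so that the summed
SDRAM edge total can be folded into that final entry after the loop. The same
hold-back pattern in miniature, independent of the PACMAN types (illustrative
values only):

    # Append each item one iteration late so the last item can be
    # post-processed before it is appended.
    items, last = [], None
    for value in [1, 2, 3]:
        if last is not None:
            items.append(last)
        last = value
    items.append(last + 100)  # only the last item gets the extra treatment
    assert items == [1, 2, 103]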
Example 6
    def _integration_setup(self):
        machine_graph = MachineGraph(label="test me you git")
        n_keys_map = DictBasedMachinePartitionNKeysMap()
        v1 = SimpleMachineVertex(ResourceContainer())
        v2 = SimpleMachineVertex(ResourceContainer())
        v3 = SimpleMachineVertex(ResourceContainer())
        v4 = SimpleMachineVertex(ResourceContainer())
        machine_graph.add_vertex(v1)
        machine_graph.add_vertex(v2)
        machine_graph.add_vertex(v3)
        machine_graph.add_vertex(v4)

        e1 = MachineEdge(v1, v2, label="e1")
        e2 = MachineEdge(v1, v3, label="e2")
        e3 = MachineEdge(v2, v3, label="e3")
        e4 = MachineEdge(v1, v4, label="e4")

        machine_graph.add_outgoing_edge_partition(
            MulticastEdgePartition(identifier="part1", pre_vertex=v1))
        machine_graph.add_outgoing_edge_partition(
            MulticastEdgePartition(identifier="part2", pre_vertex=v2))
        machine_graph.add_outgoing_edge_partition(
            MulticastEdgePartition(identifier="part2", pre_vertex=v1))

        machine_graph.add_edge(e1, "part1")
        machine_graph.add_edge(e2, "part1")
        machine_graph.add_edge(e3, "part2")
        machine_graph.add_edge(e4, "part2")

        for partition in machine_graph.outgoing_edge_partitions:
            n_keys_map.set_n_keys_for_partition(partition, 24)

        return machine_graph, n_keys_map, v1, v2, v3, v4, e1, e2, e3, e4
Example 7
 def resources_required(self):
     if self.group.output_grp:
         resources = ResourceContainer(
             sdram=VariableSDRAM(self._sdram_fixed, self._sdram_variable))
     else:
         resources = ResourceContainer(
             sdram=ConstantSDRAM(self._sdram_fixed))
     return resources
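
Example 7 chooses between VariableSDRAM and ConstantSDRAM depending on whether
the group produces recorded output. VariableSDRAM(fixed, variable) models a
cost of fixed plus variable bytes per timestep, while ConstantSDRAM(fixed) is
flat regardless of run length; a sketch of the budgets each implies (the
helper and numbers are illustrative, not part of the API):

    # Illustrative only: the fixed-plus-per-timestep formula behind
    # the two SDRAM resource types.
    def estimated_sdram(fixed_bytes, per_timestep_bytes, n_timesteps):
        return fixed_bytes + per_timestep_bytes * n_timesteps

    print(estimated_sdram(4096, 8, 1000))  # 12096 bytes when recording
    print(estimated_sdram(4096, 0, 1000))  # 4096 bytes, like ConstantSDRAM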
Example 8
 def test_resource_container(self):
     sdram1 = ConstantSDRAM(128 * (2**20))
     dtcm = DTCMResource(128 * (2**20) + 1)
     cpu = CPUCyclesPerTickResource(128 * (2**20) + 2)
     r1 = ResourceContainer(dtcm, sdram1, cpu)
     self.resource_there_and_back(r1)
     t1 = IPtagResource("1", 2, True)  # Minimal args
     t2 = IPtagResource("1.2.3.4", 2, False, 4, 5)
     r2 = ResourceContainer(dtcm, sdram1, cpu, iptags=[t1, t2])
     self.resource_there_and_back(r2)
Example 9
 def resources_required(self):
     resources = ResourceContainer(sdram=SDRAMResource(
         self._calculate_sdram_requirement()),
                                   dtcm=DTCMResource(0),
                                   cpu_cycles=CPUCyclesPerTickResource(0))
     resources.extend(
         recording_utilities.get_recording_resources(
             [MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP],
             self._receive_buffer_host, self._receive_buffer_port))
     return resources
Example 10
    def get_resources(
            n_machine_time_steps, time_step, time_scale_factor,
            n_samples_per_recording, sampling_frequency):
        """ Get the resources used by this vertex

        :return: Resource container
        """
        # pylint: disable=too-many-locals

        # get config
        config = globals_variables.get_simulator().config

        # get recording params
        minimum_buffer_sdram = config.getint(
            "Buffers", "minimum_buffer_sdram")
        using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")
        receive_buffer_host = config.get("Buffers", "receive_buffer_host")
        receive_buffer_port = read_config_int(
            config, "Buffers", "receive_buffer_port")

        # figure recording size for max run
        if not using_auto_pause_and_resume and n_machine_time_steps is None:
            raise Exception(
                "The chip power monitor cannot be used without auto pause "
                "and resume unless n_machine_time_steps is specified")

        # figure max buffer size
        max_buffer_size = 0
        if config.getboolean("Buffers", "enable_buffered_recording"):
            max_buffer_size = config.getint(
                "Buffers", "chip_power_monitor_buffer")

        maximum_sdram_for_buffering = [max_buffer_size]

        n_recording_entries = (math.ceil(
            (sampling_frequency / (time_step * time_scale_factor))) /
            n_samples_per_recording)

        recording_size = (
            ChipPowerMonitorMachineVertex.RECORDING_SIZE_PER_ENTRY *
            n_recording_entries)

        container = ResourceContainer(
            sdram=SDRAMResource(
                ChipPowerMonitorMachineVertex.sdram_calculation()),
            cpu_cycles=CPUCyclesPerTickResource(100),
            dtcm=DTCMResource(100))
        recording_sizes = recording_utilities.get_recording_region_sizes(
            [int(recording_size) * n_machine_time_steps], minimum_buffer_sdram,
            maximum_sdram_for_buffering, using_auto_pause_and_resume)
        container.extend(recording_utilities.get_recording_resources(
            recording_sizes, receive_buffer_host, receive_buffer_port))
        return container
Example 11
 def resources_required(self):
     resources = ResourceContainer(
         cpu_cycles=CPUCyclesPerTickResource(45),
         dtcm=DTCMResource(100),
         sdram=SDRAMResource(constants.SYSTEM_BYTES_REQUIREMENT +
                             self.TRANSMISSION_REGION_N_BYTES))
     resources.extend(
         recording_utilities.get_recording_resources(
             [self._recording_size], self._receive_buffer_host,
             self._receive_buffer_port))
     return resources
Example 12
    def resources_required(self):
        resources = ResourceContainer(cpu_cycles=CPUCyclesPerTickResource(45),
                                      dtcm=DTCMResource(100),
                                      sdram=SDRAMResource(100))

        resources.extend(
            recording_utilities.get_recording_resources(
                [self._string_data_size], self._receive_buffer_host,
                self._receive_buffer_port))

        return resources
Example 13
    def _do_test(self, placer):
        machine = virtual_machine(width=8, height=8)
        graph = MachineGraph("Test")

        vertices = [
            SimpleMachineVertex(ResourceContainer(), label="v{}".format(i))
            for i in range(100)
        ]
        for vertex in vertices:
            graph.add_vertex(vertex)

        same_vertices = [
            SimpleMachineVertex(ResourceContainer(), label="same{}".format(i))
            for i in range(10)
        ]
        random.seed(12345)
        for vertex in same_vertices:
            graph.add_vertex(vertex)
            for _i in range(0, random.randint(1, 5)):
                vertex.add_constraint(
                    SameChipAsConstraint(
                        vertices[random.randint(0, 99)]))

        n_keys_map = DictBasedMachinePartitionNKeysMap()

        inputs = {
            "MemoryExtendedMachine": machine,
            "MemoryMachine": machine,
            "MemoryMachineGraph": graph,
            "PlanNTimeSteps": None,
            "MemoryMachinePartitionNKeysMap": n_keys_map
        }
        algorithms = [placer]
        xml_paths = []
        executor = PACMANAlgorithmExecutor(
            algorithms, [], inputs, [], [], [], xml_paths)
        executor.execute_mapping()

        placements = executor.get_item("MemoryPlacements")
        for same in same_vertices:
            print("{0.vertex.label}, {0.x}, {0.y}, {0.p}: {1}".format(
                placements.get_placement_of_vertex(same),
                ["{0.vertex.label}, {0.x}, {0.y}, {0.p}".format(
                    placements.get_placement_of_vertex(constraint.vertex))
                 for constraint in same.constraints]))
            placement = placements.get_placement_of_vertex(same)
            for constraint in same.constraints:
                if isinstance(constraint, SameChipAsConstraint):
                    other_placement = placements.get_placement_of_vertex(
                        constraint.vertex)
                    self.assertTrue(
                        other_placement.x == placement.x and
                        other_placement.y == placement.y,
                        "Vertex was not placed on the same chip as requested")
Example 14
    def _do_test(self, placer):
        machine = virtual_machine(width=8, height=8)
        graph = MachineGraph("Test")
        plan_n_timesteps = 100

        vertices = [
            SimpleMachineVertex(ResourceContainer(),
                                label="v{}".format(i),
                                sdram_cost=20) for i in range(100)
        ]
        for vertex in vertices:
            graph.add_vertex(vertex)

        same_vertices = [
            SimpleMachineVertex(ResourceContainer(),
                                label="same{}".format(i),
                                sdram_cost=20) for i in range(10)
        ]
        random.seed(12345)
        sdram_edges = list()
        for vertex in same_vertices:
            graph.add_vertex(vertex)
            graph.add_outgoing_edge_partition(
                ConstantSDRAMMachinePartition(identifier="Test",
                                              pre_vertex=vertex,
                                              label="bacon"))
            for _i in range(0, random.randint(1, 5)):
                sdram_edge = SDRAMMachineEdge(vertex,
                                              vertices[random.randint(0, 99)],
                                              label="bacon",
                                              app_edge=None)
                sdram_edges.append(sdram_edge)
                graph.add_edge(sdram_edge, "Test")
        n_keys_map = DictBasedMachinePartitionNKeysMap()

        inputs = {
            "MemoryExtendedMachine": machine,
            "MemoryMachine": machine,
            "MemoryMachineGraph": graph,
            "PlanNTimeSteps": plan_n_timesteps,
            "MemoryMachinePartitionNKeysMap": n_keys_map
        }
        algorithms = [placer]
        xml_paths = []
        executor = PACMANAlgorithmExecutor(algorithms, [], inputs, [], [], [],
                                           xml_paths)
        executor.execute_mapping()
        placements = executor.get_item("MemoryPlacements")
        for edge in sdram_edges:
            pre_place = placements.get_placement_of_vertex(edge.pre_vertex)
            post_place = placements.get_placement_of_vertex(edge.post_vertex)
            assert pre_place.x == post_place.x
            assert pre_place.y == post_place.y
Example 15
    def test_too_many_ip_tags_for_1_board(self):
        n_extra_vertices = 3
        machine = virtual_machine(12, 12)
        eth_chips = machine.ethernet_connected_chips
        eth_chip = eth_chips[0]
        eth_chip_2 = machine.get_chip_at(eth_chip.x + 1, eth_chip.y + 1)
        eth_procs = [
            proc.processor_id for proc in eth_chip.processors
            if not proc.is_monitor
        ]
        procs = [proc for proc in eth_chip_2.processors if not proc.is_monitor]
        eth2_procs = [proc.processor_id for proc in procs]
        proc = procs[-1]
        eth_vertices = [
            SimpleMachineVertex(ResourceContainer(
                iptags=[IPtagResource("127.0.0.1", port=tag, strip_sdp=True)]),
                                label="Ethernet Vertex {}".format(proc))
            for tag in eth_chip.tag_ids
        ]
        eth2_vertices = [
            SimpleMachineVertex(ResourceContainer(iptags=[
                IPtagResource("127.0.0.1", port=10000 + tag, strip_sdp=True)
            ]),
                                label="Ethernet 2 Vertex {}".format(proc))
            for tag in range(n_extra_vertices)
        ]
        placements = Placements(
            Placement(vertex, eth_chip.x, eth_chip.y, proc)
            for proc, vertex in zip(eth_procs, eth_vertices))
        placements.add_placements(
            Placement(vertex, eth_chip_2.x, eth_chip_2.y, proc)
            for proc, vertex in zip(eth2_procs, eth2_vertices))
        allocator = BasicTagAllocator()
        _, _, tags = allocator(machine,
                               plan_n_timesteps=None,
                               placements=placements)

        tags_by_board = defaultdict(set)
        for vertices in (eth_vertices, eth2_vertices):
            for vertex in vertices:
                iptags = tags.get_ip_tags_for_vertex(vertex)
                self.assertEqual(len(iptags), 1,
                                 "Incorrect number of tags assigned")
                placement = placements.get_placement_of_vertex(vertex)
                print(placement, "has tag", iptags[0])
                self.assertFalse(
                    iptags[0].tag in tags_by_board[iptags[0].board_address],
                    "Tag used more than once")
                tags_by_board[iptags[0].board_address].add(iptags[0].tag)

        self.assertEqual(len(tags_by_board[eth_chip.ip_address]),
                         len(eth_chip.tag_ids),
                         "Wrong number of tags assigned to first Ethernet")
Example 16
    def get_resources(n_machine_time_steps, time_step, time_scale_factor,
                      n_samples_per_recording, sampling_frequency):
        """ Get the resources used by this vertex

        :return: Resource container
        """
        # pylint: disable=too-many-locals

        # get config
        config = globals_variables.get_simulator().config

        # get recording params
        minimum_buffer_sdram = config.getint("Buffers", "minimum_buffer_sdram")
        using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")
        receive_buffer_host = config.get("Buffers", "receive_buffer_host")
        receive_buffer_port = read_config_int(config, "Buffers",
                                              "receive_buffer_port")

        # figure recording size for max run
        if not using_auto_pause_and_resume and n_machine_time_steps is None:
            raise Exception(
                "The chip power monitor cannot be used without auto pause "
                "and resume unless n_machine_time_steps is specified")

        # figure max buffer size
        max_buffer_size = 0
        if config.getboolean("Buffers", "enable_buffered_recording"):
            max_buffer_size = config.getint("Buffers",
                                            "chip_power_monitor_buffer")

        maximum_sdram_for_buffering = [max_buffer_size]

        n_recording_entries = (math.ceil(
            (sampling_frequency / (time_step * time_scale_factor))) /
                               n_samples_per_recording)

        recording_size = (
            ChipPowerMonitorMachineVertex.RECORDING_SIZE_PER_ENTRY *
            n_recording_entries)

        container = ResourceContainer(sdram=SDRAMResource(
            ChipPowerMonitorMachineVertex.sdram_calculation()),
                                      cpu_cycles=CPUCyclesPerTickResource(100),
                                      dtcm=DTCMResource(100))
        recording_sizes = recording_utilities.get_recording_region_sizes(
            [int(recording_size) * n_machine_time_steps], minimum_buffer_sdram,
            maximum_sdram_for_buffering, using_auto_pause_and_resume)
        container.extend(
            recording_utilities.get_recording_resources(
                recording_sizes, receive_buffer_host, receive_buffer_port))
        return container
Example 17
    def _do_test(self, placer):
        machine = virtual_machine(width=8, height=8)
        graph = MachineGraph("Test")

        vertices = [
            SimpleMachineVertex(ResourceContainer(), label="v{}".format(i))
            for i in range(100)
        ]
        for vertex in vertices:
            graph.add_vertex(vertex)

        same_vertices = [
            SimpleMachineVertex(ResourceContainer(), label="same{}".format(i))
            for i in range(10)
        ]
        random.seed(12345)
        for vertex in same_vertices:
            graph.add_vertex(vertex)
            for _i in range(0, random.randint(1, 5)):
                vertex.add_constraint(
                    SameChipAsConstraint(vertices[random.randint(0, 99)]))

        n_keys_map = DictBasedMachinePartitionNKeysMap()

        if placer == "ConnectiveBasedPlacer":
            placements = connective_based_placer(graph, machine, None)
        elif placer == "OneToOnePlacer":
            placements = one_to_one_placer(graph, machine, None)
        elif placer == "RadialPlacer":
            placements = radial_placer(graph, machine, None)
        elif placer == "SpreaderPlacer":
            placements = spreader_placer(graph, machine, n_keys_map, None)
        else:
            raise NotImplementedError(placer)

        for same in same_vertices:
            print("{0.vertex.label}, {0.x}, {0.y}, {0.p}: {1}".format(
                placements.get_placement_of_vertex(same), [
                    "{0.vertex.label}, {0.x}, {0.y}, {0.p}".format(
                        placements.get_placement_of_vertex(constraint.vertex))
                    for constraint in same.constraints
                ]))
            placement = placements.get_placement_of_vertex(same)
            for constraint in same.constraints:
                if isinstance(constraint, SameChipAsConstraint):
                    other_placement = placements.get_placement_of_vertex(
                        constraint.vertex)
                    self.assertTrue(
                        other_placement.x == placement.x
                        and other_placement.y == placement.y,
                        "Vertex was not placed on the same chip as requested")
Example 18
    def _do_test(self, placer):
        machine = virtual_machine(width=8, height=8)
        graph = MachineGraph("Test")

        vertices = [
            MockMachineVertex(ResourceContainer(),
                              label="v{}".format(i),
                              sdram_requirement=20) for i in range(100)
        ]
        for vertex in vertices:
            graph.add_vertex(vertex)

        same_vertices = [
            MockMachineVertex(ResourceContainer(),
                              label="same{}".format(i),
                              sdram_requirement=20) for i in range(10)
        ]
        random.seed(12345)
        sdram_edges = list()
        for vertex in same_vertices:
            graph.add_vertex(vertex)
            graph.add_outgoing_edge_partition(
                ConstantSDRAMMachinePartition(identifier="Test",
                                              pre_vertex=vertex,
                                              label="bacon"))
            for _i in range(0, random.randint(1, 5)):
                sdram_edge = SDRAMMachineEdge(vertex,
                                              vertices[random.randint(0, 99)],
                                              label="bacon",
                                              app_edge=None)
                sdram_edges.append(sdram_edge)
                graph.add_edge(sdram_edge, "Test")
        n_keys_map = DictBasedMachinePartitionNKeysMap()

        if placer == "ConnectiveBasedPlacer":
            placements = connective_based_placer(graph, machine, None)
        elif placer == "OneToOnePlacer":
            placements = one_to_one_placer(graph, machine, None)
        elif placer == "RadialPlacer":
            placements = radial_placer(graph, machine, None)
        elif placer == "SpreaderPlacer":
            placements = spreader_placer(graph, machine, n_keys_map, None)
        else:
            raise NotImplementedError(placer)
        for edge in sdram_edges:
            pre_place = placements.get_placement_of_vertex(edge.pre_vertex)
            post_place = placements.get_placement_of_vertex(edge.post_vertex)
            assert pre_place.x == post_place.x
            assert pre_place.y == post_place.y
Example 19
    def resources_required(self):
        resources = ResourceContainer(
            sdram=ConstantSDRAM(SYSTEM_BYTES_REQUIREMENT +
                                get_recording_header_size(len(Channels)) +
                                self._string_data_size))

        return resources
Example 20
    def get_resources(time_step, time_scale_factor, n_samples_per_recording,
                      sampling_frequency):
        """ Get the resources used by this vertex

        :return: Resource container
        """
        # pylint: disable=too-many-locals
        step_in_microseconds = (time_step * time_scale_factor)
        # The number of samples per step does not have to be an integer
        samples_per_step = (step_in_microseconds / sampling_frequency)
        recording_per_step = (samples_per_step / n_samples_per_recording)
        max_recording_per_step = math.ceil(recording_per_step)
        overflow_recordings = max_recording_per_step - recording_per_step
        system = SYSTEM_BYTES_REQUIREMENT
        config = CONFIG_SIZE_IN_BYTES
        recording = recording_utilities.get_recording_header_size(1)
        recording += recording_utilities.get_recording_data_constant_size(1)
        fixed_sdram = system + config + recording
        with_overflow = (fixed_sdram +
                         overflow_recordings * RECORDING_SIZE_PER_ENTRY)
        per_timestep = recording_per_step * RECORDING_SIZE_PER_ENTRY

        container = ResourceContainer(sdram=VariableSDRAM(
            with_overflow, per_timestep),
                                      cpu_cycles=CPUCyclesPerTickResource(100),
                                      dtcm=DTCMResource(100))
        return container
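
Worked numbers for the sizing arithmetic in Example 20, with illustrative
parameter values (these are assumptions for the walkthrough, not values from
any real configuration):

    import math

    # A 1000 us timestep at scale factor 1, sampling every 400 us,
    # bundling 2 samples into each recording entry.
    step_in_microseconds = 1000 * 1                # time_step * time_scale_factor
    samples_per_step = step_in_microseconds / 400  # 2.5 samples per step
    recording_per_step = samples_per_step / 2      # 1.25 entries per step
    max_recording_per_step = math.ceil(recording_per_step)             # 2
    overflow_recordings = max_recording_per_step - recording_per_step  # 0.75
    # The fixed SDRAM covers the worst-case partial entry (the 0.75
    # overflow), while the per-timestep cost uses the 1.25 average.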
Example 21
    def get_static_resources(input_filters, n_routing_keys, hostname,
                             n_provenance_items):
        """ generates resource calculation so that it can eb called from 
        outside and not instantiated
        
        :param input_filters: the input filters going into this vertex
        :param n_routing_keys: the n keys from the input filters
        :param hostname: The hostname of the host machine we're running on
        :param n_provenance_items: n provenance data items 
        :return: A resource container containing the resources used by this 
        vertex for those inputs. 
        """
        iptags = list()
        iptags.append(
            IPtagResource(
                ip_address=hostname,
                port=None,
                strip_sdp=False,
                tag=None,
                traffic_identifier=(
                    SDPTransmitterMachineVertex.IPTAG_TRAFFIC_IDENTIFIER)))

        return ResourceContainer(sdram=SDRAMResource(
            fec_constants.SYSTEM_BYTES_REQUIREMENT +
            helpful_functions.sdram_size_in_bytes_for_filter_region(
                input_filters) +
            helpful_functions.sdram_size_in_bytes_for_routing_region(
                n_routing_keys) +
            ProvidesProvenanceDataFromMachineImpl.get_provenance_data_size(
                n_provenance_items) +
            SDPTransmitterMachineVertex._transmitter_region()),
                                 iptags=iptags)
Example 22
 def get_resources_used_by_atoms(self, vertex_slice, graph):
     out_edges = graph.get_edges_starting_at_vertex(self)
     return ResourceContainer(
         sdram=SDRAMResource(self.get_sdram_usage_for_atoms(out_edges)),
         dtcm=DTCMResource(self.get_dtcm_usage_for_atoms(vertex_slice)),
         cpu_cycles=CPUCyclesPerTickResource(
             self.get_cpu_usage_for_atoms(vertex_slice)))
Example 23
    def resources_required(self):
        resources = ResourceContainer(sdram=ConstantSDRAM(
            SYSTEM_BYTES_REQUIREMENT +
            recording_utilities.get_recording_header_size(1) +
            self._string_data_size))

        return resources
Example 24
    def test_ip_tags(self):
        machine = virtual_machine(12, 12)
        eth_chips = machine.ethernet_connected_chips
        vertices = [
            SimpleMachineVertex(ResourceContainer(
                iptags=[IPtagResource("127.0.0.1", port=None, strip_sdp=True)
                        ]),
                                label="Vertex {}".format(i))
            for i in range(len(eth_chips))
        ]
        print("Created {} vertices".format(len(vertices)))
        placements = Placements(
            Placement(vertex, chip.x, chip.y, 1)
            for vertex, chip in zip(vertices, eth_chips))
        allocator = BasicTagAllocator()
        _, _, tags = allocator(machine,
                               plan_n_timesteps=None,
                               placements=placements)

        for vertex, chip in zip(vertices, eth_chips):
            iptags = tags.get_ip_tags_for_vertex(vertex)
            self.assertEqual(len(iptags), 1,
                             "Incorrect number of tags assigned")
            self.assertEqual(iptags[0].destination_x, chip.x,
                             "Destination of tag incorrect")
            self.assertEqual(iptags[0].destination_y, chip.y,
                             "Destination of tag incorrect")
            placement = placements.get_placement_of_vertex(vertex)
            print(placement, "has tag", iptags[0])
Example 25
 def get_resources_used_by_atoms(self, vertex_slice):
     return ResourceContainer(
         sdram=SDRAMResource(
             self.get_sdram_usage_for_atoms()),
         dtcm=DTCMResource(self.get_dtcm_usage_for_atoms(vertex_slice)),
         cpu_cycles=CPUCyclesPerTickResource(
             self.get_cpu_usage_for_atoms(vertex_slice)))
Example 26
def get_recording_resources(
        region_sizes, buffering_ip_address=None,
        buffering_port=None, notification_tag=None):
    """ Get the resources for recording

    :param region_sizes:\
        A list of the sizes of each region.  A size of 0 is acceptable to\
        indicate an empty region
    :type region_sizes: list(int)
    :param buffering_ip_address:\
        The IP address to receive buffering messages on, or None if buffering\
        is not in use
    :type buffering_ip_address: str
    :param buffering_port:\
        The port to receive buffering messages on, or None if a port is to be\
        assigned
    :type buffering_port: int
    :param notification_tag:\
        The tag to send buffering messages with, or None to use a default tag
    :type notification_tag: int
    :rtype: :py:class:`pacman.model.resources.ResourceContainer`
    """
    ip_tags = list()
    if buffering_ip_address is not None:
        ip_tags.append(IPtagResource(
            buffering_ip_address, buffering_port, True, notification_tag,
            TRAFFIC_IDENTIFIER))
    # return the resources including the SDRAM requirements
    return ResourceContainer(
        iptags=ip_tags,
        sdram=SDRAMResource(
            get_recording_header_size(len(region_sizes)) +
            get_recording_data_size(region_sizes)))
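
A hypothetical call to get_recording_resources matching its docstring above:
two recording regions with the second empty, buffering messages delivered to
an assumed host address, and the port and tag left as None so they are
assigned automatically:

    # Hypothetical usage; the address and sizes are illustrative.
    resources = get_recording_resources(
        region_sizes=[4096, 0],
        buffering_ip_address="192.168.240.254")
    # One IPtagResource is created, and the SDRAM covers the recording
    # header for two regions plus the recording data size.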
Example 27
 def resources_required(self):
     fixed = (
         SYSTEM_BYTES_REQUIREMENT +
         recording_utilities.get_recording_header_size(len(Channels)) +
         self.PARAMS_BASE_SIZE + len(self._text))
     variable = len(self._text)
     return ResourceContainer(sdram=VariableSDRAM(fixed, variable))
Example 28
    def __init__(self, vertex_slice, resources_required, constraints, label,
                 app_vertex, truth_table, input_sequence, rate_on, rate_off,
                 score_delay, stochastic, incoming_spike_buffer_size,
                 simulation_duration_ms, rand_seed):

        # resources required
        self._resources_required = ResourceContainer(
            sdram=ConstantSDRAM(resources_required))

        # **NOTE** n_neurons currently ignored - width and height will be
        # specified as additional parameters, forcing their product to be
        # duplicated in n_neurons seems pointless

        self._label = label

        # Pass in variables
        self._truth_table = truth_table
        self._rate_on = rate_on
        self._rate_off = rate_off
        self._stochastic = stochastic
        self._input_sequence = input_sequence
        self._no_inputs = len(input_sequence)

        self._n_neurons = self._no_inputs
        self._rand_seed = rand_seed

        self._score_delay = score_delay

        # used to define size of recording region
        self._recording_size = int((simulation_duration_ms / 1000.) * 4)

        # Superclasses
        MachineVertex.__init__(self, label, constraints, app_vertex,
                               vertex_slice)
Example 29
 def test_new_graph(self):
     """
      Tests that after building a machine graph, all partitioned vertices
      and partitioned edges are in existence.
     """
     vertices = list()
     edges = list()
     for i in range(10):
         vertices.append(
             SimpleMachineVertex(ResourceContainer(), "V{}".format(i)))
     with self.assertRaises(NotImplementedError):
         vertices[1].add_constraint(SameAtomsAsVertexConstraint(
             vertices[4]))
         vertices[4].add_constraint(SameAtomsAsVertexConstraint(
             vertices[1]))
     for i in range(5):
         edges.append(MachineEdge(vertices[0], vertices[(i + 1)]))
     for i in range(5, 10):
         edges.append(MachineEdge(vertices[5], vertices[(i + 1) % 10]))
     graph = MachineGraph("foo")
     graph.add_vertices(vertices)
     graph.add_outgoing_edge_partition(
         MulticastEdgePartition(identifier="bar", pre_vertex=vertices[0]))
     graph.add_outgoing_edge_partition(
         MulticastEdgePartition(identifier="bar", pre_vertex=vertices[5]))
     graph.add_edges(edges, "bar")
     self.graph_there_and_back(graph)
Example 30
    def test_deallocation_of_resources(self):
        machine = virtual_machine(width=2, height=2, n_cpus_per_chip=18)
        chip_sdram = machine.get_chip_at(1, 1).sdram.size
        res_sdram = 12345

        tracker = ResourceTracker(machine,
                                  plan_n_timesteps=None,
                                  preallocated_resources=None)

        sdram_res = ConstantSDRAM(res_sdram)
        resources = ResourceContainer(sdram=sdram_res)
        chip_0 = machine.get_chip_at(0, 0)

        # verify core tracker is empty
        if (0, 0) in tracker._core_tracker:
            raise Exception("shouldnt exist")

        tracker._get_core_tracker(1, 1)

        # verify core tracker not empty
        if (1, 1) not in tracker._core_tracker:
            raise Exception("should exist")

        # verify sdram tracker
        # 0, 0 in _sdram_tracker due to the get_core_tracker(0, 0) call
        if tracker._sdram_tracker[1, 1] != chip_sdram:
            raise Exception("incorrect sdram of {}".format(
                tracker._sdram_tracker[1, 1]))

        # allocate some res
        chip_x, chip_y, processor_id, ip_tags, reverse_ip_tags = \
            tracker.allocate_resources(resources, [(0, 0)])

        # verify chips used is updated
        cores = list(tracker._core_tracker[(0, 0)]._cores)
        self.assertEqual(len(cores), chip_0.n_user_processors - 1)

        # verify sdram used is updated
        sdram = tracker._sdram_tracker[(0, 0)]
        self.assertEqual(sdram, chip_sdram - res_sdram)

        if (0, 0) not in tracker._chips_used:
            raise Exception("should exist")

        # deallocate res
        tracker.unallocate_resources(chip_x, chip_y, processor_id, resources,
                                     ip_tags, reverse_ip_tags)

        # verify chips used is updated
        if tracker._core_tracker[(0, 0)].n_cores_available != \
                chip_0.n_user_processors:
            raise Exception("shouldn't exist or should be right size")

        # if (0, 0) in tracker._chips_used:
        #   raise Exception("shouldnt exist")

        # verify sdram tracker
        if tracker._sdram_tracker[0, 0] != chip_sdram:
            raise Exception("incorrect sdram of {}".format(
                tracker._sdram_tracker[0, 0]))
Example 31
    def __init__(self,
                 lpg_params,
                 constraints=None,
                 app_vertex=None,
                 label=None):
        """
        :param LivePacketGatherParams lpg_params:
        :param LivePacketGather app_vertex:
        :param str label:
        :param constraints:
        :type constraints:
            iterable(~pacman.model.constraints.AbstractConstraint)
        """
        # inheritance
        super(LivePacketGatherMachineVertex,
              self).__init__(label or lpg_params.label,
                             constraints=constraints,
                             app_vertex=app_vertex)

        self._resources_required = ResourceContainer(
            cpu_cycles=CPUCyclesPerTickResource(self.get_cpu_usage()),
            dtcm=DTCMResource(self.get_dtcm_usage()),
            sdram=ConstantSDRAM(self.get_sdram_usage()),
            iptags=[lpg_params.get_iptag_resource()])

        # app specific data items
        self._lpg_params = lpg_params
Example 32
 def resources_required(self):
     resources = ResourceContainer(
         dtcm=DTCMResource(self.get_dtcm_usage()),
         sdram=SDRAMResource(self.get_sdram_usage(
             self._send_buffer_times, self._send_buffer_max_space,
             self._record_buffer_size > 0)),
         cpu_cycles=CPUCyclesPerTickResource(self.get_cpu_usage()),
         iptags=self._iptags,
         reverse_iptags=self._reverse_iptags)
     if self._iptags is None:
         resources.extend(get_recording_resources(
             [self._record_buffer_size],
             self._buffer_notification_ip_address,
             self._buffer_notification_port, self._buffer_notification_tag))
     else:
         resources.extend(get_recording_resources(
             [self._record_buffer_size]))
     return resources