Example #1
    def __call__(self, partitioned_graph, machine):

        # check that the algorithm can handle the constraints
        utility_calls.check_algorithm_can_support_constraints(
            constrained_vertices=partitioned_graph.subvertices,
            supported_constraints=[
                PlacerRadialPlacementFromChipConstraint,
                TagAllocatorRequireIptagConstraint,
                TagAllocatorRequireReverseIptagConstraint,
                PlacerChipAndCoreConstraint],
            abstract_constraint_type=AbstractPlacerConstraint)

        placements = Placements()
        ordered_subverts = utility_calls.sort_objects_by_constraint_authority(
            partitioned_graph.subvertices)

        # Iterate over subvertices and generate placements
        progress_bar = ProgressBar(len(ordered_subverts),
                                   "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, self._generate_radial_chips(machine))
        for vertex in ordered_subverts:
            self._place_vertex(vertex, resource_tracker, machine, placements)
            progress_bar.update()
        progress_bar.end()
        return {'placements': placements}
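
The placer above seeds its ResourceTracker with self._generate_radial_chips(machine), a helper not shown in this listing, whose job is to order the machine's chips outwards from a central chip so that placements spread radially across the board. A minimal, library-free sketch of such an ordering (the names radial_chip_order and neighbours_of are illustrative, not the real PACMAN API):

from collections import deque

def radial_chip_order(chips, neighbours_of, start=(0, 0)):
    """Yield chip coordinates in breadth-first order from a start chip.

    chips is a set of (x, y) tuples; neighbours_of maps a coordinate to
    the coordinates it has working links to.
    """
    seen = {start}
    queue = deque([start])
    while queue:
        chip = queue.popleft()
        yield chip
        for neighbour in neighbours_of(chip):
            if neighbour in chips and neighbour not in seen:
                seen.add(neighbour)
                queue.append(neighbour)
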
    def __call__(self, subgraph, n_keys_map, routing_paths):

        # check that this algorithm supports the constraints
        utility_calls.check_algorithm_can_support_constraints(
            constrained_vertices=subgraph.subedges,
            supported_constraints=[
                KeyAllocatorFixedMaskConstraint,
                KeyAllocatorFixedKeyAndMaskConstraint,
                KeyAllocatorContiguousRangeContraint],
            abstract_constraint_type=AbstractKeyAllocatorConstraint)

        routing_tables = MulticastRoutingTables()

        # Get the partitioned edges grouped by those that require the same key
        same_key_groups = \
            routing_info_allocator_utilities.get_edge_groups(subgraph)

        # Go through the groups and allocate keys
        progress_bar = ProgressBar(len(same_key_groups),
                                   "Allocating routing keys")
        routing_infos = RoutingInfo()
        for group in same_key_groups:
            # Check how many keys are needed for the edges of the group
            edge_n_keys = None
            for edge in group:
                n_keys = n_keys_map.n_keys_for_partitioned_edge(edge)
                if edge_n_keys is None:
                    edge_n_keys = n_keys
                elif edge_n_keys != n_keys:
                    raise PacmanRouteInfoAllocationException(
                        "Two edges require the same keys but request a"
                        " different number of keys")

            # Get any fixed keys and masks from the group and attempt to
            # allocate them
            keys_and_masks = routing_info_allocator_utilities.\
                get_fixed_key_and_mask(group)
            fixed_mask, fields = \
                routing_info_allocator_utilities.get_fixed_mask(group)

            if keys_and_masks is not None:

                self._allocate_fixed_keys_and_masks(keys_and_masks, fixed_mask)
            else:
                keys_and_masks = self._allocate_keys_and_masks(
                    fixed_mask, fields, edge_n_keys)

            # Allocate the routing information
            for edge in group:
                subedge_info = SubedgeRoutingInfo(keys_and_masks, edge)
                routing_infos.add_subedge_info(subedge_info)

                # update routing tables with entries
                routing_info_allocator_utilities.add_routing_key_entries(
                    routing_paths, subedge_info, edge, routing_tables)

            progress_bar.update()
        progress_bar.end()
        return {'routing_infos': routing_infos,
                'routing_tables': routing_tables}
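
The allocator above delegates the actual arithmetic to _allocate_keys_and_masks, which is not shown here. The underlying idea is that each group of edges receives a contiguous block of keys whose size is rounded up to a power of two, so a single (key, mask) pair can describe the whole block. A rough, self-contained sketch of that calculation (key_and_mask_for is a made-up name, not the real method):

def key_and_mask_for(base_key, n_keys):
    """Return a (key, mask) pair covering a contiguous block of n_keys keys.

    The block size is rounded up to a power of two; the mask fixes the
    high-order bits and leaves the low-order bits free for the keys.
    """
    bits = (max(n_keys, 1) - 1).bit_length()
    free_bits = (1 << bits) - 1
    if base_key & free_bits:
        raise ValueError("base key must be aligned to the block size")
    return base_key, 0xFFFFFFFF & ~free_bits

# a group needing 6 keys starting at key 0x200 gets mask 0xFFFFFFF8
print(key_and_mask_for(0x200, 6))
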
Example #3
    def get_v(self, label, buffer_manager, region, state_region, placements,
              graph_mapper, partitionable_vertex):

        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)

        ms_per_tick = self._machine_time_step / 1000.0

        data = list()
        missing_str = ""

        progress_bar = \
            ProgressBar(len(subvertices),
                        "Getting membrane voltage for {}".format(label))

        for subvertex in subvertices:

            vertex_slice = graph_mapper.get_subvertex_slice(subvertex)
            placement = placements.get_placement_of_subvertex(subvertex)

            x = placement.x
            y = placement.y
            p = placement.p

            # for buffered output, the info is taken from the buffer manager
            neuron_param_region_data_pointer, missing_data =\
                buffer_manager.get_data_for_vertex(
                    x, y, p, region, state_region)
            if missing_data:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            record_raw = neuron_param_region_data_pointer.read_all()
            record_length = len(record_raw)
            n_rows = record_length // ((vertex_slice.n_atoms + 1) * 4)
            record = (numpy.asarray(record_raw, dtype="uint8").
                      view(dtype="<i4")).reshape((n_rows,
                                                  (vertex_slice.n_atoms + 1)))
            split_record = numpy.array_split(record, [1, 1], 1)
            record_time = numpy.repeat(
                split_record[0] * float(ms_per_tick), vertex_slice.n_atoms, 1)
            record_ids = numpy.tile(
                numpy.arange(vertex_slice.lo_atom, vertex_slice.hi_atom + 1),
                len(record_time)).reshape((-1, vertex_slice.n_atoms))
            record_membrane_potential = split_record[2] / 32767.0

            part_data = numpy.dstack(
                [record_ids, record_time, record_membrane_potential])
            part_data = numpy.reshape(part_data, [-1, 3])
            data.append(part_data)
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warn(
                "Population {} is missing membrane voltage data in region {}"
                " from the following cores: {}".format(
                    label, region, missing_str))
        data = numpy.vstack(data)
        order = numpy.lexsort((data[:, 1], data[:, 0]))
        result = data[order]
        return result
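
The reshaping in get_v packs each recorded row as one timestamp word followed by one fixed-point voltage per neuron, then expands it into (neuron id, time, voltage) triples. A tiny synthetic example of the same numpy steps, with values chosen purely for illustration:

import numpy

ms_per_tick = 1.0
# two recorded rows of [timestep | v0 v1 v2], as 32-bit little-endian words
raw = numpy.array([[0, 16384, -16384, 0],
                   [1, 16384, -16384, 0]], dtype="<i4")
n_atoms = raw.shape[1] - 1
times = numpy.repeat(raw[:, :1] * ms_per_tick, n_atoms, 1)
ids = numpy.tile(numpy.arange(n_atoms), raw.shape[0]).reshape(-1, n_atoms)
voltages = raw[:, 1:] / 32767.0
triples = numpy.dstack([ids, times, voltages]).reshape(-1, 3)
print(triples)  # six rows of (neuron_id, time_ms, voltage)
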
Example #4
    def get_spikes(self, label, buffer_manager, region, state_region,
                   placements, graph_mapper, partitionable_vertex):

        spike_times = list()
        spike_ids = list()
        ms_per_tick = self._machine_time_step / 1000.0

        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)

        missing_str = ""

        progress_bar = ProgressBar(len(subvertices),
                                   "Getting spikes for {}".format(label))
        for subvertex in subvertices:

            placement = placements.get_placement_of_subvertex(subvertex)
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            x = placement.x
            y = placement.y
            p = placement.p
            lo_atom = subvertex_slice.lo_atom

            # Read the spikes
            n_words = int(math.ceil(subvertex_slice.n_atoms / 32.0))
            n_bytes = n_words * 4
            n_words_with_timestamp = n_words + 1

            # for buffered output, the info is taken from the buffer manager
            neuron_param_region_data_pointer, data_missing = \
                buffer_manager.get_data_for_vertex(
                    x, y, p, region, state_region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            record_raw = neuron_param_region_data_pointer.read_all()
            raw_data = (numpy.asarray(record_raw, dtype="uint8").view(
                dtype="<i4")).reshape([-1, n_words_with_timestamp])
            split_record = numpy.array_split(raw_data, [1, 1], 1)
            record_time = split_record[0] * float(ms_per_tick)
            spikes = split_record[2].byteswap().view("uint8")
            bits = numpy.fliplr(numpy.unpackbits(spikes).reshape(
                (-1, 32))).reshape((-1, n_bytes * 8))
            time_indices, indices = numpy.where(bits == 1)
            times = record_time[time_indices].reshape((-1))
            indices = indices + lo_atom
            spike_ids.append(indices)
            spike_times.append(times)
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warn(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}".format(label, region, missing_str)
            )

        spike_ids = numpy.hstack(spike_ids)
        spike_times = numpy.hstack(spike_times)
        result = numpy.dstack((spike_ids, spike_times))[0]
        return result[numpy.lexsort((spike_times, spike_ids))]
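
The byteswap/unpackbits/fliplr sequence above turns each 32-bit spike word (one bit per neuron, bit 0 for the lowest-numbered neuron in the word) into a row of bits whose column index is the neuron index. A small stand-alone illustration of that decoding:

import numpy

# one 32-bit word for one timestep; neurons 0 and 5 spiked, so bits 0 and 5
# of the word are set
words = numpy.array([(1 << 0) | (1 << 5)], dtype="<i4")
as_bytes = words.byteswap().view("uint8")      # reorder to big-endian bytes
bits = numpy.fliplr(numpy.unpackbits(as_bytes).reshape((-1, 32)))
print(numpy.where(bits[0] == 1)[0])            # -> [0 5]
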
def validate_routes(partitioned_graph, placements, routing_infos,
                    routing_tables, machine):
    """ Go though the placements given during init and check that the\
        routing entries within the routing tables support reach the\
        correction destinations as well as not producing any cycles.

    :param partitioned_graph: the subgraph of the problem spec
    :param placements: the placements container
    :param routing_infos:  the routing info container
    :param routing_tables: the routing tables generated by the\
                routing algorithm
    :param machine: the python machine object
    :type machine: spinnmachine.machine.Machine object
    :return: None
    :raises PacmanRoutingException: when either no routing table entry is\
                found by the search on a given router, or a cycle is\
                detected

    """
    progress = ProgressBar(
        len(list(placements.placements)),
        "Verifying the routes from each core travel to the correct locations")
    for placement in placements.placements:
        outgoing_edges_for_partitioned_vertex = \
            partitioned_graph.outgoing_subedges_from_subvertex(
                placement.subvertex)

        # locate all placements with which this placement/subvertex will
        # communicate for a given key_and_mask, and check its
        # determined destinations
        key_and_masks = \
            routing_infos.get_key_and_masks_for_partitioned_vertex(
                placement.subvertex)

        # locate each set for a given key_and_mask
        for key_and_mask in key_and_masks:
            destination_placements = list()
            for outgoing_edge in outgoing_edges_for_partitioned_vertex:
                edge_key_and_masks = \
                    routing_infos.get_keys_and_masks_from_subedge(
                        outgoing_edge)
                for edge_key_and_mask in edge_key_and_masks:
                    if edge_key_and_mask == key_and_mask:
                        dest_placement = \
                            placements.get_placement_of_subvertex(
                                outgoing_edge.post_subvertex)
                        dest_tuple = PlacementTuple(x=dest_placement.x,
                                                    y=dest_placement.y,
                                                    p=dest_placement.p)
                        if dest_tuple not in destination_placements:
                            destination_placements.append(dest_tuple)

            # search for these destinations
            _search_route(placement, destination_placements, key_and_mask,
                          routing_tables, machine)
        progress.update()
    progress.end()
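
_search_route (not shown) walks the routing tables chip by chip and, at each router, looks for an entry matching the packet key under the entry's mask. The matching rule for a SpiNNaker multicast entry is the usual key/mask comparison, roughly:

def entry_matches(entry_key, entry_mask, packet_key):
    # A multicast routing entry matches a packet when the packet's key,
    # masked by the entry's mask, equals the entry's (masked) key.
    return (packet_key & entry_mask) == (entry_key & entry_mask)
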
    def get_spikes(self, label, buffer_manager, region, state_region,
                   placements, graph_mapper, partitionable_vertex,
                   base_key_function):

        results = list()
        missing_str = ""
        ms_per_tick = self._machine_time_step / 1000.0
        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)
        progress_bar = ProgressBar(len(subvertices),
                                   "Getting spikes for {}".format(label))

        for subvertex in subvertices:

            placement = placements.get_placement_of_subvertex(subvertex)
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            x = placement.x
            y = placement.y
            p = placement.p

            # Read the spikes
            raw_spike_data, data_missing = \
                buffer_manager.get_data_for_vertex(
                    x, y, p, region, state_region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            spike_data = raw_spike_data.read_all()
            number_of_bytes_written = len(spike_data)

            offset = 0
            while offset < number_of_bytes_written:
                eieio_header = EIEIODataHeader.from_bytestring(
                    spike_data, offset)
                offset += eieio_header.size
                timestamp = eieio_header.payload_base * ms_per_tick
                timestamps = numpy.repeat([timestamp], eieio_header.count)
                keys = numpy.frombuffer(
                    spike_data, dtype="<u4", count=eieio_header.count,
                    offset=offset)
                neuron_ids = ((keys - base_key_function(subvertex)) +
                              subvertex_slice.lo_atom)
                offset += eieio_header.count * 4
                results.append(numpy.dstack((neuron_ids, timestamps))[0])
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warn(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}".format(label, region, missing_str))
        if len(results) != 0:
            result = numpy.vstack(results)
            result = result[numpy.lexsort((result[:, 1], result[:, 0]))]
        else:
            result = []
        return result
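
The loop above walks the recorded byte string block by block: read a header, take its timestamp and key count, then read that many 32-bit keys. The real header layout is defined by spinnman's EIEIODataHeader and is not reproduced here; the sketch below uses a deliberately simplified stand-in header (an unsigned timestamp and an unsigned count packed as "<II") purely to show the offset-walking pattern:

import struct
import numpy

def walk_blocks(data, ms_per_tick):
    """Walk bytes laid out as repeated [timestamp, count, keys...] blocks.

    The 8-byte "<II" header used here is a simplified stand-in for the real
    EIEIO header, for illustration only.
    """
    offset = 0
    results = []
    while offset < len(data):
        timestamp, count = struct.unpack_from("<II", data, offset)
        offset += 8
        keys = numpy.frombuffer(data, dtype="<u4", count=count, offset=offset)
        offset += count * 4
        results.append(numpy.dstack(
            (keys, numpy.repeat([timestamp * ms_per_tick], count)))[0])
    return numpy.vstack(results) if results else []
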
    def __call__(self, executable_targets, app_id, transciever,
                 loaded_application_data_token):
        """ Go through the executable targets and load each binary to \
            everywhere and then send a start request to the cores that \
            actually use it
        """

        if not loaded_application_data_token:
            raise exceptions.ConfigurationException(
                "The token indicating that the application data has been"
                " loaded is set to False, so the binaries cannot be loaded."
                " Please fix and try again")

        progress_bar = ProgressBar(executable_targets.total_processors,
                                   "Loading executables onto the machine")
        for executable_target_key in executable_targets.binary_paths():
            file_reader = SpinnmanFileDataReader(executable_target_key)
            core_subset = executable_targets.\
                retrieve_cores_for_a_executable_target(executable_target_key)

            statinfo = os.stat(executable_target_key)
            size = statinfo.st_size

            # TODO there is a need to parse the binary and see if its
            # ITCM and DTCM requirements are within acceptable params for
            # operating on spinnaker. Currently there are just a few safety
            # checks, which may not be accurate enough.
            if size > constants.MAX_SAFE_BINARY_SIZE:
                logger.warn(
                    "The size of {} is large enough that the binary may be"
                    " larger than what is currently supported by SpiNNaker."
                    " Please reduce the binary size if it starts to behave"
                    " strangely, or goes into the WDOG state before"
                    " starting.".format(executable_target_key))
                if size > constants.MAX_POSSIBLE_BINARY_SIZE:
                    raise exceptions.ConfigurationException(
                        "The size of {} is too large and therefore"
                        " will very likely cause a WDOG state. Until a more"
                        " precise measurement of ITCM and DTCM can be produced"
                        " this is deemed as an error state. Please reduce the"
                        " size of your binary or circumvent this error check."
                        .format(executable_target_key))

            transciever.execute_flood(core_subset, file_reader, app_id, size)

            actual_cores_loaded = 0
            for chip_based in core_subset.core_subsets:
                for _ in chip_based.processor_ids:
                    actual_cores_loaded += 1
            progress_bar.update(amount_to_add=actual_cores_loaded)
        progress_bar.end()

        return {"LoadBinariesToken": True}
    def get_synaptic_list_from_machine(self, graph_mapper, partitioned_graph,
                                       placements, transceiver, routing_infos):
        """
        Get synaptic data for all connections in this Projection from the
        machine.
        """
        if self._stored_synaptic_data_from_machine is None:
            timer = None
            if conf.config.getboolean("Reports", "outputTimesForSections"):
                timer = Timer()
                timer.start_timing()

            subedges = \
                graph_mapper.get_partitioned_edges_from_partitionable_edge(
                    self)
            if subedges is None:
                subedges = list()

            synaptic_list = copy.copy(self._synapse_list)
            synaptic_list_rows = synaptic_list.get_rows()
            progress_bar = ProgressBar(
                len(subedges),
                "Reading back synaptic matrix for edge between"
                " {} and {}".format(self._pre_vertex.label,
                                    self._post_vertex.label))
            for subedge in subedges:
                n_rows = subedge.get_n_rows(graph_mapper)
                pre_vertex_slice = \
                    graph_mapper.get_subvertex_slice(subedge.pre_subvertex)
                post_vertex_slice = \
                    graph_mapper.get_subvertex_slice(subedge.post_subvertex)

                sub_edge_post_vertex = \
                    graph_mapper.get_vertex_from_subvertex(
                        subedge.post_subvertex)
                rows = sub_edge_post_vertex.get_synaptic_list_from_machine(
                    placements, transceiver, subedge.pre_subvertex, n_rows,
                    subedge.post_subvertex,
                    self._synapse_row_io, partitioned_graph,
                    routing_infos, subedge.weight_scales).get_rows()

                for i in range(len(rows)):
                    synaptic_list_rows[
                        i + pre_vertex_slice.lo_atom].set_slice_values(
                            rows[i], vertex_slice=post_vertex_slice)
                progress_bar.update()
            progress_bar.end()
            self._stored_synaptic_data_from_machine = synaptic_list
            if conf.config.getboolean("Reports", "outputTimesForSections"):
                logger.info("Time to read matrix: {}".format(
                    timer.take_sample()))

        return self._stored_synaptic_data_from_machine
    def load_initial_buffers(self):
        """ Load the initial buffers for the senders using mem writes
        """
        total_data = 0
        for vertex in self._sender_vertices:
            for region in vertex.get_regions():
                total_data += vertex.get_region_buffer_size(region)

        progress_bar = ProgressBar(
            total_data, "Loading buffers ({} bytes)".format(total_data))
        for vertex in self._sender_vertices:
            for region in vertex.get_regions():
                self._send_initial_messages(vertex, region, progress_bar)
        progress_bar.end()
    def _handle_external_algorithm(self, algorithm):
        """ Creates the input files for the algorithm
        :param algorithm: the algorthm
        :return: None
        """
        input_params = self._create_input_commands(algorithm)

        inputs = \
            [a.format(**input_params) for a in algorithm.command_line_args]

        # output debug info in case things go wrong
        logger.debug(
            "The inputs to the external mapping function are {}"
            .format(inputs))

        # create progress bar for external algorithm
        algorithm_progress_bar = ProgressBar(
            1, "Running external algorithm {}".format(algorithm.algorithm_id))

        timer = None
        if self._do_timing:
            timer = Timer()
            timer.start_timing()

        # execute other command
        child = subprocess.Popen(
            inputs, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            stdin=subprocess.PIPE)
        child.wait()
        algorithm_progress_bar.end()

        if self._do_timing:
            self._handle_prov(timer, algorithm)

        # check the return code for a successful execution
        if child.returncode != 0:
            stdout, stderr = child.communicate()
            raise exceptions.\
                PacmanAlgorithmFailedToCompleteException(
                    "Algorithm {} returned a non-zero error code {}\n"
                    "    Inputs: {}\n"
                    "    Output: {}\n"
                    "    Error: {}\n".format(
                        algorithm.algorithm_id, child.returncode,
                        inputs, stdout, stderr))

        outputs = self._sort_out_external_algorithm_outputs(algorithm)
        self._map_output_parameters(outputs, algorithm)
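
One note on the subprocess handling above: with stdout and stderr connected to pipes, calling wait() before draining them can deadlock if the child writes enough output to fill a pipe buffer, and communicate() already waits for the process to finish. A slightly safer variant of the same pattern (the command line here is hypothetical):

import subprocess

child = subprocess.Popen(
    ["external_mapper", "--in", "graph.json"],   # hypothetical command
    stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, stderr = child.communicate()             # drains pipes and waits
if child.returncode != 0:
    raise RuntimeError(
        "external algorithm failed ({}): {}".format(child.returncode, stderr))
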
def generate_sub_edges(subgraph, graph_to_subgraph_mapper, graph):
    """ Generate the sub edges for the subvertices in the graph

    :param subgraph: the partitioned graph to work with
    :type subgraph:\
                :py:class:`pacman.model.partitioned_graph.partitioned_graph.PartitionedGraph`
    :param graph_to_subgraph_mapper: the mapper between the \
                partitionable graph and the partitioned graph
    :type graph_to_subgraph_mapper:\
                :py:class:`pacman.model.graph_mapper.GraphMapper`
    :param graph: the partitionable graph to work with
    :type graph:\
                :py:class:`pacman.model.graph.partitionable_graph.PartitionableGraph`
    """

    # start progress bar
    progress_bar = ProgressBar(len(subgraph.subvertices),
                               "Partitioning the partitionable graph's edges")

    # Partition edges according to vertex partitioning
    for src_sv in subgraph.subvertices:

        # For each out edge of the parent vertex...
        vertex = graph_to_subgraph_mapper.get_vertex_from_subvertex(src_sv)
        outgoing_partitions = \
            graph.outgoing_edges_partitions_from_vertex(vertex)
        for outgoing_partition_identifier in outgoing_partitions:
            out_edges = \
                outgoing_partitions[outgoing_partition_identifier].edges
            for edge in out_edges:

                # and create and store a new subedge for each post-subvertex
                post_vertex = edge.post_vertex
                post_subverts = (graph_to_subgraph_mapper
                                 .get_subvertices_from_vertex(post_vertex))
                for dst_sv in post_subverts:
                    subedge = edge.create_subedge(src_sv, dst_sv)
                    subgraph.add_subedge(subedge,
                                         outgoing_partition_identifier)
                    graph_to_subgraph_mapper.add_partitioned_edge(
                        subedge, edge)
        progress_bar.update()
    progress_bar.end()
    def __call__(self, placements, file_path):
        """

        :param placements:
        :param folder_path:
        :return:
        """

        progress_bar = ProgressBar(
            len(placements), "Converting to json core allocations")

        # write basic stuff
        json_core_allocations_dict = dict()

        json_core_allocations_dict['type'] = "cores"

        # process placements
        for placement in placements:
            json_core_allocations_dict[placement.subvertex.label] = \
                [placement.p, placement.p + 1]
            progress_bar.update()

        # dump dict into json file
        file_to_write = open(file_path, "w")
        json.dump(json_core_allocations_dict, file_to_write)
        file_to_write.close()

        # validate the schema
        core_allocations_schema_file_path = os.path.join(
            os.path.dirname(file_format_schemas.__file__),
            "core_allocations.json"
        )
        file_to_read = open(core_allocations_schema_file_path, "r")
        core_allocations_schema = json.load(file_to_read)
        jsonschema.validate(
            json_core_allocations_dict, core_allocations_schema)

        # complete progress bar
        progress_bar.end()

        # return the file format
        return {"FileCoreAllocations": file_path}
    def __call__(self, partitioned_graph, graph_mapper):
        """ Generate an n_keys map for the graph and add constraints
        :param partitioned_graph:
        :param graph_mapper:
        :return:
        """
        progress_bar = ProgressBar(
            len(partitioned_graph.subedges),
            "Deducing edge to number of keys map")

        n_keys_map = DictBasedPartitionedEdgeNKeysMap()
        for edge in partitioned_graph.subedges:
            vertex_slice = graph_mapper.get_subvertex_slice(
                edge.pre_subvertex)
            super_edge = (graph_mapper
                          .get_partitionable_edge_from_partitioned_edge(edge))

            if not isinstance(super_edge.pre_vertex,
                              AbstractProvidesNKeysForEdge):
                n_keys_map.set_n_keys_for_patitioned_edge(edge,
                                                          vertex_slice.n_atoms)
            else:
                n_keys_map.set_n_keys_for_patitioned_edge(
                    edge,
                    super_edge.pre_vertex.get_n_keys_for_partitioned_edge(
                        edge, graph_mapper))

            if isinstance(super_edge.pre_vertex,
                          AbstractProvidesOutgoingEdgeConstraints):
                edge.add_constraints(
                    super_edge.pre_vertex.get_outgoing_edge_constraints(
                        edge, graph_mapper))
            if isinstance(super_edge.post_vertex,
                          AbstractProvidesIncomingEdgeConstraints):
                edge.add_constraints(
                    super_edge.post_vertex.get_incoming_edge_constraints(
                        edge, graph_mapper))
            progress_bar.update()
        progress_bar.end()

        return {'n_keys_map': n_keys_map}
    def __call__(self, router_tables, app_id, transciever, machine):
        progress_bar = ProgressBar(len(list(router_tables.routing_tables)),
                                   "Loading routing data onto the machine")

        # load each router table that is needed for the application to run
        # into the chip's SDRAM
        for router_table in router_tables.routing_tables:
            if not machine.get_chip_at(router_table.x, router_table.y).virtual:
                transciever.clear_multicast_routes(router_table.x,
                                                   router_table.y)
                transciever.clear_router_diagnostic_counters(router_table.x,
                                                             router_table.y)

                if len(router_table.multicast_routing_entries) > 0:
                    transciever.load_multicast_routes(
                        router_table.x, router_table.y,
                        router_table.multicast_routing_entries, app_id=app_id)
            progress_bar.update()
        progress_bar.end()

        return {"LoadedRoutingTablesToken": True}
    def __call__(self, file_path, transceiver, machine, router_tables, has_ran,
                 placements):
        """
        :param file_path: the file path to write the provenance data to
        :param transceiver: the spinnman interface object
        :param machine: the python representation of the spinnaker machine
        :param router_tables: the router tables that have been generated
        :param has_ran: token that states that the simulation has run
        :param placements: the placements of the partitioned vertices
        :return: none
        """

        if has_ran:
            root = etree.Element("root")
            router_file_path = os.path.join(file_path, "router_provenance.xml")
            self._write_router_provenance_data(
                root, router_tables, machine, transceiver)
            writer = open(router_file_path, "w")
            writer.write(etree.tostring(root, pretty_print=True))

            progress = ProgressBar(placements.n_placements,
                                   "Getting provenance data")

            # retrieve provenance data from any cores that provide data
            for placement in placements.placements:
                if isinstance(placement.subvertex,
                              AbstractProvidesProvenanceData):
                    core_file_path = os.path.join(
                        file_path,
                        "Provanence_data_for_{}_{}_{}_{}.xml".format(
                            placement.subvertex.label,
                            placement.x, placement.y, placement.p))
                    placement.subvertex.write_provenance_data_in_xml(
                        core_file_path, transceiver, placement)
                progress.update()
            progress.end()

        else:
            raise exceptions.ConfigurationException(
                "This function has been called before the simulation has ran."
                " This is deemed an error, please rectify and try again")
Example #16
    def __call__(self, partitioned_graph, machine):
        """ Place a partitioned_graph so that each subvertex is placed on a\
                    core

        :param partitioned_graph: The partitioned_graph to place
        :type partitioned_graph:\
                    :py:class:`pacman.model.partitioned_graph.partitioned_graph.PartitionedGraph`
        :return: A set of placements
        :rtype: :py:class:`pacman.model.placements.placements.Placements`
        :raise pacman.exceptions.PacmanPlaceException: If something\
                   goes wrong with the placement
        """

        # check that the algorithm can handle the constraints
        utility_calls.check_algorithm_can_support_constraints(
            constrained_vertices=partitioned_graph.subvertices,
            supported_constraints=[PlacerChipAndCoreConstraint],
            abstract_constraint_type=AbstractPlacerConstraint)

        placements = Placements()
        ordered_subverts = utility_calls.sort_objects_by_constraint_authority(
            partitioned_graph.subvertices)

        # Iterate over subvertices and generate placements
        progress_bar = ProgressBar(len(ordered_subverts),
                                   "Placing graph vertices")
        resource_tracker = ResourceTracker(machine)
        for subvertex in ordered_subverts:

            # Create and store a new placement anywhere on the board
            (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
                subvertex.resources_required, subvertex.constraints)
            placement = Placement(subvertex, x, y, p)
            placements.add_placement(placement)
            progress_bar.update()
        progress_bar.end()
        return {'placements': placements}
    def __call__(self, subgraph, graph_mapper):
        """
        :param subgraph: the subgraph whose edges are to be filtered
        :param graph_mapper: the graph mapper between partitionable and \
                partitioned graphs.
        :return: a new graph mapper and partitioned graph
        """
        new_sub_graph = PartitionedGraph(label=subgraph.label)
        new_graph_mapper = GraphMapper(graph_mapper.first_graph_label,
                                       subgraph.label)

        # create progress bar
        progress_bar = ProgressBar(
            len(subgraph.subvertices) + len(subgraph.subedges),
            "Filtering edges")

        # add the subverts directly, as they won't be pruned.
        for subvert in subgraph.subvertices:
            new_sub_graph.add_subvertex(subvert)
            associated_vertex = graph_mapper.get_vertex_from_subvertex(subvert)
            vertex_slice = graph_mapper.get_subvertex_slice(subvert)
            new_graph_mapper.add_subvertex(
                subvertex=subvert, vertex_slice=vertex_slice,
                vertex=associated_vertex)
            progress_bar.update()

        # start checking subedges to decide which ones need pruning....
        for subvert in subgraph.subvertices:
            out_going_partitions = \
                subgraph.outgoing_edges_partitions_from_vertex(subvert)
            for partitioner_identifier in out_going_partitions:
                for subedge in \
                        out_going_partitions[partitioner_identifier].edges:
                    if not self._is_filterable(subedge, graph_mapper):
                        logger.debug("this subedge was not pruned {}"
                                     .format(subedge))
                        new_sub_graph.add_subedge(subedge,
                                                  partitioner_identifier)
                        associated_edge = graph_mapper.\
                            get_partitionable_edge_from_partitioned_edge(
                                subedge)
                        new_graph_mapper.add_partitioned_edge(
                            subedge, associated_edge)
                    else:
                        logger.debug("this subedge was pruned {}"
                                     .format(subedge))
                    progress_bar.update()
        progress_bar.end()

        # return the pruned partitioned_graph and graph_mapper
        return {'new_sub_graph': new_sub_graph,
                'new_graph_mapper': new_graph_mapper}
    def __call__(self, file_routing_paths, partitioned_graph, placements,
                 machine):

        # load the json files
        file_routing_paths = self._handle_json_files(file_routing_paths)
        progress_bar = ProgressBar(len(file_routing_paths),
                                   "Converting to PACMAN routing paths")

        # iterate through the path for each edge and create entries
        for edge_id in file_routing_paths:
            edge = partitioned_graph.get_subedge_with_label(edge_id)

            # if the edge is None, it belongs to a vertex that needs no
            # cores, so ignore it
            if edge is not None:
                placement = placements.get_placement_of_subvertex(
                    edge.pre_subvertex)
                self._create_entries_for_path(
                    edge_id, file_routing_paths[edge_id], None, placement.p,
                    partitioned_graph, machine, placements)
            progress_bar.update()
        progress_bar.end()

        return {'routing_paths': self._multi_cast_routing_paths}
    def _write_router_provenance_data(
            self, root, router_tables, machine, txrx):
        """ Writes the provenance data of the router diagnostics

        :param root: the root element to add diagnostics to
        :return: None
        """
        progress = ProgressBar(
            machine.n_chips,
            "Getting provenance data from machine's routing tables")

        # acquire diagnostic data
        router_diagnostics = dict()
        reinjector_statuses = dict()
        for router_table in router_tables.routing_tables:
            x = router_table.x
            y = router_table.y
            if not machine.get_chip_at(x, y).virtual:
                router_diagnostic = txrx.get_router_diagnostics(x, y)
                router_diagnostics[x, y] = router_diagnostic
                reinjector_status = txrx.get_reinjection_status(x, y)
                reinjector_statuses[x, y] = reinjector_status
        doc = etree.SubElement(root, "router_counters")
        expected_routers = etree.SubElement(doc, "Used_Routers")
        for x, y in router_diagnostics:
            self._write_router_diag(
                expected_routers, x, y, router_diagnostics[x, y],
                reinjector_statuses[x, y])
            progress.update()
        unexpected_routers = etree.SubElement(doc, "Unexpected_Routers")
        for chip in machine.chips:
            if not chip.virtual:
                if (chip.x, chip.y) not in router_diagnostics:
                    router_diagnostic = \
                        txrx.get_router_diagnostics(chip.x, chip.y)
                    has_dropped_mc_packets = \
                        router_diagnostic.n_dropped_multicast_packets != 0
                    has_local_multicast_packets = \
                        router_diagnostic.n_local_multicast_packets != 0
                    has_external_multicast_packets = \
                        router_diagnostic.n_external_multicast_packets != 0
                    reinjector_status = \
                        txrx.get_reinjection_status(chip.x, chip.y)
                    if (has_dropped_mc_packets or
                            has_local_multicast_packets or
                            has_external_multicast_packets):
                        self._write_router_diag(
                            unexpected_routers, chip.x, chip.y,
                            router_diagnostic, reinjector_status)
                        progress.update()
        progress.end()
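
The provenance writers above build their reports with lxml: a root element, nested SubElement calls per router or per counter, and etree.tostring(..., pretty_print=True) for serialisation. A minimal sketch of that pattern, with element names invented for illustration:

from lxml import etree

root = etree.Element("root")
counters = etree.SubElement(root, "router_counters")
router = etree.SubElement(counters, "router_at_0_0")
dropped = etree.SubElement(router, "dropped_multicast_packets")
dropped.text = "0"
print(etree.tostring(root, pretty_print=True))
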
    def __call__(
            self, placements, tags, txrx, reports_states, app_data_folder):
        progress_bar = ProgressBar(
            len(list(placements.placements)), "Initialising buffers")

        # Create the buffer manager
        buffer_manager = BufferManager(
            placements, tags, txrx, reports_states, app_data_folder)

        for placement in placements.placements:
            if isinstance(
                    placement.subvertex,
                    AbstractSendsBuffersFromHostPartitionedVertex):
                if placement.subvertex.buffering_input():
                    buffer_manager.add_sender_vertex(placement.subvertex)

            if isinstance(placement.subvertex, AbstractReceiveBuffersToHost):
                if placement.subvertex.buffering_output():
                    buffer_manager.add_receiving_vertex(placement.subvertex)

            progress_bar.update()
        progress_bar.end()

        return {"buffer_manager": buffer_manager}
    def __call__(self, placements, file_path):
        """

        :param placements: the memory placements object
        :param file_path: the file path for the placements.json
        :return: file path for the placements.json
        """

        # write basic stuff
        json_placement_dictory_rep = dict()

        progress_bar = ProgressBar(len(placements.placements) + 1,
                                   "converting to json placements")

        # process placements
        for placement in placements:
            json_placement_dictory_rep[placement.subvertex.label] = \
                [placement.x, placement.y]
            progress_bar.update()

        # dump dict into json file
        file_to_write = open(file_path, "w")
        json.dump(json_placement_dictory_rep, file_to_write)
        file_to_write.close()

        # validate the schema
        placements_schema_file_path = os.path.join(
            os.path.dirname(file_format_schemas.__file__), "placements.json"
        )

        file_to_read = open(placements_schema_file_path, "r")
        placements_schema = json.load(file_to_read)

        jsonschema.validate(
            json_placement_dictory_rep, placements_schema)

        progress_bar.update()
        progress_bar.end()

        # return the file format
        return {"FilePlacements": file_path}
    def __call__(self, partitioned_graph, machine, file_path):
        """
        :param partitioned_graph: the partitioned graph
        :param machine: the machine
        :return:
        """

        progress_bar = ProgressBar(
            (len(partitioned_graph.subvertices) +
             len(partitioned_graph.subedges)) + 2,
            "creating json constraints")

        json_constraints_dictory_rep = list()
        self._add_monitor_core_reserve(json_constraints_dictory_rep)
        progress_bar.update()
        self._add_extra_monitor_cores(json_constraints_dictory_rep, machine)
        progress_bar.update()
        self._search_graph_for_placement_constraints(
            json_constraints_dictory_rep, partitioned_graph, machine,
            progress_bar)

        file_to_write = open(file_path, "w")
        json.dump(json_constraints_dictory_rep, file_to_write)
        file_to_write.close()

        # validate the schema
        partitioned_graph_schema_file_path = os.path.join(
            os.path.dirname(file_format_schemas.__file__), "constraints.json"
        )

        # for debug purposes, read schema and validate
        file_to_read = open(partitioned_graph_schema_file_path, "r")
        partitioned_graph_schema = json.load(file_to_read)
        jsonschema.validate(
            json_constraints_dictory_rep, partitioned_graph_schema)

        # complete progress bar
        progress_bar.end()

        return {'constraints': file_path}
    def __call__(self, placements, machine, partitioned_graph, k=1, l=0, m=0,
                 bw_per_route_entry=BW_PER_ROUTE_ENTRY, max_bw=MAX_BW):
        """ Find routes between the subedges with the allocated information,
            placed in the given places

        :param placements: The placements of the subedges
        :type placements:\
                    :py:class:`pacman.model.placements.placements.Placements`
        :param machine: The machine through which the routes are to be found
        :type machine: :py:class:`spinn_machine.machine.Machine`
        :param partitioned_graph: the partitioned_graph object
        :type partitioned_graph:\
                    :py:class:`pacman.partitioned_graph.partitioned_graph.PartitionedGraph`
        :return: The discovered routes
        :rtype:\
                    :py:class:`pacman.model.routing_tables.multicast_routing_tables.MulticastRoutingTables`
        :raise pacman.exceptions.PacmanRoutingException: If something\
                   goes wrong with the routing
        """

        # set up basic data structures
        self._routing_paths = MulticastRoutingPaths()
        self._k = k
        self._l = l
        self._m = m
        self._bw_per_route_entry = bw_per_route_entry
        self._max_bw = max_bw
        self._machine = machine

        nodes_info = self._initiate_node_info(machine)
        dijkstra_tables = self._initiate_dijkstra_tables(machine)
        self._update_all_weights(nodes_info, machine)

        # each subvertex represents a core in the board
        progress = ProgressBar(len(list(placements.placements)),
                               "Creating routing entries")

        for placement in placements.placements:
            subvert = placement.subvertex
            out_going_sub_edges = \
                partitioned_graph.outgoing_subedges_from_subvertex(subvert)
            out_going_sub_edges = filter(
                lambda edge: isinstance(edge, MultiCastPartitionedEdge),
                out_going_sub_edges)

            dest_chips = set()
            subedges_to_route = list()

            for subedge in out_going_sub_edges:
                destination_subvertex = subedge.post_subvertex
                destination_placement = placements.get_placement_of_subvertex(
                    destination_subvertex)

                chip = machine.get_chip_at(destination_placement.x,
                                           destination_placement.y)
                dest_chips.add((chip.x, chip.y))
                subedges_to_route.append(subedge)

            if len(dest_chips) != 0:
                self._update_all_weights(nodes_info, machine)
                self._reset_tables(dijkstra_tables)
                dijkstra_tables[
                    (placement.x, placement.y)]["activated?"] = True
                dijkstra_tables[(placement.x, placement.y)]["lowest cost"] = 0
                self._propagate_costs_until_reached_destinations(
                    dijkstra_tables, nodes_info, dest_chips, placement.x,
                    placement.y)

            for subedge in subedges_to_route:
                dest = subedge.post_subvertex
                dest_placement = placements.get_placement_of_subvertex(dest)
                self._retrace_back_to_source(
                    dest_placement.x, dest_placement.y, dijkstra_tables,
                    dest_placement.p, subedge, nodes_info, placement.p)
            progress.update()
        progress.end()
        return {'routing_paths': self._routing_paths}
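
_propagate_costs_until_reached_destinations and _retrace_back_to_source (not shown) follow the standard Dijkstra pattern over the per-chip tables: repeatedly settle the cheapest unvisited chip, relax its neighbours' costs, then walk back from each destination along decreasing cost. The textbook form of the cost propagation, as a self-contained sketch rather than the class's own method:

import heapq

def dijkstra_costs(links, source):
    """Return the lowest cost from source to every reachable chip.

    links is a dict mapping each chip (x, y) to a dict of
    {neighbouring chip: link weight}.
    """
    costs = {source: 0}
    queue = [(0, source)]
    while queue:
        cost, chip = heapq.heappop(queue)
        if cost > costs.get(chip, float("inf")):
            continue  # stale queue entry; chip already settled more cheaply
        for neighbour, weight in links.get(chip, {}).items():
            new_cost = cost + weight
            if new_cost < costs.get(neighbour, float("inf")):
                costs[neighbour] = new_cost
                heapq.heappush(queue, (new_cost, neighbour))
    return costs
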
    def __call__(
            self, processor_to_app_data_base_address, transceiver,
            placement_to_app_data_files, app_id, verify=False):

        # go through the placements and see if there's any application data to
        # load
        progress_bar = ProgressBar(len(placement_to_app_data_files),
                                   "Loading application data onto the machine")
        for (x, y, p, label) in placement_to_app_data_files:
            logger.debug(
                "loading application data for vertex {}".format(label))
            key = (x, y, p, label)
            start_address = \
                processor_to_app_data_base_address[key]['start_address']
            memory_written = \
                processor_to_app_data_base_address[key]['memory_written']
            memory_used = \
                processor_to_app_data_base_address[key]['memory_used']

            # Allocate the SDRAM requirement and replace the start address
            # assigned via the DSE
            start_address_malloced = \
                transceiver.malloc_sdram(x, y, memory_used, app_id)

            processor_to_app_data_base_address[key]['start_address'] = \
                start_address_malloced

            # set start address to be that of the allocated version
            start_address = start_address_malloced

            application_file_paths = placement_to_app_data_files[key]

            for file_path_for_application_data in application_file_paths:
                application_data_file_reader = SpinnmanFileDataReader(
                    file_path_for_application_data)
                logger.debug(
                    "writing application data for vertex {}".format(label))
                transceiver.write_memory(
                    x, y, start_address, application_data_file_reader,
                    memory_written)
                application_data_file_reader.close()

                if verify:
                    application_data_file_reader = SpinnmanFileDataReader(
                        file_path_for_application_data)
                    all_data = application_data_file_reader.readall()
                    read_data = transceiver.read_memory(
                        x, y, start_address, memory_written)
                    if read_data != all_data:
                        raise Exception("Miss Write of {}, {}, {}, {}"
                                        .format(x, y, p, start_address))
                    application_data_file_reader.close()

                # update user 0 so that it points to the start of the
                # application's data region in SDRAM
                logger.debug(
                    "writing user 0 address for vertex {}".format(label))
                user_0_register_address = \
                    transceiver.get_user_0_register_address_from_core(x, y, p)
                transceiver.write_memory(
                    x, y, user_0_register_address, start_address)
            progress_bar.update()
        progress_bar.end()

        return {"LoadedApplicationDataToken": True}
Example #25
    def __call__(self, graph, machine):

        utility_calls.check_algorithm_can_support_constraints(
            constrained_vertices=graph.vertices,
            supported_constraints=[PartitionerMaximumSizeConstraint],
            abstract_constraint_type=AbstractPartitionerConstraint)

        # start progress bar
        progress_bar = ProgressBar(len(graph.vertices),
                                   "Partitioning graph vertices")
        vertices = graph.vertices
        subgraph = PartitionedGraph(label="partitioned_graph for partitionable"
                                          "_graph {}".format(graph.label))
        graph_to_subgraph_mapper = GraphMapper(graph.label, subgraph.label)
        resource_tracker = ResourceTracker(machine)

        # Partition one vertex at a time
        for vertex in vertices:

            # Get the usage of the first atom, then assume that this
            # will be the usage of all the atoms
            requirements = vertex.get_resources_used_by_atoms(Slice(0, 1),
                                                              graph)

            # Locate the maximum resources available
            max_resources_available = \
                resource_tracker.get_maximum_constrained_resources_available(
                    vertex.constraints)

            # Find the ratio of each of the resources - if 0 is required,
            # assume the ratio is the max available
            atoms_per_sdram = self._get_ratio(
                max_resources_available.sdram.get_value(),
                requirements.sdram.get_value())
            atoms_per_dtcm = self._get_ratio(
                max_resources_available.dtcm.get_value(),
                requirements.dtcm.get_value())
            atoms_per_cpu = self._get_ratio(
                max_resources_available.cpu.get_value(),
                requirements.cpu.get_value())

            max_atom_values = [atoms_per_sdram, atoms_per_dtcm, atoms_per_cpu]

            max_atoms_constraints = utility_calls.locate_constraints_of_type(
                vertex.constraints, PartitionerMaximumSizeConstraint)
            for max_atom_constraint in max_atoms_constraints:
                max_atom_values.append(max_atom_constraint.size)

            atoms_per_core = min(max_atom_values)

            # Partition into subvertices
            counted = 0
            while counted < vertex.n_atoms:

                # Determine subvertex size
                remaining = vertex.n_atoms - counted
                if remaining > atoms_per_core:
                    alloc = atoms_per_core
                else:
                    alloc = remaining

                # Create and store new subvertex, and increment elements
                #  counted
                if counted < 0 or counted + alloc - 1 < 0:
                    raise PacmanPartitionException("Not enough resources"
                                                   " available to create"
                                                   " subvertex")

                vertex_slice = Slice(counted, counted + (alloc - 1))
                subvertex_usage = vertex.get_resources_used_by_atoms(
                    vertex_slice, graph)

                subvert = vertex.create_subvertex(
                    vertex_slice, subvertex_usage,
                    "{}:{}:{}".format(vertex.label, counted,
                                      (counted + (alloc - 1))),
                    partition_algorithm_utilities.
                    get_remaining_constraints(vertex))
                subgraph.add_subvertex(subvert)
                graph_to_subgraph_mapper.add_subvertex(
                    subvert, vertex_slice, vertex)
                counted = counted + alloc

                # update allocated resources
                resource_tracker.allocate_constrained_resources(
                    subvertex_usage, vertex.constraints)

            # update and end progress bars as needed
            progress_bar.update()
        progress_bar.end()

        partition_algorithm_utilities.generate_sub_edges(
            subgraph, graph_to_subgraph_mapper, graph)

        return {'Partitioned_graph': subgraph,
                'Graph_mapper': graph_to_subgraph_mapper}
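
The _get_ratio helper used above is not shown; from the surrounding comment, it divides what is available by what a single atom requires, treating a requirement of zero as "no limit". A plausible sketch under that reading, for illustration only:

def get_ratio(available, required):
    # If an atom needs none of a resource, that resource does not limit the
    # number of atoms per core, so report the full amount available.
    if required == 0:
        return available
    # integer division keeps the resulting atom count whole
    return available // required
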
    def __call__(self, partitioned_graph, file_path):
        """

        :param partitioned_graph:
        :param folder_path:
        :return:
        """
        progress_bar = ProgressBar(len(partitioned_graph.subvertices),
                                   "Converting to json partitioned graph")
        # write basic stuff
        json_graph_dictory_rep = dict()

        # write vertices data
        vertices_resources = dict()
        json_graph_dictory_rep["vertices_resources"] = vertices_resources

        edges_resources = defaultdict()
        json_graph_dictory_rep["edges"] = edges_resources

        for vertex in partitioned_graph.subvertices:

            # handle external devices
            if isinstance(vertex, AbstractVirtualVertex):
                vertex_resources = dict()
                vertices_resources[id(vertex)] = vertex_resources
                vertex_resources["cores"] = 0

            # handle tagged vertices
            elif len(utility_calls.locate_constraints_of_type(
                    vertex.constraints, AbstractTagAllocatorConstraint)) != 0:

                # handle the edge between the tag-able vertex and the fake
                # vertex
                hyper_edge_dict = dict()
                edges_resources[hashlib.md5(
                    vertex.label).hexdigest()] = hyper_edge_dict
                hyper_edge_dict["source"] = str(id(vertex))
                hyper_edge_dict["sinks"] = [hashlib.md5(vertex.label).hexdigest()]
                hyper_edge_dict["weight"] = 1.0
                hyper_edge_dict["type"] = "FAKE_TAG_EDGE"

                # add the tag-able vertex
                vertex_resources = dict()
                vertices_resources[id(vertex)] = vertex_resources
                vertex_resources["cores"] = DEFAULT_NOUMBER_OF_CORES_USED_PER_PARTITIONED_VERTEX
                vertex_resources["sdram"] = int(vertex.resources_required.sdram.get_value())

                # add fake vertex
                vertex_resources = dict()
                vertices_resources[hashlib.md5(
                    vertex.label).hexdigest()] = vertex_resources
                vertex_resources["cores"] = 0
                vertex_resources["sdram"] = 0

            # handle standard vertices
            else:
                vertex_resources = dict()
                vertices_resources[id(vertex)] = vertex_resources
                vertex_resources["cores"] = DEFAULT_NOUMBER_OF_CORES_USED_PER_PARTITIONED_VERTEX
                vertex_resources["sdram"] = int(vertex.resources_required.sdram.get_value())
            vertex_outgoing_partitions = partitioned_graph.outgoing_edges_partitions_from_vertex(vertex)

            # handle the vertex edges
            for vertex_partition in vertex_outgoing_partitions:
                hyper_edge_dict = dict()
                edges_resources["{}:{}".format(id(vertex), vertex_partition)] = hyper_edge_dict
                hyper_edge_dict["source"] = str(id(vertex))

                sinks_string = []
                for edge in vertex_outgoing_partitions[vertex_partition].edges:
                    sinks_string.append(str(id(edge.post_subvertex)))
                hyper_edge_dict["sinks"] = sinks_string
                hyper_edge_dict["weight"] = 1.0
                hyper_edge_dict["type"] = vertex_outgoing_partitions[vertex_partition].type.name.lower()
            progress_bar.update()

        file_to_write = open(file_path, "w")
        json.dump(json_graph_dictory_rep, file_to_write)
        file_to_write.close()

        # validate the schema
        partitioned_graph_schema_file_path = os.path.join(
            os.path.dirname(file_format_schemas.__file__),
            "partitioned_graph.json"
        )
        file_to_read = open(partitioned_graph_schema_file_path, "r")
        partitioned_graph_schema = json.load(file_to_read)
        jsonschema.validate(json_graph_dictory_rep, partitioned_graph_schema)

        progress_bar.end()

        return {"partitioned_graph": file_path}
    def __call__(
            self, partitioned_graph, user_create_database, tags,
            runtime, machine, time_scale_factor, machine_time_step,
            partitionable_graph, graph_mapper, placements, routing_infos,
            router_tables, execute_mapping, database_directory):

        self._writer = DatabaseWriter(database_directory)
        self._user_create_database = user_create_database

        # add database generation if requested
        self._needs_database = \
            helpful_functions.auto_detect_database(partitioned_graph)
        if ((self._user_create_database == "None" and self._needs_database) or
                self._user_create_database == "True"):

            database_progress = ProgressBar(10, "Creating database")

            self._writer.add_system_params(
                time_scale_factor, machine_time_step, runtime)
            database_progress.update()
            self._writer.add_machine_objects(machine)
            database_progress.update()
            self._writer.add_partitioned_vertices(
                partitioned_graph, graph_mapper, partitionable_graph)
            database_progress.update()
            self._writer.add_placements(placements, partitioned_graph)
            database_progress.update()
            self._writer.add_routing_infos(
                routing_infos, partitioned_graph)
            database_progress.update()
            self._writer.add_routing_tables(router_tables)
            database_progress.update()
            self._writer.add_tags(partitioned_graph, tags)
            database_progress.update()
            if execute_mapping:
                self._writer.create_atom_to_event_id_mapping(
                    graph_mapper=graph_mapper,
                    partitionable_graph=partitionable_graph,
                    partitioned_graph=partitioned_graph,
                    routing_infos=routing_infos)
            database_progress.update()
            database_progress.update()
            database_progress.end()

        return {"database_interface": self,
                "database_file_path": self.database_file_path}
    def __call__(self, machine, file_path):
        """

        :param machine:
        :param file_path:
        :return:
        """
        progress_bar = ProgressBar(
            ((machine.max_chip_x + 1) * (machine.max_chip_y + 1)) + 2,
            "Converting to json machine")

        # write the machine dimensions and the homogeneous chip resources
        json_dictory_rep = dict()
        json_dictory_rep['width'] = machine.max_chip_x + 1
        json_dictory_rep['height'] = machine.max_chip_y + 1
        json_dictory_rep['chip_resources'] = dict()
        json_dictory_rep['chip_resources']['cores'] = CHIP_HOMOGENIOUS_CORES
        json_dictory_rep['chip_resources']['sdram'] = CHIP_HOMOGENIOUS_SDRAM
        json_dictory_rep['chip_resources']['sram'] = CHIP_HOMOGENIOUS_SRAM
        json_dictory_rep['chip_resources']["router_entries"] = \
            ROUTER_HOMOGENIOUS_ENTRIES
        json_dictory_rep['chip_resources']['tags'] = CHIP_HOMOGENIOUS_TAGS

        # handle exceptions
        json_dictory_rep['dead_chips'] = list()
        json_dictory_rep['dead_links'] = list()
        chip_resource_exceptions = dict()

        # write dead chips
        for x_coord in range(0, machine.max_chip_x + 1):
            for y_coord in range(0, machine.max_chip_y + 1):
                if (not machine.is_chip_at(x_coord, y_coord) or
                        machine.get_chip_at(x_coord, y_coord).virtual):
                    json_dictory_rep['dead_chips'].append([x_coord, y_coord])
                else:
                    # write dead links
                    for link_id in range(0, ROUTER_MAX_NUMBER_OF_LINKS):
                        router = machine.get_chip_at(x_coord, y_coord).router
                        if not router.is_link(link_id):
                            json_dictory_rep['dead_links'].append(
                                [x_coord, y_coord, "{}".format(
                                 constants.EDGES(link_id).name.lower())])
                    self._check_for_exceptions(
                        json_dictory_rep, x_coord, y_coord, machine,
                        chip_resource_exceptions)
                progress_bar.update()

        # convert dict into list
        chip_resource_exceptions_list = []
        for (chip_x, chip_y), resources in chip_resource_exceptions.items():
            chip_resource_exceptions_list.append([chip_x, chip_y, resources])
        progress_bar.update()

        # store exceptions into json form
        json_dictory_rep['chip_resource_exceptions'] = \
            chip_resource_exceptions_list

        # dump to json file
        with open(file_path, "w") as file_to_write:
            json.dump(json_dictory_rep, file_to_write)

        # validate the schema
        machine_schema_file_path = os.path.join(
            os.path.dirname(file_format_schemas.__file__), "machine.json"
        )
        with open(machine_schema_file_path, "r") as file_to_read:
            machine_schema = json.load(file_to_read)
        jsonschema.validate(json_dictory_rep, machine_schema)

        # update and complete progress bar
        progress_bar.update()
        progress_bar.end()

        return {'file_machine': file_path}
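
The JSON machine file written above has the shape sketched below; the resource numbers are placeholders rather than the real CHIP_HOMOGENIOUS_* constants.

# Hedged illustration of the output layout for a 2 x 2 machine; all numbers
# are placeholders.
example_json_machine = {
    "width": 2,
    "height": 2,
    "chip_resources": {
        "cores": 18, "sdram": 119275520, "sram": 24320,
        "router_entries": 1024, "tags": 8,
    },
    "dead_chips": [[1, 1]],                             # [x, y]
    "dead_links": [[0, 0, "north"]],                    # [x, y, link direction name]
    "chip_resource_exceptions": [[0, 0, {"tags": 7}]],  # per-chip overrides
}
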
    def __call__(
            self, machine, partitionable_graph=None, partitioned_graph=None):
        """

        :param partitionable_graph:
        :param partitioned_graph:
        :param machine:
        :return:
        """
        if partitionable_graph is not None:

            # Go through the groups and allocate keys
            progress_bar = ProgressBar(
                (len(partitionable_graph.vertices) + len(list(machine.chips))),
                "Allocating virtual identifiers")
        elif partitioned_graph is not None:
            # Go through the groups and allocate keys
            progress_bar = ProgressBar(
                (len(partitioned_graph.subvertices) +
                 len(list(machine.chips))),
                "Allocating virtual identifiers")
        else:
            progress_bar = ProgressBar(len(list(machine.chips)),
                                       "Allocating virtual identifiers")

        # allocate standard ids for real chips
        for chip in machine.chips:
            expected_chip_id = (chip.x << 8) + chip.y
            self._allocate_elements(expected_chip_id, 1)
            progress_bar.update()

        if partitionable_graph is not None:

            # allocate ids for virtual chips
            for vertex in partitionable_graph.vertices:
                if isinstance(vertex, AbstractVirtualVertex):
                    chip_id_x, chip_id_y = self._allocate_id()
                    vertex.set_virtual_chip_coordinates(chip_id_x, chip_id_y)
                    machine_algorithm_utilities.create_virtual_chip(
                        machine, vertex)
                progress_bar.update()
            progress_bar.end()
        elif partitioned_graph is not None:

            # allocate ids for virtual chips
            for vertex in partitioned_graph.subvertices:
                if isinstance(vertex, AbstractVirtualVertex):
                    chip_id_x, chip_id_y = self._allocate_id()
                    vertex.set_virtual_chip_coordinates(chip_id_x, chip_id_y)
                    machine_algorithm_utilities.create_virtual_chip(
                        machine, vertex)
                progress_bar.update()
            progress_bar.end()

        return {"machine": machine}
    def __call__(self, graph, machine):
        """ Partition a partitionable_graph so that each subvertex will fit\
            on a processor within the machine

        :param graph: The partitionable_graph to partition
        :type graph:\
                    :py:class:`pacman.model.graph.partitionable_graph.PartitionableGraph`
        :param machine: The machine with respect to which to partition the\
                    partitionable_graph
        :type machine: :py:class:`spinn_machine.machine.Machine`
        :return: A partitioned_graph of partitioned vertices and partitioned\
                    edges
        :rtype:\
                    :py:class:`pacman.model.partitioned_graph.partitioned_graph.PartitionedGraph`
        :raise pacman.exceptions.PacmanPartitionException: If something\
                   goes wrong with the partitioning
        """
        utility_calls.check_algorithm_can_support_constraints(
            constrained_vertices=graph.vertices,
            abstract_constraint_type=AbstractPartitionerConstraint,
            supported_constraints=[PartitionerMaximumSizeConstraint,
                                   PartitionerSameSizeAsVertexConstraint])

        # Load the vertices and create the subgraph to fill
        vertices = graph.vertices
        subgraph = PartitionedGraph(
            label="partitioned graph for {}".format(graph.label))
        graph_mapper = GraphMapper(graph.label, subgraph.label)

        # sort the vertices by constraint authority
        vertices = utility_calls.sort_objects_by_constraint_authority(vertices)

        # Set up the progress
        n_atoms = 0
        for vertex in vertices:
            n_atoms += vertex.n_atoms
        progress_bar = ProgressBar(n_atoms, "Partitioning graph vertices")

        resource_tracker = ResourceTracker(machine)

        # Partition one vertex at a time
        for vertex in vertices:

            # check that the vertex hasn't already been partitioned
            subverts_from_vertex = \
                graph_mapper.get_subvertices_from_vertex(vertex)

            # if not, partition
            if subverts_from_vertex is None:
                self._partition_vertex(vertex, subgraph, graph_mapper,
                                       resource_tracker, graph)
            progress_bar.update(vertex.n_atoms)
        progress_bar.end()

        partition_algorithm_utilities.generate_sub_edges(
            subgraph, graph_mapper, graph)

        results = dict()
        results['partitioned_graph'] = subgraph
        results['graph_mapper'] = graph_mapper
        return results
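
All of the algorithms collected here follow the same calling convention: __call__ accepts named inputs and returns a dict of named outputs that the next algorithm in the chain picks up. A hedged, self-contained sketch of that pattern (the class and values are illustrative only, not a real PACMAN algorithm):

# Illustrative only: shows the input/output-dictionary convention.
class ExampleAlgorithm(object):
    def __call__(self, graph, machine):
        # ... do the actual work here ...
        results = dict()
        results['partitioned_graph'] = "a PartitionedGraph would go here"
        results['graph_mapper'] = "a GraphMapper would go here"
        return results

outputs = ExampleAlgorithm()("a graph", "a machine")
print(sorted(outputs.keys()))   # ['graph_mapper', 'partitioned_graph']
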