Example #1
def _adjust_ge100_branches(levellist):
    """This adjust branches that are greater than or equal to 100% to be
    100% - sum(other branches).  This helps prevent unphysical errors
    downstream.
    """
    n = len(levellist)
    brsum = defaultdict(float)
    bridx = defaultdict(lambda: (-1, -1.0))
    baddies = []
    for i, (nuc, rx, hl, lvl, br, ms, sp) in enumerate(levellist):
        if rx == 0:
            continue
        if br >= bridx[nuc][1]:
            bridx[nuc] = (i, br)
        brsum[nuc] += br
        nucrx = (nuc, rx)
        if nucrx in _BAD_RX:
            baddies.append(i)
    # adjust branch ratios
    for nuc, (i, br) in bridx.items():
        row = levellist[i]
        # this line ensures that all branches sum to 100.0 within floating point
        new_br = 100.0 - brsum[nuc] + br
        new_row = row[:4] + (new_br,) + row[5:]
        levellist[i] = new_row
    # remove bad reaction rows
    for i in baddies[::-1]:
        del levellist[i]
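For reference, a minimal standalone sketch of the same bookkeeping (not taken from pyne; the nuclide name and values below are made up): a defaultdict(float) accumulates the per-nuclide branch sum while a defaultdict(lambda: (-1, -1.0)) tracks the index of the largest branch, which is then rebalanced so the total is exactly 100.0.

from collections import defaultdict

def rebalance(rows):
    # rows: list of (nuc, br) pairs; returns rows adjusted so each nuc's branches sum to 100.0
    brsum = defaultdict(float)               # nuc -> running sum of branch ratios
    bridx = defaultdict(lambda: (-1, -1.0))  # nuc -> (index, value) of the largest branch seen
    for i, (nuc, br) in enumerate(rows):
        if br >= bridx[nuc][1]:
            bridx[nuc] = (i, br)
        brsum[nuc] += br
    for nuc, (i, br) in bridx.items():
        rows[i] = (nuc, 100.0 - brsum[nuc] + br)
    return rows

print(rebalance([("U235", 60.0), ("U235", 45.0)]))  # [('U235', 55.0), ('U235', 45.0)]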
 def resume(self):
     """ Resets states so that it can behave in a resumed mode
     """
     self._end_buffering_state = dict()
     self._is_flushed = defaultdict(lambda: False)
     self._sequence_no = defaultdict(lambda: 0xFF)
     self._last_packet_received = defaultdict(lambda: None)
     self._last_packet_sent = defaultdict(lambda: None)
     self._end_buffering_sequence_no = dict()
 def reset(self):
     if os.path.exists(self._db_file):
         if self._db:
             self._db.close()
         os.remove(self._db_file)
     self._db = SqlLiteDatabase(self._db_file)
     self._is_flushed = defaultdict(lambda: False)
     self._sequence_no = defaultdict(lambda: 0xFF)
     self._last_packet_received = defaultdict(lambda: None)
     self._last_packet_sent = defaultdict(lambda: None)
     self._end_buffering_sequence_no = dict()
     self._end_buffering_state = dict()
Example #6
    def __call__(self,
                 live_packet_gatherer_parameters,
                 machine,
                 machine_graph,
                 application_graph=None,
                 graph_mapper=None):
        """ Add LPG vertices on Ethernet connected chips as required.

        :param live_packet_gatherer_parameters:\
            the Live Packet Gatherer parameters requested by the script
        :param machine: the SpiNNaker machine as discovered
        :param application_graph: the application graph
        :param machine_graph: the machine graph
        :return: mapping between LPG parameters and LPG vertex
        """
        # pylint: disable=too-many-arguments

        # create progress bar
        progress = ProgressBar(machine.ethernet_connected_chips,
                               string_describing_what_being_progressed=(
                                   "Adding Live Packet Gatherers to Graph"))

        # Keep track of the vertices added by parameters and board address
        lpg_params_to_vertices = defaultdict(dict)

        # for every Ethernet connected chip, add the gatherers required
        for chip in progress.over(machine.ethernet_connected_chips):
            for params in live_packet_gatherer_parameters:
                if (params.board_address is None
                        or params.board_address == chip.ip_address):
                    lpg_params_to_vertices[params][chip.x, chip.y] = \
                        self._add_lpg_vertex(application_graph, graph_mapper,
                                             machine_graph, chip, params)

        return lpg_params_to_vertices
    def __call__(
            self, live_packet_gatherer_parameters, machine, machine_graph,
            application_graph=None, graph_mapper=None):
        """ Add LPG vertices on Ethernet connected chips as required.

        :param live_packet_gatherer_parameters:\
            the Live Packet Gatherer parameters requested by the script
        :param machine: the SpiNNaker machine as discovered
        :param application_graph: the application graph
        :param machine_graph: the machine graph
        :return: mapping between LPG parameters and LPG vertex
        """
        # pylint: disable=too-many-arguments

        # create progress bar
        progress = ProgressBar(
            machine.ethernet_connected_chips,
            string_describing_what_being_progressed=(
                "Adding Live Packet Gatherers to Graph"))

        # Keep track of the vertices added by parameters and board address
        lpg_params_to_vertices = defaultdict(dict)

        # for every Ethernet connected chip, add the gatherers required
        for chip in progress.over(machine.ethernet_connected_chips):
            for params in live_packet_gatherer_parameters:
                if (params.board_address is None or
                        params.board_address == chip.ip_address):
                    lpg_params_to_vertices[params][chip.x, chip.y] = \
                        self._add_lpg_vertex(application_graph, graph_mapper,
                                             machine_graph, chip, params)

        return lpg_params_to_vertices
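A small self-contained sketch of the defaultdict(dict) pattern used above, with placeholder strings standing in for the LPG parameter objects and vertices (the real objects come from the SpiNNaker tool chain):

from collections import defaultdict

lpg_params_to_vertices = defaultdict(dict)   # parameters -> {(x, y): vertex}
for params in ("params_a", "params_b"):      # stand-ins for LPG parameter objects
    for x, y in ((0, 0), (4, 8)):            # stand-ins for Ethernet chip coordinates
        lpg_params_to_vertices[params][x, y] = "vertex_on_{}_{}".format(x, y)

print(lpg_params_to_vertices["params_a"][0, 0])   # vertex_on_0_0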
Example #8
    def __init__(self):

        # Mapping of (board address, tag) to IPTag
        self._ip_tags = dict()

        # Mapping of (board address, tag) to ReverseIPTag
        self._reverse_ip_tags = dict()

        # Mapping of vertex to list of IPTag
        self._ip_tags_by_vertex = defaultdict(list)

        # Mapping of vertex to list of ReverseIPTag
        self._reverse_ip_tags_by_vertex = defaultdict(list)

        # Set of ports already assigned on a board
        self._ports_assigned = set()
    def __init__(
            self, x, y, ip_address, report_default_directory,
            write_data_speed_up_report, constraints=None):
        super(DataSpeedUpPacketGatherMachineVertex, self).__init__(
            label="mc_data_speed_up_packet_gatherer_on_{}_{}".format(x, y),
            constraints=constraints)

        # data holders for the output, and sequence numbers
        self._view = None
        self._max_seq_num = None
        self._output = None

        # Create a connection to be used
        self._x = x
        self._y = y
        self._ip_address = ip_address
        self._remote_tag = None

        # local provenance storage
        self._provenance_data_items = defaultdict(list)

        # create report if it doesn't already exist
        self._report_path = \
            os.path.join(report_default_directory, self.REPORT_NAME)
        self._write_data_speed_up_report = write_data_speed_up_report

        # Stored reinjection status for resetting timeouts
        self._last_status = None
Example #10
    def __init__(self, n_neurons, delay_per_stage, source_vertex,
                 machine_time_step, timescale_factor, constraints=None,
                 label="DelayExtension"):
        """
        :param n_neurons: the number of neurons
        :param delay_per_stage: the delay per stage
        :param source_vertex: where messages are coming from
        :param machine_time_step: the length of the machine time step
        :param timescale_factor: the slowdown factor that has been applied
        :param constraints: the vertex constraints
        :param label: the vertex label
        """
        # pylint: disable=too-many-arguments
        super(DelayExtensionVertex, self).__init__(label, constraints, 256)

        self._source_vertex = source_vertex
        self._n_delay_stages = 0
        self._delay_per_stage = delay_per_stage
        self._delay_generator_data = defaultdict(list)
        self._n_subvertices = 0
        self._n_data_specs = 0

        # atom store
        self._n_atoms = n_neurons

        # Dictionary of vertex_slice -> delay block for data specification
        self._delay_blocks = dict()

        self.add_constraint(
            SameAtomsAsVertexConstraint(source_vertex))
Example #11
    def test_pg_files(self):
        """
        That the pg files command tells us which files are associated with
        a particular PG
        """
        file_count = 20
        self.mount_a.run_shell(["mkdir", "mydir"])
        self.mount_a.create_n_files("mydir/myfile", file_count)

        # Some files elsewhere in the system that we will ignore
        # to check that the tool is filtering properly
        self.mount_a.run_shell(["mkdir", "otherdir"])
        self.mount_a.create_n_files("otherdir/otherfile", file_count)

        pgs_to_files = defaultdict(list)
        # Rough (slow) reimplementation of the logic
        for i in range(0, file_count):
            file_path = "mydir/myfile_{0}".format(i)
            ino = self.mount_a.path_to_ino(file_path)
            obj = "{0:x}.{1:08x}".format(ino, 0)
            pgid = json.loads(self.fs.mon_manager.raw_cluster_cmd(
                "osd", "map", self.fs.get_data_pool_name(), obj,
                "--format=json-pretty"
            ))['pgid']
            pgs_to_files[pgid].append(file_path)
            log.info("{0}: {1}".format(file_path, pgid))

        pg_count = self.fs.get_pgs_per_fs_pool()
        for pg_n in range(0, pg_count):
            pg_str = "{0}.{1}".format(self.fs.get_data_pool_id(), pg_n)
            out = self.fs.data_scan(["pg_files", "mydir", pg_str])
            lines = [l for l in out.split("\n") if l]
            log.info("{0}: {1}".format(pg_str, lines))
            self.assertSetEqual(set(lines), set(pgs_to_files[pg_str]))
Example #13
 def _structure_dict_entry():
     """Static method to generate entries for the structure dict."""
     return {
         "pin": set(),
         "rdesc": set(),
         "rprop": set(),
         "pin_rdesc_rprop": defaultdict(lambda: {"data_tuples": []}),
     }
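The lambda factory matters here: every new (pin, rdesc, rprop) key gets its own fresh dict with its own "data_tuples" list, as the toy sketch below shows (keys and values are made up):

from collections import defaultdict

pin_rdesc_rprop = defaultdict(lambda: {"data_tuples": []})
pin_rdesc_rprop[(1, 0, 0)]["data_tuples"].append((0.0, 1.0))   # entry created on first access
pin_rdesc_rprop[(2, 0, 0)]["data_tuples"].append((0.5, 2.0))   # each key gets its own list

print(pin_rdesc_rprop[(1, 0, 0)])   # {'data_tuples': [(0.0, 1.0)]}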
Example #14
File: endl.py Project: opotowsky/pyne
 def _structure_dict_entry():
     """Static method to generate entries for the structure dict."""
     return {
         'pin': set(),
         'rdesc': set(),
         'rprop': set(),
         'pin_rdesc_rprop': defaultdict(lambda: {'data_tuples': []})
     }
Example #15
    def get_all_perf_counters(self, prio_limit=PRIO_USEFUL):
        """
        Return the perf counters currently known to this ceph-mgr
        instance, filtered by priority equal to or greater than `prio_limit`.

        The result is a map of string to dict, associating services
        (like "osd.123") with their counters.  The counter
        dict for each service maps counter paths to a counter
        info structure, which is the information from
        the schema, plus an additional "value" member with the latest
        value.
        """

        result = defaultdict(dict)

        for server in self.list_servers():
            for service in server['services']:
                if service['type'] not in ("rgw", "mds", "osd", "mon"):
                    continue

                schema = self.get_perf_schema(service['type'], service['id'])
                if not schema:
                    self.log.warn("No perf counter schema for {0}.{1}".format(
                        service['type'], service['id']))
                    continue

                # Value is returned in a potentially-multi-service format,
                # get just the service we're asking about
                svc_full_name = "{0}.{1}".format(service['type'],
                                                 service['id'])
                schema = schema[svc_full_name]

                # Populate latest values
                for counter_path, counter_schema in schema.items():
                    # self.log.debug("{0}: {1}".format(
                    #     counter_path, json.dumps(counter_schema)
                    # ))
                    if counter_schema['priority'] < prio_limit:
                        continue

                    counter_info = dict(counter_schema)

                    # Also populate count for the long running avgs
                    if counter_schema['type'] & self.PERFCOUNTER_LONGRUNAVG:
                        v, c = self.get_latest_avg(service['type'],
                                                   service['id'], counter_path)
                        counter_info['value'], counter_info['count'] = v, c
                        result[svc_full_name][counter_path] = counter_info
                    else:
                        counter_info['value'] = self.get_latest(
                            service['type'], service['id'], counter_path)

                    result[svc_full_name][counter_path] = counter_info

        self.log.debug("returning {0} counter".format(len(result)))

        return result
    def __init__(self, connection_selector):
        super(ReadIOBufProcess, self).__init__(connection_selector)

        # A dictionary of (x, y, p) -> iobuf address
        self._iobuf_address = dict()

        # A dictionary of (x, y, p) -> OrderedDict(n) -> bytearray
        self._iobuf = defaultdict(OrderedDict)

        # A dictionary of (x, y, p) -> OrderedDict(n) -> memoryview
        self._iobuf_view = defaultdict(OrderedDict)

        # A list of extra reads that need to be done as a result of the first
        # read = list of (x, y, p, n, base_address, size, offset)
        self._extra_reads = list()

        # A list of next reads that need to be done as a result of the first
        # read = list of (x, y, p, n, next_address, first_read_size)
        self._next_reads = list()
Example #17
File: endl.py Project: opotowsky/pyne
 def __init__(self, fh):
     self.structure = defaultdict(Library._structure_dict_entry)
     self.intdict = {
         0: self._linlin,
         2: self._linlin,
         3: self._loglin,
         4: self._linlog,
         5: self._loglog,
     }
     self.fh = fh
     # read headers for all tables
     self._read_headers()
def _get_all_values(worksheet, evaluate_formulas):
    data = worksheet.spreadsheet.values_get(
        worksheet.title,
        params={
            "valueRenderOption": (
                "UNFORMATTED_VALUE" if evaluate_formulas else "FORMULA"
            ),
            "dateTimeRenderOption": "FORMATTED_STRING",
        },
    )
    (row_offset, column_offset) = (1, 1)
    (last_row, last_column) = (worksheet.row_count, worksheet.col_count)
    values = data.get("values", [])

    rect_values = fill_gaps(
        values,
        rows=last_row - row_offset + 1,
        cols=last_column - column_offset + 1,
    )

    cells = [
        Cell(row=i + row_offset, col=j + column_offset, value=value)
        for i, row in enumerate(rect_values)
        for j, value in enumerate(row)
    ]

    # defaultdicts fill in gaps for empty rows/cells not returned by gdocs
    rows = defaultdict(lambda: defaultdict(str))
    for cell in cells:
        row = rows.setdefault(int(cell.row), defaultdict(str))
        row[cell.col] = cell.value

    if not rows:
        return []

    all_row_keys = chain.from_iterable(row.keys() for row in rows.values())
    rect_cols = range(1, max(all_row_keys) + 1)
    rect_rows = range(1, max(rows.keys()) + 1)

    return [[rows[i][j] for j in rect_cols] for i in rect_rows]
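A reduced sketch of the nested defaultdict used above: unset cells read back as empty strings, so the rectangular list of lists can be rebuilt without special-casing gaps (the cell coordinates below are invented):

from collections import defaultdict

rows = defaultdict(lambda: defaultdict(str))   # row -> (col -> value); missing cells read back as ""
rows[1][1] = "A1"
rows[3][2] = "B3"

last_row = max(rows.keys())
last_col = max(col for row in rows.values() for col in row.keys())
print([[rows[i][j] for j in range(1, last_col + 1)] for i in range(1, last_row + 1)])
# [['A1', ''], ['', ''], ['', 'B3']]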
Example #19
class TestRecordableSpikeInjector(BaseTestCase):

    _n_spikes = defaultdict(lambda: 0)
    _n_neurons = 100

    def _inject(self, label, connection):
        time.sleep(0.1)
        for _ in range(5000):
            neuron_id = randint(0, self._n_neurons - 1)
            self._n_spikes[neuron_id] += 1
            connection.send_spike(label, neuron_id)
            time.sleep(0.001)

    def recordable_spike_injector(self):
        p.setup(1.0)
        pop = p.Population(self._n_neurons,
                           p.external_devices.SpikeInjector(),
                           label="input")
        pop.record("spikes")

        connection = p.external_devices.SpynnakerLiveSpikesConnection(
            send_labels=["input"])
        connection.add_start_callback("input", self._inject)

        p.run(10000)
        spikes = pop.get_data("spikes").segments[0].spiketrains
        p.end()

        spike_trains = dict()
        for spiketrain in spikes:
            i = spiketrain.annotations['source_index']
            if __name__ == "__main__":
                if self._n_spikes[i] != len(spiketrain):
                    print(
                        "Incorrect number of spikes, expected {} but got {}:".
                        format(self._n_spikes[i], len(spiketrain)))
                    print(spiketrain)
            else:
                assert self._n_spikes[i] == len(spiketrain)
            spike_trains[i] = spiketrain

        for (index, count) in iteritems(self._n_spikes):
            if __name__ == "__main__":
                if index not in spike_trains:
                    print("Neuron {} should have spiked {} times but didn't".
                          format(index, count))
            else:
                assert index in spike_trains

    def test_recordable_spike_injector(self):
        self.runsafe(self.recordable_spike_injector)
    def test_too_many_ip_tags_for_1_board(self):
        n_extra_vertices = 3
        machine = VirtualMachine(12, 12, with_wrap_arounds=True)
        eth_chips = machine.ethernet_connected_chips
        eth_chip = eth_chips[0]
        eth_chip_2 = machine.get_chip_at(eth_chip.x + 1, eth_chip.y + 1)
        eth_procs = [
            proc.processor_id for proc in eth_chip.processors
            if not proc.is_monitor]
        procs = [proc for proc in eth_chip_2.processors if not proc.is_monitor]
        eth2_procs = [proc.processor_id for proc in procs]
        proc = procs[-1]
        eth_vertices = [
            SimpleMachineVertex(
                ResourceContainer(iptags=[IPtagResource(
                    "127.0.0.1", port=tag, strip_sdp=True)]),
                label="Ethernet Vertex {}".format(proc))
            for tag in eth_chip.tag_ids]
        eth2_vertices = [
            SimpleMachineVertex(
                ResourceContainer(iptags=[IPtagResource(
                    "127.0.0.1", port=10000 + tag, strip_sdp=True)]),
                label="Ethernet 2 Vertex {}".format(proc))
            for tag in range(n_extra_vertices)]
        placements = Placements(
            Placement(vertex, eth_chip.x, eth_chip.y, proc)
            for proc, vertex in zip(eth_procs, eth_vertices))
        placements.add_placements(
            Placement(vertex, eth_chip_2.x, eth_chip_2.y, proc)
            for proc, vertex in zip(eth2_procs, eth2_vertices))
        allocator = BasicTagAllocator()
        _, _, tags = allocator(
            machine, plan_n_timesteps=None, placements=placements)

        tags_by_board = defaultdict(set)
        for vertices in (eth_vertices, eth2_vertices):
            for vertex in vertices:
                iptags = tags.get_ip_tags_for_vertex(vertex)
                self.assertEqual(
                    len(iptags), 1, "Incorrect number of tags assigned")
                placement = placements.get_placement_of_vertex(vertex)
                print(placement, "has tag", iptags[0])
                self.assertFalse(
                    iptags[0].tag in tags_by_board[iptags[0].board_address],
                    "Tag used more than once")
                tags_by_board[iptags[0].board_address].add(iptags[0].tag)

        self.assertEqual(
            len(tags_by_board[eth_chip.ip_address]), len(eth_chip.tag_ids),
            "Wrong number of tags assigned to first Ethernet")
Example #21
    def __init__(self, ctx, fscid=None, name='cephfs', create=False):
        # Deliberately skip calling parent constructor
        self._ctx = ctx

        self.id = None
        self.name = None
        self.ec_profile = None
        self.metadata_pool_name = None
        self.metadata_overlay = False
        self.data_pool_name = None
        self.data_pools = None

        # Hack: cheeky inspection of ceph.conf to see what MDSs exist
        self.mds_ids = set()
        for line in open("ceph.conf").readlines():
            match = re.match(r"^\[mds\.(.+)\]$", line)
            if match:
                self.mds_ids.add(match.group(1))

        if not self.mds_ids:
            raise RuntimeError("No MDSs found in ceph.conf!")

        self.mds_ids = list(self.mds_ids)

        log.info("Discovered MDS IDs: {0}".format(self.mds_ids))

        self.mon_manager = LocalCephManager()

        self.mds_daemons = dict([(id_, LocalDaemon("mds", id_)) for id_ in self.mds_ids])

        self.client_remote = LocalRemote()

        self._conf = defaultdict(dict)

        if name is not None:
            if fscid is not None:
                raise RuntimeError("cannot specify fscid when creating fs")
            if create and not self.legacy_configured():
                self.create()
        else:
            if fscid is not None:
                self.id = fscid
                self.getinfo(refresh=True)

        # Stash a reference to the first created filesystem on ctx, so
        # that if someone drops to the interactive shell they can easily
        # poke our methods.
        if not hasattr(self._ctx, "filesystem"):
            self._ctx.filesystem = self
Example #22
    def get_pool_list_with_stats(cls, application=None):
        # pylint: disable=too-many-locals
        pools = cls.get_pool_list(application)

        pools_w_stats = []

        pg_summary = mgr.get("pg_summary")
        pool_stats = defaultdict(
            lambda: defaultdict(lambda: collections.deque(maxlen=10)))

        df = mgr.get("df")
        pool_stats_dict = dict([(p['id'], p['stats']) for p in df['pools']])
        now = time.time()
        for pool_id, stats in pool_stats_dict.items():
            for stat_name, stat_val in stats.items():
                pool_stats[pool_id][stat_name].appendleft((now, stat_val))

        for pool in pools:
            pool['pg_status'] = pg_summary['by_pool'][pool['pool'].__str__()]
            stats = pool_stats[pool['pool']]
            s = {}

            def get_rate(series):
                if len(series) >= 2:
                    # a deque cannot be sliced directly, and two samples are
                    # needed to compute a rate
                    return differentiate(*list(series)[0:2])
                return 0

            for stat_name, stat_series in stats.items():
                s[stat_name] = {
                    'latest': stat_series[0][1],
                    'rate': get_rate(stat_series),
                    'series': [i for i in stat_series]
                }
            pool['stats'] = s
            pools_w_stats.append(pool)
        return pools_w_stats
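A self-contained sketch of the nested defaultdict-of-deques pattern above, with a simple two-sample rate helper standing in for the dashboard's differentiate() (pool id, stat name, and values are invented):

import collections
import time
from collections import defaultdict

# pool id -> stat name -> deque of (timestamp, value), newest first, at most 10 samples
pool_stats = defaultdict(
    lambda: defaultdict(lambda: collections.deque(maxlen=10)))

def get_rate(series):
    # change per second between the two newest samples
    if len(series) >= 2:
        (t1, v1), (t0, v0) = list(series)[0:2]
        return (v1 - v0) / max(t1 - t0, 1e-9)
    return 0

now = time.time()
pool_stats[1]["rd_bytes"].appendleft((now - 5.0, 1000))
pool_stats[1]["rd_bytes"].appendleft((now, 6000))
print(get_rate(pool_stats[1]["rd_bytes"]))   # 1000.0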
Example #23
def get_versioned_symbols(libs):
    """Get versioned symbols used in libraries
    :param libs: {realpath: soname} dict to search for versioned symbols e.g.
    {'/path/to/external_ref.so.1.2.3': 'external_ref.so.1'}
    :return: {soname: {depname: set([symbol_version])}} e.g.
    {'external_ref.so.1': {'libc.so.6': set(['GLIBC_2.5','GLIBC_2.12'])}}
    """
    result = {}
    for path, elf in elf_file_filter(libs.keys()):
        # {depname: set(symbol_version)}, e.g.
        # {'libc.so.6': set(['GLIBC_2.5','GLIBC_2.12'])}
        elf_versioned_symbols = defaultdict(lambda: set())
        for key, value in elf_find_versioned_symbols(elf):
            log.debug('path %s, key %s, value %s', path, key, value)
            elf_versioned_symbols[key].add(value)
        result[libs[path]] = elf_versioned_symbols
    return result
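Note that defaultdict(lambda: set()) behaves the same as the shorter defaultdict(set). A toy sketch of the grouping this function performs (library and symbol names are illustrative only):

from collections import defaultdict

elf_versioned_symbols = defaultdict(set)   # equivalent to defaultdict(lambda: set())
for dep, version in [("libc.so.6", "GLIBC_2.5"), ("libc.so.6", "GLIBC_2.12")]:
    elf_versioned_symbols[dep].add(version)

print(dict(elf_versioned_symbols))   # {'libc.so.6': {'GLIBC_2.5', 'GLIBC_2.12'}} (set order may vary)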
    def __init__(self, population):
        """
        :param population: the population to record for
        """

        self._population = population

        # file flags, allows separate files for the recorded variables
        self._write_to_files_indicators = {
            'spikes': None,
            'gsyn_exc': None,
            'gsyn_inh': None,
            'v': None}

        # Create a dict of variable name -> bool array of indices in population
        # that are recorded (initially all False)
        self._indices_to_record = defaultdict(
            lambda: numpy.repeat(False, population.size))
Example #25
    def __init__(self, population):
        """
        :param population: the population to record for
        """

        self.__population = population

        # file flags, allows separate files for the recorded variables
        self.__write_to_files_indicators = {
            'spikes': None,
            'gsyn_exc': None,
            'gsyn_inh': None,
            'v': None}

        # Create a dict of variable name -> bool array of indices in population
        # that are recorded (initially all False)
        self.__indices_to_record = defaultdict(
            lambda: numpy.repeat(False, population.size))
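A reduced sketch of the per-variable boolean mask above, using a hypothetical population size instead of a real Population object:

from collections import defaultdict
import numpy

population_size = 4   # hypothetical population size
indices_to_record = defaultdict(lambda: numpy.repeat(False, population_size))

indices_to_record["spikes"][[0, 2]] = True   # mark neurons 0 and 2 for spike recording
print(indices_to_record["spikes"])           # [ True False  True False]
print(indices_to_record["v"])                # untouched variable -> all False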
    def __call__(self, machine, file_path):
        """
        :param machine:
        :param file_path:
        """
        progress = ProgressBar(
            (machine.max_chip_x + 1) * (machine.max_chip_y + 1) + 2,
            "Converting to JSON machine")

        # write basic stuff
        json_obj = {
            "width": machine.max_chip_x + 1,
            "height": machine.max_chip_y + 1,
            "chip_resources": {
                "cores": CHIP_HOMOGENEOUS_CORES,
                "sdram": CHIP_HOMOGENEOUS_SDRAM,
                "sram": CHIP_HOMOGENEOUS_SRAM,
                "router_entries": ROUTER_HOMOGENEOUS_ENTRIES,
                "tags": CHIP_HOMOGENEOUS_TAGS},
            "dead_chips": [],
            "dead_links": []}

        # handle exceptions (dead chips)
        exceptions = defaultdict(dict)
        for x in range(0, machine.max_chip_x + 1):
            for y in progress.over(range(0, machine.max_chip_y + 1), False):
                self._add_exceptions(json_obj, machine, x, y, exceptions)
        json_obj["chip_resource_exceptions"] = [
            [x, y, exceptions[x, y]] for x, y in exceptions]
        progress.update()

        # dump to json file
        with open(file_path, "w") as f:
            json.dump(json_obj, f)

        progress.update()

        # validate the schema
        file_format_schemas.validate(json_obj, "machine.json")

        # update and complete progress bar
        progress.end()

        return file_path
Example #27
def list_machines(t, machines, jobs):
    """ Display a table summarising the available machines and their load.

    Parameters
    ----------
    t : :py:class:`.Terminal`
        An output styling object for stdout.
    machines : [{...}, ...]
        The list of machines and their properties returned from the server.
    jobs : [{...}, ...]
        The list of jobs and their properties returned from the server.

    Returns
    -------
        An error code: 0 on success.
    """
    machine_jobs = defaultdict(list)
    for job in jobs:
        machine_jobs[job["allocated_machine_name"]].append(job)

    table = [[
        (t.underscore_bright, "Name"),
        (t.underscore_bright, "Num boards"),
        (t.underscore_bright, "In-use"),
        (t.underscore_bright, "Jobs"),
        (t.underscore_bright, "Tags"),
    ]]

    for machine in machines:
        table.append([
            machine["name"],
            ((machine["width"] * machine["height"] * 3) -
             len(machine["dead_boards"])),
            sum(len(job["boards"]) for job in machine_jobs[machine["name"]]),
            len(machine_jobs[machine["name"]]),
            ", ".join(machine["tags"]),
        ])

    print(render_table(table))
    def __call__(self, machine_graph, plan_n_timesteps, file_path):
        """
        :param machine_graph: The graph to convert
        :param plan_n_timesteps: number of timesteps to plan for
        :type  plan_n_timesteps: int
        :param file_path: Where to write the JSON
        """
        progress = ProgressBar(
            machine_graph.n_vertices + 1, "Converting to JSON graph")

        # write basic stuff
        json_graph = dict()

        # write vertices data
        vertices_resources = dict()
        json_graph["vertices_resources"] = vertices_resources

        edges_resources = defaultdict()
        json_graph["edges"] = edges_resources

        vertex_by_id = dict()
        partition_by_id = dict()
        for vertex in progress.over(machine_graph.vertices, False):
            self._convert_vertex(
                vertex, vertex_by_id, vertices_resources, edges_resources,
                machine_graph, plan_n_timesteps, partition_by_id)

        with open(file_path, "w") as f:
            json.dump(json_graph, f)
        progress.update()

        file_format_schemas.validate(json_graph, "machine_graph.json")

        progress.end()

        return file_path, vertex_by_id, partition_by_id
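One caveat worth knowing: defaultdict() called with no default_factory, as for edges_resources above, behaves exactly like a plain dict, so missing keys still raise KeyError. A quick demonstration:

from collections import defaultdict

edges_resources = defaultdict()          # no default_factory supplied
edges_resources["edge_0"] = {"cost": 1}  # assignment works as for any dict
try:
    edges_resources["missing"]
except KeyError:
    print("no factory: missing keys raise KeyError, just like a plain dict")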
Example #29
    def _sort_left_over_verts_based_on_incoming_packets(
            self, machine_graph, placed_vertices, n_keys_map):
        """ sort left overs verts so that the ones with the most costly verts
        are at the front of the list

        :param MachineGraph machine_graph: machine graph
        :param set(MachineVertex) placed_vertices: the verts already placed
        :param AbstractMachinePartitionNKeysMap n_keys_map:
            map from partition to number of keys.
        :return: new list of verts to process.
        :rtype: list(MachineVertex)
        """

        vert_list = list()
        incoming_size_map = defaultdict(list)
        for vertex in machine_graph.vertices:
            if vertex not in placed_vertices:
                incoming_size = self._get_cost(vertex, machine_graph,
                                               n_keys_map)
                incoming_size_map[incoming_size].append(vertex)
        sorted_keys = sorted(incoming_size_map.keys(), reverse=True)
        for key in sorted_keys:
            vert_list.extend(incoming_size_map[key])
        return vert_list
Example #30
if os.environ.get('READTHEDOCS', None) != 'True':

    # scipy must be added in config.py as a mock
    install_requires.append('scipy')
    install_requires.append('csa')

# Build a list of all project modules, as well as supplementary files
main_package = "spynnaker"
extensions = {
    ".aplx", ".boot", ".cfg", ".json", ".sql", ".template", ".xml", ".xsd",
    ".dict"
}
main_package_dir = os.path.join(os.path.dirname(__file__), main_package)
start = len(main_package_dir)
packages = []
package_data = defaultdict(list)
for dirname, dirnames, filenames in os.walk(main_package_dir):
    if '__init__.py' in filenames:
        package = "{}{}".format(main_package,
                                dirname[start:].replace(os.sep, '.'))
        packages.append(package)
    for filename in filenames:
        _, ext = os.path.splitext(filename)
        if ext in extensions:
            package = "{}{}".format(main_package,
                                    dirname[start:].replace(os.sep, '.'))
            package_data[package].append(filename)

setup(name="sPyNNaker",
      version=__version__,
      description="SpiNNaker implementation of PyNN",
Example #31
def get_wheel_elfdata(wheel_fn: str):
    full_elftree = {}
    nonpy_elftree = {}
    full_external_refs = {}
    versioned_symbols = defaultdict(lambda: set())  # type: Dict[str, Set[str]]
    uses_ucs2_symbols = False
    uses_PyFPE_jbuf = False

    with InGenericPkgCtx(wheel_fn) as ctx:
        shared_libraries_in_purelib = []

        for fn, elf in elf_file_filter(ctx.iter_files()):

            # Check for invalid binary wheel format: no shared library should
            # be found in purelib
            so_path_split = fn.split(os.sep)

            # If this is in purelib, add it to the list of shared libraries in purelib
            if 'purelib' in so_path_split:
                shared_libraries_in_purelib.append(so_path_split[-1])

            # If at least one shared library exists in purelib, this is going to
            # fail and there's no need to do further checks
            if not shared_libraries_in_purelib:
                log.debug('processing: %s', fn)
                elftree = lddtree(fn)

                for key, value in elf_find_versioned_symbols(elf):
                    log.debug('key %s, value %s', key, value)
                    versioned_symbols[key].add(value)

                is_py_ext, py_ver = elf_is_python_extension(fn, elf)

                # If the ELF is a Python extension, we definitely need to include
                # its external dependencies.
                if is_py_ext:
                    full_elftree[fn] = elftree
                    uses_PyFPE_jbuf |= elf_references_PyFPE_jbuf(elf)
                    if py_ver == 2:
                        uses_ucs2_symbols |= any(
                            True for _ in elf_find_ucs2_symbols(elf))
                    full_external_refs[fn] = lddtree_external_references(elftree,
                                                                         ctx.path)
                else:
                    # If the ELF is not a Python extension, it might be included in
                    # the wheel already because auditwheel repair vendored it, so
                    # we will check whether we should include its internal
                    # references later.
                    nonpy_elftree[fn] = elftree

        # If at least one shared library exists in purelib, raise an error
        if shared_libraries_in_purelib:
            raise RuntimeError(
                (
                    'Invalid binary wheel, found the following shared library/libraries in purelib folder:\n'
                    '\t%s\n'
                    'The wheel has to be platlib compliant in order to be repaired by auditwheel.'
                ) % '\n\t'.join(shared_libraries_in_purelib)
            )

        # Get a list of all external libraries needed by ELFs in the wheel.
        needed_libs = {
            lib
            for elf in itertools.chain(full_elftree.values(),
                                       nonpy_elftree.values())
            for lib in elf['needed']
        }

        for fn in nonpy_elftree.keys():
            # If a non-pyextension ELF file is not needed by something else
            # inside the wheel, then it was not checked by the logic above and
            # we should walk its elftree.
            if basename(fn) not in needed_libs:
                full_elftree[fn] = nonpy_elftree[fn]
                full_external_refs[fn] = lddtree_external_references(
                    nonpy_elftree[fn], ctx.path)

    log.debug(json.dumps(full_elftree, indent=4))

    return (full_elftree, full_external_refs, versioned_symbols,
            uses_ucs2_symbols, uses_PyFPE_jbuf)
 def __init__(self):
     self._data = defaultdict(dict)
Example #33
class AbstractPyNNModel(object):
    """ A Model that can be passed in to a Population object in PyNN
    """

    _max_atoms_per_core = defaultdict(lambda: sys.maxsize)

    @classmethod
    def set_model_max_atoms_per_core(cls, n_atoms=sys.maxsize):
        """ Set the maximum number of atoms per core for this model

        :param n_atoms: The new maximum, or None for the largest possible
        :type n_atoms: int or None
        """
        AbstractPyNNModel._max_atoms_per_core[cls] = n_atoms

    @classmethod
    def get_max_atoms_per_core(cls):
        """ Get the maximum number of atoms per core for this model

        :rtype: int
        """
        return AbstractPyNNModel._max_atoms_per_core[cls]

    @staticmethod
    def _get_init_params_and_svars(cls):
        init = getattr(cls, "__init__")
        params = None
        if hasattr(init, "_parameters"):
            params = getattr(init, "_parameters")
        svars = None
        if hasattr(init, "_state_variables"):
            svars = getattr(init, "_state_variables")
        return init, params, svars

    @classproperty
    def default_parameters(cls):
        """ Get the default values for the parameters of the model.

        :rtype: dict(str, object)
        """
        init, params, svars = cls._get_init_params_and_svars(cls)
        return get_dict_from_init(init, skip=svars, include=params)

    @classproperty
    def default_initial_values(cls):
        """ Get the default initial values for the state variables of the model

        :rtype: dict(str, object)
        """
        init, params, svars = cls._get_init_params_and_svars(cls)
        if params is None and svars is None:
            return {}
        return get_dict_from_init(init, skip=params, include=svars)

    @classmethod
    def get_parameter_names(cls):
        """ Get the names of the parameters of the model

        :rtype: list(str)
        """
        return cls.default_parameters.keys()

    @classmethod
    def has_parameter(cls, name):
        """ Determine if the model has a parameter with the given name

        :param name: The name of the parameter to check for
        :type name: str
        :rtype: bool
        """
        return name in cls.default_parameters

    @abstractproperty
    @staticmethod
    def default_population_parameters():
        """ Get the default values for the parameters at the population-level;\
            these are parameters that can be passed in to the Population\
            constructor in addition to the standard PyNN options

        :rtype: dict(str, object)
        """

    @abstractmethod
    def create_vertex(self, n_neurons, label, constraints):
        """ Create a vertex for a population of the model
Example #34
    def __call__(self, machine_graph, machine, n_keys_map, plan_n_timesteps):
        """
        :param MachineGraph machine_graph: the machine graph
        :param ~spinn_machine.Machine machine: the SpiNNaker machine
        :param AbstractMachinePartitionNKeysMap n_keys_map:
            the n keys from partition map
        :param int plan_n_timesteps: number of timesteps to plan for
        :return: placements.
        :rtype: Placements
        """
        # create progress bar
        progress_bar = ProgressBar(
            (machine_graph.n_vertices * self.ITERATIONS) + self.STEPS,
            "Placing graph vertices via spreading over an entire machine")

        # check that the algorithm can handle the constraints
        self._check_constraints(
            machine_graph.vertices,
            additional_placement_constraints={SameChipAsConstraint})
        progress_bar.update()

        # get same chip groups
        same_chip_vertex_groups = get_same_chip_vertex_groups(machine_graph)
        progress_bar.update()
        # get chip and core placed verts
        hard_chip_constraints = self._locate_hard_placement_verts(
            machine_graph)
        progress_bar.update()
        # get one to one groups
        one_to_one_groups = create_vertices_groups(
            machine_graph.vertices,
            functools.partial(self._find_one_to_one_vertices,
                              graph=machine_graph))
        progress_bar.update()

        # sort chips so that they are radial from a given point and other
        # init data structs
        chips_in_order = self._determine_chip_list(machine)
        resource_tracker = ResourceTracker(machine,
                                           plan_n_timesteps,
                                           chips=chips_in_order)
        placements = Placements()
        placed_vertices = set()
        cost_per_chip = defaultdict(int)
        progress_bar.update()

        # allocate hard ones
        for hard_vertex in hard_chip_constraints:
            (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
                hard_vertex.resources_required, hard_vertex.constraints)
            placements.add_placement(Placement(hard_vertex, x, y, p))
            placed_vertices.add(hard_vertex)
            cost_per_chip[x, y] += self._get_cost(hard_vertex, machine_graph,
                                                  n_keys_map)

        # place groups of verts that need the same chip on the same chip,
        self._place_same_chip_verts(same_chip_vertex_groups, chips_in_order,
                                    placements, progress_bar, resource_tracker,
                                    placed_vertices, cost_per_chip,
                                    machine_graph, n_keys_map)

        # place 1 group per chip if possible on same chip as any already
        # placed verts. if not then radially from it.
        self._place_one_to_one_verts(one_to_one_groups, chips_in_order,
                                     placements, progress_bar,
                                     resource_tracker, placed_vertices,
                                     cost_per_chip, machine_graph, n_keys_map,
                                     machine)

        # place vertices which don't have annoying placement constraints.
        # spread them over the chips so that they have minimal impact on the
        # overall incoming packet cost per router.
        self._place_left_over_verts(machine_graph, chips_in_order, placements,
                                    progress_bar, resource_tracker,
                                    placed_vertices, cost_per_chip, n_keys_map)
        progress_bar.end()

        # return the built placements
        return placements
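The cost_per_chip accumulator relies on defaultdict(int) returning 0 on first access, so += works without pre-initialising every chip. A minimal sketch with invented coordinates and costs:

from collections import defaultdict

cost_per_chip = defaultdict(int)   # (x, y) -> accumulated incoming packet cost
for (x, y), cost in [((0, 0), 10), ((1, 1), 3), ((0, 0), 7)]:   # hypothetical per-vertex costs
    cost_per_chip[x, y] += cost    # first access yields 0, so no explicit initialisation needed

print(dict(cost_per_chip))   # {(0, 0): 17, (1, 1): 3}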
Example #35
    def _fit(self, X, y, groups, parameter_iterable):
        """
        Actual fitting,  performing the search over parameters.
        Taken from https://github.com/scikit-learn/scikit-learn/blob/0.18.X
                    .../sklearn/model_selection/_search.py
        """

        estimator = self.estimator
        cv = sklearn.model_selection._validation.check_cv(
            self.cv, y, classifier=is_classifier(estimator))
        self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)

        X, y, groups = indexable(X, y, groups)
        n_splits = cv.get_n_splits(X, y, groups)
        if self.verbose > 0 and isinstance(parameter_iterable, Sized):
            n_candidates = len(parameter_iterable)
            self.printfn(
                "Fitting {0} folds for each of {1} candidates, totalling"
                " {2} fits".format(n_splits, n_candidates,
                                   n_candidates * n_splits))

        base_estimator = clone(self.estimator)
        pre_dispatch = self.pre_dispatch

        cv_iter = list(cv.split(X, y, groups))
        out = Parallel(
            n_jobs=self.n_jobs,
            verbose=self.verbose,
            pre_dispatch=pre_dispatch)(
                delayed(sklearn.model_selection._validation._fit_and_score)(
                    clone(base_estimator),
                    X,
                    y,
                    self.scorer_,
                    train,
                    test,
                    self.verbose,
                    parameters,
                    fit_params=self.fit_params,
                    return_train_score=self.return_train_score,
                    return_n_test_samples=True,
                    return_times=True,
                    return_parameters=True,
                    error_score=self.error_score)
                for parameters in parameter_iterable
                for train, test in cv_iter)

        # if one chooses to see the train score, "out" will contain train score info
        if self.return_train_score:
            (train_scores, test_scores, test_sample_counts, fit_time,
             score_time, parameters) = zip(*out)
        else:
            (test_scores, test_sample_counts, fit_time, score_time,
             parameters) = zip(*out)

        candidate_params = parameters[::n_splits]
        n_candidates = len(candidate_params)

        results = dict()

        def _store(key_name, array, weights=None, splits=False, rank=False):
            """A small helper to store the scores/times to the cv_results_"""
            array = np.array(array,
                             dtype=np.float64).reshape(n_candidates, n_splits)
            if splits:
                for split_i in range(n_splits):
                    results["split%d_%s" %
                            (split_i, key_name)] = array[:, split_i]

            array_means = np.average(array, axis=1, weights=weights)
            results['mean_%s' % key_name] = array_means
            # Weighted std is not directly available in numpy
            array_stds = np.sqrt(
                np.average((array - array_means[:, np.newaxis])**2,
                           axis=1,
                           weights=weights))
            results['std_%s' % key_name] = array_stds

            if rank:
                results["rank_%s" % key_name] = np.asarray(rankdata(
                    -array_means, method='min'),
                                                           dtype=np.int32)

        # Computed the (weighted) mean and std for test scores alone
        # NOTE test_sample counts (weights) remain the same for all candidates
        test_sample_counts = np.array(test_sample_counts[:n_splits],
                                      dtype=np.int)

        _store('test_score',
               test_scores,
               splits=True,
               rank=True,
               weights=test_sample_counts if self.iid else None)
        if self.return_train_score:
            _store('train_score', train_scores, splits=True)
        _store('fit_time', fit_time)
        _store('score_time', score_time)

        best_index = np.flatnonzero(results["rank_test_score"] == 1)[0]
        best_parameters = candidate_params[best_index]

        # Use one MaskedArray and mask all the places where the param is not
        # applicable for that candidate. Use defaultdict as each candidate may
        # not contain all the params
        param_results = defaultdict(
            partial(MaskedArray,
                    np.empty(n_candidates, ),
                    mask=True,
                    dtype=object))
        for cand_i, params in enumerate(candidate_params):
            for name, value in params.items():
                # An all masked empty array gets created for the key
                # `"param_%s" % name` at the first occurence of `name`.
                # Setting the value at an index also unmasks that index
                param_results["param_%s" % name][cand_i] = value

        results.update(param_results)

        # Store a list of param dicts at the key 'params'
        results['params'] = candidate_params

        self.cv_results_ = results
        self.best_index_ = best_index
        self.n_splits_ = n_splits

        if self.refit:
            # fit the best estimator using the entire dataset
            # clone first to work around broken estimators
            best_estimator = clone(base_estimator).set_params(
                **best_parameters)
            if y is not None:
                best_estimator.fit(X, y, **self.fit_params)
            else:
                best_estimator.fit(X, **self.fit_params)
            self.best_estimator_ = best_estimator
        return self
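A self-contained sketch of the defaultdict-with-partial trick above: each newly seen hyperparameter name gets its own fully masked array, and assigning a value unmasks only that candidate's slot (the parameter name and sizes below are made up):

import numpy as np
from collections import defaultdict
from functools import partial
from numpy.ma import MaskedArray

n_candidates = 3
param_results = defaultdict(
    partial(MaskedArray, np.empty(n_candidates,), mask=True, dtype=object))

param_results["param_alpha"][1] = 0.5      # array created on first access; index 1 is unmasked
print(param_results["param_alpha"])        # [-- 0.5 --]
print(param_results["param_alpha"].mask)   # [ True False  True]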
except ImportError:
    from collections import defaultdict
import os

__version__ = None
exec(open("spinn_front_end_common/_version.py").read())
assert __version__

# Build a list of all project modules, as well as supplementary files
main_package = "spinn_front_end_common"
extensions = {".aplx", ".boot", ".cfg", ".json", ".sql", ".template", ".xml",
              ".xsd", ".dict"}
main_package_dir = os.path.join(os.path.dirname(__file__), main_package)
start = len(main_package_dir)
packages = []
package_data = defaultdict(list)
for dirname, dirnames, filenames in os.walk(main_package_dir):
    if '__init__.py' in filenames:
        package = "{}{}".format(
            main_package, dirname[start:].replace(os.sep, '.'))
        packages.append(package)
    for filename in filenames:
        _, ext = os.path.splitext(filename)
        if ext in extensions:
            package = "{}{}".format(
                main_package, dirname[start:].replace(os.sep, '.'))
            package_data[package].append(filename)

setup(
    name="SpiNNFrontEndCommon",
    version=__version__,
Example #37
File: cephfs.py Project: sonya1st/ceph
    def fs_status(self, fs_id):
        mds_versions = defaultdict(list)

        fsmap = mgr.get("fs_map")
        filesystem = None
        for fs in fsmap['filesystems']:
            if fs['id'] == fs_id:
                filesystem = fs
                break

        if filesystem is None:
            raise cherrypy.HTTPError(404,
                                     "CephFS id {0} not found".format(fs_id))

        rank_table = []

        mdsmap = filesystem['mdsmap']

        client_count = 0

        for rank in mdsmap["in"]:
            up = "mds_{0}".format(rank) in mdsmap["up"]
            if up:
                gid = mdsmap['up']["mds_{0}".format(rank)]
                info = mdsmap['info']['gid_{0}'.format(gid)]
                dns = mgr.get_latest("mds", info['name'], "mds.inodes")
                inos = mgr.get_latest("mds", info['name'], "mds_mem.ino")

                if rank == 0:
                    client_count = mgr.get_latest("mds", info['name'],
                                                  "mds_sessions.session_count")
                elif client_count == 0:
                    # In case rank 0 was down, look at another rank's
                    # sessionmap to get an indication of clients.
                    client_count = mgr.get_latest("mds", info['name'],
                                                  "mds_sessions.session_count")

                laggy = "laggy_since" in info

                state = info['state'].split(":")[1]
                if laggy:
                    state += "(laggy)"

                # Populate based on context of state, e.g. client
                # ops for an active daemon, replay progress, reconnect
                # progress
                if state == "active":
                    activity = CephService.get_rate("mds",
                                                    info['name'],
                                                    "mds_server.handle_client_request")
                else:
                    activity = 0.0

                metadata = mgr.get_metadata('mds', info['name'])
                mds_versions[metadata.get('ceph_version', 'unknown')].append(
                    info['name'])
                rank_table.append(
                    {
                        "rank": rank,
                        "state": state,
                        "mds": info['name'],
                        "activity": activity,
                        "dns": dns,
                        "inos": inos
                    }
                )

            else:
                rank_table.append(
                    {
                        "rank": rank,
                        "state": "failed",
                        "mds": "",
                        "activity": 0.0,
                        "dns": 0,
                        "inos": 0
                    }
                )

        # Find the standby replays
        # pylint: disable=unused-variable
        for gid_str, daemon_info in mdsmap['info'].items():
            if daemon_info['state'] != "up:standby-replay":
                continue

            inos = mgr.get_latest("mds", daemon_info['name'], "mds_mem.ino")
            dns = mgr.get_latest("mds", daemon_info['name'], "mds.inodes")

            activity = CephService.get_rate(
                "mds", daemon_info['name'], "mds_log.replay")

            rank_table.append(
                {
                    "rank": "{0}-s".format(daemon_info['rank']),
                    "state": "standby-replay",
                    "mds": daemon_info['name'],
                    "activity": activity,
                    "dns": dns,
                    "inos": inos
                }
            )

        df = mgr.get("df")
        pool_stats = dict([(p['id'], p['stats']) for p in df['pools']])
        osdmap = mgr.get("osd_map")
        pools = dict([(p['pool'], p) for p in osdmap['pools']])
        metadata_pool_id = mdsmap['metadata_pool']
        data_pool_ids = mdsmap['data_pools']

        pools_table = []
        for pool_id in [metadata_pool_id] + data_pool_ids:
            pool_type = "metadata" if pool_id == metadata_pool_id else "data"
            stats = pool_stats[pool_id]
            pools_table.append({
                "pool": pools[pool_id]['pool_name'],
                "type": pool_type,
                "used": stats['bytes_used'],
                "avail": stats['max_avail']
            })

        standby_table = []
        for standby in fsmap['standbys']:
            metadata = mgr.get_metadata('mds', standby['name'])
            mds_versions[metadata.get('ceph_version', 'unknown')].append(
                standby['name'])

            standby_table.append({
                'name': standby['name']
            })

        return {
            "cephfs": {
                "id": fs_id,
                "name": mdsmap['fs_name'],
                "client_count": client_count,
                "ranks": rank_table,
                "pools": pools_table
            },
            "standbys": standby_table,
            "versions": mds_versions
        }
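A toy sketch of the mds_versions grouping used throughout this handler, with invented daemon names and version strings:

from collections import defaultdict

mds_versions = defaultdict(list)   # ceph version string -> daemon names running it
daemons = [("mds.a", "ceph version 14.2.0"), ("mds.b", "ceph version 14.2.0")]   # hypothetical
for name, version in daemons:
    mds_versions[version].append(name)

print(dict(mds_versions))   # {'ceph version 14.2.0': ['mds.a', 'mds.b']}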
Example #38
    def handle_fs_status(self, cmd):
        output = ""

        fs_filter = cmd.get('fs', None)

        mds_versions = defaultdict(list)

        fsmap = self.get("fs_map")
        for filesystem in fsmap['filesystems']:
            if fs_filter and filesystem['mdsmap']['fs_name'] != fs_filter:
                continue

            rank_table = PrettyTable(
                ("Rank", "State", "MDS", "Activity", "dns", "inos"),
                hrules=prettytable.FRAME)

            mdsmap = filesystem['mdsmap']

            client_count = 0

            for rank in mdsmap["in"]:
                up = "mds_{0}".format(rank) in mdsmap["up"]
                if up:
                    gid = mdsmap['up']["mds_{0}".format(rank)]
                    info = mdsmap['info']['gid_{0}'.format(gid)]
                    dns = self.get_latest("mds", info['name'], "mds_mem.dn")
                    inos = self.get_latest("mds", info['name'], "mds_mem.ino")

                    if rank == 0:
                        client_count = self.get_latest(
                            "mds", info['name'], "mds_sessions.session_count")
                    elif client_count == 0:
                        # In case rank 0 was down, look at another rank's
                        # sessionmap to get an indication of clients.
                        client_count = self.get_latest(
                            "mds", info['name'], "mds_sessions.session_count")

                    laggy = "laggy_since" in info

                    state = info['state'].split(":")[1]
                    if laggy:
                        state += "(laggy)"
                    if state == "active" and not laggy:
                        c_state = self.colorize(state, self.GREEN)
                    else:
                        c_state = self.colorize(state, self.YELLOW)

                    # Populate based on context of state, e.g. client
                    # ops for an active daemon, replay progress, reconnect
                    # progress
                    activity = ""

                    if state == "active":
                        activity = "Reqs: " + self.format_dimless(
                            self.get_rate("mds", info['name'],
                                          "mds_server.handle_client_request"),
                            5) + "/s"

                    metadata = self.get_metadata('mds', info['name'])
                    mds_versions[metadata.get('ceph_version',
                                              "unknown")].append(info['name'])
                    rank_table.add_row([
                        self.bold(str(rank)), c_state, info['name'],
                        activity,
                        self.format_dimless(dns, 5),
                        self.format_dimless(inos, 5)
                    ])

                else:
                    rank_table.add_row([rank, "failed", "", "", "", ""])

            # Find the standby replays
            for gid_str, daemon_info in six.iteritems(mdsmap['info']):
                if daemon_info['state'] != "up:standby-replay":
                    continue

                inos = self.get_latest("mds", daemon_info['name'],
                                       "mds_mem.ino")
                dns = self.get_latest("mds", daemon_info['name'], "mds_mem.dn")

                activity = "Evts: " + self.format_dimless(
                    self.get_rate("mds", daemon_info['name'],
                                  "mds_log.replayed"), 5) + "/s"

                metadata = self.get_metadata('mds', daemon_info['name'])
                mds_versions[metadata.get('ceph_version', "unknown")].append(
                    daemon_info['name'])

                rank_table.add_row([
                    "{0}-s".format(daemon_info['rank']), "standby-replay",
                    daemon_info['name'], activity,
                    self.format_dimless(dns, 5),
                    self.format_dimless(inos, 5)
                ])

            df = self.get("df")
            pool_stats = {p['id']: p['stats'] for p in df['pools']}
            osdmap = self.get("osd_map")
            pools = {p['pool']: p for p in osdmap['pools']}
            metadata_pool_id = mdsmap['metadata_pool']
            data_pool_ids = mdsmap['data_pools']

            pools_table = PrettyTable(["Pool", "type", "used", "avail"])
            for pool_id in [metadata_pool_id] + data_pool_ids:
                pool_type = "metadata" if pool_id == metadata_pool_id else "data"
                stats = pool_stats[pool_id]
                pools_table.add_row([
                    pools[pool_id]['pool_name'], pool_type,
                    self.format_bytes(stats['bytes_used'], 5),
                    self.format_bytes(stats['max_avail'], 5)
                ])

            output += "{0} - {1} clients\n".format(mdsmap['fs_name'],
                                                   client_count)
            output += "=" * len(mdsmap['fs_name']) + "\n"
            output += rank_table.get_string()
            output += "\n" + pools_table.get_string() + "\n"

        if not output and fs_filter is not None:
            return errno.EINVAL, "", "Invalid filesystem: " + fs_filter

        standby_table = PrettyTable(["Standby MDS"])
        for standby in fsmap['standbys']:
            metadata = self.get_metadata('mds', standby['name'])
            mds_versions[metadata.get('ceph_version',
                                      "unknown")].append(standby['name'])

            standby_table.add_row([standby['name']])

        output += "\n" + standby_table.get_string() + "\n"

        if len(mds_versions) == 1:
            output += "MDS version: {0}".format(mds_versions.keys()[0])
        else:
            version_table = PrettyTable(["version", "daemons"])
            for version, daemons in six.iteritems(mds_versions):
                version_table.add_row([version, ", ".join(daemons)])
            output += version_table.get_string() + "\n"

        return 0, output, ""
예제 #39
0
    def __init__(self, fname, label):

        self.name = fname  # file name of the input transcript
        self.label = label
        # init for updateWordCount()
        self.words = []  # words in the order they appear
        self.wordCount = defaultdict(int)  # word -> number of occurrences
        self.wordCountSort = defaultdict(int)  # word counts sorted by frequency
        self.transSize = 0  # total number of words in the transcript
        self.transSizeNS = 0  # number of meaningful words in the transcript
        self.wordsetSize = 0  # number of distinct words
        # init for updateKeywordStatistics()
        self.keywordStat = defaultdict(
            dict)  # number of times each keyword appears in the transcript
        self.keywordStatSorted = defaultdict(dict)
        self.keywordLoc = defaultdict(
            dict)  # locations (list) at which each keyword appears
        self.keywordPart = defaultdict(
            dict)  # share of each keyword among all keyword occurrences
        self.keywordPer = float(0)  # share of keyword occurrences among all words
        #init for questionMark()
        self.questionStat = 0
        self.questionLoc = []
        self.questionPer = float(0)
        #init for emoVoice()
        self.emoTerm = []
        self.emoLoc = []
        #init for keywordQuestionmark()
        self.keywordQNum = defaultdict(dict)
        self.keywordQPer = defaultdict(dict)
        self.keywordQOutput = defaultdict(dict)
        #init for nonSpeech()
        self.nSpeech = []
        #init for lengthByTurn()
        self.listTurn = []

        print("Reading data...")
        from nltk.tokenize import sent_tokenize
        self.data = []
        self.lines = []
        self.sents = []
        self.sentNum = 0
        with open(fname, encoding='utf-8', errors='ignore') as f:
            for line in f:
                line = line.lower()
                if (line[0] == label):
                    self.lines.append(sent_tokenize(line))
                    for sent in sent_tokenize(line):
                        self.sents.append(sent)
                    self.sentNum += len(sent_tokenize(line))
                    tokenizer = nltk.tokenize.TreebankWordTokenizer()
                    self.data += tokenizer.tokenize(line)
        print("done")

        print("Initializing keyword dictionary...")
        # Dictionary of key-terms for CTS fidelity
        self.keywordCTS = defaultdict(set)

        self.keywordCTS['Agenda'] = {'agenda','priorities','priority','do first','most important','work on','focus on','talk about' \
                                     ,'plan','todo list' ,'focus on first','focus on today','focus on during the session' \
                                     ,'talk about today','talk about first','talk about during the session' \
                                     ,'work on today','work on first','work on during the session' \
                                     ,'you like to','you want to','add to the agenda','add anything to the agenda','last week' \
                                     ,'evidence','mistake in thinking','what did you think','what did you want' \
                                     ,'how did you feel'}

        self.keywordCTS['Feedback'] = {'feedback','reaction','advise','advice','suggestion','previous','last time','last week','last session','past session' \
                                       ,'think about today','things go today','think about today\'s session','concern' \
                                       ,'what question','what questions','unhelpful','helpful','least helpful','about today\'s session' \
                                       ,'anything i can do better','anything we can do better','concerns about today\'s session','helpful about the session' \
                                       ,'can help you','we can try','learn','take in','skill','learn skills','achieve','goals','goal' \
                                       ,'if i understand you correctly','are you saying','do i have it right','work on your goals' \
                                       ,'was this helpful','how am i doing today','am i explaining things clearly','do you understand' \
                                       ,'do you follow me'}

        self.keywordCTS['Understanding'] = {'understand','understanding','recognize','observe','grasp','comprehend','know','understand why' \
                                            ,'sounds like','you are saying','you were feeling','you felt','i felt','i was feeling' \
                                            ,'see','makes sense','i see','feel that way','feel this way'}

        self.keywordCTS['Interpersonal Effectiveness'] = {'sorry','hard','difficult','tough' \
                                                          ,'disappointing','stressful','stressed' \
                                                          ,'scary','frightening','upset','upsetting'\
                                                          ,'unfortunate'}

        self.keywordCTS['Collaboration'] = {
            'choice', 'you want to do', 'good idea', 'because', 'will',
            'help you get your goal'
        }

        self.keywordCTS['Guided Discovery'] = {'meaning','mean','self','how','why','evidence' \
                                               ,'conclusion','conclude','decide','decision','decided' \
                                               ,'know','proof','tell me more','assume','assumption' \
                                               ,'hypothesis','disprove','facts','fact','solutions' \
                                               ,'brainstorm','solve','alternative','other explanations' \
                                               ,'another way','other way','to think about','to explain','reason'}

        self.keywordCTS['Focus on Key Cognitions'] = {'thinking','tell yourself','through your mind' \
                                                      ,'what did you tell yourself','what went through your mind' \
                                                      ,'thought','think','connection','lead to','connected' \
                                                      ,'connect','link','linked','make you','you do','feel about the thought'}

        self.keywordCTS['Choices of Intervention'] = set()

        self.keywordCTS['Homework'] = {'homework','review','homework review','at home','practice','assignment','assign','get in the way of','work around' \
                                       ,'assigned','progress','learned','improve','learn','skills','skill','out of session','outside of session' \
                                       ,'goal','better','barrier','in the way','expect','problems','problem','succeed','success'}

        self.keywordCTS['Social Skills Training'] = {'rational','help you learn this skill','help you with your goal' \
                                                     ,'demonstrate','to make your next role','play better','play even better' \
                                                     ,'try to focus on','do well','did well','did a good job' \
                                                     ,'for the next role play','recommend focusing on'}
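# Illustrative sketch (hypothetical helper, not the original implementation):
# the constructor above initialises self.words, self.wordCount, self.transSize
# and friends "for updateWordCount()", whose body is not shown in this snippet.
# Assuming that method simply tallies the tokens collected into self.data, a
# minimal stand-alone version could look like this:
from collections import defaultdict

def update_word_count(tokens):
    """Return (word_list, counts, total_words, distinct_words) for a token list."""
    word_count = defaultdict(int)
    for token in tokens:
        word_count[token] += 1
    return list(tokens), word_count, len(tokens), len(word_count)

# Example usage on a tiny token list:
words, counts, total, distinct = update_word_count(["agenda", "goal", "agenda"])
assert counts["agenda"] == 2 and total == 3 and distinct == 2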
from collections import defaultdict
try:
    from inspect import getfullargspec
except ImportError:
    # Python 2.7 hack
    from inspect import getargspec as getfullargspec
from functools import wraps
from six import iteritems, itervalues

_instances = list()
_methods = defaultdict(dict)
_injectables = None


class InjectionException(Exception):
    """ Raised when there is an error with injection.
    """


def supports_injection(injectable_class):
    """ Indicate that the class has methods on which objects can be injected.
    """
    orig_init = injectable_class.__init__

    def new_init(self, *args, **kwargs):
        # pylint: disable=protected-access
        orig_init(self, *args, **kwargs)
        for method in itervalues(injectable_class.__dict__):
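# NOTE: the snippet above is truncated at this point. The general technique it
# names -- wrapping a class's __init__ so that every new instance is registered
# for later attribute injection -- can be illustrated with a minimal,
# self-contained sketch (hypothetical names, not the spinn_utilities API):
_registered_instances = []

def registers_instances(cls):
    """ Class decorator: record every instance created so that values can
        later be pushed ("injected") into it.
    """
    original_init = cls.__init__

    def new_init(self, *args, **kwargs):
        original_init(self, *args, **kwargs)
        _registered_instances.append(self)

    cls.__init__ = new_init
    return cls

@registers_instances
class Consumer(object):
    pass

consumer = Consumer()
assert consumer in _registered_instances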
예제 #41
0
 def __init__(self, ctx):
     # Deliberately skip calling parent constructor
     self._ctx = ctx
     self.mon_manager = LocalCephManager()
     self._conf = defaultdict(dict)
    def _determine_algorithm_order(
            self, inputs, required_outputs, algorithm_data,
            optional_algorithm_data, converter_algorithms_datas,
            tokens, required_output_tokens):
        """ Takes the algorithms and determines which order they need to be\
            executed to generate the correct data objects

        :param inputs: list of input types
        :type inputs: iterable(str)
        :param required_outputs: \
            the set of outputs that this workflow is meant to generate
        :param converter_algorithms_datas: the set of converter algorithms
        :param optional_algorithm_data: the set of optional algorithms
        :rtype: None
        """

        # Go through the algorithms and get all possible outputs
        all_outputs = set(iterkeys(inputs))
        for algorithms in (algorithm_data, optional_algorithm_data):
            for algorithm in algorithms:

                # Get the algorithm output types
                alg_outputs = {
                    output.output_type for output in algorithm.outputs}

                # Remove from the outputs any optional input that is also an
                # output
                for alg_input in algorithm.optional_inputs:
                    for matching in alg_input.get_matching_inputs(alg_outputs):
                        alg_outputs.discard(matching)
                all_outputs.update(alg_outputs)

        # Set up the token tracking and make all specified tokens complete
        token_states = TokenStates()
        for token_name in tokens:
            token = Token(token_name)
            token_states.track_token(token)
            token_states.process_output_token(token)

        # Go through the algorithms and add in the tokens that can be completed
        # by any of the algorithms
        for algorithms in (algorithm_data, optional_algorithm_data):
            for algorithm in algorithms:
                for token in algorithm.generated_output_tokens:
                    if not token_states.is_token_complete(token):
                        token_states.track_token(token)

        # Go through the algorithms and add a fake token for any algorithm that
        # requires an optional token that can't be provided and a fake input
        # for any algorithm that requires an optional input that can't be
        # provided.  This allows us to require the other optional inputs and
        # tokens so that algorithms that provide those items are run before
        # those that can make use of them.
        fake_inputs = set()
        fake_tokens = TokenStates()
        for algorithms in (algorithm_data, optional_algorithm_data):
            for algorithm in algorithms:
                for input_parameter in algorithm.optional_inputs:
                    if not input_parameter.input_matches(all_outputs):
                        fake_inputs.update(
                            input_parameter.get_fake_inputs(all_outputs))
                for token in algorithm.optional_input_tokens:
                    if (not token_states.is_tracking_token(token) and
                            not fake_tokens.is_token_complete(token)):
                        fake_tokens.track_token(token)
                        fake_tokens.process_output_token(token)

        input_types = set(iterkeys(inputs))

        allocated_algorithms = list()
        generated_outputs = set()
        # seed the generated outputs with the inputs that are already available
        generated_outputs.update(input_types)
        algorithms_to_find = list(algorithm_data)
        optionals_to_use = list(optional_algorithm_data)
        outputs_to_find = self._remove_outputs_which_are_inputs(
            required_outputs, inputs)
        tokens_to_find = self._remove_complete_tokens(
            token_states, required_output_tokens)

        while algorithms_to_find or outputs_to_find or tokens_to_find:

            suitable_algorithm = None
            algorithm_list = None

            # Order of searching - each combination will be attempted in order;
            # the first matching algorithm will be used (and the search will
            # stop). Elements are:
            #  1. algorithm list to search,
            #  2. whether to check generated outputs,
            #  3. whether to require optional inputs
            order = [

                # Check required algorithms forcing optional inputs
                (algorithms_to_find, False, True),

                # Check optional algorithms forcing optional inputs
                (optionals_to_use, True, True),

                # Check required algorithms without optional inputs
                # - shouldn't need to do this, but might if an optional input
                # is also a generated output of the same algorithm
                (algorithms_to_find, False, False),

                # Check optional algorithms without optional inputs
                # - as above, it shouldn't be necessary but might be if an
                # optional input is also an output of the same algorithm
                (optionals_to_use, True, False),

                # Check converter algorithms
                # (only if they generate something new)
                (converter_algorithms_datas, True, False)
            ]

            for (algorithms, check_outputs, force_required) in order:
                suitable_algorithm, algorithm_list = \
                    self._locate_suitable_algorithm(
                        algorithms, input_types, generated_outputs,
                        token_states, fake_inputs, fake_tokens,
                        check_outputs, force_required)
                if suitable_algorithm is not None:
                    break

            if suitable_algorithm is not None:
                # Remove the value
                self._remove_algorithm_and_update_outputs(
                    algorithm_list, suitable_algorithm, input_types,
                    generated_outputs, outputs_to_find)

                # add the suitable algorithms to the list and take the outputs
                # as new inputs
                allocated_algorithms.append(suitable_algorithm)

                # Mark any tokens generated as complete
                for output_token in suitable_algorithm.generated_output_tokens:
                    token_states.process_output_token(output_token)
                    if token_states.is_token_complete(
                            Token(output_token.name)):
                        tokens_to_find.discard(output_token.name)
            else:

                # Failed to find an algorithm to run!
                algorithms_to_find_names = list()
                for algorithm in algorithms_to_find:
                    algorithms_to_find_names.append(algorithm.algorithm_id)
                optional_algorithms_names = list()
                for algorithm in optional_algorithm_data:
                    optional_algorithms_names.append(algorithm.algorithm_id)
                algorithms_used = list()
                for algorithm in allocated_algorithms:
                    algorithms_used.append(algorithm.algorithm_id)
                algorithm_input_requirement_breakdown = ""
                for algorithm in algorithms_to_find:
                    algorithm_input_requirement_breakdown += \
                        self._deduce_inputs_required_to_run(
                            algorithm, input_types, token_states,
                            fake_inputs, fake_tokens)
                for algorithm in optionals_to_use:
                    algorithm_input_requirement_breakdown += \
                        self._deduce_inputs_required_to_run(
                            algorithm, input_types, token_states,
                            fake_inputs, fake_tokens)
                algorithms_by_output = defaultdict(list)
                algorithms_by_token = defaultdict(list)
                for algorithms in (algorithm_data, optional_algorithm_data):
                    for algorithm in algorithms:
                        for output in algorithm.outputs:
                            algorithms_by_output[output.output_type].append(
                                algorithm.algorithm_id)
                        for token in algorithm.generated_output_tokens:
                            algorithms_by_token[token.name].append(
                                "{}: part={}".format(
                                    algorithm.algorithm_id, token.part))

                raise PacmanConfigurationException(
                    "Unable to deduce a future algorithm to use.\n"
                    "    Inputs: {}\n"
                    "    Fake Inputs: {}\n"
                    "    Outputs to find: {}\n"
                    "    Tokens complete: {}\n"
                    "    Fake tokens complete: {}\n"
                    "    Tokens to find: {}\n"
                    "    Required algorithms remaining to be used: {}\n"
                    "    Optional Algorithms unused: {}\n"
                    "    Functions used: {}\n"
                    "    Algorithm by outputs: {}\n"
                    "    Algorithm by tokens: {}\n"
                    "    Inputs required per function: \n{}\n".format(
                        sorted(input_types),
                        sorted(fake_inputs),
                        outputs_to_find,
                        token_states.get_completed_tokens(),
                        fake_tokens.get_completed_tokens(),
                        tokens_to_find,
                        algorithms_to_find_names,
                        optional_algorithms_names,
                        algorithms_used,
                        algorithms_by_output,
                        algorithms_by_token,
                        algorithm_input_requirement_breakdown))

        # Test that the outputs are generated
        all_required_outputs_generated = True
        failed_to_generate_output_string = ""
        for output in outputs_to_find:
            if output not in generated_outputs:
                all_required_outputs_generated = False
                failed_to_generate_output_string += ":{}".format(output)

        if not all_required_outputs_generated:
            raise PacmanConfigurationException(
                "Unable to generate outputs {}".format(
                    failed_to_generate_output_string))

        self._algorithms = allocated_algorithms
        self._completed_tokens = token_states.get_completed_tokens()
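# Illustrative sketch: _determine_algorithm_order above greedily picks the next
# algorithm whose inputs are already available, adds its outputs to the pool of
# available data, and fails loudly when nothing can run. The core idea, stripped
# of tokens and optional inputs, can be shown with a tiny stand-alone version
# (hypothetical data structures, not the PACMAN API):
def order_algorithms(algorithms, available, required):
    """algorithms: list of (name, inputs_set, outputs_set) tuples."""
    available = set(available)
    remaining = list(algorithms)
    ordered = []
    while not required <= available:
        runnable = next(
            (a for a in remaining if a[1] <= available), None)
        if runnable is None:
            raise RuntimeError(
                "Unable to deduce a future algorithm to use; "
                "missing: {}".format(required - available))
        remaining.remove(runnable)
        ordered.append(runnable[0])
        available |= runnable[2]
    return ordered

# Example: B needs A's output before it can produce the required "result"
algs = [("B", {"intermediate"}, {"result"}), ("A", {"raw"}, {"intermediate"})]
assert order_algorithms(algs, {"raw"}, {"result"}) == ["A", "B"]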
 def __init__(self, test_case):
     self._test_case = test_case
     self._n_cores_in_app = defaultdict(lambda: 0)
     self._executable_on_core = dict()
 def __init__(self):
     self._sdram_usage = defaultdict(lambda: 0)
     self._region_sizes = dict()
     self._vertices_by_chip = defaultdict(list)
    def __call__(self, machine_graph, n_keys_map, routing_tables):
        # check that this algorithm supports the constraints
        check_algorithm_can_support_constraints(
            constrained_vertices=machine_graph.outgoing_edge_partitions,
            supported_constraints=[
                FixedMaskConstraint,
                FixedKeyAndMaskConstraint,
                ContiguousKeyRangeContraint],
            abstract_constraint_type=AbstractKeyAllocatorConstraint)

        # verify that no edge has more than one of a constraint, and that
        # constraints are compatible
        check_types_of_edge_constraint(machine_graph)

        routing_infos = RoutingInfo()

        # Get the edges grouped by those that require the same key
        (fixed_keys, _shared_keys, fixed_masks, fixed_fields, flexi_fields,
         continuous, noncontinuous) = \
            get_edge_groups(machine_graph, EdgeTrafficType.MULTICAST)
        if flexi_fields:
            raise PacmanConfigurationException(
                "MallocBasedRoutingInfoAllocator does not support FlexiField")

        # Treat even the non-continuous key groups as continuous
        for group in noncontinuous:
            continuous.add(group)

        # Go through the groups and allocate keys
        progress = ProgressBar(
            machine_graph.n_outgoing_edge_partitions,
            "Allocating routing keys")

        # allocate the groups that have fixed keys
        for group in progress.over(fixed_keys, False):
            # Get any fixed keys and masks from the group and attempt to
            # allocate them
            fixed_mask = None
            fixed_key_and_mask_constraint = locate_constraints_of_type(
                group.constraints, FixedKeyAndMaskConstraint)[0]

            # attempt to allocate them
            self._allocate_fixed_keys_and_masks(
                fixed_key_and_mask_constraint.keys_and_masks, fixed_mask)

            # update the pacman data objects
            self._update_routing_objects(
                fixed_key_and_mask_constraint.keys_and_masks, routing_infos,
                group)
            continuous.remove(group)

        for group in progress.over(fixed_masks, False):
            # get mask and fields if need be
            fixed_mask = locate_constraints_of_type(
                group.constraints, FixedMaskConstraint)[0].mask

            fields = None
            if group in fixed_fields:
                fields = locate_constraints_of_type(
                    group.constraints, FixedKeyFieldConstraint)[0].fields
                fixed_fields.remove(group)

            # try to allocate
            keys_and_masks = self._allocate_keys_and_masks(
                fixed_mask, fields, n_keys_map.n_keys_for_partition(group))

            # update the pacman data objects
            self._update_routing_objects(keys_and_masks, routing_infos, group)
            continuous.remove(group)

        for group in progress.over(fixed_fields, False):
            fields = locate_constraints_of_type(
                group.constraints, FixedKeyFieldConstraint)[0].fields

            # try to allocate
            keys_and_masks = self._allocate_keys_and_masks(
                None, fields, n_keys_map.n_keys_for_partition(group))

            # update the pacman data objects
            self._update_routing_objects(keys_and_masks, routing_infos, group)
            continuous.remove(group)

        # Sort the rest of the groups, using the routing tables for guidance
        # Group partitions by those which share routes in any table
        partition_groups = OrderedDict()
        routers = reversed(sorted(
            routing_tables.get_routers(),
            key=lambda item: len(routing_tables.get_entries_for_router(
                item[0], item[1]))))
        for x, y in routers:

            # Find all partitions that share a route in this table
            partitions_by_route = defaultdict(OrderedSet)
            routing_table = routing_tables.get_entries_for_router(x, y)
            for partition, entry in iteritems(routing_table):
                if partition in continuous:
                    entry_hash = sum(
                        1 << i
                        for i in entry.link_ids)
                    entry_hash += sum(
                        1 << (i + 6)
                        for i in entry.processor_ids)
                    partitions_by_route[entry_hash].add(partition)

            for entry_hash, partitions in iteritems(partitions_by_route):
                found_groups = list()
                for partition in partitions:
                    if partition in partition_groups:
                        found_groups.append(partition_groups[partition])

                if not found_groups:
                    # If no group was found, create a new one
                    for partition in partitions:
                        partition_groups[partition] = partitions

                elif len(found_groups) == 1:
                    # If a single other group was found, merge it
                    for partition in partitions:
                        found_groups[0].add(partition)
                        partition_groups[partition] = found_groups[0]

                else:
                    # Merge the groups
                    new_group = partitions
                    for group in found_groups:
                        for partition in group:
                            new_group.add(partition)
                    for partition in new_group:
                        partition_groups[partition] = new_group

        # Sort partitions by largest group
        continuous = list(OrderedSet(
            tuple(group) for group in itervalues(partition_groups)))

        for group in reversed(sorted(continuous, key=len)):
            for partition in progress.over(group, False):
                keys_and_masks = self._allocate_keys_and_masks(
                    None, None, n_keys_map.n_keys_for_partition(partition))

                # update the pacman data objects
                self._update_routing_objects(
                    keys_and_masks, routing_infos, partition)

        progress.end()
        return routing_infos
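# Illustrative worked example: the "entry_hash" above encodes a routing entry's
# link ids in bits 0-5 and its processor ids in bits 6 and up, so entries that
# share exactly the same route collapse to the same hash. For instance, an
# entry routed out of links {0, 2} and to processor {1} hashes to
#     (1 << 0) + (1 << 2) + (1 << (1 + 6)) = 1 + 4 + 128 = 133
entry_hash = sum(1 << i for i in (0, 2)) + sum(1 << (i + 6) for i in (1,))
assert entry_hash == 133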
예제 #46
0
    def fit(self, X, y=None, groups=None, callback=None):
        """Run fit on the estimator with randomly drawn parameters.

        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The training input samples.

        y : array-like, shape = [n_samples] or [n_samples, n_output]
            Target relative to X for classification or regression (class
            labels should be integers or strings).

        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.

        callback : callable or list of callables, optional
            If a callable is given, `callback(res)` is called after each
            parameter combination is tested. If a list of callables is given,
            each callable in the list is called.
        """

        # check if space is a single dict, convert to list if so
        search_spaces = self.search_spaces
        if isinstance(search_spaces, dict):
            search_spaces = [search_spaces]

        callbacks = check_callback(callback)

        if self.optimizer_kwargs is None:
            self.optimizer_kwargs_ = {}
        else:
            self.optimizer_kwargs_ = dict(self.optimizer_kwargs)
        random_state = check_random_state(self.random_state)
        self.optimizer_kwargs_['random_state'] = random_state

        # Instantiate optimizers for all the search spaces.
        optimizers = []
        for search_space in search_spaces:
            if isinstance(search_space, tuple):
                search_space = search_space[0]
            optimizers.append(self._make_optimizer(search_space))
        self.optimizers_ = optimizers  # will save the states of the optimizers

        self.cv_results_ = defaultdict(list)
        self.best_index_ = None
        self.multimetric_ = False
        self.optimizer_results_ = {}

        n_points = self.n_points

        for search_space, optimizer in zip(search_spaces, optimizers):
            # if the search space was not given as a (space, n_iter) tuple,
            # fall back to self.n_iter
            if isinstance(search_space, tuple):
                search_space, n_iter = search_space
            else:
                n_iter = self.n_iter

            # do the optimization for particular search space
            while n_iter > 0:
                # do not ask for more points than the remaining n_iter budget
                n_points_adjusted = min(n_iter, n_points)

                optim_result = self._step(X,
                                          y,
                                          search_space,
                                          optimizer,
                                          groups=groups,
                                          n_points=n_points_adjusted)
                n_iter -= n_points

                if eval_callbacks(callbacks, optim_result):
                    break

            self.optimizer_results_[optimizer] = optim_result

        # Refit the best model on the whole dataset
        if self.refit:
            self._fit_best_model(X, y)

        return self
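# Illustrative usage sketch: the fit() above follows the scikit-optimize
# BayesSearchCV pattern (search_spaces, n_iter, n_points, callbacks). Assuming
# the surrounding class is skopt.BayesSearchCV, or a close relative of it (an
# assumption based on the method body, not stated in the snippet), a typical
# call would look like this:
from sklearn.datasets import load_iris
from sklearn.svm import SVC
from skopt import BayesSearchCV
from skopt.space import Real

X, y = load_iris(return_X_y=True)
opt = BayesSearchCV(
    SVC(),
    {"C": Real(1e-3, 1e3, prior="log-uniform")},  # a single search-space dict
    n_iter=8,                                     # parameter settings sampled
    cv=3,
)
opt.fit(X, y)            # runs the per-search-space loop shown above
print(opt.best_params_)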
    def test_local_verts_go_to_local_lpgs(self):
        machine = VirtualMachine(width=12, height=12, with_wrap_arounds=True)
        graph = MachineGraph("Test")

        default_params = {
            'use_prefix': False,
            'key_prefix': None,
            'prefix_type': None,
            'message_type': EIEIOType.KEY_32_BIT,
            'right_shift': 0,
            'payload_as_time_stamps': True,
            'use_payload_prefix': True,
            'payload_prefix': None,
            'payload_right_shift': 0,
            'number_of_packets_sent_per_time_step': 0,
            'hostname': None,
            'port': None,
            'strip_sdp': None,
            'board_address': None,
            'tag': None}

        # data stores needed by algorithm
        live_packet_gatherers = dict()
        extended = dict(default_params)
        extended.update({'partition_id': "EVENTS"})
        default_params_holder = LivePacketGatherParameters(**extended)
        live_packet_gatherers[default_params_holder] = list()

        live_packet_gatherers_to_vertex_mapping = dict()
        live_packet_gatherers_to_vertex_mapping[default_params_holder] = dict()

        placements = Placements()

        # add LPGs (one for each Ethernet connected chip)
        for chip in machine.ethernet_connected_chips:
            extended = dict(default_params)
            extended.update({'label': 'test'})
            vertex = LivePacketGatherMachineVertex(**extended)
            graph.add_vertex(vertex)
            placements.add_placement(
                Placement(x=chip.x, y=chip.y, p=2, vertex=vertex))
            live_packet_gatherers_to_vertex_mapping[
                default_params_holder][chip.x, chip.y] = vertex

        # tracker of wirings
        verts_expected = defaultdict(list)
        positions = list()
        positions.append([0, 0, 0, 0])
        positions.append([4, 4, 0, 0])
        positions.append([1, 1, 0, 0])
        positions.append([2, 2, 0, 0])
        positions.append([8, 4, 8, 4])
        positions.append([11, 4, 8, 4])
        positions.append([4, 11, 4, 8])
        positions.append([4, 8, 4, 8])
        positions.append([0, 11, 8, 4])
        positions.append([11, 11, 4, 8])
        positions.append([8, 8, 4, 8])
        positions.append([4, 0, 0, 0])
        positions.append([7, 7, 0, 0])

        # add graph vertices placed at various points on the machine to ensure
        # a spread over boards
        for x, y, eth_x, eth_y in positions:
            vertex = SimpleMachineVertex(resources=ResourceContainer())
            graph.add_vertex(vertex)
            live_packet_gatherers[default_params_holder].append(vertex)
            verts_expected[eth_x, eth_y].append(vertex)
            placements.add_placement(Placement(x=x, y=y, p=5, vertex=vertex))

        # run the edge inserter
        edge_inserter = InsertEdgesToLivePacketGatherers()
        edge_inserter(
            live_packet_gatherer_parameters=live_packet_gatherers,
            placements=placements,
            live_packet_gatherers_to_vertex_mapping=(
                live_packet_gatherers_to_vertex_mapping),
            machine=machine, machine_graph=graph, application_graph=None,
            graph_mapper=None)

        # verify edges are in the right place
        for chip in machine.ethernet_connected_chips:
            edges = graph.get_edges_ending_at_vertex(
                live_packet_gatherers_to_vertex_mapping[
                    default_params_holder][chip.x, chip.y])
            for edge in edges:
                self.assertIn(edge.pre_vertex, verts_expected[chip.x, chip.y])
예제 #48
0
 def __init__(self):
     self.__data = defaultdict(dict)
예제 #49
0
File: term.py  Project: project-rig/spalloc
from collections import defaultdict

from six import string_types


def render_table(table, column_sep="  "):
    """ Render an ASCII table with optional ANSI escape codes.

    An example table::

        Something   Another thing  Finally
        some value  woah              1234
        ace         duuued              -1
        magic       rather good       9001

    Parameters
    ----------
    table : [row, ...]
        A table to render. Each row is an iterable of column values. A value
        may be given directly or as a tuple (f, value), where value is either
        a string to print left-aligned or an integer to print right-aligned,
        and f is a formatting function applied to the string before the table
        is finally displayed. Columns are padded to have matching widths
        *before* any formatting functions are applied.
    column_sep : str
        String inserted between each column.

    Returns
    -------
    str
        The formatted table.
    """
    # Determine maximum column widths
    column_widths = defaultdict(lambda: 0)
    for row in table:
        for i, column in enumerate(row):
            if isinstance(column, string_types):
                string = column
            elif isinstance(column, int):
                string = str(column)
            else:
                _, string = column
            column_widths[i] = max(len(str(string)), column_widths[i])

    # Render the table cells with padding [[str, ...], ...]
    out = []
    for row in table:
        rendered_row = []
        out.append(rendered_row)
        for i, column in enumerate(row):
            # Get string length and formatted string
            if isinstance(column, string_types):
                string = column
                length = len(string)
                right_align = False
            elif isinstance(column, int):
                string = str(column)
                length = len(string)
                right_align = True
            elif isinstance(column[1], string_types):
                f, string = column
                length = len(string)
                right_align = False
                string = f(string)
            elif isinstance(column[1], int):
                f, string = column
                length = len(str(string))
                right_align = True
                string = f(string)

            padding = " " * (column_widths[i] - length)
            if right_align:
                rendered_row.append(padding + string)
            else:
                rendered_row.append(string + padding)

    # Render the final table
    return "\n".join(column_sep.join(row).rstrip() for row in out)
예제 #50
0
 def __init__(self):
     self._sdram_usage = defaultdict(lambda: 0)
     self._region_sizes = dict()
     self._vertices_by_chip = defaultdict(list)