def extract_resource_doc(self, resource, url, route_doc=None):
    route_doc = {} if route_doc is None else route_doc
    if route_doc is False:
        return False
    doc = merge(getattr(resource, '__apidoc__', {}), route_doc)
    if doc is False:
        return False

    # Ensure unique names for multiple routes to the same resource;
    # this provides distinct Swagger operationIds
    doc["name"] = (
        "{}_{}".format(resource.__name__, url) if route_doc else resource.__name__
    )

    params = merge(self.expected_params(doc), doc.get('params', OrderedDict()))
    params = merge(params, extract_path_params(url))
    # Track parameters for late deduplication
    up_params = {(n, p.get('in', 'query')): p for n, p in params.items()}
    need_to_go_down = set()
    methods = [m.lower() for m in resource.methods or []]
    for method in methods:
        method_doc = doc.get(method, OrderedDict())
        method_impl = getattr(resource, method)
        if hasattr(method_impl, 'im_func'):
            method_impl = method_impl.im_func
        elif hasattr(method_impl, '__func__'):
            method_impl = method_impl.__func__
        method_doc = merge(method_doc,
                           getattr(method_impl, '__apidoc__', OrderedDict()))
        if method_doc is not False:
            method_doc['docstring'] = parse_docstring(method_impl)
            method_params = self.expected_params(method_doc)
            method_params = merge(method_params, method_doc.get('params', {}))
            inherited_params = OrderedDict(
                (k, v) for k, v in iteritems(params) if k in method_params
            )
            method_doc['params'] = merge(inherited_params, method_params)
            for name, param in method_doc['params'].items():
                key = (name, param.get('in', 'query'))
                if key in up_params:
                    need_to_go_down.add(key)
        doc[method] = method_doc

    # Deduplicate parameters:
    # for each pair (name, in), if a method overrides it,
    # we need to move the parameter down to each method
    if need_to_go_down:
        for method in methods:
            method_doc = doc.get(method)
            if not method_doc:
                continue
            params = {
                (n, p.get('in', 'query')): p
                for n, p in (method_doc['params'] or {}).items()
            }
            for key in need_to_go_down:
                if key not in params:
                    method_doc['params'][key[0]] = up_params[key]
    doc['params'] = OrderedDict(
        (k[0], p) for k, p in up_params.items() if k not in need_to_go_down
    )
    return doc
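# Illustration only (not part of the library): keying parameters by
# (name, location) is what lets the deduplication above tell apart
# parameters that share a name but live in different places.
params = {
    'id': {'in': 'path', 'required': True},
    'limit': {'in': 'query'},
}
up_params = {(n, p.get('in', 'query')): p for n, p in params.items()}
assert ('id', 'path') in up_params
assert ('limit', 'query') in up_params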
def test_marshal(self):
    model = OrderedDict([('foo', fields.Raw)])
    marshal_dict = OrderedDict([('foo', 'bar'), ('bat', 'baz')])
    output = marshal(marshal_dict, model)
    assert isinstance(output, dict)
    assert not isinstance(output, OrderedDict)
    assert output == {'foo': 'bar'}
def expected_params(self, doc):
    params = OrderedDict()
    if 'expect' not in doc:
        return params

    for expect in doc.get('expect', []):
        if isinstance(expect, RequestParser):
            parser_params = OrderedDict(
                (p['name'], p) for p in expect.__schema__)
            params.update(parser_params)
        elif isinstance(expect, ModelBase):
            params['payload'] = not_none({
                'name': 'payload',
                'required': True,
                'in': 'body',
                'schema': self.serialize_schema(expect),
            })
        elif isinstance(expect, (list, tuple)):
            if len(expect) == 2:
                # this is the (payload, description) shortcut
                model, description = expect
                params['payload'] = not_none({
                    'name': 'payload',
                    'required': True,
                    'in': 'body',
                    'schema': self.serialize_schema(model),
                    'description': description,
                })
            else:
                params['payload'] = not_none({
                    'name': 'payload',
                    'required': True,
                    'in': 'body',
                    'schema': self.serialize_schema(expect),
                })
    return params
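# Hedged usage sketch: the `expect` entries consumed above may be a
# RequestParser, a model, or a (model, description) tuple. This assumes a
# flask-restplus-style Api/Resource setup; the names below are illustrative.
from flask import Flask
from flask_restplus import Api, Resource, fields

app = Flask(__name__)
api = Api(app)
todo = api.model('Todo', {'task': fields.String})

@api.route('/todos/')
class TodoList(Resource):
    @api.doc(expect=[(todo, 'The new todo payload')])
    def post(self):
        pass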
def test_marshal_wildcard_nested(self):
    nest = fields.Nested(
        OrderedDict([('thumbnail', fields.String), ('video', fields.String)]))
    wild = fields.Wildcard(nest)
    wildcard_fields = OrderedDict([('*', wild)])
    model = OrderedDict([('preview', fields.Nested(wildcard_fields))])
    sub_dict = OrderedDict([
        ('9:16', {'thumbnail': 24, 'video': 12}),
        ('16:9', {'thumbnail': 25, 'video': 11}),
        ('1:1', {'thumbnail': 26, 'video': 10}),
    ])
    marshal_dict = OrderedDict([('preview', sub_dict)])
    output = marshal(marshal_dict, model)
    assert output == {
        'preview': {
            '1:1': {'thumbnail': '26', 'video': '10'},
            '16:9': {'thumbnail': '25', 'video': '11'},
            '9:16': {'thumbnail': '24', 'video': '12'},
        }
    }
def __init__(self, allowed_variables, n_neurons):
    self.__sampling_rates = OrderedDict()
    self.__indexes = dict()
    self.__n_neurons = n_neurons
    for variable in allowed_variables:
        self.__sampling_rates[variable] = 0
        self.__indexes[variable] = None
def setUp(self):
    self.tmpdir = TempDir("dataframetest")
    self.testfilename = os.path.join(self.tmpdir.path, "dataframetest.nix")
    self.file = nix.File.open(self.testfilename, nix.FileMode.Overwrite)
    self.block = self.file.create_block("test block", "recordingsession")
    self.df1_dtype = OrderedDict([('name', np.int64), ('id', str),
                                  ('time', float), ('sig1', np.float64),
                                  ('sig2', np.int32)])
    self.df1_data = [(1, "alpha", 20.18, 5.0, 100),
                     (2, "beta", 20.09, 5.5, 101),
                     (2, "gamma", 20.05, 5.1, 100),
                     (1, "delta", 20.15, 5.3, 150),
                     (2, "epsilon", 20.23, 5.7, 200),
                     (2, "fi", 20.07, 5.2, 300),
                     (1, "zeta", 20.12, 5.1, 39),
                     (1, "eta", 20.27, 5.1, 600),
                     (2, "theta", 20.15, 5.6, 400),
                     (2, "iota", 20.08, 5.1, 200)]
    other_arr = np.arange(11101, 11200).reshape((33, 3))
    other_di = OrderedDict({'name': np.int64, 'id': int, 'time': float})
    self.df1 = self.block.create_data_frame("test df", "signal1",
                                            data=self.df1_data,
                                            col_dict=self.df1_dtype)
    self.df2 = self.block.create_data_frame("other df", "signal2",
                                            data=self.df1_data,
                                            col_dict=self.df1_dtype)
    self.df3 = self.block.create_data_frame("reference df", "signal3",
                                            data=other_arr,
                                            col_dict=other_di)
    self.dtype = self.df1._h5group.group["data"].dtype
def test_marshal_wildcard_with_skip_none(self):
    wild = fields.Wildcard(fields.String)
    model = OrderedDict([('foo', fields.Raw), ('*', wild)])
    marshal_dict = OrderedDict([('foo', None), ('bat', None),
                                ('baz', 'biz'), ('bar', None)])
    output = marshal(marshal_dict, model, skip_none=True)
    assert output == {'baz': 'biz'}
def mergeliftOver(f1, f2, annotations, outputfile, verbose="F"):
    o = open(outputfile, 'w')
    # read in file 1 and make a dictionary keyed by the read ID
    readdict = OrderedDict()
    f = open(f1, 'r')
    for l in f:
        e = l.strip().split('\t')
        readdict[e[3]] = e[:3]
    f.close()
    # read in file 2 and append the matches
    f = open(f2, 'r')
    for l in f:
        e = l.strip().split('\t')
        if e[3] in readdict:
            readdict[e[3]] += e[:3]
    f.close()  # close file 2 before opening the annotations file
    f = open(annotations, 'r')
    for l in f:
        e = l.strip().split('\t')
        if e[-1] in readdict:
            readdict[e[-1]] += e[:-1]
    for i, val in readdict.items():
        print("\t".join(val), file=o)
    f.close()
    o.close()
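# Hypothetical usage sketch: three tab-separated inputs sharing an ID column
# (column 4 in f1/f2, last column in the annotation file), merged into one
# line per ID. The file contents below are made up for illustration.
import os
import tempfile

tmp = tempfile.mkdtemp()
inputs = {
    'f1.bed': "chr1\t10\t20\tread1\n",
    'f2.bed': "chr2\t30\t40\tread1\n",
    'ann.txt': "geneA\tread1\n",
}
for fname, contents in inputs.items():
    with open(os.path.join(tmp, fname), 'w') as fh:
        fh.write(contents)

out = os.path.join(tmp, 'merged.txt')
mergeliftOver(os.path.join(tmp, 'f1.bed'), os.path.join(tmp, 'f2.bed'),
              os.path.join(tmp, 'ann.txt'), out)
# merged.txt now contains one tab-separated line:
# chr1  10  20  chr2  30  40  geneA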
def test_with_nested_field(self, api):
    nested_fields = api.model('NestedModel', {'name': fields.String})
    field = fields.List(fields.Nested(nested_fields))
    assert field.__schema__ == {
        'type': 'array',
        'items': {'$ref': '#/definitions/NestedModel'},
    }

    data = [{'name': 'John Doe', 'age': 42}, {'name': 'Jane Doe', 'age': 66}]
    expected = [OrderedDict([('name', 'John Doe')]),
                OrderedDict([('name', 'Jane Doe')])]
    self.assert_field(field, data, expected)
def test_marshal_list_of_lists(self):
    model = OrderedDict([('foo', fields.Raw),
                         ('fee', fields.List(fields.List(fields.String)))])
    marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'),
                                  ('fee', [['fye'], ['fum']])])
    output = marshal(marshal_fields, model)
    expected = OrderedDict([('foo', 'bar'), ('fee', [['fye'], ['fum']])])
    assert output == expected
def merge(self, newly_generated, previously_generated):
    """Returns previously_generated with marked sections updated from the
    same marked sections in newly_generated. Everything outside these
    sections in previously_generated is returned as-is.

    A marked section starts with "// GENERATED {name} START" and ends with
    "// GENERATED {name} END". If previously_generated has a generated
    section replaced with "// GENERATED {name} DISABLED", that section will
    no longer be updated.
    """
    # If neither file has a GENERATED section, there's nothing for us to merge.
    if self._section_delimiter.search(newly_generated) is None and \
            self._section_delimiter.search(previously_generated) is None:
        LOG.warn("No generated sections found; not merging")
        return newly_generated

    # Extract the generated section names from the output and make sure
    # they're all matched
    sections = OrderedDict()
    matcher = Matcher(self._section_delimiter, newly_generated)
    for _ in matcher:
        section = self._extract_generated_section(matcher, newly_generated)
        if section.name in sections:
            raise RuntimeError("Section %s used more than once" % section.name)
        sections[section.name] = section

    # Merge with the previously generated source
    merged = []
    matcher = Matcher(self._section_delimiter, previously_generated)
    current_start = 0
    for _ in matcher:
        merged.append(previously_generated[current_start:matcher.start()])
        existing_section = self._extract_generated_section(
            matcher, previously_generated)
        new_section = sections.pop(existing_section.name, None)
        if new_section is None:
            # Allow generated sections to be dropped in the template, but
            # warn in case something odd is happening
            LOG.warn("Dropping previously-generated section '%s' that's no "
                     "longer generated by the template" % matcher.group(1))
        elif existing_section.disabled:
            # If the existing code disables this generation, keep the
            # disabled comment
            merged.append(existing_section.contents)
        else:
            # Otherwise drop in the newly-generated code in place of what
            # was there before
            merged.append(new_section.contents)
        current_start = matcher.end()

    # Add generated sections that weren't present in the old output before
    # the last non-generated code. It's a 50-50 shot, so warn when this happens
    for new_section in six.itervalues(sections):
        LOG.warn("Adding previously-missing generated section '%s' before "
                 "the last non-generated text" % new_section.name)
        merged.append(new_section.contents)

    # Add any text past the last previously-generated section
    merged.append(previously_generated[current_start:])
    return ''.join(merged)
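# Illustration (not from the source): how the START/END/DISABLED markers
# described in the docstring above are expected to interact when merging.
newly = """// GENERATED init START
new init code
// GENERATED init END"""

previously = """hand-written preamble
// GENERATED init START
old init code
// GENERATED init END
hand-written epilogue"""

# merge(newly, previously) would keep both hand-written parts and replace the
# body of the 'init' section with "new init code"; a
# "// GENERATED init DISABLED" marker in `previously` would instead freeze
# that section as-is.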
def test_list_of_raw(self):
    field = fields.Wildcard(fields.Raw)

    data = [{'a': 1, 'b': 1}, {'a': 2, 'b': 1}, {'a': 3, 'b': 1}]
    expected = [OrderedDict([('a', 1), ('b', 1)]),
                OrderedDict([('a', 2), ('b', 1)]),
                OrderedDict([('a', 3), ('b', 1)])]
    self.assert_field(field, data, expected)

    data = [1, 2, 'a']
    self.assert_field(field, data, data)
def test_marshal_nested_with_skip_none(self):
    model = OrderedDict([
        ('foo', fields.Raw),
        ('fee', fields.Nested(
            OrderedDict([('fye', fields.String)]), skip_none=True)),
    ])
    marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'),
                                  ('fee', None)])
    output = marshal(marshal_fields, model, skip_none=True)
    expected = OrderedDict([('foo', 'bar')])
    assert output == expected
def __init__(
        self, devices, create_edges, max_atoms_per_core, neuron_impl,
        pynn_model, translator=None, spikes_per_second=None, label=None,
        ring_buffer_sigma=None, incoming_spike_buffer_size=None,
        constraints=None):
    """
    :param devices:\
        The AbstractMulticastControllableDevice instances to be controlled\
        by the population
    :param create_edges:\
        True if edges to the devices should be added by this device (set\
        to False if using the device over Ethernet using a translator)
    :param translator:\
        Translator to be used when used for Ethernet communication. Must\
        be provided if the device is to be controlled over Ethernet.
    """
    # pylint: disable=too-many-arguments, too-many-locals
    if not devices:
        raise ConfigurationException("No devices specified")

    # Create a partition-to-key map
    self.__partition_id_to_key = OrderedDict(
        (str(dev.device_control_partition_id), dev.device_control_key)
        for dev in devices)

    # Create a partition-to-atom map
    self.__partition_id_to_atom = {
        partition: i
        for (i, partition) in enumerate(self.__partition_id_to_key.keys())
    }

    self.__devices = devices
    self.__message_translator = translator

    # Add the edges to the devices if required
    self.__dependent_vertices = list()
    if create_edges:
        self.__dependent_vertices = devices

    super(ExternalDeviceLifControlVertex, self).__init__(
        len(devices), label, constraints, max_atoms_per_core,
        spikes_per_second, ring_buffer_sigma, incoming_spike_buffer_size,
        neuron_impl, pynn_model)
def test_marshal_list_of_nesteds(self):
    model = OrderedDict([
        ('foo', fields.Raw),
        ('fee', fields.List(fields.Nested({'fye': fields.String}))),
    ])
    marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'),
                                  ('fee', {'fye': 'fum'})])
    output = marshal(marshal_fields, model)
    expected = OrderedDict([('foo', 'bar'),
                            ('fee', [OrderedDict([('fye', 'fum')])])])
    assert output == expected
def test_marshal_nested_with_null(self):
    model = OrderedDict([
        ('foo', fields.Raw),
        ('fee', fields.Nested(
            OrderedDict([('fye', fields.String), ('blah', fields.String)]),
            allow_null=True)),
    ])
    marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'),
                                  ('fee', None)])
    output = marshal(marshal_fields, model)
    expected = OrderedDict([('foo', 'bar'), ('fee', None)])
    assert output == expected
def test_order(self):
    parsed = Mask('f_3, nested{f_1, f_2, f_3}, f_2, f_1')
    expected = OrderedDict([
        ('f_3', True),
        ('nested', OrderedDict([
            ('f_1', True),
            ('f_2', True),
            ('f_3', True),
        ])),
        ('f_2', True),
        ('f_1', True),
    ])
    assert parsed == expected
def test_marshal_wildcard_list(self):
    wild = fields.Wildcard(fields.List(fields.String))
    wildcard_fields = OrderedDict([('*', wild)])
    model = OrderedDict([('preview', fields.Nested(wildcard_fields))])
    sub_dict = OrderedDict([('1:1', [1, 2, 3]),
                            ('16:9', [4, 5, 6]),
                            ('9:16', [7, 8, 9])])
    marshal_dict = OrderedDict([('preview', sub_dict)])
    output = marshal(marshal_dict, model)
    assert output == {
        'preview': {
            '9:16': ['7', '8', '9'],
            '16:9': ['4', '5', '6'],
            '1:1': ['1', '2', '3'],
        }
    }
def test_marshal_nested_dict(self):
    model = OrderedDict([
        ('foo', fields.Raw),
        ('bar', OrderedDict([
            ('a', fields.Raw),
            ('b', fields.Raw),
        ])),
    ])
    marshal_fields = OrderedDict([('foo', 'foo-val'), ('bar', 'bar-val'),
                                  ('bat', 'bat-val'), ('a', 1), ('b', 2),
                                  ('c', 3)])
    output = marshal(marshal_fields, model)
    expected = OrderedDict([('foo', 'foo-val'),
                            ('bar', OrderedDict([('a', 1), ('b', 2)]))])
    assert output == expected
def test_marshal_wildcard_with_envelope(self):
    wild = fields.Wildcard(fields.String)
    model = OrderedDict([('foo', fields.Raw), ('*', wild)])
    marshal_dict = OrderedDict([('foo', {'bat': 'baz'}),
                                ('a', 'toto'), ('b', 'tata')])
    output = marshal(marshal_dict, model, envelope='hey')
    assert output == {
        'hey': {
            'a': 'toto',
            'b': 'tata',
            'foo': {'bat': 'baz'},
        }
    }
def _load_table(self, resource_name):
    """Build table structure from resource data

    :param resource_name:
    """
    tabular_resource = self.__tabular_resources[resource_name]
    try:
        # Sorting fields in the same order as they appear in the schema
        # is necessary for tables to be converted into pandas.DataFrame
        fields = []
        if 'schema' in tabular_resource.descriptor:
            fields = [f['name'] for f in
                      tabular_resource.descriptor['schema']['fields']]
        elif len(tabular_resource.read(keyed=True)) > 0:
            fields = tabular_resource.read(keyed=True)[0].keys()
        return [order_columns_in_row(fields, row)
                for row in tabular_resource.read(keyed=True)]
    except (AttributeError, SchemaValidationError, ValueError,
            TypeError) as e:
        warnings.warn(
            'Unable to set column types automatically using {} schema. '
            'Data types may need to be adjusted manually. '
            'Error: {}'.format(resource_name, e))
        self.__invalid_schemas.append(resource_name)
        file_format = tabular_resource.descriptor['format']
        with Stream(io.BytesIO(self.raw_data[resource_name]),
                    format=file_format, headers=1, scheme='stream',
                    encoding='utf-8') as stream:
            return [OrderedDict(zip(stream.headers, row))
                    for row in stream.iter()]
def test_with_ordered_dict(self):
    data = OrderedDict({
        'integer': 42,
        'string': 'a string',
        'boolean': True,
    })
    result = mask.apply(data, '{integer, string}')
    assert result == {'integer': 42, 'string': 'a string'}
def test_marshal_decorator_with_envelope(self):
    model = OrderedDict([('foo', fields.Raw)])

    @marshal_with(model, envelope='hey')
    def try_me():
        return OrderedDict([('foo', 'bar'), ('bat', 'baz')])

    assert try_me() == {'hey': {'foo': 'bar'}}
def test_marshal_decorator(self):
    model = OrderedDict([('foo', fields.Raw)])

    @marshal_with(model)
    def try_me():
        return OrderedDict([('foo', 'bar'), ('bat', 'baz')])

    assert try_me() == {'foo': 'bar'}
def test_marshal_decorator_tuple(self):
    model = OrderedDict([('foo', fields.Raw)])

    @marshal_with(model)
    def try_me():
        headers = {'X-test': 123}
        return OrderedDict([('foo', 'bar'), ('bat', 'baz')]), 200, headers

    assert try_me() == ({'foo': 'bar'}, 200, {'X-test': 123})
def test_marshal_decorator_with_skip_none(self):
    model = OrderedDict([('foo', fields.Raw), ('bat', fields.Raw),
                         ('qux', fields.Raw)])

    @marshal_with(model, skip_none=True)
    def try_me():
        return OrderedDict([('foo', 'bar'), ('bat', None)])

    assert try_me() == {'foo': 'bar'}
def not_none_sorted(data):
    '''
    Remove all keys whose value is ``None``, sorting the remaining keys.

    :param OrderedDict data: A dictionary with potentially some values set to None
    :return: The same dictionary, sorted by key, without the keys whose value is ``None``
    :rtype: OrderedDict
    '''
    return OrderedDict((k, v) for k, v in sorted(iteritems(data)) if v is not None)
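# Quick check of the behaviour described above (None values dropped, keys
# sorted); assumes `six` is available for iteritems, as in the function above.
from collections import OrderedDict

data = OrderedDict([('b', 1), ('c', None), ('a', 2)])
assert not_none_sorted(data) == OrderedDict([('a', 2), ('b', 1)])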
def test_skip_none_presents_data(self):
    model = OrderedDict([
        ('foo', fields.Raw),
        ('fee', fields.Nested(
            OrderedDict([('fye', fields.String), ('blah', fields.String),
                         ('foe', fields.String)]),
            skip_none=True)),
    ])
    marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'),
                                  ('fee', {'blah': 'cool', 'foe': None})])
    output = marshal(marshal_fields, model)
    expected = OrderedDict([('foo', 'bar'),
                            ('fee', OrderedDict([('blah', 'cool')]))])
    assert output == expected
def __init__(self, file):
    self.file = file
    self.errors = OrderedDict()
    self.errors['file_errors'] = []
    self.errors['blocks'] = []
    self.errors['sections'] = []
    # Only for use by file.py; the count will not be correct if a function
    # addressing the same object is called more than once
    self.error_count = 0
def __iter__(self):
    for token in base.Filter.__iter__(self):
        if token["type"] in ("StartTag", "EmptyTag"):
            attrs = OrderedDict()
            for name, value in sorted(token["data"].items(), key=_attr_key):
                attrs[name] = value
            token["data"] = attrs
        yield token
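# Hedged usage sketch: the filter above looks like html5lib's
# alphabeticalattributes filter; if so, the same attribute ordering can be
# requested directly at serialization time:
import html5lib
from html5lib.serializer import HTMLSerializer

dom = html5lib.parse('<p b="2" a="1">hi</p>', treebuilder='dom')
walker = html5lib.getTreeWalker('dom')
html = HTMLSerializer(alphabetical_attributes=True).render(walker(dom))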
def test_timestamp_noautoupdate(self):
    self.file.auto_update_timestamps = False
    df = self.block.create_data_frame("df.time", "test.time",
                                      col_dict=OrderedDict({"idx": int}))
    dftime = df.updated_at
    time.sleep(1)
    df.units = ("ly",)
    self.assertEqual(dftime, df.updated_at)
def group_exceptions(error_requests, exceptions, tracebacks):
    """ Groups exceptions into a form usable by an exception.

    :param error_requests: the error requests
    :param exceptions: the exceptions
    :param tracebacks: the tracebacks
    :return: a sorted exception pile
    :rtype: iterable((Exception, _Group))
    """
    data = OrderedDict()
    for error_request, exception, trace_back in zip(
            error_requests, exceptions, tracebacks):
        for stored_exception in data.keys():
            if isinstance(exception, type(stored_exception)):
                found_exception = stored_exception
                break
        else:
            data[exception] = _Group(trace_back)
            found_exception = exception
        data[found_exception].add_coord(error_request.sdp_header)
    for exception in data:
        data[exception].finalise()
    return data.items()
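# Self-contained sketch of the grouping pattern used above: bucket items by
# the type of an exception already seen, using Python's for/else.
from collections import OrderedDict

groups = OrderedDict()
for exc in [ValueError('a'), TypeError('b'), ValueError('c')]:
    for seen in groups:
        if isinstance(exc, type(seen)):
            groups[seen].append(exc)
            break
    else:
        # no bucket of this exception type yet; start one
        groups[exc] = [exc]
# groups now has two buckets: one keyed by ValueError('a'), one by TypeError('b')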
class Chip(object):
    """ Represents a SpiNNaker chip with a number of cores, an amount of\
        SDRAM shared between the cores, and a router.\
        The chip is iterable over the processors, yielding\
        (processor_id, processor) where:

            * processor_id is the ID of a processor
            * processor is the processor with processor_id
    """

    # tag 0 is reserved for stuff like IO STD
    IPTAG_IDS = OrderedSet(range(1, 8))

    __slots__ = (
        "_x", "_y", "_p", "_router", "_sdram", "_ip_address", "_virtual",
        "_tag_ids", "_nearest_ethernet_x", "_nearest_ethernet_y",
        "_n_user_processors"
    )

    @staticmethod
    def default_processors():
        processors = dict()
        processors[0] = Processor.factory(0, True)
        for i in range(1, Machine.MAX_CORES_PER_CHIP):
            processors[i] = Processor.factory(i)
        return processors

    DEFAULT_PROCESSORS = default_processors.__func__()

    # pylint: disable=too-many-arguments
    def __init__(self, x, y, processors, router, sdram, nearest_ethernet_x,
                 nearest_ethernet_y, ip_address=None, virtual=False,
                 tag_ids=None):
        """
        :param x: the x-coordinate of the chip's position in the\
            two-dimensional grid of chips
        :type x: int
        :param y: the y-coordinate of the chip's position in the\
            two-dimensional grid of chips
        :type y: int
        :param processors: an iterable of processor objects
        :type processors: iterable(:py:class:`~spinn_machine.Processor`)
        :param router: a router for the chip
        :type router: :py:class:`~spinn_machine.Router`
        :param sdram: an SDRAM for the chip
        :type sdram: :py:class:`~spinn_machine.SDRAM`
        :param ip_address: \
            the IP address of the chip or None if no Ethernet attached
        :type ip_address: str
        :param virtual: boolean which defines if this chip is a virtual one
        :type virtual: bool
        :param tag_ids: IDs to identify the chip for SDP; can be empty to\
            define no tags, or None to allocate tags automatically based on\
            whether there is an ip_address
        :type tag_ids: iterable(int) or None
        :param nearest_ethernet_x: the nearest Ethernet x coordinate
        :type nearest_ethernet_x: int or None
        :param nearest_ethernet_y: the nearest Ethernet y coordinate
        :type nearest_ethernet_y: int or None
        :raise spinn_machine.exceptions.SpinnMachineAlreadyExistsException: \
            If processors contains any two processors with the same\
            processor_id
        """
        self._x = x
        self._y = y
        if processors is None:
            self._p = Chip.DEFAULT_PROCESSORS
            self._n_user_processors = Machine.MAX_CORES_PER_CHIP - 1
        else:
            self._p = OrderedDict()
            self._n_user_processors = 0
            for processor in sorted(processors, key=lambda i: i.processor_id):
                if processor.processor_id in self._p:
                    raise SpinnMachineAlreadyExistsException(
                        "processor on {}:{}".format(x, y),
                        str(processor.processor_id))
                self._p[processor.processor_id] = processor
                if not processor.is_monitor:
                    self._n_user_processors += 1
        self._router = router
        self._sdram = sdram
        self._ip_address = ip_address
        if tag_ids is not None:
            self._tag_ids = tag_ids
        elif self._ip_address is None:
            self._tag_ids = []
        else:
            self._tag_ids = self.IPTAG_IDS
        self._virtual = virtual
        self._nearest_ethernet_x = nearest_ethernet_x
        self._nearest_ethernet_y = nearest_ethernet_y

    def is_processor_with_id(self, processor_id):
        """ Determines if a processor with the given ID exists in the chip.\
            Also implemented as __contains__(processor_id)

        :param processor_id: the processor ID to check for
        :type processor_id: int
        :return: Whether the processor with the given ID exists
        :rtype: bool
        :raise None: does not raise any known exceptions
        """
        return processor_id in self._p

    def get_processor_with_id(self, processor_id):
        """ Return the processor with the specified ID, or None if the\
            processor does not exist.

        :param processor_id: the ID of the processor to return
        :type processor_id: int
        :return: \
            the processor with the specified ID, or None if no such processor
        :rtype: :py:class:`~spinn_machine.Processor`
        :raise None: does not raise any known exceptions
        """
        if processor_id in self._p:
            return self._p[processor_id]
        return None

    @property
    def x(self):
        """ The x-coordinate of the chip in the two-dimensional grid of chips

        :return: the x-coordinate of the chip
        :rtype: int
        :raise None: does not raise any known exceptions
        """
        return self._x

    @property
    def y(self):
        """ The y-coordinate of the chip in the two-dimensional grid of chips

        :return: the y-coordinate of the chip
        :rtype: int
        :raise None: does not raise any known exceptions
        """
        return self._y

    @property
    def processors(self):
        """ An iterable of available processors

        :return: iterable of processors
        :rtype: iterable(:py:class:`~spinn_machine.Processor`)
        :raise None: does not raise any known exceptions
        """
        return itervalues(self._p)

    @property
    def n_processors(self):
        """ The total number of processors
        """
        return len(self._p)

    @property
    def n_user_processors(self):
        """ The total number of processors that are not monitors
        """
        return self._n_user_processors

    @property
    def virtual(self):
        """ Boolean which defines if the chip is virtual or not

        :return: if the chip is virtual
        :rtype: boolean
        :raise None: this method does not raise any known exceptions
        """
        return self._virtual

    @property
    def router(self):
        """ The router object associated with the chip

        :return: router associated with the chip
        :rtype: :py:class:`~spinn_machine.Router`
        :raise None: does not raise any known exceptions
        """
        return self._router

    @property
    def sdram(self):
        """ The SDRAM associated with the chip

        :return: SDRAM associated with the chip
        :rtype: :py:class:`~spinn_machine.SDRAM`
        :raise None: does not raise any known exceptions
        """
        return self._sdram

    @property
    def ip_address(self):
        """ The IP address of the chip

        :return: IP address of the chip, or None if there is no Ethernet\
            connected to the chip
        :rtype: str
        :raise None: does not raise any known exceptions
        """
        return self._ip_address

    @property
    def nearest_ethernet_x(self):
        """ The x coordinate of the nearest Ethernet chip

        :return: the x coordinate of the nearest Ethernet chip
        :rtype: int
        :raise None: does not raise any known exceptions
        """
        return self._nearest_ethernet_x

    @property
    def nearest_ethernet_y(self):
        """ The y coordinate of the nearest Ethernet chip

        :return: the y coordinate of the nearest Ethernet chip
        :rtype: int
        :raise None: does not raise any known exceptions
        """
        return self._nearest_ethernet_y

    @property
    def tag_ids(self):
        """ The tag IDs supported by this chip

        :return: the set of IDs
        :raise None: this method does not raise any exception
        """
        return self._tag_ids

    def get_first_none_monitor_processor(self):
        """ Get the first processor in the list which is not a monitor core

        :return: a processor
        """
        for processor in self.processors:
            if not processor.is_monitor:
                return processor

    def __iter__(self):
        """ Get an iterable of processor identifiers and processors

        :return: An iterable of (processor_id, processor) where:
            * processor_id is the ID of a processor
            * processor is the processor with the ID
        :rtype: iterable(int,:py:class:`~spinn_machine.Processor`)
        :raise None: does not raise any known exceptions
        """
        return iteritems(self._p)

    def __len__(self):
        """ The number of processors associated with this chip.

        :return: The number of items in the underlying iterator.
        :rtype: int
        """
        return len(self._p)

    def __getitem__(self, processor_id):
        if processor_id in self._p:
            return self._p[processor_id]
        # Note difference from get_processor_with_id(); this is to conform to
        # standard Python semantics
        raise KeyError(processor_id)

    def __contains__(self, processor_id):
        return self.is_processor_with_id(processor_id)

    __REPR_TEMPLATE = ("[Chip: x={}, y={}, sdram={}, ip_address={}, "
                       "router={}, processors={}, nearest_ethernet={}:{}]")

    def __str__(self):
        return self.__REPR_TEMPLATE.format(
            self._x, self._y, self.sdram, self.ip_address, self.router,
            list(self._p.values()), self._nearest_ethernet_x,
            self._nearest_ethernet_y)

    def __repr__(self):
        return self.__str__()
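# Hedged usage notes (spinn_machine-style API as suggested by the class
# above): Chip behaves like a read-only container of Processor objects keyed
# by processor_id.
#
#   chip = Chip(0, 0, None, router, sdram, 0, 0, ip_address="192.168.240.1")
#   chip[1]                   # Processor with ID 1, KeyError if absent
#   1 in chip                 # same as chip.is_processor_with_id(1)
#   len(chip)                 # number of processors on the chip
#   for p_id, proc in chip:   # iteration yields (processor_id, processor)
#       ...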
class Graph(ConstrainedObject, AbstractGraph):
    """ A graph implementation that specifies the allowed types of the\
        vertices and edges.
    """

    __slots__ = [
        # The classes of vertex that are allowed in this graph
        "_allowed_vertex_types",
        # The classes of edges that are allowed in this graph
        "_allowed_edge_types",
        # The classes of outgoing edge partition that are allowed in this
        # graph
        "_allowed_partition_types",
        # The vertices of the graph
        "_vertices",
        # The outgoing edge partitions of the graph by name
        "_outgoing_edge_partitions_by_name",
        # The outgoing edges by pre-vertex
        "_outgoing_edges",
        # The incoming edges by post-vertex
        "_incoming_edges",
        # map between incoming edges and their associated partitions
        "_incoming_edges_by_partition_name",
        # The outgoing edge partitions by pre-vertex
        "_outgoing_edge_partitions_by_pre_vertex",
        # the outgoing partitions by edge
        "_outgoing_edge_partition_by_edge",
        # The label of the graph
        "_label"]

    def __init__(self, allowed_vertex_types, allowed_edge_types,
                 allowed_partition_types, label):
        """
        :param allowed_vertex_types:\
            A single or tuple of types of vertex to be allowed in the graph
        :param allowed_edge_types:\
            A single or tuple of types of edges to be allowed in the graph
        :param allowed_partition_types:\
            A single or tuple of types of partitions to be allowed in the graph
        :param label: The label on the graph, or None
        """
        super(Graph, self).__init__(None)
        self._allowed_vertex_types = allowed_vertex_types
        self._allowed_edge_types = allowed_edge_types
        self._allowed_partition_types = allowed_partition_types
        self._vertices = OrderedSet()
        self._outgoing_edge_partitions_by_name = OrderedDict()
        self._outgoing_edges = DefaultOrderedDict(OrderedSet)
        self._incoming_edges = DefaultOrderedDict(OrderedSet)
        self._incoming_edges_by_partition_name = DefaultOrderedDict(list)
        self._outgoing_edge_partitions_by_pre_vertex = \
            DefaultOrderedDict(OrderedSet)
        self._outgoing_edge_partition_by_edge = OrderedDict()
        self._label = label

    @property
    @overrides(AbstractGraph.label)
    def label(self):
        return self._label

    @overrides(AbstractGraph.add_vertex)
    def add_vertex(self, vertex):
        if not isinstance(vertex, self._allowed_vertex_types):
            raise PacmanInvalidParameterException(
                "vertex", vertex.__class__,
                "Vertices of this graph must be one of the following types:"
                " {}".format(self._allowed_vertex_types))
        self._vertices.add(vertex)

    @overrides(AbstractGraph.add_edge)
    def add_edge(self, edge, outgoing_edge_partition_name):
        # verify that the edge is one suitable for this graph
        if not isinstance(edge, self._allowed_edge_types):
            raise PacmanInvalidParameterException(
                "edge", edge.__class__,
                "Edges of this graph must be one of the following types:"
                " {}".format(self._allowed_edge_types))
        if edge.pre_vertex not in self._vertices:
            raise PacmanInvalidParameterException(
                "edge", edge.pre_vertex, "pre-vertex must be known in graph")
        if edge.post_vertex not in self._vertices:
            raise PacmanInvalidParameterException(
                "edge", edge.post_vertex,
                "post-vertex must be known in graph")

        # Add the edge to the partition
        partition = None
        if ((edge.pre_vertex, outgoing_edge_partition_name)
                not in self._outgoing_edge_partitions_by_name):
            partition = OutgoingEdgePartition(
                outgoing_edge_partition_name, self._allowed_edge_types)
            self._outgoing_edge_partitions_by_pre_vertex[
                edge.pre_vertex].add(partition)
            self._outgoing_edge_partitions_by_name[
                edge.pre_vertex, outgoing_edge_partition_name] = partition
        else:
            partition = self._outgoing_edge_partitions_by_name[
                edge.pre_vertex, outgoing_edge_partition_name]
        partition.add_edge(edge)

        # Add the edge to the indices
        self._outgoing_edges[edge.pre_vertex].add(edge)
        self._incoming_edges_by_partition_name[
            (edge.post_vertex, outgoing_edge_partition_name)].append(edge)
        self._incoming_edges[edge.post_vertex].add(edge)
        self._outgoing_edge_partition_by_edge[edge] = partition

    @overrides(AbstractGraph.add_outgoing_edge_partition)
    def add_outgoing_edge_partition(self, outgoing_edge_partition):
        # verify that this partition is suitable for this graph
        if not isinstance(
                outgoing_edge_partition, self._allowed_partition_types):
            raise PacmanInvalidParameterException(
                "outgoing_edge_partition", outgoing_edge_partition.__class__,
                "Partitions of this graph must be one of the following types:"
                " {}".format(self._allowed_partition_types))

        # check this partition doesn't already exist
        if ((outgoing_edge_partition.pre_vertex,
                outgoing_edge_partition.identifier)
                in self._outgoing_edge_partitions_by_name):
            raise PacmanAlreadyExistsException(
                "{}".format(OutgoingEdgePartition.__class__),
                (outgoing_edge_partition.pre_vertex,
                 outgoing_edge_partition.identifier))

        self._outgoing_edge_partitions_by_pre_vertex[
            outgoing_edge_partition.pre_vertex].add(outgoing_edge_partition)
        self._outgoing_edge_partitions_by_name[
            outgoing_edge_partition.pre_vertex,
            outgoing_edge_partition.identifier] = outgoing_edge_partition

    @property
    @overrides(AbstractGraph.vertices)
    def vertices(self):
        return self._vertices

    @property
    @overrides(AbstractGraph.n_vertices)
    def n_vertices(self):
        return len(self._vertices)

    @property
    @overrides(AbstractGraph.edges)
    def edges(self):
        return [
            edge
            for partition in self._outgoing_edge_partitions_by_name.values()
            for edge in partition.edges]

    @property
    @overrides(AbstractGraph.outgoing_edge_partitions)
    def outgoing_edge_partitions(self):
        return self._outgoing_edge_partitions_by_name.values()

    @property
    @overrides(AbstractGraph.n_outgoing_edge_partitions)
    def n_outgoing_edge_partitions(self):
        return len(self._outgoing_edge_partitions_by_name)

    @overrides(AbstractGraph.get_outgoing_partition_for_edge)
    def get_outgoing_partition_for_edge(self, edge):
        return self._outgoing_edge_partition_by_edge[edge]

    @overrides(AbstractGraph.get_edges_starting_at_vertex)
    def get_edges_starting_at_vertex(self, vertex):
        return self._outgoing_edges[vertex]

    @overrides(AbstractGraph.get_edges_ending_at_vertex)
    def get_edges_ending_at_vertex(self, vertex):
        if vertex not in self._incoming_edges:
            return []
        return self._incoming_edges[vertex]

    @overrides(AbstractGraph.get_edges_ending_at_vertex_with_partition_name)
    def get_edges_ending_at_vertex_with_partition_name(
            self, vertex, partition_name):
        key = (vertex, partition_name)
        if key not in self._incoming_edges_by_partition_name:
            return []
        return self._incoming_edges_by_partition_name[key]

    @overrides(AbstractGraph.get_outgoing_edge_partitions_starting_at_vertex)
    def get_outgoing_edge_partitions_starting_at_vertex(self, vertex):
        return self._outgoing_edge_partitions_by_pre_vertex[vertex]

    @overrides(AbstractGraph.get_outgoing_edge_partition_starting_at_vertex)
    def get_outgoing_edge_partition_starting_at_vertex(
            self, vertex, outgoing_edge_partition_name):
        return self._outgoing_edge_partitions_by_name.get(
            (vertex, outgoing_edge_partition_name), None)
def get_edge_groups(machine_graph, traffic_type):
    """ Utility method to get groups of edges using any\
        :py:class:`~pacman.model.constraints.key_allocator_constraints.KeyAllocatorSameKeyConstraint`\
        constraints. Note that no checking is done here about conflicts\
        related to other constraints.

    :param machine_graph: the machine graph
    :param traffic_type: the traffic type to group
    """
    # mapping between partition and the shared key group it is in
    partition_groups = OrderedDict()

    # process each partition one by one, in a bubble-sort kind of way
    for vertex in machine_graph.vertices:
        for partition in machine_graph.\
                get_outgoing_edge_partitions_starting_at_vertex(vertex):

            # only process partitions of the correct traffic type
            if partition.traffic_type == traffic_type:

                # Get a set of partitions that should be grouped together
                shared_key_constraints = locate_constraints_of_type(
                    partition.constraints, ShareKeyConstraint)
                partitions_to_group = [partition]
                for constraint in shared_key_constraints:
                    partitions_to_group.extend(constraint.other_partitions)

                # Get a set of groups that should be grouped
                groups_to_group = [
                    partition_groups.get(part_to_group, [part_to_group])
                    for part_to_group in partitions_to_group]

                # Group the groups
                new_group = ConstraintGroup(
                    part for group in groups_to_group for part in group)
                partition_groups.update(
                    {part: new_group for part in new_group})

    # Keep track of groups
    fixed_key_groups = list()
    shared_key_groups = list()
    fixed_mask_groups = list()
    fixed_field_groups = list()
    flexi_field_groups = list()
    continuous_groups = list()
    noncontinuous_groups = list()
    groups_by_type = {
        FixedKeyAndMaskConstraint: fixed_key_groups,
        FixedMaskConstraint: fixed_mask_groups,
        FixedKeyFieldConstraint: fixed_field_groups,
        FlexiKeyFieldConstraint: flexi_field_groups,
    }
    groups = OrderedSet(itervalues(partition_groups))
    for group in groups:

        # Get all expected constraints in the group
        constraints = [
            constraint for partition in group
            for constraint in locate_constraints_of_type(
                partition.constraints,
                (FixedKeyAndMaskConstraint, FixedMaskConstraint,
                 FlexiKeyFieldConstraint, FixedKeyFieldConstraint))]

        # Check that the possibly conflicting constraints are equal
        if constraints and not all(
                constraint_a == constraint_b
                for constraint_a in constraints
                for constraint_b in constraints):
            raise PacmanRouteInfoAllocationException(
                "The group of partitions {} have conflicting constraints"
                .format(constraints))

        # If no constraints, must be one of the non-specific groups
        if not constraints:

            # If the group has only one item, it is not shared
            if len(group) == 1:
                # search the partition's own constraints (the filtered
                # `constraints` list is empty on this branch)
                continuous_constraints = [
                    constraint for partition in group
                    for constraint in locate_constraints_of_type(
                        partition.constraints, ContiguousKeyRangeContraint)]
                if continuous_constraints:
                    continuous_groups.append(group)
                else:
                    noncontinuous_groups.append(group)

            # If the group has more than one partition, it must be shared
            else:
                shared_key_groups.append(group)

        # If constraints found, put the group in the appropriate constraint
        # group
        else:
            group._set_constraint(constraints[0])
            constraint_type = type(constraints[0])
            groups_by_type[constraint_type].append(group)

    # return the set of groups
    return (fixed_key_groups, shared_key_groups, fixed_mask_groups,
            fixed_field_groups, flexi_field_groups, continuous_groups,
            noncontinuous_groups)
def __init__(self, default_factory=None, *a, **kw):
    if (default_factory is not None and
            not isinstance(default_factory, Callable)):
        raise TypeError('first argument must be callable')
    OrderedDict.__init__(self, *a, **kw)
    self.default_factory = default_factory
def __getitem__(self, key):
    try:
        return OrderedDict.__getitem__(self, key)
    except KeyError:
        return self.__missing__(key)
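# The two methods above rely on a __missing__ hook that isn't shown here.
# A minimal self-contained sketch of the full class, assuming it mirrors
# collections.defaultdict (CPython's dict already calls __missing__ from
# __getitem__, so the explicit override above mainly makes the hook portable):
from collections import OrderedDict
from collections.abc import Callable

class DefaultOrderedDict(OrderedDict):
    def __init__(self, default_factory=None, *a, **kw):
        if (default_factory is not None and
                not isinstance(default_factory, Callable)):
            raise TypeError('first argument must be callable')
        OrderedDict.__init__(self, *a, **kw)
        self.default_factory = default_factory

    def __missing__(self, key):
        # Mirror defaultdict: build, store, and return a default value
        if self.default_factory is None:
            raise KeyError(key)
        self[key] = value = self.default_factory()
        return value

d = DefaultOrderedDict(list)
d['edges'].append('e1')  # key created on first access, insertion order kept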
def create_data_frame(self, name="", type_="", col_dict=None, col_names=None,
                      col_dtypes=None, data=None,
                      compression=Compression.No,
                      copy_from=None, keep_copy_id=True):
    """
    Create/copy a new data frame for this block. Either ``col_dict``
    or ``col_names`` and ``col_dtypes`` must be given.
    If both are given, ``col_dict`` will be used.

    :param name: The name of the data frame to create/copy.
    :type name: str
    :param type_: The type of the data frame.
    :type type_: str
    :param col_dict: The dictionary that specifies the column names and the
                     data type of each column
    :type col_dict: dict or OrderedDict of {str: type}
    :param col_names: The collection of names of all columns in order
    :type col_names: tuples or list or np.array of string
    :param col_dtypes: The collection of data types of all columns in order
    :type col_dtypes: tuples or list or np.array of type
    :param data: Data to write after storage has been created
    :type data: array-like data with compound data type as specified in
                the columns
    :param compression: En-/disable dataset compression.
    :type compression: :class:`~nixio.Compression`
    :param copy_from: The DataFrame to be copied, None in normal mode
    :type copy_from: DataFrame
    :param keep_copy_id: Specify if the id should be copied in copy mode
    :type keep_copy_id: bool

    :returns: The newly created data frame.
    :rtype: :class:`~nixio.DataFrame`
    """
    if copy_from:
        if not isinstance(copy_from, DataFrame):
            raise TypeError("Object to be copied is not a DataFrame")
        id = self._copy_objects(copy_from, "data_frames",
                                keep_copy_id, name)
        return self.data_frames[id]

    util.check_entity_name_and_type(name, type_)
    if (isinstance(col_dict, dict)
            and not isinstance(col_dict, OrderedDict)
            and sys.version_info[0] < 3):
        raise TypeError("Python 2 users should use name_list "
                        "or OrderedDict created with LIST and TUPLES "
                        "to create DataFrames as the order "
                        "of the columns cannot be maintained in Py2")

    if data is not None:
        shape = len(data)
    else:
        shape = 0
    data_frames = self._h5group.open_group("data_frames")

    if col_dict is None:
        if col_names is not None:
            if col_dtypes is not None:
                col_dict = OrderedDict(
                    (str(nam), dt)
                    for nam, dt in zip(col_names, col_dtypes)
                )
            elif col_dtypes is None and data is not None:
                col_dtypes = []
                for x in data[0]:
                    col_dtypes.append(type(x))
                col_dict = OrderedDict(
                    (str(nam), dt)
                    for nam, dt in zip(col_names, col_dtypes)
                )
            else:  # col_dtypes is None and data is None
                raise ValueError(
                    "The data type of each column has to be specified")
        else:  # if col_names is None
            if data is not None and type(data[0]) == np.void:
                col_dtype = data[0].dtype
                for i, dt in enumerate(col_dtype.fields.values()):
                    if dt[0] == np.dtype(str):
                        cn = list(col_dtype.fields.keys())
                        raw_dt = col_dtype.fields.values()
                        raw_dt = list(raw_dt)
                        raw_dt_list = [ele[0] for ele in raw_dt]
                        col_dict = OrderedDict(zip(cn, raw_dt_list))
            else:
                # data is None or type(data[0]) != np.void
                # data_type doesn't matter
                raise ValueError(
                    "No information about column names is provided!")

    if col_dict is not None:
        for nam, dt in col_dict.items():
            if isclass(dt):
                if any(issubclass(dt, st) for st in string_types) \
                        or issubclass(dt, np.string_):
                    col_dict[nam] = util.vlen_str_dtype
        dt_arr = list(col_dict.items())
        col_dtype = np.dtype(dt_arr)

    df = DataFrame._create_new(self, data_frames, name, type_,
                               shape, col_dtype, compression)

    if data is not None:
        if type(data[0]) == np.void:
            data = np.ascontiguousarray(data, dtype=col_dtype)
            df.write_direct(data)
        else:
            data = list(map(tuple, data))
            arr = np.ascontiguousarray(data, dtype=col_dtype)
            df.write_direct(arr)
    return df
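# Usage sketch mirroring the test setUp earlier in this section (requires
# nixio; the file and entity names below are illustrative):
import numpy as np
import nixio as nix
from collections import OrderedDict

nf = nix.File.open("example.nix", nix.FileMode.Overwrite)
block = nf.create_block("example block", "recordingsession")
dtypes = OrderedDict([('name', np.int64), ('id', str), ('time', float)])
rows = [(1, "alpha", 20.18), (2, "beta", 20.09)]
df = block.create_data_frame("example df", "signal1",
                             data=rows, col_dict=dtypes)
nf.close()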
class CPUInfos(object):
    """ A set of CPU information objects.
    """
    __slots__ = [
        "_cpu_infos"]

    def __init__(self):
        self._cpu_infos = OrderedDict()

    def add_processor(self, x, y, processor_id, cpu_info):
        """ Add a processor on a given chip to the set.

        :param x: The x-coordinate of the chip
        :type x: int
        :param y: The y-coordinate of the chip
        :type y: int
        :param processor_id: A processor ID
        :type processor_id: int
        :param cpu_info: The CPU information for the core
        :type cpu_info: :py:class:`spinnman.model.enums.cpu_info.CPUInfo`
        """
        self._cpu_infos[x, y, processor_id] = cpu_info

    @property
    def cpu_infos(self):
        """ The per-core CPU info.

        :return: iterable of (x, y, p) core info
        """
        return iteritems(self._cpu_infos)

    def __iter__(self):
        return iter(self._cpu_infos)

    def iteritems(self):
        """ Get an iterable of ((x, y, p), cpu_info)
        """
        return iteritems(self._cpu_infos)

    def items(self):
        return self._cpu_infos.items()

    def values(self):
        return self._cpu_infos.values()

    def itervalues(self):
        """ Get an iterable of cpu_info.
        """
        return itervalues(self._cpu_infos)

    def keys(self):
        return self._cpu_infos.keys()

    def iterkeys(self):
        """ Get an iterable of (x, y, p).
        """
        return iterkeys(self._cpu_infos)

    def __len__(self):
        """ The total number of processors that are in these core subsets.
        """
        return len(self._cpu_infos)
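# Illustration of the (x, y, p) keying used by CPUInfos above; the plain
# dicts stand in for real spinnman CPUInfo objects:
infos = CPUInfos()
infos.add_processor(0, 0, 1, cpu_info={'state': 'RUN'})
infos.add_processor(0, 0, 2, cpu_info={'state': 'IDLE'})
for (x, y, p), info in infos.items():
    print(x, y, p, info['state'])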