def test_update(self):
    """update() must treat reserved-looking keyword names as ordinary keys."""
    reserved_names = ('self', 'dict', 'other', 'iterable')
    for name in reserved_names:
        mapping = collections.UserDict()
        mapping.update(**{name: 42})
        self.assertEqual(list(mapping.items()), [(name, 42)])
    # Invalid call shapes still raise TypeError.
    self.assertRaises(TypeError, collections.UserDict().update, 42)
    self.assertRaises(TypeError, collections.UserDict().update, {}, {})
    self.assertRaises(TypeError, collections.UserDict.update)
def test_init(self):
    """__init__ accepts reserved-looking keyword names as plain keys."""
    for name in ('self', 'other', 'iterable'):
        mapping = collections.UserDict(**{name: 42})
        self.assertEqual(list(mapping.items()), [(name, 42)])
    # 'dict' passed as a keyword simply becomes an ordinary item.
    self.assertEqual(list(collections.UserDict({}, dict=42).items()),
                     [('dict', 42)])
    self.assertEqual(list(collections.UserDict({}, dict=None).items()),
                     [('dict', None)])
    self.assertEqual(list(collections.UserDict(dict={'a': 42}).items()),
                     [('dict', {'a': 42})])
    # Invalid call shapes raise TypeError.
    self.assertRaises(TypeError, collections.UserDict, 42)
    self.assertRaises(TypeError, collections.UserDict, (), ())
    self.assertRaises(TypeError, collections.UserDict.__init__)
def test_init(self):
    """__init__ treats reserved-looking keywords as keys; 'dict' kwarg warns."""
    for name in ('self', 'other', 'iterable'):
        mapping = collections.UserDict(**{name: 42})
        self.assertEqual(list(mapping.items()), [(name, 42)])
    self.assertEqual(list(collections.UserDict({}, dict=42).items()),
                     [('dict', 42)])
    self.assertEqual(list(collections.UserDict({}, dict=None).items()),
                     [('dict', None)])
    # Passing the initial mapping via the 'dict' keyword is pending deprecation
    # and is interpreted as the initial data, not as a key.
    with self.assertWarnsRegex(PendingDeprecationWarning, "'dict'"):
        self.assertEqual(list(collections.UserDict(dict={'a': 42}).items()),
                         [('a', 42)])
    # Invalid call shapes raise TypeError.
    self.assertRaises(TypeError, collections.UserDict, 42)
    self.assertRaises(TypeError, collections.UserDict, (), ())
    self.assertRaises(TypeError, collections.UserDict.__init__)
def test_custom_asserts(self):
    # This would always trigger the KeyError from trying to put
    # an array of equal-length UserDicts inside an ndarray.
    data = JSONArray([collections.UserDict({'a': 1}),
                      collections.UserDict({'b': 2}),
                      collections.UserDict({'c': 3})])
    left = pd.Series(data)
    self.assert_series_equal(left, left)
    self.assert_frame_equal(left.to_frame(), left.to_frame())
    right = pd.Series(data.take([0, 0, 1]))
    # Differing contents must fail the custom equality asserts.
    with pytest.raises(AssertionError):
        self.assert_series_equal(left, right)
    with pytest.raises(AssertionError):
        self.assert_frame_equal(left.to_frame(), right.to_frame())
def test_userdict():
    """UserDict values render correctly in both plain and lldb-formatted reprs."""
    assert_lldb_repr(collections.UserDict(), '{}', 'UserDict()')
    assert_lldb_repr(collections.UserDict({1: 2, 3: 4}),
                     '{1: 2, 3: 4}',
                     'UserDict({1: 2, 3: 4})')
    # String keys may render with or without the u-prefix depending on version.
    assert_lldb_repr(collections.UserDict({1: 2, 'a': 'b'}),
                     "{1: 2, u?'a': u?'b'}",
                     "UserDict({1: 2, 'a': 'b'})")
    sixteen = collections.UserDict({i: i for i in range(16)})
    assert_lldb_repr(sixteen,
                     ('{0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8,'
                      ' 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15}'),
                     'UserDict({i: i for i in range(16)})')
class JSONDtype(ExtensionDtype):
    # Scalar type for this dtype: an abstract Mapping (JSON-object-like),
    # resolved through the project's compat shim.
    type = compat.Mapping
    name = 'json'

    try:
        na_value = collections.UserDict()
    except AttributeError:
        # source compatibility with Py2.
        na_value = {}

    @classmethod
    def construct_array_type(cls):
        """Return the array type associated with this dtype

        Returns
        -------
        type
        """
        return JSONArray

    @classmethod
    def construct_from_string(cls, string):
        # Only the exact dtype name constructs an instance; anything else
        # is rejected with TypeError (the ExtensionDtype contract).
        if string == cls.name:
            return cls()
        else:
            raise TypeError("Cannot construct a '{}' from "
                            "'{}'".format(cls, string))
def test_user_dict(self):
    """pprint formats a UserDict exactly like the equivalent plain dict."""
    empty = collections.UserDict()
    self.assertEqual(pprint.pformat(empty, width=1), "{}")
    words = 'the quick brown fox jumped over a lazy dog'.split()
    mapping = collections.UserDict(zip(words, itertools.count()))
    expected = """\
{'a': 6,
 'brown': 2,
 'dog': 8,
 'fox': 3,
 'jumped': 4,
 'lazy': 7,
 'over': 5,
 'quick': 1,
 'the': 0}"""
    self.assertEqual(pprint.pformat(mapping), expected)
def testUserDict(self):
    """A UserDict round-trips through serpent as a plain dict."""
    source = collections.UserDict()
    source['a'] = 1
    source['b'] = 2
    serialized = serpent.dumps(source)
    restored = serpent.loads(serialized)
    self.assertEqual({'a': 1, 'b': 2}, restored)
def create_one_ovsdb_table(attrs=None, max_rows=sys.maxsize):
    """Create a fake OVSDB table.

    :param Dictionary attrs: A dictionary with all attributes
    :param Int max_rows: A num of max rows
    :return: A FakeResource object faking the OVSDB table
    """
    # Defaults first; caller-supplied attributes overwrite them.
    table_attrs = {
        'rows': collections.UserDict(),
        'columns': {},
        'indexes': [],
        'max_rows': max_rows,
    }
    table_attrs.update(attrs or {})
    result = FakeResource(info=copy.deepcopy(table_attrs), loaded=True)
    # UserDict (unlike dict) accepts extra attributes such as ``indexes``.
    result.rows.indexes = {}
    return result
def make_data():
    # TODO: Use a regular dict. See _NDFrameIndexer._setitem_with_indexer
    records = []
    for _ in range(100):
        size = random.randint(0, 10)
        pairs = [(random.choice(string.ascii_letters), random.randint(0, 100))
                 for _ in range(size)]
        records.append(collections.UserDict(pairs))
    return records
def test_parse_rejects_non_dict_argmap_mapping(parser, web_request):
    """A mapping that is not a real dict must be rejected by parse()."""
    web_request.json = {"username": 42, "password": 42}
    # UserDict is dict-like in all meaningful ways, but not a subclass of `dict`
    # it will therefore be rejected with a TypeError when used
    schema_map = collections.UserDict(
        {"username": fields.Field(), "password": fields.Field()}
    )
    with pytest.raises(TypeError):
        parser.parse(schema_map, web_request)
def test_custom_asserts(self):
    # This would always trigger the KeyError from trying to put
    # an array of equal-length UserDicts inside an ndarray.
    records = [
        collections.UserDict({"a": 1}),
        collections.UserDict({"b": 2}),
        collections.UserDict({"c": 3}),
    ]
    data = JSONArray(records)
    left = pd.Series(data)
    self.assert_series_equal(left, left)
    self.assert_frame_equal(left.to_frame(), left.to_frame())
    right = pd.Series(data.take([0, 0, 1]))
    msg = r"ExtensionArray are different"
    # Differing contents must fail with the ExtensionArray diff message.
    with pytest.raises(AssertionError, match=msg):
        self.assert_series_equal(left, right)
    with pytest.raises(AssertionError, match=msg):
        self.assert_frame_equal(left.to_frame(), right.to_frame())
class JSONDtype(ExtensionDtype):
    """Extension dtype whose scalar values are JSON-like mappings."""

    # ``collections.Mapping`` was a deprecated alias of
    # ``collections.abc.Mapping`` and was removed in Python 3.10; resolve
    # through ``collections.abc`` when it exists, falling back to the old
    # alias on interpreters (Py2) that lack ``collections.abc``.
    type = getattr(collections, 'abc', collections).Mapping
    name = 'json'

    try:
        na_value = collections.UserDict()
    except AttributeError:
        # source compatibility with Py2.
        na_value = {}

    @classmethod
    def construct_from_string(cls, string):
        """Construct an instance from the dtype name; raise TypeError otherwise."""
        if string == cls.name:
            return cls()
        else:
            raise TypeError("Cannot construct a '{}' from "
                            "'{}'".format(cls, string))
class FakeTable(object):
    # Minimal stand-in for an OVSDB table with a single pre-populated row.
    # UserDict is used instead of dict so the extra ``indexes`` attribute
    # can be attached to the rows mapping.
    rows = collections.UserDict(
        {'fake-id-1': FakeRow(uuid='fake-id-1', name='Fake1')})
    rows.indexes = {}
    # Table-level index list (empty for this fake).
    indexes = []
def test_itervariant_isuserdict_success(self):
    """An items() iterator over a UserDict is still classified as a dict."""
    iterator = iter(collections.UserDict(self.SOME_DICT).items())
    assert comma.helpers.is_dict_or_list(iterator) is dict
def poller(hosts, oids_groups, community, timeout=3, backoff=2, retry=2,
           msg_type="GetNext", include_ts=False):
    """
    A generator that yields SNMP data

    Sends GetNext/GetBulk requests over non-blocking IPv6 UDP sockets driven
    by an epoll loop, retries timed-out queries, and yields one tuple per
    received varbind.

    :param hosts: hosts
    :param oids_groups: oids_groups
    :param community: community
    :type hosts: list | tuple
    :type oids_groups: list | tuple
    :type community: str
    :return: host, main_oid, index_part, value (plus a timestamp when
        include_ts is true)
    :rtype: tuple
    """
    job_queue = queue.Queue()
    socksize = 0x2000000  # recvfrom buffer size (32 MiB)
    retried_req = collections.defaultdict(int)  # query -> retries so far
    # message cache: (reqid, oids) -> encoded PDU, so resends skip re-encoding
    reqid_to_msg = {}
    pending_query = {}  # (ip, reqid) -> send timestamp, for timeout tracking
    target_info = {}  # ip => fqdn
    # fqdn => ips
    target_info_r = resolve(hosts)
    varbinds_cache = {}
    for fqdn, ips in list(target_info_r.items()):
        if ips:
            ip = ips[0]
            if ":" not in ip:
                # sockets are IPv6-only; map bare IPv4 addresses
                ip = "::ffff:" + ip
            target_info[ip] = fqdn
            # UserDict (not dict) so the ``by_oids`` reverse-lookup attribute
            # can be attached to the per-host cache
            varbinds_cache[ip] = collections.UserDict()
            varbinds_cache[ip].by_oids = {}
        else:
            logger.error("unable to resolve %s. skipping this host", fqdn)
            del target_info_r[fqdn]
    # preparation of targets
    start_reqid = random.randint(1, 999) * 10000
    for oids_group in oids_groups:
        if isinstance(oids_group, list):
            oids_group = tuple(oids_group)
        # (oids currently polled, original main oids) -- both start equal
        target_oid_group = (oids_group, oids_group)
        for fqdn, ips in target_info_r.items():
            ip = ips[0]
            if ":" not in ip:
                ip = "::ffff:" + ip
            varbinds_cache[ip][start_reqid] = target_oid_group
            varbinds_cache[ip].by_oids[target_oid_group] = start_reqid
            start_reqid += 10000
    # add initial jobs
    for ip, poll_data in varbinds_cache.items():
        for reqid in poll_data:
            if ":" not in ip:
                ip = "::ffff:" + ip
            job_queue.put((ip, reqid))
    # preparation of sockets
    socket_map = {}
    epoll = poll()
    socket_count = min((MAX_SOCKETS_COUNT, len(target_info_r)))
    for _ in range(socket_count):
        new_sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        # dual-stack: accept IPv4-mapped peers as well
        new_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, False)
        new_sock.bind(("::", 0))
        socket_map[new_sock.fileno()] = new_sock
        epoll.register(new_sock, POLLOUT)
    # main loop
    while True:
        try:
            events = epoll.poll(0.1)
            for fileno, event in events:
                if event & POLLOUT:
                    # writable: pop one job and send its (possibly cached) PDU
                    fdfmt = POLLIN
                    if not job_queue.empty():
                        host, pdudata_reqid = job_queue.get()
                        oids_to_poll, main_oids = varbinds_cache[host][
                            pdudata_reqid]
                        q = (pdudata_reqid, oids_to_poll)
                        if q in reqid_to_msg:
                            message = reqid_to_msg[q]
                        else:
                            message = snmp_parser.msg_encode(
                                pdudata_reqid, community, oids_to_poll,
                                max_repetitions=20, msg_type=msg_type)
                            reqid_to_msg[q] = message
                        socket_map[fileno].sendto(message, (host, 161))
                        pending_query[(host, pdudata_reqid)] = int(time())
                        if DEBUG:
                            logger.debug('sendto %s reqid=%s get oids=%s',
                                         host, pdudata_reqid, oids_to_poll)
                        job_queue.task_done()
                        if not job_queue.empty():
                            # more jobs pending: keep the socket write-armed
                            fdfmt = fdfmt | POLLOUT
                    epoll.modify(fileno, fdfmt)
                elif event & POLLIN:
                    data, remotehost = socket_map[fileno].recvfrom(socksize)
                    ts = time()
                    host_ip = remotehost[0]
                    if ":" not in host_ip:
                        host_ip = "::ffff:" + host_ip
                    try:
                        pdudata_reqid, error_status, error_index, var_bind_list = snmp_parser.msg_decode(
                            data)
                    except Exception as e:
                        logger.critical(
                            "%r. unable to decode PDU from %s. data=%r",
                            e, host_ip, data)
                        continue
                    if pending_query.pop(
                            (host_ip, pdudata_reqid), None) is None:
                        # reply arrived after we already gave up on it
                        if DEBUG:
                            logger.debug(
                                "received answer after timeout from %s reqid=%s",
                                host_ip, pdudata_reqid)
                        continue
                    if error_status:
                        logger.error('%s get error_status %s at %s. query=%s',
                                     target_info[host_ip], error_status,
                                     error_index,
                                     varbinds_cache[host_ip][pdudata_reqid][0])
                        continue
                    if DEBUG:
                        logger.debug('%s recv reqid=%s' % (host_ip,
                                                           pdudata_reqid))
                    if pdudata_reqid not in varbinds_cache[host_ip]:
                        if DEBUG:
                            logger.debug(
                                'received unknown reqid=%s for host=%s. skipping',
                                pdudata_reqid, host_ip)
                        continue
                    oids_to_poll, main_oids = varbinds_cache[host_ip][
                        pdudata_reqid]
                    main_oids_len = len(main_oids)
                    main_oids_positions = cycle(range(main_oids_len))
                    var_bind_list_len = len(var_bind_list)
                    skip_column = {}
                    # if some oid in requested oids is not supported, column with it is index will
                    # be filled with another oid. need to skip
                    last_seen_index = {}
                    for var_bind_pos in range(var_bind_list_len):
                        oid, value = var_bind_list[var_bind_pos]
                        # oids in received var_bind_list in round-robin order respectively query
                        main_oids_pos = next(main_oids_positions)
                        if value is None:
                            if DEBUG:
                                logger.debug('found none value %s %s %s' %
                                             (host_ip, oid, value))
                            skip_column[main_oids_pos] = True
                        if main_oids_pos in skip_column:
                            continue
                        main_oid = main_oids[main_oids_pos]
                        if msg_type == "GetBulk":
                            if oid.startswith(main_oid + '.'):
                                index_part = oid[len(main_oid) + 1:]
                                last_seen_index[main_oids_pos] = index_part
                                if include_ts:
                                    yield (target_info[host_ip], main_oid,
                                           index_part, value, ts)
                                else:
                                    yield (target_info[host_ip], main_oid,
                                           index_part, value)
                            else:
                                # walked past the subtree for this column
                                if DEBUG:
                                    logger.debug(
                                        'host_ip=%s column_pos=%s skip oid %s=%s, reqid=%s. Not found in %s'
                                        % (host_ip, main_oids_pos, oid, value,
                                           pdudata_reqid, main_oids))
                                    logger.debug(
                                        'vp=%s oid=%s main_oid=%s main_oids_pos=%s main_oids=%s',
                                        var_bind_pos, oid, main_oid,
                                        main_oids_pos, main_oids)
                                skip_column[main_oids_pos] = True
                                if len(skip_column) == var_bind_list_len:
                                    break
                        else:
                            # GetNext: one value per column, no table index
                            yield (target_info[host_ip], main_oid, "", value)
                            skip_column[main_oids_pos] = True
                    if len(skip_column) < main_oids_len:
                        # some columns still have rows to walk: build the
                        # follow-up query starting after the last seen index
                        if len(skip_column):
                            oids_to_poll = list()
                            new_main_oids = list()
                            for pos in range(main_oids_len):
                                if pos in skip_column:
                                    continue
                                oids_to_poll.append(
                                    "%s.%s" % (main_oids[pos],
                                               last_seen_index[pos]))
                                new_main_oids.append(main_oids[pos])
                            oids_to_poll = tuple(oids_to_poll)
                            new_main_oids = tuple(new_main_oids)
                        else:
                            oids_to_poll = tuple(
                                "%s.%s" % (main_oids[p], last_seen_index[p])
                                for p in range(main_oids_len))
                            new_main_oids = main_oids
                        oid_group = (oids_to_poll, new_main_oids)
                        if oid_group in varbinds_cache[host_ip]:
                            next_reqid = varbinds_cache[host_ip][oid_group]
                        else:
                            next_reqid = pdudata_reqid + 10
                            varbinds_cache[host_ip][next_reqid] = oid_group
                            varbinds_cache[host_ip].by_oids[
                                oid_group] = next_reqid
                        job_queue.put((host_ip, next_reqid))
                    else:
                        if DEBUG:
                            logger.debug(
                                'found not interested in oid=%s value=%s host=%s reqid=%s'
                                % (oid, value, host_ip, pdudata_reqid))
                    epoll.modify(fileno, POLLOUT | POLLIN)
                elif event & POLLERR:
                    logger.critical('socket error')
                    raise Exception('epoll error')
            if not events and job_queue.empty() and not pending_query:
                # nothing queued and nothing in flight: polling is finished
                break
            if pending_query:
                # check timeouts
                cur_time = int(time())
                timeouted_querys = []
                for query, query_time in pending_query.items():
                    attempt = retried_req.get(query, 1)
                    if attempt == 1:
                        query_timeout = attempt * timeout
                    else:
                        # later attempts are given a longer deadline
                        query_timeout = attempt * backoff * timeout
                    if cur_time - query_time > query_timeout:
                        timeouted_querys.append(query)
                        if DEBUG:
                            logger.warning('timeout %s > %s. attempt=%s, %s',
                                           cur_time - query_time,
                                           query_timeout, attempt, query)
                for timeouted_query in timeouted_querys:
                    if timeouted_query not in retried_req or retried_req[
                            timeouted_query] < retry:
                        if DEBUG:
                            logger.debug('resend %s', timeouted_query)
                        job_queue.put(timeouted_query)
                        retried_req[timeouted_query] += 1
                        # NOTE(review): raising here aborts the whole generator
                        # on the first timeout even though the query was just
                        # re-queued for retry above (and surrounding
                        # commented-out experiments suggest this was a
                        # temporary change) -- confirm the intended behavior.
                        raise ConnectionError('Timeout error!')
                    else:
                        print('query timeout for OID')
                        logger.warning(
                            "%s ip=%s query timeout for OID's: %s",
                            target_info[timeouted_query[0]],
                            timeouted_query[0],
                            varbinds_cache[
                                timeouted_query[0]][timeouted_query[1]][0])
                        del pending_query[timeouted_query]
                if not job_queue.empty():
                    # re-arm sockets for writing so queued jobs get sent
                    sockets_write_count = min(job_queue.qsize(),
                                              len(socket_map))
                    for sock in list(socket_map.values())[0:sockets_write_count]:
                        epoll.modify(sock, POLLOUT | POLLIN)
        except InterruptedError:
            # signal in syscall. suppressed by default on python 3.5
            pass
def _from_factorized(cls, values, original):
    """Rebuild an array of UserDict records, dropping empty-tuple sentinels."""
    records = [collections.UserDict(record) for record in values if record != ()]
    return cls(records)
def test_all(self):
    """Exercise the full UserDict API against the plain-dict fixtures d0..d5."""
    # Constructors: empty, from dicts, and from other UserDicts.
    u = collections.UserDict()
    u0 = collections.UserDict(d0)
    u1 = collections.UserDict(d1)
    u2 = collections.UserDict(d2)
    uu = collections.UserDict(u)
    uu0 = collections.UserDict(u0)
    uu1 = collections.UserDict(u1)
    uu2 = collections.UserDict(u2)

    # Keyword-argument and item-sequence constructors.
    self.assertEqual(collections.UserDict(one=1, two=2), d2)
    self.assertEqual(collections.UserDict([('one', 1), ('two', 2)]), d2)
    # Passing the initial data via the 'dict' keyword is deprecated here.
    with self.assertWarnsRegex(DeprecationWarning, "'dict'"):
        self.assertEqual(
            collections.UserDict(dict=[('one', 1), ('two', 2)]), d2)
    # Sequence and keyword arguments combined.
    self.assertEqual(
        collections.UserDict([('one', 1), ('two', 2)], two=3, three=5), d3)

    # Alternate constructor fromkeys(), on the class and on instances.
    self.assertEqual(collections.UserDict.fromkeys('one two'.split()), d4)
    self.assertEqual(collections.UserDict().fromkeys('one two'.split()), d4)
    self.assertEqual(collections.UserDict.fromkeys('one two'.split(), 1), d5)
    self.assertEqual(collections.UserDict().fromkeys('one two'.split(), 1), d5)
    self.assertTrue(u1.fromkeys('one two'.split()) is not u1)
    self.assertIsInstance(u1.fromkeys('one two'.split()), collections.UserDict)
    self.assertIsInstance(u2.fromkeys('one two'.split()), collections.UserDict)

    # __str__/__repr__ match the equivalent plain dict.
    self.assertEqual(str(u0), str(d0))
    self.assertEqual(repr(u1), repr(d1))
    self.assertIn(repr(u2), ("{'one': 1, 'two': 2}",
                             "{'two': 2, 'one': 1}"))

    # Rich comparison and __len__: in these fixtures, equal iff same length.
    all = [d0, d1, d2, u, u0, u1, u2, uu, uu0, uu1, uu2]
    for a in all:
        for b in all:
            self.assertEqual(a == b, len(a) == len(b))

    # __getitem__
    self.assertEqual(u2['one'], 1)
    self.assertRaises(KeyError, u1.__getitem__, 'two')

    # __setitem__ / __delitem__
    u3 = collections.UserDict(u2)
    u3['two'] = 2
    u3['three'] = 3
    del u3['three']
    self.assertRaises(KeyError, u3.__delitem__, 'three')

    # clear()
    u3.clear()
    self.assertEqual(u3, {})

    # copy(), including copies of subclasses (no shared data afterwards).
    u2a = u2.copy()
    self.assertEqual(u2a, u2)
    u2b = collections.UserDict(x=42, y=23)
    u2c = u2b.copy()
    self.assertEqual(u2b, u2c)

    class MyUserDict(collections.UserDict):
        def display(self):
            print(self)

    m2 = MyUserDict(u2)
    m2a = m2.copy()
    self.assertEqual(m2a, m2)
    m2['foo'] = 'bar'
    self.assertNotEqual(m2a, m2)

    # keys()/items()/values() views.
    self.assertEqual(sorted(u2.keys()), sorted(d2.keys()))
    self.assertEqual(sorted(u2.items()), sorted(d2.items()))
    self.assertEqual(sorted(u2.values()), sorted(d2.values()))

    # Membership tests.
    for i in u2.keys():
        self.assertIn(i, u2)
        self.assertEqual(i in u1, i in d1)
        self.assertEqual(i in u0, i in d0)

    # update()
    t = collections.UserDict()
    t.update(u2)
    self.assertEqual(t, u2)

    # get()
    for i in u2.keys():
        self.assertEqual(u2.get(i), u2[i])
        self.assertEqual(u1.get(i), d1.get(i))
        self.assertEqual(u0.get(i), d0.get(i))

    # Iteration yields the same key set as keys().
    for i in range(20):
        u2[i] = str(i)
    ikeys = []
    for k in u2:
        ikeys.append(k)
    keys = u2.keys()
    self.assertEqual(set(ikeys), set(keys))

    # setdefault()
    t = collections.UserDict()
    self.assertEqual(t.setdefault('x', 42), 42)
    self.assertIn('x', t)
    self.assertEqual(t.setdefault('x', 23), 42)

    # pop()
    t = collections.UserDict(x=42)
    self.assertEqual(t.pop('x'), 42)
    self.assertRaises(KeyError, t.pop, 'x')
    self.assertEqual(t.pop('x', 1), 1)
    t['x'] = 42
    self.assertEqual(t.pop('x', 1), 42)

    # popitem()
    t = collections.UserDict(x=42)
    self.assertEqual(t.popitem(), ('x', 42))
    self.assertRaises(KeyError, t.popitem)
# this assert passes, which is good assert 'var' in g # bug 2 -- var is undefined when f is called assert g['f']() == 123 # issue 1597 (locals can be any mapping type) import collections x = eval("a", {}, collections.defaultdict(list)) assert x == [] y = eval("a", {}, collections.defaultdict(list, a=1)) assert y == 1 z = eval("a", {}, collections.UserDict(a=2)) assert z == 2 # issue 1808 class A: pass try: exec(A()) raise Exception('should have raised TypeError') except TypeError as exc: assert exc.args[0] == 'exec() arg 1 must be a string, ' \ 'bytes or code object'
d1['a'] = 'A'
d1['b'] = 'B'
d1['c'] = 'C'
d1['1'] = '1'
d1['2'] = '2'

# 6. defaultdict -- a dict subclass that supplies default values: it wraps a
#    factory function which produces a default entry on first lookup of a
#    missing key.
s = [('red', 1), ('blue', 2), ('red', 3), ('blue', 4), ('red', 1), ('blue', 4)]
d = collections.defaultdict(set)
for k, v in s:
    d[k].add(v)

# 7. UserDict -- wraps a dict object to make subclassing easier; the data is
#    stored in and accessed through the instance's ``data`` attribute.
#    The UserDict instance itself is much smaller than the plain dict it
#    wraps, but it carries the real dict in ``data``.
d0 = dict(a=1, b=2)
d1 = collections.UserDict(d0)
import sys
sys.getsizeof(d0)       # 248
sys.getsizeof(d1)       # 64
sys.getsizeof(d1.data)  # 248

# 8. UserList -- wraps a list object to make subclassing easier; the contents
#    are kept as a normal list available through the ``data`` attribute.
l1 = ['a', 'b', 'c', 'd', 'e', 'f']
import sys
l0 = collections.UserList(l1)
sys.getsizeof(l1)  # 104
def testSubclassesOfDictPassAssertion(self):
    """AssertDictType accepts dict-like mappings such as UserDict."""
    mapping = collections.UserDict({1: "foo", 2: "bar", 3: "baz"})
    precondition.AssertDictType(mapping, int, str)
def user_dict_modified(param=collections.UserDict()):  # Noncompliant
    # Deliberate rule example: the mutable default UserDict is created once
    # and shared across calls, so clearing it here mutates the default value
    # seen by every subsequent caller.
    param.clear()
def make_defaultdict():
    """Build a defaultdict with 100k int->int entries as a truncation fixture."""
    d = collections.defaultdict()
    for i in range(100000):
        d[i] = i
    return d


@pytest.mark.parametrize(
    "param, wanted_size",
    [
        (list(range(100000)), 558),
        (set(range(100000)), 558),
        ({i: i for i in range(100000)}, 1339),
        ("x" * 100000, 1049),
        (b"x" * 100000, 1049),
        (bytearray([64] * 100000), 1049),
        (bytearray([64] * 100000 + [128]), 1049),  # UnicodeDecodeError
        (make_defaultdict(), 1339),
        (collections.OrderedDict({i: i for i in range(100000)}), 1339),
        (collections.UserDict({i: i for i in range(100000)}), 1339),
        (collections.UserList(range(100000)), 558),
        (collections.UserString("x" * 100000), 1049),
        (collections.UserString(b"x" * 100000), 1049),
    ],
)
def test_truncation(param, wanted_size):
    # The exact byte counts pin the serializer's truncation behavior: each
    # oversized param type must serialize to the same bounded size.
    notice = dict(params=dict(param=param))
    b = jsonify_notice(notice)
    assert len(b) == wanted_size
def main(argv):
    """Walk a directory, count files by extension, and log each file's last line.

    Expects ``-p/--path <directory>`` in argv; writes results to output.log
    next to this script.
    """
    log_dir = pathlib.Path(__file__).parent.absolute()
    log_file = os.path.join(log_dir, 'output.log')
    input_dir = ''
    try:
        opts, args = getopt.getopt(argv, "hp:", ["path="])
    except getopt.GetoptError:
        print('mainLauncher.py -p <directory>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('mainLauncher.py -p <directory>')
            sys.exit()
        elif opt in ("-p", "--path"):
            input_dir = arg
    if os.path.isdir(input_dir):
        os.chdir(input_dir)
    else:
        print('Directory {} does not exist'.format(input_dir))
        sys.exit()
    # UserDict used as an ordinary mapping of filename -> last line.
    text_collector = collections.UserDict()
    ext_counter = collections.Counter()
    total_count = 0
    failed = 0
    for path, dirs, files in os.walk(input_dir):
        total_count += len(files)
        for filename in files:
            name, ext = os.path.splitext(filename)
            # counting files by extension
            ext_counter[ext] += 1
            try:
                with open(os.path.join(path, filename), "r+") as file:
                    # Exhaust the file; last_line holds the final line after
                    # the loop (UnboundLocalError below covers empty files).
                    for last_line in file:
                        pass
                    st = ntpath.basename(filename)
                    # collecting filename and its last line
                    text_collector[st] = last_line[:-1] if last_line.endswith('\n') else last_line
            except IOError as e:
                print('Unable to open {0} file due to error: {1}'.format(filename, e))
                failed += 1
                continue
            except UnicodeDecodeError as e:
                print('Unable to decode {0} file due to error: {1}'.format(filename, e))
                failed += 1
                continue
            except UnboundLocalError as e:
                print('Unbound error for {0} file due to error: {1}'.format(filename, e))
                failed += 1
                continue
    print('Total number of files in {0}: {1}'.format(input_dir, total_count))
    print('Failed to read last line for {} files'.format(failed))
    print('Following extensions were discovered:')
    for ext, count in ext_counter.most_common():
        if ext == '':
            print("Files without extension:", count)
        else:
            print("'{0}': {1} {2}".format(ext, count, "occurrence" if count == 1 else "occurrences"))
    try:
        # Recreate the log file from scratch in the script's directory.
        os.chdir(log_dir)
        if os.path.isfile(log_file):
            os.remove(log_file)
        with open('output.log', "w+") as file:
            for key, value in text_collector.items():
                file.write("{0}: '{1}'\r\n".format(key, value))
        print('Log file {} successfully created'.format(log_file))
    except IOError as e:
        print('Failed to write to {0} due to {1}'.format(log_file, e))
def test_general_eval(self):
    # Tests that general mappings can be used for the locals argument

    class M:
        "Test mapping interface versus possible calls from eval()."
        def __getitem__(self, key):
            if key == 'a':
                return 12
            raise KeyError
        def keys(self):
            return list('xyz')

    m = M()
    g = globals()
    self.assertEqual(eval('a', g, m), 12)
    self.assertRaises(NameError, eval, 'b', g, m)
    self.assertEqual(eval('dir()', g, m), list('xyz'))
    self.assertEqual(eval('globals()', g, m), g)
    self.assertEqual(eval('locals()', g, m), m)
    # A mapping is not acceptable as the *globals* argument.
    self.assertRaises(TypeError, eval, 'a', m)

    class A:
        "Non-mapping"
        pass

    m = A()
    self.assertRaises(TypeError, eval, 'a', g, m)

    # Verify that dict subclasses work as well
    class D(dict):
        def __getitem__(self, key):
            if key == 'a':
                return 12
            return dict.__getitem__(self, key)
        def keys(self):
            return list('xyz')

    d = D()
    self.assertEqual(eval('a', g, d), 12)
    self.assertRaises(NameError, eval, 'b', g, d)
    self.assertEqual(eval('dir()', g, d), list('xyz'))
    self.assertEqual(eval('globals()', g, d), g)
    self.assertEqual(eval('locals()', g, d), d)

    # Verify locals stores (used by list comps)
    eval('[locals() for i in (2,3)]', g, d)
    eval('[locals() for i in (2,3)]', g, collections.UserDict())

    class SpreadSheet:
        "Sample application showing nested, calculated lookups."
        _cells = {}
        def __setitem__(self, key, formula):
            self._cells[key] = formula
        def __getitem__(self, key):
            # Cell references resolve recursively through eval().
            return eval(self._cells[key], globals(), self)

    ss = SpreadSheet()
    ss['a1'] = '5'
    ss['a2'] = 'a1*6'
    ss['a3'] = 'a2*7'
    self.assertEqual(ss['a3'], 210)

    # Verify that dir() catches a non-list returned by eval
    # SF bug #1004669
    class C:
        def __getitem__(self, item):
            raise KeyError(item)
        def keys(self):
            return 1  # used to be 'a' but that's no longer an error

    self.assertRaises(TypeError, eval, 'dir()', globals(), C())
def test_isuserdict_success(self):
    """A UserDict instance is classified as a dict."""
    user_dict = collections.UserDict(self.SOME_DICT)
    assert comma.helpers.is_dict_or_list(user_dict) is dict
def __new__(cls):
    # Deliberately returns a UserDict rather than an instance of ``cls``:
    # __new__ may return any object, in which case __init__ is not invoked
    # on it.
    return collections.UserDict()
return 42 class AnswerDict2(collections.UserDict): def __getitem__(self, item): return 42 ad = AnswerDict(a='foo') ad2 = AnswerDict2(a='foo') print(ad['a']) # 42 print(ad2['a']) # 42 print(ad) # {'a': 'foo'} print(ad2) # {'a': 'foo'} d1 = {} d2 = {} d3 = collections.UserDict() d1.update(ad) d2.update(ad2) d3.update(ad) print(d2) # {'a': 42} print(d1) # {'a': 'foo'} print(d3) # {'a': 42} # UserDict를 상속 받거나, UserDict object로 만들거나.
def test_all(self):
    """Exercise the full UserDict API against the plain-dict fixtures d0..d5."""
    # Test constructors
    u = collections.UserDict()
    u0 = collections.UserDict(d0)
    u1 = collections.UserDict(d1)
    u2 = collections.UserDict(d2)
    uu = collections.UserDict(u)
    uu0 = collections.UserDict(u0)
    uu1 = collections.UserDict(u1)
    uu2 = collections.UserDict(u2)

    # keyword arg constructor
    self.assertEqual(collections.UserDict(one=1, two=2), d2)
    # item sequence constructor
    self.assertEqual(collections.UserDict([('one',1), ('two',2)]), d2)
    # on this version, 'dict' passed by keyword becomes an ordinary key
    self.assertEqual(collections.UserDict(dict=[('one',1), ('two',2)]),
                     {'dict': [('one', 1), ('two', 2)]})
    # both together
    self.assertEqual(collections.UserDict([('one',1), ('two',2)], two=3, three=5), d3)

    # alternate constructor
    self.assertEqual(collections.UserDict.fromkeys('one two'.split()), d4)
    self.assertEqual(collections.UserDict().fromkeys('one two'.split()), d4)
    self.assertEqual(collections.UserDict.fromkeys('one two'.split(), 1), d5)
    self.assertEqual(collections.UserDict().fromkeys('one two'.split(), 1), d5)
    self.assertTrue(u1.fromkeys('one two'.split()) is not u1)
    self.assertIsInstance(u1.fromkeys('one two'.split()), collections.UserDict)
    self.assertIsInstance(u2.fromkeys('one two'.split()), collections.UserDict)

    # Test __repr__
    self.assertEqual(str(u0), str(d0))
    self.assertEqual(repr(u1), repr(d1))
    self.assertIn(repr(u2), ("{'one': 1, 'two': 2}",
                             "{'two': 2, 'one': 1}"))

    # Test rich comparison and __len__
    all = [d0, d1, d2, u, u0, u1, u2, uu, uu0, uu1, uu2]
    for a in all:
        for b in all:
            self.assertEqual(a == b, len(a) == len(b))

    # Test __getitem__
    self.assertEqual(u2["one"], 1)
    self.assertRaises(KeyError, u1.__getitem__, "two")

    # Test __setitem__
    u3 = collections.UserDict(u2)
    u3["two"] = 2
    u3["three"] = 3

    # Test __delitem__
    del u3["three"]
    self.assertRaises(KeyError, u3.__delitem__, "three")

    # Test clear
    u3.clear()
    self.assertEqual(u3, {})

    # Test copy()
    u2a = u2.copy()
    self.assertEqual(u2a, u2)
    u2b = collections.UserDict(x=42, y=23)
    u2c = u2b.copy()  # making a copy of a UserDict is special cased
    self.assertEqual(u2b, u2c)

    class MyUserDict(collections.UserDict):
        def display(self):
            print(self)

    m2 = MyUserDict(u2)
    m2a = m2.copy()
    self.assertEqual(m2a, m2)
    # SF bug #476616 -- copy() of UserDict subclass shared data
    m2['foo'] = 'bar'
    self.assertNotEqual(m2a, m2)

    # Test keys, items, values
    self.assertEqual(sorted(u2.keys()), sorted(d2.keys()))
    self.assertEqual(sorted(u2.items()), sorted(d2.items()))
    self.assertEqual(sorted(u2.values()), sorted(d2.values()))

    # Test "in".
    for i in u2.keys():
        self.assertIn(i, u2)
        self.assertEqual(i in u1, i in d1)
        self.assertEqual(i in u0, i in d0)

    # Test update
    t = collections.UserDict()
    t.update(u2)
    self.assertEqual(t, u2)

    # Test get
    for i in u2.keys():
        self.assertEqual(u2.get(i), u2[i])
        self.assertEqual(u1.get(i), d1.get(i))
        self.assertEqual(u0.get(i), d0.get(i))

    # Test "in" iteration.
    for i in range(20):
        u2[i] = str(i)
    ikeys = []
    for k in u2:
        ikeys.append(k)
    keys = u2.keys()
    self.assertEqual(set(ikeys), set(keys))

    # Test setdefault
    t = collections.UserDict()
    self.assertEqual(t.setdefault("x", 42), 42)
    self.assertIn("x", t)
    self.assertEqual(t.setdefault("x", 23), 42)

    # Test pop
    t = collections.UserDict(x=42)
    self.assertEqual(t.pop("x"), 42)
    self.assertRaises(KeyError, t.pop, "x")
    self.assertEqual(t.pop("x", 1), 1)
    t["x"] = 42
    self.assertEqual(t.pop("x", 1), 42)

    # Test popitem
    t = collections.UserDict(x=42)
    self.assertEqual(t.popitem(), ("x", 42))
    self.assertRaises(KeyError, t.popitem)
def test_dict():
    """cast() must coerce mappings, pair lists, and generic Dict targets."""
    # mapping
    d = {'a': 1, 'b': 2}
    r = cast(dict, collections.OrderedDict(d))
    assert r == {'a': 1, 'b': 2}

    # list of pairs
    assert cast(dict, [('a', 1), ('b', 2)]) == {'a': 1, 'b': 2}

    # None is rejected
    with pytest.raises(TypeError):
        cast(dict, None)

    # dict
    class X(Object):
        i: int

    data = {i: {'i': i} for i in range(10)}
    r = cast(Dict, data)
    assert isinstance(r, dict)
    assert r == data
    r = cast(dict, data)
    assert isinstance(r, dict)
    assert r == data

    # generic dict: int keys become str, value dicts become X instances
    r = cast(Dict[str, X], data)
    assert isinstance(r, dict)
    assert len(r) == len(data)
    for i, (k, v) in enumerate(r.items()):
        assert k == str(i)
        assert isinstance(v, X)
        assert v.i == i
    assert cast(Dict[str, int], [('a', 1), ('b', 2)]) == {'a': 1, 'b': 2}
    if sys.version_info >= (3, 9):
        # PEP 585 builtin generics behave like typing.Dict
        r = cast(dict[str, X], data)
        assert isinstance(r, dict)
        assert len(r) == len(data)
        for i, (k, v) in enumerate(r.items()):
            assert k == str(i)
            assert isinstance(v, X)
            assert v.i == i

    # no copy: a dict that already matches the target is returned as-is
    data = {str(i): i for i in range(10)}
    assert cast(dict, data) is data
    assert cast(Dict, data) is data
    assert cast(Dict[str, int], data) is data

    # copy: once a value needs conversion, a converted dict is returned
    expected = data.copy()
    data['9'] = '9'
    assert cast(dict, data) is data
    assert cast(Dict, data) is data
    assert cast(Dict[str, int], data) == expected
    assert cast(Dict[str, int], collections.UserDict(data)) == expected