def check_gc_during_creation(self, makeref):
    """Create a weakref while the GC is primed to run on (nearly) every
    allocation, so that the ref and its callback become cyclic trash;
    the collection must not crash."""
    if test_support.check_impl_detail():
        # Only CPython exposes gc thresholds; make collections very frequent.
        import gc
        thresholds = gc.get_threshold()
        gc.set_threshold(1, 1, 1)
    gc_collect()
    class A:
        pass
    def callback(*args):
        pass
    referenced = A()
    a = A()
    a.a = a                      # self-referencing cycle
    a.wr = makeref(referenced)
    try:
        # now make sure the object and the ref get labeled as
        # cyclic trash:
        a = A()
        weakref.ref(referenced, callback)
    finally:
        if test_support.check_impl_detail():
            # Restore the original thresholds even if the test fails.
            gc.set_threshold(*thresholds)
def test_bug737473(self):
    """After reload()ing a module, tracebacks must show the *new* source
    line, not the stale linecache entry (Python bug #737473)."""
    import os, tempfile, time

    savedpath = sys.path[:]
    testdir = tempfile.mkdtemp()
    try:
        sys.path.insert(0, testdir)
        testfile = os.path.join(testdir, 'test_bug737473.py')
        with open(testfile, 'w') as f:
            print >> f, """
def test():
    raise ValueError"""

        if 'test_bug737473' in sys.modules:
            del sys.modules['test_bug737473']
        import test_bug737473

        try:
            test_bug737473.test()
        except ValueError:
            # this loads source code to linecache
            traceback.extract_tb(sys.exc_traceback)

        # If this test runs too quickly, test_bug737473.py's mtime
        # attribute will remain unchanged even if the file is rewritten.
        # Consequently, the file would not reload.  So, added a sleep()
        # delay to assure that a new, distinct timestamp is written.
        # Since WinME with FAT32 has multisecond resolution, more than
        # three seconds are needed for this test to pass reliably :-(
        time.sleep(4)

        with open(testfile, 'w') as f:
            print >> f, """
def test():
    raise NotImplementedError"""
        reload(test_bug737473)
        try:
            test_bug737473.test()
        except NotImplementedError:
            src = traceback.extract_tb(sys.exc_traceback)[-1][-1]
            self.assertEqual(src, 'raise NotImplementedError')
    finally:
        sys.path[:] = savedpath
        for f in os.listdir(testdir):
            os.unlink(os.path.join(testdir, f))
        os.rmdir(testdir)

    # NOTE(review): the statements below look like they belong to a
    # separate bad-indentation formatting test that was spliced onto this
    # method — confirm against the original file layout.
    err = self.get_exception_format(self.syntax_error_bad_indentation2,
                                    IndentationError)
    self.assertEqual(len(err), 4)
    self.assertEqual(err[1].strip(), "print(2)")
    self.assertIn("^", err[2])
    if check_impl_detail():
        self.assertEqual(err[1].find("p"), err[2].find("^"))
    if check_impl_detail(pypy=True):
        self.assertEqual(err[1].find("2)") + 1, err[2].find("^"))
def test_builtin_function(self):
    """repr() of built-in functions and of bound built-in methods; the
    method repr differs between CPython and PyPy."""
    # Plain built-in function.
    self.assertEqual(repr(hash), '<built-in function hash>')
    # Bound method of a str instance.
    if check_impl_detail(cpython=True):
        method_repr = repr(''.split)
        self.assertTrue(method_repr.startswith(
            '<built-in method split of str object at 0x'))
    elif check_impl_detail(pypy=True):
        self.assertEqual(repr(''.split),
                         "<bound method str.split of ''>")
def test_rewrite_pyc_with_read_only_source(self):
    # Issue 6074: a long time ago on posix, and more recently on Windows,
    # a read only source file resulted in a read only pyc file, which
    # led to problems with updating it later
    sys.path.insert(0, os.curdir)
    fname = TESTFN + os.extsep + "py"
    try:
        # Write a Python file, make it read-only and import it
        with open(fname, 'w') as f:
            f.write("x = 'original'\n")
        # Tweak the mtime of the source to ensure pyc gets updated later
        s = os.stat(fname)
        os.utime(fname, (s.st_atime, s.st_mtime - 100000000))
        os.chmod(fname, 0400)          # read-only for the owner
        m1 = __import__(TESTFN)
        self.assertEqual(m1.x, 'original')
        # Change the file and then reimport it
        os.chmod(fname, 0600)          # writable again
        with open(fname, 'w') as f:
            f.write("x = 'rewritten'\n")
        unload(TESTFN)
        m2 = __import__(TESTFN)
        self.assertEqual(m2.x, 'rewritten')
        # Now delete the source file and check the pyc was rewritten
        if check_impl_detail(pypy=False):
            # PyPy refuses to import a .pyc whose .py is missing, so this
            # last step is CPython-only.
            unlink(fname)
            unload(TESTFN)
            m3 = __import__(TESTFN)
            self.assertEqual(m3.x, 'rewritten')
    finally:
        # Restore permissions and module/sys.path state even on failure.
        chmod_files(TESTFN)
        remove_files(TESTFN)
        unload(TESTFN)
        del sys.path[0]
class StringlikeHashRandomizationTests(HashRandomizationTests):
    """Hash-randomization checks shared by the str/unicode/buffer tests.

    The expected constants are implementation- and word-size-specific.
    """

    # PyPy hashes the empty string differently from CPython.
    if check_impl_detail(pypy=True):
        EMPTY_STRING_HASH = -2
    else:
        EMPTY_STRING_HASH = 0

    def test_null_hash(self):
        # PYTHONHASHSEED=0 disables the randomized hash
        if IS_64BIT:
            known_hash_of_obj = 1453079729188098211
        else:
            known_hash_of_obj = -1600925533

        # Randomization is disabled by default:
        self.assertEqual(self.get_hash(self.repr_), known_hash_of_obj)

        # It can also be disabled by setting the seed to 0:
        self.assertEqual(self.get_hash(self.repr_, seed=0),
                         known_hash_of_obj)

    def test_fixed_hash(self):
        # test a fixed seed for the randomized hash
        # Note that all types share the same values:
        if IS_64BIT:
            if sys.byteorder == 'little':
                h = -4410911502303878509
            else:
                h = -3570150969479994130
        else:
            if sys.byteorder == 'little':
                h = -206076799
            else:
                h = -1024014457
        self.assertEqual(self.get_hash(self.repr_, seed=42), h)
def test_cmptypes(self):
    # Built-in tp_compare slots expect their arguments to have the
    # same type, but a user-defined __coerce__ doesn't have to obey.
    # SF #980352
    evil_coercer = CoerceTo(42)
    # Make sure these don't crash any more.
    # (assertNotEquals/assertEquals/assert_ are deprecated unittest
    # aliases; use the canonical names, matching the sibling copy of
    # this test.)
    self.assertNotEqual(cmp(u'fish', evil_coercer), 0)
    self.assertNotEqual(cmp(slice(1), evil_coercer), 0)
    # ...but that this still works
    if check_impl_detail():
        # NB. I (arigo) would consider the following as implementation-
        # specific.  For example, in CPython, if we replace 42 with 42.0
        # both below and in CoerceTo() above, then the test fails.  This
        # hints that the behavior is really dependent on some obscure
        # internal details.
        class WackyComparer(object):
            def __cmp__(slf, other):
                self.assertTrue(other == 42,
                                'expected evil_coercer, got %r' % other)
                return 0
        self.assertEqual(cmp(WackyComparer(), evil_coercer), 0)
        # ...and classic classes too, since that code path is a little
        # different
        class ClassicWackyComparer:
            def __cmp__(slf, other):
                self.assertTrue(other == 42,
                                'expected evil_coercer, got %r' % other)
                return 0
        self.assertEqual(cmp(ClassicWackyComparer(), evil_coercer), 0)
def test_module_with_large_stack(self, module='longlist'):
    # Regression test for http://bugs.python.org/issue561858.
    filename = module + os.extsep + 'py'

    # Create a file with a list of 65000 elements.
    with open(filename, 'w+') as f:
        f.write('d = [\n')
        for i in range(65000):
            f.write('"",\n')
        f.write(']')

    # Compile & remove .py file, we only need .pyc (or .pyo).
    with open(filename, 'r') as f:
        py_compile.compile(filename)
    if check_impl_detail(pypy=False):
        # pypy refuses to import a .pyc if the .py does not exist
        unlink(filename)

    # Need to be able to load from current dir.
    sys.path.append('')

    # This used to crash.
    exec 'import ' + module
    reload(longlist)

    # Cleanup.
    del sys.path[-1]
    unlink(filename + 'c')
    unlink(filename + 'o')
def test_no_len_for_infinite_repeat(self):
    # The repeat() object can also be infinite
    if test_support.check_impl_detail(pypy=True):
        # 3.4 (PEP 424) behavior
        # NOTE(review): this relies on PyPy's len() propagating the
        # NotImplemented returned by __length_hint__ — verify against
        # the PyPy version under test.
        self.assertEqual(len(repeat(None)), NotImplemented)
    else:
        self.assertRaises(TypeError, len, repeat(None))
def test_excluding_predicates(self):
    """Each istest() call asserts that exactly the named inspect
    predicate accepts the eval()'d expression."""
    self.istest(inspect.isbuiltin, 'sys.exit')
    if check_impl_detail():
        # [].append is a built-in method only on CPython.
        self.istest(inspect.isbuiltin, '[].append')
    self.istest(inspect.iscode, 'mod.spam.func_code')
    self.istest(inspect.isframe, 'tb.tb_frame')
    self.istest(inspect.isfunction, 'mod.spam')
    self.istest(inspect.ismethod, 'mod.StupidGit.abuse')
    self.istest(inspect.ismethod, 'git.argue')
    self.istest(inspect.ismodule, 'mod')
    self.istest(inspect.istraceback, 'tb')
    self.istest(inspect.isdatadescriptor, '__builtin__.file.closed')
    self.istest(inspect.isdatadescriptor, '__builtin__.file.softspace')
    self.istest(inspect.isgenerator, '(x for x in xrange(2))')
    self.istest(inspect.isgeneratorfunction, 'generator_function_example')
    if hasattr(types, 'GetSetDescriptorType'):
        self.istest(inspect.isgetsetdescriptor,
                    'type(tb.tb_frame).f_locals')
    else:
        self.assertFalse(
            inspect.isgetsetdescriptor(type(tb.tb_frame).f_locals))
    if hasattr(types, 'MemberDescriptorType'):
        # App-level slots are member descriptors on both PyPy and
        # CPython, but the various built-in attributes are all
        # getsetdescriptors on PyPy.  So check ismemberdescriptor()
        # with an app-level slot.
        self.istest(inspect.ismemberdescriptor,
                    'ExampleClassWithSlot.myslot')
    else:
        self.assertFalse(
            inspect.ismemberdescriptor(type(lambda: None).func_globals))
def test_repr(self):
    """str()/repr() of the sequence type must match plain lists; a
    self-referential container renders as '[...]'; and a deeply nested
    one raises RuntimeError instead of crashing."""
    empty = []
    sample = [0, 1, 2]
    seq_empty = self.type2test(empty)
    seq_sample = self.type2test(sample)

    self.assertEqual(str(seq_empty), str(empty))
    self.assertEqual(repr(seq_empty), repr(empty))
    self.assertEqual(repr(seq_sample), repr(sample))
    self.assertEqual(str(seq_sample), "[0, 1, 2]")
    self.assertEqual(repr(seq_sample), "[0, 1, 2]")

    # A container holding itself must print an ellipsis, not recurse.
    seq_sample.append(seq_sample)
    seq_sample.append(3)
    self.assertEqual(str(seq_sample), "[0, 1, 2, [...], 3]")
    self.assertEqual(repr(seq_sample), "[0, 1, 2, [...], 3]")

    # How deep we must nest to overflow depends on the implementation.
    if test_support.check_impl_detail():
        depth = sys.getrecursionlimit() + 100
    else:
        depth = 1000 * 1000  # should be enough to exhaust the stack
    nested = []
    for _ in xrange(depth):
        nested = [nested]
    self.assertRaises(RuntimeError, repr, nested)
def test_invalid_identitifer(self): m = ast.Module([ast.Expr(ast.Name(u"x", ast.Load()))]) ast.fix_missing_locations(m) with self.assertRaises(TypeError) as cm: compile(m, "<test>", "exec") if test_support.check_impl_detail(): self.assertIn("identifier must be of type str", str(cm.exception))
def test_invalid_string(self): m = ast.Module([ast.Expr(ast.Str(43))]) ast.fix_missing_locations(m) with self.assertRaises(TypeError) as cm: compile(m, "<test>", "exec") if test_support.check_impl_detail(): self.assertIn("string must be of type str or uni", str(cm.exception))
def test_cmptypes(self):
    # Built-in tp_compare slots expect their arguments to have the
    # same type, but a user-defined __coerce__ doesn't have to obey.
    # SF #980352
    evil_coercer = CoerceTo(42)
    # Make sure these don't crash any more
    self.assertNotEqual(cmp(u'fish', evil_coercer), 0)
    self.assertNotEqual(cmp(slice(1), evil_coercer), 0)
    # ...but that this still works
    if check_impl_detail():
        # NB. I (arigo) would consider the following as implementation-
        # specific.  For example, in CPython, if we replace 42 with 42.0
        # both below and in CoerceTo() above, then the test fails.  This
        # hints that the behavior is really dependent on some obscure
        # internal details.
        class WackyComparer(object):
            def __cmp__(slf, other):
                self.assertTrue(other == 42,
                                'expected evil_coercer, got %r' % other)
                return 0
            __hash__ = None  # Invalid cmp makes this unhashable
        self.assertEqual(cmp(WackyComparer(), evil_coercer), 0)
        # ...and classic classes too, since that code path is a little
        # different
        class ClassicWackyComparer:
            def __cmp__(slf, other):
                self.assertTrue(other == 42,
                                'expected evil_coercer, got %r' % other)
                return 0
        self.assertEqual(cmp(ClassicWackyComparer(), evil_coercer), 0)
def test_one(n): global mutate, dict1, dict2, dict1keys, dict2keys # Fill the dicts without mutating them. mutate = 0 dict1keys = fill_dict(dict1, range(n), n) dict2keys = fill_dict(dict2, range(n), n) # Enable mutation, then compare the dicts so long as they have the # same size. mutate = 1 if verbose: print "trying w/ lengths", len(dict1), len(dict2), while dict1 and len(dict1) == len(dict2): if verbose: print ".", try: if random.random() < 0.5: c = cmp(dict1, dict2) else: c = dict1 == dict2 except RuntimeError: # CPython never raises RuntimeError here, but other implementations # might, and it's fine. if check_impl_detail(cpython=True): raise if verbose: print
def test_popitem(self):
    # dict.popitem()
    for copymode in -1, +1:
        # -1: b has same structure as a
        # +1: b is a.copy()
        for log2size in range(12):
            size = 2**log2size
            a = {}
            b = {}
            for i in range(size):
                a[repr(i)] = i
                if copymode < 0:
                    b[repr(i)] = i
            if copymode > 0:
                b = a.copy()
            for i in range(size):
                ka, va = ta = a.popitem()
                self.assertEqual(va, int(ka))
                kb, vb = tb = b.popitem()
                self.assertEqual(vb, int(kb))
                if test_support.check_impl_detail():
                    # On CPython, two structurally identical dicts pop
                    # items in the same deterministic order.
                    self.assertFalse(copymode < 0 and ta != tb)
            # Both dicts must be fully drained.
            self.assertFalse(a)
            self.assertFalse(b)
    d = {}
    self.assertRaises(KeyError, d.popitem)
def test_excluding_predicates(self):
    """Each istest() call asserts that exactly the named inspect
    predicate accepts the eval()'d expression."""
    self.istest(inspect.isbuiltin, 'sys.exit')
    if check_impl_detail():
        # [].append is a built-in method only on CPython.
        self.istest(inspect.isbuiltin, '[].append')
    self.istest(inspect.iscode, 'mod.spam.func_code')
    self.istest(inspect.isframe, 'tb.tb_frame')
    self.istest(inspect.isfunction, 'mod.spam')
    self.istest(inspect.ismethod, 'mod.StupidGit.abuse')
    self.istest(inspect.ismethod, 'git.argue')
    self.istest(inspect.ismodule, 'mod')
    self.istest(inspect.istraceback, 'tb')
    self.istest(inspect.isdatadescriptor, '__builtin__.file.closed')
    self.istest(inspect.isdatadescriptor, '__builtin__.file.softspace')
    self.istest(inspect.isgenerator, '(x for x in xrange(2))')
    self.istest(inspect.isgeneratorfunction, 'generator_function_example')
    if hasattr(types, 'GetSetDescriptorType'):
        self.istest(inspect.isgetsetdescriptor,
                    'type(tb.tb_frame).f_locals')
    else:
        self.assertFalse(
            inspect.isgetsetdescriptor(type(tb.tb_frame).f_locals))
    if hasattr(types, 'MemberDescriptorType'):
        self.istest(inspect.ismemberdescriptor,
                    'type(lambda: None).func_globals')
    else:
        self.assertFalse(
            inspect.ismemberdescriptor(type(lambda: None).func_globals))
def test_buffers(self):
    """Exercise the buffer() built-in: construction errors, comparison,
    hashing, repetition, concatenation, slicing and read-only-ness."""
    self.assertRaises(ValueError, buffer, 'asdf', -1)
    cmp(buffer("abc"), buffer("def"))  # used to raise a warning: tp_compare didn't return -1, 0, or 1
    self.assertRaises(TypeError, buffer, None)

    a = buffer('asdf')
    hash(a)                        # buffers must be hashable
    b = a * 5
    if a == b:
        self.fail('buffers should not be equal')
    if str(b) != ('asdf' * 5):
        self.fail('repeated buffer has wrong content')
    if str(a * 0) != '':
        self.fail('repeated buffer zero times has wrong content')
    if str(a + buffer('def')) != 'asdfdef':
        self.fail('concatenation of buffers yields wrong content')
    # Re-wrapping a buffer, with and without offset/size arguments.
    if str(buffer(a)) != 'asdf':
        self.fail('composing buffers failed')
    if str(buffer(a, 2)) != 'df':
        self.fail('specifying buffer offset failed')
    if str(buffer(a, 0, 2)) != 'as':
        self.fail('specifying buffer size failed')
    if str(buffer(a, 1, 2)) != 'sd':
        self.fail('specifying buffer offset and size failed')
    self.assertRaises(ValueError, buffer, buffer('asdf', 1), -1)
    if str(buffer(buffer('asdf', 0, 2), 0)) != 'as':
        self.fail('composing length-specified buffer failed')
    if str(buffer(buffer('asdf', 0, 2), 0, 5000)) != 'as':
        self.fail('composing length-specified buffer failed')
    if str(buffer(buffer('asdf', 0, 2), 0, -1)) != 'as':
        self.fail('composing length-specified buffer failed')
    if str(buffer(buffer('asdf', 0, 2), 1, 2)) != 's':
        self.fail('composing length-specified buffer failed')
    # Buffers are read-only: neither item nor slice assignment works.
    try:
        a[1] = 'g'
    except TypeError:
        pass
    else:
        self.fail("buffer assignment should raise TypeError")
    try:
        a[0:1] = 'g'
    except TypeError:
        pass
    else:
        self.fail("buffer slice assignment should raise TypeError")
    # array.array() returns an object that does not implement a char buffer,
    # something which int() uses for conversion.
    import array
    try:
        int(buffer(array.array('c', '5')))
    except TypeError:
        pass
    else:
        if check_impl_detail():
            self.fail("char buffer (at C level) not working")
def test_rewrite_pyc_with_read_only_source(self):
    # Issue 6074: a long time ago on posix, and more recently on Windows,
    # a read only source file resulted in a read only pyc file, which
    # led to problems with updating it later
    sys.path.insert(0, os.curdir)
    fname = TESTFN + os.extsep + "py"
    try:
        # Write a Python file, make it read-only and import it
        with open(fname, 'w') as f:
            f.write("x = 'original'\n")
        # Tweak the mtime of the source to ensure pyc gets updated later
        s = os.stat(fname)
        os.utime(fname, (s.st_atime, s.st_mtime-100000000))
        os.chmod(fname, 0400)          # read-only for the owner
        m1 = __import__(TESTFN)
        self.assertEqual(m1.x, 'original')
        # Change the file and then reimport it
        os.chmod(fname, 0600)          # writable again
        with open(fname, 'w') as f:
            f.write("x = 'rewritten'\n")
        unload(TESTFN)
        m2 = __import__(TESTFN)
        self.assertEqual(m2.x, 'rewritten')
        # Now delete the source file and check the pyc was rewritten
        if check_impl_detail(pypy=False):
            # PyPy refuses to import a .pyc whose .py is missing, so this
            # last step is CPython-only.
            unlink(fname)
            unload(TESTFN)
            m3 = __import__(TESTFN)
            self.assertEqual(m3.x, 'rewritten')
    finally:
        # Restore permissions and module/sys.path state even on failure.
        chmod_files(TESTFN)
        remove_files(TESTFN)
        unload(TESTFN)
        del sys.path[0]
def test_descriptors(self):
    """repr() of method descriptors, staticmethod and classmethod
    objects; the descriptor repr differs between CPython and PyPy."""
    # method descriptors
    if check_impl_detail(cpython=True):
        self.assertEqual(repr(dict.items),
                         "<method 'items' of 'dict' objects>")
    elif check_impl_detail(pypy=True):
        self.assertEqual(repr(dict.items), "<unbound method dict.items>")
    # XXX member descriptors
    # XXX attribute descriptors
    # XXX slot descriptors
    # static and class methods
    class C:
        def foo(cls):
            pass
    for wrapper, prefix in ((staticmethod, '<staticmethod object at 0x'),
                            (classmethod, '<classmethod object at 0x')):
        self.assertTrue(repr(wrapper(C.foo)).startswith(prefix))
def test_c_buffer_raw(self, memoryview=memoryview):
    """Assigning a memoryview to a c_buffer's .raw works; .value rejects
    memoryviews, and an oversized .raw assignment raises ValueError."""
    buf = c_buffer(32)
    buf.raw = memoryview("Hello, World")
    self.assertEqual(buf.value, "Hello, World")
    if test_support.check_impl_detail():
        # .value only accepts plain strings.
        with self.assertRaises(TypeError):
            buf.value = memoryview("abc")
        # .raw must fit in the 32-byte buffer.
        with self.assertRaises(ValueError):
            buf.raw = memoryview("x" * 100)
def test_repr_deep(self):
    """repr() of a deeply nested container must raise RuntimeError
    rather than crash the interpreter."""
    # How deep we must nest to overflow depends on the implementation.
    if test_support.check_impl_detail():
        depth = sys.getrecursionlimit() + 100
    else:
        depth = 1000 * 1000  # should be enough to exhaust the stack
    nested = self.type2test([])
    for _ in xrange(depth):
        nested = self.type2test([nested])
    self.assertRaises(RuntimeError, repr, nested)
def test_bad_indentation(self):
    """The formatted IndentationError must contain the offending source
    line and (as an implementation detail) a caret at its column."""
    err = self.get_exception_format(self.syntax_error_bad_indentation,
                                    IndentationError)
    # assertEqual gives informative failure messages, unlike the
    # original assertTrue(x == y) pattern; this also matches the
    # sibling bad-indentation test.
    self.assertEqual(len(err), 4)
    self.assertEqual(err[1].strip(), "print 2")
    if check_impl_detail():
        # on CPython, there is a "^" at the end of the line
        # on PyPy, there is a "^" too, but at the start, more logically
        self.assertIn("^", err[2])
        self.assertEqual(err[1].find("2"), err[2].find("^"))
def test_select_mutated(self):
    """select() over a list whose fileno() callbacks mutate that same
    list; the two implementations end in documented, different states."""
    a = []
    class F:
        def fileno(self):
            # Shrink the list as a side effect of being polled.
            del a[-1]
            return sys.__stdout__.fileno()
    a[:] = [F()] * 10
    result = select.select([], a, [])
    # CPython: 'a' ends up with 5 items, because each fileno()
    # removes an item and at the middle the iteration stops.
    # PyPy: 'a' ends up empty, because the iteration is done on
    # a copy of the original list: fileno() is called 10 times.
    if test_support.check_impl_detail(cpython=True):
        self.assertEqual(len(result[1]), 5)
        self.assertEqual(len(a), 5)
    if test_support.check_impl_detail(pypy=True):
        self.assertEqual(len(result[1]), 10)
        self.assertEqual(len(a), 0)
def _test_dict_attribute(self, cls):
    """Helper: instances of *cls* get a working __dict__, but (on
    CPython) the __dict__ attribute itself can be neither replaced nor
    deleted."""
    instance = cls()
    instance.x = 5
    self.assertEqual(instance.__dict__, {'x': 5})
    if support.check_impl_detail():
        with self.assertRaises(AttributeError):
            instance.__dict__ = {}
        with self.assertRaises(AttributeError):
            del instance.__dict__
def test_exec_with_general_mapping_for_locals(self):
    """exec must accept any mapping for locals, reject non-mappings, and
    (on CPython) require a real dict for globals."""
    class M:
        "Test mapping interface versus possible calls from eval()."
        def __getitem__(self, key):
            if key == 'a':
                return 12
            raise KeyError
        def __setitem__(self, key, value):
            # Record the last store so the test can inspect it.
            self.results = (key, value)
        def keys(self):
            return list('xyz')

    m = M()
    g = globals()
    exec 'z = a' in g, m
    self.assertEqual(m.results, ('z', 12))
    try:
        exec 'z = b' in g, m
    except NameError:
        pass
    else:
        self.fail('Did not detect a KeyError')
    exec 'z = dir()' in g, m
    self.assertEqual(m.results, ('z', list('xyz')))
    exec 'z = globals()' in g, m
    self.assertEqual(m.results, ('z', g))
    exec 'z = locals()' in g, m
    self.assertEqual(m.results, ('z', m))
    if check_impl_detail():
        # CPython insists that the globals argument is a real dict.
        try:
            exec 'z = b' in m
        except TypeError:
            pass
        else:
            self.fail('Did not validate globals as a real dict')

    class A:
        "Non-mapping"
        pass
    m = A()
    try:
        exec 'z = a' in g, m
    except TypeError:
        pass
    else:
        self.fail('Did not validate locals as a mapping')

    # Verify that dict subclasses work as well
    class D(dict):
        def __getitem__(self, key):
            if key == 'a':
                return 12
            return dict.__getitem__(self, key)
    d = D()
    exec 'z = a' in g, d
    self.assertEqual(d['z'], 12)
def test_exc(formatstr, args, exception, excmsg):
    """Check that formatting *formatstr* with *args* raises *exception*
    with message *excmsg* (the message text is checked on CPython only)."""
    try:
        testformat(formatstr, args)
    except exception, exc:
        # Non-CPython implementations may word the message differently.
        if str(exc) == excmsg or not test_support.check_impl_detail():
            if verbose:
                print "yes"
        else:
            if verbose:
                print 'no'
            print 'Unexpected ', exception, ':', repr(str(exc))
def test_startswith_endswith_errors(self):
    """Error behaviour of str.startswith/endswith with bad arguments."""
    # A non-ASCII byte string compared against a unicode prefix must
    # fail to decode.
    with self.assertRaises(UnicodeDecodeError):
        '\xff'.startswith(u'x')
    with self.assertRaises(UnicodeDecodeError):
        '\xff'.endswith(u'x')
    # A list is not an acceptable prefix argument; the CPython message
    # mentions the types it does accept.
    for meth in ('foo'.startswith, 'foo'.endswith):
        with self.assertRaises(TypeError) as cm:
            meth(['f'])
        if test_support.check_impl_detail():
            message = str(cm.exception)
            for fragment in ('unicode', 'str', 'tuple'):
                self.assertIn(fragment, message)
def test_buffers(self):
    """Exercise the buffer() built-in: construction errors, comparison,
    hashing, repetition, concatenation, slicing and read-only-ness."""
    self.assertRaises(ValueError, buffer, 'asdf', -1)
    cmp(buffer("abc"), buffer("def"))  # used to raise a warning: tp_compare didn't return -1, 0, or 1
    self.assertRaises(TypeError, buffer, None)

    a = buffer('asdf')
    hash(a)                        # buffers must be hashable
    b = a * 5
    if a == b:
        self.fail('buffers should not be equal')
    if str(b) != ('asdf' * 5):
        self.fail('repeated buffer has wrong content')
    if str(a * 0) != '':
        self.fail('repeated buffer zero times has wrong content')
    if str(a + buffer('def')) != 'asdfdef':
        self.fail('concatenation of buffers yields wrong content')
    # Re-wrapping a buffer, with and without offset/size arguments.
    if str(buffer(a)) != 'asdf':
        self.fail('composing buffers failed')
    if str(buffer(a, 2)) != 'df':
        self.fail('specifying buffer offset failed')
    if str(buffer(a, 0, 2)) != 'as':
        self.fail('specifying buffer size failed')
    if str(buffer(a, 1, 2)) != 'sd':
        self.fail('specifying buffer offset and size failed')
    self.assertRaises(ValueError, buffer, buffer('asdf', 1), -1)
    if str(buffer(buffer('asdf', 0, 2), 0)) != 'as':
        self.fail('composing length-specified buffer failed')
    if str(buffer(buffer('asdf', 0, 2), 0, 5000)) != 'as':
        self.fail('composing length-specified buffer failed')
    if str(buffer(buffer('asdf', 0, 2), 0, -1)) != 'as':
        self.fail('composing length-specified buffer failed')
    if str(buffer(buffer('asdf', 0, 2), 1, 2)) != 's':
        self.fail('composing length-specified buffer failed')
    # Buffers are read-only: neither item nor slice assignment works.
    try:
        a[1] = 'g'
    except TypeError:
        pass
    else:
        self.fail("buffer assignment should raise TypeError")
    try:
        a[0:1] = 'g'
    except TypeError:
        pass
    else:
        self.fail("buffer slice assignment should raise TypeError")
    # array.array() returns an object that does not implement a char buffer,
    # something which int() uses for conversion.
    import array
    try:
        int(buffer(array.array('c', '5')))
    except TypeError:
        pass
    else:
        if check_impl_detail():
            self.fail("char buffer (at C level) not working")
def test_main():
    """Run the float-format test cases; the ones poking at CPython
    internals run only when implementation details are being tested."""
    ieee_double = (hasattr(float, '__getformat__') and
                   float.__getformat__('double').startswith('IEEE'))
    cases = [IEEEFormatTestCase] if ieee_double else []
    if test_support.check_impl_detail():
        cases += [FormatFunctionsTestCase, UnknownFormatTestCase]
    test_support.run_unittest(*cases)
def test_uninitialized(self):
    # An uninitialized module has no __dict__ or __name__,
    # and __doc__ is None
    foo = ModuleType.__new__(ModuleType)
    self.assertFalse(foo.__dict__)
    if check_impl_detail():
        # assertIsNone beats assertTrue(... is None): it reports the
        # actual value on failure.
        self.assertIsNone(foo.__dict__)
        self.assertRaises(SystemError, dir, foo)
    # Accessing __name__ must fail; assertRaises replaces the manual
    # try/except/else self.fail() pattern.
    self.assertRaises(AttributeError, getattr, foo, '__name__')
    self.assertEqual(foo.__doc__, ModuleType.__doc__)
def test_hash_randomization(self):
    # Verify that -R enables hash randomization:
    self.verify_valid_flag("-R")
    hashes = []
    for i in range(2):
        # Two separate interpreter runs should hash "spam" differently.
        code = 'print(hash("spam"))'
        data = self.start_python("-R", "-c", code)
        hashes.append(data)
    if check_impl_detail(pypy=False):
        # PyPy does not really implement it!
        self.assertNotEqual(hashes[0], hashes[1])

    # Verify that sys.flags contains hash_randomization
    code = "import sys; print sys.flags"
    data = self.start_python("-R", "-c", code)
    self.assertTrue("hash_randomization=1" in data)
def test_hash_randomization(self):
    # Verify that -R enables hash randomization:
    self.verify_valid_flag('-R')
    hashes = []
    for i in range(2):
        # Two separate interpreter runs should hash "spam" differently.
        code = 'print(hash("spam"))'
        data = self.start_python('-R', '-c', code)
        hashes.append(data)
    if check_impl_detail(pypy=False):
        # PyPy does not really implement it!
        self.assertNotEqual(hashes[0], hashes[1])

    # Verify that sys.flags contains hash_randomization
    code = 'import sys; print sys.flags'
    data = self.start_python('-R', '-c', code)
    self.assertTrue('hash_randomization=1' in data)
def cannot_set_attr(self, obj, name, value, exceptions):
    """Helper: assert that attribute *name* can be neither set nor
    deleted on *obj*.

    On non-CPython implementations the exact exception type may vary,
    so the accepted set is widened there.
    """
    if not test_support.check_impl_detail():
        exceptions = (TypeError, AttributeError)
    attempts = (
        (lambda: setattr(obj, name, value),
         "shouldn't be able to set %s to %r" % (name, value)),
        (lambda: delattr(obj, name),
         "shouldn't be able to del %s" % name),
    )
    for attempt, failure_message in attempts:
        try:
            attempt()
        except exceptions:
            pass
        else:
            self.fail(failure_message)
def blowstack(fxn, arg, compare_to):
    # Make sure that calling isinstance with a deeply nested tuple for its
    # argument will raise RuntimeError eventually.
    tuple_arg = (compare_to, )
    if test_support.check_impl_detail(cpython=True):
        RECURSION_LIMIT = sys.getrecursionlimit()
    else:
        # on non-CPython implementations, the maximum
        # actual recursion limit might be higher, but
        # probably not higher than 99999
        #
        RECURSION_LIMIT = 99999

    # Nest a few levels past the limit so the recursive walk must fail.
    for cnt in xrange(RECURSION_LIMIT + 5):
        tuple_arg = (tuple_arg, )
        fxn(arg, tuple_arg)
def blowstack(fxn, arg, compare_to):
    """Call fxn(arg, t) with tuples t nested deeper and deeper past the
    recursion limit, so the recursive tuple walk must eventually raise
    RuntimeError."""
    nested = (compare_to,)
    if test_support.check_impl_detail(cpython=True):
        recursion_limit = sys.getrecursionlimit()
    else:
        # on non-CPython implementations, the maximum
        # actual recursion limit might be higher, but
        # probably not higher than 99999
        #
        recursion_limit = 99999
    for _ in xrange(recursion_limit + 5):
        nested = (nested,)
        fxn(arg, nested)
def test_proxy_ref(self):
    """Dereferencing a proxy after its referent died must raise
    ReferenceError, and each proxy's callback must have run."""
    o = C()
    o.bar = 1
    ref1 = weakref.proxy(o, self.callback)
    ref2 = weakref.proxy(o, self.callback)
    del o
    test_support.gc_collect()

    def check(proxy):
        proxy.bar

    self.assertRaises(weakref.ReferenceError, check, ref1)
    self.assertRaises(weakref.ReferenceError, check, ref2)
    if test_support.check_impl_detail():
        # Works only with refcounting
        self.assertRaises(weakref.ReferenceError, bool,
                          weakref.proxy(C()))
    # assert_ is a deprecated unittest alias; assertEqual also gives a
    # useful failure message.
    self.assertEqual(self.cbcalled, 2)
def _test_default_attrs(self, ctor, *args): obj = ctor(*args) # Check defaults self.assertEqual(obj.dialect.delimiter, ",") self.assertEqual(obj.dialect.doublequote, True) self.assertEqual(obj.dialect.escapechar, None) self.assertEqual(obj.dialect.lineterminator, "\r\n") self.assertEqual(obj.dialect.quotechar, '"') self.assertEqual(obj.dialect.quoting, csv.QUOTE_MINIMAL) self.assertEqual(obj.dialect.skipinitialspace, False) self.assertEqual(obj.dialect.strict, False) if test_support.check_impl_detail(): # Try deleting or changing attributes (they are read-only) self.assertRaises(TypeError, delattr, obj.dialect, "delimiter") self.assertRaises(TypeError, setattr, obj.dialect, "delimiter", ":") self.assertRaises(AttributeError, delattr, obj.dialect, "quoting") self.assertRaises(AttributeError, setattr, obj.dialect, "quoting", None)
def test_yet_more_evil_still_undecodable(self):
    # Issue #25388: running a script containing undecodable bytes must
    # fail with a clean error, not crash the interpreter.
    src = b"#\x00\n#\xfd\n"
    tmpd = tempfile.mkdtemp()
    try:
        fn = os.path.join(tmpd, "bad.py")
        with open(fn, "wb") as fp:
            fp.write(src)
        try:
            rc, out, err = script_helper.assert_python_failure(fn)
        except AssertionError:
            if check_impl_detail(pypy=True):
                # as long as we don't crash
                return
            raise
    finally:
        test_support.rmtree(tmpd)
    self.assertIn(b"Non-ASCII", err)
def test_subclass_refs_dont_replace_standard_refs(self):
    """A weakref.ref subclass must coexist with a plain weakref.ref to
    the same object rather than being shared with it."""
    class MyRef(weakref.ref):
        pass
    o = Object(42)
    r1 = MyRef(o)
    r2 = weakref.ref(o)
    # assertIsNot/assertIs replace assertTrue(x is [not] y) for clearer
    # failure output.
    self.assertIsNot(r1, r2)
    self.assertEqual(weakref.getweakrefs(o), [r2, r1])
    self.assertEqual(weakref.getweakrefcount(o), 2)
    r3 = MyRef(o)
    self.assertEqual(weakref.getweakrefcount(o), 3)
    refs = weakref.getweakrefs(o)
    self.assertEqual(len(refs), 3)
    # Bare assert is stripped under -O and gives no message; use
    # assertEqual instead.
    self.assertEqual(set(refs), set((r1, r2, r3)))
    if test_support.check_impl_detail():
        # The plain ref comes first on CPython.
        self.assertIs(refs[0], r2)
        self.assertIn(r1, refs[1:])
        self.assertIn(r3, refs[1:])
def test_AST_objects(self): if test_support.check_impl_detail(): # PyPy also provides a __dict__ to the ast.AST base class. x = ast.AST() try: x.foobar = 21 except AttributeError, e: self.assertEquals(e.args[0], "'_ast.AST' object has no attribute 'foobar'") else: self.assert_(False) try: ast.AST(lineno=2) except AttributeError, e: self.assertEquals(e.args[0], "'_ast.AST' object has no attribute 'lineno'")