def drive_one(pattern, length):
    """Build a string of exactly `length` chars by repeating `pattern`,
    then run try_one() on it and on its off-by-one neighbours."""
    q, r = divmod(length, len(pattern))
    teststring = pattern * q + pattern[:r]
    verify(len(teststring) == length)
    try_one(teststring)
    # probe one char longer and one char shorter than the exact length
    try_one(teststring + "x")
    try_one(teststring[:-1])
def test_im_doc():
    """A method's __doc__ is readable (bound or unbound) but not settable."""
    class Klass:
        def foo(self):
            "hello"
    # check both the unbound and the bound method object
    for meth in (Klass.foo, Klass().foo):
        verify(meth.__doc__ == "hello")
        cantset(meth, "__doc__", "hello")
def test_logs():
    """Check math.log/log10 on exact powers of ten, and that both raise
    ValueError for arguments <= 0."""
    import math

    if verbose:
        print "log and log10"

    LOG10E = math.log10(math.e)

    for exp in range(10) + [100, 1000, 10000]:
        value = 10 ** exp
        log10 = math.log10(value)
        verify(fcmp(log10, exp) == 0)

        # log10(value) == exp, so log(value) == log10(value)/log10(e) ==
        # exp/LOG10E
        expected = exp / LOG10E
        log = math.log(value)
        verify(fcmp(log, expected) == 0)

    # non-positive arguments (including a huge negative long) must raise
    for bad in -(1L << 10000), -2L, 0L:
        try:
            math.log(bad)
            raise TestFailed("expected ValueError from log(<= 0)")
        except ValueError:
            pass

        try:
            math.log10(bad)
            raise TestFailed("expected ValueError from log10(<= 0)")
        except ValueError:
            pass
def test_im_class():
    """im_class reports the defining class and cannot be reassigned."""
    class Klass:
        def foo(self):
            pass
    # same checks for the unbound and the bound method
    for meth in (Klass.foo, Klass().foo):
        verify(meth.im_class is Klass)
        cantset(meth, "im_class", Klass)
def test_im_name():
    """A method's __name__ mirrors the function name and is read-only."""
    class Klass:
        def foo(self):
            pass
    for meth in (Klass.foo, Klass().foo):
        verify(meth.__name__ == "foo")
        cantset(meth, "__name__", "foo")
def test_func_code():
    """func_code is a code object; it may be replaced only by a code object
    with the same number of free variables."""
    a = b = 24
    def f(): pass
    def g(): print 12
    def f1(): print a
    def g1(): print b
    def f2(): print a, b
    verify(type(f.func_code) is types.CodeType)
    # swapping in a code object with the same free-var count is allowed
    f.func_code = g.func_code
    cantset(f, "func_code", None)
    # can't change the number of free vars
    cantset(f, "func_code", f1.func_code, exception=ValueError)
    cantset(f1, "func_code", f.func_code, exception=ValueError)
    cantset(f1, "func_code", f2.func_code, exception=ValueError)
    # f1 and g1 both close over one variable, so this swap must work
    f1.func_code = g1.func_code
def check_all(self, modname):
    """Verify that `from modname import *` exports exactly the names in
    the module's __all__.  Skips silently (after cleaning sys.modules)
    when the module is unavailable in this environment."""
    names = {}
    try:
        exec "import %s" % modname in names
    except ImportError:
        # Silent fail here seems the best route since some modules
        # may not be available in all environments.
        # Since an ImportError may leave a partial module object in
        # sys.modules, get rid of that first.  Here's what happens if
        # you don't: importing pty fails on Windows because pty tries to
        # import FCNTL, which doesn't exist.  That raises an ImportError,
        # caught here.  It also leaves a partial pty module in sys.modules.
        # So when test_pty is called later, the import of pty succeeds,
        # but shouldn't.  As a result, test_pty crashes with an
        # AttributeError instead of an ImportError, and regrtest interprets
        # the latter as a test failure (ImportError is treated as "test
        # skipped" -- which is what test_pty should say on Windows).
        try:
            del sys.modules[modname]
        except KeyError:
            pass
        return
    verify(hasattr(sys.modules[modname], "__all__"),
           "%s has no __all__ attribute" % modname)
    names = {}
    exec "from %s import *" % modname in names
    if names.has_key("__builtins__"):
        del names["__builtins__"]
    keys = Set(names)
    all = Set(sys.modules[modname].__all__)
    verify(keys == all, "%s != %s" % (keys, all))
def check_all(self, modname): names = {} try: exec "import %s" % modname in names except ImportError: # Silent fail here seems the best route since some modules # may not be available in all environments. # Since an ImportError may leave a partial module object in # sys.modules, get rid of that first. Here's what happens if # you don't: importing pty fails on Windows because pty tries to # import FCNTL, which doesn't exist. That raises an ImportError, # caught here. It also leaves a partial pty module in sys.modules. # So when test_pty is called later, the import of pty succeeds, # but shouldn't. As a result, test_pty crashes with an # AttributeError instead of an ImportError, and regrtest interprets # the latter as a test failure (ImportError is treated as "test # skipped" -- which is what test_pty should say on Windows). try: del sys.modules[modname] except KeyError: pass return verify(hasattr(sys.modules[modname], "__all__"), "%s has no __all__ attribute" % modname) names = {} exec "from %s import *" % modname in names if names.has_key("__builtins__"): del names["__builtins__"] keys = Set(names) all = Set(sys.modules[modname].__all__) verify(keys==all, "%s != %s" % (keys, all))
def check_all(self, modname):
    """Verify `from modname import *` exports exactly modname.__all__,
    restoring sys.modules to its pre-test state if the import fails."""
    names = {}
    original_sys_modules = sys.modules.copy()
    try:
        exec "import %s" % modname in names
    except ImportError:
        # Silent fail here seems the best route since some modules
        # may not be available in all environments.
        # We restore sys.modules to avoid leaving broken modules behind,
        # but we must not remove built-in modules from sys.modules
        # (because they can't be re-imported, typically)
        for name in sys.modules.keys():
            if name in original_sys_modules:
                continue
            # XXX hackish
            # only pure-Python modules (identified by file suffix) are
            # safe to evict; extension/built-in modules are left alone
            mod = sys.modules[name]
            if not hasattr(mod, '__file__'):
                continue
            if (mod.__file__.lower().endswith('.py')
                    or mod.__file__.lower().endswith('.pyc')
                    or mod.__file__.lower().endswith('.pyo')):
                del sys.modules[name]
        return
    verify(hasattr(sys.modules[modname], "__all__"),
           "%s has no __all__ attribute" % modname)
    names = {}
    exec "from %s import *" % modname in names
    if names.has_key("__builtins__"):
        del names["__builtins__"]
    keys = set(names)
    all = set(sys.modules[modname].__all__)
    verify(keys==all, "%s != %s" % (keys, all))
def test_float_overflow():
    """Mixing huge longs with floats (conversion or arithmetic) must raise
    OverflowError rather than silently losing magnitude."""
    import math

    if verbose:
        print "long->float overflow"

    # small longs convert to float exactly
    for x in -2.0, -1.0, 0.0, 1.0, 2.0:
        verify(float(long(x)) == x)

    shuge = '12345' * 120
    huge = 1L << 30000
    mhuge = -huge
    namespace = {'huge': huge, 'mhuge': mhuge, 'shuge': shuge, 'math': math}
    for test in ["float(huge)", "float(mhuge)",
                 "complex(huge)", "complex(mhuge)",
                 "complex(huge, 1)", "complex(mhuge, 1)",
                 "complex(1, huge)", "complex(1, mhuge)",
                 "1. + huge", "huge + 1.", "1. + mhuge", "mhuge + 1.",
                 "1. - huge", "huge - 1.", "1. - mhuge", "mhuge - 1.",
                 "1. * huge", "huge * 1.", "1. * mhuge", "mhuge * 1.",
                 "1. // huge", "huge // 1.", "1. // mhuge", "mhuge // 1.",
                 "1. / huge", "huge / 1.", "1. / mhuge", "mhuge / 1.",
                 "1. ** huge", "huge ** 1.", "1. ** mhuge", "mhuge ** 1.",
                 "math.sin(huge)", "math.sin(mhuge)",
                 "math.sqrt(huge)", "math.sqrt(mhuge)", # should do better
                 "math.floor(huge)", "math.floor(mhuge)",]:
                 #XXX: not working
                 #"float(shuge) == int(shuge)"]:
        try:
            eval(test, namespace)
        except OverflowError:
            pass
        else:
            raise TestFailed("expected OverflowError from %s" % test)
def check_all(self, modname): names = {} original_sys_modules = sys.modules.copy() try: exec "import %s" % modname in names except ImportError: # Silent fail here seems the best route since some modules # may not be available in all environments. # We restore sys.modules to avoid leaving broken modules behind, # but we must not remove built-in modules from sys.modules # (because they can't be re-imported, typically) for name in sys.modules.keys(): if name in original_sys_modules: continue # XXX hackish mod = sys.modules[name] if not hasattr(mod, '__file__'): continue if (mod.__file__.lower().endswith('.py') or mod.__file__.lower().endswith('.pyc') or mod.__file__.lower().endswith('.pyo')): del sys.modules[name] return verify(hasattr(sys.modules[modname], "__all__"), "%s has no __all__ attribute" % modname) names = {} exec "from %s import *" % modname in names if names.has_key("__builtins__"): del names["__builtins__"] keys = set(names) all = set(sys.modules[modname].__all__) verify(keys == all, "%s != %s" % (keys, all))
def test():
    """Driver: run test_all() with leak debugging off, then restore the GC
    debug flags and prior enabled/disabled state."""
    if not hasattr(gc, 'get_debug'):
        if verbose:
            print "skipping test_gc: too many GC differences with CPython"
        return
    if verbose:
        print "disabling automatic collection"
    enabled = gc.isenabled()
    gc.disable()
    verify(not gc.isenabled())
    debug = gc.get_debug()
    gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak

    try:
        test_all()
    finally:
        gc.set_debug(debug)
        # test gc.enable() even if GC is disabled by default
        if verbose:
            print "restoring automatic collection"
        # make sure to always test gc.enable()
        gc.enable()
        verify(gc.isenabled())
        if not enabled:
            gc.disable()
def test_main(verbose=None):
    """Run every generator in `tests` and verify its full output matches
    the expected list recorded in the table."""
    from test.test_support import verify
    import sys
    for func in tests:
        expected = tests[func]
        got = list(func())
        verify(got == expected,
               "%s: expected %s, got %s" % (func.__name__, expected, got))
def test_im_dict():
    """A method's __dict__ mirrors the function's attributes and cannot be
    replaced wholesale."""
    class Klass:
        def foo(self):
            pass
        foo.bar = 42
    expected = {'bar': 42}
    for meth in (Klass.foo, Klass().foo):
        verify(meth.__dict__ == expected)
        cantset(meth, "__dict__", Klass.foo.__dict__)
def test_im_self():
    """im_self is None on an unbound method and the instance on a bound
    one; neither can be reassigned."""
    class Klass:
        def foo(self):
            pass
    verify(Klass.foo.im_self is None)
    inst = Klass()
    verify(inst.foo.im_self is inst)
    cantset(Klass.foo, "im_self", None)
    cantset(inst.foo, "im_self", inst)
def test_main(verbose=None):
    """Run each generator listed in `tests` and verify the materialized
    output equals the expected list stored as the table value."""
    from test.test_support import verify
    import sys
    for func in tests:
        expected = tests[func]
        result = list(func())
        verify(result == expected,
               "%s: expected %s, got %s" % (
            func.__name__, expected, result))
def test_im_func():
    """im_func exposes the underlying function object and is read-only."""
    def foo(self):
        pass
    class Klass:
        pass
    # attach the plain function as a method after the fact
    Klass.foo = foo
    for meth in (Klass.foo, Klass().foo):
        verify(meth.im_func is foo)
        cantset(meth, "im_func", foo)
def testdgram(proto, addr):
    """Send `teststring` over a datagram socket to `addr` and verify the
    echoed reply matches."""
    s = socket.socket(proto, socket.SOCK_DGRAM)
    s.sendto(teststring, addr)
    buf = data = receive(s, 100)
    # accumulate replies until a newline arrives or the peer goes quiet
    while data and '\n' not in buf:
        data = receive(s, 100)
        buf += data
    verify(buf == teststring)
    s.close()
def test_anon():
    """An anonymous mapping starts zero-filled and is byte-writable."""
    print " anonymous mmap.mmap(-1, PAGESIZE)..."
    m = mmap.mmap(-1, PAGESIZE)
    for x in xrange(PAGESIZE):
        verify(m[x] == '\0', "anonymously mmap'ed contents should be zero")

    # write a distinct byte to every position and read it back
    for x in xrange(PAGESIZE):
        m[x] = ch = chr(x & 255)
        vereq(m[x], ch)
def teststream(proto, addr):
    """Connect to `addr`, send `teststring`, and verify the echoed reply."""
    sock = socket.socket(proto, socket.SOCK_STREAM)
    sock.connect(addr)
    sock.sendall(teststring)
    buf = data = receive(sock, 100)
    # keep reading until a newline shows up or the peer stops sending
    while data and '\n' not in buf:
        data = receive(sock, 100)
        buf += data
    verify(buf == teststring)
    sock.close()
def test_keys():
    """Smoke-test keys()/has_key() on a freshly created dbm database."""
    d = dbm.open(filename, 'c')
    verify(d.keys() == [])
    d['a'] = 'b'
    d['12345678910'] = '019237410982340912840198242'
    d.keys()
    if d.has_key('a'):
        if verbose:
            print 'Test dbm keys: ', d.keys()

    d.close()
def QueueJoinTest(q):
    """q.join() must block until all 100 queued items are task_done(),
    and joining an already-drained queue must return immediately."""
    global cum
    cum = 0
    for i in (0,1):
        threading.Thread(target=worker, args=(q,)).start()
    for i in xrange(100):
        q.put(i)
    q.join()
    verify(cum==sum(range(100)),
           "q.join() did not block until all tasks were done")
    for i in (0,1):
        q.put(None)   # instruct the threads to close
    q.join()          # verify that you can join twice
def QueueJoinTest(q):
    """Exercise q.join(): it must block until every queued task is done,
    and a second join on the drained queue must succeed."""
    global cum
    cum = 0
    # two worker threads service the queue
    for _ in range(2):
        threading.Thread(target=worker, args=(q,)).start()
    for item in xrange(100):
        q.put(item)
    q.join()
    verify(cum == sum(range(100)),
           "q.join() did not block until all tasks were done")
    # a None item tells each worker to shut down
    for _ in range(2):
        q.put(None)
    # joining a second time must also work
    q.join()
def cantset(obj, name, value): verify(hasattr(obj, name)) # Otherwise it's probably a typo try: setattr(obj, name, value) except (AttributeError, TypeError): pass else: raise TestFailed, "shouldn't be able to set %s to %r" % (name, value) try: delattr(obj, name) except (AttributeError, TypeError): pass else: raise TestFailed, "shouldn't be able to del %s" % name
def cantset(obj, name, value, exception=(AttributeError, TypeError)): verify(hasattr(obj, name)) # Otherwise it's probably a typo try: setattr(obj, name, value) except exception: pass else: raise TestFailed, "shouldn't be able to set %s to %r" % (name, value) try: delattr(obj, name) except (AttributeError, TypeError): pass else: raise TestFailed, "shouldn't be able to del %s" % name
def __init__(self, formatpair, bytesize):
    """formatpair: a (signed_code, unsigned_code) pair of struct format
    characters; bytesize: the size both codes must have in every byte
    order.  Precomputes the representable min/max for each signedness."""
    assert len(formatpair) == 2
    self.formatpair = formatpair
    # the advertised size must hold under every byte-order prefix
    for direction in "<>!=":
        for code in formatpair:
            format = direction + code
            verify(struct.calcsize(format) == bytesize)
    self.bytesize = bytesize
    self.bitsize = bytesize * 8
    self.signed_code, self.unsigned_code = formatpair
    self.unsigned_min = 0
    self.unsigned_max = 2L**self.bitsize - 1
    self.signed_min = -(2L**(self.bitsize - 1))
    self.signed_max = 2L**(self.bitsize - 1) - 1
def __init__(self, formatpair, bytesize): assert len(formatpair) == 2 self.formatpair = formatpair for direction in "<>!=": for code in formatpair: format = direction + code verify(struct.calcsize(format) == bytesize) self.bytesize = bytesize self.bitsize = bytesize * 8 self.signed_code, self.unsigned_code = formatpair self.unsigned_min = 0 self.unsigned_max = 2L**self.bitsize - 1 self.signed_min = -(2L**(self.bitsize-1)) self.signed_max = 2L**(self.bitsize-1) - 1
def test_poll1():
    """Basic functional test of poll object

    Create a bunch of pipe and test that poll works with them.
    """
    print "Running poll test 1"
    p = select.poll()

    NUM_PIPES = 12
    MSG = " This is a test."
    MSG_LEN = len(MSG)

    readers = []
    writers = []
    r2w = {}   # read fd -> paired write fd
    w2r = {}   # write fd -> paired read fd

    for i in range(NUM_PIPES):
        rd, wr = os.pipe()
        p.register(rd, select.POLLIN)
        p.register(wr, select.POLLOUT)
        readers.append(rd)
        writers.append(wr)
        r2w[rd] = wr
        w2r[wr] = rd

    # write through a random ready pipe, read it back, then retire that
    # pipe pair until none are left
    while writers:
        ready = p.poll()
        ready_writers = find_ready_matching(ready, select.POLLOUT)
        if not ready_writers:
            raise RuntimeError, "no pipes ready for writing"
        wr = random.choice(ready_writers)
        os.write(wr, MSG)

        ready = p.poll()
        ready_readers = find_ready_matching(ready, select.POLLIN)
        if not ready_readers:
            raise RuntimeError, "no pipes ready for reading"
        rd = random.choice(ready_readers)
        buf = os.read(rd, MSG_LEN)
        verify(len(buf) == MSG_LEN)
        print buf
        os.close(r2w[rd])
        os.close(rd)
        p.unregister(r2w[rd])
        p.unregister(rd)
        writers.remove(r2w[rd])

    poll_unit_tests()
    print "Poll test 1 complete"
def test_poll1():
    """Basic functional test of poll object

    Create a bunch of pipe and test that poll works with them.
    """
    print 'Running poll test 1'
    p = select.poll()

    NUM_PIPES = 12
    MSG = " This is a test."
    MSG_LEN = len(MSG)

    readers = []
    writers = []
    r2w = {}   # read fd -> paired write fd
    w2r = {}   # write fd -> paired read fd

    for i in range(NUM_PIPES):
        rd, wr = os.pipe()
        p.register(rd, select.POLLIN)
        p.register(wr, select.POLLOUT)
        readers.append(rd)
        writers.append(wr)
        r2w[rd] = wr
        w2r[wr] = rd

    # write through a random ready pipe, read it back, then retire that
    # pipe pair until none are left
    while writers:
        ready = p.poll()
        ready_writers = find_ready_matching(ready, select.POLLOUT)
        if not ready_writers:
            raise RuntimeError, "no pipes ready for writing"
        wr = random.choice(ready_writers)
        os.write(wr, MSG)

        ready = p.poll()
        ready_readers = find_ready_matching(ready, select.POLLIN)
        if not ready_readers:
            raise RuntimeError, "no pipes ready for reading"
        rd = random.choice(ready_readers)
        buf = os.read(rd, MSG_LEN)
        verify(len(buf) == MSG_LEN)
        print buf
        os.close(r2w[rd])
        os.close(rd)
        p.unregister(r2w[rd])
        p.unregister(rd)
        writers.remove(r2w[rd])

    poll_unit_tests()
    print 'Poll test 1 complete'
def check_all(self, modname): names = {} try: exec "import %s" % modname in names except ImportError: # Silent fail here seems the best route since some modules # may not be available in all environments. return verify(hasattr(sys.modules[modname], "__all__"), "%s has no __all__ attribute" % modname) names = {} exec "from %s import *" % modname in names if names.has_key("__builtins__"): del names["__builtins__"] keys = set(names) all = set(sys.modules[modname].__all__) verify(keys == all, "%s != %s" % (keys, all))
def test_func_defaults():
    """func_defaults is writable and deletable, and assigned defaults
    actually take effect in calls."""
    def f(a, b):
        return (a, b)
    verify(f.func_defaults is None)
    f.func_defaults = (1, 2)
    verify(f.func_defaults == (1, 2))
    # the new default for b is used when only a is supplied
    verify(f(10) == (10, 2))
    def g(a=1, b=2):
        return (a, b)
    verify(g.func_defaults == (1, 2))
    del g.func_defaults
    verify(g.func_defaults is None)
    # with the defaults deleted, calling with no args must fail
    try:
        g()
    except TypeError:
        pass
    else:
        raise TestFailed, "shouldn't be allowed to call g() w/o defaults"
def SimpleQueueTest(q):
    """Exercise put/get ordering, empty/full predicates, and the
    non-blocking / timeout / blocking behaviour of `q`.

    `q` must be empty on entry and is left empty on success.
    """
    if not q.empty():
        raise RuntimeError, "Call this function with an empty queue"
    # I guess we better check things actually queue correctly a little :)
    q.put(111)
    q.put(222)
    verify(q.get() == 111 and q.get() == 222,
           "Didn't seem to queue the correct data!")
    for i in range(QUEUE_SIZE-1):
        q.put(i)
    verify(not q.empty(), "Queue should not be empty")
    verify(not q.full(), "Queue should not be full")
    q.put("last")
    verify(q.full(), "Queue should be full")
    try:
        q.put("full", block=0)
        raise TestFailed("Didn't appear to block with a full queue")
    except Queue.Full:
        pass
    try:
        q.put("full", timeout=0.01)
        raise TestFailed("Didn't appear to time-out with a full queue")
    except Queue.Full:
        pass
    # Test a blocking put
    _doBlockingTest(q.put, ("full",), q.get, ())
    _doBlockingTest(q.put, ("full", True, 10), q.get, ())
    # Empty it
    for i in range(QUEUE_SIZE):
        q.get()
    verify(q.empty(), "Queue should be empty")
    try:
        q.get(block=0)
        raise TestFailed("Didn't appear to block with an empty queue")
    except Queue.Empty:
        pass
    try:
        q.get(timeout=0.01)
        raise TestFailed("Didn't appear to time-out with an empty queue")
    except Queue.Empty:
        pass
    # Test a blocking get
    _doBlockingTest(q.get, (), q.put, ('empty',))
    _doBlockingTest(q.get, (True, 10), q.put, ('empty',))
def test_keys(): d = dbm.open(filename, 'c') verify(d.keys() == []) d['a'] = 'b' d['12345678910'] = '019237410982340912840198242' verify(d.keys() == ['12345678910', 'a']) verify(d.get('a') == 'b') verify(d.get('b', None) == None) try: d.get('b') except KeyError, e: pass
def test_native_qQ(): bytes = struct.calcsize('q') # The expected values here are in big-endian format, primarily because # I'm on a little-endian machine and so this is the clearest way (for # me) to force the code to get exercised. for format, input, expected in ( ('q', -1, '\xff' * bytes), ('q', 0, '\x00' * bytes), ('Q', 0, '\x00' * bytes), ('q', 1L, '\x00' * (bytes-1) + '\x01'), ('Q', (1L << (8*bytes))-1, '\xff' * bytes), ('q', (1L << (8*bytes-1))-1, '\x7f' + '\xff' * (bytes - 1))): got = struct.pack(format, input) native_expected = bigendian_to_native(expected) verify(got == native_expected, "%r-pack of %r gave %r, not %r" % (format, input, got, native_expected)) retrieved = struct.unpack(format, got)[0] verify(retrieved == input, "%r-unpack of %r gave %r, not %r" % (format, got, retrieved, input))
def test():
    """Run test_all() with DEBUG_LEAK cleared, then restore the GC debug
    flags and the prior enabled/disabled state."""
    if verbose:
        print "disabling automatic collection"
    enabled = gc.isenabled()
    gc.disable()
    verify(not gc.isenabled())
    debug = gc.get_debug()
    gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak

    try:
        test_all()
    finally:
        gc.set_debug(debug)
        # test gc.enable() even if GC is disabled by default
        if verbose:
            print "restoring automatic collection"
        # make sure to always test gc.enable()
        gc.enable()
        verify(gc.isenabled())
        if not enabled:
            gc.disable()
def DeleteTestData(root_key):
    """Delete the values, subkey, and test key created under `root_key`,
    verifying double-delete and reopen-after-delete both fail."""
    key = OpenKey(root_key, test_key_name, 0, KEY_ALL_ACCESS)
    sub_key = OpenKey(key, "sub_key", 0, KEY_ALL_ACCESS)
    # It is not necessary to delete the values before deleting
    # the key (although subkeys must not exist).  We delete them
    # manually just to prove we can :-)
    for value_name, value_data, value_type in test_data:
        DeleteValue(sub_key, value_name)

    nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
    verify(nkeys==0 and nvalues==0, "subkey not empty before delete")
    sub_key.Close()
    DeleteKey(key, "sub_key")

    try:
        # Shouldn't be able to delete it twice!
        DeleteKey(key, "sub_key")
        verify(0, "Deleting the key twice succeeded")
    except EnvironmentError:
        pass
    key.Close()
    DeleteKey(root_key, test_key_name)
    # Opening should now fail!
    try:
        key = OpenKey(root_key, test_key_name)
        verify(0, "Could open the non-existent key")
    except WindowsError: # Use this error name this time
        pass
def main():
    """Fork while worker threads run; the child exits with the count of
    threads whose counters changed after the fork (expected 0), and the
    parent checks for a clean child exit before stopping the threads."""
    for i in range(NUM_THREADS):
        thread.start_new(f, (i, ))

    time.sleep(LONGSLEEP)

    a = alive.keys()
    a.sort()
    verify(a == range(NUM_THREADS))

    prefork_lives = alive.copy()

    # NOTE(review): fork1() is presumably the single-thread fork on
    # unixware7 -- confirm against that platform's docs
    if sys.platform in ['unixware7']:
        cpid = os.fork1()
    else:
        cpid = os.fork()

    if cpid == 0:
        # Child
        time.sleep(LONGSLEEP)
        n = 0
        for key in alive.keys():
            if alive[key] != prefork_lives[key]:
                n = n + 1
        os._exit(n)
    else:
        # Parent
        spid, status = os.waitpid(cpid, 0)
        verify(spid == cpid)
        verify(status == 0,
               "cause = %d, exit = %d" % (status & 0xff, status >> 8))
        global stop
        # Tell threads to die
        stop = 1
        time.sleep(2 * SHORTSLEEP) # Wait for threads to die
def main():
    """Fork while worker threads run; the child exits with the count of
    threads whose counters changed after the fork (expected 0), and the
    parent checks for a clean child exit before stopping the threads."""
    for i in range(NUM_THREADS):
        thread.start_new(f, (i,))

    time.sleep(LONGSLEEP)

    a = alive.keys()
    a.sort()
    verify(a == range(NUM_THREADS))

    prefork_lives = alive.copy()

    # NOTE(review): fork1() is presumably the single-thread fork on
    # unixware7 -- confirm against that platform's docs
    if sys.platform in ['unixware7']:
        cpid = os.fork1()
    else:
        cpid = os.fork()

    if cpid == 0:
        # Child
        time.sleep(LONGSLEEP)
        n = 0
        for key in alive.keys():
            if alive[key] != prefork_lives[key]:
                n = n+1
        os._exit(n)
    else:
        # Parent
        spid, status = os.waitpid(cpid, 0)
        verify(spid == cpid)
        verify(status == 0,
               "cause = %d, exit = %d" % (status&0xff, status>>8) )
        global stop
        # Tell threads to die
        stop = 1
        time.sleep(2*SHORTSLEEP) # Wait for threads to die
def test_native_qQ(): bytes = struct.calcsize('q') # The expected values here are in big-endian format, primarily because # I'm on a little-endian machine and so this is the clearest way (for # me) to force the code to get exercised. for format, input, expected in (('q', -1, '\xff' * bytes), ('q', 0, '\x00' * bytes), ('Q', 0, '\x00' * bytes), ('q', 1L, '\x00' * (bytes - 1) + '\x01'), ('Q', (1L << (8 * bytes)) - 1, '\xff' * bytes), ('q', (1L << (8 * bytes - 1)) - 1, '\x7f' + '\xff' * (bytes - 1))): got = struct.pack(format, input) native_expected = bigendian_to_native(expected) verify( got == native_expected, "%r-pack of %r gave %r, not %r" % (format, input, got, native_expected)) retrieved = struct.unpack(format, got)[0] verify( retrieved == input, "%r-unpack of %r gave %r, not %r" % (format, got, retrieved, input))
def test_func_closure():
    """func_closure is a tuple of cell objects and cannot be reassigned."""
    a = 12
    def f():
        print a
    c = f.func_closure
    verify(isinstance(c, tuple))
    # f closes over exactly one variable (a)
    verify(len(c) == 1)
    verify(c[0].__class__.__name__ == "cell") # don't have a type object handy
    cantset(f, "func_closure", c)