def test_comparison(self):
    """Exercise rich comparison on every tsc_time clock type.

    For each clock, two samples taken in succession must order
    correctly (t2 > t), and a sample must compare correctly against
    plain int, long and float numbers derived from its own value.
    """

    def check_ordering(t, t2, base, fuzz, float_fuzz):
        # t/t2: successive samples of the same clock type.
        # base: the sample's value as a plain Python number.
        # fuzz/float_fuzz: offsets for int-long/float comparisons.
        self.assertTrue(t2 > t)
        self.assertTrue(t < t2)
        self.assertTrue(base + fuzz > t)
        self.assertTrue(base - fuzz < t)
        self.assertTrue(base + long(fuzz) > t)
        self.assertTrue(base - long(fuzz) < t)
        self.assertTrue(base + float_fuzz > t)
        self.assertTrue(base - float_fuzz < t)

    t = tsc_time.now_tsc()
    t2 = tsc_time.now_tsc()
    # Floating point uses a larger offset because of a loss in
    # precision when converting to floating point, ex:
    # >>> float(28536304964998994L)
    # 28536304964998992.0
    check_ordering(t, t2, t.tsc, 1, 1000.0)

    t = tsc_time.now_posix_sec()
    time.sleep(2)
    t2 = tsc_time.now_posix_sec()
    check_ordering(t, t2, int(t), 1, 1.0)

    t = tsc_time.now_posix_usec()
    time.sleep(0.1)
    t2 = tsc_time.now_posix_usec()
    check_ordering(t, t2, int(t), 1, 1.0)

    t = tsc_time.now_posix_fsec()
    time.sleep(0.1)
    t2 = tsc_time.now_posix_fsec()
    check_ordering(t, t2, int(t), 1, 1.0)
def test_math(self): t = tsc_time.now_tsc() self.assertEqual(t + 1, t.tsc + 1) self.assertEqual(t - 1, t.tsc - 1) self.assertEqual(t + 1L, t.tsc + 1) self.assertEqual(t - 1L, t.tsc - 1) # Removing floating point comparison because large floating point # numbers loose precision, ex: # >>> float(28536304964998994L) # 28536304964998992.0 #self.assertEqual(t + 1.0, t.tsc + 1) #self.assertEqual(t - 1.0, t.tsc - 1) self.assertRaises(TypeError, lambda: t + 'hi') self.assertRaises(TypeError, lambda: t - 'hi') t = tsc_time.now_posix_sec() self.assertEqual(t + 1, t.as_posix_sec() + 1) self.assertEqual(t - 1, t.as_posix_sec() - 1) self.assertEqual(t + 1L, t.as_posix_sec() + 1) self.assertEqual(t - 1L, t.as_posix_sec() - 1) self.assertEqual(t + 1.0, t.as_posix_sec() + 1) self.assertEqual(t - 1.0, t.as_posix_sec() - 1) self.assertRaises(TypeError, lambda: t + 'hi') self.assertRaises(TypeError, lambda: t - 'hi') t = tsc_time.now_posix_usec() self.assertEqual(t + 1, t.as_posix_usec() + 1) self.assertEqual(t - 1, t.as_posix_usec() - 1) self.assertEqual(t + 1L, t.as_posix_usec() + 1) self.assertEqual(t - 1L, t.as_posix_usec() - 1) self.assertEqual(t + 1.0, t.as_posix_usec() + 1) self.assertEqual(t - 1.0, t.as_posix_usec() - 1) self.assertRaises(TypeError, lambda: t + 'hi') self.assertRaises(TypeError, lambda: t - 'hi') t = tsc_time.now_posix_fsec() # We lose some precision with double conversions done in C versus # Python's conversions. self._assert_close(t + 1, t.as_posix_fsec() + 1, 0.001) self._assert_close(t - 1, t.as_posix_fsec() - 1, 0.001) self._assert_close(t + 1L, t.as_posix_fsec() + 1, 0.001) self._assert_close(t - 1L, t.as_posix_fsec() - 1, 0.001) self._assert_close(t + 1.0, t.as_posix_fsec() + 1, 0.001) self._assert_close(t - 1.0, t.as_posix_fsec() - 1, 0.001) self.assertRaises(TypeError, lambda: t + 'hi') self.assertRaises(TypeError, lambda: t - 'hi')
def test_types(self):
    """Check conversion of each clock type to int, long and float."""
    t = tsc_time.now_tsc()
    raw = t.tsc
    self.assertEqual(int(t), raw)
    self.assertEqual(long(t), raw)
    self.assertEqual(float(t), float(raw))

    t = tsc_time.now_posix_sec()
    secs = t.as_posix_sec()
    self.assertEqual(int(t), secs)
    self.assertEqual(long(t), secs)
    self.assertEqual(float(t), float(secs))

    t = tsc_time.now_posix_usec()
    usecs = t.as_posix_usec()
    self.assertEqual(int(t), usecs)
    self.assertEqual(long(t), usecs)
    self.assertEqual(float(t), float(usecs))

    # Integral conversions of an fsec value truncate to whole seconds;
    # only float() preserves the fractional part.
    t = tsc_time.now_posix_fsec()
    whole = t.as_posix_sec()
    self.assertEqual(int(t), whole)
    self.assertEqual(long(t), whole)
    self.assertEqual(float(t), t.as_posix_fsec())