def testUnitTypes(self):
    """Test flattening with units.

    The flattening code should not do unit conversion,
    but should leave that up to the LabRAD manager to handle.
    Basically, for purposes of flattening, a unit is a unit.
    """
    tests = [
        (Value(5.0, 'ft'), ['v[m]'], 'v[ft]'),
        # real value array
        (U.ValueArray([1, 2, 3], ''), [], '*v[]'),
        (U.ValueArray([1, 2, 3], 'm'), ['*v[m]'], '*v[m]'),
        # complex value array
        (U.ValueArray([1j, 2j, 3j], ''), [], '*c[]'),
        (U.ValueArray([1j, 2j, 3j], 'm'), [], '*c[m]')
    ]
    for data, hints, tag in tests:
        self.assertEqual(T.flatten(data, hints)[1], T.parseTypeTag(tag))

    # We disallow flattening a float to a value with units, as this is
    # a major source of bugs. assertRaises reports a proper test
    # *failure* (not an error) if flatten unexpectedly succeeds, unlike
    # the hand-rolled try/except/else it replaces.
    self.assertRaises(Exception, T.flatten, 5.0, 'v[m]')
def testTypeConversions(self):
    """Check the result types of arithmetic among units, values and arrays."""
    m = units.Unit('m')
    V = units.Unit('V')
    GHz = units.Unit('GHz')
    x1 = 1.0 * m
    x2 = 5j * V
    a = np.arange(10) * 1.0
    va = units.ValueArray(np.arange(10) * 1.0, 'GHz')

    cases = [
        # Unit times number
        (1.0 * m, units.Value),
        (1 * m, units.Value),
        (m * 1.0, units.Value),
        (m * 1, units.Value),
        # Value times value or number
        (x1 * x1, units.Value),
        (x1 * 5, units.Value),
        (0 * x1, units.Value),
        # Unit times complex
        ((1 + 1j) * V, units.Complex),
        (V * (1 + 1j), units.Complex),
        # Value times Complex/complex
        (x1 * 1j, units.Complex),
        (1j * x1, units.Complex),
        (x2 * x1, units.Complex),
        (x1 * x2, units.Complex),
        # Unit/Value/ValueArray times array
        (x1 * a, units.ValueArray),
        (x2 * a, units.ValueArray),
        (GHz * a, units.ValueArray),
        (va * a, units.ValueArray),
        # Unit/Value/ValueArray times ValueArray
        (x1 * va, units.ValueArray),
        (x2 * va, units.ValueArray),
        (GHz * va, units.ValueArray),
        (va * va, units.ValueArray),
        # array times ?
        (a * x1, units.ValueArray),
        (a * x2, units.ValueArray),
        (a * GHz, units.ValueArray),
        (a * va, units.ValueArray),
        # ValueArray times ?
        (va * x1, units.ValueArray),
        (va * x2, units.ValueArray),
        (va * GHz, units.ValueArray),
        (va * a, units.ValueArray),
    ]
    for obj, expected_type in cases:
        self.assertIsInstance(obj, expected_type)
def testFailedFlattening(self):
    """
    Trying to flatten data to an incompatible type should raise an error.
    """
    bad_pairs = [
        # Simple cases
        (1, ['s', 'v[Hz]']),
        ('X', ['i', 'v', 'w']),
        (5.0, ['s', 'b', 't', 'w', 'i', 'v[Hz]']),
        # Value
        (5.0, 'v[Hz]'),
        (Value(4, 'm'), 'v[]'),
        (Value(3, 's'), ['v[Hz]', 'i', 'w']),
        # ndarray
        (np.array([1, 2, 3], dtype='int32'), '*v[Hz]'),
        (np.array([1.0, 2.4]), ['*i', '*w']),
        # ValueArray
        (U.ValueArray([1, 2, 3], 'm'), '*v[s]'),
        (U.ValueArray([1, 2], 'm'), '*v[]'),
    ]
    for value, tags in bad_pairs:
        with self.assertRaises(T.FlatteningError):
            T.flatten(value, tags)
def _unflatten_as_array(self, s, endianness, elem, dims, size):
    """Unflatten packed binary data to a numpy array.

    Args:
        s: byte stream; s.get(n) yields the next n raw bytes.
        endianness: byte order of the packed data; compared against
            SYSTEM_BYTE_ORDER to decide whether to swap.
        elem: LabRAD type of the array elements.
        dims: tuple of array dimensions.
        size: total number of elements to read.

    Returns:
        numpy ndarray of the decoded data; wrapped in a ValueArray
        (or DimensionlessArray) when the element type carries units.

    Raises:
        TypeError: if elem has no corresponding numpy dtype.
    """
    def make(t, width):
        # np.frombuffer replaces the deprecated np.fromstring (binary
        # mode deprecated since numpy 1.14). frombuffer returns a
        # read-only view of the input bytes, so take a writable copy:
        # the in-place byteswap below (and downstream mutation of the
        # result) both require it.
        a = np.frombuffer(s.get(size*width), dtype=np.dtype(t)).copy()
        if endianness != SYSTEM_BYTE_ORDER:
            a.byteswap(True)  # inplace
        return a

    if elem == TBool():
        a = make('bool', 1)
    elif elem == TInt():
        a = make('i4', 4)
    elif elem == TUInt():
        a = make('u4', 4)
    elif elem <= TValue():
        a = make('f8', 8)
    elif elem <= TComplex():
        a = make('c16', 16)
    else:
        raise TypeError("Cannot make numpy array with %s"%(elem,))
    a.shape = dims + a.shape[1:]  # handle clusters as elements
    # Attach units for value/complex element types.
    if elem <= TValue() or elem <= TComplex():
        if elem.unit is not None and elem.unit != '':
            a = U.ValueArray(a, elem.unit)
        else:
            a = U.DimensionlessArray(a)
    return a
def testDefaultFlatAndBack(self): """ Test roundtrip python->LabRAD->python conversion. No type requirements are given in these tests. In other words, we allow pylabrad to choose a default type for flattening. In this test, we expect A == unflatten(*flatten(A)). In other words, we expect the default type chosen for each object to unflatten as an object equal to the one originally flattened. """ tests = [ # simple types None, True, False, 1, -1, 2, -2, 0x7FFFFFFF, -0x80000000, 1L, 2L, 3L, 4L, 0L, 0xFFFFFFFFL, '', 'a', '\x00\x01\x02\x03', datetime.now(), # values 5.0, Value(6, ''), Value(7, 'ms'), 8 + 0j, Complex(9 + 0j, ''), Complex(10 + 0j, 'GHz'), # ValueArray and ndarray # These types should be invariant under flattening followed by # unflattening. Note, however, that since eg. [1, 2, 3] will # unflatten as ndarray with dtype=int32, we do not put lists # in this test. U.ValueArray([1, 2, 3], 'm'), U.ValueArray([1j, 2j, 3j], 's'), np.array([1, 3, 4], dtype='int32'), np.array([1.1, 2.2, 3.3]), # clusters (1, True, 'a'), ((1, 2), ('a', False)), # lists [], #[1, 2, 3, 4], #[1L, 2L, 3L, 4L], [[]], [['a', 'bb', 'ccc'], ['dddd', 'eeeee', 'ffffff']], # more complex stuff [(1L, 'a'), (2L, 'b')], ] for data_in in tests: data_out = T.unflatten(*T.flatten(data_in)) if isinstance(data_in, U.ValueArray): self.assertTrue(data_in.allclose(data_out)) elif isinstance(data_in, np.ndarray): np.testing.assert_array_equal(data_out, data_in) else: self.assertEqual(data_in, data_out)
def testNumpyArrayScalar(self):
    """Flattening a zero-dimensional (scalar) array must raise TypeError."""
    self.assertRaises(TypeError, T.flatten, np.array(5))
    # The lambda keeps ValueArray construction inside the assertion
    # scope, matching the original context-manager form.
    self.assertRaises(TypeError, lambda: T.flatten(U.ValueArray(np.array(5), 'ns')))