def test_constant():
    """A named int constant keeps its name and infers dtype; an explicit
    dtype overrides inference and leaves the name unset."""
    named = constant(2, name='a')
    assert named.name == 'a'
    assert named.dtype == 'int8'

    typed = constant(2, dtype='float32')
    assert typed.name is None
    assert typed.dtype == 'float32'
def make_node(self, ref, values, ref_dim, val_dim, *_hash):
    """Build the Apply node for this filtering op.

    Parameters
    ----------
    ref : tensor-like
        Reference features; cast to float32.
    values : tensor-like
        Values to be filtered; must be 3-dimensional (the layout of the
        three axes is not shown here — presumably channels-first, since
        axis 0 becomes broadcastable when ``val_dim == 1``; TODO confirm
        against callers).
    ref_dim, val_dim : symbolic scalars
        Must be compile-time integer constants; folded to Python-level
        values via ``get_scalar_constant_value``.
    *_hash : optional
        Either empty (a hash table is built here) or exactly six
        variables forming a precomputed hash structure.

    Returns
    -------
    Apply
        Node whose single output has ``values``'s type, with axis 0
        broadcastable iff ``val_dim == 1``.
    """
    assert (values.ndim == 3)
    ref = as_tensor_variable(ref.astype("float32"))
    values = as_tensor_variable(values.astype("float32"))
    # Fold the dimension arguments to constants now; this raises if they
    # are not known at graph-construction time.
    ref_dim = get_scalar_constant_value(ref_dim)
    val_dim = get_scalar_constant_value(val_dim)
    if "int" not in str(ref_dim.dtype) or "int" not in str(val_dim.dtype):
        raise ValueError("ref_dim and val_dim must be integers.")
    # Rescale the reference before hashing — NOTE(review): presumably the
    # standard permutohedral-lattice scaling factor; confirm against the
    # lattice construction this feeds.
    scaled_ref = ref * float(np.sqrt(2 / 3) * (ref_dim + 1))
    if len(_hash) == 0:
        # No precomputed table supplied: build one from the scaled
        # reference. The op call yields the table's output variables.
        hash_struct = PermutohedralHashTable()(scaled_ref, ref_dim)
    else:
        # A prebuilt table is exactly six variables (entries, keys,
        # neighbor entries, barycentric coords, valid entries, n_valid —
        # presumably matching PermutohedralHashTable's outputs).
        assert (len(_hash) == 6)
        hash_struct = [as_tensor_variable(v) for v in _hash]
    # Should we not do this?
    bcast = [False for _ in range(3)]
    if val_dim == 1:
        bcast[0] = True
    out_type = values.type.clone(broadcastable=bcast)
    # Re-wrap the folded dims as named int32 graph constants so they
    # travel with the node.
    ref_dim = constant(ref_dim, dtype="int32", name="ref_dim")
    val_dim = constant(val_dim, dtype="int32", name="val_dim")
    inputs = [ref, values, ref_dim, val_dim] + hash_struct
    return Apply(self, inputs, [out_type()])
def test_constant():
    """Check name and dtype handling of ``constant``."""
    c = constant(2, name='a')
    assert (c.name, c.dtype) == ('a', 'int8')

    c = constant(2, dtype='float32')
    assert c.name is None
    assert c.dtype == 'float32'
def TestConstant():
    # NOTE(review): pytest only collects *functions* named ``test_*``;
    # a PascalCase function like this one is never picked up as a test —
    # confirm whether this name is intentional or should be snake_case.
    c1 = constant(2, name="a")
    assert c1.name == "a"
    assert c1.dtype == "int8"

    c2 = constant(2, dtype="float32")
    assert c2.name is None
    assert c2.dtype == "float32"
def test_composite_clone_float32(self):
    """``clone_float32`` must strip every float16 variable, both from a
    mixed-dtype nested Composite and from a switch/mul Composite."""
    w = int8()
    x = float16()
    y = float32()
    inner = Composite([x, y], [tanh(x + cast(y, "float16"))])
    expr = (
        inner(x, y)
        - inner(x, y) ** 2
        + cast(x, "int16")
        + cast(x, "float32")
        + cast(w, "float16")
        - constant(np.float16(1.0))
    )
    comp = Composite([w, x, y], [expr])
    assert has_f16(comp)
    cloned = comp.clone_float32()
    assert not has_f16(cloned)

    # Second graph: every input except the selector is float16.
    v = uint8()
    w = float16()
    x = float16()
    y = float16()
    z = float16()
    comp = Composite([v, w, x, y, z], [switch(v, mul(w, x, y), z)])
    assert has_f16(comp)
    cloned = comp.clone_float32()
    assert not has_f16(cloned)
def test_composite_clone_float32(self):
    """``clone_float32`` must leave no float16 variable in the fgraph."""

    def has_f16(comp):
        # True iff any variable in the composite's graph is float16.
        return any(v.type == float16 for v in comp.fgraph.variables)

    w = int8()
    x = float16()
    y = float32()
    cz = Composite([x, y], [tanh(x + cast(y, "float16"))])
    graph_out = (
        cz(x, y)
        - cz(x, y) ** 2
        + cast(x, "int16")
        + cast(x, "float32")
        + cast(w, "float16")
        - constant(np.float16(1.0))
    )
    c = Composite([w, x, y], [graph_out])
    assert has_f16(c)
    assert not has_f16(c.clone_float32())

    # A second composite where all value inputs are float16.
    v = uint8()
    w, x, y, z = float16(), float16(), float16(), float16()
    c = Composite([v, w, x, y, z], [switch(v, mul(w, x, y), z)])
    assert has_f16(c)
    assert not has_f16(c.clone_float32())
def make_node(self, points, dim):
    """Build the Apply node for the hash-table construction op.

    Parameters
    ----------
    points : tensor-like
        Lattice points; must be 3-dimensional (axis meaning not shown
        here — TODO confirm layout against callers). Cast to float32.
    dim : symbolic scalar
        Must be a compile-time integer constant.

    Returns
    -------
    Apply
        Node with six outputs: hash_entries, hash_keys,
        neighbor_entries, barycentric_coords, valid_entries, n_valid.
    """
    assert (points.ndim == 3)
    points = as_tensor_variable(points.astype("float32"))
    # Fold `dim` to a constant now; raises if not known at
    # graph-construction time.
    dim = get_scalar_constant_value(dim)
    if "int" not in str(dim.dtype):
        raise ValueError("dim must be an integer.")
    dim = constant(dim, dtype="int32", name="dim")
    # Output types: 1-d entry/valid-entry vectors, 2-d keys, 3-d
    # neighbor entries, and barycentric coords shaped like `points`.
    entries_type = TensorType("int32", broadcastable=(False, ))
    keys_type = TensorType("int16", broadcastable=(False, False))
    neib_ent_type = TensorType("int32", broadcastable=(False, False, False))
    bary_type = TensorType("float32", broadcastable=points.type.broadcastable)
    valid_entries_type = TensorType("int32", broadcastable=(False, ))
    n_valid_type = TensorType("int32", broadcastable=(False, ))
    out_vars = [
        entries_type(name="hash_entries"),
        keys_type(name="hash_keys"),
        neib_ent_type(name="neighbor_entries"),
        bary_type(name="barycentric_coords"),
        valid_entries_type(name="valid_entries"),
        n_valid_type(name="n_valid")
    ]
    # Two sets of entries can't be meaningfully compared without also
    # having the corresponding keys. Since we can only define per-output
    # comparisons, we have to hope that any time someone compares two
    # tables for equality, they will check all outputs.
    out_vars[0].tag.values_eq_approx = lambda e1, e2: True
    out_vars[2].tag.values_eq_approx = lambda e1, e2: True
    # The number of valid entries between two equivalent tables may be
    # different since it includes duplicates.
    out_vars[5].tag.values_eq_approx = lambda n1, n2: True

    def keys_comparison(k1, k2):
        # Keys are compared as unordered sets of rows: two tables built
        # in different insertion orders can still be equivalent.
        k1 = [tuple(k) for k in np.asarray(k1)]
        k2 = [tuple(k) for k in np.asarray(k2)]
        return set(k1) == set(k2)
    out_vars[1].tag.values_eq_approx = keys_comparison

    def valid_entries_comparison(e1, e2):
        # Only the count of unique valid entries matters; duplicates are
        # ignored for equivalence.
        e1 = np.asarray(e1)
        e2 = np.asarray(e2)
        return len(np.unique(e1)) == len(np.unique(e2))
    out_vars[4].tag.values_eq_approx = valid_entries_comparison
    return Apply(self, [points, dim], out_vars)
def test_composite_clone_float32(self):
    """After ``clone_float32`` the composite must contain no float16."""
    w, x, y = int8(), float16(), float32()
    inner = Composite([x, y], [tanh(x + cast(y, 'float16'))])
    mixed = (inner(x, y) - inner(x, y) ** 2
             + cast(x, 'int16')
             + cast(x, 'float32')
             + cast(w, 'float16')
             - constant(np.float16(1.0)))
    comp = Composite([w, x, y], [mixed])
    assert has_f16(comp)
    assert not has_f16(comp.clone_float32())
def test_composite_clone_float32(self):
    # Build up a composite output mixing several dtypes, including a
    # float16 constant, then verify clone_float32 removes all float16.
    w = int8()
    x = float16()
    y = float32()
    cz = Composite([x, y], [tanh(x + cast(y, 'float16'))])
    out = cz(x, y) - cz(x, y) ** 2
    out = out + cast(x, 'int16')
    out = out + cast(x, 'float32')
    out = out + cast(w, 'float16')
    out = out - constant(np.float16(1.0))
    c = Composite([w, x, y], [out])
    assert has_f16(c)
    nc = c.clone_float32()
    assert not has_f16(nc)