Example 1
def _infer_value_domain(cls, **kwargs):
    # rely on the underlying distribution's logic to infer the event_shape given param domains
    instance = cls.dist_class(
        **{k: _dummy_tensor(domain) for k, domain in kwargs.items()},
        validate_args=False,
    )
    out_shape = instance.event_shape
    if isinstance(instance.support, constraints._IntegerInterval):
        out_dtype = int(instance.support.upper_bound + 1)
    else:
        out_dtype = 'real'
    return Domain(dtype=out_dtype, shape=out_shape)
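For intuition, the same inference can be run by hand on a concrete distribution. A minimal sketch using torch.distributions directly (not funsor's wrapper): a Categorical over 3 classes has an empty event_shape and integer support {0, 1, 2}, so the inferred dtype is 3.

import torch
import torch.distributions as dist
from torch.distributions import constraints

d = dist.Categorical(probs=torch.ones(3) / 3, validate_args=False)
assert d.event_shape == torch.Size([])
assert isinstance(d.support, constraints._IntegerInterval)
assert int(d.support.upper_bound + 1) == 3  # the dtype _infer_value_domain would report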
Example 2
def test_unary(symbol, data):
    dtype = 'real'
    if symbol == '~':
        data = bool(data)
        dtype = 2
    expected_data = unary_eval(symbol, data)

    x = Number(data, dtype)
    actual = unary_eval(symbol, x)
    check_funsor(actual, {}, Domain((), dtype), expected_data)
Example 3
def __init__(self, data, inputs=None, dtype="real"):
    assert isinstance(data, np.ndarray) or np.isscalar(data)
    # inputs is a tuple of (name, domain) pairs covering the leftmost dims;
    # each named input must have an integer (bint) dtype
    assert isinstance(inputs, tuple)
    assert all(isinstance(d.dtype, integer_types) for k, d in inputs)
    inputs = OrderedDict(inputs)
    # the remaining rightmost dims form the output domain
    output = Domain(data.shape[len(inputs):], dtype)
    fresh = frozenset(inputs.keys())
    bound = frozenset()
    super(Array, self).__init__(inputs, output, fresh, bound)
    self.data = data
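A hypothetical usage sketch, assuming the Array class above plus bint and Domain from funsor.domains are in scope: the leftmost dims of data are consumed by the named inputs and the remaining dims become the output shape.

import numpy as np

data = np.random.rand(3, 4)
x = Array(data, (("i", bint(3)),))  # one named input "i" covering dim 0
assert x.output == Domain((4,), "real")
assert set(x.inputs) == {"i"}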
Example 4
def __init__(self, data, inputs=None, dtype="real"):
    assert isinstance(data, torch.Tensor)
    assert isinstance(inputs, tuple)
    # skip eager shape checks under the JIT tracer, where sizes may be traced values
    if not torch._C._get_tracing_state():
        assert len(inputs) <= data.dim()
        # each named input's size (its bint dtype) must match its tensor dim
        for (k, d), size in zip(inputs, data.shape):
            assert d.dtype == size
    inputs = OrderedDict(inputs)
    output = Domain(data.shape[len(inputs):], dtype)
    fresh = frozenset(inputs.keys())
    bound = frozenset()
    super(Tensor, self).__init__(inputs, output, fresh, bound)
    self.data = data
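The torch analogue, as a sketch under the same assumptions (Tensor from above, bint and Domain from funsor.domains):

import torch

data = torch.randn(3, 4)
x = Tensor(data, (("i", bint(3)),))  # dim 0 is the named input "i"
assert x.output == Domain((4,), "real")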
Example 5
def __init__(self, var, expr):
    assert isinstance(var, Variable)
    assert isinstance(var.dtype, int)  # var must range over a bounded integer domain
    assert isinstance(expr, Funsor)
    # binding var removes it from the free inputs...
    inputs = expr.inputs.copy()
    inputs.pop(var.name, None)
    # ...and prepends its size as a new leading output dim
    shape = (var.dtype,) + expr.output.shape
    output = Domain(shape, expr.dtype)
    fresh = frozenset()
    bound = frozenset({var.name})
    super(Lambda, self).__init__(inputs, output, fresh, bound)
    self.var = var
    self.expr = expr
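A sketch of the binding behavior, with the same hypothetical imports as the sketches above: the bound name disappears from inputs and its size becomes a new leading output dim.

i = Variable("i", bint(3))
x = Tensor(torch.randn(3), (("i", bint(3)),))
lam = Lambda(i, x)
assert "i" not in lam.inputs
assert lam.output == Domain((3,), "real")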
Example 6
def __init__(self, data, dtype=None):
    assert isinstance(data, numbers.Number)
    if isinstance(dtype, int):
        # an integer dtype denotes a discrete domain of that size
        data = type(dtype)(data)
        if dtype != 2:  # booleans have bitwise interpretation
            assert 0 <= data < dtype
    else:
        assert isinstance(dtype, str) and dtype == "real"
        data = float(data)
    inputs = OrderedDict()
    output = Domain((), dtype)
    super(Number, self).__init__(inputs, output)
    self.data = data
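A sketch of the branches above (the dtype=None default is presumably normalized to "real" upstream, e.g. by funsor's constructor machinery, so dtype is passed explicitly here):

r = Number(2.5, "real")  # real scalar: data is coerced to float
k = Number(1, 3)         # discrete scalar: must satisfy 0 <= 1 < 3
b = Number(1, 2)         # dtype 2 is boolean-like, so the range check is skipped
assert r.data == 2.5 and k.data == 1 and b.data == 1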
Example 7
def _infer_value_domain(cls, **kwargs):
    # rely on the underlying distribution's logic to infer the event_shape given param domains
    instance = cls.dist_class(
        **{k: dummy_numeric_array(domain) for k, domain in kwargs.items()},
        validate_args=False,
    )
    out_shape = instance.event_shape
    if type(instance.support).__name__ == "_IntegerInterval":
        out_dtype = int(instance.support.upper_bound + 1)
    else:
        out_dtype = 'real'
    return Domain(dtype=out_dtype, shape=out_shape)
Example 8
def test_binary(symbol, data1, data2):
    dtype = 'real'
    if symbol in BOOLEAN_OPS:
        dtype = 2
        data1 = bool(data1)
        data2 = bool(data2)
    try:
        expected_data = binary_eval(symbol, data1, data2)
    except ZeroDivisionError:
        return

    x1 = Number(data1, dtype)
    x2 = Number(data2, dtype)
    actual = binary_eval(symbol, x1, x2)
    check_funsor(actual, {}, Domain((), dtype), expected_data)
Example 9
def test_reduce_event(op, event_shape, dims):
    sizes = {'a': 3, 'b': 4, 'c': 5}
    batch_shape = tuple(sizes[d] for d in dims)
    shape = batch_shape + event_shape
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    numeric_op = REDUCE_OP_TO_NUMERIC[op]
    data = rand(shape) + 0.5
    dtype = 'real'
    if op in [ops.and_, ops.or_]:
        data = ops.astype(data, 'uint8')
    expected_data = numeric_op(data.reshape(batch_shape + (-1, )), -1)

    x = Tensor(data, inputs, dtype=dtype)
    op_name = (numeric_op.__name__[1:] if op in [ops.min, ops.max]
               else numeric_op.__name__)
    actual = getattr(x, op_name)()
    check_funsor(actual, inputs, Domain((), dtype), expected_data)
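The expected value above uses a flatten-then-reduce trick: all event dims are collapsed into one trailing axis and reduced there. A standalone numpy illustration:

import numpy as np

data = np.random.rand(3, 2, 2)  # batch shape (3,), event shape (2, 2)
flat = data.reshape((3, -1))    # collapse the event dims into one axis
expected = flat.sum(-1)         # reduce over the flattened event
assert expected.shape == (3,)   # only the batch shape remains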
Example 10
def test_reduce_event(op, event_shape, dims):
    sizes = {'a': 3, 'b': 4, 'c': 5}
    batch_shape = tuple(sizes[d] for d in dims)
    shape = batch_shape + event_shape
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    torch_op = REDUCE_OP_TO_TORCH[op]
    data = torch.rand(shape) + 0.5
    dtype = 'real'
    if op in [ops.and_, ops.or_]:
        data = data.byte()
    expected_data = torch_op(data.reshape(batch_shape + (-1, )), -1)
    if op in [ops.min, ops.max]:
        expected_data = expected_data[0]  # torch.min/max over a dim return (values, indices)

    x = Tensor(data, inputs, dtype=dtype)
    actual = getattr(x, torch_op.__name__)()
    check_funsor(actual, inputs, Domain((), dtype), expected_data)
Example 11
def test_binary(symbol, data1, data2):
    dtype = 'real'
    if symbol in BOOLEAN_OPS:
        dtype = 2
        data1 = bool(data1)
        data2 = bool(data2)
    try:
        expected_data = binary_eval(symbol, data1, data2)
    except ZeroDivisionError:
        return

    x1 = Number(data1, dtype)
    x2 = Number(data2, dtype)
    actual = binary_eval(symbol, x1, x2)
    check_funsor(actual, {}, Domain((), dtype), expected_data)
    with interpretation(normalize):
        actual_reflect = binary_eval(symbol, x1, x2)
    assert actual.output == actual_reflect.output
Example 12
def torch_stack(parts, dim=0):
    """
    Wrapper around :func:`torch.stack` to operate on real-valued Funsors.

    Note this operates only on the ``output`` tensor. To stack funsors in a
    new named dim, instead use :class:`~funsor.terms.Stack`.
    """
    assert isinstance(dim, int)
    assert isinstance(parts, tuple)
    assert len(set(x.output for x in parts)) == 1
    shape = parts[0].output.shape
    if dim >= 0:
        dim = dim - len(shape) - 1
    assert dim < 0
    split = dim + len(shape) + 1
    shape = shape[:split] + (len(parts), ) + shape[split:]
    output = Domain(shape, parts[0].dtype)
    fn = functools.partial(_torch_stack, dim)
    return Function(fn, output, parts)
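A hypothetical usage sketch: stacking two parts whose output shape is (4,) along dim=0 yields output shape (2, 4), with dim normalized to a negative index internally.

a = Tensor(torch.randn(4), ())
b = Tensor(torch.randn(4), ())
st = torch_stack((a, b))  # dim=0 becomes dim=-2 relative to the output
assert st.output == Domain((2, 4), "real")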
Example 13
def test_reduce_all(op):
    x = Variable('x', bint(2))
    y = Variable('y', bint(3))
    z = Variable('z', bint(4))
    f = x * y + z
    dtype = f.dtype
    check_funsor(f, {'x': bint(2), 'y': bint(3), 'z': bint(4)}, Domain((), dtype))
    if op is ops.logaddexp:
        pytest.skip()

    with interpretation(sequential):
        actual = f.reduce(op)

    values = [f(x=i, y=j, z=k)
              for i in x.output
              for j in y.output
              for k in z.output]
    expected = reduce(op, values)
    assert actual == expected
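As a concrete cross-check for op = ops.add, the fully reduced value can be computed directly in Python (assuming each bint(n) iterates over 0..n-1, as the list comprehension above does):

total = sum(i * j + k
            for i in range(2)
            for j in range(3)
            for k in range(4))
assert total == 48  # the value f.reduce(ops.add) should carry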
Example 14
def test_binary_funsor_funsor(symbol, dims1, dims2):
    sizes = {'a': 3, 'b': 4, 'c': 5}
    shape1 = tuple(sizes[d] for d in dims1)
    shape2 = tuple(sizes[d] for d in dims2)
    inputs1 = OrderedDict((d, bint(sizes[d])) for d in dims1)
    inputs2 = OrderedDict((d, bint(sizes[d])) for d in dims2)
    data1 = rand(shape1) + 0.5
    data2 = rand(shape2) + 0.5
    dtype = 'real'
    if symbol in BOOLEAN_OPS:
        dtype = 2
        data1 = ops.astype(data1, 'uint8')
        data2 = ops.astype(data2, 'uint8')
    x1 = Tensor(data1, inputs1, dtype)
    x2 = Tensor(data2, inputs2, dtype)
    inputs, aligned = align_tensors(x1, x2)
    expected_data = binary_eval(symbol, aligned[0], aligned[1])

    actual = binary_eval(symbol, x1, x2)
    check_funsor(actual, inputs, Domain((), dtype), expected_data)
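The key step is align_tensors, which unifies the named inputs of its arguments so the raw data broadcast elementwise. A hypothetical sketch of the shapes involved:

x1 = Tensor(rand((3, 4)), OrderedDict(a=bint(3), b=bint(4)))
x2 = Tensor(rand((4, 5)), OrderedDict(b=bint(4), c=bint(5)))
inputs, (d1, d2) = align_tensors(x1, x2)
assert list(inputs) == ["a", "b", "c"]  # union of the named inputs
# d1 and d2 are raw arrays that broadcast together to shape (3, 4, 5)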
Example 15
def test_reduce_subset(op, reduced_vars):
    reduced_vars = frozenset(reduced_vars)
    x = Variable('x', bint(2))
    y = Variable('y', bint(3))
    z = Variable('z', bint(4))
    f = x * y + z
    dtype = f.dtype
    check_funsor(f, {'x': bint(2), 'y': bint(3), 'z': bint(4)}, Domain((), dtype))
    if op is ops.logaddexp:
        pytest.skip()

    with interpretation(sequential):
        actual = f.reduce(op, reduced_vars)

    expected = f
    for v in [x, y, z]:
        if v.name in reduced_vars:
            expected = reduce(op, [expected(**{v.name: i}) for i in v.output])

    check_funsor(actual, expected.inputs, expected.output)
    # TODO check data
    if not reduced_vars:
        assert actual is f
Example 16
def tensor_to_funsor(tensor, event_inputs=(), event_output=0, dtype="real"):
    """
    Convert a :class:`torch.Tensor` to a :class:`funsor.tensor.Tensor`.

    Note this should not touch data, but may trigger a
    :meth:`torch.Tensor.reshape` op.

    :param torch.Tensor tensor: A PyTorch tensor.
    :param tuple event_inputs: A tuple of names for rightmost tensor
        dimensions.  If ``tensor`` has these names, they will be converted to
        ``result.inputs``.
    :param int event_output: The number of tensor dimensions assigned to
        ``result.output``. These must be on the right of any ``event_input``
        dimensions.
    :param str dtype: The dtype of the result's output, e.g. ``"real"``.
    :return: A funsor.
    :rtype: funsor.tensor.Tensor
    """
    assert isinstance(tensor, torch.Tensor)
    assert isinstance(event_inputs, tuple)
    assert isinstance(event_output, int) and event_output >= 0
    inputs_shape = tensor.shape[:tensor.dim() - event_output]
    output = Domain(dtype=dtype, shape=tensor.shape[tensor.dim() - event_output:])
    dim_to_name = default_dim_to_name(inputs_shape, event_inputs)
    return to_funsor(tensor, output, dim_to_name)
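A hypothetical usage sketch: with event_output=1 the rightmost dim becomes the output and the batch dim of size 3 becomes a default-named input.

t = torch.randn(3, 4)
f = tensor_to_funsor(t, event_output=1)
assert f.output == Domain(dtype="real", shape=(4,))
# f.inputs holds one default-named input for the batch dim of size 3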