Code example #1
def test_basic_unify_reify():
    # Test reification with manually constructed replacements
    a = tf.compat.v1.placeholder(tf.float64, name='a')
    x_l = var('x_l')
    a_reif = reify(x_l, {x_l: mt(a)})
    assert a_reif.obj is not None
    # Confirm that identity is preserved (i.e. that the underlying object
    # was properly tracked and not unnecessarily reconstructed)
    assert a == a_reif.reify()

    test_expr = mt.add(tf.constant(1, dtype=tf.float64),
                       mt.mul(tf.constant(2, dtype=tf.float64),
                              x_l))
    test_reify_res = reify(test_expr, {x_l: a})
    test_base_res = test_reify_res.reify()
    assert isinstance(test_base_res, tf.Tensor)

    with tf.Graph().as_default():
        a = tf.compat.v1.placeholder(tf.float64, name='a')
        expected_res = tf.add(tf.constant(1, dtype=tf.float64),
                              tf.multiply(tf.constant(2, dtype=tf.float64), a))
    assert_ops_equal(test_base_res, expected_res)

    # Simply make sure that unification succeeds
    meta_expected_res = mt(expected_res)
    s_test = unify(test_expr, meta_expected_res, {})
    assert len(s_test) == 3

    assert reify(test_expr, s_test) == meta_expected_res
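
The steps above generalize into a basic pattern: bind logic variables with `reify`, then call `.reify()` on the result to recover a base TF object. A minimal sketch of that round trip, assuming the usual `unification` and symbolic-pymc import paths (imports are not shown in these excerpts, so the paths below are assumptions):

import tensorflow as tf

from unification import var, reify
from symbolic_pymc.tensorflow.meta import mt  # import path assumed

x_lv = var('x')
a = tf.compat.v1.placeholder(tf.float64, name='a')

# A meta graph with a logic-variable "hole" in it
pattern_mt = mt.mul(tf.constant(2.0, dtype=tf.float64), x_lv)

# Fill the hole, then reify the now-ground meta graph to a base TF tensor
filled_mt = reify(pattern_mt, {x_lv: mt(a)})
assert isinstance(filled_mt.reify(), tf.Tensor)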
Code example #2
def test_meta_dtype_inference():

    one_int_mt = mt(1)
    res = mt.add.output_meta_types({'x': one_int_mt})
    assert res[0][0] == TFlowMetaTensor
    assert res[0][1] == tf.int32

    one_flt_mt = mt(1.0)
    res = mt.add.output_meta_types({'y': one_flt_mt})
    assert res[0][0] == TFlowMetaTensor
    assert res[0][1] == tf.float32

    res = mt.add.output_meta_types({'T': tf.int32})
    assert res[0][0] == TFlowMetaTensor
    assert res[0][1] == tf.int32

    add_mt = TFlowMetaOperator(mt.add.op_def,
                               TFlowMetaNodeDef('Add', 'my_add', {'T': 1}))
    res = add_mt.output_meta_types()
    assert res[0][0] == TFlowMetaTensor
    assert res[0][1] == tf.float32

    with pytest.raises(AssertionError):
        # These integer types conflict with the NodeDef type in our operator,
        # `add_mt`.
        add_mt(1, 2)

    res = mt.cast.output_meta_types({'dtype': 'blah'})
    assert res[0][0] == TFlowMetaTensor
    assert isvar(res[0][1])

    res = mt.placeholder.output_meta_types({'dtype': 'blah'})
    assert isvar(res[0][1])
Code example #3
def test_meta_existing_names():

    with tf.Graph().as_default():
        one_mt = mt(1)
        assert one_mt.op.name == 'Const'

        # Clear out the associated base variable
        orig_one_tf = one_mt._obj
        one_mt.reset()
        one_mt.op.reset()
        assert one_mt.obj is None
        assert one_mt.op.obj is None

        # Attempt to reify to a base variable
        one_tf = one_mt.reify()
        assert one_tf.op.name == 'Const'
        # Make sure it's the first base variable we created
        assert orig_one_tf is one_tf

        two_mt = mt(2)
        two_mt.op.node_def.name = 'Const'

        # TODO FIXME: We shouldn't have to do this manually after changing a
        # dependency.
        two_mt.reset()
        two_mt.op.reset()
        assert two_mt.obj is None
        assert two_mt.op.obj is None
        assert two_mt.op.name == 'Const'

        with pytest.raises(MetaReificationError):
            two_mt.reify()
Code example #4
def test_sexp_unify_reify():
    """Make sure we can unify and reify etuples/S-exps."""
    # Unify `A . (x + y)`, for `x`, `y` logic variables
    A = tf.compat.v1.placeholder(tf.float64,
                                 name="A",
                                 shape=tf.TensorShape([None, None]))
    x = tf.compat.v1.placeholder(tf.float64,
                                 name="x",
                                 shape=tf.TensorShape([None, 1]))
    y = tf.compat.v1.placeholder(tf.float64,
                                 name="y",
                                 shape=tf.TensorShape([None, 1]))

    z = tf.matmul(A, tf.add(x, y))

    z_sexp = etuplize(z, shallow=False)

    # Let's just be sure that the original TF objects are preserved
    assert z_sexp[1].reify() == A
    assert z_sexp[2][1].reify() == x
    assert z_sexp[2][2].reify() == y

    A_lv, x_lv, y_lv = var(), var(), var()
    dis_pat = etuple(
        TFlowMetaOperator(mt.matmul.op_def, var()),
        A_lv,
        etuple(TFlowMetaOperator(mt.add.op_def, var()), x_lv, y_lv),
    )

    s = unify(dis_pat, z_sexp, {})

    assert s[A_lv] == mt(A)
    assert s[x_lv] == mt(x)
    assert s[y_lv] == mt(y)

    # Now, we construct a graph that reflects the distributive property and
    # reify with the substitutions from the un-distributed form
    out_pat = etuple(mt.add, etuple(mt.matmul, A_lv, x_lv),
                     etuple(mt.matmul, A_lv, y_lv))
    z_dist = reify(out_pat, s)

    # Evaluate the tuple-expression and get a meta object/graph
    z_dist_mt = z_dist.eval_obj

    # If all the logic variables were reified, we should be able to
    # further reify the meta graph and get a concrete TF graph
    z_dist_tf = z_dist_mt.reify()

    assert isinstance(z_dist_tf, tf.Tensor)

    # Check the first part of `A . x + A . y` (i.e. `A . x`)
    assert z_dist_tf.op.inputs[0].op.inputs[0] == A
    assert z_dist_tf.op.inputs[0].op.inputs[1] == x
    # Now, the second, `A . y`
    assert z_dist_tf.op.inputs[1].op.inputs[0] == A
    assert z_dist_tf.op.inputs[1].op.inputs[1] == y
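
The same match-and-rebuild steps can be packaged as a generic rewrite helper: etuplize a graph, unify it against an input pattern, and reify an output pattern under the resulting substitutions. A sketch under the same assumptions (the `rewrite` helper and the import paths are illustrative, not part of the project):

from unification import unify, reify
from etuples import etuplize  # location of this may vary by version


def rewrite(graph_tf, in_pat, out_pat):
    """Hypothetical helper: rewrite `graph_tf` via an etuple pattern pair."""
    # Match the S-expression form of the graph against the input pattern
    s = unify(in_pat, etuplize(graph_tf, shallow=False), {})
    if s is False:
        return None  # the pattern didn't match
    # Substitute the matched pieces into the output pattern, evaluate the
    # resulting tuple-expression to a meta graph, and reify to a base graph
    return reify(out_pat, s).eval_obj.reify()

# e.g. `rewrite(z, dis_pat, out_pat)` reproduces `z_dist_tf` from the test above.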
Code example #5
def test_meta_hashing():
    """Make sure we can hash meta graphs."""
    N = 100
    X = np.vstack([np.random.randn(N), np.ones(N)]).T
    X_mt = mt(X)

    assert isinstance(hash(X_mt), int)

    a_mt = mt(tf.compat.v1.placeholder('float32', name='a', shape=[1, 2]))
    add_mt = mt.add(tf.convert_to_tensor([1.0, 2.0]), mt.add(a_mt, a_mt))

    assert isinstance(hash(add_mt), int)
Code example #6
def test_metatize():
    class CustomClass(object):
        pass

    with pytest.raises(ValueError):
        mt(CustomClass())

    x_tf = tf.convert_to_tensor(np.r_[1, 2, 3])
    x_mt = mt(x_tf)
    assert isinstance(x_mt.op.node_def.attr['value'], HashableNDArray)

    x_mt = mt(np.r_[1, 2, 3])
    assert isinstance(x_mt.op.node_def.attr['value'], HashableNDArray)
Code example #7
def test_meta_compare():
    """Make objects compare correctly."""

    a_tf = tf.compat.v1.placeholder('float', name='a', shape=[None, 1])
    z_tf = tf.multiply(2.0, a_tf)

    assert mt(z_tf) == mt(z_tf)

    const_tf = tf.convert_to_tensor([1.0, 2.0])
    const_mt = mt(const_tf)

    assert const_mt == const_mt
    assert const_mt == mt(const_tf)
    assert const_mt != const_tf
    assert const_mt != a_tf
Code example #8
def test_commutativity():
    with enable_lvar_defaults('names'):
        add_1_mt = mt(1) + mt(2)
        add_2_mt = mt(2) + mt(1)

    res = run(0, var('q'), commutative(add_1_mt.base_operator))
    assert res is not False

    res = run(0, var('q'), eq_comm(add_1_mt, add_2_mt))
    assert res is not False

    with enable_lvar_defaults('names'):
        add_pattern_mt = mt(2) + var('q')

    res = run(0, var('q'), eq_comm(add_1_mt, add_pattern_mt))
    assert res[0] == add_1_mt.base_arguments[0]
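
Because `eq_comm` tries the operand orderings itself, a pattern only needs to spell out one arrangement. A minimal sketch (import paths are assumptions; `enable_lvar_defaults` keeps the auto-generated names from blocking unification):

from unification import var
from kanren import run
from kanren.assoccomm import eq_comm
from symbolic_pymc.tensorflow.meta import mt, enable_lvar_defaults  # paths assumed

q = var('q')
with enable_lvar_defaults('names'):
    target_mt = mt(3) + mt(4)
    pattern_mt = mt(4) + q  # operands deliberately in the opposite order

# The ground graph goes on the left-hand side (see the XXX note in
# `test_commutativity_tfp` below)
(res,) = run(1, q, eq_comm(target_mt, pattern_mt))
assert res == target_mt.base_arguments[0]  # i.e. `mt(3)`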
Code example #9
def test_meta_distributions():
    N = 100
    sigma_tf = tfd.Gamma(np.asarray(1.), np.asarray(1.)).sample()
    epsilon_tf = tfd.Normal(np.zeros((N, 1)), sigma_tf).sample()
    beta_tf = tfd.Normal(np.zeros((2, 1)), 1).sample()
    X = np.vstack([np.random.randn(N), np.ones(N)]).T
    X_tf = tf.convert_to_tensor(X)

    Y_tf = tf.linalg.matmul(X_tf, beta_tf) + epsilon_tf

    Y_mt = mt(Y_tf)

    # Confirm that all `Operation`s are the same.
    assert_ops_equal(Y_mt, Y_tf)

    # Now, let's see if we can reconstruct it entirely from the
    # meta objects.
    def _remove_obj(meta_obj):
        if (hasattr(meta_obj, '_obj')
                and not isinstance(meta_obj, TFlowMetaOpDef)):
            meta_obj._obj = None

        if hasattr(meta_obj, 'ancestors'):
            for a in meta_obj.ancestors or []:
                _remove_obj(a)

    _remove_obj(Y_mt)

    Y_mt_tf = Y_mt.reify()

    assert_ops_equal(Y_mt, Y_mt_tf)
Code example #10
def test_walko():
    with enable_lvar_defaults("names"):
        add_1_mt = mt(1) + mt(2)

    def walk_rel(x, y):
        return lall(eq(x, mt(1)), eq(y, mt(3)))

    q = var()
    (res, ) = run(1, q, walko(walk_rel, add_1_mt, q))

    # The easiest way to check whether or not two arbitrary TF meta graphs are
    # (structurally) equivalent is to confirm that they unify.  This avoids
    # uninteresting differences in node names, uninferred type information,
    # etc.
    with enable_lvar_defaults("names", "node_attrs"):
        assert unify(res.eval_obj, mt(3) + mt(2)) is not False
Code example #11
def test_nodedef():
    X = np.random.normal(0, 1, (10, 10))
    S = tf.matmul(X, X, transpose_a=True)
    d, U, V = tf.linalg.svd(S)
    node_def_mt = mt(d.op.node_def)

    assert 'compute_uv' in node_def_mt.attr
    assert 'full_matrices' in node_def_mt.attr

    # Some outputs use nodedef information; let's test those.
    norm_rv = mt.RandomStandardNormal(mean=0,
                                      stddev=1,
                                      shape=(1000, ),
                                      dtype=tf.float32,
                                      name=var())
    assert isinstance(norm_rv, TFlowMetaTensor)
    assert norm_rv.dtype == tf.float32

    # We shouldn't be metatizing all parsed `node_def.attr` values; otherwise,
    # we won't be able to reconstruct corresponding meta Ops using their meta
    # OpDefs and inputs.
    x_test = tf.constant([1.8, 2.2], dtype=tf.float32)

    with tf.Graph().as_default():
        y_test = tf.dtypes.cast(x_test, tf.int32, name="y")
        y_test_mt = mt(y_test)

    # `y_test_mt.inputs` should have two `.attr` values that are Python
    # primitives (i.e. int and bool); these shouldn't get metatized and break
    # our ability to reconstruct the object from its rator + rands.
    y_test_new_mt = TFlowMetaOperator(
        y_test_mt.op.op_def, y_test_mt.op.node_def)(*y_test_mt.base_arguments)

    # Normalize the generated name so that we can compare with `==`
    assert y_test_new_mt.op.node_def.name.startswith('y')
    y_test_new_mt.op.node_def.name = 'y'

    assert y_test_mt == y_test_new_mt

    with tf.Graph().as_default():
        z_test_mt = mt.cast(x_test, tf.int32, name="y")

    assert z_test_mt.op.node_def.name.startswith('y')
    z_test_mt.op.node_def.name = 'y'

    assert z_test_mt == y_test_mt
Code example #12
def test_ascii_printing():
    """Make sure we can ascii/text print a TF graph."""

    A = tf.compat.v1.placeholder("float",
                                 name="A",
                                 shape=tf.TensorShape([None, None]))
    x = tf.compat.v1.placeholder("float",
                                 name="x",
                                 shape=tf.TensorShape([None, 1]))
    y = tf.multiply(1.0, x, name="y")

    z = tf.matmul(A, tf.add(y, y, name="x_p_y"), name="A_dot")

    std_out = io.StringIO()
    with redirect_stdout(std_out):
        tf_dprint(z)

    expected_out = textwrap.dedent("""
    Tensor(MatMul):0,\tdtype=float32,\tshape=[None, 1],\t"A_dot:0"
    |  Tensor(Placeholder):0,\tdtype=float32,\tshape=[None, None],\t"A:0"
    |  Tensor(Add):0,\tdtype=float32,\tshape=[None, 1],\t"x_p_y:0"
    |  |  Tensor(Mul):0,\tdtype=float32,\tshape=[None, 1],\t"y:0"
    |  |  |  Tensor(Const):0,\tdtype=float32,\tshape=[],\t"y/x:0"
    |  |  |  |  1.
    |  |  |  Tensor(Placeholder):0,\tdtype=float32,\tshape=[None, 1],\t"x:0"
    |  |  Tensor(Mul):0,\tdtype=float32,\tshape=[None, 1],\t"y:0"
    |  |  |  ...
    """)

    assert std_out.getvalue() == expected_out.lstrip()

    std_out = io.StringIO()
    with tf.Graph().as_default(), redirect_stdout(std_out):
        Var._id = 0
        tt_lv_inputs_mt = mt.Tensor(mt.Operation(var(), var(), var()), 0,
                                    var())
        tt_const_lv_nodedef_mt = mt.Tensor(
            mt.Operation(mt.Const.op_def, var(), ()), 0, var())
        tt_lv_op_mt = mt.Tensor(var(), 0, var())
        test_mt = mt(
            1) + tt_lv_inputs_mt + tt_const_lv_nodedef_mt + tt_lv_op_mt
        tf_dprint(test_mt)

    expected_out = textwrap.dedent("""
    Tensor(AddV2):0,\tdtype=int32,\tshape=~_11,\t"add:0"
    |  Tensor(AddV2):0,\tdtype=int32,\tshape=~_12,\t"add:0"
    |  |  Tensor(AddV2):0,\tdtype=int32,\tshape=~_13,\t"add:0"
    |  |  |  Tensor(Const):0,\tdtype=int32,\tshape=[],\t"Const:0"
    |  |  |  |  1
    |  |  |  Tensor(~_15):0,\tdtype=~_3,\tshape=~_14,\t"~_17"
    |  |  |  |  ~_2
    |  |  Tensor(Const):0,\tdtype=~_5,\tshape=~_18,\t"~_20"
    |  |  |  ~_4
    |  Tensor(~_6):0,\tdtype=~_7,\tshape=~_21,\t"~_22"
    """)

    assert std_out.getvalue() == expected_out.lstrip()
Code example #13
def test_meta_eager():

    assert tf.executing_eagerly()

    N = 100
    X = np.vstack([np.random.randn(N), np.ones(N)]).T
    X_tf = tf.convert_to_tensor(X)

    with pytest.raises(AttributeError):
        _ = mt(X_tf)

    with pytest.raises(AttributeError):
        _ = mt(X)

    with graph_mode():
        N = 100
        X = np.vstack([np.random.randn(N), np.ones(N)]).T
        X_tf = tf.convert_to_tensor(X)
        _ = mt(X_tf)
Code example #14
def test_meta_reify():
    a_mt = mt(tf.compat.v1.placeholder('float64', name='a', shape=[1, 2]))
    b_mt = mt(tf.compat.v1.placeholder('float64', name='b', shape=[]))
    add_mt = mt.add(a_mt, b_mt)

    assert add_mt.shape.as_list() == [1, 2]

    add_tf = add_mt.reify()

    assert isinstance(add_tf, tf.Tensor)
    assert add_tf.op.type == 'Add'
    assert add_tf.shape.as_list() == [1, 2]

    # Remove cached base object and force manual reification.
    add_mt._obj = None
    add_tf = add_mt.reify()

    assert isinstance(add_tf, tf.Tensor)
    assert add_tf.op.type == 'Add'
    assert add_tf.shape.as_list() == [1, 2]
Code example #15
def test_meta_const():
    """Make sure we can create a Const tensor by hand."""

    with tf.Graph().as_default():
        one_mt = mt.const(1, 'int32', 'Const')

    with tf.Graph().as_default():
        another_one_mt = mt(1)

    assert one_mt == another_one_mt
    assert isinstance(one_mt.reify(), tf.Tensor)
    assert one_mt.reify().op.type == 'Const'
Code example #16
def test_inputs_remapping():
    t1 = [[1, 2, 3], [4, 5, 6]]
    t2 = [[7, 8, 9], [10, 11, 12]]
    z = tf.concat([t1, t2], 0)

    z_mt = mt(z)

    # Even though we gave it unhashable arguments, the operator should've
    # converted them
    assert isinstance(z_mt.base_arguments[0], tuple)
    assert z_mt.base_arguments[0][0].obj == z.op.inputs[0]
    assert z_mt.base_arguments[0][1].obj == z.op.inputs[1]
    assert z_mt.base_arguments[1].obj == z.op.inputs[2]
Code example #17
def test_commutativity_tfp():

    with tf.Graph().as_default():
        mu_tf = tf.compat.v1.placeholder(tf.float32,
                                         name="mu",
                                         shape=tf.TensorShape([None]))
        tau_tf = tf.compat.v1.placeholder(tf.float32,
                                          name="tau",
                                          shape=tf.TensorShape([None]))

        normal_tfp = tfd.normal.Normal(mu_tf, tau_tf)

        value_tf = tf.compat.v1.placeholder(tf.float32,
                                            name="value",
                                            shape=tf.TensorShape([None]))

        normal_log_lik = normal_tfp.log_prob(value_tf)

    normal_log_lik_opt = normalize_tf_graph(normal_log_lik)

    with enable_lvar_defaults("names", "node_attrs"):
        tfp_normal_pattern_mt = mt_normal_log_prob(var(), var(), var())

    normal_log_lik_mt = mt(normal_log_lik)
    normal_log_lik_opt_mt = mt(normal_log_lik_opt)

    # Our pattern matches the form of an unnormalized TFP normal PDF.
    assert run(0, True, eq(normal_log_lik_mt,
                           tfp_normal_pattern_mt)) == (True, )
    # Our pattern should *not* match the Grappler-optimized graph, because
    # Grappler will reorder terms (e.g. the log + constant
    # variance/normalization term)
    assert run(0, True, eq(normal_log_lik_opt_mt, tfp_normal_pattern_mt)) == ()

    # XXX: `eq_comm` is, unfortunately, order sensitive!  LHS should be ground.
    assert run(0, True, eq_comm(normal_log_lik_mt,
                                tfp_normal_pattern_mt)) == (True, )
    assert run(0, True, eq_comm(normal_log_lik_opt_mt,
                                tfp_normal_pattern_mt)) == (True, )
Code example #18
File: utils.py Project: volcacius/symbolic-pymc
def mt_normal_log_prob(x, loc, scale):
    """Create a meta graph for Grappler-canonicalized standard or non-standard TFP normal log-likelihoods."""
    if loc == 0:
        log_unnormalized_mt = mt(np.array(-0.5, "float32"))
        log_unnormalized_mt *= mt.squareddifference(
            mt(np.array(0.0, "float32")),
            mt.realdiv(x, scale) if scale != 1 else mt.mul(
                np.array(1.0, "float32"), x),
        )
    else:
        log_unnormalized_mt = mt(np.array(-0.5, "float32"))
        log_unnormalized_mt *= mt.squareddifference(
            mt.realdiv(x, scale) if scale != 1 else mt.mul(
                np.array(1.0, "float32"), x),
            mt.realdiv(loc, scale) if scale != 1 else mt.mul(
                np.array(1.0, "float32"), loc),
        )

    log_normalization_mt = mt((0.5 * np.log(2.0 * np.pi)).astype("float32"))

    if scale != 1:
        log_normalization_mt = log_normalization_mt + mt.log(scale)

    return log_unnormalized_mt - log_normalization_mt
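
A sketch of how this pattern builder is typically used, mirroring `test_commutativity_tfp` above (the import paths are assumptions):

from unification import var
from kanren import run
from kanren.assoccomm import eq_comm
from symbolic_pymc.tensorflow.meta import mt, enable_lvar_defaults  # paths assumed

# Fill every slot with a fresh logic variable; the lvar defaults keep the
# pattern's names and node attributes from blocking unification
with enable_lvar_defaults('names', 'node_attrs'):
    normal_pattern_mt = mt_normal_log_prob(var(), var(), var())

# Given `normal_log_lik_mt`, a meta graph of a TFP normal log-likelihood
# (as in `test_commutativity_tfp`), the commutative match should succeed:
# run(0, True, eq_comm(normal_log_lik_mt, normal_pattern_mt)) == (True,)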
Code example #19
def test_meta_multi_output():
    """Make sure we can handle TF `Operation`s that output more than on tensor."""
    d, U, V = mt.linalg.svd(var())

    assert d.op == U.op == V.op
    assert d.value_index == 0
    assert U.value_index == 1
    assert V.value_index == 2

    assert d.op.outputs == (d, U, V)
    assert d.op.default_output is d.op.outputs

    tf.compat.v1.disable_eager_execution()

    X_mt = mt(np.eye(2))
    d, U, V = mt.linalg.svd(X_mt)
    d.value_index = var()
    assert isinstance(d.reify(), TFlowMetaTensor)
Code example #20
def walk_rel(x, y):
    return lall(eq(x, mt(1)), eq(y, mt(3)))
Code example #21
def test_etuple_term():

    assert etuplize("blah", return_bad_args=True) == "blah"

    a = tf.compat.v1.placeholder(tf.float64, name='a')
    b = tf.compat.v1.placeholder(tf.float64, name='b')

    a_mt = mt(a)
    a_mt._obj = None
    a_reified = a_mt.reify()
    assert isinstance(a_reified, tf.Tensor)
    assert a_reified.shape.dims is None

    with pytest.raises(TypeError):
        etuplize(a_mt.op.op_def)

    a_nd_e = etuplize(a_mt.op.node_def, shallow=False)
    assert a_nd_e[0] is TFlowMetaNodeDef
    assert a_nd_e[1] == a_mt.op.node_def.op
    assert a_nd_e[2] == a_mt.op.node_def.name
    assert a_nd_e[3] == a_mt.op.node_def.attr

    # A deep etuplization
    test_e = etuplize(a_mt, shallow=False)
    assert len(test_e) == 1
    assert len(test_e[0]) == 3
    assert test_e[0][0] is TFlowMetaOperator
    assert test_e[0][1] is a_mt.op.op_def
    assert test_e[0][2] == a_nd_e

    assert test_e.eval_obj is a_mt

    test_e._eval_obj = ExpressionTuple.null
    with tf.Graph().as_default():
        a_evaled = test_e.eval_obj
    assert a_evaled == a_mt

    # A shallow etuplization
    test_e = etuplize(a_mt, shallow=True)
    assert len(test_e) == 1
    assert isinstance(test_e[0], TFlowMetaOperator)
    assert test_e[0].op_def is a_mt.op.op_def
    assert test_e[0].node_def is a_mt.op.node_def

    assert test_e.eval_obj is a_mt

    test_e._eval_obj = ExpressionTuple.null
    with tf.Graph().as_default():
        a_evaled = test_e.eval_obj
    assert a_evaled == a_mt

    a_reified = a_evaled.reify()
    assert isinstance(a_reified, tf.Tensor)
    assert a_reified.shape.dims is None

    # Now, consider a meta graph with operator arguments
    add_mt = mt.AddV2(a, b)
    add_et = etuplize(add_mt, shallow=True)
    assert isinstance(add_et, ExpressionTuple)
    assert add_et[0].op_def == mt.AddV2.op_def

    # Check `kanren`'s term framework
    assert isinstance(operator(add_mt), TFlowMetaOperator)
    assert arguments(add_mt) == add_mt.op.inputs

    assert operator(add_mt)(*arguments(add_mt)) == add_mt

    assert isinstance(add_et[0], TFlowMetaOperator)
    assert add_et[1:] == add_mt.op.inputs
    assert operator(add_mt)(*arguments(add_mt)) == add_mt

    assert term(operator(add_mt), arguments(add_mt)) == add_mt

    # Make sure things work with logic variables
    add_lvar_mt = TFlowMetaTensor(var(), var(), [1, 2])

    # TODO FIXME: This is bad
    assert operator(add_lvar_mt) is None
    # assert operator(add_lvar_mt) == add_lvar_mt.op
    # TODO FIXME: Same here
    assert arguments(add_lvar_mt) is None
Code example #22
def test_global_options():

    with tf.Graph().as_default():
        x_mt = mt.Placeholder('float')
        assert isinstance(x_mt.obj, tf.Tensor)
        assert x_mt.name == 'Placeholder:0'

    with tf.Graph().as_default(), disable_auto_reification():
        y_mt = mt.Placeholder('float')
        assert y_mt.obj is None
        assert y_mt.name == 'Placeholder:0'
        assert isinstance(y_mt.op.node_def.attr, dict)

    with tf.Graph().as_default(), enable_lvar_defaults('names', 'node_attrs'):
        # This *will* auto-reify and have base versions of `names` and `attrs`;
        # however, it will replace those with lvars.
        z_mt = mt.Placeholder('float')
        assert z_mt.obj is None
        assert isvar(z_mt.name)
        assert isvar(z_mt.op.node_def.attr)

    with disable_auto_reification(), enable_lvar_defaults(
            'names', 'node_attrs'):
        # This will *not* auto-reify; it simply creates the object from
        # scratch with meta types and the appropriate/desired logic variables.
        z_mt = mt.Placeholder('float')
        assert z_mt.obj is None
        assert isvar(z_mt.name)
        assert isvar(z_mt.op.node_def.attr)

    with tf.Graph().as_default(), enable_lvar_defaults('names', 'node_attrs'):
        y_mt = mt.Placeholder('float') + mt.Placeholder('float')
        assert isvar(y_mt.name)
        assert isvar(y_mt.op.inputs[0].name)
        assert isvar(y_mt.op.inputs[1].name)
        assert isvar(y_mt.op.node_def.attr)
        assert isvar(y_mt.op.inputs[0].op.node_def.attr)
        assert isvar(y_mt.op.inputs[1].op.node_def.attr)

    with tf.Graph().as_default() as test_graph:
        a_mt = mt(2.0)
        assert a_mt.obj is not None

    with test_graph.as_default(), enable_lvar_defaults('names', 'node_attrs'):
        a_new_mt = mt(a_mt)
        assert a_new_mt is a_mt

        b_mt = 1.0 * a_mt
        assert a_mt.obj is not None
        assert isvar(b_mt.name)
        assert isvar(b_mt.op.node_def.attr)
        assert b_mt.op.inputs[1] is a_mt

        # `NodeDef.attr` for constants should not be turned into lvars
        assert not isvar(b_mt.op.inputs[0].op.node_def.attr)
        assert not isvar(b_mt.op.inputs[1].op.node_def.attr)

    # Make sure we clear out the `.obj` so that the names won't mismatch
    with tf.Graph().as_default(), enable_lvar_defaults('names'):
        a_mt = mt(1.0)
        assert isvar(a_mt.name)
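
These contexts compose into a recipe for building reusable match patterns: disable auto-reification and default the names/attributes to logic variables, so the pattern unifies with concretely named graphs. A minimal sketch (import paths assumed):

import tensorflow as tf

from unification import var, unify
from symbolic_pymc.tensorflow.meta import (  # paths assumed
    mt, disable_auto_reification, enable_lvar_defaults)

# Build a pattern that is never tied to a base graph
with disable_auto_reification(), enable_lvar_defaults('names', 'node_attrs'):
    pattern_mt = mt.Placeholder('float') + var('rhs')

# Build a concrete graph to match against
with tf.Graph().as_default():
    target_mt = mt.Placeholder('float') + mt.Placeholder('float')

# The pattern's lvar names/attrs unify with the target's concrete ones,
# and `rhs` captures the second operand
s = unify(pattern_mt, target_mt, {})
assert s is not False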
Code example #23
def test_etuple_term():

    assert etuplize("blah", return_bad_args=True) == "blah"

    a = tf.compat.v1.placeholder(tf.float64, name="a")
    b = tf.compat.v1.placeholder(tf.float64, name="b")

    a_mt = mt(a)
    a_mt._obj = None
    a_reified = a_mt.reify()
    assert isinstance(a_reified, tf.Tensor)
    assert a_reified.shape.dims is None

    with pytest.raises(TypeError):
        etuplize(a_mt.op.op_def)

    with pytest.raises(TypeError):
        etuplize(a_mt.op.node_def, shallow=False)

    with pytest.raises(TypeError):
        etuplize(a_mt, shallow=False)

    # Now, consider a meta graph with operator arguments
    add_mt = mt.AddV2(a, b)
    add_et = etuplize(add_mt, shallow=True)
    assert isinstance(add_et, ExpressionTuple)
    assert add_et[0].op_def == mt.AddV2.op_def

    # Check `kanren`'s term framework
    assert isinstance(operator(add_mt), TFlowMetaOperator)
    assert arguments(add_mt) == add_mt.op.inputs

    assert operator(add_mt)(*arguments(add_mt)) == add_mt

    assert isinstance(add_et[0], TFlowMetaOperator)
    assert add_et[1:] == add_mt.op.inputs
    assert operator(add_mt)(*arguments(add_mt)) == add_mt

    assert term(operator(add_mt), arguments(add_mt)) == add_mt

    add_mt = mt.AddV2(a, add_mt)
    add_et = etuplize(add_mt, shallow=False)

    assert isinstance(add_et, ExpressionTuple)
    assert len(add_et) == 3
    assert add_et[0].op_def == mt.AddV2.op_def
    assert len(add_et[2]) == 3
    assert add_et[2][0].op_def == mt.AddV2.op_def
    assert add_et.eval_obj is add_mt

    add_et._eval_obj = ExpressionTuple.null
    with tf.Graph().as_default():
        assert add_et.eval_obj == add_mt

    # Make sure things work with logic variables
    add_lvar_mt = TFlowMetaTensor(var(), var(), [1, 2])

    with pytest.raises(ConsError):
        assert operator(add_lvar_mt) is None

    with pytest.raises(ConsError):
        assert arguments(add_lvar_mt) is None
Code example #24
def test_meta_basic():

    assert mt.Add == mt.Add
    assert mt.Add != mt.Sub
    assert mt.Add.op_def == mt.Add.op_def
    assert mt.Add.op_def != mt.Sub.op_def

    var_mt = TFlowMetaTensor(var(), var(), var())
    # It should generate a logic variable for the name and use it from here on.
    var_name = var_mt.name
    assert isvar(var_name)
    assert var_mt.name is var_name
    # Same for a tensor shape
    var_shape = var_mt.shape
    assert isinstance(var_shape, TFlowMetaTensorShape)
    assert isvar(var_shape.dims)

    # This tensor is essentially all logic variables, so it should not reify
    # to a base object; instead, `reify` should return a distinct/new meta
    # object that's either equal to the original meta object or partially
    # reified.
    assert var_mt.reify() == var_mt

    # This operator is reifiable
    # NOTE: Const objects are automatically created for the constant inputs, so
    # we need to do this in a new graph to make sure that their auto-generated
    # names are consistent throughout runs.
    with tf.Graph().as_default() as test_graph:
        test_op = TFlowMetaOp(mt.Add.op_def,
                              TFlowMetaNodeDef('Add', 'Add', {}), [1, 0])

        # This tensor has an "unknown"/logic variable output index and dtype, but,
        # since the operator fully specifies it, reification should still work.
        var_mt = TFlowMetaTensor(test_op, var(), var())

        # This should be partially reified
        var_tf = var_mt.reify()

        assert isinstance(var_tf, tf.Tensor)

        # These shouldn't be equal, since `var_mt` has logic variables for
        # output index and dtype.  (They should be unifiable, though.)
        assert mt(var_tf) != var_mt

        # NOTE: The operator name specified by the meta NodeDef *can* be
        # different from the reified TF tensor (e.g. when meta objects are
        # created/reified within a graph already using the NodeDef-specified
        # name).
        #
        # TODO: We could search for existing TF objects in the current graph by
        # name and raise exceptions when the desired meta information and name
        # do not correspond--effectively making the meta object impossible to
        # reify in said graph.

    # Next, we convert an existing TF object into a meta object
    # and make sure everything corresponds between the two.
    N = 100
    X = np.vstack([np.random.randn(N), np.ones(N)]).T

    X_tf = tf.convert_to_tensor(X)

    with tf.Graph().as_default() as test_graph:
        X_mt = mt(X)

    assert isinstance(X_mt, TFlowMetaTensor)
    assert X_mt.op.obj.name == 'Const'
    assert not hasattr(X_mt, '_name')
    assert X_mt.name == 'Const:0'
    assert X_mt._name == 'Const:0'

    # Make sure `reify` returns the cached base object.
    assert X_mt.reify() is X_mt.obj
    assert isinstance(X_mt.reify(), tf.Tensor)

    assert X_mt == mt(X_tf)

    # Create a (constant) tensor meta object manually.
    X_raw_mt = TFlowMetaTensor(X_tf.op, X_tf.value_index, X_tf.dtype, obj=X_tf)

    assert np.array_equal(X_raw_mt.op.node_def.attr['value'], X)

    # These compare equal: both are constants whose `NodeDef`s carry the same
    # constant value (although their auto-generated names may differ; see the
    # TODO below).
    assert X_mt == X_raw_mt
    # TODO: Should this be true?
    # assert X_mt.name == X_raw_mt.name

    add_mt = mt.add(1, 2)

    assert isinstance(add_mt, TFlowMetaTensor)
    assert isinstance(add_mt.obj, tf.Tensor)
    assert isinstance(add_mt.op.obj, tf.Operation)
    assert add_mt.op.obj.type == 'Add'

    assert len(add_mt.op.inputs) == 2
    assert all(isinstance(i, TFlowMetaTensor) for i in add_mt.op.inputs)

    one_mt, two_mt = mt(1), mt(2)

    assert one_mt != two_mt

    add_mt_2 = mt.add(one_mt, two_mt)

    assert isinstance(add_mt_2, TFlowMetaTensor)
    assert isinstance(add_mt_2.obj, tf.Tensor)
    assert isinstance(add_mt_2.op.obj, tf.Operation)
    assert add_mt_2.op.obj.type == 'Add'

    a_mt = mt(tf.compat.v1.placeholder('float64', name='a', shape=[1, 2]))
    b_mt = mt(tf.compat.v1.placeholder('float64', name='b'))
    assert a_mt != b_mt

    assert a_mt.shape.ndims == 2
    assert a_mt.shape == TFlowMetaTensorShape([1, 2])

    # Make sure that names are properly inferred when there are no base objects
    # to reference
    with tf.Graph().as_default():
        one_mt = mt(1.0)
        log_mt = mt.log(one_mt)
        assert log_mt.name == 'Log:0'
        assert log_mt.dtype == tf.float32
        assert log_mt.op.outputs[0].dtype == tf.float32

        log_mt._name = None
        one_mt._obj = None
        log_mt._obj = None
        assert log_mt.dtype == tf.float32
        assert log_mt.name == 'Log:0'

        log_mt = mt.log(var(), name=var())
        assert isvar(log_mt.name)