Example #1
def test_validate_abstract_2():
    bad_array = AbstractArray(to_abstract_test(f64), {
        SHAPE: (1, 2),
        TYPE: PyTorchTensor
    })
    with pytest.raises(ValidationError):
        validate_abstract(bad_array, {})
Example #2
def mksens(x):
    return AbstractArray(
        AbstractScalar(
            {TYPE: np_dtype_to_type(x.dtype.name), VALUE: ANYTHING}
        ),
        {SHAPE: tuple(x.shape), TYPE: NDArray},
    )
Example #3
def mksens(x):
    return AbstractArray(
        AbstractScalar(
            {TYPE: pytorch_dtype_to_type(x.dtype), VALUE: ANYTHING}
        ),
        {SHAPE: tuple(x.shape), TYPE: PyTorchTensor},
    )
Example #4
def to_abstract_test(self, x: np.ndarray):
    return AbstractArray(
        AbstractScalar({
            VALUE: ANYTHING,
            TYPE: dtype.np_dtype_to_type(str(x.dtype)),
        }),
        {SHAPE: x.shape}
    )
Example #5
File: common.py  Project: GonChen/myia
def arr_of(t, shp, value):
    return AbstractArray(AbstractScalar({
        VALUE: value,
        TYPE: t,
    }), {
        SHAPE: shp,
        TYPE: xtype.NDArray
    })
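
For reference, a minimal usage sketch of this builder (hypothetical values; it assumes the ANYTHING, xtype and f64 names that the surrounding common.py already imports):

# Abstract value for a 3x4 float64 NDArray whose element value is unknown.
a = arr_of(f64, (3, 4), ANYTHING)
assert a.xshape() == (3, 4)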
Example #6
def test_validate_abstract_2():
    fn = AbstractFunction(
        VirtualFunction(
            (),
            AbstractArray(to_abstract_test(f64), {
                SHAPE: (1, 2),
                TYPE: PyTorchTensor
            }),
        ),
    )
    with pytest.raises(ValidationError):
        validate_abstract(fn, {})
Example #7
File: common.py  Project: tor4z/myia
def arr_of(t, shp, value):
    """Generate symbolic array."""
    return AbstractArray(
        AbstractScalar({
            VALUE: value,
            TYPE: t
        }),
        {
            SHAPE: shp,
            TYPE: xtype.NDArray
        },
    )
Example #8
def _grad_test(fn,
               obj,
               args,
               sens_type,
               pipeline=grad_pipeline,
               rel_error=1e-3):

    pytorch_grads = pt_fn_grads(fn, *args)

    sens_type_shape = sens_type
    if sens_type == ():
        sens_type = APT_0d_loss
    elif sens_type == (1, ):
        sens_type = APT_loss
    else:
        sens_type = AbstractArray(AbstractScalar({
            TYPE: f32,
            VALUE: ANYTHING
        }), {
            SHAPE: sens_type,
            TYPE: PyTorchTensor
        })

    pipeline = standard_pipeline
    pipeline = pipeline.insert_after('parse', grad_wrap=grad_wrap)
    argspec = tuple(from_value(arg, broaden=True) for arg in clean_args(args))
    sens_type = to_abstract_test(sens_type)
    if isinstance(obj, FunctionType):
        res = pipeline.run(input=obj, argspec=[*argspec, sens_type])
    else:
        pip = pipeline.configure(parse=False)
        res = pip.run(graph=obj, argspec=[*argspec, sens_type])

    if sens_type == APT_loss:
        sens = torch.Tensor([1.0])
    elif sens_type == APT_0d_loss:
        sens = torch.Tensor([1.0]).reshape(())
    else:
        sens = torch.ones(sens_type_shape)

    myia_grads = res['output'](*args, sens)

    for pt_g, my_g in zip(pytorch_grads, myia_grads):
        # print("pytorch_grad", pt_g)
        # print("myia_grad", my_g)
        assert torch.allclose(pt_g,
                              my_g,
                              rtol=1e-05,
                              atol=1e-06,
                              equal_nan=True)
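
A hypothetical invocation of this helper (names and shapes invented for illustration; in the tests fn and obj are typically the same Python function and sens_type matches the output shape):

def mul(a, b):
    return a * b

x = torch.randn(2, 3, requires_grad=True)
y = torch.randn(2, 3, requires_grad=True)
# A (2, 3) sens_type takes the generic AbstractArray branch above;
# () or (1,) would select APT_0d_loss / APT_loss instead.
_grad_test(mul, mul, (x, y), (2, 3))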
Example #9
from myia.composite import ArithmeticData
from myia.dtype import Bool, Nil, Number, f16, f32, f64, i16, i32, i64, u64
from myia.ir import MultitypeGraph
from myia.prim.py_implementations import hastype, tagged
from myia.utils import (
    ADT,
    EnvInstance,
    dataclass_fields,
    dataclass_methods,
    overload,
)

B = Bool
Bot = AbstractBottom()
EmptyTuple = typing.Tuple[()]
AA = AbstractArray(ANYTHING, {SHAPE: ANYTHING})


###########################
# Abstract value builders #
###########################


def arr_of(t, shp, value):
    return AbstractArray(AbstractScalar({
        VALUE: value,
        TYPE: t,
    }), {SHAPE: shp})


def ai64_of(*shp, value=ANYTHING):
Example #10
def to_relay_type(self, a: AbstractArray):
    tp = a.element.xtype()
    return relay.ty.TensorType(a.xshape(), type_to_np_dtype(tp))
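
This backend hook turns the abstract array's element type and static shape into a TVM Relay tensor type. A rough sketch of the mapping it performs (assuming a TVM install and that type_to_np_dtype maps f32 to 'float32'):

from tvm import relay
# An AbstractArray with element xtype() == f32 and xshape() == (2, 3)
# would be converted to:
ty = relay.ty.TensorType((2, 3), 'float32')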
Example #11
File: common.py  Project: GonChen/myia
    AbstractUnion,
    AbstractValue,
    empty,
    from_value,
    listof,
    type_to_abstract,
)
from myia.classes import ADT
from myia.ir import MultitypeGraph
from myia.utils import EnvInstance, HandleInstance, dataclass_fields, overload
from myia.xtype import Bool, f16, f32, f64, i16, i32, i64, u64

B = Bool
Bot = AbstractBottom()
EmptyTuple = typing.Tuple[()]
AA = AbstractArray(ANYTHING, {SHAPE: ANYTHING, TYPE: ANYTHING})
AN = AbstractArray(ANYTHING, {SHAPE: ANYTHING, TYPE: xtype.NDArray})

###########################
# Abstract value builders #
###########################


def arr_of(t, shp, value):
    return AbstractArray(AbstractScalar({
        VALUE: value,
        TYPE: t,
    }), {
        SHAPE: shp,
        TYPE: xtype.NDArray
    })
Example #12
            'resources.backend.name': backend,
            'resources.backend.options': backend_options,
        })
#"""


# TODO: should this also return grads with respect to kwargs
def pt_fn_grads(fn, *args, **kwargs):
    output = fn(*args, **kwargs)
    return torch.autograd.grad(output, args, torch.ones(output.shape))
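
pt_fn_grads computes reference gradients with PyTorch autograd, seeding the backward pass with a tensor of ones shaped like the output. A quick self-contained sketch (hypothetical inputs):

import torch

x = torch.randn(3, requires_grad=True)
y = torch.randn(3, requires_grad=True)
gx, gy = pt_fn_grads(lambda a, b: (a * b).sum(), x, y)
# d/da sum(a*b) = b and d/db sum(a*b) = a, so gx == y and gy == x.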


APT_loss = AbstractArray(AbstractScalar({
    TYPE: f32,
    VALUE: ANYTHING
}), {
    SHAPE: (1, ),
    TYPE: PyTorchTensor
})
APT_0d_loss = AbstractArray(AbstractScalar({
    TYPE: f32,
    VALUE: ANYTHING
}), {
    SHAPE: (),
    TYPE: PyTorchTensor
})
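
These two constants describe the sensitivity types handled specially by _grad_test above; the concrete sensitivity tensors it later builds for them are, respectively:

sens_1d = torch.Tensor([1.0])              # matches APT_loss, shape (1,)
sens_0d = torch.Tensor([1.0]).reshape(())  # matches APT_0d_loss, shape ()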


def _fwd_test(fn,
              args,
              pipeline=standard_pipeline,