Example #1
0
 def worker(rank, data, backend, expect, port_queue):
     """Broadcast a parameter across the process group and verify it.

     Skips silently when fewer GPUs than ``world_size`` are available.
     """
     available_gpus = mge.get_device_count("gpu")
     if available_gpus < world_size:
         return
     _init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
     param = Parameter(data)
     dist.functional.bcast_param(param)
     # Every rank should now hold the broadcast value.
     assert np.allclose(param.numpy(), expect)
Example #2
0
def test_load_quantized():
    """Quantized MLP weights must survive a state-dict save/load round trip."""

    def requantize(weight, scale):
        # Re-wrap a weight tensor as a Parameter with the given qint8 scale.
        return Parameter(weight.astype(mgb.dtype.qint8(scale)).numpy())

    inp = tensor(np.random.random((2, 28)), dtype="float32")
    inp = inp.astype(mgb.dtype.qint8(0.1))
    mlp = MLP()
    quantize_qat(mlp)
    quantize(mlp)
    mlp.dense0.weight = requantize(mlp.dense0.weight, 0.001)
    mlp.dense1.weight = requantize(mlp.dense1.weight, 0.0002)
    mlp.eval()
    pred0 = mlp(inp)

    with BytesIO() as fout:
        mge.save(mlp.state_dict(), fout)
        fout.seek(0)
        checkpoint = mge.load(fout)
        # Perturb the weights so load_state_dict has something to restore.
        mlp.dense0.weight = requantize(mlp.dense0.weight, 0.00001)
        mlp.dense1.weight = requantize(mlp.dense1.weight, 0.2)
        mlp.load_state_dict(checkpoint)
        pred1 = mlp(inp)

    # Predictions before saving and after reloading must agree.
    assertTensorClose(
        pred0.astype("float32").numpy(),
        pred1.astype("float32").numpy(),
        max_err=5e-6,
    )
Example #3
0
 def worker(rank, data, backend, expect, port_queue):
     """Broadcast the parameter under key "x" and verify the received value.

     Returns immediately when CUDA is unavailable.
     """
     if not mge.is_cuda_available():
         return
     _init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
     param = Parameter(data)
     dist.functional.bcast_param(param, "x")
     # Every rank should now hold the broadcast value.
     assert np.allclose(param.numpy(), expect)
def test_tensor_serialization():
    """Tensor, Parameter and Buffer must round-trip through pickle intact."""

    def check_equal(lhs, rhs):
        # Metadata and contents must all survive the round trip.
        assert lhs.dtype == rhs.dtype
        assert lhs.device == rhs.device
        assert lhs.requires_grad == rhs.requires_grad
        assertTensorClose(lhs, rhs)

    def roundtrip(obj, fileobj):
        # Dump to the file, rewind, and load the pickled copy back.
        pickle.dump(obj, fileobj)
        fileobj.seek(0)
        return pickle.load(fileobj)

    with TemporaryFile() as f:
        values = np.random.randint(low=0, high=7, size=[233])
        original = tensor(values, device="xpux", dtype=np.int32)
        restored = roundtrip(original, f)
        check_equal(original, restored)

    with TemporaryFile() as f:
        original = Parameter(np.random.random(size=(233, 2)).astype(np.float32))
        restored = roundtrip(original, f)
        # Subclass identity must be preserved, not just the data.
        assert isinstance(restored, Parameter)
        check_equal(original, restored)

    with TemporaryFile() as f:
        original = Buffer(np.random.random(size=(2, 233)).astype(np.float32))
        restored = roundtrip(original, f)
        assert isinstance(restored, Buffer)
        check_equal(original, restored)
Example #5
0
 def get_mge_backward():
     """Run the wrapped PyTorch module twice and return the MegEngine gradient.

     Closes over ``a`` and ``b`` from the enclosing scope; differentiates the
     summed output with respect to the Parameter built from ``a``.
     """
     module = PyTorchModule(APlusB())
     x = Parameter(a.numpy(), dtype=np.float32)
     y = tensor(b.numpy(), dtype=np.float32)
     first = module(x, y)
     second = module(first, y)
     total = mge.functional.sum(second)
     return mge.functional.grad(total, x, use_virtual_grad=False)
Example #6
0
def test_set_value():
    """Parameter.set_value must overwrite the stored contents in place."""
    initial = np.random.random((2, 3)).astype(np.float32)
    param = Parameter(initial)
    replacement = np.random.random((2, 3)).astype(np.float32)
    param.set_value(replacement)
    assertTensorClose(param.numpy(), replacement, max_err=5e-6)
    mismatched = np.random.random((3, 3)).astype(np.float32)
    # TODO: add this
    # with pytest.raises(ValueError):
    #     param.set_value(mismatched)
    # The shape-mismatched value must not have replaced the contents.
    assertTensorClose(param.numpy(), replacement, max_err=5e-6)
Example #7
0
 def __init__(self):
     """Build a small module tree: nested submodule, batch norm, one
     Parameter and one Buffer (assignment order fixes child registration
     order — presumably relied on by state-dict tests; verify against caller).
     """
     super().__init__()
     # InnerModule is declared on the enclosing class (not visible here).
     self.i = self.InnerModule()
     self.bn = BatchNorm2d(4)
     # One trainable tensor and one non-trainable buffer, both of shape (1,).
     self.param = Parameter(np.ones(1, dtype=np.float32))
     self.buff = Buffer(np.ones(1, dtype=np.float32))
Example #8
0
 def __init__(self):
     """Wrap a PyTorch submodule and attach a learnable multiplier."""
     super().__init__()
     # SubModule is declared on the enclosing class (not visible here).
     self.torch_module = PyTorchModule(self.SubModule())
     # init_param comes from the surrounding scope; index 1 holds the
     # multiplier's initial value — TODO confirm its layout.
     self.multiplier = Parameter(init_param[1], dtype=np.float32)