Example #1
# Assumed imports, not shown in the original snippet (based on the legacy
# MegEngine v0.x test API):
# import pickle
# from tempfile import TemporaryFile
# import numpy as np
# from megengine.core import Buffer, Parameter, tensor
# from megengine.test import assertTensorClose
def test_tensor_serialization():
    def tensor_eq(a, b):
        # Two tensors are considered equal when dtype, device,
        # requires_grad and values all match.
        assert a.dtype == b.dtype
        assert a.device == b.device
        assert a.requires_grad == b.requires_grad
        assertTensorClose(a, b)

    # A plain tensor survives a pickle round trip.
    with TemporaryFile() as f:
        data = np.random.randint(low=0, high=7, size=[233])
        a = tensor(data, device="xpux", dtype=np.int32)
        pickle.dump(a, f)
        f.seek(0)
        b = pickle.load(f)
        tensor_eq(a, b)

    # A Parameter keeps its type after unpickling.
    with TemporaryFile() as f:
        a = Parameter(np.random.random(size=(233, 2)).astype(np.float32))
        pickle.dump(a, f)
        f.seek(0)
        b = pickle.load(f)
        assert isinstance(b, Parameter)
        tensor_eq(a, b)

    # A Buffer keeps its type after unpickling.
    with TemporaryFile() as f:
        a = Buffer(np.random.random(size=(2, 233)).astype(np.float32))
        pickle.dump(a, f)
        f.seek(0)
        b = pickle.load(f)
        assert isinstance(b, Buffer)
        tensor_eq(a, b)
Example #2
def test_shape_warning():
    # In lazy (non-eager) graph mode, reading the shape of a constant
    # tensor such as a Buffer should not emit any warning.
    with Graph() as cg:
        cg.set_option("eager_evaluation", False)
        b = Buffer(np.ones((2, 3)).astype(np.float32))
        with pytest.warns(None) as record:
            print(b.shape)
        if len(record) != 0:
            raise ValueError(
                "Getting the shape of a constant Tensor should not raise any warning"
            )
Example #3
class FrozenBatchNorm2d(M.Module):
    """
    BatchNorm2d in which the weight, bias, running_mean and running_var
    are immutable: they are stored as Buffers rather than Parameters,
    so they are never updated by the optimizer.
    """

    def __init__(self, num_features, eps=1e-5):
        super().__init__()
        self.eps = eps
        self.weight = Buffer(np.ones(num_features, dtype=np.float32))
        self.bias = Buffer(np.zeros(num_features, dtype=np.float32))
        self.running_mean = Buffer(
            np.zeros((1, num_features, 1, 1), dtype=np.float32))
        self.running_var = Buffer(
            np.ones((1, num_features, 1, 1), dtype=np.float32))

    def forward(self, x):
        # Fold the frozen statistics into a single per-channel affine
        # transform: y = x * scale + bias, with scale = weight / sqrt(var + eps).
        scale = self.weight.reshape(
            1, -1, 1, 1) * (1.0 / (self.running_var + self.eps).sqrt())
        bias = self.bias.reshape(1, -1, 1, 1) - self.running_mean * scale
        return x * scale + bias
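
The forward pass above folds the frozen statistics into a single per-channel affine transform. As a sanity check, here is a minimal NumPy-only sketch (independent of MegEngine, using arbitrary made-up values) verifying that this folded form matches the textbook (x - mean) / sqrt(var + eps) * weight + bias formula:

import numpy as np

rng = np.random.default_rng(0)
num_features, eps = 4, 1e-5
x = rng.standard_normal((2, num_features, 3, 3)).astype(np.float32)

weight = rng.standard_normal(num_features).astype(np.float32)
bias = rng.standard_normal(num_features).astype(np.float32)
running_mean = rng.standard_normal((1, num_features, 1, 1)).astype(np.float32)
running_var = rng.random((1, num_features, 1, 1)).astype(np.float32) + 0.5

# Folded form, as computed in FrozenBatchNorm2d.forward above.
scale = weight.reshape(1, -1, 1, 1) / np.sqrt(running_var + eps)
folded = x * scale + (bias.reshape(1, -1, 1, 1) - running_mean * scale)

# Reference: the standard batch-norm formula with fixed statistics.
reference = (x - running_mean) / np.sqrt(running_var + eps) \
    * weight.reshape(1, -1, 1, 1) + bias.reshape(1, -1, 1, 1)

np.testing.assert_allclose(folded, reference, rtol=1e-5, atol=1e-5)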
Example #4
def __init__(self, num_features, eps=1e-5):
    super().__init__()
    self.eps = eps
    self.weight = Buffer(np.ones(num_features, dtype=np.float32))
    self.bias = Buffer(np.zeros(num_features, dtype=np.float32))
    self.running_mean = Buffer(
        np.zeros((1, num_features, 1, 1), dtype=np.float32))
    self.running_var = Buffer(
        np.ones((1, num_features, 1, 1), dtype=np.float32))
Example #5
def __init__(self):
    super().__init__()
    self.i = self.InnerModule()
    self.bn = BatchNorm2d(4)
    self.param = Parameter(np.ones(1, dtype=np.float32))
    self.buff = Buffer(np.ones(1, dtype=np.float32))
Example #6
def test_fill():
    # fill() overwrites every element in place; the value is cast to the
    # Buffer's dtype (float32 here).
    a = Buffer(np.zeros((2, 3), dtype=np.float32))
    a.fill(3)
    assertTensorClose(a.numpy(), np.full((2, 3), 3, dtype=np.float32))
    a.fill(124.568)
    assertTensorClose(a.numpy(), np.full((2, 3), 124.568, dtype=np.float32))