Example #1
def test_range(check_lazy_shapes):
    # Check correctness.
    approx(B.range(5), np.arange(5))
    approx(B.range(2, 5), np.arange(2, 5))
    approx(B.range(2, 5, 2), np.arange(2, 5, 2))

    # Check various step sizes.
    for step in [1, 1.0, 0.25]:
        check_function(
            B.range,
            (
                Value(np.float32, tf.float32, torch.float32, jnp.float32),
                Value(0),
                Value(5),
                Value(step),
            ),
        )

    # Check two-argument specification.
    check_function(
        B.range,
        (
            Value(np.float32, tf.float32, torch.float32, jnp.float32),
            Value(0),
            Value(5),
        ),
    )

    # Check one-argument specification.
    check_function(
        B.range,
        (
            Value(np.float32, tf.float32, torch.float32, jnp.float32),
            Value(5),
        ),
    )
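
For reference, the calling convention exercised above mirrors `np.arange`, with an optional data type as the leading argument. A minimal sketch of that behaviour, assuming `B` is the backend dispatcher used throughout these tests (the `import lab as B` below is an assumption):

import numpy as np
import lab as B  # Assumed import of the backend dispatcher.

# One-, two-, and three-argument forms behave like np.arange.
assert np.allclose(B.range(5), np.arange(5))
assert np.allclose(B.range(2, 5), np.arange(2, 5))
# A leading data type fixes the dtype of the result.
assert B.range(np.float32, 0, 5, 0.25).dtype == np.float32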
Example #2
def test_take_tf(check_lazy_shapes):
    # Check that the TensorFlow implementation also accepts tensors as indices.
    a = Matrix(3, 4, 5)
    ref = Tensor(3)
    approx(B.take(a.tf(), ref.tf() > 0), B.take(a.np(), ref.np() > 0))
    approx(B.take(a.tf(), ref.np() > 0), B.take(a.np(), ref.np() > 0))
    approx(B.take(a.tf(), B.range(tf.int64, 2)), B.take(a.np(), B.range(2)))
    approx(B.take(a.tf(), B.range(np.int64, 2)), B.take(a.np(), B.range(2)))
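
The NumPy reference behaviour that these TensorFlow calls are compared against is plain row selection, by boolean mask or by index vector. A minimal sketch under the same assumption about `B`:

import numpy as np
import lab as B  # Assumed import of the backend dispatcher.

a = np.arange(12.0).reshape(3, 4)
mask = np.array([True, False, True])
# A boolean mask and an index vector both select rows along the first axis.
assert np.allclose(B.take(a, mask), a[mask])
assert np.allclose(B.take(a, B.range(2)), a[:2])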
Example #3
def test_take_perm(dtype, check_lazy_shapes):
    a = B.range(dtype, 10)
    perm = B.randperm(B.dtype_int(dtype), 10)
    a2 = B.take(a, perm)
    assert B.dtype(perm) == B.dtype_int(dtype)
    assert B.shape(a) == B.shape(a2)
    assert B.dtype(a) == B.dtype(a2)
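
In other words, taking a random permutation of a range only reorders its elements. A minimal sketch of that property with NumPy types, assuming the same `B`:

import numpy as np
import lab as B  # Assumed import of the backend dispatcher.

a = B.range(np.float64, 10)
perm = B.randperm(B.dtype_int(np.float64), 10)
# Permuting preserves the shape, the dtype, and the set of elements.
assert set(B.take(a, perm)) == set(a)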
Example #4
def test_choice(x, p, check_lazy_shapes):
    state = B.create_random_state(B.dtype(x))

    # Make `p` a keyword dictionary so that it can optionally be passed.
    if p is None:
        p = {}
    else:
        # Cast weights to the right framework.
        p = {"p": B.cast(B.dtype(x), p)}

    # Check shape.
    assert B.shape(B.choice(x, **p)) == B.shape(x)[1:]
    assert B.shape(B.choice(x, 5, **p)) == (5,) + B.shape(x)[1:]
    assert B.shape(B.choice(x, 5, 5, **p)) == (5, 5) + B.shape(x)[1:]

    assert isinstance(B.choice(state, x, **p)[0], B.RandomState)
    assert B.shape(B.choice(state, x, **p)[1]) == B.shape(x)[1:]
    assert B.shape(B.choice(state, x, 5, **p)[1]) == (5,) + B.shape(x)[1:]
    assert B.shape(B.choice(state, x, 5, 5, **p)[1]) == (5, 5) + B.shape(x)[1:]

    # Check correctness.
    dtype = B.dtype(x)
    choices = set(to_np(B.choice(B.range(dtype, 5), 1000)))
    assert choices == set(to_np(B.range(dtype, 5)))
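
`B.choice` supports both a stateless form, which returns only the sample, and a stateful form, which threads an explicit random state through the call. A minimal sketch of the two forms, assuming the same `B`:

import numpy as np
import lab as B  # Assumed import of the backend dispatcher.

x = B.range(np.float64, 5)
# Stateless: returns just the sample, with the requested leading shape.
sample = B.choice(x, 3)
assert B.shape(sample) == (3,)
# Stateful: pass a random state first and receive an updated state back.
state = B.create_random_state(np.float64)
state, sample = B.choice(state, x, 3)
assert isinstance(state, B.RandomState)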
Example #5
def test_compute_alpha():
    lags = 8
    alpha = out._compute_alpha(lags)
    x = np.sin(2 * np.pi / lags * B.range(10000))

    # Perform filtering.
    y = [x[0]]
    for xi in x:
        y.append(alpha * xi + (1 - alpha) * y[-1])
    y = np.array(y)

    # Check damping in dB.
    ratio = 10 * np.log10(np.mean(y**2) / np.mean(x**2))
    approx(ratio, -3, atol=5e-2)

    # Check not setting the cut-off.
    assert out._compute_alpha(None) == 1
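
The filter under test is a one-pole exponential smoother, y[n] = alpha * x[n] + (1 - alpha) * y[n - 1], and the check asserts that a sinusoid with period `lags` samples is attenuated by roughly 3 dB in power. Below is a self-contained sketch that reproduces the check with a hypothetical stand-in for `out._compute_alpha`; the package's own closed form may differ:

import numpy as np

def compute_alpha(lags):
    # Hypothetical stand-in: choose the smoothing coefficient so that the
    # one-pole filter's power response is 1/2 (-3 dB) at frequency 1 / lags.
    y = 1 - np.cos(2 * np.pi / lags)
    return -y + np.sqrt(y ** 2 + 2 * y)

alpha = compute_alpha(8)
x = np.sin(2 * np.pi / 8 * np.arange(10000))
y = [x[0]]
for xi in x:
    y.append(alpha * xi + (1 - alpha) * y[-1])
ratio = 10 * np.log10(np.mean(np.array(y) ** 2) / np.mean(x ** 2))
assert abs(ratio - (-3)) < 0.1  # Approximately -3 dB at the cut-off period.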
Example #6
    def __init__(
        self,
        scheme="structured",
        noise=5e-2,
        fix_noise=False,
        alpha=None,
        alpha_t=None,
        window=None,
        fix_window=False,
        lam=None,
        gamma=None,
        gamma_t=None,
        a=None,
        b=None,
        m_max=None,
        m_max_cap=150,
        n_z=None,
        scale=None,
        fix_scale=False,
        ms=None,
        n_u=None,
        n_u_cap=300,
        t_u=None,
        extend_t_z=None,
        t=None,
    ):
        AbstractGPCM.__init__(self, scheme)

        # Ensure that `t` is a vector.
        if t is not None:
            t = np.array(t)

        # Store whether to fix the length scale, window length, and noise.
        self.fix_scale = fix_scale
        self.fix_window = fix_window
        self.fix_noise = fix_noise

        # First initialise optimisable model parameters.
        if alpha is None:
            alpha = 1 / window

        if alpha_t is None:
            alpha_t = B.sqrt(2 * alpha)

        if lam is None:
            lam = 1 / scale

        self.noise = noise
        self.alpha = alpha
        self.alpha_t = alpha_t
        self.lam = lam

        # For convenience, also store the extent of the filter.
        self.extent = 4 / self.alpha

        # Then initialise fixed variables.
        if t_u is None:
            # Place inducing points up to where the filter has decayed to `exp(-pi) ≈ 4.32%`.
            t_u_max = B.pi / self.alpha

            # `n_u` is required to initialise `t_u`.
            if n_u is None:
                # Set it to two inducing points per wiggle, multiplied by two to account
                # for the longer range.
                n_u = int(np.ceil(2 * 2 * window / scale))
                if n_u > n_u_cap:
                    warnings.warn(
                        f"Using {n_u} inducing points for the filter, which is too "
                        f"many. It is capped to {n_u_cap}.",
                        category=UserWarning,
                    )
                    n_u = n_u_cap

            t_u = B.linspace(0, t_u_max, n_u)

        if n_u is None:
            n_u = B.shape(t_u)[0]

        if a is None:
            a = B.min(t) - B.max(t_u)

        if b is None:
            b = B.max(t)

        # First, try to determine `m_max` from `n_z`.
        if m_max is None and n_z is not None:
            m_max = int(np.ceil(n_z / 2))

        if m_max is None:
            freq = 1 / scale
            m_max = int(np.ceil(freq * (b - a)))
            if m_max > m_max_cap:
                warnings.warn(
                    f"Using {m_max} inducing features, which is too "
                    f"many. It is capped to {m_max_cap}.",
                    category=UserWarning,
                )
                m_max = m_max_cap

        if ms is None:
            ms = B.range(2 * m_max + 1)

        self.a = a
        self.b = b
        self.m_max = m_max
        self.ms = ms
        self.n_z = len(ms)
        self.n_u = n_u
        self.t_u = t_u

        # Initialise dependent model parameters.
        if gamma is None:
            gamma = 1 / (2 * (self.t_u[1] - self.t_u[0]))

        if gamma_t is None:
            gamma_t = B.sqrt(2 * gamma)

        # Must ensure that `gamma < alpha`.
        self.gamma = min(gamma, self.alpha / 1.5)
        self.gamma_t = gamma_t
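
Most of the defaults above cascade from just `window` and `scale`. For concreteness, a minimal sketch of that cascade with hypothetical values (window = 2, scale = 0.5) and no user-supplied overrides:

import numpy as np

window, scale = 2.0, 0.5                    # Hypothetical inputs.
alpha = 1 / window                          # Filter decay.
alpha_t = np.sqrt(2 * alpha)                # Filter normalisation.
lam = 1 / scale                             # Length-scale parameter.
t_u_max = np.pi / alpha                     # Filter decayed to exp(-pi).
n_u = int(np.ceil(2 * 2 * window / scale))  # Two points per wiggle, doubled.
t_u = np.linspace(0, t_u_max, n_u)
gamma = 1 / (2 * (t_u[1] - t_u[0]))         # Dependent; capped at alpha / 1.5.
print(alpha, alpha_t, lam, n_u, gamma)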
Example #7
        approx(B.to_active_device(a), a)

    # Check that numbers remain unchanged.
    a = 1
    assert B.to_active_device(a) is a


@pytest.mark.parametrize("t", [tf.float32, torch.float32, jnp.float32])
@pytest.mark.parametrize(
    "f",
    [
        lambda t: B.zeros(t, 2, 2),
        lambda t: B.ones(t, 2, 2),
        lambda t: B.eye(t, 2),
        lambda t: B.linspace(t, 0, 5, 10),
        lambda t: B.range(t, 10),
        lambda t: B.rand(t, 10),
        lambda t: B.randn(t, 10),
    ],
)
def test_on_device(f, t, check_lazy_shapes):
    f_t = f(t)  # Construct on the currently active device.

    # Set the active device to something else.
    B.ActiveDevice.active_name = "previous"

    # Check that explicit allocation on CPU works.
    with B.on_device("cpu"):
        assert B.device(f(t)) == B.device(f_t)

    # Also test inferring the device from a tensor.
Example #8
def test_inv_perm():
    perm = np.random.permutation(10)
    approx(perm[inv_perm(perm)], B.range(10))
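
`inv_perm` computes the inverse permutation, so indexing the permutation with its inverse recovers the identity. A minimal sketch of such a function (a hypothetical reimplementation, not necessarily the one under test):

import numpy as np

def inv_perm(perm):
    # Position perm[i] of the inverse holds i, so perm[inv_perm(perm)] is the identity.
    out = np.empty_like(perm)
    out[perm] = np.arange(len(perm))
    return out

perm = np.random.permutation(10)
assert np.all(perm[inv_perm(perm)] == np.arange(10))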