Example #1
File: slds.py  Project: MillerJJY/funsor
    def model(data):
        log_prob = funsor.Number(0.)

        # s is the discrete latent state,
        # x is the continuous latent state,
        # y is the observed state.
        s_curr = funsor.Tensor(torch.tensor(0), dtype=2)
        x_curr = funsor.Tensor(torch.tensor(0.))
        for t, y in enumerate(data):
            s_prev = s_curr
            x_prev = x_curr

            # A delayed sample statement.
            s_curr = funsor.Variable('s_{}'.format(t), funsor.bint(2))
            log_prob += dist.Categorical(trans_probs[s_prev], value=s_curr)

            # A delayed sample statement.
            x_curr = funsor.Variable('x_{}'.format(t), funsor.reals())
            log_prob += dist.Normal(x_prev, trans_noise[s_curr], value=x_curr)

            # Marginalize out previous delayed sample statements.
            if t > 0:
                log_prob = log_prob.reduce(ops.logaddexp,
                                           {s_prev.name, x_prev.name})

            # An observe statement.
            log_prob += dist.Normal(x_curr, emit_noise, value=y)

        log_prob = log_prob.reduce(ops.logaddexp)
        return log_prob
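
The `model` above closes over `trans_probs`, `trans_noise`, and `emit_noise`, which must already be bound in the enclosing scope. A minimal, hypothetical driver is sketched below; the parameter values and toy data are illustrative assumptions, not taken from the original project.

    import torch
    import funsor

    # On recent funsor releases you may also need funsor.set_backend("torch").
    trans_probs = funsor.Tensor(torch.tensor([[0.9, 0.1],
                                              [0.1, 0.9]]))  # rows: p(s_t | s_{t-1})
    trans_noise = funsor.Tensor(torch.tensor([0.1, 1.0]))    # per-state transition scale
    emit_noise = funsor.Tensor(torch.tensor(0.5))            # emission scale
    data = torch.randn(10)                                   # toy observations

    log_marginal = model(data)  # a funsor.Tensor wrapping the scalar log-evidence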
Example #2
    def model(data):
        log_prob = funsor.to_funsor(0.)
        xs_curr = [funsor.Tensor(torch.tensor(0.)) for var in var_names]

        for t, y in enumerate(data):
            xs_prev = xs_curr

            # A delayed sample statement.
            xs_curr = [
                funsor.Variable(name + '_{}'.format(t), funsor.reals())
                for name in var_names
            ]

            for i, x_curr in enumerate(xs_curr):
                log_prob += dist.Normal(trans_eqs[var_names[i]](xs_prev),
                                        torch.exp(trans_noises[i]),
                                        value=x_curr)

            if t > 0:
                log_prob = log_prob.reduce(
                    ops.logaddexp,
                    frozenset([x_prev.name for x_prev in xs_prev]))

            # An observe statement.
            log_prob += dist.Normal(emit_eq(xs_curr),
                                    torch.exp(emit_noise),
                                    value=y)

        # Marginalize out all remaining delayed variables.
        return log_prob.reduce(ops.logaddexp), log_prob.gaussian
Example #3
    def __call__(self):

        # calls pyro.param so that params are exposed and constraints applied
        # should not create any new torch.Tensors after __init__
        self.initialize_params()

        N_c = self.config["sizes"]["group"]
        N_s = self.config["sizes"]["individual"]

        log_prob = Tensor(torch.tensor(0.), OrderedDict())

        plate_g = Tensor(torch.zeros(N_c), OrderedDict([("g", bint(N_c))]))
        plate_i = Tensor(torch.zeros(N_s), OrderedDict([("i", bint(N_s))]))

        if self.config["group"]["random"] == "continuous":
            eps_g_dist = plate_g + dist.Normal(**self.params["eps_g"])(
                value="eps_g")
            log_prob += eps_g_dist

        # individual-level random effects
        if self.config["individual"]["random"] == "continuous":
            eps_i_dist = plate_g + plate_i + dist.Normal(
                **self.params["eps_i"])(value="eps_i")
            log_prob += eps_i_dist

        return log_prob
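
In both branches above, the "plate" is implemented by adding a zeros-valued Tensor whose only role is to carry a bint input: addition broadcasts the log-density factor over that input. A minimal sketch of the trick, with made-up sizes:

    import torch
    from collections import OrderedDict
    import funsor

    plate_g = funsor.Tensor(torch.zeros(3), OrderedDict([("g", funsor.bint(3))]))
    site = funsor.Tensor(torch.tensor(-1.5))  # a scalar log-density factor
    factor = plate_g + site                   # broadcasts: the result gains the "g" input
    assert set(factor.inputs) == {"g"}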
Example #4
def test_normal_gaussian_3(batch_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, bint(v)) for k, v in zip(batch_dims, batch_shape))

    loc = Tensor(torch.randn(batch_shape), inputs)
    scale = Tensor(torch.randn(batch_shape).exp(), inputs)
    value = Tensor(torch.randn(batch_shape), inputs)

    expected = dist.Normal(loc, scale, value)
    assert isinstance(expected, Tensor)
    check_funsor(expected, inputs, reals())

    g = dist.Normal(Variable('loc', reals()), scale, 'value')
    assert isinstance(g, Contraction)
    actual = g(loc=loc, value=value)
    check_funsor(actual, inputs, reals())

    assert_close(actual, expected, atol=1e-4)
Example #5
def test_normal_independent():
    loc = random_tensor(OrderedDict(), reals(2))
    scale = random_tensor(OrderedDict(), reals(2)).exp()
    fn = dist.Normal(loc['i'], scale['i'], value='z_i')
    assert fn.inputs['z_i'] == reals()
    d = Independent(fn, 'z', 'i', 'z_i')
    assert d.inputs['z'] == reals(2)
    sample = d.sample(frozenset(['z']))
    assert isinstance(sample, Contraction)
    assert sample.inputs['z'] == reals(2)
Example #6
    def model(data):
        log_prob = funsor.to_funsor(0.)

        x_curr = funsor.Tensor(torch.tensor(0.))
        for t, y in enumerate(data):
            x_prev = x_curr

            # A delayed sample statement.
            x_curr = funsor.Variable('x_{}'.format(t), funsor.reals())
            log_prob += dist.Normal(1 + x_prev / 2., trans_noise, value=x_curr)

            # Optionally marginalize out the previous state.
            if t > 0 and not args.lazy:
                log_prob = log_prob.reduce(ops.logaddexp, x_prev.name)

            # An observe statement.
            log_prob += dist.Normal(0.5 + 3 * x_curr, emit_noise, value=y)

        # Marginalize out all remaining delayed variables.
        log_prob = log_prob.reduce(ops.logaddexp)
        return log_prob
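
After the final reduce, `log_prob` is the log-evidence log p(y_0, ..., y_{T-1}) of a linear-Gaussian state space model with transition x_t ~ Normal(1 + x_{t-1}/2, trans_noise) and emission y_t ~ Normal(0.5 + 3*x_t, emit_noise); with `args.lazy`, the intermediate marginalizations are deferred to this single reduction.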
Example #7
def test_normal_affine(expr):

    scale = Tensor(torch.tensor(0.3), OrderedDict())
    x = Variable('x', reals())
    y = Variable('y', reals())

    expected = dist.Normal(x, scale, y)
    actual = eval(expr)

    assert isinstance(actual, Contraction)
    assert dict(actual.inputs) == dict(expected.inputs), (actual.inputs,
                                                          expected.inputs)

    for ta, te in zip(actual.terms, expected.terms):
        assert_close(ta.align(tuple(te.inputs)), te)
Example #8
    def loss_function(data, subsample_scale):
        # Lazily sample from the guide.
        loc, scale = encode(data)
        q = funsor.Independent(dist.Normal(loc['i'], scale['i'], value='z_i'),
                               'z', 'i', 'z_i')

        # Evaluate the model likelihood at the lazy value z.
        probs = decode('z')
        p = dist.Bernoulli(probs['x', 'y'], value=data['x', 'y'])
        p = p.reduce(ops.add, {'x', 'y'})

        # Construct an elbo. This is where sampling happens.
        elbo = funsor.Integrate(q, p - q, frozenset(['z']))
        elbo = elbo.reduce(ops.add, 'batch') * subsample_scale
        loss = -elbo
        return loss
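
Here `q` and `p` are log-density funsors, so the integral term evaluates to an expectation under exp(q):

    Integrate(q, p - q, {'z'}) = E_{z ~ exp(q)} [ p - q ]

i.e. an ELBO-style objective per batch element, which is then summed over 'batch' and rescaled by `subsample_scale` to account for data subsampling.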
Example #9
def update(p_x_tp1, t, y, var_names, emit_eq, emit_noise):
    """Computes p(x_{t+1} | y_{t+1}) from p(x_{t+1}). This is useful for iterating 1-step ahead predictions"""
    log_prob = p_x_tp1

    x_tp1s = [
        funsor.Variable(name + '_{}'.format(t + 1), funsor.reals())
        for name in var_names
    ]
    # log p(x_{t+1}, y_{t+1}) = log p(x_{t+1}) + log p(y_{t+1} | x_{t+1})
    log_prob += dist.Normal(emit_eq(x_tp1s), emit_noise, value=y)

    # log p(y_{t+1}): marginalize the joint over the latent variables
    log_p_y = log_prob.reduce(ops.logaddexp,
                              frozenset([x_tp1.name for x_tp1 in x_tp1s]))

    # Bayes rule in log space: log p(x_{t+1} | y_{t+1}) = log p(x_{t+1}, y_{t+1}) - log p(y_{t+1})
    log_p_x_y = log_prob - log_p_y
    return log_p_x_y
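
The computation is the Bayes update in log space:

    log p(x_{t+1} | y_{t+1}) = log p(x_{t+1}) + log p(y_{t+1} | x_{t+1}) - log p(y_{t+1})

where the Normal factor supplies the emission likelihood and log p(y_{t+1}) is obtained by logaddexp-reducing (marginalizing) the joint over the x_{t+1} variables.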
Example #10
def one_step_prediction(p_x_tp1, t, var_names, emit_eq, emit_noise):
    """Computes p(y_{t+1}) from p(x_{t+1}). We assume y_t is scalar, so only one emit_eq"""
    log_prob = p_x_tp1

    x_tp1s = [
        funsor.Variable(name + '_{}'.format(t + 1), funsor.reals())
        for name in var_names
    ]
    y_tp1 = funsor.Variable('y_{}'.format(t + 1), funsor.reals())
    log_prob += dist.Normal(emit_eq(x_tp1s),
                            torch.exp(emit_noise),
                            value=y_tp1)
    log_prob = log_prob.reduce(ops.logaddexp,
                               frozenset([x_tp1.name for x_tp1 in x_tp1s]))

    return log_prob
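
The final reduce performs the marginalization

    p(y_{t+1}) = ∫ p(y_{t+1} | x_{t+1}) p(x_{t+1}) dx_{t+1}

in log space: reducing with ops.logaddexp over the real-valued x_{t+1} variables corresponds to integrating them out.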
Example #11
def test_normal_affine(expr):

    scale = Tensor(torch.tensor(0.3), OrderedDict())
    x = Variable('x', reals())
    y = Variable('y', reals())

    expected = dist.Normal(x, scale, y)
    actual = eval(expr)

    assert isinstance(actual, Joint)
    assert dict(actual.inputs) == dict(expected.inputs), (actual.inputs,
                                                          expected.inputs)

    assert_close(actual.gaussian.align(tuple(expected.gaussian.inputs)),
                 expected.gaussian)
    assert_close(actual.discrete.align(tuple(expected.discrete.inputs)),
                 expected.discrete)
Example #12
def next_state(p_x_t, t, var_names, trans_eqs, trans_noises):
    """Computes p(x_{t+1}) from p(x_t)"""
    log_prob = p_x_t

    x_ts = [
        funsor.Variable(name + '_{}'.format(t), funsor.reals())
        for name in var_names
    ]
    x_tp1s = [
        funsor.Variable(name + '_{}'.format(t + 1), funsor.reals())
        for name in var_names
    ]

    for i, x_tp1 in enumerate(x_tp1s):
        log_prob += dist.Normal(trans_eqs[var_names[i]](x_ts),
                                torch.exp(trans_noises[i]),
                                value=x_tp1)

    log_prob = log_prob.reduce(ops.logaddexp,
                               frozenset([x_t.name for x_t in x_ts]))
    return log_prob
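
This is the prediction (Chapman-Kolmogorov) step, p(x_{t+1}) = ∫ p(x_{t+1} | x_t) p(x_t) dx_t, again with integration expressed as a logaddexp reduction. Combined with `one_step_prediction` and `update` from the earlier examples it yields a filtering loop; a hypothetical sketch, assuming an initial `p_x_0` over the time-0 variables and the same parameter names as the original project (the index bookkeeping is illustrative):

# Hypothetical filtering loop over observations; p_x_0, data, var_names,
# trans_eqs, trans_noises, emit_eq and emit_noise are assumed to be defined.
p_x_t = p_x_0
for t, y in enumerate(data):
    p_x_tp1 = next_state(p_x_t, t, var_names, trans_eqs, trans_noises)         # predict
    p_y_tp1 = one_step_prediction(p_x_tp1, t, var_names, emit_eq, emit_noise)  # evidence
    p_x_t = update(p_x_tp1, t, y, var_names, emit_eq, emit_noise)              # condition on y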
Example #13
def test_normal_density(batch_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, bint(v)) for k, v in zip(batch_dims, batch_shape))

    @funsor.of_shape(reals(), reals(), reals())
    def normal(loc, scale, value):
        return -((value - loc)**2) / (2 * scale**2) - scale.log() - math.log(
            math.sqrt(2 * math.pi))

    check_funsor(normal, {
        'loc': reals(),
        'scale': reals(),
        'value': reals()
    }, reals())

    loc = Tensor(torch.randn(batch_shape), inputs)
    scale = Tensor(torch.randn(batch_shape).exp(), inputs)
    value = Tensor(torch.randn(batch_shape), inputs)
    expected = normal(loc, scale, value)
    check_funsor(expected, inputs, reals())

    actual = dist.Normal(loc, scale, value)
    check_funsor(actual, inputs, reals())
    assert_close(actual, expected)
Example #14
def test_normal_defaults():
    loc = Variable('loc', reals())
    scale = Variable('scale', reals())
    value = Variable('value', reals())
    assert dist.Normal(loc, scale) is dist.Normal(loc, scale, value)
Example #15
    def __call__(self):

        # calls pyro.param so that params are exposed and constraints applied
        # should not create any new torch.Tensors after __init__
        self.initialize_params()

        N_state = self.config["sizes"]["state"]

        # initialize gamma to uniform
        gamma = Tensor(
            torch.zeros((N_state, N_state)),
            OrderedDict([("y_prev", bint(N_state))]),
        )

        N_v = self.config["sizes"]["random"]
        N_c = self.config["sizes"]["group"]
        log_prob = []

        plate_g = Tensor(torch.zeros(N_c), OrderedDict([("g", bint(N_c))]))

        # group-level random effects
        if self.config["group"]["random"] == "discrete":
            # group-level discrete effect
            e_g = Variable("e_g", bint(N_v))
            e_g_dist = plate_g + dist.Categorical(**self.params["e_g"])(value=e_g)

            log_prob.append(e_g_dist)

            eps_g = (plate_g + self.params["eps_g"]["theta"])(e_g=e_g)

        elif self.config["group"]["random"] == "continuous":
            eps_g = Variable("eps_g", reals(N_state))
            eps_g_dist = plate_g + dist.Normal(**self.params["eps_g"])(value=eps_g)

            log_prob.append(eps_g_dist)
        else:
            eps_g = to_funsor(0.)

        N_s = self.config["sizes"]["individual"]

        plate_i = Tensor(torch.zeros(N_s), OrderedDict([("i", bint(N_s))]))
        # individual-level random effects
        if self.config["individual"]["random"] == "discrete":
            # individual-level discrete effect
            e_i = Variable("e_i", bint(N_v))
            e_i_dist = plate_g + plate_i + dist.Categorical(
                **self.params["e_i"]
            )(value=e_i) * self.raggedness_masks["individual"](t=0)

            log_prob.append(e_i_dist)

            eps_i = (plate_i + plate_g + self.params["eps_i"]["theta"](e_i=e_i))

        elif self.config["individual"]["random"] == "continuous":
            eps_i = Variable("eps_i", reals(N_state))
            eps_i_dist = plate_g + plate_i + dist.Normal(**self.params["eps_i"])(value=eps_i)

            log_prob.append(eps_i_dist)
        else:
            eps_i = to_funsor(0.)

        # add group-level and individual-level random effects to gamma
        gamma = gamma + eps_g + eps_i

        N_state = self.config["sizes"]["state"]

        # we've accounted for all effects, now actually compute gamma_y
        gamma_y = gamma(y_prev="y(t=1)")

        y = Variable("y", bint(N_state))
        y_dist = plate_g + plate_i + dist.Categorical(
            probs=gamma_y.exp() / gamma_y.exp().sum()
        )(value=y)

        # observation 1: step size
        step_dist = plate_g + plate_i + dist.Gamma(
            **{k: v(y_curr=y) for k, v in self.params["step"].items()}
        )(value=self.observations["step"])

        # step size zero-inflation
        if self.config["zeroinflation"]:
            step_zi = dist.Categorical(probs=self.params["zi_step"]["zi_param"](y_curr=y))(
                value="zi_step")
            step_zi_dist = plate_g + plate_i + dist.Delta(self.config["MISSING"], 0.)(
                value=self.observations["step"])
            step_dist = (step_zi + Stack("zi_step", (step_dist, step_zi_dist))).reduce(ops.logaddexp, "zi_step")

        # observation 2: step angle
        angle_dist = plate_g + plate_i + dist.VonMises(
            **{k: v(y_curr=y) for k, v in self.params["angle"].items()}
        )(value=self.observations["angle"])

        # observation 3: dive activity
        omega_dist = plate_g + plate_i + dist.Beta(
            **{k: v(y_curr=y) for k, v in self.params["omega"].items()}
        )(value=self.observations["omega"])

        # dive activity zero-inflation
        if self.config["zeroinflation"]:
            omega_zi = dist.Categorical(probs=self.params["zi_omega"]["zi_param"](y_curr=y))(
                value="zi_omega")
            omega_zi_dist = plate_g + plate_i + dist.Delta(self.config["MISSING"], 0.)(
                value=self.observations["omega"])
            omega_dist = (omega_zi + Stack("zi_omega", (omega_dist, omega_zi_dist))).reduce(ops.logaddexp, "zi_omega")

        # finally, construct the term for parallel scan reduction
        hmm_factor = step_dist + angle_dist + omega_dist
        hmm_factor = hmm_factor * self.raggedness_masks["individual"]
        hmm_factor = hmm_factor * self.raggedness_masks["timestep"]
        # copy masking behavior of pyro.infer.TraceEnum_ELBO._compute_model_factors
        hmm_factor = hmm_factor + y_dist
        log_prob.insert(0, hmm_factor)

        return log_prob