Example #1
def model_1(capture_history, sex):
    N, T = capture_history.shape
    phi = pyro.sample("phi", dist.Uniform(0.0, 1.0))  # survival probability
    rho = pyro.sample("rho", dist.Uniform(0.0, 1.0))  # recapture probability

    with pyro.plate("animals", N, dim=-1):
        z = torch.ones(N)
        # we use this mask to eliminate extraneous log probabilities
        # that arise for a given individual before its first capture.
        first_capture_mask = torch.zeros(N).bool()
        for t in pyro.markov(range(T)):
            with poutine.mask(mask=first_capture_mask):
                mu_z_t = first_capture_mask.float() * phi * z + (
                    1 - first_capture_mask.float())
                # we use parallel enumeration to exactly sum out
                # the discrete states z_t.
                z = pyro.sample(
                    "z_{}".format(t),
                    dist.Bernoulli(mu_z_t),
                    infer={"enumerate": "parallel"},
                )
                mu_y_t = rho * z
                pyro.sample("y_{}".format(t),
                            dist.Bernoulli(mu_y_t),
                            obs=capture_history[:, t])
            first_capture_mask |= capture_history[:, t].bool()
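
Because the discrete z_t sites are enumerated in parallel, a model like this is typically fit with SVI using TraceEnum_ELBO and a guide over the continuous sites only. A minimal sketch, assuming synthetic data and an autoguide (the guide setup and data below are illustrative, not part of the original example):

import torch
import pyro
import pyro.distributions as dist
from pyro import poutine
from pyro.infer import SVI, TraceEnum_ELBO
from pyro.infer.autoguide import AutoDiagonalNormal

capture_history = torch.bernoulli(0.5 * torch.ones(50, 7))  # fake N=50, T=7 data
# guide only the continuous sites; the z_t sites are summed out by enumeration
guide = AutoDiagonalNormal(poutine.block(model_1, expose=["phi", "rho"]))
svi = SVI(model_1, guide, pyro.optim.Adam({"lr": 0.01}),
          TraceEnum_ELBO(max_plate_nesting=1))
for step in range(200):
    svi.step(capture_history, None)  # `sex` is unused in model_1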
Example #2
def model_2(capture_history, sex):
    N, T = capture_history.shape
    rho = pyro.sample("rho", dist.Uniform(0.0, 1.0))  # recapture probability

    z = torch.ones(N)
    first_capture_mask = torch.zeros(N).bool()
    # we create the plate once, outside of the loop over t
    animals_plate = pyro.plate("animals", N, dim=-1)
    for t in pyro.markov(range(T)):
        # note that phi_t needs to be outside the plate, since
        # phi_t is shared across all N individuals
        phi_t = pyro.sample("phi_{}".format(t), dist.Uniform(0.0, 1.0)) if t > 0 \
                else 1.0
        with animals_plate, poutine.mask(mask=first_capture_mask):
            mu_z_t = first_capture_mask.float() * phi_t * z + (
                1 - first_capture_mask.float())
            # we use parallel enumeration to exactly sum out
            # the discrete states z_t.
            z = pyro.sample("z_{}".format(t),
                            dist.Bernoulli(mu_z_t),
                            infer={"enumerate": "parallel"})
            mu_y_t = rho * z
            pyro.sample("y_{}".format(t),
                        dist.Bernoulli(mu_y_t),
                        obs=capture_history[:, t])
        first_capture_mask |= capture_history[:, t].bool()
Example #3
def init_params(data):
    params = {}
    # assign init values for parameters
    params["sigma_a"] = pyro.sample("sigma_a", dist.Uniform(0., 100.))
    params["sigma_y"] = pyro.sample("sigma_y", dist.Uniform(0., 100.))

    return params
Example #4
def init_params(data):
    params = {}
    params["sigma_a1"] = pyro.sample("sigma_a1", dist.Uniform(0., 100.))
    params["sigma_a2"] = pyro.sample("sigma_a2", dist.Uniform(0., 100.))
    params["sigma_y"] = pyro.sample("sigma_y", dist.Uniform(0., 100.))

    return params
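
These init_params helpers draw starting values from the priors; presumably they seed an MCMC run through the initial_params argument. A hedged usage sketch (the model and data dict are assumptions, not shown in these snippets):

from pyro.infer import MCMC, NUTS

params = init_params(data)
kernel = NUTS(model)
mcmc = MCMC(kernel, num_samples=500, warmup_steps=500, initial_params=params)
mcmc.run(data, params)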
Example #5
def model(data, params):
    # initialize data
    N = data["N"]
    n_grade = data["n_grade"]
    n_grade_pair = data["n_grade_pair"]
    n_pair = data["n_pair"]
    grade = data["grade"].long() - 1
    grade_pair = data["grade_pair"].long() - 1
    pair = data["pair"].long() - 1
    pre_test = data["pre_test"]
    treatment = data["treatment"]
    y = data["y"]

    # model block
    with pyro.plate('n_grade_pair', n_grade_pair):
        mu_a = pyro.sample("mu_a", dist.Normal(0., 1.))
        sigma_a = pyro.sample("sigma_a", dist.Uniform(0., 100.))
    sigma_a_hat = sigma_a[grade_pair]
    mu_a_hat = 40 * mu_a[grade_pair]
    with pyro.plate('n_pair', n_pair):
        a = pyro.sample("a", dist.Normal(mu_a_hat, sigma_a_hat))
    with pyro.plate('n_grade', n_grade):
        b = pyro.sample("b", dist.Normal(0., 100.))
        c = pyro.sample("c", dist.Normal(0., 100.))
        sigma_y = pyro.sample("sigma_y", dist.Uniform(0., 100.))
    sigma_y_hat = sigma_y[grade]
    with pyro.plate("data", N):
        y_hat = a[pair] + b[grade] * treatment + c[grade] * pre_test
        y = pyro.sample("y", dist.Normal(y_hat, sigma_y_hat), obs=y)
Example #6
def test_hmc(model_class, X, y, kernel, likelihood):
    if model_class is SparseGPRegression or model_class is VariationalSparseGP:
        gp = model_class(X, y, kernel, X, likelihood)
    else:
        gp = model_class(X, y, kernel, likelihood)

    kernel.set_prior("variance",
                     dist.Uniform(torch.tensor(0.5), torch.tensor(1.5)))
    kernel.set_prior("lengthscale",
                     dist.Uniform(torch.tensor(1.0), torch.tensor(3.0)))

    hmc_kernel = HMC(gp.model, step_size=1)
    mcmc_run = MCMC(hmc_kernel, num_samples=10)

    post_trace = defaultdict(list)
    for trace, _ in mcmc_run._traces():
        variance_name = param_with_module_name(kernel.name, "variance")
        post_trace["variance"].append(trace.nodes[variance_name]["value"])
        lengthscale_name = param_with_module_name(kernel.name, "lengthscale")
        post_trace["lengthscale"].append(
            trace.nodes[lengthscale_name]["value"])
        if model_class is VariationalGP:
            f_name = param_with_module_name(gp.name, "f")
            post_trace["f"].append(trace.nodes[f_name]["value"])
        if model_class is VariationalSparseGP:
            u_name = param_with_module_name(gp.name, "u")
            post_trace["u"].append(trace.nodes[u_name]["value"])

    for param in post_trace:
        param_mean = torch.mean(torch.stack(post_trace[param]), 0)
        logger.info("Posterior mean - {}".format(param))
        logger.info(param_mean)
Example #7
def continuous_model(args, data):
    # Sample global parameters.
    rate_s, prob_i, rho = global_model(args.population)

    # Sample reparameterizing variables.
    S_aux = pyro.sample("S_aux",
                        dist.Uniform(-0.5, args.population + 0.5)
                            .mask(False).expand(data.shape).to_event(1))
    I_aux = pyro.sample("I_aux",
                        dist.Uniform(-0.5, args.population + 0.5)
                            .mask(False).expand(data.shape).to_event(1))

    # Sequentially sample time-local variables.
    S_curr = torch.tensor(args.population - 1.)
    I_curr = torch.tensor(1.)
    for t, datum in poutine.markov(enumerate(data)):
        S_prev, I_prev = S_curr, I_curr
        S_curr = quantize("S_{}".format(t), S_aux[..., t], min=0, max=args.population)
        I_curr = quantize("I_{}".format(t), I_aux[..., t], min=0, max=args.population)

        # Now we reverse the computation.
        S2I = S_prev - S_curr
        I2R = I_prev - I_curr + S2I
        pyro.sample("S2I_{}".format(t),
                    dist.ExtendedBinomial(S_prev, -(rate_s * I_prev).expm1()),
                    obs=S2I)
        pyro.sample("I2R_{}".format(t),
                    dist.ExtendedBinomial(I_prev, prob_i),
                    obs=I2R)
        pyro.sample("obs_{}".format(t),
                    dist.ExtendedBinomial(S2I, rho),
                    obs=datum)
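
The quantize helper is not shown here. As a rough stand-in for intuition (an assumption about its role, not the source's implementation, which preserves probability mass more carefully), it maps each real-valued auxiliary sample to a nearby integer state and records it in the trace:

def quantize(name, x_real, min=0, max=1):
    # illustrative stand-in: round to the nearest integer in [min, max]
    # and register the result as a deterministic site
    x = x_real.round().clamp(min=min, max=max)
    return pyro.deterministic(name, x)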
Example #8
def model_3(capture_history, sex):
    def logit(p):
        return torch.log(p) - torch.log1p(-p)

    N, T = capture_history.shape
    phi_mean = pyro.sample("phi_mean",
                           dist.Uniform(0.0, 1.0))  # mean survival probability
    phi_logit_mean = logit(phi_mean)
    # controls temporal variability of survival probability
    phi_sigma = pyro.sample("phi_sigma", dist.Uniform(0.0, 10.0))
    rho = pyro.sample("rho", dist.Uniform(0.0, 1.0))  # recapture probability

    z = torch.ones(N)
    first_capture_mask = torch.zeros(N).bool()
    # we create the plate once, outside of the loop over t
    animals_plate = pyro.plate("animals", N, dim=-1)
    for t in pyro.markov(range(T)):
        phi_logit_t = pyro.sample("phi_logit_{}".format(t),
                                  dist.Normal(phi_logit_mean, phi_sigma)) if t > 0 \
                      else torch.tensor(0.0)
        phi_t = torch.sigmoid(phi_logit_t)
        with animals_plate, poutine.mask(mask=first_capture_mask):
            mu_z_t = first_capture_mask.float() * phi_t * z + (
                1 - first_capture_mask.float())
            # we use parallel enumeration to exactly sum out
            # the discrete states z_t.
            z = pyro.sample("z_{}".format(t),
                            dist.Bernoulli(mu_z_t),
                            infer={"enumerate": "parallel"})
            mu_y_t = rho * z
            pyro.sample("y_{}".format(t),
                        dist.Bernoulli(mu_y_t),
                        obs=capture_history[:, t])
        first_capture_mask |= capture_history[:, t].bool()
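
The local logit helper is the inverse of torch.sigmoid, so phi_t = sigmoid(phi_logit_t) maps the Normal draw back to a probability. A quick check (illustrative only):

import torch

p = torch.tensor(0.3)
logit_p = torch.log(p) - torch.log1p(-p)
print(torch.sigmoid(logit_p))  # tensor(0.3000)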
Example #9
def OldSCM(vae, mu, sigma):
    z_dim = vae.z_dim
    Ny, Y, ys = [], [], []
    Nx = pyro.sample("Nx", dist.Uniform(torch.zeros(vae.image_dim), torch.ones(vae.image_dim)))
    Nz = pyro.sample("Nz", dist.Normal(torch.zeros(z_dim), torch.ones(z_dim)))
    m = torch.distributions.gumbel.Gumbel(torch.tensor(0.0), torch.tensor(1.0))
    for label_id in range(6):
        name = vae.label_names[label_id]
        length = vae.label_shape[label_id]
        new = pyro.sample("Ny_%s"%name, dist.Uniform(torch.zeros(length), torch.ones(length)) )
        Ny.append(new)
        gumbel_vars = m.sample((int(length),))
        max_ind = torch.argmax(torch.log(new) + gumbel_vars).item()
        Y.append(pyro.sample("Y_%s"%name, dist.Normal(torch.tensor(max_ind * 1.0), 1e-4)))
#         Y.append(pyro.sample("Y_%s"%name, dist.Delta(torch.tensor(max_ind*1.0))))
        ys.append(torch.nn.functional.one_hot(torch.tensor(max_ind), int(length)))  
    Y = torch.stack(Y)
    ys = torch.cat(ys).to(torch.float32).reshape(1,-1).cuda()
    Z = pyro.sample("Z", dist.Normal(mu + Nz*sigma, 1e-4))
#     Z = pyro.sample("Z", dist.Delta(mu + Nz*sigma))
    zs = Z.cuda()
    p = vae.decoder.forward(zs,ys)
    X = pyro.sample("X", dist.Normal((Nx < p.cpu()).type(torch.float), 1e-4))
#     X = pyro.sample("X", dist.Delta((Nx < p.cpu()).type(torch.float)))
    return X, Y, Z
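
The argmax over log-weights plus Gumbel noise above is the Gumbel-max trick: the chosen index is distributed as a Categorical with probabilities proportional to the weights. A self-contained check (illustrative, not part of the model):

import torch

probs = torch.tensor([0.2, 0.5, 0.3])
g = torch.distributions.Gumbel(0.0, 1.0).sample((10000, 3))
idx = (probs.log() + g).argmax(-1)
print(torch.bincount(idx, minlength=3).float() / 10000)  # approx [0.2, 0.5, 0.3]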
Example #10
    def __call__(self, name, fn, obs):
        fn, event_dim = self._unwrap(fn)
        assert isinstance(fn, dist.Stable) and fn.coords == "S0"

        # Strategy: Let X ~ S0(a,b,s,m) be the stable variable of interest.
        # 1. WLOG scale and shift so s=1 and m=0, additionally shifting to convert
        #    from Zolotarev's S parameterization to Nolan's S0 parameterization.
        # 2. Decompose X = S + T, where
        #    S ~ S(a,0,...,0) is symmetric and
        #    T ~ S(a,sgn(b),...,0) is totally skewed.
        # 3. Decompose S = G * sqrt(Z) via the symmetric strategy, where
        #    Z ~ S(a/2,1,...,0) is totally-skewed and
        #    G ~ Normal(0,1) is Gaussian.
        # 4. Defer the totally-skewed Z and T to the Chambers-Mallows-Stuck
        #    strategy: Z = f(Unif,Exp), T = f(Unif,Exp).
        #
        # To derive the parameters of S and T, we solve the equations
        #
        #   T.stability = a            S.stability = a
        #   T.skew = sgn(b)            S.skew = 0
        #   T.loc = 0                  S.loc = 0
        #
        #   s = (S.scale**a + T.scale**a)**(1/a) = 1       # by step 1.
        #
        #       S.skew * S.scale**a + T.skew * T.scale**a
        #   b = ----------------------------------------- = sgn(b) * T.scale**a
        #                S.scale**a + T.scale**a
        # yielding
        #
        #   T.scale = |b| ** (1/a)     S.scale = (1 - |b|) ** (1/a)

        # Draw parameter-free noise.
        proto = fn.stability
        half_pi = proto.new_full(proto.shape, math.pi / 2)
        one = proto.new_ones(proto.shape)
        zu = pyro.sample("{}_z_uniform".format(name),
                         self._wrap(dist.Uniform(-half_pi, half_pi), event_dim))
        ze = pyro.sample("{}_z_exponential".format(name),
                         self._wrap(dist.Exponential(one), event_dim))
        tu = pyro.sample("{}_t_uniform".format(name),
                         self._wrap(dist.Uniform(-half_pi, half_pi), event_dim))
        te = pyro.sample("{}_t_exponential".format(name),
                         self._wrap(dist.Exponential(one), event_dim))

        # Differentiably transform.
        a = fn.stability
        z = _unsafe_standard_stable(a / 2, 1, zu, ze, coords="S")
        t = _standard_stable(a, one, tu, te, coords="S0")
        a_inv = a.reciprocal()
        skew_abs = fn.skew.abs()
        t_scale = skew_abs.pow(a_inv)
        s_scale = (1 - skew_abs).pow(a_inv)
        shift = _safe_shift(a, skew_abs, t_scale)
        loc = fn.loc + fn.scale * fn.skew.sign() * (t * t_scale + shift)
        scale = fn.scale * s_scale * z.sqrt() * (math.pi / 4 * a).cos().pow(a_inv)
        scale = scale.clamp(min=torch.finfo(scale.dtype).tiny)

        # Construct a scaled Gaussian, using Stable(2,0,s,m) == Normal(m,s*sqrt(2)).
        new_fn = self._wrap(dist.Normal(loc, scale * (2 ** 0.5)), event_dim)
        return new_fn, obs
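
The last step relies on the identity Stable(2, 0, s, m) == Normal(m, s * sqrt(2)), which is easy to verify empirically (illustrative only):

import torch
import pyro.distributions as dist

s, m = 1.3, 0.7
x = dist.Stable(torch.tensor(2.0), torch.tensor(0.0), s, m).sample((100000,))
print(x.mean(), x.std())  # expect roughly m = 0.7 and s * sqrt(2) ~= 1.84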
Example #11
def model_4(capture_history, sex):
    N, T = capture_history.shape
    # survival probabilities for males/females
    phi_male = pyro.sample("phi_male", dist.Uniform(0.0, 1.0))
    phi_female = pyro.sample("phi_female", dist.Uniform(0.0, 1.0))
    # we construct a N-dimensional vector that contains the appropriate
    # phi for each individual given its sex (female = 0, male = 1)
    phi = sex * phi_male + (1.0 - sex) * phi_female
    rho = pyro.sample("rho", dist.Uniform(0.0, 1.0))  # recapture probability

    with pyro.plate("animals", N, dim=-1):
        z = torch.ones(N)
        # we use this mask to eliminate extraneous log probabilities
        # that arise for a given individual before its first capture.
        first_capture_mask = torch.zeros(N).bool()
        for t in pyro.markov(range(T)):
            with poutine.mask(mask=first_capture_mask):
                mu_z_t = first_capture_mask.float() * phi * z + (
                    1 - first_capture_mask.float())
                # we use parallel enumeration to exactly sum out
                # the discrete states z_t.
                z = pyro.sample("z_{}".format(t),
                                dist.Bernoulli(mu_z_t),
                                infer={"enumerate": "parallel"})
                mu_y_t = rho * z
                pyro.sample("y_{}".format(t),
                            dist.Bernoulli(mu_y_t),
                            obs=capture_history[:, t])
            first_capture_mask |= capture_history[:, t].bool()
Example #12
def test_expand(sample_shape, batch_shape, event_shape):
    ones_shape = torch.Size((1,) * len(batch_shape))
    mask = torch.empty(ones_shape).bernoulli_(0.5).bool()
    zero = torch.zeros(ones_shape + event_shape)
    d0 = dist.Uniform(zero - 2, zero + 1).to_event(len(event_shape))
    d1 = dist.Uniform(zero - 1, zero + 2).to_event(len(event_shape))
    d = dist.MaskedMixture(mask, d0, d1)

    assert d.sample().shape == ones_shape + event_shape
    assert d.mean.shape == ones_shape + event_shape
    assert d.variance.shape == ones_shape + event_shape
    assert d.sample(sample_shape).shape == sample_shape + ones_shape + event_shape

    assert (
        d.expand(sample_shape + batch_shape).batch_shape == sample_shape + batch_shape
    )
    assert (
        d.expand(sample_shape + batch_shape).sample().shape
        == sample_shape + batch_shape + event_shape
    )
    assert (
        d.expand(sample_shape + batch_shape).mean.shape
        == sample_shape + batch_shape + event_shape
    )
    assert (
        d.expand(sample_shape + batch_shape).variance.shape
        == sample_shape + batch_shape + event_shape
    )
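
MaskedMixture selects elementwise between its two components: d0 where the mask is False and d1 where it is True. A tiny illustrative draw (shapes chosen for clarity, not taken from the test):

mask = torch.tensor([True, False, True])
d0 = dist.Normal(-10.0, 0.1).expand([3])
d1 = dist.Normal(10.0, 0.1).expand([3])
print(dist.MaskedMixture(mask, d0, d1).sample())  # approx [10., -10., 10.]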
Example #13
def guide_3DA(data):    
    # Hyperparameters    
    a_psi_1 = pyro.param('a_psi_1', torch.tensor(-np.pi), constraint=constraints.greater_than(-3.15)) 
    b_psi_1 = pyro.param('b_psi_1', torch.tensor(np.pi), constraint=constraints.less_than(3.15))
    x_psi_1 = pyro.param('x_psi_1', torch.tensor(2.), constraint=constraints.positive)
    
    a_phi_2 = pyro.param('a_phi_2', torch.tensor(-np.pi), constraint=constraints.greater_than(-3.15)) 
    b_phi_2 = pyro.param('b_phi_2', torch.tensor(np.pi), constraint=constraints.less_than(3.15))
    x_phi_2 = pyro.param('x_phi_2', torch.tensor(2.), constraint=constraints.positive)
    
    a_psi_2 = pyro.param('a_psi_2', torch.tensor(-np.pi), constraint=constraints.greater_than(-3.15)) 
    b_psi_2 = pyro.param('b_psi_2', torch.tensor(np.pi), constraint=constraints.less_than(3.15))
    x_psi_2 = pyro.param('x_psi_2', torch.tensor(2.), constraint=constraints.positive)
    
    a_phi_3 = pyro.param('a_phi_3', torch.tensor(-np.pi), constraint=constraints.greater_than(-3.15)) 
    b_phi_3 = pyro.param('b_phi_3', torch.tensor(np.pi), constraint=constraints.less_than(3.15))
    x_phi_3 = pyro.param('x_phi_3', torch.tensor(2.), constraint=constraints.positive)
    
    # Sampling mu and kappa
    pyro.sample("mu_psi_1", dist.Uniform(a_psi_1, b_psi_1))
    pyro.sample("inv_kappa_psi_1", dist.HalfNormal(x_psi_1))    
    pyro.sample("mu_phi_2", dist.Uniform(a_phi_2, b_phi_2))
    pyro.sample("inv_kappa_phi_2", dist.HalfNormal(x_phi_2))
    pyro.sample("mu_psi_2", dist.Uniform(a_psi_2, b_psi_2))
    pyro.sample("inv_kappa_psi_2", dist.HalfNormal(x_psi_2))    
    pyro.sample("mu_phi_3", dist.Uniform(a_phi_3, b_phi_3))
    pyro.sample("inv_kappa_phi_3", dist.HalfNormal(x_phi_3))
Example #14
def sampleblocks(n, XDIM, YDIM):
    blocks = []
    satisfied = False

    while (not satisfied):
        blocks = []
        satisfied = True

        for i in range(n):
            # each block needs its own uniquely named sample sites
            xdim = pyro.sample('xdim_{}'.format(i), dist.Uniform(0, XDIM))
            ydim = pyro.sample('ydim_{}'.format(i), dist.Uniform(0, YDIM))
            xsize = pyro.sample('xsize_{}'.format(i), dist.Normal(50, 20))
            ysize = pyro.sample('ysize_{}'.format(i), dist.Normal(50, 20))
            # CONVERTING TO INTS FROM TENSORS, BAD!
            blocks.append(
                (int(xdim.item()), int(ydim.item()), int(xsize.item()),
                 int(ysize.item())))

        for block in blocks:
            for block2 in blocks:
                if (block == block2):
                    continue
                else:
                    # NOT USING PYRO.CONDITION
                    # reject unless every pair of blocks is non-overlapping
                    satisfied = satisfied and \
                        (block[0] + block[2]/2 < block2[0] - block2[2]/2 or
                         block[0] - block[2]/2 > block2[0] + block2[2]/2 or
                         block[1] + block[3]/2 < block2[1] - block2[3]/2 or
                         block[1] - block[3]/2 > block2[1] + block2[3]/2)

    return blocks
Example #15
    def __init__(self):
        SpatialNodeMixin.__init__(self, tf=torch.eye(4))

        # TODO(gizatt) pyro @scope for local variable naming?
        kitchen_height = pyro.sample("kitchen_height", dist.Uniform(2.0, 3.0))
        kitchen_width = pyro.sample("kitchen_width",
                                    dist.Uniform(2.0, 4.0))  # x axis
        kitchen_length = pyro.sample("kitchen_length",
                                     dist.Uniform(2.0, 4.0))  # y axis
        # North is +y
        # East is +x
        n_wall_rule = DeterministicRelativePoseProductionRule(
            child_constructor=Wall,
            child_name="north_wall",
            relative_tf=pose_to_tf_matrix(
                torch.tensor([0., kitchen_length / 2., 0., 0., 0., 0.])),
            height=kitchen_height,
            width=kitchen_width)
        e_wall_rule = DeterministicRelativePoseProductionRule(
            child_constructor=Wall,
            child_name="east_wall",
            relative_tf=pose_to_tf_matrix(
                torch.tensor([kitchen_width / 2., 0., 0., 0., 0.,
                              -np.pi / 2.])),
            height=kitchen_height,
            width=kitchen_length)
        w_wall_rule = DeterministicRelativePoseProductionRule(
            child_constructor=Wall,
            child_name="west_wall",
            relative_tf=pose_to_tf_matrix(
                torch.tensor([-kitchen_width / 2., 0., 0., 0., 0.,
                              np.pi / 2.])),
            height=kitchen_height,
            width=kitchen_length)
        s_wall_rule = DeterministicRelativePoseProductionRule(
            child_constructor=Wall,
            child_name="south_wall",
            relative_tf=pose_to_tf_matrix(
                torch.tensor([0., -kitchen_length / 2., 0., 0., 0., np.pi])),
            height=kitchen_height,
            width=kitchen_width)
        floor_rule = DeterministicRelativePoseProductionRule(
            child_constructor=Floor,
            child_name="floor",
            relative_tf=torch.eye(4),
            width=kitchen_width,
            length=kitchen_length)

        AndNode.__init__(
            self,
            name="kitchen",
            production_rules=[
                n_wall_rule,
                e_wall_rule,
                #w_wall_rule,
                #s_wall_rule,
                floor_rule
            ])
Example #16
def model(x: 'int[10]' = None):
    theta: 'real' = sample('theta', dist.Uniform(0.0, 1.0))
    sample('theta' + '__1', dist.Uniform(0 * 3 / 5, 1 + 5 - 5), obs=theta)
    for i in range(1, 10 + 1):
        if 1 <= 10 and (1 > 5 or 2 < 1):
            sample('x' + '__{}'.format(i - 1) + '__2',
                   dist.Bernoulli(theta),
                   obs=x[i - 1])
    print(x)
Example #17
    def __init__(self, name, tf):
        # Handle geometry and physics.
        PhysicsGeometryNodeMixin.__init__(self,
                                          tf=tf,
                                          fixed=True,
                                          is_container=True)
        # Offset cabinet geometry from the wall
        geom_tf = pose_to_tf_matrix(torch.tensor([0.15, 0., 0., 0., 0., 0.]))
        # TODO(gizatt) Resource path management to be done here...
        model_path = "/home/gizatt/drake/examples/manipulation_station/models/cupboard.sdf"
        # Randomly open doors random amounts.
        # Left door is straight open at -pi/2 and closed at 0.
        left_door_state = pyro.sample("%s_left_door_state" % name,
                                      dist.Uniform(-np.pi / 2., 0.))
        # Right door is straight open at pi/2 and closed at 0.
        right_door_state = pyro.sample("%s_right_door_state" % name,
                                       dist.Uniform(0.0, np.pi / 2.))
        self.register_model_file(tf=geom_tf,
                                 model_path=model_path,
                                 root_body_name="cupboard_body",
                                 q0_dict={
                                     "left_door_hinge":
                                     left_door_state.detach().numpy(),
                                     "right_door_hinge":
                                     right_door_state.detach().numpy()
                                 })
        # Add clearance geometry to indicate that shelves shouldn't
        # penetrate each other and should have clearance to open the doors.
        clearance_depth = 0.75
        # Offset out from wall just a bit more to give collision detection
        # a margin
        geom_tf = pose_to_tf_matrix(
            torch.tensor(
                [clearance_depth / 2. + 0.001, 0., 0., 0., 0., np.pi / 2.]))
        geometry = Box(width=0.6, depth=clearance_depth, height=1.)
        self.register_clearance_geometry(geom_tf, geometry)

        # Place shelf nodes.
        # Dimensions of a single shelf, in terms of the
        shelf_height = 0.13115 * 2
        bottom_shelf_z_local = -0.3995
        num_shelves = 3
        rules = []
        for k in range(num_shelves):
            rules.append(
                DeterministicRelativePoseProductionRule(
                    child_constructor=PlanarObjectRegion,
                    child_name="cabinet_level_%02d" % k,
                    relative_tf=pose_to_tf_matrix(
                        torch.tensor([
                            0.15, 0., bottom_shelf_z_local + shelf_height * k,
                            0., 0., 0.
                        ])),
                    object_production_rate=0.5,
                    bounds=((-0.1, 0.1), (-0.2, 0.2), (0., 0.2))))
        AndNode.__init__(self, name=name, production_rules=rules)
Example #18
def model(x=None, transformed_data=None):
    y = transformed_data['y']
    ___shape = {}
    ___shape['x'] = 10
    ___shape['theta'] = ()
    theta = sample('theta', dist.Uniform(0.0, 1.0))
    sample('theta' + '__1', dist.Uniform(0, 1), obs=theta)
    for i in range(1, 10 + 1):
        sample('y' + '__{}'.format(i - 1) + '__2', dist.Bernoulli(theta),
            obs=y[i - 1])
Example #19
def sample_fridge_handle(length, width, height, left):
    HANDLE_LEN = pyro.sample('hl', dist.Uniform(0.01, 0.03)).item()
    HANDLE_WIDTH = pyro.sample('hw', dist.Uniform(0.01, 0.05)).item()
    HANDLE_HEIGHT = pyro.sample('hh', dist.Uniform(height / 4, height)).item()

    HX = HANDLE_LEN
    HY = -width * 2 + HANDLE_WIDTH
    HZ = pyro.sample(
        'hz', dist.Uniform(-(height - HANDLE_HEIGHT), height - HANDLE_HEIGHT))
    return HX, HY, HZ, HANDLE_LEN, HANDLE_WIDTH, HANDLE_HEIGHT
Example #20
def model(utt, phi):
  a = pyro.sample("a", dist.Uniform(.1, 20.))
  b = pyro.sample("b", dist.Uniform(.1, 20.))
  
  mu = pyro.sample("mu", dist.Beta(a,b))
  nu = pyro.sample("nu", dist.LogNormal(2,0.5))
  
  a2 = mu * (nu + 1)
  b2 = (1-mu) * (nu + 1)
  
  with pyro.plate("data"):
         pyro.sample("obs", RSASpeaker(mu, phi), obs=utt)
Example #21
def init_params(data):
    params = {}
    # initialize data
    N = data["N"]
    weight = data["weight"]
    height = data["height"]
    # assign init values for parameters
    params["a"] = pyro.sample("a", dist.Uniform(0., 100.))
    params["b"] = pyro.sample("b", dist.Uniform(0., 100.))
    params["sigma"] = pyro.sample("sigma", dist.Uniform(0., 100.))

    return params
Example #22
def tower():
    blocks = pyro.sample('numblocks', dist.Normal(0, 2))
    if blocks < 0:
        blocks = -blocks
    num_blocks = int(blocks.item()) + 3

    blockLocations = []
    for i in range(num_blocks):
        x = pyro.sample('x_{}'.format(i), dist.Uniform(0, 10))
        y = pyro.sample('y_{}'.format(i), dist.Uniform(0, num_blocks))
        blockLocations.append((x, y))

    return blockLocations
Example #23
def vectorized_model(args, data):
    # Sample global parameters.
    rate_s, prob_i, rho = global_model(args.population)

    # Sample reparameterizing variables.
    S_aux = pyro.sample(
        "S_aux",
        dist.Uniform(-0.5, args.population + 0.5).mask(False).expand(
            data.shape).to_event(1),
    )
    I_aux = pyro.sample(
        "I_aux",
        dist.Uniform(-0.5, args.population + 0.5).mask(False).expand(
            data.shape).to_event(1),
    )

    # Manually enumerate.
    S_curr, S_logp = quantize_enumerate(S_aux, min=0, max=args.population)
    I_curr, I_logp = quantize_enumerate(I_aux, min=0, max=args.population)
    # Truncate final value from the right then pad initial value onto the left.
    S_prev = torch.nn.functional.pad(S_curr[:-1], (0, 0, 1, 0),
                                     value=args.population - 1)
    I_prev = torch.nn.functional.pad(I_curr[:-1], (0, 0, 1, 0), value=1)
    # Reshape to support broadcasting, similar to EnumMessenger.
    T = len(data)
    Q = 4
    S_prev = S_prev.reshape(T, Q, 1, 1, 1)
    I_prev = I_prev.reshape(T, 1, Q, 1, 1)
    S_curr = S_curr.reshape(T, 1, 1, Q, 1)
    S_logp = S_logp.reshape(T, 1, 1, Q, 1)
    I_curr = I_curr.reshape(T, 1, 1, 1, Q)
    I_logp = I_logp.reshape(T, 1, 1, 1, Q)
    data = data.reshape(T, 1, 1, 1, 1)

    # Reverse the S2I,I2R computation.
    S2I = S_prev - S_curr
    I2R = I_prev - I_curr + S2I

    # Compute probability factors.
    S2I_logp = dist.ExtendedBinomial(S_prev,
                                     -(rate_s * I_prev).expm1()).log_prob(S2I)
    I2R_logp = dist.ExtendedBinomial(I_prev, prob_i).log_prob(I2R)
    obs_logp = dist.ExtendedBinomial(S2I, rho).log_prob(data)

    # Manually perform variable elimination.
    logp = S_logp + (I_logp + obs_logp) + S2I_logp + I2R_logp
    logp = logp.reshape(-1, Q * Q, Q * Q)
    logp = pyro.distributions.hmm._sequential_logmatmulexp(logp)
    logp = logp.reshape(-1).logsumexp(0)
    logp = logp - math.log(4)  # Account for S,I initial distributions.
    warn_if_nan(logp)
    pyro.factor("obs", logp)
Example #24
def init_params(data):
    params = {}
    # initialize data
    J = data["J"]
    N = data["N"]
    person = data["person"]
    time = data["time"]
    y = data["y"]
    # assign init values for parameters
    params["sigma_a1"] = pyro.sample("sigma_a1", dist.Uniform(0., 100.))
    params["sigma_a2"] = pyro.sample("sigma_a2", dist.Uniform(0., 100.))
    params["sigma_y"] = pyro.sample("sigma_y", dist.Uniform(0., 100.))
    return params
Example #25
    def model():
        stability = pyro.sample("stability", dist.Uniform(1.0, 2.0))
        trans_skew = pyro.sample("trans_skew", dist.Uniform(-1.0, 1.0))
        obs_skew = pyro.sample("obs_skew", dist.Uniform(-1.0, 1.0))
        scale = pyro.sample("scale", dist.Gamma(3, 1))

        # We use separate plates because the .cumsum() op breaks independence.
        with pyro.plate("time1", len(data)):
            dz = pyro.sample("dz", dist.Stable(stability, trans_skew))
        z = dz.cumsum(-1)
        with pyro.plate("time2", len(data)):
            y = pyro.sample("y", dist.Stable(stability, obs_skew, scale, z))
            pyro.sample("x", dist.Poisson(y.abs()), obs=data)
Example #26
 def goal_generator(self):
     '''
     Defines the weights one puts on evaluating one's research career.
     Returns [a, b, 1 - a - b], which is nonnegative and sums to one.
     Inspired by https://stackoverflow.com/questions/5563808/how-to-generate-three-random-numbers-whose-sum-is-1
     '''
     a = pyro.sample("first_pivot", pyd.Uniform(0, 1))
     b = pyro.sample("second_pivot", pyd.Uniform(0, 1 - a.item()))
     con1 = np.array([0, 0, 1])
     con2 = a.item() * np.array([1, 0, -1])
     con3 = b.item() * np.array([0, 1, -1])
     final_importance = np.add(con1, con2)
     final_importance = np.add(final_importance, con3)
     return final_importance
Example #27
 def _sample_cabinet_pose_on_wall(self):
     # For now, hard-code cabinet size to help it not intersect the other walls...
     min_cab_height = 0.5
     max_cab_height = 1.5
     cabinet_width = 0.6
     x_on_wall = pyro.sample(
         "%s_cabinet_x" % self.name,
         dist.Uniform(-self.width / 2. + cabinet_width / 2.,
                      self.width / 2. - cabinet_width / 2.))
     z_on_wall = pyro.sample("%s_cabinet_z" % self.name,
                             dist.Uniform(min_cab_height, max_cab_height))
     return pose_to_tf_matrix(
         torch.tensor([x_on_wall, 0., z_on_wall, 0., 0., -np.pi / 2.]))
Example #28
def init_params(data):
    params = {}
    N = data["N"]
    n_groups = data["n_groups"]
    n_scenarios = data["n_scenarios"]
    group_id = data["group_id"]
    scenario_id = data["scenario_id"]
    y = data["y"]
    # assign init values for parameters
    params["sigma_a"] = pyro.sample("sigma_a", dist.Uniform(0., 100.))
    params["sigma_b"] = pyro.sample("sigma_b", dist.Uniform(0., 100.))
    params["sigma_y"] = pyro.sample("sigma_y", dist.Uniform(0., 100.))
    return params
Example #29
def init_params(data):
    params = {}
    # initialize data
    N = data["N"]
    n_pair = data["n_pair"]
    pair = data["pair"]
    treatment = data["treatment"]
    y = data["y"]
    # assign init values for parameters
    params["sigma_a"] = pyro.sample("sigma_a", dist.Uniform(0., 100.))
    params["sigma_y"] = pyro.sample("sigma_y", dist.Uniform(0., 100.))

    return params
Example #30
 def _sample_object_pose(self):
     # For now, hard-code cabinet size to help it not intersect the other walls...
     x_on_shelf = pyro.sample(
         "%s_object_x" % self.name,
         dist.Uniform(self.x_bounds[0], self.x_bounds[1]))
     y_on_shelf = pyro.sample(
         "%s_object_y" % self.name,
         dist.Uniform(self.y_bounds[0], self.y_bounds[1]))
     yaw = pyro.sample("%s_object_yaw" % self.name,
                       dist.Uniform(0., np.pi * 2.))
     return pose_to_tf_matrix(
         torch.tensor(
             [x_on_shelf, y_on_shelf,
              np.mean(self.z_bounds), 0., 0., yaw]))