def rmh_worker_glm_coef(comm, b_hat, b_prev, y, X, I, family, w=1, V=None,
                        method='newton', MPIROOT=0, coverage_prob=0.999,
                        grid_min_spacing=0.5, cov=emulate.cov_sqexp, **kwargs):
    '''
    Worker component of a single Metropolis-Hastings step for GLM coefficients
    using a normal approximation to their posterior distribution. The proposal,
    a linearly-transformed vector of independent t variates (with t_propDf
    degrees of freedom), is drawn on the master and broadcast to the workers.

    At least one of I (the Fisher information) and V (the inverse Fisher
    information) must be provided. The 'newton' method uses I directly; the
    'emulate' method uses V when it is given and otherwise computes the needed
    Cholesky factor from I.

    Returns None.
    '''
    # Get number of workers
    n_workers = comm.Get_size() - 1

    # Get dimensions
    p = X.shape[1]
    
    if method == 'emulate':
        # Build local emulator for score function
        grid_radius = emulate.approx_quantile(coverage_prob=coverage_prob,
                                              d=p, n=n_workers)
        if V is None:
            L = linalg.solve_triangular(linalg.cholesky(I, lower=True),
                                        np.eye(p), lower=True)
        else:
            L = linalg.cholesky(V, lower=True)

        emulator = emulate.build_emulator(
            glm.score, center=b_hat, slope_mean=L, cov=cov,
            grid_min_spacing=grid_min_spacing, grid_radius=grid_radius,
            f_kwargs={'y' : y, 'X' : X, 'w' : w, 'family' : family})
        
        # Send emulator to master node
        emulate.aggregate_emulators_mpi(
            comm=comm, emulator=emulator, MPIROOT=MPIROOT)
    elif method == 'newton':
        # Build necessary quantities for distributed posterior approximation
        z_hat = np.dot(I, b_hat)
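        # Summing z_hat = I_k b_hat_k and I_k across workers allows the master
        # to recover the pooled mode b = (sum_k I_k)^{-1} sum_k (I_k b_hat_k).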

        # Condense approximation to a single vector for reduction
        approx = np.r_[z_hat, I[np.tril_indices(p)]]

        # Combine with other approximations on master.
        comm.Reduce([approx, MPI.DOUBLE], None,
                    op=MPI.SUM, root=MPIROOT)

        # Receive settings for refinement
        settings = np.zeros(2, dtype=int)
        comm.Bcast([settings, MPI.INT], root=MPIROOT)
        n_iter, final_info = settings

        # Newton-Raphson iterations for refinement of approximation
        for i in xrange(n_iter):
            # Receive updated estimate from master
            comm.Bcast([b_hat, MPI.DOUBLE], root=MPIROOT)

            # Compute score and information matrix at combined estimate
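            # (Assuming family.weights(mu) returns the usual IRLS weights
            # (dmu/deta)^2 / Var(mu), grad below is the score
            # X' W D^{-1} (y - mu) with D = diag(dmu/deta), and info is X' W X.)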
            eta = np.dot(X, b_hat)
            mu = family.link.inv(eta)
            weights = w * family.weights(mu)
            dmu_deta = family.link.deriv(eta)
            sqrt_W_X = (X.T * np.sqrt(weights)).T

            grad = np.dot(X.T, weights / dmu_deta * (y - mu))
            info = np.dot(sqrt_W_X.T, sqrt_W_X)

            # Condense update to a single vector for reduction
            update = np.r_[grad, info[np.tril_indices(p)]]

            # Combine with other updates on master
            comm.Reduce([update, MPI.DOUBLE], None,
                        op=MPI.SUM, root=MPIROOT)

        # Contribute to final information matrix refinement if requested
        if final_info:
            # Receive updated estimate
            comm.Bcast([b_hat, MPI.DOUBLE], root=MPIROOT)

            # Update information matrix
            eta = np.dot(X, b_hat)
            mu = family.link.inv(eta)
            weights = w * family.weights(mu)
            sqrt_W_X = (X.T * np.sqrt(weights)).T

            info = np.dot(sqrt_W_X.T, sqrt_W_X)

            # Combine informations on master
            comm.Reduce([info[np.tril_indices(p)], MPI.DOUBLE], None,
                        op=MPI.SUM, root=MPIROOT)
    else:
        print >> sys.stderr, "Error - method %s unknown" % method
        return

    # Obtain proposed value of coefficients from master.
    b_prop = np.empty(p)
    comm.Bcast([b_prop, MPI.DOUBLE], root=MPIROOT)

    # Compute proposed and previous means
    eta_prop = np.dot(X, b_prop)
    eta_prev = np.dot(X, b_prev)

    mu_prop = family.link.inv(eta_prop)
    mu_prev = family.link.inv(eta_prev)

    # Compute log-ratio of target densities
    log_target_ratio = np.sum(family.loglik(y=y, mu=mu_prop, w=w) -
                              family.loglik(y=y, mu=mu_prev, w=w))

    # Reduce log-target ratio for MH step on master.
    comm.Reduce([np.array(log_target_ratio), MPI.DOUBLE], None,
                op=MPI.SUM, root=MPIROOT)
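

# A minimal sketch, for illustration only, of how the condensed vector
# np.r_[z_hat, I[np.tril_indices(p)]] reduced above can be unpacked into the
# pooled normal approximation on the receiving side. The helper name is
# hypothetical and is not part of this module's API.
def _unpack_condensed_approx(approx, p):
    '''Recover (b_combined, I_combined) from a summed condensed vector.'''
    z = approx[:p]
    I = np.zeros((p, p))
    I[np.tril_indices(p)] = approx[p:]
    # Symmetrize from the lower triangle, then solve I b = sum_k I_k b_hat_k
    I = I + np.tril(I, -1).T
    b = np.linalg.solve(I, z)
    return b, I

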
def rmh_worker_nbinom_hyperparams(comm, x, r_prev, p_prev, MPIROOT=0,
                                  prior_mean_log=2.65,
                                  prior_prec_log=1. / 0.652 ** 2,
                                  prior_a=1., prior_b=1.,
                                  brent_scale=6., fallback_upper=10000.,
                                  correct_prior=True, method='newton',
                                  coverage_prob=0.999,
                                  grid_min_spacing=0.5, cov=emulate.cov_sqexp):
    '''
    Worker side of the Metropolis-Hastings step for the negative-binomial
    hyperparameters given all other parameters.

    Uses a log-normal prior for the r (convolution) hyperparameter and a
    conditionally-conjugate beta prior for p.

    Proposes from a normal approximation to the conditional posterior
    (conditional independence chain), with r log-transformed and p
    logit-transformed.

    Builds a normal approximation from the local data, then combines it with
    the other workers' approximations on the master process. The combined
    approximation is used to generate a proposal, which is broadcast back to
    the workers. The workers then evaluate the log-target ratio and combine
    these on the master to execute the MH step. The resulting draw is __not__
    returned to the workers until the next synchronization.

    Returns None.
    '''
    # Correct / adjust prior for distributed approximation, if requested
    adj = 1.
    if correct_prior:
        adj = comm.Get_size() - 1.
    
    # Setup arguments
    nbinom_args = dict(x=x, prior_a=prior_a, prior_b=prior_b,
                       prior_mean_log=prior_mean_log,
                       prior_prec_log=prior_prec_log, prior_adj=adj)

    # Compute posterior mode for r and p using profile log-posterior
    r_hat, p_hat = map_estimator_nbinom(transform=True,
                                        brent_scale=brent_scale,
                                        fallback_upper=fallback_upper,
                                        **nbinom_args)

    # Propose using a bivariate normal approximation to the joint conditional
    # posterior of (r, p)

    # Compute posterior information matrix for parameters
    info = info_posterior_nbinom(r=r_hat, p=p_hat, transform=True,
                                 **nbinom_args)
    
    # Transform point estimate to the (log(r), logit(p)) scale
    theta_hat = np.log(np.array([r_hat, p_hat]))
    theta_hat[1] -= np.log(1. - p_hat)

    if method == 'emulate':
        # Build local emulator for score function
        grid_radius = emulate.approx_quantile(coverage_prob=coverage_prob,
                                              d=2, n=adj)
        L = linalg.solve_triangular(linalg.cholesky(info, lower=True),
                                    np.eye(2), lower=True)

        emulator = emulate.build_emulator(
            score_posterior_nbinom_vec, center=theta_hat, slope_mean=L, cov=cov,
            grid_min_spacing=grid_min_spacing, grid_radius=grid_radius,
            f_kwargs=nbinom_args)
        
        # Send emulator to master node
        emulate.aggregate_emulators_mpi(
            comm=comm, emulator=emulator, MPIROOT=MPIROOT)
    else:
        # Build necessary quantities for distributed posterior approximation
        z_hat = np.dot(info, theta_hat)

        # Condense approximation to a single vector for reduction
        approx = np.r_[z_hat, info[np.tril_indices(2)]]

        # Combine with other approximations on master.
        comm.Reduce([approx, MPI.DOUBLE], None,
                    op=MPI.SUM, root=MPIROOT)

        # Receive settings for refinement
        settings = np.zeros(2, dtype=int)
        comm.Bcast([settings, MPI.INT], root=MPIROOT)
        n_iter, final_info = settings

        # Newton-Raphson iterations for refinement of approximation
        for i in xrange(n_iter):
            # Receive updated estimate from master
            comm.Bcast([theta_hat, MPI.DOUBLE], root=MPIROOT)

            # Compute score and information matrix at combined estimate
            r_hat = np.exp(theta_hat[0])
            p_hat = 1. / (1. + np.exp(-theta_hat[1]))
            grad = score_posterior_nbinom_vec(theta_hat,
                                              **nbinom_args).flatten()
            info = info_posterior_nbinom(r=r_hat, p=p_hat, transform=True,
                                         **nbinom_args)

            # Condense update to a single vector for reduction
            update = np.r_[grad, info[np.tril_indices(2)]]

            # Combine with other updates on master
            comm.Reduce([update, MPI.DOUBLE], None,
                        op=MPI.SUM, root=MPIROOT)

        # Contribute to final information matrix refinement if requested
        if final_info:
            # Receive updated estimate
            comm.Bcast([theta_hat, MPI.DOUBLE], root=MPIROOT)
            
            r_hat = np.exp(theta_hat[0])
            p_hat = 1. / (1 + np.exp(-theta_hat[1]))
            info = info_posterior_nbinom(r=r_hat, p=p_hat, transform=True,
                                         **nbinom_args)

            # Combine informations on master
            comm.Reduce([info[np.tril_indices(2)], MPI.DOUBLE], None,
                        op=MPI.SUM, root=MPIROOT)

    # Obtain proposed value of theta from master.
    theta_prop = np.empty(2)
    comm.Bcast([theta_prop, MPI.DOUBLE], root=MPIROOT)
    r_prop, p_prop = np.exp(theta_prop)
    p_prop = p_prop / (1. + p_prop)  # Inverse-logit back to the probability scale

    # Compute log-ratio of target densities, omitting prior.
    # Log-ratio of prior densities is handled on the master.

    # Only component is log-likelihood ratio for x.
    log_target_ratio = np.sum(dnbinom(x, r=r_prop, p=p_prop, log=True) -
                              dnbinom(x, r=r_prev, p=p_prev, log=True))

    # Reduce log-target ratio for MH step on master.
    comm.Reduce([np.array(log_target_ratio), MPI.DOUBLE], None,
                op=MPI.SUM, root=MPIROOT)
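

# A minimal sketch, for illustration only, of the generic Metropolis-Hastings
# acceptance step that the reduced log-target ratio feeds into on the master.
# The helper name and the prior/proposal correction terms are assumptions; the
# actual master-side routines live elsewhere in this module.
def _mh_accept(log_target_ratio, log_prior_ratio=0., log_proposal_ratio=0.):
    '''Accept with probability min(1, exp(total log acceptance ratio)).'''
    log_accept_prob = log_target_ratio + log_prior_ratio + log_proposal_ratio
    return np.log(np.random.uniform()) < log_accept_prob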