Example #1
    def childParentJoint( self, t, alphas, betas, xs=None ):
        alpha = np.broadcast_to( alphas[ t ], ( self.K, self.K ) ).T
        transition = self.transitionProb( t, t + 1, forward=False )
        beta = np.broadcast_to( betas[ t + 1 ], ( self.K, self.K ) )
        emission = self.emissionProb( t + 1, forward=False, xs=xs )

        return self.multiplyTerms( ( alpha, transition, beta, emission ) )
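Both childParentJoint variants rely on the same trick: np.broadcast_to lifts a length-K log-message into a (K, K) table so the parent variable indexes rows and the child indexes columns. A minimal standalone sketch of that orientation (the values are made up):

import numpy as np

K = 3
log_alpha = np.log(np.array([0.5, 0.3, 0.2]))  # P( x_t, y_1:t ), one entry per state
log_beta = np.log(np.array([0.2, 0.2, 0.6]))   # P( y_t+2:T | x_t+1 )

alpha_table = np.broadcast_to(log_alpha, (K, K)).T  # alpha_table[i, j] = log_alpha[i]  (rows index x_t)
beta_table = np.broadcast_to(log_beta, (K, K))      # beta_table[i, j] = log_beta[j]    (cols index x_t+1)

assert np.allclose(alpha_table[:, 0], log_alpha)
assert np.allclose(beta_table[0, :], log_beta)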
Example #2
    def childParentJoint( self, t, alphas, betas, ys=None ):
        # P( x_t+1, x_t, Y ) = P( y_t+1 | x_t+1 ) * P( y_t+2:T | x_t+1 ) * P( x_t+1 | x_t ) * P( x_t, y_1:t )

        alpha = np.broadcast_to( alphas[ t ], ( self.K, self.K ) ).T
        transition = self.transitionProb( t, t + 1, forward=False )
        beta = np.broadcast_to( betas[ t + 1 ], ( self.K, self.K ) )
        emission = self.emissionProb( t + 1, forward=False, ys=ys )

        return self.multiplyTerms( ( alpha, transition, beta, emission ) )
Example #3
    def nail_positions(self, theta, level=None, nail=None):
        if level is None or nail is None:
            level = np.broadcast_to(np.arange(self.n_rows), (self.n_nails, self.n_rows)).T
            nail = np.broadcast_to(np.arange(self.n_nails), (self.n_rows, self.n_nails))

        level_rel = 1. * level / (self.n_rows - 1)
        nail_rel = 2. * nail / (self.n_nails - 1) - 1.

        nail_positions = ((1. - np.sin(np.pi * level_rel)) * 0.5
                          + np.sin(np.pi * level_rel) * sigmoid(10 * theta * nail_rel))

        return nail_positions
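The level/nail construction above is just an index grid; a quick check of my own showing it matches np.indices, with the caveat that np.broadcast_to returns read-only views while np.indices returns writable arrays:

import numpy as np

n_rows, n_nails = 3, 4
level = np.broadcast_to(np.arange(n_rows), (n_nails, n_rows)).T  # level[i, j] = i
nail = np.broadcast_to(np.arange(n_nails), (n_rows, n_nails))    # nail[i, j] = j

level_ix, nail_ix = np.indices((n_rows, n_nails))
assert np.array_equal(level, level_ix)
assert np.array_equal(nail, nail_ix)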
Example #4
 def __init__(self, args=None):
     super().__init__()
     self.env = topopogy1.Environment(args)
     form = (1, self.env.args['nely'], self.env.args['nelx'])
     layer = np.broadcast_to(args['volfrac'], form)
     self.layer = tf.Variable(layer, trainable=True)
     self.args = args
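One caveat worth noting here: np.broadcast_to returns a read-only view, which works above only because tf.Variable copies its initial value. A tiny sketch of that behavior (shape chosen arbitrarily):

import numpy as np

layer = np.broadcast_to(0.4, (1, 5, 5))  # read-only view; no full 1x5x5 buffer is allocated
try:
    layer[0, 0, 0] = 1.0
except ValueError:
    layer = layer.copy()  # make a writable copy before mutating
layer[0, 0, 0] = 1.0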
Example #5
def physical_density(x, args, volume_contraint=False, cone_filter=True):
    shape = (args['nely'], args['nelx'])
    assert x.shape == shape or x.ndim == 1
    x = x.reshape(shape)
    if volume_contraint:
        mask = np.broadcast_to(args['mask'], x.shape) > 0
        x_designed = sigmoid_with_constrained_mean(x[mask], args['volfrac'])
        x_flat = autograd_lib.scatter1d(
            x_designed, np.flatnonzero(mask), x.size)
        x = x_flat.reshape(x.shape)

        v = np.sum(x) / args['nely'] / args['nelx']

        if args['volfrac'] > 0.9:
            args['volfrac'] = max(v * (1 - 0.02), 0.9)
        elif args['volfrac'] > 0.8:
            args['volfrac'] = max(v * (1 - 0.02), 0.8)
        elif args['volfrac'] > 0.7:
            args['volfrac'] = max(v * (1 - 0.02), 0.7)
        elif args['volfrac'] > 0.6:
            args['volfrac'] = max(v * (1 - 0.02), 0.6)
        else:
            args['volfrac'] = max(v * (1 - 0.02), 0.5)
        
        print("v = ", v ,", volfrac = ", args['volfrac'])
        
    else:
        x = x * args['mask']
    if cone_filter:
        x = autograd_lib.cone_filter(x, args['filter_width'], args['mask'])
    return x
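The mask/scatter pattern above round-trips the masked entries through a flat array; here is a standalone sketch with plain NumPy in place of autograd_lib.scatter1d (which I am treating as "place values at the given flat indices"):

import numpy as np

x = np.arange(6.0).reshape(2, 3)
mask = np.broadcast_to(np.array([True, False, True]), x.shape)

designed = x[mask] * 10.0                # transform only the masked entries
x_flat = np.zeros(x.size)
x_flat[np.flatnonzero(mask)] = designed  # scatter back by flat index
x_new = x_flat.reshape(x.shape)

assert np.array_equal(x_new, x * mask * 10.0)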
Example #6
def condition(args, to_shape=None):
    """Condition n-d args for PyCO2SYS.
    
    If NumPy can broadcast the args together, they are a valid combination, and they
    will be combined following NumPy broadcasting rules.

    All array-like args will be broadcast into the same shape.
    Any scalar args will be left as scalars.
    """
    try:  # check all args can be broadcast together
        args = {k: v for k, v in args.items() if v is not None}
        args_broadcast = broadcast1024(*args.values())
        if to_shape is not None:
            try:  # check args can be broadcast to to_shape, if provided
                broadcast1024(np.ones(to_shape), np.ones(args_broadcast.shape))
                args_broadcast_shape = to_shape
            except ValueError:
                print("PyCO2SYS error: args are not broadcastable to to_shape.")
                return
        else:
            args_broadcast_shape = args_broadcast.shape
        # Broadcast the non-scalar args to a consistent shape
        args_conditioned = {
            k: np.broadcast_to(v, args_broadcast_shape) if not np.isscalar(v) else v
            for k, v in args.items()
        }
        # Convert to float, where needed
        args_conditioned = {
            k: np.float64(v) if k in input_floats else v
            for k, v in args_conditioned.items()
        }
    except ValueError:
        print("PyCO2SYS error: input shapes cannot be broadcast together.")
        return
    return args_conditioned
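The behavior condition depends on is plain NumPy broadcasting: scalars stay scalar, array-likes get expanded to one common shape. A standalone sketch of that rule, using np.broadcast in place of PyCO2SYS's broadcast1024 helper (the argument names are made up):

import numpy as np

args = {
    "salinity": 35.0,                             # scalar: left alone
    "temperature": np.array([10.0, 15.0, 20.0]),  # shape (3,)
    "pressure": np.array([[0.0], [100.0]]),       # shape (2, 1)
}
common_shape = np.broadcast(*args.values()).shape  # (2, 3)
conditioned = {
    k: np.broadcast_to(v, common_shape) if not np.isscalar(v) else v
    for k, v in args.items()
}
assert np.isscalar(conditioned["salinity"])
assert conditioned["temperature"].shape == (2, 3)
assert conditioned["pressure"].shape == (2, 3)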
Example #7
 def draw_z_column(self, k):
     # Because the z columns are inter-dependent, only draw one column at a time.
     assert k < self.k_approx
     z_cond_params = self.get_z_cond_params(self.z)
     z_logsumexp = sp.special.logsumexp(z_cond_params, 1)  # logsumexp moved from scipy.misc to scipy.special
     z_logsumexp = np.broadcast_to(z_logsumexp, (self.k_approx, self.x_n)).T
     z_means = np.exp(z_cond_params - z_logsumexp)
     self.z[:, k] = vi.draw_z(z_means, 1)[0, :, k].astype(float)
Example #8
    def emissionProb( self, t, forward=False, ys=None ):

        if( ys is None ):
            emiss = self.L[ t ]
        else:
            emiss = self._L.T[ ys[ :, t ] ].sum( axis=0 )

        return emiss if forward else np.broadcast_to( emiss, ( self.K, self.K ) )
Example #9
def taylor_approx(target, stencil, values):
    """Use taylor series to approximate up to second order derivatives.

  Args:
    target: An array of shape (..., n), a batch of n-dimensional points
      where one wants to approximate function value and derivatives.
    stencil: An array of shape broadcastable to (..., k, n), for each target
      point a set of k = triangle(n + 1) points to use on its approximation.
    values: An array of shape broadcastable to (..., k), the function value at
      each of the stencil points.

  Returns:
    An array of shape (..., k), for each target point the approximated
    function value, gradient and hessian evaluated at that point (flattened
    and in the same order as returned by derivative_names).
  """
    # Broadcast arrays to their required shape.
    batch_shape, ndim = target.shape[:-1], target.shape[-1]
    stencil = np.broadcast_to(stencil,
                              batch_shape + (triangular(ndim + 1), ndim))
    values = np.broadcast_to(values, stencil.shape[:-1])

    # Subtract target from each stencil point.
    delta_x = stencil - np.expand_dims(target, axis=-2)
    delta_xy = np.matmul(np.expand_dims(delta_x, axis=-1),
                         np.expand_dims(delta_x, axis=-2))
    i = np.arange(ndim)
    j, k = np.triu_indices(ndim, k=1)

    # Build coefficients for the Taylor series equations, namely:
    #   f(stencil) = coeffs @ [f(target), df/d0(target), ...]
    coeffs = np.concatenate(
        [
            np.ones(delta_x.shape[:-1] + (1, )),  # f(target)
            delta_x,  # df/di(target)
            delta_xy[..., i, i] / 2,  # d^2f/di^2(target)
            delta_xy[..., j, k],  # d^2f/{dj dk}(target)
        ],
        axis=-1)

    # Then: [f(target), df/d0(target), ...] = coeffs^{-1} @ f(stencil)
    return np.squeeze(
        np.matmul(np.linalg.inv(coeffs), values[..., np.newaxis]), axis=-1)
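The linear system is easiest to see in one dimension, where k = triangle(2) = 3 and the coefficient rows are [1, dx, dx^2/2]. A worked check on a known quadratic (my own example, using np.linalg.solve instead of the batched inverse):

import numpy as np

f = lambda x: 2.0 + 3.0 * x + 0.5 * x ** 2

target = 1.0
stencil = np.array([0.0, 1.0, 2.0])  # k = 3 stencil points
values = f(stencil)

delta_x = stencil - target
coeffs = np.stack([np.ones(3), delta_x, delta_x ** 2 / 2], axis=-1)
f0, df, d2f = np.linalg.solve(coeffs, values)

assert np.isclose(f0, f(target))     # 5.5
assert np.isclose(df, 3.0 + target)  # f'(x) = 3 + x
assert np.isclose(d2f, 1.0)          # f''(x) = 1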
Example #10
def LKJ_to_beta_pars(eta, d):
    """
    Transform LKJ distribution with parameter eta for matrix of dimension d
    to vector of beta distribution parameters:
    p_{i >= 1, j>i; 1...i-1} ~ Beta(b_i, b_i)
    b_i = eta + (d - 1 - i)/2
    """
    idxmat = np.broadcast_to((d - 1 - np.arange(d)) / 2., (d, d)).T
    bmat = eta + idxmat
    return U_to_vec(bmat)  # only upper triangle, flattened
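A quick numeric check of the broadcast-then-transpose step (my own example): for d = 3 each row i of bmat is constant at b_i = eta + (d - 1 - i)/2.

import numpy as np

eta, d = 2.0, 3
idxmat = np.broadcast_to((d - 1 - np.arange(d)) / 2., (d, d)).T
bmat = eta + idxmat
assert np.allclose(bmat[0], eta + 1.0)  # b_0 = eta + (3 - 1 - 0)/2
assert np.allclose(bmat[1], eta + 0.5)
assert np.allclose(bmat[2], eta + 0.0)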
Example #11
 def __init__(self, name='', length=1, matrix_size=2, diag_lb=0.0, val=None):
     self.name = name
     self.__matrix_size = int(matrix_size)
     self.__matrix_shape = np.array([ int(matrix_size), int(matrix_size) ])
     self.__length = int(length)
     self.__shape = np.append(self.__length, self.__matrix_shape)
     # vec_size is the size of a single matrix in vector form.
     self.__vec_size = int(matrix_size * (matrix_size + 1) / 2)
     self.__diag_lb = diag_lb
     assert diag_lb >= 0
     if val is None:
         default_val = np.diag(np.full(self.__matrix_size, diag_lb + 1.0))
         self.__val = np.broadcast_to(default_val, self.__shape)
     else:
         self.set(val)
Example #12
def physical_density(x, args, volume_contraint=False, cone_filter=True):
    shape = (args['nely'], args['nelx'])
    assert x.shape == shape or x.ndim == 1
    x = x.reshape(shape)
    if volume_contraint:
        mask = np.broadcast_to(args['mask'], x.shape) > 0
        x_designed = sigmoid_with_constrained_mean(x[mask], args['volfrac'])
        x_flat = autograd_lib.scatter1d(x_designed, np.flatnonzero(mask),
                                        x.size)
        x = x_flat.reshape(x.shape)
    else:
        x = x * args['mask']
    if cone_filter:
        x = autograd_lib.cone_filter(x, args['filter_width'], args['mask'])
    return x
Example #13
    def emissionProb( self, t, forward=False, ys=None ):

        if( ys is None ):
            emiss = self.L[ t ]
        else:
            emiss = np.zeros( self.K )

            for k in range( self.K ):

                n1 = self.n1Emiss[ k ]
                n2 = self.n2Emiss[ k ]

                emiss += Normal.log_likelihood( ys[ :, t ], nat_params=( n1, n2 ) ).sum( axis=0 )

        return emiss if forward else np.broadcast_to( emiss, ( self.K, self.K ) )
Example #14
    def emissionProb( self, t, forward=False, xs=None ):
        if( xs is None ):
            emiss = self.L[ t - 1 ]
        else:

            emiss = np.zeros( self.K )

            for i, ( n1, n2, n3 ) in enumerate( zip( self.n1Trans, self.n2Trans, self.n3Trans ) ):

                def ll( _x ):
                    x, x1 = np.split( _x, 2 )
                    return Regression.log_likelihood( ( x, x1 ), nat_params=( n1, n2, n3 ) )

                emiss[ i ] = np.apply_along_axis( ll, -1, np.hstack( ( xs[ :-1, t ], xs[ 1: , t ] ) ) )

        return emiss if forward else np.broadcast_to( emiss, ( self.K, self.K ) )
Example #15
 def __init__(self, name='', array_shape=(1,), matrix_size=2, diag_lb=0.0, val=None):  # (1,) not (1): a bare (1) is just the int 1
     self.name = name
     self.__matrix_size = int(matrix_size)
     self.__matrix_shape = np.array([ int(matrix_size), int(matrix_size) ])
     self.__array_shape = array_shape
     self.__array_ranges = [ range(0, t) for t in self.__array_shape ]
     self.__array_length = np.prod(self.__array_shape)
     self.__shape = np.append(self.__array_shape, self.__matrix_shape)
     # __vec_size is the size of a single matrix in vector form.
     self.__vec_size = int(matrix_size * (matrix_size + 1) / 2)
     self.__diag_lb = diag_lb
     assert diag_lb >= 0
     if val is None:
         default_val = np.diag(np.full(self.__matrix_size, diag_lb + 1.0))
         self.__val = np.broadcast_to(default_val, self.__shape)
     else:
         self.set(val)
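Both constructors use the same initialization pattern: broadcast one (matrix_size, matrix_size) default matrix across the leading array shape, giving a stack of identical copies without allocating each one. A quick check of that behavior:

import numpy as np

default_val = np.diag(np.full(2, 1.0))             # 2 x 2 diagonal default
stacked = np.broadcast_to(default_val, (3, 2, 2))  # three views of the same matrix
assert all(np.array_equal(m, default_val) for m in stacked)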
Example #16
    def multiplyTerms(cls, terms):
        # Basically np.einsum but in log space

        assert isinstance(terms, Iterable)

        # Remove the empty terms
        terms = [t for t in terms if np.prod(t.shape) > 1]

        ndim = max([len(term.shape) for term in terms])

        axes = [[i for i, s in enumerate(t.shape) if s != 1] for t in terms]

        # Get the shape of the output
        shape = np.ones(ndim, dtype=int)
        for ax, term in zip(axes, terms):
            shape[np.array(ax)] = term.squeeze().shape

        total_elts = shape.prod()
        if (total_elts > 1e8):
            assert 0, 'Don\'t do this on a cpu!  Too many terms: %d' % (
                int(total_elts))

        # Build a meshgrid out of each of the terms over the right axes
        # and sum.  Doing it this way because np.einsum doesn't work
        # for matrix multiplication in log space - there is no einsum
        # variant that adds instead of multiplying over indices.

        ans = np.zeros(shape)
        for ax, term in zip(axes, terms):

            for _ in range(ndim - term.ndim):
                term = term[..., None]

            ans += np.broadcast_to(term, ans.shape)

        return ans
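What the comment is getting at: einsum contracts indices by multiplying, and there is no variant that adds instead. In log space the product of factors becomes a broadcast sum, and any contraction becomes a logsumexp. A standalone sketch (not part of the class above) showing a log-space matrix product built that way:

import numpy as np
from scipy.special import logsumexp

rng = np.random.default_rng(0)
A = rng.random((3, 4))
B = rng.random((4, 5))

# Align the factors on axes [i, j, k], add the logs, then contract j:
log_terms = np.log(A)[:, :, None] + np.log(B)[None, :, :]
log_AB = logsumexp(log_terms, axis=1)

assert np.allclose(log_AB, np.log(A @ B))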
Example #17
def broadcast_to(a: Numeric, *shape: Int):
    return anp.broadcast_to(a, shape)
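The wrapper only changes the calling convention: NumPy takes the target shape as a single tuple, while this version accepts it unpacked as varargs. The same idea with plain NumPy:

import numpy as np

def broadcast_to(a, *shape):
    return np.broadcast_to(a, shape)

assert broadcast_to(np.arange(3), 4, 3).shape == (4, 3)       # varargs
assert np.broadcast_to(np.arange(3), (4, 3)).shape == (4, 3)  # tuple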
Example #18
 def backwardStep( self, t, beta ):
     # Write P( y_t+2:T | x_t+1 ) in terms of [ x_t+1, x_t ]
     _beta = np.broadcast_to( beta, ( self.K, self.K ) )
     return super( CategoricalHMM, self ).backwardStep( t, _beta )
Example #19
 def forwardStep( self, t, alpha ):
     # Write P( y_1:t-1, x_t-1 ) in terms of [ x_t, x_t-1 ]
     _alpha = np.broadcast_to( alpha, ( self.K, self.K ) )
     return super( CategoricalHMM, self ).forwardStep( t, _alpha )
Example #20
def multivariate_normal_logpdf(data, mus, Sigmas, mask=None):
    """
    Compute the log probability density of a multivariate Gaussian distribution.
    This will broadcast as long as data, mus, Sigmas have the same (or at
    least compatible) leading dimensions.

    Parameters
    ----------
    data : array_like (..., D)
        The points at which to evaluate the log density
    mus : array_like (..., D)
        The mean(s) of the Gaussian distribution(s)
    Sigmas : array_like (..., D, D)
        The covariance(s) of the Gaussian distribution(s)
    mask : array_like (..., D) bool
        Optional mask indicating which entries in the data are observed

    Returns
    -------
    lps : array_like (...,)
        Log probabilities under the multivariate Gaussian distribution(s).
    """
    # Check inputs
    D = data.shape[-1]
    assert mus.shape[-1] == D
    assert Sigmas.shape[-2] == Sigmas.shape[-1] == D

    # If there's no mask, we can just use the standard log pdf code
    if mask is None:
        return _multivariate_normal_logpdf(data, mus, Sigmas)

    # Otherwise we need to separate the data into sets with the same mask,
    # since each one will entail a different covariance matrix.
    #
    # First, determine the output shape. Allow mus and Sigmas to
    # have different shapes; e.g. many Gaussians with the same
    # covariance but different means.
    shp1 = np.broadcast(data, mus).shape[:-1]
    shp2 = np.broadcast(data[..., None], Sigmas).shape[:-2]
    assert len(shp1) == len(shp2)
    shp = tuple(max(s1, s2) for s1, s2 in zip(shp1, shp2))

    # Broadcast the data into the full shape
    full_data = np.broadcast_to(data, shp + (D, ))

    # Get the full mask
    assert mask.dtype == bool
    assert mask.shape == data.shape
    full_mask = np.broadcast_to(mask, shp + (D, ))

    # Flatten the mask and get the unique values
    flat_data = flatten_to_dim(full_data, 1)
    flat_mask = flatten_to_dim(full_mask, 1)
    unique_masks, mask_index = np.unique(flat_mask,
                                         return_inverse=True,
                                         axis=0)

    # Initialize the output
    lls = np.nan * np.ones(flat_data.shape[0])

    # Compute the log probability for each mask
    for i, this_mask in enumerate(unique_masks):
        this_inds = np.where(mask_index == i)[0]
        this_D = np.sum(this_mask)
        if this_D == 0:
            lls[this_inds] = 0
            continue

        this_data = flat_data[np.ix_(this_inds, this_mask)]
        this_mus = mus[..., this_mask]
        this_Sigmas = Sigmas[np.ix_(
            *[np.ones(sz, dtype=bool) for sz in Sigmas.shape[:-2]], this_mask,
            this_mask)]

        # Precompute the Cholesky decomposition
        this_Ls = np.linalg.cholesky(this_Sigmas)

        # Broadcast mus and Sigmas to full shape and extract the necessary indices
        this_mus = flatten_to_dim(np.broadcast_to(this_mus, shp + (this_D, )),
                                  1)[this_inds]
        this_Ls = flatten_to_dim(
            np.broadcast_to(this_Ls, shp + (this_D, this_D)), 2)[this_inds]

        # Evaluate the log likelihood
        lls[this_inds] = _multivariate_normal_logpdf(this_data,
                                                     this_mus,
                                                     this_Sigmas,
                                                     Ls=this_Ls)

    # Reshape the output
    assert np.all(np.isfinite(lls))
    return np.reshape(lls, shp)
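The shape bookkeeping at the top of the masked branch is worth a small standalone check (the shapes are my own): np.broadcast derives the common leading shape even when the means and covariances broadcast differently, and np.broadcast_to then expands the data to it.

import numpy as np

D = 3
data = np.zeros((5, D))          # 5 data points
mus = np.zeros((2, 1, D))        # a 2 x 1 grid of means
Sigmas = np.zeros((1, 5, D, D))  # covariances varying along the second axis

shp1 = np.broadcast(data, mus).shape[:-1]                # (2, 5)
shp2 = np.broadcast(data[..., None], Sigmas).shape[:-2]  # (1, 5)
shp = tuple(max(s1, s2) for s1, s2 in zip(shp1, shp2))   # (2, 5)

full_data = np.broadcast_to(data, shp + (D,))
assert full_data.shape == (2, 5, 3)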
Example #21
    def _simulate_transmission(self, theta, rng, return_history=False):

        # Track log p(x, z) (to calculate the score later)
        logp_xz = 0.

        # Initial state
        if self.initial_infection:
            dice = rng.rand(self.n_individuals, self.n_strains)
            threshold = np.broadcast_to(self.overall_prevalence,
                                        (self.n_individuals, self.n_strains))
            state = (dice < threshold)
        else:
            state = np.zeros((self.n_individuals, self.n_strains),
                             dtype=bool)  # np.bool was removed from NumPy; use the builtin

        # Track state history
        if return_history:
            history = [state]

        # Time steps
        n_time_steps = int(round(self.end_time / self.delta_t))

        for i in range(n_time_steps):
            # Random numbers
            dice = rng.rand(self.n_individuals, self.n_strains)

            # Exposure
            exposure = (
                state / (self.n_individuals - 1.) *
                np.broadcast_to(1. / np.sum(state, axis=1),
                                (self.n_strains, self.n_individuals)).T)
            exposure[np.invert(np.isfinite(exposure))] = 0.
            exposure = np.sum(exposure, axis=0)
            exposure = np.broadcast_to(exposure,
                                       (self.n_individuals, self.n_strains))

            # Prevalence of each strain
            prevalence = np.broadcast_to(self.overall_prevalence,
                                         (self.n_individuals, self.n_strains))

            # Individual infection status
            any_infection = (np.sum(state, axis=1) > 0)
            any_infection = np.broadcast_to(
                any_infection, (self.n_strains, self.n_individuals)).T

            # Infection threshold
            probabilities_infected = (
                np.invert(state) *
                (any_infection * theta[1] + np.invert(any_infection)) *
                (theta[0] * exposure + self.fixed_lambda * prevalence) *
                self.delta_t + state * (1. - self.fixed_gamma * self.delta_t))

            # Update state
            state = (dice < probabilities_infected)

            # Accumulate probabilities
            log_p_this_decision = state * probabilities_infected + (
                1 - state) * (1. - probabilities_infected)
            logp_xz = logp_xz + np.sum(np.log(log_p_this_decision))

            # Track state history
            if return_history:
                history.append(state)

        if return_history:
            return logp_xz, (state, history)

        return logp_xz, state
Example #22
    def multiplyTerms(cls, terms):
        # Basically np.einsum but in log space

        assert isinstance(terms, Iterable)

        # Check if we should use the multiply for fbsData or for regular data
        fbs_data_count, non_fbs_data_count = (0, 0)
        for t in terms:
            if (isinstance(t, fbsData)):
                fbs_data_count += 1
            else:
                non_fbs_data_count += 1

        # Can't mix types
        if (not (fbs_data_count == 0 or non_fbs_data_count == 0)):
            print('fbs_data_count', fbs_data_count)
            print('non_fbs_data_count', non_fbs_data_count)
            print(terms)
            for t in terms:
                if (isinstance(t, fbsData)):
                    print('this ones good', t, type(t))
                else:
                    print('this ones bad', t, type(t))
            assert 0

        # Use the regular multiply if we don't have fbs data
        if (fbs_data_count == 0):
            return GraphHMM.multiplyTerms(terms)

        # Remove the empty terms
        terms = [t for t in terms if np.prod(t.shape) > 1]

        if (len(terms) == 0):
            return fbsData(np.array([]), 0)

        # Separate out where the feedback set axes start and get the largest fbs_axis.
        # Need to handle case where ndim of term > all fbs axes
        # terms, fbs_axes_start = list( zip( *terms ) )
        fbs_axes_start = [term.fbs_axis for term in terms]
        terms = [term.data for term in terms]

        if (max(fbs_axes_start) != -1):
            max_fbs_axis = max([
                ax if ax != -1 else term.ndim
                for ax, term in zip(fbs_axes_start, terms)
            ])

            if (max_fbs_axis > 0):
                # Pad extra dims at each term so that the fbs axes start the same way for every term
                for i, ax in enumerate(fbs_axes_start):
                    if (ax == -1):
                        for _ in range(max_fbs_axis - terms[i].ndim + 1):
                            terms[i] = terms[i][..., None]
                    else:
                        for _ in range(max_fbs_axis - ax):
                            terms[i] = np.expand_dims(terms[i], axis=ax)
        else:
            max_fbs_axis = -1

        ndim = max([len(term.shape) for term in terms])

        axes = [[i for i, s in enumerate(t.shape) if s != 1] for t in terms]

        # Get the shape of the output
        shape = np.ones(ndim, dtype=int)
        for ax, term in zip(axes, terms):
            shape[np.array(ax)] = term.squeeze().shape

        total_elts = shape.prod()
        if (total_elts > 1e8):
            assert 0, 'Don\'t do this on a cpu!  Too many terms: %d' % (
                int(total_elts))

        # Build a meshgrid out of each of the terms over the right axes
        # and sum.  Doing it this way because np.einsum doesn't work
        # for matrix multiplication in log space - there is no einsum
        # variant that adds instead of multiplying over indices.
        ans = np.zeros(shape)
        for ax, term in zip(axes, terms):

            for _ in range(ndim - term.ndim):
                term = term[..., None]

            ans += np.broadcast_to(term, ans.shape)

        return fbsData(ans, max_fbs_axis)