Example #1
    def helper(x):
        filter_lower = x < lower_bound
        x[filter_lower] = lower_bound + reflective_strength

        filter_upper = x > upper_bound
        x[filter_upper] = upper_bound - reflective_strength
        return x, np.any(filter_lower) or np.any(filter_upper)
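
A minimal sketch of the reflective-boundary pattern above, assuming scalar `lower_bound`, `upper_bound`, and `reflective_strength` captured from an enclosing scope (the values here are hypothetical):

    import numpy as np

    lower_bound, upper_bound, reflective_strength = 0.0, 1.0, 0.1

    def helper(x):
        filter_lower = x < lower_bound
        x[filter_lower] = lower_bound + reflective_strength
        filter_upper = x > upper_bound
        x[filter_upper] = upper_bound - reflective_strength
        # np.any reports whether any element was moved off a boundary
        return x, np.any(filter_lower) or np.any(filter_upper)

    x, moved = helper(np.array([-0.5, 0.5, 1.5]))
    # x -> [0.1, 0.5, 0.9], moved -> True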
Example #2
    def X_hat(self, ge_dict):
        """

        Parameters
        ----------
        ge_dict : dictionary
            Dictionary storing GE inputs and outputs. See unwrap_ge_dict for keys.

        Returns
        -------
        matrix
            N times N matrix of changes in trade flows.

        """

        w_hat, P_hat, E_hat, tau_hat = ge_dict["w_hat"], ge_dict["P_hat"], ge_dict["E_hat"], ge_dict["tau_hat"]

        if np.any(ge_dict["P_hat"] < 0):  # nudge to avoid negative trade solutions
            ge_dict["P_hat"][ge_dict["P_hat"] < 0] = .01
        if np.any(ge_dict["w_hat"] < 0):
            ge_dict["w_hat"][ge_dict["w_hat"] < 0] = .01

        A = np.power(tau_hat, -self.theta-1)
        B = np.dot(np.diag(np.power(w_hat, 1-self.beta-self.theta)), np.diag(np.power(P_hat, self.beta-self.theta)))
        C = np.dot(np.diag(np.power(P_hat, self.theta)), np.diag(E_hat))

        AB = np.dot(A, B)
        XhatT = np.dot(AB.T, C)
        Xhat = XhatT.T

        return Xhat
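
The `np.any` guard before each masked assignment skips the write when no element is negative; a standalone sketch of that nudge pattern:

    import numpy as np

    P_hat = np.array([0.9, -0.2, 1.1])
    if np.any(P_hat < 0):        # cheap test before the masked write
        P_hat[P_hat < 0] = .01   # nudge negatives to a small positive value
    # P_hat -> [0.9, 0.01, 1.1]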
Example #3
    def vData( self, U, V, node, edges=None, ndim=None ):
        # THESE MUST NOT MODIFY U OR V!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        # VERY IMPORTANT!!!!!! PROBABLY ENFORCE THIS SOMEHOW IN THE FUTURE
        V_row, V_col, V_data = V

        if( ~np.any( np.in1d( V_row, node ) ) ):
            # If the node isn't in the V object (node is a leaf)
            ans = [ np.array( [] ) ]
        elif( edges is None ):
            # Return the data over all down edges
            ans = self.vDataFromMask( V, np.in1d( V_row, node ) )
        elif( isinstance( edges, Iterable ) and len( edges ) > 0 ):
            # Return over only certain edges
            mask = np.in1d( V_row, node )
            for e in edges:
                mask &= np.in1d( V_col, e )
            ans = self.vDataFromMask( V, mask )
        elif( isinstance( edges, Iterable ) and len( edges ) == 0 ):
            # This happens when we're not passed a down edge.  In this
            # case, we should return an empty v value, not a leaf v value
            return [ np.array( [] ) ]
        else:
            # Only looking at one edge
            ans = self.vDataFromMask( V, np.in1d( V_col, edges ) )

        if( len( ans ) == 0 ):
            # If we're looking at a leaf, return all 0s
            nVals = 1 if edges is None else len( edges )
            ans = []
            for _ in range( nVals ):
                ans.append( np.array( [] ) )

        assert not any( np.any( np.isnan( v ) ) for v in ans ), ans

        return ans
Example #4
    def __init__(self, populations, baba, z_score, n_snps):
        for arr in (baba, z_score, n_snps):
            if np.any(arr.shape != np.array([len(populations)] * 4)):
                raise ValueError("array has wrong dimension")

        equiv_perms = get_permutations("ABBA", "ABBA") & get_permutations(
            "BABA", "BABA")
        opposite_perms = get_permutations("ABBA", "BABA") & get_permutations(
            "BABA", "ABBA")

        if not is_symmetric(z_score, equiv_perms) or not is_symmetric(
                z_score, opposite_perms, antisymm=True):
            raise ValueError("Z-scores not symmetric")

        if not is_symmetric(n_snps, list(it.permutations(range(4)))):
            raise ValueError("n_snps not symmetric")

        if not is_symmetric(baba, get_permutations("BABA", "BABA")):
            raise ValueError("BABA not symmetric")

        self.populations = populations
        self.baba = baba
        self.n_snps = n_snps
        self.z_score = z_score

        if np.any((self.z_score < 0) & (self.baba > self.abba)) or np.any(
            (self.z_score > 0) & (self.baba < self.abba)):
            raise ValueError("z_score should have same sign as baba-abba")
Example #5
def _expected_sfs(demography, configs, folded, error_matrices):
    if np.any(configs.sampled_n != demography.sampled_n) or np.any(
            configs.sampled_pops != demography.sampled_pops):
        raise ValueError(
            "configs and demography must have same sampled_n, sampled_pops. Use Demography.copy() or ConfigList.copy() to make a copy with different sampled_n."
        )

    vecs, idxs = configs._vecs_and_idxs(folded)

    if error_matrices is not None:
        vecs = _apply_error_matrices(vecs, error_matrices)

    vals = expected_sfs_tensor_prod(vecs, demography)

    sfs = vals[idxs['idx_2_row']]
    if folded:
        sfs = sfs + vals[idxs['folded_2_row']]

    denom = vals[idxs['denom_idx']]
    for i in (0, 1):
        denom = denom - vals[idxs[("corrections_2_denom", i)]]

    #assert np.all(np.logical_or(vals >= 0.0, np.isclose(vals, 0.0)))

    return sfs, denom
Example #6
    def flat_indices(self, folded_bool, free=None):
        # If no indices are specified, save time and return an empty array.
        if not np.any(folded_bool):
            return np.array([], dtype=int)

        free = self._free_with_default(free)
        shape_ok, err_msg = self._validate_folded_shape(folded_bool)
        if not shape_ok:
            raise ValueError(err_msg)
        if not free:
            folded_indices = self.fold(np.arange(self.flat_length(False),
                                                 dtype=int),
                                       validate_value=False,
                                       free=False)
            return folded_indices[folded_bool]
        else:
            # Every element of a particular simplex depends on all
            # the free values for that simplex.

            # The simplex is the last index, which moves the fastest.
            indices = []
            offset = 0
            free_simplex_length = self.__simplex_size - 1
            array_ranges = (range(n) for n in self.__array_shape)
            for ind in itertools.product(*array_ranges):
                if np.any(folded_bool[ind]):
                    free_inds = np.arange(offset * free_simplex_length,
                                          (offset + 1) * free_simplex_length,
                                          dtype=int)
                    indices.append(free_inds)
                offset += 1
            if len(indices) > 0:
                return np.hstack(indices)
            else:
                return np.array([])
Example #7
 def validate_folded(self, folded_val, validate_value=None):
     shape_ok, err_msg = self._validate_folded_shape(folded_val)
     if not shape_ok:
         raise ValueError(err_msg)
     if validate_value is None:
         validate_value = self.default_validate
     if validate_value:
         if np.any(folded_val < 0):
             return False, 'Some values are negative.'
         simplex_sums = np.sum(folded_val, axis=-1)
         if np.any(np.abs(simplex_sums - 1) > 1e-12):
             return False, 'The simplexes do not sum to one.'
     return True, ''
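
Both value checks reduce to vectorized `np.any` tests over the last axis; a quick illustration with a hypothetical two-row input:

    import numpy as np

    folded_val = np.array([[0.2, 0.8], [0.5, 0.5]])
    np.any(folded_val < 0)                                   # False: no negatives
    np.any(np.abs(np.sum(folded_val, axis=-1) - 1) > 1e-12)  # False: rows sum to one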
Example #8
def getIcase(par1type, par2type, checks=True):
    """Generate vector describing the combination of input parameters.

    Options for `par1type` and `par2type`:

      * `1` = total alkalinity
      * `2` = dissolved inorganic carbon
      * `3` = pH
      * `4` = partial pressure of CO2
      * `5` = fugacity of CO2
      * `6` = carbonate ion
      * `7` = bicarbonate ion
      * `8` = aqueous CO2
      * `9` = dry mole fraction of CO2

    `Icase` is `10*parXtype + parYtype` where `parXtype` is whichever of `par1type` or
    `par2type` is greater.

    Noting that a pair of any two from pCO2, fCO2, xCO2 and CO2(aq) is not allowed, the
    valid `Icase` options are:

        12, 13, 14, 15, 16, 17, 18, 19,
            23, 24, 25, 26, 27, 28, 29,
                34, 35, 36, 37, 38, 39,
                        46, 47,
                        56, 57,
                            67, 68, 69,
                                78, 79.

    The optional argument `checks` allows you to decide whether the function should test
    the validity of the entered combinations or not.
    """
    # Check validity of separate `par1type` and `par2type` inputs
    if checks:
        assert np.all(
            np.isin(par1type, [1, 2, 3, 4, 5, 6, 7, 8, 9])
            & np.isin(par2type, [1, 2, 3, 4, 5, 6, 7, 8, 9])
        ), "All `par1type` and `par2type` values must be integers from 1 to 9."
        assert ~np.any(
            par1type == par2type
        ), "`par1type` and `par2type` must be different from each other."
    # Combine inputs into `Icase` and check its validity
    Icase = np.where(
        par1type < par2type, 10 * par1type + par2type, par1type + 10 * par2type
    )
    if checks:
        assert ~np.any(
            np.isin(Icase, [45, 48, 49, 58, 59, 89])
        ), "Combinations of pCO2, fCO2, xCO2 and CO2(aq) are not valid argument pairs."
    return Icase
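
Assuming `getIcase` as defined above, a vectorized call might look like this (parameter-type values chosen for illustration):

    import numpy as np

    par1type = np.array([1, 3, 2])
    par2type = np.array([2, 2, 9])
    getIcase(par1type, par2type)   # -> array([12, 23, 29])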
Example #9
    def vData( self, U, V, node, edges=None ):
        # THESE MUST NOT MODIFY U OR V!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        # VERY IMPORTANT!!!!!! PROBABLY ENFORCE THIS SOMEHOW IN THE FUTURE
        V_row, V_col, _ = V

        if( self.inFeedbackSet( node, is_partial_graph_index=True ) ):
            # This is going to be empty because by construction,
            # if a node is in the fbs, all the information from
            # going down the down edges can be gained by going
            # up an up edge.
            if( edges is None ):
                return []
            elif( isinstance( edges, Iterable ) ):
                return []
            else:
                return fbsData( np.array( [] ), -1 )

        if( ~np.any( np.in1d( V_row, node ) ) ):
            # If the node isn't in the V object (node is a leaf)
            ans = [ fbsData( np.array( [] ), -1 ) ]
        elif( edges is None ):
            # Return the data over all down edges
            ans = self.vDataFromMask( V, np.in1d( V_row, node ) )
        elif( isinstance( edges, Iterable ) and len( edges ) > 0 ):
            # Return over only certain edges
            mask = np.in1d( V_row, node )
            for e in edges:
                mask &= np.in1d( V_col, e )
            ans = self.vDataFromMask( V, mask )
        elif( isinstance( edges, Iterable ) and len( edges ) == 0 ):
            # This happens when we're not passed a down edge.  In this
            # case, we should return an empty v value, not a leaf v value
            return [ fbsData( np.array( [] ), -1 ) ]
        else:
            # Only looking at one edge
            ans = self.vDataFromMask( V, np.in1d( V_col, edges ) )

        if( len( ans ) == 0 ):
            # If we're looking at a leaf, return all 0s
            nVals = 1 if edges is None else len( edges )
            ans = []
            for _ in range( nVals ):
                ans.append( fbsData( np.array( [] ), -1 ) )

        for v in ans:
            data = v.data
            assert not np.any( np.isnan( data ) ), data

        return ans
Example #10
def simple_hmc(U, K, grad_U, mass, inv_mass, iters, q_0, integrator):
    D = q_0.shape[1]
    q_hist = []
    q_hist.append(q_0.reshape(D, ))
    accepted_num = 0
    cur_q = q_0.copy()

    for i in range(iters):

        nxt_q = integrator(U,
                           K,
                           grad_U,
                           cur_q,
                           mass,
                           inv_mass,
                           L=200,
                           eps=0.05)

        if np.any(nxt_q != cur_q):
            accepted_num += 1

            q_hist.append(np.asarray(nxt_q.reshape(D, )))
        cur_q = nxt_q

        if i % 50 == 0:
            print("progressed {}%".format(i * 100 / iters))

    print("The acceptance rate is {}".format(accepted_num / iters))
    #corrplot(np.asarray(q_hist))
    return q_hist
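
The acceptance test `np.any(nxt_q != cur_q)` treats an unchanged position as a rejected proposal; in isolation:

    import numpy as np

    cur_q = np.zeros((1, 3))
    nxt_q = cur_q.copy()
    np.any(nxt_q != cur_q)   # False -> proposal rejected (position unchanged)
    nxt_q[0, 1] = 0.3
    np.any(nxt_q != cur_q)   # True  -> proposal accepted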
Example #11
def adaptive_hmc(U, K, grad_U, mass, inv_mass, iters, q_0, integrator):
    D = q_0.shape[1]
    q_hist = []
    q_hist.append(q_0.reshape(D, ))
    accepted_num = 0
    cur_q = q_0.copy()

    for i in range(iters):
        nxt_q = integrator(U,
                           K,
                           grad_U,
                           cur_q,
                           mass,
                           inv_mass,
                           L=200,
                           eps=0.05)

        if np.any(nxt_q != cur_q):
            accepted_num += 1
            q_hist.append(np.asarray(nxt_q.reshape(D, )))
        cur_q = nxt_q
        if i % 50 == 0:
            print("progressed {}%".format(i * 100 / iters))
        if i % 1000 == 0 and len(q_hist) > 200:
            # every 1000 iterations, re-estimate the covariance of the estimated target
            mass = adaptive_metric(q_hist[-200:])
            inv_mass = np.linalg.inv(mass + np.identity(D) * 1e-5)
    print("The acceptance rate is {}".format(accepted_num / iters))
    #corrplot(np.asarray(q_hist))
    return q_hist
Example #12
def pca_with_imputation(D, datas, masks, num_iters=20):
    if isinstance(datas, (list, tuple)) and isinstance(masks, (list, tuple)):
        data = np.concatenate(datas)
        mask = np.concatenate(masks)

    if np.any(~mask):
        # Fill in missing data with mean to start
        fulldata = data.copy()
        for n in range(fulldata.shape[1]):
            fulldata[~mask[:, n], n] = fulldata[mask[:, n], n].mean()

        for itr in range(num_iters):
            # Run PCA on imputed data
            pca = PCA(D)
            x = pca.fit_transform(fulldata)

            # Fill in missing data with PCA predictions
            pred = pca.inverse_transform(x)
            fulldata[~mask] = pred[~mask]
    else:
        pca = PCA(D)
        x = pca.fit_transform(data)

    # Unpack xs
    xs = np.split(x, np.cumsum([len(data) for data in datas])[:-1])
    assert len(xs) == len(datas)
    assert all([x.shape[0] == data.shape[0] for x, data in zip(xs, datas)])

    return pca, xs
Example #13
def wishart_updates(x, mu, mu2, e_z, prior_mu, prior_inv_wishart_scale, prior_wishart_dof, kappa):
    k_approx = np.shape(mu)[0]

    prior_mu_tile = np.tile(prior_mu, (k_approx, 1))
    cross_term1 = np.dot(mu.T, prior_mu_tile)
    prior_normal_term = kappa * (np.sum(mu2, axis = 0)\
                                - cross_term1 - cross_term1.T \
                                + k_approx * np.outer(prior_mu, prior_mu))

    predictive = np.dot(e_z, mu)
    outer_mu = np.array([np.outer(mu[i,:], mu[i,:]) for i in range(k_approx)])
    z_sum = np.sum(e_z, axis = 0)

    cross_term2 = np.dot(x.T, predictive)
    data_lh_term = np.dot(x.T, x) - cross_term2 - cross_term2.T\
                    + np.einsum('kij, k -> ij', outer_mu, z_sum)

    inv_scale_update = prior_inv_wishart_scale + prior_normal_term + data_lh_term
    scale_update = np.linalg.inv(inv_scale_update)

    dof_update = prior_wishart_dof + np.shape(x)[0] + k_approx

    # there's some numerical issue in which the update is not exactly symmetric
    if np.any(np.abs(scale_update - scale_update.T) >= 1e-10):
        print('wishart scale not symmetric?')
        print(scale_update - scale_update.T)

    scale_update = 0.5 * (scale_update + scale_update.T)

    return scale_update, np.array([dof_update])
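
The closing check-and-symmetrize step is a standard fix for floating-point asymmetry; a self-contained sketch on a hypothetical matrix:

    import numpy as np

    A = np.linalg.inv(np.random.rand(3, 3) + 3 * np.identity(3))
    if np.any(np.abs(A - A.T) >= 1e-10):
        print('matrix not symmetric?')  # numerical asymmetry detected
    A = 0.5 * (A + A.T)                 # force exact symmetry
    assert np.all(A == A.T)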
Example #14
    def preprocessData(self, data_graphs):
        super(_graphHMMMixin, self).updateGraphs(data_graphs)

        self.possible_latent_states = {}

        total_nodes = 0
        for data_graph in data_graphs:
            for node, state in data_graph.possible_latent_states.items():
                self.possible_latent_states[total_nodes + node] = state
            total_nodes += len(data_graph.nodes)

        ys = []
        for graph in data_graphs:
            ys.extend([
                graph.data[node] if graph.data[node] is not None else np.nan
                for node in graph.nodes
            ])

        self.ys = ys

        if (hasattr(self, 'emission_dist')):
            self.L_set = True
            ys = np.array(ys).T
            self.L = np.array([
                self.emission_dist[:, y] if not np.any(np.isnan(y)) else
                np.zeros_like(self.emission_dist[:, 0]) for y in ys
            ]).sum(axis=0).T
Example #15
    def preprocessData(self, data_graphs):

        super().updateGraphs(data_graphs)

        self.possible_latent_states = {}

        total_nodes = 0
        for data_graph, fbs in data_graphs:
            for node, state in data_graph.possible_latent_states.items():
                self.possible_latent_states[total_nodes + node] = state
            total_nodes += len(data_graph.nodes)

        ys = []
        for graph, fbs in data_graphs:
            ys.extend([
                graph.data[node] if graph.data[node] is not None else np.nan
                for node in graph.nodes
            ])

        self.ys = np.array(ys)

        if (hasattr(self, 'emission_dist')):
            self.L_set = True
            ys = np.array(ys).T
            assert ys.ndim == 2, 'If there is only 1 measurement, add an extra dim!'
            self.L = np.array([
                self.emission_dist[:, y] if not np.any(np.isnan(y)) else
                np.zeros_like(self.emission_dist[:, 0]) for y in ys
            ]).sum(axis=0).T
Example #16
    def updateNatParams(self,
                        log_initial_dist,
                        log_transition_dist,
                        log_emission_dist,
                        data_graphs=None,
                        check_parameters=True,
                        compute_marginal=True):

        if (check_parameters):
            self.parameterCheck(log_initial_dist, log_transition_dist,
                                log_emission_dist)

        self.K = log_initial_dist.shape[0]
        self.pi0 = log_initial_dist
        self.pis = {}
        for log_dist in log_transition_dist:
            ndim = log_dist.ndim
            self.pis[ndim] = log_dist

        self.emission_dist = log_emission_dist
        self.L_set = False

        if (data_graphs is not None):
            self.preprocessData(data_graphs)

        self.clearCache()

        if hasattr(self, 'ys') and not self.L_set:
            self.L_set = True
            ys = np.array(self.ys).T
            assert ys.ndim == 2, 'If there is only 1 measurement, add an extra dim!'
            self.L = np.array([
                self.emission_dist[:, y] if not np.any(np.isnan(y)) else
                np.zeros_like(self.emission_dist[:, 0]) for y in ys
            ]).sum(axis=0).T
Example #17
    def _check_bad_constraint(self, y_hat, slack, old_constraints):
        if slack < 1e-5:
            return True
        y_hat_plain = unwrap_pairwise(y_hat)

        already_active = np.any([
            True for y__, _, _ in old_constraints
            if np.all(y_hat_plain == unwrap_pairwise(y__))
        ])
        if already_active:
            return True

        # "smart" stopping criterion
        # check if most violated constraint is more violated
        # than previous ones by more than eps.
        # If it is less violated, inference was wrong/approximate
        if self.check_constraints:
            for con in old_constraints:
                # compute slack for old constraint
                slack_tmp = max(con[2] - np.dot(self.w, con[1]), 0)
                if self.verbose > 5:
                    print("slack old constraint: %f" % slack_tmp)
                # if slack of new constraint is smaller or not
                # significantly larger, don't add constraint.
                # if smaller, complain about approximate inference.
                if slack - slack_tmp < -1e-5:
                    if self.verbose > 0:
                        print("bad inference: %f" % (slack_tmp - slack))
                    if self.break_on_bad:
                        raise ValueError("bad inference: %f" %
                                         (slack_tmp - slack))
                    return True

        return False
Example #18
def renyii(pk0, pk1, a):
    """
    Compute the Rényi divergence between two Gaussian distributions.
    """

    # Check dimensions
    assert (pk0.S.shape == pk1.S.shape)
    # Check diagonal
    p0S_is_diag = np.all(np.diag(np.diag(pk0.S)) == pk0.S)
    p1S_is_diag = np.all(np.diag(np.diag(pk1.S)) == pk1.S)

    Sa = (1 - a) * pk0.S + a * pk1.S
    # make sure eigenvalues are positive
    if np.any(~np.isfinite(Sa)):
        print(Sa)
    w, v = np.linalg.eig(Sa)
    #assert(np.all(w > 0))
    assert np.linalg.det(Sa) != 0
    #if np.linalg.det(Sa) == 0:
    #  print Sa
    #  return float('Inf')

    dm = pk1.m - pk0.m
    # Use precise computation for diagonal covariance matrices
    if p0S_is_diag and p1S_is_diag:
        r = a / 2. * np.dot(np.dot(dm, np.linalg.inv(Sa)), dm) + \
            (np.sum(np.log(np.diag(Sa))) - (1-a)*np.sum(np.log(np.diag(pk0.S))) - a*np.sum(np.log(np.diag(pk1.S)))) \
                / (1 - a) / 2.
    else:
        r = a / 2. * np.dot(np.dot(dm, np.linalg.inv(Sa)), dm) + \
            (np.log(np.linalg.det(Sa)) - (1-a)*np.log(np.linalg.det(pk0.S)) - a*np.log(np.linalg.det(pk1.S))) \
                / (1 - a) / 2.
    #assert(r > -1e-10)
    return max(r, 0)
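
The diagonality test `np.all(np.diag(np.diag(S)) == S)` rebuilds the matrix from its diagonal and compares; standalone:

    import numpy as np

    S = np.diag([1.0, 2.0, 3.0])
    np.all(np.diag(np.diag(S)) == S)   # True: off-diagonals are all zero
    S[0, 1] = 0.5
    np.all(np.diag(np.diag(S)) == S)   # False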
Example #19
def build_detection_coadd(sed, bg_rms, observation):
    """Build a channel weighted coadd to use for source detection

    Parameters
    ----------
    sed: array
        SED at the center of the source.
    bg_rms: array
        Background RMS in each channel in observation.
    observation: `~scarlet.observation.Observation`
        Observation to use for the coadd.

    Returns
    -------
    detect: array
        2D image created by weighting all of the channels by SED
    bg_cutoff: float
        The minimum value in `detect` to include in detection.
    """
    C = len(sed)
    if np.any(bg_rms <= 0):
        raise ValueError("bg_rms must be greater than zero in all channels")

    positive = [c for c in range(C) if sed[c] > 0]
    positive_img = [observation.images[c] for c in positive]
    positive_bgrms = np.array([bg_rms[c] for c in positive])
    weights = np.array([sed[c] / bg_rms[c] ** 2 for c in positive])
    jacobian = np.array([sed[c] ** 2 / bg_rms[c] ** 2 for c in positive]).sum()
    detect = np.einsum("i,i...", weights, positive_img) / jacobian

    # thresh is multiple above the rms of detect (weighted variance across channels)
    bg_cutoff = np.sqrt((weights ** 2 * positive_bgrms ** 2).sum()) / jacobian
    return detect, bg_cutoff
Example #20
    def P_hat(self, ge_dict):
        """

        Parameters
        ----------
        ge_dict : dictionary
            Dictionary storing GE inputs and outputs. See unwrap_ge_dict for keys.

        Returns
        -------
        vector
            N times 1 vector of price changes

        """

        w_hat, P_hat, tau_hat = ge_dict["w_hat"], ge_dict["P_hat"], ge_dict["tau_hat"]

        if np.any(ge_dict["P_hat"] < 0):  # nudge to avoid negative trade solutions
            ge_dict["P_hat"][ge_dict["P_hat"] < 0] = .01

        A = self.lambda_pc * np.power(tau_hat,-self.theta)
        b = np.power(w_hat, 1-self.beta-self.theta) * np.power(P_hat, self.beta-self.theta)

        Phat_int = np.dot(A, b)
        Phat = np.power(Phat_int, -1/self.theta)

        return Phat
Example #21
    def parameter_optimization(self, ctl, iters=100):
        # worst-case parameter optimization
        beta = 1e16 * np.ones((1, ))
        beta, _, _ = self.parameter_dual_optimization(beta, ctl, iters=iters)

        agcost = self.parameter_augment_cost(beta)

        # initialize the adversarial xdist. with the policy xdist.
        q_xdist = deepcopy(self.xdist)

        # first iteration to establish initial conv_kl
        param, _, _ = self.parameter_backward_pass(beta, agcost, q_xdist, ctl)
        p_xdist, _, _ = self.cubature_forward_pass(ctl, param)

        # convergence of inner loop
        xdist_kl = self.gaussians_kldiv(p_xdist.mu, p_xdist.sigma, q_xdist.mu,
                                        q_xdist.sigma, self.dm_state,
                                        self.nb_steps + 1)
        while np.any(xdist_kl > 1e-3):
            param, _, _ = self.parameter_backward_pass(beta, agcost, q_xdist,
                                                       ctl)
            p_xdist, _, _ = self.cubature_forward_pass(ctl, param)

            # check convergence of loop
            xdist_kl = self.gaussians_kldiv(p_xdist.mu, p_xdist.sigma,
                                            q_xdist.mu, q_xdist.sigma,
                                            self.dm_state, self.nb_steps + 1)

            # interpolate between distributions
            q_xdist.mu, q_xdist.sigma = self.interp_gauss_kl(
                q_xdist.mu, q_xdist.sigma, p_xdist.mu, p_xdist.sigma, 1e-1)

        return param, beta
Example #22
def create_pf():
    u = np.linspace(-1.2 / np.sqrt(2), 1.2 / np.sqrt(2), endpoint=True, num=60)
    v = np.linspace(-1.2 / np.sqrt(2), 1.2 / np.sqrt(2), endpoint=True, num=60)
    U, V = np.meshgrid(u, v)
    u, v = U.flatten(), V.flatten()
    uv = np.stack([u, v]).T
    print(f"uv.shape={uv.shape}")
    ls = []
    for x in uv:
        # generate solutions on the Pareto front:
        # x = np.array([x1, x1])

        f, f_dx = concave_fun_eval(x)
        ls.append(f)
    ls = np.stack(ls)

    po, pf = [], []
    for i, x in enumerate(uv):
        l_i = ls[i]
        if np.any(np.all(l_i > ls, axis=1)):
            continue
        else:
            po.append(x)
            pf.append(l_i)

    po = np.stack(po)
    pf = np.stack(pf)
    pf_tri = mtri.Triangulation(po[:, 0], po[:, 1])
    tri = mtri.Triangulation(u, v)

    return pf, pf_tri, ls, tri
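
The filter `np.any(np.all(l_i > ls, axis=1))` marks a point as strictly dominated when some other row beats it in every objective; in miniature:

    import numpy as np

    ls = np.array([[1., 1.], [2., 2.], [1., 2.]])
    [bool(np.any(np.all(l > ls, axis=1))) for l in ls]   # [False, True, False]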
Example #23
def gamma_logpdf(x, shape, rate):
    if np.any(x <= 0):
        return -np.inf
    return (  np.log(rate) * shape
            - sp.special.gammaln(shape)
            + np.log(x) * (shape-1)
            - rate * x)
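
Assuming the function above (with `import scipy as sp` and `import scipy.special`), it can be sanity-checked against `scipy.stats.gamma`, which parameterizes by scale, i.e. the inverse rate:

    import numpy as np
    from scipy import stats

    x = np.array([0.5, 2.0])
    shape, rate = 2.0, 1.5
    np.allclose(gamma_logpdf(x, shape, rate),
                stats.gamma.logpdf(x, a=shape, scale=1.0 / rate))   # True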
Example #24
    def _wrapped(*args, **kwargs):
        """Wrapped NumPy function"""

        tensor_kwargs = {}

        if "requires_grad" in kwargs:
            tensor_kwargs["requires_grad"] = kwargs.pop("requires_grad")
        else:
            tensor_args = list(extract_tensors(args))

            if tensor_args:
                # Unless the user specifies otherwise, if all tensors in the argument
                # list are non-trainable, the output is also non-trainable.
                # Equivalently: if any tensor is trainable, the output is also trainable.
                # NOTE: Use of Python's ``any`` results in an infinite recursion,
                # and I'm not sure why. Using ``np.any`` works fine.
                tensor_kwargs["requires_grad"] = _np.any([i.requires_grad for i in tensor_args])

        # evaluate the original object
        res = obj(*args, **kwargs)

        if isinstance(res, _np.ndarray):
            # only if the output of the object is a ndarray,
            # then convert to a PennyLane tensor
            res = tensor(res, **tensor_kwargs)

        return res
Example #25
def _pHfromTAVX(TA, VX, totals, k_constants, initialfunc, deltafunc):
    """Calculate pH from total alkalinity and DIC or one of its components using a
    Newton-Raphson iterative method.

    Although it is coded for H on the total pH scale, for the pH values occurring in
    seawater (pH > 6) it will be equally valid on any pH scale (H terms negligible) as
    long as the K Constants are on that scale.

    Based on the CalculatepHfromTA* functions, version 04.01, Oct 96, by Ernie Lewis.
    """
    # First guess inspired by M13/OE15, added v1.3.0:
    pH = initialfunc(TA, VX, totals["TB"], k_constants["K1"],
                     k_constants["K2"], k_constants["KB"])
    deltapH = 1.0 + pHTol
    while np.any(np.abs(deltapH) >= pHTol):
        pHdone = np.abs(
            deltapH) < pHTol  # check which rows don't need updating
        deltapH = deltafunc(pH, TA, VX, totals, k_constants)  # the pH jump
        # To keep the jump from being too big:
        abs_deltapH = np.abs(deltapH)
        sign_deltapH = np.sign(deltapH)
        # Jump by 1 instead if `abs_deltapH` > 5
        deltapH = np.where(abs_deltapH > 5.0, sign_deltapH, deltapH)
        # Jump by 0.5 instead if 0.5 < `abs_deltapH` <= 5
        deltapH = np.where(
            (abs_deltapH > 0.5) & (abs_deltapH <= 5.0),
            0.5 * sign_deltapH,
            deltapH,
        )  # assumes that once we're within 1 of the correct pH, we will converge
        pH = np.where(pHdone, pH,
                      pH + deltapH)  # only update rows that need it
    return pH
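
The two `np.where` clamps cap the Newton step at 1 when `|deltapH| > 5` and at 0.5 when it lies in (0.5, 5]; isolated:

    import numpy as np

    deltapH = np.array([-7.0, 0.8, 0.2])
    abs_deltapH = np.abs(deltapH)
    sign_deltapH = np.sign(deltapH)
    deltapH = np.where(abs_deltapH > 5.0, sign_deltapH, deltapH)
    deltapH = np.where((abs_deltapH > 0.5) & (abs_deltapH <= 5.0),
                       0.5 * sign_deltapH, deltapH)
    # deltapH -> [-1. ,  0.5,  0.2]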
Example #26
    def _update_params(self, x, result):
        self.fevals = self.fevals + 1

        # Autograd's ArrayBox behaves differently from NumPy; this fixes it.
        result_copy = remove_arraybox(result)
        x_copy = remove_arraybox(x)

        assert not np.any(np.isnan(result_copy)), "X out of domain"

        found_best = np.all(result_copy <= self.best_f)

        if self._has_eqc:
            if found_best:
                self.best_z = x_copy
            x_copy = np.squeeze(
                self._null_space_feasible_matrix @ np.reshape(x_copy,
                                                              (-1, 1)) +
                self._feasible_vector)

        self.all_evals += [result_copy]
        self.all_x += [x_copy]

        if found_best:
            self._best_x = x_copy
            self._best_f = result_copy
            self.all_best_x += [self.best_x]
            self.all_best_f += [self.best_f]
        return result
Example #27
 def ineq_func(x):
     result = []
     for f in inequality:
         u = f(x)
         #u = remove_arraybox(u)
         size_u = np.size(np.shape(u))
         if size_u == 2:
             result += [-np.inf] if np.any(
                 u >= 0) else [np.log(np.linalg.det(-u))]
         elif size_u == 1:
             result += [-np.inf] if np.any(
                 u >= 0) else [np.sum(np.log(-u))]
         else:
             result += [-np.inf] if u >= 0 else [np.log(-u)]
         #result += [-np.inf] if np.any(u >= 0) else [np.sum(np.log(-u))]
     return result
Example #28
def surgery(Z):
    empties = np.isclose(Z.sum(axis=0), 0)
    Q, R = Z.shape
    if np.any(empties):
        print('!')
    while np.any(empties):
        for r, empty in enumerate(empties):
            if empty:
                # select a nonempty cluster and split it
                c = np.random.choice(np.where(np.logical_not(empties))[0])
                for q in range(Q):
                    if np.random.binomial(1, 0.5):
                        Z[q, r] = Z[q, c]
                        Z[q, c] = 0
        empties = np.isclose(Z.sum(axis=0), 0)
    return Z
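
A toy run, assuming a hard assignment matrix whose second and third clusters start empty (the loop keeps splitting until no column is empty; termination is probabilistic but near-certain):

    import numpy as np

    np.random.seed(0)
    Z = np.zeros((6, 3))
    Z[:, 0] = 1.0                 # all mass starts in cluster 0
    Z = surgery(Z)
    assert not np.any(np.isclose(Z.sum(axis=0), 0))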
Example #29
def expand(U, wires, num_wires):
    r"""Expand a multi-qubit operator into a full system operator.

    Args:
        U (array): :math:`2^n \times 2^n` matrix where n = len(wires).
        wires (Sequence[int]): Target subsystems (order matters! the
            left-most Hilbert space is at index 0).
        num_wires (int): total number of wires in the system.

    Returns:
        array: :math:`2^N\times 2^N` matrix. The full system operator.
    """
    if num_wires == 1:
        # total number of wires is 1, simply return the matrix
        return U

    N = num_wires
    wires = np.asarray(wires)

    if np.any(wires < 0) or np.any(
            wires >= N) or len(set(wires)) != len(wires):
        raise ValueError(
            "Invalid target subsystems provided in 'wires' argument.")

    if U.shape != (2**len(wires), 2**len(wires)):
        raise ValueError(
            "Matrix parameter must be of size (2**len(wires), 2**len(wires))")

    # generate N qubit basis states via the cartesian product
    tuples = np.array(list(itertools.product([0, 1], repeat=N)))

    # wires not acted on by the operator
    inactive_wires = list(set(range(N)) - set(wires))

    # expand U to act on the entire system
    U = np.kron(U, np.identity(2**len(inactive_wires)))

    # move active wires to beginning of the list of wires
    rearranged_wires = np.array(list(wires) + inactive_wires)

    # convert to computational basis
    # i.e., converting the list of basis state bit strings into
    # a list of decimal numbers that correspond to the computational
    # basis state. For example, [0, 1, 0, 1, 1] = 2^3+2^1+2^0 = 11.
    perm = np.ravel_multi_index(tuples[:, rearranged_wires].T, [2] * N)

    # permute U to take into account rearranged wires
    return U[:, perm][perm]
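
Assuming `expand` as above, lifting a Pauli-X acting on wire 1 of a 2-wire system should agree with the direct Kronecker construction:

    import numpy as np

    X = np.array([[0., 1.], [1., 0.]])
    np.allclose(expand(X, [1], 2), np.kron(np.identity(2), X))   # True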
Example #30
    def __init__(self, frame, sky_coord, observations):
        """Source intialized with a single pixel

        Parameters
        ----------
        frame: `~scarlet.Frame`
            The frame of the model
        sky_coord: tuple
            Center of the source
        observations: instance or list of `~scarlet.Observation`
            Observation(s) to initialize this source
        """
        C, Ny, Nx = frame.shape
        self.center = np.array(frame.get_pixel(sky_coord), dtype="float")

        # initialize SED from sky_coord
        try:
            iter(observations)
        except TypeError:
            observations = [observations]

        # determine initial SED from peak position
        # SED in the frame for source detection
        seds = []
        for obs in observations:
            _sed = get_psf_sed(sky_coord, obs, frame)
            seds.append(_sed)
        sed = np.concatenate(seds).reshape(-1)

        if np.any(sed <= 0):
            # If the flux in all channels is <= 0,
            # the new sed will be filled with NaN values,
            # which will cause the code to crash later
            msg = "Zero or negative SED {} at y={}, x={}".format(
                sed, *sky_coord)
            if np.all(sed <= 0):
                logger.warning(msg)
            else:
                logger.info(msg)

        # set up parameters
        sed = Parameter(
            sed,
            name="sed",
            step=partial(relative_step, factor=1e-2),
            constraint=PositivityConstraint(),
        )
        center = Parameter(self.center, name="center", step=1e-1)

        # define bbox
        pixel_center = tuple(np.round(center).astype("int"))
        front, back = 0, C
        bottom = pixel_center[0] - frame.psf.shape[1] // 2
        top = pixel_center[0] + frame.psf.shape[1] // 2
        left = pixel_center[1] - frame.psf.shape[2] // 2
        right = pixel_center[1] + frame.psf.shape[2] // 2
        bbox = Box.from_bounds((front, back), (bottom, top), (left, right))

        super().__init__(frame, sed, center, self._psf_wrapper, bbox=bbox)
Example #31
 def standardToNat( cls, pi ):
     if( np.any( np.isclose( pi, 0.0 ) ) ):
         n = np.empty_like( pi )
         n[ ~np.isclose( pi, 0.0 ) ] = np.log( pi[ ~np.isclose( pi, 0.0 ) ] )
         n[ np.isclose( pi, 0.0 ) ] = -np.inf
     else:
         n = np.log( pi )
     return ( n, )
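
The branch masks out exact zeros so `np.log` never sees them and no warning is raised; the same pattern outside the class:

    import numpy as np

    pi = np.array([0.5, 0.5, 0.0])
    n = np.empty_like(pi)
    zero = np.isclose(pi, 0.0)
    n[~zero] = np.log(pi[~zero])   # safe: only strictly positive entries
    n[zero] = -np.inf              # convention: log(0) = -inf
    # n -> [-0.693..., -0.693..., -inf]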
Example #32
def load_stamps_and_samps(gstamps):

    # gather all stamp files!
    print "loading available stamps"
    gstamps.sort()
    stamp_ids = extract_stamp_ids(gstamps)
    stamps    = stamps2array(gstamps)

    # gather all samps!
    print "loading MCMC sample files"
    gal_chain_template = 'samp_cache/run5/gal_samps_stamp_%s_chain_0.bin'
    gal_chain_files = [gal_chain_template%sid for sid in stamp_ids]
    chain_mask      = np.zeros(len(stamp_ids), dtype=bool)  # keep track of the ones that actually have samples
    Nselect = 500
    Nskip   = 5
    samps  = []
    for i,chain in enumerate(gal_chain_files):
        print "Galaxy", os.path.basename(chain)

        ## 0. load four chains from disk
        src_samp_chains, ll_samp_chains, eps_samp_chains = \
            io.load_mcmc_chains(chain, num_chains=4)

        if len(src_samp_chains) > 0:
            th            = rec2matrix( np.concatenate(src_samp_chains))
            # make sure there are no infinite samples
            if np.any(np.isinf(th)) or np.any(np.isnan(th)):
                continue
            chain_mask[i] = True
            samps.append(th[-Nselect*Nskip:-1:Nskip, :])

    print "There are %d chains with either missing, zeros, or otherwise unsuitable samples"%((~chain_mask).sum())

    # samps and stamps now aligned
    stamps = stamps[chain_mask, :, :, :]
    samps  = np.array(samps)
    return stamps, samps
Example #33
    #####################################################################
    # fit model to galaxy shape parameters
    # 
    #   re  - [0, infty], transformation log
    #   ab  - [0, 1], transformation log (ab / (1 - ab))
    #   phi - [0, 180], transformation log (phi / (180 - phi))
    #
    ######################################################################
    print "fitting galaxy shape"
    shape_df = np.vstack([ coadd_df[['expRad_r', 'expAB_r', 'expPhi_r']].values,
                           coadd_df[['deVRad_r', 'deVAB_r', 'deVPhi_r']].values ])[::3,:]
    shape_df[:,0] = np.log(shape_df[:,0])
    shape_df[:,1] = np.log(shape_df[:,1]) - np.log(1.-shape_df[:,1])
    shape_df[:,2] = shape_df[:,2] * (np.pi / 180.)

    bad_idx = np.any(np.isinf(shape_df), axis=1)
    shape_df = shape_df[~bad_idx,:]
    gal_re_mog = fit_mog(shape_df[:,0], mog_class = GalRadiusMoG, max_comps=50)
    gal_ab_mog = fit_mog(shape_df[:,1], mog_class = GalAbMoG, max_comps=50)

    with open('gal_re_mog.pkl', 'wb') as f:
        pickle.dump(gal_re_mog, f)

    with open('gal_ab_mog.pkl', 'wb') as f:
        pickle.dump(gal_ab_mog, f)


    #####################################################################
    # fit star => galaxy proposal distributions
    #
    #   re  - [0, infty], transformation log
Example #34
 def to_log_nanomaggies(mags):
     fluxes  = np.log(mags2nanomaggies(mags))
     bad_idx = np.any(np.isinf(fluxes), axis=1)
     return fluxes[~bad_idx,:]
Example #35
def polyinterp(points, doPlot=None, xminBound=None, xmaxBound=None):
    """ polynomial interpolation
    Parameters
    ----------
    points: shape(pointNum, 3), three columns represents x, f, g
    doPlot: set to 1 to plot, default 0
    xminBound: min value that brackets minimum (default: min of points)
    xmaxBound: max value that brackets maximum (default: max of points)
    
    set f or g to sqrt(-1)=1j if they are not known
    the order of the polynomial is the number of known f and g values minus 1

    Returns
    -------
    minPos:
    fmin:
    """
    
    if doPlot is None:
        doPlot = 0

    nPoints = points.shape[0]
    order = np.sum(np.imag(points[:, 1:3]) == 0) -1
    
    # code for most common case: cubic interpolation of 2 points
    if nPoints == 2 and order == 3 and doPlot == 0:
        [minVal, minPos] = [np.min(points[:,0]), np.argmin(points[:,0])]
        notMinPos = 1 - minPos
        d1 = points[minPos,2] + points[notMinPos,2] - 3*(points[minPos,1]-\
                points[notMinPos,1])/(points[minPos,0]-points[notMinPos,0])

        t_d2 =  d1**2 - points[minPos,2]*points[notMinPos,2]
        if t_d2 > 0:
            d2 = np.sqrt(t_d2)
        else:
            d2 = np.sqrt(-t_d2) * 1j
        if np.isreal(d2):
            t = points[notMinPos,0] - (points[notMinPos,0]-points[minPos,0])*\
                    ((points[notMinPos,2]+d2-d1)/(points[notMinPos,2]-\
                    points[minPos,2]+2*d2))
            minPos = np.min([np.max([t,points[minPos,0]]), points[notMinPos,0]])
        else:
            minPos = np.mean(points[:,0])
        fmin = minVal
        return (minPos, fmin)
    
    xmin = np.min(points[:,0])
    xmax = np.max(points[:,0])

    # compute bounds of interpolation area
    if xminBound is None:
        xminBound = xmin
    if xmaxBound is None:
        xmaxBound = xmax

    # constraints based on available function values
    A = np.zeros((0, order+1))
    b = np.zeros((0, 1))
    for i in range(nPoints):
        if np.imag(points[i,1]) == 0:
            constraint = np.zeros(order+1)
            for j in np.arange(order,-1,-1):
                constraint[order-j] = points[i,0]**j
            A = np.vstack((A, constraint))
            b = np.append(b, points[i,1])
    
    # constraints based on available derivatives
    for i in range(nPoints):
        if np.isreal(points[i,2]):
            constraint = np.zeros(order+1)
            for j in range(1,order+1):
                constraint[j-1] = (order-j+1)* points[i,0]**(order-j)
            A = np.vstack((A, constraint))
            b = np.append(b,points[i,2])
    
    # find interpolating polynomial
    params = np.linalg.solve(A, b)

    # compute critical points
    dParams = np.zeros(order)
    for i in range(params.size-1):
        dParams[i] = params[i] * (order-i)
    
    if np.any(np.isinf(dParams)):
        cp = np.concatenate((np.array([xminBound, xmaxBound]), points[:,0]))
    else:
        cp = np.concatenate((np.array([xminBound, xmaxBound]), points[:,0], \
                np.roots(dParams)))
    
    # test critical points
    fmin = np.inf
    minPos = (xminBound + xmaxBound)/2.
    for xCP in cp:
        if np.imag(xCP) == 0 and xCP >= xminBound and xCP <= xmaxBound:
            fCP = np.polyval(params, xCP)
            if np.imag(fCP) == 0 and fCP < fmin:
                minPos = np.double(np.real(xCP))
                fmin = np.double(np.real(fCP))
    
    # plot situation (omit this part for now since we are not going to use it
    # anyway)

    return (minPos, fmin)
Example #36
def isLegal(v):
    return not np.any(np.imag(v)) and not np.any(np.isnan(v)) and \
            not np.any(np.isinf(v))
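
Given the checks above, `isLegal` accepts only purely real, finite, NaN-free arrays:

    import numpy as np

    isLegal(np.array([1.0, 2.0]))       # True
    isLegal(np.array([1.0, np.nan]))    # False
    isLegal(np.array([1.0, np.inf]))    # False
    isLegal(np.array([1.0 + 1.0j]))     # False (nonzero imaginary part)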