Example #1
    def test_optimize_locs_width(self):
        """
        Test the function optimize_locs_width(..). Make sure it does not return 
        unusual results.
        """
        # sample source
        n = 600
        dim = 2
        seed = 17

        ss = data.SSGaussMeanDiff(dim, my=1.0)
        #ss = data.SSGaussVarDiff(dim)
        #ss = data.SSSameGauss(dim)
        # ss = data.SSBlobs()
        dim = ss.dim()

        dat = ss.sample(n, seed=seed)
        tr, te = dat.split_tr_te(tr_proportion=0.5, seed=10)
        xy_tr = tr.stack_xy()

        # initialize test_locs by drawing from a Gaussian fitted to the data
        # number of test locations
        J = 3
        V0 = util.fit_gaussian_draw(xy_tr, J, seed=seed + 1)
        med = util.meddistance(xy_tr, subsample=1000)
        gwidth0 = med**2
        assert gwidth0 > 0

        # optimize
        V_opt, gw2_opt, opt_info = tst.GaussUMETest.optimize_locs_width(
            tr,
            V0,
            gwidth0,
            reg=1e-2,
            max_iter=100,
            tol_fun=1e-5,
            disp=False,
            locs_bounds_frac=100,
            gwidth_lb=None,
            gwidth_ub=None)

        # perform the test using the optimized parameters on the test set
        alpha = 0.01
        ume_opt = tst.GaussUMETest(V_opt,
                                   gw2_opt,
                                   n_simulate=2000,
                                   alpha=alpha)
        test_result = ume_opt.perform_test(te)

        assert test_result['h0_rejected']
        assert util.is_real_num(gw2_opt)
        assert gw2_opt > 0
        assert np.all(np.logical_not((np.isnan(V_opt))))
        assert np.all(np.logical_not((np.isinf(V_opt))))
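        # Hedged note (an assumption, not from the original source): V and the
        # Gaussian width are optimized on tr and the test is run on the held-out
        # te; reusing tr in perform_test would inflate the type-I error rate.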
Example #2
def to_common_arr(x):
    """ Numerically stable transform from real line to positive reals

    Returns ag_np.log(1.0 + ag_np.exp(x))

    Autograd friendly and fully vectorized

    Args
    ----
    x : array of values in (-\infty, +\infty)

    Returns
    -------
    ans : array of values in (0, +\infty), same size as x
    """
    if not isinstance(x, float):
        mask1 = x > 0
        mask0 = ag_np.logical_not(mask1)
        out = ag_np.zeros_like(x)
        out[mask0] = ag_np.log1p(ag_np.exp(x[mask0]))
        out[mask1] = x[mask1] + ag_np.log1p(ag_np.exp(-x[mask1]))
        return out
    if x > 0:
        return x + ag_np.log1p(ag_np.exp(-x))
    else:
        return ag_np.log1p(ag_np.exp(x))
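# A minimal usage sketch (an assumption, not part of the original source):
# to_common_arr stays finite where the naive ag_np.log(1.0 + ag_np.exp(x))
# would overflow for large positive x.
import autograd.numpy as ag_np

x_demo = ag_np.array([-800.0, 0.0, 800.0])
y_demo = to_common_arr(x_demo)
# y_demo is approximately [0.0, 0.693, 800.0]; the naive form would give
# [0.0, 0.693, inf] because ag_np.exp(800.0) overflows.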
Example #3
def to_unconstrained_arr(p):
    """ Numerically stable transform from positive reals to real line

    Implements ag_np.log(ag_np.exp(p) - 1.0)

    Autograd friendly and fully vectorized

    Args
    ----
    p : array of values in (0, +\infty)

    Returns
    -------
    ans : array of values in (-\infty, +\infty), same size as p
    """
    ## Handle numpy array case
    if not isinstance(p, float):
        mask1 = p > 10.0
        mask0 = ag_np.logical_not(mask1)
        out = ag_np.zeros_like(p)
        out[mask0] = ag_np.log(ag_np.expm1(p[mask0]))
        out[mask1] = p[mask1] + ag_np.log1p(-ag_np.exp(-p[mask1]))
        return out
    ## Handle scalar float case
    else:
        if p > 10:
            return p + ag_np.log1p(-ag_np.exp(-p))
        else:
            return ag_np.log(ag_np.expm1(p))
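# A minimal round-trip sketch (an assumption, not part of the original
# source): to_unconstrained_arr inverts to_common_arr up to float error.
import autograd.numpy as ag_np

p_demo = ag_np.array([1e-6, 1.0, 50.0])
assert ag_np.allclose(to_common_arr(to_unconstrained_arr(p_demo)), p_demo)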
Example #5
    def update_sigma2(y, weights, mu, Sigma):
        """
        y[N, G, T] data
        weights = [N, G]
        mu = [T] posterior mean
        Sigma = [T, T] posterior covariance of mu
        """
        diffs = y - mu
        total_weight = np.sum(
            weights[:, :, np.newaxis] * np.logical_not(np.isnan(y)))

        sigma2 = np.nansum(weights[:, :, np.newaxis] * (diffs ** 2))
        total_weight = np.maximum(1e-10, total_weight)
        sigma2 = (sigma2 / total_weight) + np.trace(Sigma)
        return sigma2, total_weight
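        # Hedged note (an assumption, not from the original source): the
        # np.trace(Sigma) term adds the posterior variance of mu to the
        # weighted residual variance, using E[(y - mu)^2] =
        # (y - E[mu])^2 + Var(mu).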
def tr_te_indices(n, tr_proportion, seed=9282):
    """Get two logical vectors for indexing train/test points.
    Return (tr_ind, te_ind)
    """
    rand_state = np.random.get_state()
    np.random.seed(seed)

    Itr = np.zeros(n, dtype=bool)
    tr_ind = np.random.choice(n, int(tr_proportion * n), replace=False)
    Itr[tr_ind] = True
    Ite = np.logical_not(Itr)

    np.random.set_state(rand_state)
    return (Itr, Ite)
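# A minimal usage sketch (an assumption, not part of the original source):
# the two boolean masks partition range(n) exactly.
import numpy as np

Itr_demo, Ite_demo = tr_te_indices(10, 0.7, seed=3)
assert Itr_demo.sum() == 7 and Ite_demo.sum() == 3
assert np.all(np.logical_xor(Itr_demo, Ite_demo))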
Example #8
def transformed_expi(x):
    abs_x = np.abs(x)
    ser = abs_x < 1. / 45.
    nser = np.logical_not(ser)

    #     ret = np.zeros(x.shape)
    #     ret[ser], ret[nser] = transformed_expi_series(x[ser]), transformed_expi_naive(x[nser])
    #     return ret

    # We use np.concatenate to combine.
    # It would be better to use ret[ser] and ret[nser] as commented out above,
    # but array assignment is not yet supported by autograd.
    assert np.all(abs_x[:-1] >= abs_x[1:])
    return np.concatenate(
        (transformed_expi_naive(x[nser]), transformed_expi_series(x[ser])))
Example #9
def logpdf(x, mu, sigma2):
    """
    Not really a logpdf: we need to use the weights
    to keep track of normalizing factors that differ
    across clusters.
    """
    mask = np.where(np.logical_not(np.isnan(x)))
    x = np.atleast_1d(x[mask])
    mu = np.atleast_1d(mu[mask])
    D = x.size

    if D == 0:
        return 0
    sigma2 = sigma2 * np.ones(D)
    return np.sum([norm.logpdf(x[d], mu[d], np.sqrt(sigma2[d])) for d in range(D)])
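# A minimal usage sketch (an assumption, not part of the original source):
# NaN entries are masked out, so only the observed coordinates contribute.
import numpy as np

x_demo = np.array([0.0, np.nan, 1.0])
val_demo = logpdf(x_demo, np.zeros(3), 1.0)
# val_demo equals norm.logpdf(0, 0, 1) + norm.logpdf(1, 0, 1)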
Example #10
def surgery(Z):
    empties = np.isclose(Z.sum(axis=0), 0)
    Q, R = Z.shape
    if np.any(empties):
        print('!')
    while np.any(empties):
        for r, empty in enumerate(empties):
            if empty:
                # select a nonempty cluster and split it
                c = np.random.choice(np.where(np.logical_not(empties))[0])
                for q in range(Q):
                    if np.random.binomial(1, 0.5):
                        Z[q, r] = Z[q, c]
                        Z[q, c] = 0
        empties = np.isclose(Z.sum(axis=0), 0)
    return Z
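# A minimal usage sketch (an assumption, not part of the original source):
# a 6x3 assignment matrix whose last column starts empty; surgery loops
# until every column has nonzero mass.
import numpy as np

Z_demo = np.zeros((6, 3))
Z_demo[:, 0] = 1.0
Z_demo[0, 1] = 1.0
Z_fixed = surgery(Z_demo)
assert not np.any(np.isclose(Z_fixed.sum(axis=0), 0))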
Example #11
    def update_mean(y, sigma2, weights, kernel_params):
        """
        y[N, G, T] data
        sigma2 = global noise variance
        weights = [N, G]
        kernel_params -> GP prior covariance [T, T] via cov_func
        returns mu = [T] posterior mean, Sigma = [T, T] posterior covariance
        """
        prior = cov_func(kernel_params, inputs, inputs) + np.eye(T) * 1e-6
        weights = weights[:, :, np.newaxis] * np.logical_not(np.isnan(y))
        B = np.diag(weights.reshape(-1, T).sum(axis=0) / sigma2)

        yB = np.nansum((y * weights / sigma2).reshape(-1, T), axis=0)
        Sigma = np.linalg.inv(B + np.linalg.inv(prior))
        mu = np.dot(Sigma, yB)
        return mu, Sigma
Example #12
def compress_observations(y, weights):
    """
    y [N, G, T]
    weights [N, G]

    takes convex combination of observations by weights
    returns mean and relative precision for each dimension of T
    """
    t = y.shape[-1]
    compressed_weights = (weights[:, :, np.newaxis] * np.logical_not(
        np.isnan(y))).sum(axis=0).sum(axis=0)

    compressed_y = np.nansum((y * weights[:, :, np.newaxis] / compressed_weights).reshape(
        -1, t), axis=0)

    if np.any(np.isnan(compressed_y)):
        pdb.set_trace()
    return compressed_y, compressed_weights
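# A minimal usage sketch (an assumption, not part of the original source):
# two fully observed series with weights 1 and 3 compress to their weighted
# average, with total relative precision 4 at each of the T points.
import numpy as np

y_demo = np.array([[[0.0, 0.0]], [[4.0, 4.0]]])  # shape [N=2, G=1, T=2]
w_demo = np.array([[1.0], [3.0]])                # shape [N=2, G=1]
cy, cw = compress_observations(y_demo, w_demo)
# cy is approximately [3.0, 3.0] and cw is [4.0, 4.0]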
Example #13
    def predict(params,
                x,
                y,
                xstar,
                weights=None,
                condense=True,
                prediction_noise=True):
        """Returns the predictive mean and covariance at locations xstar,
           of the latent function value f (without observation noise)."""

        n, t = y.shape

        if weights is None:
            weights = np.ones(n)

        if not condense:
            return predict_full(params, np.tile(x, n), y.flatten(), xstar,
                                np.tile(weights, (x.size, 1)).T.flatten())

        mean, cov_params, noise_variance = unpack_kernel_params(params)

        if n == 0:
            # no data, return the prior
            prior_mean = mean * np.ones(xstar.size)
            prior_covariance = cov_func(cov_params, xstar, xstar)
            return prior_mean, prior_covariance

        y_bar = np.dot(weights, y)
        weights_full = (np.logical_not(np.isnan(y)) *
                        weights[:, np.newaxis]).sum(axis=0)

        cov_f_f = cov_func(cov_params, xstar, xstar)
        cov_y_f = weights_full[:, np.newaxis] * cov_func(cov_params, x, xstar)

        cov_y_y = np.outer(weights_full, weights_full) * \
            cov_func(cov_params, x, x) + \
            noise_variance * np.diag(weights_full)

        z = solve(cov_y_y, cov_y_f).T
        pred_mean = mean + np.dot(z, y_bar - mean).flatten()
        pred_cov = cov_f_f - np.dot(z, cov_y_f)
        if prediction_noise:
            pred_cov = pred_cov + noise_variance * np.eye(xstar.size)
        return pred_mean, pred_cov
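    # Hedged note (an assumption, not from the original source): the condense
    # branch collapses the N weighted rows of y into one pseudo-observation
    # y_bar with per-dimension effective counts weights_full, then applies the
    # standard GP posterior mean, mean + K_fy K_yy^{-1} (y_bar - mean).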
def softplus(x):
    """ Numerically stable transform from real line to positive reals
    Returns np.log(1.0 + np.exp(x))
    Autograd friendly and fully vectorized
    
    @param x: array of values in (-\infty, +\infty)
    @return ans : array of values in (0, +\infty), same size as x
    """
    if not isinstance(x, float):
        mask1 = x > 0
        mask0 = np.logical_not(mask1)
        out = np.zeros_like(x)
        out[mask0] = np.log1p(np.exp(x[mask0]))
        out[mask1] = x[mask1] + np.log1p(np.exp(-x[mask1]))
        return out
    if x > 0:
        return x + np.log1p(np.exp(-x))
    else:
        return np.log1p(np.exp(x))
Example #15
def _log_logistic_sigmoid(x_real):
    ''' Compute log of logistic sigmoid transform from real line to unit interval.

    Numerically stable and fully vectorized.

    Args
    ----
    x_real : array-like, with values in (-infty, +infty)

    Returns
    -------
    log_p_real : array-like, size of x_real, with values <= 0
    '''
    if not isinstance(x_real, float):
        out = np.zeros_like(x_real)
        mask1 = x_real > 50.0
        out[mask1] = - np.log1p(np.exp(-x_real[mask1]))
        mask0 = np.logical_not(mask1)
        out[mask0] = x_real[mask0]
        out[mask0] -= np.log1p(np.exp(x_real[mask0]))
        return out
    return _log_logistic_sigmoid_not_vectorized(x_real)
def logistic_sigmoid(x_real):
    ''' Compute logistic sigmoid transform from real line to unit interval.

    Numerically stable and fully vectorized.

    Args
    ----
    x_real : array-like, with values in (-infty, +infty)

    Returns
    -------
    p_real : array-like, size of x_real, with values in (0, 1)

    Examples
    --------
    >>> logistic_sigmoid(-55555.)
    0.0
    >>> logistic_sigmoid(0.0)
    0.5
    >>> logistic_sigmoid(55555.)
    1.0
    >>> logistic_sigmoid(np.asarray([-999999, 0, 999999.]))
    array([ 0. ,  0.5,  1. ])
    '''
    if not isinstance(x_real, float):
        out = np.zeros_like(x_real)
        mask1 = x_real > 50.0
        out[mask1] = 1.0 / (1.0 + np.exp(-x_real[mask1]))
        mask0 = np.logical_not(mask1)
        out[mask0] = np.exp(x_real[mask0])
        out[mask0] /= (1.0 + out[mask0])
        return out
    if x_real > 50.0:
        pos_real = np.exp(-x_real)
        return 1.0 / (1.0 + pos_real)
    else:
        pos_real = np.exp(x_real)
        return pos_real / (1.0 + pos_real)
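# A minimal consistency sketch (an assumption, not part of the original
# source): _log_logistic_sigmoid agrees with the log of logistic_sigmoid
# wherever the latter does not underflow.
import numpy as np

x_check = np.asarray([-30.0, -1.0, 0.0, 1.0, 30.0])
assert np.allclose(_log_logistic_sigmoid(x_check),
                   np.log(logistic_sigmoid(x_check)))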
Example #17
    def is_feasible(self, x):
        x = np.array(x, ndmin=2)
        if self.y is None:
            self.y = self.__call__(x)
        feasibility = np.logical_not(np.isnan(self.y))
        return feasibility
Example #18
    def fit(
            param_vector=None,
            pi_kappa=0.0,
            pi_omega=1e-8,
            max_steps=y.size,
            step_iter=50,
            step_size=0.1,
            gamma=0.9,
            eps=1e-8,
            backoff=0.75):
        if param_vector is None:
            param_vector = pack()
        n_params = param_vector.size
        param_path = np.zeros((n_params, max_steps))
        pi_kappa_path = np.zeros(max_steps)
        pi_omega_path = np.zeros(max_steps)
        loglik_path = np.zeros(max_steps)
        dof_path = np.zeros(max_steps)
        aic_path = np.zeros(max_steps)

        # Now, an idiotic gradient descent algorithm
        # Seeding by iteratively-reweighted least squares
        # or just least squares would be better
        grad_negloglik = grad(negloglik, 0)
        grad_penalty = grad(penalty, 0)
        grad_objective = grad(objective, 0)

        avg_sq_grad = np.ones_like(param_vector)

        for j in range(max_steps):
            loss = objective(param_vector)
            best_loss = loss
            local_step_size = step_size
            best_param_vector = np.array(param_vector)

            for i in range(step_iter):
                g_negloglik = grad_negloglik(param_vector)
                g_penalty = grad_penalty(param_vector, pi_kappa, pi_omega)
                g = g_negloglik + g_penalty
                avg_sq_grad = avg_sq_grad * gamma + g**2 * (1 - gamma)

                velocity = g/(np.sqrt(avg_sq_grad) + eps) / sqrt(i+1.0)
                # watch out, nans
                velocity[np.logical_not(np.isfinite(velocity))] = 0.0

                penalty_dominant = np.abs(
                    g_negloglik
                ) < (
                    penalty_weight(pi_kappa, pi_omega)
                )
                velocity[penalty_dominant * (velocity == 0)] = 0.0
                new_param_vector = param_vector - velocity * local_step_size
                # coefficients that pass through 0 must stop there
                new_param_vector[
                    np.abs(
                        np.sign(new_param_vector) -
                        np.sign(param_vector)
                    ) == 2
                ] = 0.0
                new_param_vector[:] = np.maximum(new_param_vector, param_floor)
                new_loss = objective(new_param_vector)
                if new_loss < loss:
                    # print('good', loss, '=>', new_loss, local_step_size)
                    loss = new_loss
                    param_vector = new_param_vector
                else:
                    # print('bad', loss, '=>', new_loss, local_step_size)
                    local_step_size = local_step_size * backoff
                    new_param_vector = param_vector + backoff * (
                        new_param_vector - param_vector
                    )
                    loss = objective(new_param_vector)
                if loss < best_loss:
                    best_param_vector = np.array(param_vector)
                    best_loss = loss

                if local_step_size < 1e-3:
                    print('nope', j, i, max_steps)
                    break

            this_loglik = -negloglik(best_param_vector)
            this_dof = dof(best_param_vector)
            param_path[:, j] = best_param_vector
            pi_kappa_path[j] = pi_kappa
            pi_omega_path[j] = pi_omega
            loglik_path[j] = this_loglik
            dof_path[j] = this_dof
            aic_path[j] = 2 * this_dof - 2 * this_loglik  # AIC = 2*dof - 2*loglik

            # regularisation parameter selection
            # ideally we should weight randomly according to the sizes
            # of those two damn vectors
            mu_grad, kappa_grad, log_omega_grad = unpack(
                np.abs(
                    grad_objective(best_param_vector) *
                    (best_param_vector != 0.0)
                )
            )
            if (
                np.random.random() < (
                    sqrt(log_omega_grad.size) /
                    (sqrt(kappa_grad.size) + sqrt(log_omega_grad.size))
                    )):
                print('log_omega_grad', log_omega_grad)
                pi_omega += max(
                    np.amin(log_omega_grad[log_omega_grad > 0])
                    * j/max_steps,
                    pi_omega * 0.1
                )
            else:
                print('kappa_grad', kappa_grad)
                pi_kappa += max(
                    np.amin(kappa_grad[kappa_grad > 0]) * j / max_steps,
                    pi_kappa * 0.1
                )

        return dict(
            param_path=param_path,
            pi_kappa_path=pi_kappa_path,
            pi_omega_path=pi_omega_path,
            loglik_path=loglik_path,
            dof_path=dof_path,
            aic_path=aic_path
        )
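        # Hedged note (an assumption, not from the original source): the inner
        # update is an RMSProp-style step,
        #   avg_sq_grad = gamma * avg_sq_grad + (1 - gamma) * g**2
        #   velocity    = g / (sqrt(avg_sq_grad) + eps) / sqrt(i + 1)
        # plus a multiplicative backoff line search on the step size.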
Example #19
def boxQP(H, g, lower, upper, x0):
    n = H.shape[0]
    clamped = np.zeros(n)
    free = np.ones(n)
    Hfree = np.zeros(n)
    oldvalue = 0
    result = 0
    nfactor = 0
    clamp = lambda value: np.maximum(lower, np.minimum(upper, value))

    maxIter = 100
    minRelImprove = 1e-8
    minGrad = 1e-8
    stepDec = 0.6
    minStep = 1e-22
    Armijo = 0.1

    if x0.shape[0] == n:
        x = clamp(x0)
    else:
        lu = np.array([lower, upper])
        # replace non-finite bounds with NaN so nanmean ignores them
        lu[np.logical_not(np.isfinite(lu))] = np.nan
        x = np.nanmean(lu, axis=0)

    value = np.dot(x.T, g) + 0.5 * np.dot(x.T, np.dot(H, x))

    for iteration in range(maxIter):
        if result != 0:
            break

        if iteration > 1 and (oldvalue - value) < minRelImprove * abs(oldvalue):
            result = 4
            logging.info("[QP info] Improvement smaller than tolerance")
            break

        oldvalue = value

        grad = g + np.dot(H, x)

        old_clamped = clamped
        clamped = np.zeros(n)
        clamped[np.logical_and(x == lower, grad > 0)] = 1
        clamped[np.logical_and(x == upper, grad < 0)] = 1
        free = np.logical_not(clamped)

        if np.all(clamped):
            result = 6
            logging.info("[QP info] All dimensions are clamped")
            break

        if iteration == 0:
            factorize = True
        else:
            factorize = np.any(old_clamped != clamped)

        if factorize:
            try:
                if not np.allclose(H, H.T):
                    H = np.triu(H)
                Hfree = np.linalg.cholesky(H[np.ix_(free, free)])
            except LinAlgError:
                eigs, _ = np.linalg.eig(H[np.ix_(free, free)])
                print(eigs)
                result = -1
                logging.info("[QP info] Hessian is not positive definite")
                break
            nfactor += 1

        gnorm = np.linalg.norm(grad[free])
        if gnorm < minGrad:
            result = 5
            logging.info("[QP info] Gradient norm smaller than tolerance")
            break

        grad_clamped = g + np.dot(H, x*clamped)
        search = np.zeros(n)

        # H_ff = Hfree Hfree^T (lower-triangular Cholesky), so solve with
        # Hfree first and Hfree.T second
        y = np.linalg.lstsq(Hfree, grad_clamped[free], rcond=None)[0]
        search[free] = -np.linalg.lstsq(Hfree.T, y, rcond=None)[0] - x[free]
        sdotg = np.sum(search*grad)
        if sdotg >= 0:
            print(f"[QP info] No descent direction found. Should not happen. Grad is {grad}")
            break

        # armijo linesearch
        step = 1
        nstep = 0
        xc = clamp(x + step*search)
        vc = np.dot(xc.T, g) + 0.5*np.dot(xc.T, np.dot(H, xc))
        while (vc - oldvalue) / (step*sdotg) < Armijo:
            step *= stepDec
            nstep += 1
            xc = clamp(x + step * search)
            vc = np.dot(xc.T, g) + 0.5 * np.dot(xc.T, np.dot(H, xc))

            if step < minStep:
                result = 2
                break

        # accept candidate
        x = xc
        value = vc

        # print(f"[QP info] Iteration {iteration}, value of the cost: {vc}")

    if result == 0:
        # exhausted maxIter iterations without meeting a stopping criterion
        result = 1

    return x, result, Hfree, free
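# A minimal usage sketch (an assumption, not part of the original source;
# assumes numpy and logging are imported, as boxQP's body expects): minimize
# 0.5*x'Hx + g'x over a box; the unconstrained optimum [1, 1] is clipped
# against the first upper bound.
import numpy as np

x_qp, res, _, free_dims = boxQP(np.eye(2), np.array([-1.0, -1.0]),
                                lower=np.array([0.0, 0.0]),
                                upper=np.array([0.5, 2.0]),
                                x0=np.array([0.2, 0.2]))
# expected: x_qp close to [0.5, 1.0]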
Example #20
def atomsDistances(positions, cell, cutoff_radius=6.0, self_interaction=False):
    """ Compute the distance of every atom to its neighbors.

    
    This function computes the distance of every central atom to each of its
    neighbors. Pairs separated by more than the cutoff radius are discarded.
    Periodic boundary conditions are assumed along every axis.

    Parameters:
    -----------
    positions: np.ndarray
        Atomic positions. The size of this tensor will be (N_atoms, 3), where N_atoms is the number of atoms
        in the cluster.
    cell: np.ndarray
        Periodic cell, which has the size of (3, 3)
    cutoff_radius: float
        Cutoff Radius, which is a hyper parameters. The default is 6.0 Angstrom.
    self_interaction: boolean
        Default is False, which means that results will not consider the atom itself as its neighbor.
    Returns:
    ----------
    distances: np.ndarray
        Differentiable distances array.
    first_atoms: np.ndarray
        Central atoms observed in the cell; np.unique(first_atoms) equals
        np.arange over the number of atoms in the cell.
    second_atoms: np.ndarray
        Atoms that are considered as the neighbor atoms of first atoms. The distances of first_atoms and
        second_atoms will be computed and stored in the distances array.
    cell_shift_vector: np.ndarray
        The cell shift vector of every atom.
    """
    # Compute reciprocal lattice vectors.
    inverse_cell = np.linalg.pinv(cell).T

    # Compute distances of cell faces.
    face_dist_c = 1 / np.linalg.norm(inverse_cell, axis=0)

    # We use a minimum bin size of 3 A
    bin_size = max(cutoff_radius, 3)

    # Compute number of bins, the minimum bin size must be [1., 1., 1.].
    nbins_c = np.maximum(
        (face_dist_c / bin_size - (face_dist_c / bin_size) % 1), [1., 1., 1.])
    nbins = np.prod(nbins_c)

    # Compute the number of neighbor cell that need to be search
    neighbor_search_x, neighbor_search_y, neighbor_search_z =\
                np.ceil(bin_size * nbins_c / face_dist_c).astype(int)

    # Sort atoms into bins.
    scaled_positions_ic = np.dot(positions, inverse_cell) % 1
    bin_index_ic = scaled_positions_ic * nbins_c - (scaled_positions_ic *
                                                    nbins_c) % 1

    # Convert Cartesian bin index to unique scalar bin index.
    bin_index_i = (bin_index_ic[:, 0] + nbins_c[0] *
                   (bin_index_ic[:, 1] + nbins_c[1] * bin_index_ic[:, 2]))

    # atom_i contains atom index in new sort order.
    atom_i = np.argsort(bin_index_i)
    bin_index_i = bin_index_i[atom_i]

    # Compute the maximum number of atoms in a bin
    max_natoms_per_bin = np.bincount(np.int_(bin_index_i)).max()

    # Sort atoms into bins. The atoms_in_bin_ba contains the information about where the atoms located.
    atoms_in_bin_ba = -np.ones([np.int_(nbins), max_natoms_per_bin], dtype=int)

    for i in range(max_natoms_per_bin):
        # Create a mask array that identifies the first atom of each bin.
        mask = np.append([True], bin_index_i[:-1] != bin_index_i[1:])
        # Assign all first atoms.
        atoms_in_bin_ba[np.int_(bin_index_i[mask]), i] = atom_i[mask]

        # Remove atoms that we just sorted into atoms_in_bin_ba. The next
        # "first" atom will be the second and so on.
        mask = np.logical_not(mask)
        atom_i = atom_i[mask]
        bin_index_i = bin_index_i[mask]

    # Create the shift list that indicates that where the cell might shift.
    shift = []
    for x in range(-neighbor_search_x, neighbor_search_x + 1):
        for y in range(-neighbor_search_y, neighbor_search_y + 1):
            for z in range(-neighbor_search_z, neighbor_search_z + 1):
                shift += [[x, y, z]]

    # Therefore, the possible positions of neighborhood bin can be computed by the following code.
    neighborbin = (bin_index_ic[:, None] + np.array(shift)[None, :]) % nbins_c
    cell_shift = ((bin_index_ic[:, None] + np.array(shift)[None, :]) -
                  neighborbin) / nbins_c
    neighborbin = neighborbin[:, :, 0] + nbins_c[0] * (
        neighborbin[:, :, 1] + nbins_c[1] * neighborbin[:, :, 2])

    distances = []
    first_atoms = []
    second_atoms = []
    cell_shift_vector = []
    for i in range(len(positions)):
        # Create a mask that indicates which neighborhood bin contains atoms.
        if self_interaction:
            mask = (atoms_in_bin_ba[np.int_(neighborbin[i])] != -1)
        else:
            mask = np.logical_and(
                atoms_in_bin_ba[np.int_(neighborbin[i])] != -1,
                atoms_in_bin_ba[np.int_(neighborbin[i])] != i)
        distances_vec = positions[atoms_in_bin_ba[np.int_(
            neighborbin[i])]] - positions[i]
        # the distance should consider the cell shift
        distances_vec = distances_vec + np.dot(cell_shift[i], cell)[:, None]
        # make the cell shift vector for every atom instead of every bin.
        _cell_shift_vector = np.repeat(cell_shift[i][:, None],
                                       max_natoms_per_bin,
                                       axis=1)[mask]
        distances_vec = distances_vec[mask]
        temp_distances = np.sum(distances_vec * distances_vec, axis=1)
        temp_distances = (temp_distances)**0.5
        cutoff_mask = (temp_distances < cutoff_radius)
        _second_atoms = atoms_in_bin_ba[np.int_(
            neighborbin[i])][mask][cutoff_mask]
        _first_atoms = [i] * len(_second_atoms)
        _cell_shift_vector = _cell_shift_vector[cutoff_mask]
        first_atoms.extend(_first_atoms)
        second_atoms.extend(_second_atoms)
        distances.extend(temp_distances[cutoff_mask])
        cell_shift_vector.extend(_cell_shift_vector)

    distances = np.array(distances)
    cell_shift_vector = np.array(cell_shift_vector)
    first_atoms = np.array(first_atoms)
    second_atoms = np.array(second_atoms)

    return distances, first_atoms, second_atoms, cell_shift_vector
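# A minimal usage sketch (an assumption, not part of the original source):
# two atoms 1.5 A apart in a 10 A cubic cell; within the 6 A cutoff each
# atom sees exactly the other.
import numpy as np

pos_demo = np.array([[0.0, 0.0, 0.0], [1.5, 0.0, 0.0]])
d_demo, i_demo, j_demo, s_demo = atomsDistances(pos_demo, 10.0 * np.eye(3))
# expected: pairs (0, 1) and (1, 0), each with distance 1.5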
Example #21
    im = ax[1, 0].imshow(imgs[0] - imgs[0])
    fig.colorbar(im, ax=ax[1, 0], orientation='vertical')
    ax[2, 0].imshow(imgs[0])
    for j in range(1, 6):
        imdst = cv2.warpPerspective(imgs[0], H[0, j],
                                    (imgs[j].shape[1], imgs[j].shape[0]))
        ax[0, j].imshow(imgs[j])
        err = np.mean(imgs[j] - imdst, -1)
        mask = 1.0 - 1.0 * (np.mean(imdst, -1) == 0.0)
        im = ax[1, j].imshow(
            np.minimum(255.0, np.maximum(0, np.abs(mask * err))) / 255)
        fig.colorbar(im, ax=ax[1, j], orientation='vertical')
        # ax[2, j].imshow(imdst)
        ax[2, j].imshow(imgs[j])
        xyj = h_apply(H[0, j], xy1)
    not_sel = np.logical_not(sel)
    for j in range(6):
        xyj = h_apply(H[0, j], xy1)
        ax[2, j].scatter(xyj[0, sel], xyj[1, sel], c='g', marker='x')
        ax[2, j].scatter(xyj[0, not_sel], xyj[1, not_sel], c='r', marker='x')
    print(np.sum(sel))
    print(np.sum(not_sel))
    plt.show()

##############################################################################
print("Time for point selection: ", time.time() - START_TIME)
##############################################################################

descs0 = np.stack([dl[0][ids1[i]] for i in range(len(ids1)) if sel[i]])
fulldescs0 = np.stack([dkl[0][ids1[i]] for i in range(len(ids1)) if sel[i]])
keypoints0 = [keypoints[0][ids1[i]] for i in range(len(ids1)) if sel[i]]
def main():
    ### Setup

    # Constants
    d = 2  # dimensions
    I = numpy.identity(d)
    B = numpy.concatenate((I, -I), axis=1)  # Difference matrix

    # Simulation Parameters
    spring_const = 10.0 # Technically could vary per spring
    h = 0.005
    mass = 0.05
    # exit()
    # Initial conditions
    starting_stretch = 1  # 0.6

    # Big bar
    def generate_bar_points(n_sections, scale=1.0, translate=numpy.array([0.0, 0.0])):
        top = numpy.array([-2,1])
        bottom = numpy.array([-2,0])
        bottom_2 = numpy.array([-2,-1])
        offset = numpy.array([1,0])

        return numpy.concatenate(
            [[top + offset * i, bottom + offset * i, bottom_2 + offset * i] for i in range(n_sections + 2)]
            #[[top + offset * i] for i in range(n_sections + 2)]
        ) * scale + translate

    def generate_springs(n_sections):
        offset = numpy.array([3, 3])
        section = numpy.array([

            [0, 3],
            [0, 4],
            [1, 5],
            [1, 4],
            [3, 4],
            [4, 5],
            [2, 5],
            [1, 3],
            [2, 4]

        ])

        return numpy.concatenate([[[0,1], [1, 2]], numpy.concatenate([section + offset * i for i in range(n_sections + 1)]) ])

    # def generate_bar_points(n_sections, scale=1.0, translate=numpy.array([0.0, 0.0])):
    #     top = numpy.array([-2,1])
    #     bottom = numpy.array([-2,0])
    #     bottom_2 = numpy.array([-2,-1])
    #     offset = numpy.array([1,0])

    #     return numpy.concatenate(
    #         #[[top + offset * i, bottom + offset * i, bottom_2 + offset * i] for i in range(n_sections + 2)]
    #         [[top + offset * i] for i in range(n_sections + 2)]
    #     ) * scale + translate

    # def generate_springs(n_sections):
    #     offset = numpy.array([1, 1])
    #     section = numpy.array([

    #         [0, 1],

    #     ])

    #     return numpy.concatenate([section + offset * i for i in range(n_sections +1 )])

    sections = 10
    starting_points = generate_bar_points(sections)
    
    n_points = len(starting_points) # Num points
    q_initial = starting_points.flatten()

    pinned_points = numpy.array([0, 1])
    q_mask = numpy.ones(n_points * d, dtype=bool)
    q_mask[numpy.concatenate([pinned_points * d + i for i in range(d)])] = False
    q_mask_inv = numpy.logical_not(q_mask)

    springs = generate_springs(sections)

    n_springs = len(springs)

    P_matrices = construct_P_matrices(springs, n_points, d)
    
    all_spring_offsets = (B @ (P_matrices @ q_initial).T).T
    rest_lens = numpy.linalg.norm(all_spring_offsets, axis=1) * starting_stretch

    mass_matrix = numpy.identity(len(q_initial)) * mass # Mass matrix
    external_forces = numpy.array([0, -9.8] * n_points)

    # Assemble offsets
    P = numpy.concatenate(B @ P_matrices)

    # Assemble forces
    Pf = numpy.array([numpy.zeros(len(springs) * 2)] * n_points * 2)
    for i, s in enumerate(springs):
        n0 = s[0] * 2
        n1 = s[1] * 2
        col = i * 2
        Pf[n0][col] = 1.0
        Pf[n0+1][col+1] = 1.0
        Pf[n1][col] = -1.0
        Pf[n1+1][col+1] = -1.0
    # print(Pf.shape)

    def compute_internal_forces(q):
        # forces = numpy.array([[0.0, 0.0]] * len(springs))
        # for i, s in enumerate(springs):
        #     s0 = s[0] * 2
        #     s1 = s[1] * 2
        #     offset_vec = q[s0: s0 + 2] - q[s1: s1 + 2]
        #     length = numpy.linalg.norm(offset_vec)
        #     displacement_dir = offset_vec / length
        #     force = -spring_const * (length / rest_lens[i] - 1.0) * displacement_dir
        #     forces[i] = force

        offsets = (P @ q).reshape(n_springs, 2)
        lengths = numpy.sqrt((offsets * offsets).sum(axis=1))
        #normed_displacements = numpy.linalg.norm(offsets, axis=1)
        normed_displacements = offsets / lengths[:, None]
        forces = (spring_const * (lengths / rest_lens - 1.0))[:, None] * normed_displacements # Forces per spring
        forces = forces.flatten()

        global_forces = Pf @ forces
        return global_forces

    # print(compute_internal_forces(q_initial))
    # print(autograd.jacobian(compute_internal_forces)(q_initial))


    def kinetic_energy(q_k, q_k1):
        """ Profile this to see if using numpy.dot is different from numpy.matmul (@)"""

        d_q = q_k1 - q_k
        energy = 1.0 / (2 * h ** 2) * d_q.T @ mass_matrix @ d_q

        return energy

    def potential_energy(q_k, q_k1):
        q_tilde = 0.5 * (q_k + q_k1)

        # Optimized but ugly version
        sum = numpy.sum(
            (1.0 - (1.0 / rest_lens) * numpy.sqrt(numpy.einsum('ij,ij->i', q_tilde.T @ P_matrices.transpose((0,2,1)) @ B.T, (B @ P_matrices @ q_tilde)))) ** 2
        )

        return 0.5 * spring_const * sum

    def neg_potential(u):
        # q = numpy.array(q_sub_pinned)
        # for i in pinned_points:
        #     q = numpy.insert(q, i*d, q_initial[i*d])
        #     q = numpy.insert(q, i*d+1, q_initial[i*d+1])
        q = q_initial + u
        return -potential_energy(q, q)

    def find_natural_modes():
        # q = q_initial * 2
        # # K = spring_const * numpy.concatenate(B @ P_matrices) * 1.0 / mass # Multiplying by inverse mass to ger rid of mass matrix on right
        # F = (1.0 - (1.0 / rest_lens) * numpy.sqrt(numpy.einsum('ij,ij->i', q.T @ P_matrices.transpose((0,2,1)) @ B.T, (B @ P_matrices @ q))))
        # print(len(q_initial))
        # print(F.shape)
        # print(F)
        # print(len(springs))

        #K = -autograd.jacobian(compute_internal_forces)(q_initial) * 1.0 / mass
       
        # M_inv[0][0] = 0.0
        # M_inv[1][1] = 0.0
        # M_inv[2][2] = 0.0
        # M_inv[3][3] = 0.0

        force_fn = autograd.grad(neg_potential)
        K = autograd.jacobian(force_fn)(numpy.zeros(n_points*2)) #* 1.0/mass
        print(K)
        # K = K[2:-2,2:-2]
        # K = numpy.linalg.inv(K)
        print(K)
        import scipy

        w, v = scipy.linalg.eig(K)
        
        idx = w.argsort()[::-1]   
        w = w[idx]
        v = v[:,idx]
        print(w)

        # print(w)
        # print()
        # print(v)
        # print(w[0])
        # print(len(v))

        i = 0
        while True:
            # if numpy.abs(w[i]) > 0.0001:
            render(numpy.real_if_close(v[:, i] + q_initial), springs, save_frames=False)
            import time
            time.sleep(0.3)
            i = (i + 1) % len(v)
    # find_natural_modes()
    # exit()

    def discrete_lagrangian(q_k, q_k1):
        return kinetic_energy(q_k, q_k1) - potential_energy(q_k, q_k1)

    D1_Ld = autograd.grad(discrete_lagrangian, 0)  # (q_t, q_t+1) -> R^N*d
    D2_Ld = autograd.grad(discrete_lagrangian, 1)  # (q_t-1, q_t) -> R^N*d


    # Want D1_Ld + D2_Ld = 0
    # Do root finding
    def DEL(new_q, cur_q, prev_q):

        res = D1_Ld(cur_q, new_q) + D2_Ld(prev_q, cur_q) + mass_matrix @ external_forces

        # SUPER hacky way of adding constrained points
        return res

    jac_DEL = autograd.jacobian(DEL, 0)

    def DEL_objective(new_q, cur_q, prev_q):
        res = DEL(new_q, cur_q, prev_q)

        return res.T @ res

    ### Simulation
    q_history = []
    save_freq = 1000
    current_frame = 0
    output_path = 'configurations'

    prev_q = q_initial
    cur_q = q_initial
    while True:


        sol = optimize.root(DEL, cur_q, method='broyden1', args=(cur_q, prev_q))#, jac=jac_DEL)# Note numerical jacobian seems much faster
        #sol = optimize.minimize(DEL_objective, cur_q, args=(cur_q, prev_q), method='L-BFGS-B', jac=autograd.jacobian(DEL_objective, 0))#, options={'gtol': 1e-6, 'eps': 1e-06, 'disp': False})
        prev_q = cur_q
        cur_q = sol.x

        render(cur_q, springs, save_frames=False)

        if save_freq > 0:
            current_frame += 1
            q_history.append(cur_q)

            if current_frame % save_freq == 0:
                with open(output_path, 'wb') as f:
                    pickle.dump(q_history, f)
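    # Hedged note (an assumption, not from the original source): the root
    # solve above enforces the discrete Euler-Lagrange condition
    #   D1_Ld(q_k, q_{k+1}) + D2_Ld(q_{k-1}, q_k) + M f_ext = 0,
    # the variational-integrator analogue of M q'' = F(q) + f_ext.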
Example #23
ol_bool = {}
for probe in ['probeC', 'probeD']:
    trial_sd = np.std(lfp[probe], axis=2, keepdims=True)
    ol = np.any(np.abs(lfp[probe]) > 5 * trial_sd, axis=(0, 1))
    ol_bool[probe] = ol

ol = np.logical_or(ol_bool['probeC'], ol_bool['probeD'])
print('outlier trials: %d' % np.sum(ol))
if plot_ol:
    for probe in ['probeC', 'probeD']:
        x1 = np.unique(x[probe][:, 0])
        for j in x1:
            plt.figure(figsize=(6, 16))
            for i, xi in enumerate(x[probe][x[probe][:, 0] == j]):
                plt.plot(t,
                         xi[1] + 3 * lfp[probe][i, :, np.logical_not(ol)].T,
                         'k')
                plt.plot(t, xi[1] + 3 * lfp[probe][i, :, ol].T, 'r')
            plt.title('%s x1 = %0.2f microns' % (probe, j))

for probe in ['probeC', 'probeD']:
    lfp[probe] = lfp[probe][:, :, np.logical_not(ol)]

# %%
csdSE = {}
csdMatern = {}
for probe in ['probeC', 'probeD']:

    # Create GPCSD model
    R_prior = GPCSDInvGammaPrior()
    R_prior.set_params(50, 300)
Example #24
def e2(R, x):
    arg = np.tan(R)**2 * np.tan(x)**2 * np.pi
    # evaluate exp(-1/arg) only where arg is nonzero; elsewhere the limit is 0
    # (the original np.where(mask, np.exp(-1 / arg[mask]), 0) mixed a masked
    # subarray into a full-shape np.where, which raises a shape error)
    mask = np.logical_not(np.isclose(arg, 0))
    y = np.zeros_like(arg)
    y[mask] = np.exp(-1.0 / arg[mask])
    return y
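# A minimal usage sketch (an assumption, not part of the original source):
# e2 returns 0 exactly where tan(R)^2 * tan(x)^2 vanishes, avoiding the
# division by zero that a naive np.exp(-1 / arg) would hit.
import numpy as np

print(e2(np.array([0.0, 0.3]), np.array([0.0, 0.3])))
# -> [0.0, ~8e-16]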