Example 1
import cupy as cp


def nonlin_evo(psiP2, psiP1, psi0, psiM1, psiM2, c0, c2, c4, V, p, dt, spin_f):
    # Calculate densities:
    n = abs(psiP2) ** 2 + abs(psiP1) ** 2 + abs(psi0) ** 2 + abs(psiM1) ** 2 + abs(psiM2) ** 2
    A00 = 1 / cp.sqrt(5) * (psi0 ** 2 - 2 * psiP1 * psiM1 + 2 * psiP2 * psiM2)
    fz = 2 * (abs(psiP2) ** 2 - abs(psiM2) ** 2) + abs(psiP1) ** 2 - abs(psiM1) ** 2

    # Evolve spin-singlet term -c4*(n^2-|alpha|^2)
    S = cp.sqrt(n ** 2 - abs(A00) ** 2)
    S = cp.nan_to_num(S)

    cosT = cp.cos(c4 * S * dt)
    sinT = cp.sin(c4 * S * dt) / S
    sinT[S == 0] = 0  # Corrects division by 0

    Wfn = [psiP2 * cosT + 1j * (n * psiP2 - A00 * cp.conj(psiM2)) * sinT,
           psiP1 * cosT + 1j * (n * psiP1 + A00 * cp.conj(psiM1)) * sinT,
           psi0 * cosT + 1j * (n * psi0 - A00 * cp.conj(psi0)) * sinT,
           psiM1 * cosT + 1j * (n * psiM1 + A00 * cp.conj(psiP1)) * sinT,
           psiM2 * cosT + 1j * (n * psiM2 - A00 * cp.conj(psiP2)) * sinT]

    # Calculate spin vectors
    fp = cp.sqrt(6) * (Wfn[1] * cp.conj(Wfn[2]) + Wfn[2] * cp.conj(Wfn[3])) + 2 * (Wfn[3] * cp.conj(Wfn[4]) +
                                                                                   Wfn[0] * cp.conj(Wfn[1]))
    F = cp.sqrt(fz ** 2 + abs(fp) ** 2)

    # Calculate cos, sin and Qfactor terms:
    C1, S1 = cp.cos(c2 * F * dt), cp.sin(c2 * F * dt)
    C2, S2 = cp.cos(2 * c2 * F * dt), cp.sin(2 * c2 * F * dt)
    Qfactor = 1j * (-4 / 3 * S1 + 1 / 6 * S2)
    Q2factor = (-5 / 4 + 4 / 3 * C1 - 1 / 12 * C2)
    Q3factor = 1j * (1 / 3 * S1 - 1 / 6 * S2)
    Q4factor = (1 / 4 - 1 / 3 * C1 + 1 / 12 * C2)

    fzQ = cp.nan_to_num(fz / F)
    fpQ = cp.nan_to_num(fp / F)

    Qpsi = calc_Qpsi(fzQ, fpQ, Wfn)
    Q2psi = calc_Qpsi(fzQ, fpQ, Qpsi)
    Q3psi = calc_Qpsi(fzQ, fpQ, Q2psi)
    Q4psi = calc_Qpsi(fzQ, fpQ, Q3psi)

    # Evolve spin term c2 * F^2
    for ii in range(len(Wfn)):
        Wfn[ii] += Qfactor * Qpsi[ii] + Q2factor * Q2psi[ii] + Q3factor * Q3psi[ii] + Q4factor * Q4psi[ii]

    # Evolve (c0+c4)*n^2 + (V + pm)*n:
    for ii in range(len(Wfn)):
        mF = spin_f - ii
        Wfn[ii] *= cp.exp(-1j * dt * ((c0 + c4) * n + V + p * mF))

    return Wfn
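The sinT step above divides by S and then patches the S == 0 entries by hand; that divide-then-repair idiom is what most of the examples in this collection use cp.nan_to_num for. A minimal sketch of the same guard in isolation, assuming only CuPy, with the hypothetical safe_sinc helper substituting the analytic limit instead of zero:

import cupy as cp

def safe_sinc(x):
    # sin(x)/x; the division writes NaN wherever x == 0, and the mask
    # assignment below overwrites those entries with the limit value
    out = cp.sin(x) / x
    out[x == 0] = 1.0  # sin(x)/x -> 1 as x -> 0
    return out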
Example 2
from typing import Tuple

import cupy as cp
from tqdm import tqdm


def em(collection: list, p_wt: cp.ndarray,
       p_td: cp.ndarray) -> Tuple[cp.ndarray, cp.ndarray]:
    # P(Wi|Tk)
    p_wt_divisor = cp.zeros(p_wt.shape[1], dtype=float)  # (TOPIC_SIZE,)
    p_wt_dividend = cp.zeros(p_wt.shape, dtype=float)  # (51253, TOPIC_SIZE)
    for i_prime in tqdm(range(p_wt.shape[0]), desc='EM_P(Wi|Tk)'):

        # C(Wi', dj) -> dj∈D
        word = str(i_prime)
        c_wd = cp.zeros((1, len(collection)), dtype=int)
        for j in range(len(collection)):
            c_wd[0][j] = collection[j][word]

        # P(Tk|Wi', dj) -> dj∈D
        p_twd_divisor = cp.dot(p_wt[i_prime], p_td.T)
        p_twd_dividend = p_wt[i_prime] * p_td
        p_twd = cp.nan_to_num(p_twd_dividend.T / p_twd_divisor).T

        # C(Wi', dj) * P(Tk|Wi', dj) -> dj∈D
        sum_cp_d = cp.dot(c_wd, p_twd)[0]

        p_wt_dividend[i_prime] = sum_cp_d
        p_wt_divisor += sum_cp_d
    new_wt = p_wt_dividend / p_wt_divisor

    # P(Tk|dj)
    p_td_divisor = cp.zeros(len(collection), dtype=float)  # (18461,)
    p_td_dividend = cp.zeros(p_td.shape, dtype=float)  # (18461, TOPIC_SIZE)
    for j in tqdm(range(p_td.shape[0]), desc='EM_P(Tk|dj)'):

        # C(Wi', dj) -> i=1~|V|
        c_wd = cp.zeros((1, p_wt.shape[0]), dtype=int)
        for key, value in collection[j].items():
            c_wd[0][int(key)] = value
            p_td_divisor[j] += value

        # P(Tk|Wi', dj) -> i=1~|V|
        p_twd_divisor = cp.dot(p_wt, p_td[j])
        p_twd_dividend = p_wt * p_td[j]
        p_twd = cp.nan_to_num(p_twd_dividend.T / p_twd_divisor).T

        # C(Wi', dj) * P(Tk|Wi', dj) -> i=1~|V|
        sum_cp_v = cp.dot(c_wd, p_twd)[0]

        p_td_dividend[j] = sum_cp_v
    new_td = (p_td_dividend.T / p_td_divisor).T

    return new_wt, new_td
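Both loops rely on cp.nan_to_num to turn 0/0 normalisations (rows whose divisor sums to zero) into all-zero responsibilities instead of NaNs. A toy sketch of just that normalisation step, with made-up shapes:

import cupy as cp

topics = cp.asarray([[0.2, 0.8],
                     [0.0, 0.0]])   # second row normalises to 0/0
norm = topics.sum(axis=1)           # [1.0, 0.0]
resp = cp.nan_to_num(topics.T / norm).T
print(resp)  # [[0.2 0.8] [0. 0.]] -- the NaN row becomes zeros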
Example 3
def confusion_matrix(client, y_true, y_pred, normalize=None, sample_weight=None):
    import cupy as cp
    import numpy as np
    from cuml.dask.common.input_utils import DistributedDataHandler

    unique_classes = cp.unique(y_true.map_blocks(lambda x: cp.unique(x)).compute())
    nclasses = len(unique_classes)

    ddh = DistributedDataHandler.create([y_true, y_pred])

    cms = client.compute(
        [
            client.submit(
                local_cm, part, unique_classes, sample_weight, workers=[worker]
            )
            for worker, part in ddh.gpu_futures
        ],
        sync=True,
    )

    cm = cp.zeros((nclasses, nclasses))
    for i in cms:
        cm += i

    with np.errstate(all="ignore"):
        if normalize == "true":
            cm = cm / cm.sum(axis=1, keepdims=True)
        elif normalize == "pred":
            cm = cm / cm.sum(axis=0, keepdims=True)
        elif normalize == "all":
            cm = cm / cm.sum()
        cm = cp.nan_to_num(cm)

    return cm
Example 4
import cupy as cp
import cupyx.scipy.sparse
import numpy as np


def local_cm(y_y_pred, unique_labels, sample_weight):

    y_true, y_pred = y_y_pred
    labels = unique_labels

    n_labels = labels.size

    # Assume labels are monotonically increasing for now.

    # intersect y_pred, y_true with labels, eliminate items not in labels
    ind = cp.logical_and(y_pred < n_labels, y_true < n_labels)
    y_pred = y_pred[ind]
    y_true = y_true[ind]

    if sample_weight is None:
        sample_weight = cp.ones(y_true.shape[0], dtype=np.int64)
    else:
        sample_weight = cp.asarray(sample_weight)

    sample_weight = sample_weight[ind]

    cm = cupyx.scipy.sparse.coo_matrix(
        (sample_weight, (y_true, y_pred)),
        shape=(n_labels, n_labels),
        dtype=cp.float32,
    ).toarray()

    return cp.nan_to_num(cm)
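The coo_matrix call is what does the actual counting here: duplicate (row, col) coordinates are summed when the sparse matrix is densified, so every (y_true, y_pred) pair accumulates its sample weight into one confusion-matrix cell. A small demonstration of that behaviour, assuming cupyx is available:

import cupy as cp
import cupyx.scipy.sparse

y_true = cp.asarray([0, 0, 1, 1])
y_pred = cp.asarray([0, 0, 1, 0])
weights = cp.ones(4, dtype=cp.float32)

# The duplicate (0, 0) coordinate is summed during densification
cm = cupyx.scipy.sparse.coo_matrix(
    (weights, (y_true, y_pred)), shape=(2, 2), dtype=cp.float32).toarray()
print(cm)  # [[2. 0.] [1. 1.]]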
Example 5
    def __lioness_loop(self):
        """
        Description:
            Run the LIONESS loop, computing one sample-specific network per sample.

        Outputs:
            self.total_lioness_network: An edge-by-sample matrix containing sample-specific networks.
        """
        for i in self.indexes:
            print("Running LIONESS for sample %d:" % (i+1))
            idx = [x for x in range(self.n_conditions) if x != i]  # all samples except i
            with Timer("Computing coexpression network:"):
                if self.computing == 'gpu':
                    import cupy as cp
                    correlation_matrix = cp.corrcoef(self.expression_matrix[:, idx])
                    if cp.isnan(correlation_matrix).any():
                        cp.fill_diagonal(correlation_matrix, 1)
                        correlation_matrix = cp.nan_to_num(correlation_matrix)
                    correlation_matrix = cp.asnumpy(correlation_matrix)
                else:
                    correlation_matrix = np.corrcoef(self.expression_matrix[:, idx])
                    if np.isnan(correlation_matrix).any():
                        np.fill_diagonal(correlation_matrix, 1)
                        correlation_matrix = np.nan_to_num(correlation_matrix)

            with Timer("Normalizing networks:"):
                correlation_matrix_orig = correlation_matrix # save matrix before normalization
                correlation_matrix = self._normalize_network(correlation_matrix)

            with Timer("Inferring LIONESS network:"):
                if self.motif_matrix is not None:
                    del correlation_matrix_orig
                    subset_panda_network = self.panda_loop(correlation_matrix, np.copy(self.motif_matrix), np.copy(self.ppi_matrix),self.computing)
                else:
                    del correlation_matrix
                    subset_panda_network = correlation_matrix_orig

            lioness_network = self.n_conditions * (self.network - subset_panda_network) + subset_panda_network

            with Timer("Saving LIONESS network %d to %s using %s format:" % (i+1, self.save_dir, self.save_fmt)):
                path = os.path.join(self.save_dir, "lioness.%d.%s" % (i+1, self.save_fmt))
                if self.save_fmt == 'txt':
                    np.savetxt(path, lioness_network)
                elif self.save_fmt == 'npy':
                    np.save(path, lioness_network)
                elif self.save_fmt == 'mat':
                    from scipy.io import savemat
                    savemat(path, {'PredNet': lioness_network})
                else:
                    print("Unknown format %s! Use npy format instead." % self.save_fmt)
                    np.save(path, lioness_network)
            if i == 0:
                self.total_lioness_network = np.transpose(lioness_network).flatten()
            else:
                self.total_lioness_network = np.column_stack((self.total_lioness_network, np.transpose(lioness_network).flatten()))

        return self.total_lioness_network
Example 6
    def calc_grad_and_update_embeddings(self, embeddings, target_embeddings,
                                        is_test, learn_rate,
                                        relations_matrix_transpose):
        # Difference between the focal node's vector and the expected vector
        # computed for it from its internal information
        original_minus_target = embeddings - target_embeddings
        # Sum of the differences between the vectors of the nodes that use the
        # focal node internally (as an inherited class or a called method) and
        # those nodes' expected vectors
        original_minus_target_be_used = -relations_matrix_transpose.dot(
            original_minus_target)
        original_minus_target_all = original_minus_target + original_minus_target_be_used
        norm_original_minus_target_all = cp.linalg.norm(
            original_minus_target_all, axis=1).reshape(
                (original_minus_target_all.shape[0], 1))

        if is_test:
            embeddings[self.model.num_of_train_nodes:] \
                -= learn_rate * cp.nan_to_num(original_minus_target_all / norm_original_minus_target_all)[self.model.num_of_train_nodes:]
        else:
            embeddings -= learn_rate * cp.nan_to_num(
                original_minus_target_all / norm_original_minus_target_all)
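The update divides each row of the combined gradient by its L2 norm, and cp.nan_to_num turns the 0/0 rows (nodes whose gradient is exactly zero) back into zero steps. The same row normalisation stripped down to a sketch, assuming CuPy:

import cupy as cp

grad = cp.asarray([[3.0, 4.0],
                   [0.0, 0.0]])  # second row has zero norm
norms = cp.linalg.norm(grad, axis=1).reshape((grad.shape[0], 1))
unit = cp.nan_to_num(grad / norms)
print(unit)  # [[0.6 0.8] [0. 0.]]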
Example 7
    def compute_dA(self, A, Y, lamb_coord, lamb_noobj):
        """Compute dA for the softmax layer.

        Args:
            A (cp.ndarray): predicted labels
            Y (cp.ndarray): true labels
            lamb_coord (float): weight on the coordinate terms
            lamb_noobj (float): weight on cells that contain no object
        Returns:
            cp.ndarray: dA
        """
        dA = cp.zeros_like(Y)
        dA[:, :, :, 0] = 2 * (A[:, :, :, 0] - Y[:, :, :, 0]) * \
            (Y[:, :, :, 0] + (1 - Y[:, :, :, 0]) * lamb_noobj)
        dA[:, :, :, 1] = lamb_coord * 2 * Y[:, :, :, 0] * \
            (A[:, :, :, 1] - Y[:, :, :, 1])
        dA[:, :, :, 2] = lamb_coord * 2 * Y[:, :, :, 0] * \
            (A[:, :, :, 2] - Y[:, :, :, 2])
        # cp.nan_to_num zeroes the 0/0 NaNs the sqrt ratio produces wherever
        # the predicted width/height is zero
        dA[:, :, :, 3] = lamb_coord * Y[:, :, :, 0] * cp.nan_to_num(
            (cp.sqrt(A[:, :, :, 3]) - cp.sqrt(Y[:, :, :, 3])) /
            cp.sqrt(A[:, :, :, 3]))
        dA[:, :, :, 4] = lamb_coord * Y[:, :, :, 0] * cp.nan_to_num(
            (cp.sqrt(A[:, :, :, 4]) - cp.sqrt(Y[:, :, :, 4])) /
            cp.sqrt(A[:, :, :, 4]))
        return dA
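One caveat with this guard: cp.nan_to_num zeroes only the NaNs from 0/0; a nonzero numerator over a zero denominator gives inf, which nan_to_num replaces with the largest finite float rather than 0, so a zero predicted width under a nonzero target still produces an enormous gradient. A quick check of both behaviours:

import cupy as cp

num = cp.asarray([0.0, -2.0])
den = cp.asarray([0.0, 0.0])
print(cp.nan_to_num(num / den))
# [ 0.0000000e+000 -1.7976931e+308] -- 0/0 -> 0, but -2/0 -> -inf -> most negative float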
Example 8
import cupy as cp


def renorm_magnetisation(target_mag, Wfn):

    np2 = abs(Wfn[0]) ** 2
    np1 = abs(Wfn[1]) ** 2
    n0 = abs(Wfn[2]) ** 2
    nm1 = abs(Wfn[3]) ** 2
    nm2 = abs(Wfn[4]) ** 2

    N = np2 + np1 + n0 + nm1 + nm2

    r = (np2 + nm2) / (np1 + nm1)  # Ratio of outer component populations to inner populations

    # Correction factors:
    r1 = 1 + r * (target_mag * N - 2 * (np2 - nm2) - (np1 - nm1)) / (2 * (2 * r + 1) * np2)
    r5 = 1 - r * (target_mag * N - 2 * (np2 - nm2) - (np1 - nm1)) / (2 * (2 * r + 1) * nm2)
    r2 = 1 + r * (target_mag * N - 2 * (np2 - nm2) - (np1 - nm1)) / (2 * (2 * r + 1) * np1)
    r4 = 1 - r * (target_mag * N - 2 * (np2 - nm2) - (np1 - nm1)) / (2 * (2 * r + 1) * nm1)

    # Update renorm factors:
    r1 = cp.sqrt(r1 / N)
    r2 = cp.sqrt(r2 / N)
    r3 = cp.sqrt(1 / N)
    r4 = cp.sqrt(r4 / N)
    r5 = cp.sqrt(r5 / N)

    r1 = cp.nan_to_num(r1)
    r2 = cp.nan_to_num(r2)
    r4 = cp.nan_to_num(r4)
    r5 = cp.nan_to_num(r5)

    # Renormalise spinor components:
    Wfn[0] *= r1
    Wfn[1] *= r2
    Wfn[2] *= r3
    Wfn[3] *= r4
    Wfn[4] *= r5
Example 9
import cupy as cp


def calc_spin_dens(wfn_plus, wfn_0, wfn_minus, dt, c2):
    """Calculates various quantities such as spin vectors, sin and cosine terms and the atomic density."""
    spin_perp = cp.sqrt(2.) * (cp.conj(wfn_plus) * wfn_0 +
                               cp.conj(wfn_0) * wfn_minus)
    spin_z = cp.abs(wfn_plus)**2 - cp.abs(wfn_minus)**2
    F = cp.sqrt(cp.abs(spin_z)**2 +
                cp.abs(spin_perp)**2)  # Magnitude of spin vector

    cos_term = cp.cos(c2 * F * dt)
    sin_term = 1j * cp.sin(c2 * F * dt) / F
    sin_term = cp.nan_to_num(sin_term)  # Corrects division by 0

    density = cp.abs(wfn_minus)**2 + cp.abs(wfn_0)**2 + cp.abs(wfn_plus)**2

    return spin_perp, spin_z, cos_term, sin_term, density
Example 10
import cupy as cp
from tqdm import tqdm


def log_likelihood(collection: list, p_wt: cp.ndarray,
                   p_td: cp.ndarray) -> float:

    likelihood = 0.
    for i in tqdm(range(p_wt.shape[0]), desc='Log_like'):
        # C(Wi', dj) -> dj∈D
        word = str(i)
        c_wd = cp.zeros(len(collection), dtype=int)
        for j in range(len(collection)):
            c_wd[j] = collection[j][word]

        # P(Wi|Tk)P(Tk|dj) -> k=1~K
        sum_pp_k = cp.dot(p_wt[i], p_td.T)
        likelihood += cp.dot(c_wd, cp.nan_to_num(cp.log(sum_pp_k)))

    return float(likelihood)
Example 11
import cupy as cp
import cupyx.scipy.sparse
from cuml.prims.label import make_monotonic


def _local_cm(inputs, labels, use_sample_weight):
    if use_sample_weight:
        y_true, y_pred, sample_weight = inputs
    else:
        y_true, y_pred = inputs
        sample_weight = cp.ones(y_true.shape[0], dtype=y_true.dtype)

    y_true, _ = make_monotonic(y_true, labels, copy=True)
    y_pred, _ = make_monotonic(y_pred, labels, copy=True)

    n_labels = labels.size

    # intersect y_pred, y_true with labels, eliminate items not in labels
    ind = cp.logical_and(y_pred < n_labels, y_true < n_labels)
    y_pred = y_pred[ind]
    y_true = y_true[ind]
    sample_weight = sample_weight[ind]
    cm = cupyx.scipy.sparse.coo_matrix((sample_weight, (y_true, y_pred)),
                                       shape=(n_labels, n_labels),
                                       dtype=cp.float64).toarray()
    return cp.nan_to_num(cm)
Example 12
import cupy as cp
import numpy as np


def ndwi(data, bands, factor=1.0, vtype='int16', device='CPU') -> np.array:
    """
    GPU Support
    :param data: xarray or numpy array object in the form (c, h, w)
    :param bands: list of band names in the original raster
    :param factor: factor used for toa imagery
    :param vtype: output dtype used on the CPU path
    :param device: 'CPU' or 'GPU'
    :return: new band with NDWI calculated
    """
    # 8 and 4 band imagery: NDWI := factor * (Green - NIR1) / (Green + NIR1)
    GREEN, NIR1 = bands.index('Green'), bands.index('NIR1')

    ndwi = factor * ((data[GREEN, :, :] - data[NIR1, :, :]) /
                     (data[GREEN, :, :] + data[NIR1, :, :]))

    if device == 'CPU':
        ndwi = ndwi.expand_dims(dim="band", axis=0).fillna(0).astype(vtype)
    elif device == 'GPU':
        ndwi = cp.nan_to_num(cp.expand_dims(ndwi, 0))
    else:
        raise RuntimeError("{} device not supported".format(device))
    return ndwi, "NDWI"
Example 13
import cupy as cp
import numpy as np


def fdi(data, bands, factor=1.0, vtype='int16', device='CPU') -> np.array:
    """
    GPU Support
    :param data: xarray or numpy array object in the form (c, h, w)
    :param bands: list of band names in the original raster
    :param factor: factor used for toa imagery
    :param vtype: output dtype used on the CPU path
    :param device: 'CPU' or 'GPU'
    :return: new band with FDI calculated
    """
    # 8 band imagery: FDI := NIR2 - (RedEdge + Blue)
    # 4 band imagery: FDI := NIR1 - (Red + Blue)
    NIR = bands.index('NIR2') if 'NIR2' in bands else bands.index('NIR1')
    RED = bands.index('RedEdge') if 'RedEdge' in bands else bands.index('Red')
    BLUE = bands.index('Blue')

    fdi = data[NIR, :, :] - (data[RED, :, :] + data[BLUE, :, :])
    if device == 'CPU':
        fdi = fdi.expand_dims(dim="band", axis=0).fillna(0).astype(vtype)
    elif device == 'GPU':
        fdi = cp.nan_to_num(cp.expand_dims(fdi, 0))
    else:
        raise RuntimeError("{} device not supported".format(device))
    return fdi, "FDI"
Example 14
import cupy as cp
import numpy as np


def si(data, bands, factor=1.0, vtype='int16', device='CPU') -> np.array:
    """
    GPU Support
    :param data: xarray or numpy array object in the form (c, h, w)
    :param bands: list of band names in the original raster
    :param factor: factor used for toa imagery
    :param vtype: output dtype used on the CPU path
    :param device: 'CPU' or 'GPU'
    :return: new band with SI calculated
    """
    # 8 and 4 band imagery:
    # SI := ((factor - Blue) * (factor - Green) * (factor - Red)) ** (1.0 / 3)
    BLUE, GREEN = bands.index('Blue'), bands.index('Green')
    RED = bands.index('Red')

    si = ((factor - data[BLUE, :, :]) * (factor - data[GREEN, :, :]) *
          (factor - data[RED, :, :]))**(1.0 / 3.0)

    if device == 'CPU':
        si = si.expand_dims(dim="band", axis=0).fillna(0).astype(vtype)
    elif device == 'GPU':
        si = cp.nan_to_num(cp.expand_dims(si, 0))
    else:
        raise RuntimeError("{} device not supported".format(device))
    return si, "SI"
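All three index functions share the same signature and return a (band, name) pair, so one call pattern covers them. A hedged usage sketch on random data, assuming the GPU path and a hypothetical 8-band ordering invented purely for illustration:

import cupy as cp

BANDS = ['Coastal', 'Blue', 'Green', 'Yellow',
         'Red', 'RedEdge', 'NIR1', 'NIR2']  # hypothetical ordering
data = cp.random.random((len(BANDS), 64, 64))

for index_fn in (ndwi, fdi, si):
    band, name = index_fn(data, BANDS, device='GPU')
    print(name, band.shape)  # each index comes back as a (1, 64, 64) band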
Example 15
def confusion_matrix(y_true,
                     y_pred,
                     labels=None,
                     sample_weight=None,
                     normalize=None) -> CumlArray:
    """Compute confusion matrix to evaluate the accuracy of a classification.

    Parameters
    ----------
    y_true : array-like (device or host) shape = (n_samples,)
        or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like (device or host) shape = (n_samples,)
        or (n_samples, n_outputs)
        Estimated target values.
    labels : array-like (device or host) shape = (n_classes,), optional
        List of labels to index the matrix. This may be used to reorder or
        select a subset of labels. If None is given, those that appear at least
        once in y_true or y_pred are used in sorted order.
    sample_weight : array-like (device or host) shape = (n_samples,), optional
        Sample weights.
    normalize : string in ['true', 'pred', 'all']
        Normalizes confusion matrix over the true (rows), predicted (columns)
        conditions or all the population. If None, confusion matrix will not be
        normalized.

    Returns
    -------
    C : array-like (device or host) shape = (n_classes, n_classes)
        Confusion matrix.
    """
    y_true, n_rows, n_cols, dtype = \
        input_to_cuml_array(y_true, check_dtype=[cp.int32, cp.int64])

    y_pred, _, _, _ = \
        input_to_cuml_array(y_pred, check_dtype=dtype,
                            check_rows=n_rows, check_cols=n_cols)

    if labels is None:
        labels = sorted_unique_labels(y_true, y_pred)
        n_labels = len(labels)
    else:
        labels, n_labels, _, _ = \
            input_to_cupy_array(labels, check_dtype=dtype, check_cols=1)
    if sample_weight is None:
        sample_weight = cp.ones(n_rows, dtype=dtype)
    else:
        sample_weight, _, _, _ = \
            input_to_cupy_array(sample_weight,
                                check_dtype=[cp.float32, cp.float64,
                                             cp.int32, cp.int64],
                                check_rows=n_rows, check_cols=n_cols)

    if normalize not in ['true', 'pred', 'all', None]:
        msg = "normalize must be one of " \
              f"{{'true', 'pred', 'all', None}}, got {normalize}."
        raise ValueError(msg)

    with using_output_type("cupy"):
        y_true, _ = make_monotonic(y_true, labels, copy=True)
        y_pred, _ = make_monotonic(y_pred, labels, copy=True)

    # intersect y_pred, y_true with labels, eliminate items not in labels
    ind = cp.logical_and(y_pred < n_labels, y_true < n_labels)
    y_pred = y_pred[ind]
    y_true = y_true[ind]
    sample_weight = sample_weight[ind]

    cm = cupyx.scipy.sparse.coo_matrix((sample_weight, (y_true, y_pred)),
                                       shape=(n_labels, n_labels),
                                       dtype=np.float64).toarray()

    # Choose the accumulator dtype to always have high precision
    if sample_weight.dtype.kind in {'i', 'u', 'b'}:
        cm = cm.astype(np.int64)

    with np.errstate(all='ignore'):
        if normalize == 'true':
            cm = cp.divide(cm, cm.sum(axis=1, keepdims=True))
        elif normalize == 'pred':
            cm = cp.divide(cm, cm.sum(axis=0, keepdims=True))
        elif normalize == 'all':
            cm = cp.divide(cm, cm.sum())
        cm = cp.nan_to_num(cm)

    return cm
Example 16
def affine_transform(image,
                     rmatrix,
                     order=3,
                     scale=1.0,
                     image_center=None,
                     recenter=False,
                     missing=0.0,
                     use_scipy=False,
                     use_cupyx=False):
    """
    Rotates, shifts and scales an image.

    Will use `skimage.transform.warp` unless scikit-image can't be imported,
    in which case it will use `scipy.ndimage.affine_transform`.

    Parameters
    ----------
    image : `numpy.ndarray`
        2D image to be rotated.
    rmatrix : `numpy.ndarray` that is 2x2
        Linear transformation rotation matrix.
    order : `int` 0-5, optional
        Interpolation order to be used, defaults to 3. When using scikit-image this parameter
        is passed into `skimage.transform.warp` (e.g., 3 corresponds to bi-cubic interpolation).
        When using scipy it is passed into
        `scipy.ndimage.affine_transform` where it controls the order of the spline.
    scale : `float`
        A scale factor for the image with the default being no scaling.
    image_center : tuple, optional
        The point in the image to rotate around (axis of rotation).
        Defaults to the center of the array.
    recenter : `bool` or array-like, optional
        Move the axis of rotation to the center of the array or recenter coords.
        Defaults to `False` i.e., rotate around ``image_center``.
    missing : `float`, optional
        The value to replace any missing data after the transformation.
    use_scipy : `bool`, optional
        Force use of `scipy.ndimage.affine_transform`.
        Will set all "NaNs" in image to zero before doing the transform.
        Defaults to `False`, unless scikit-image can't be imported.
    use_cupyx : `bool`, optional
        Force use of `cupyx.scipy.ndimage.affine_transform` to run the
        transform on the GPU. Will set all "NaNs" in image to zero before
        doing the transform. Defaults to `False`.

    Returns
    -------
    `numpy.ndarray`:
        New rotated, scaled and translated image.

    Notes
    -----
    This algorithm uses an affine transformation as opposed to a polynomial
    geometrical transformation, which by default is `skimage.transform.warp`.
    One can specify using `scipy.ndimage.affine_transform` as
    an alternative affine transformation. The two transformations use different
    algorithms and thus do not give identical output.

    When using `skimage.transform.warp` with order >= 4 or using
    `scipy.ndimage.affine_transform` at all, "NaN" values will be replaced with
    zero prior to rotation. No attempt is made to retain the "NaN" values.

    Input arrays with integer data are cast to float 64 and can be re-cast using
    `numpy.ndarray.astype` if desired.

    In the case of `skimage.transform.warp`, the image is normalized to [0, 1]
    before passing it to the function. It is later rescaled back to the original range.

    Although this function is analogous to the IDL's ``rot`` function, it does not
    use the same algorithm as the IDL ``rot`` function.
    IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__
    method to calculate the inverse mapping of original to target pixel
    coordinates. This is a polynomial geometrical transformation.
    Then optionally it uses a bicubic convolution interpolation
    algorithm to map the original to target pixel values.
    """
    rmatrix = rmatrix / scale
    array_center = (np.array(image.shape)[::-1] - 1) / 2.0

    # Make sure the image center is an array and is where it's supposed to be
    if image_center is not None:
        image_center = np.asanyarray(image_center)
    else:
        image_center = array_center

    # Determine center of rotation based on use (or not) of the recenter keyword
    if recenter:
        rot_center = array_center
    else:
        rot_center = image_center

    displacement = np.dot(rmatrix, rot_center)
    shift = image_center - displacement
    if not use_scipy:
        try:
            import skimage.transform
        except ImportError:
            warnings.warn(
                "scikit-image could not be imported. Image rotation will use scipy",
                ImportWarning)
            use_scipy = True
    if use_cupyx:
        try:
            import cupy
            import cupyx.scipy.ndimage
        except ImportError:
            warnings.warn(
                "cupyx could not be imported. Image rotation will use scipy",
                ImportWarning)
            use_cupyx = False
            use_scipy = True
    if use_cupyx:
        if np.any(np.isnan(image)):
            warnings.warn("Setting NaNs to 0 for CuPy rotation.",
                          SunpyUserWarning)
        # Transform the image on the GPU using the cupyx affine transform
        rotated_image = cupyx.scipy.ndimage.affine_transform(
            cupy.nan_to_num(cupy.asarray(image)).T,
            cupy.asarray(rmatrix),
            offset=shift,
            order=order,
            mode='constant',
            cval=missing).T
        return cupy.asnumpy(rotated_image)
    if use_scipy:
        if np.any(np.isnan(image)):
            warnings.warn("Setting NaNs to 0 for SciPy rotation.",
                          SunpyUserWarning)
        # Transform the image using the scipy affine transform
        rotated_image = scipy.ndimage.affine_transform(
            np.nan_to_num(image).T,
            rmatrix,
            offset=shift,
            order=order,
            mode='constant',
            cval=missing).T
    else:
        # Make the rotation matrix 3x3 to include translation of the image
        skmatrix = np.zeros((3, 3))
        skmatrix[:2, :2] = rmatrix
        skmatrix[2, 2] = 1.0
        skmatrix[:2, 2] = shift
        tform = skimage.transform.AffineTransform(skmatrix)

        if issubclass(image.dtype.type, numbers.Integral):
            warnings.warn("Integer input data has been cast to float64.",
                          SunpyUserWarning)
            adjusted_image = image.astype(np.float64)
        else:
            adjusted_image = image.copy()
        if np.any(np.isnan(adjusted_image)) and order >= 4:
            warnings.warn(
                "Setting NaNs to 0 for higher-order scikit-image rotation.",
                SunpyUserWarning)
            adjusted_image = np.nan_to_num(adjusted_image)

        # Scale image to range [0, 1] if it is valid (not made up entirely of NaNs)
        is_nan_image = np.all(np.isnan(adjusted_image))
        if is_nan_image:
            adjusted_missing = missing
        else:
            im_min = np.nanmin(adjusted_image)
            adjusted_image -= im_min
            im_max = np.nanmax(adjusted_image)
            if im_max > 0:
                adjusted_image /= im_max
                adjusted_missing = (missing - im_min) / im_max
            else:
                # The input array is all one value (aside from NaNs), so no scaling is needed
                adjusted_missing = missing - im_min

        rotated_image = skimage.transform.warp(adjusted_image,
                                               tform,
                                               order=order,
                                               mode='constant',
                                               cval=adjusted_missing)

        # Convert the image back to its original range if it is valid
        if not is_nan_image:
            if im_max > 0:
                rotated_image *= im_max
            rotated_image += im_min

    return rotated_image
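For reference, a minimal invocation of the function above, assuming the module-level imports the original file provides (numpy, scipy, warnings, SunpyUserWarning) and forcing the scipy path:

import numpy as np

theta = np.deg2rad(30)
rmatrix = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta), np.cos(theta)]])
image = np.random.random((64, 64))
rotated = affine_transform(image, rmatrix, use_scipy=True)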
Example 17
import cupy as cp


def cosine_similarity(X, Y):
    # pairwise_distances is assumed to be supplied by the surrounding module
    # (e.g. sklearn.metrics.pairwise_distances, which returns host data that
    # cp.asarray then moves to the GPU)
    K = 1.0 - cp.asarray(pairwise_distances(X, Y, metric='cosine'))
    return cp.nan_to_num(K, copy=False)
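A hedged usage sketch, assuming pairwise_distances is sklearn.metrics.pairwise_distances; any NaN distances from degenerate rows are clamped to 0 by the nan_to_num call:

import numpy as np

X = np.asarray([[1.0, 0.0],
                [0.0, 1.0],
                [1.0, 1.0]])
K = cosine_similarity(X, X)  # returns a CuPy array
print(K.diagonal())          # ~[1. 1. 1.]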
Example 18
import cupy as cp


def nanmean(a, *args, **kwargs):
    """ For cupy v0.6.0 compatibility """
    return cp.sum(cp.nan_to_num(a), *args, **kwargs) / cp.sum(
        ~cp.isnan(a), *args, **kwargs
    )
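A quick sanity check of the shim against the built-in that newer CuPy versions ship:

import cupy as cp

a = cp.asarray([[1.0, cp.nan, 3.0],
                [4.0, 5.0, cp.nan]])
print(nanmean(a, axis=1))     # [2.  4.5]
print(cp.nanmean(a, axis=1))  # matches on CuPy versions that provide it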