Example 1
  def local_cov_bet_class_NN(self, key, label, nb_class, batchsize, k):
    # pairwise differences via broadcasting: sub_key_broadcast[i, j] = key[j] - key[i]
    key_broadcast = cp.broadcast_to(key, (batchsize, batchsize, key.shape[1]))
    key_broadcast_transpose = cp.transpose(key_broadcast, axes=(1, 0, 2))
    sub_key_broadcast = key_broadcast - key_broadcast_transpose
    norm_sub_broadcast = cp.linalg.norm(sub_key_broadcast, axis=2)
    # the distance to the k-th nearest neighbour sets the local bandwidth sigma
    sorted_d = cp.sort(norm_sub_broadcast, axis=0)
    kth_d = sorted_d[k]
    kth_d = kth_d.reshape([batchsize, 1])
    sigma = cp.matmul(kth_d, cp.transpose(kth_d))

    batchsize_per_class = batchsize // nb_class
    index = cp.arange(key.shape[0])
    xx, yy = cp.meshgrid(index, index)
    sub = key[xx] - key[yy]
    norm_sub = cp.linalg.norm(sub, axis=2)
    # Gaussian affinities, weighted differently for same-class and
    # different-class pairs
    a1 = cp.exp(-norm_sub * norm_sub / sigma)
    lindex = cp.arange(label.shape[0])
    lx, ly = cp.meshgrid(lindex, lindex)
    l = (label[lx] == label[ly])
    a1 = a1 * l * (1.0 / (batchsize * nb_class) - 1.0 / batchsize_per_class)
    l2 = (label[lx] != label[ly])
    a2 = l2 * (1.0 / batchsize)
    a = a1 + a2
    a = a.reshape([a.shape[0], a.shape[1], 1])
    a_sub = a * sub
    Sb = cp.einsum('ijk,ijl->kl', a_sub, sub, dtype='float32') * 0.5
    return Sb
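A minimal self-contained sketch of the broadcast-and-transpose trick used above for pairwise differences (toy data, plain CuPy):

import cupy as cp

key = cp.arange(12, dtype=cp.float32).reshape(4, 3)  # 4 samples, 3 features
n = key.shape[0]

# b[i, j] == key[j]; transposing the first two axes yields key[i],
# so diff[i, j] == key[j] - key[i]
b = cp.broadcast_to(key, (n, n, key.shape[1]))
diff = b - cp.transpose(b, axes=(1, 0, 2))

dist = cp.linalg.norm(diff, axis=2)  # pairwise Euclidean distances
print(dist.shape)  # (4, 4)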
Example 2
 def run_all_to_all(rank, n_workers, dtype):
     dev = cuda.Device(rank)
     dev.use()
     comm = NCCLBackend(n_workers, rank)
     in_array = cupy.arange(n_workers * 10,
                            dtype='f').reshape(n_workers, 10)
     out_array = cupy.zeros((n_workers, 10), dtype='f')
     comm.all_to_all(in_array, out_array)
     expected = (10 * rank) + cupy.broadcast_to(cupy.arange(10, dtype='f'),
                                                (n_workers, 10))
     testing.assert_allclose(out_array, expected)
Example 3
    def __call__(self,
                 data,
                 iters=5,
                 width=3.0,
                 weights=None,
                 mask=None,
                 keepdims=False):

        data = cp.asarray(data, dtype=self.dtype)

        filt = cp.ones_like(data)
        if mask is not None:
            mask = cp.asarray(mask, dtype=self.dtype)
            elementwise_not(cp.broadcast_to(mask, data.shape), filt)
        if weights is not None:
            weights = cp.asarray(weights, dtype=self.dtype)
            try:
                filt *= weights
            except ValueError:
                if isinstance(self.axis, int):
                    ndim = data.ndim
                    axis = self.axis % ndim
                    if weights.size == data.shape[axis]:
                        # reshape the 1-D weights so that they broadcast
                        # along the reduction axis (reshape expects a
                        # tuple, not a generator)
                        filt *= weights.reshape(
                            tuple(-1 if i == axis else 1
                                  for i in range(ndim)))
                    else:
                        raise ValueError(
                            'length of weights must be the same as that of'
                            ' the data along the specified axis.')
                else:
                    raise ValueError(
                        'If weights and data are not broadcastable, '
                        'axis must be specified as an int.')

        checkfinite(data, filt, filt)

        iterator = count() if (iters is None) else range(iters)

        csum = check_sum(filt, axis=self.axis)
        for _ in iterator:
            self.updatefilt(data, filt, width)
            tsum = check_sum(filt, axis=self.axis)
            if all_equal(csum, tsum):
                break
            else:
                csum = tsum

        if self.rtnmask:
            result = elementwise_not(filt, filt)
        else:
            result = self.reduce(data, filt, keepdims=keepdims)

        return result
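The fallback branch above reshapes 1-D weights so they broadcast along `self.axis`; a standalone sketch of that reshape with hypothetical shapes:

import cupy as cp

data = cp.ones((4, 5, 6))
weights = cp.arange(5, dtype=cp.float64)  # matches data.shape[1]
axis = 1

# insert singleton axes everywhere except `axis`, giving shape (1, 5, 1)
w = weights.reshape(tuple(-1 if i == axis else 1 for i in range(data.ndim)))
print((data * w).shape)  # (4, 5, 6)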
Example 4
 def run_gather(rank, n_workers, root, dtype):
     dev = cuda.Device(rank)
     dev.use()
     comm = NCCLBackend(n_workers, rank)
     in_array = (rank + 1) * cupy.arange(10, dtype='f')
     out_array = cupy.zeros((n_workers, 10), dtype='f')
     comm.gather(in_array, out_array, root)
     if rank == root:
         expected = 1 + cupy.arange(n_workers).reshape(n_workers, 1)
         expected = expected * cupy.broadcast_to(cupy.arange(10, dtype='f'),
                                                 (n_workers, 10))
         testing.assert_allclose(out_array, expected)
Example 5
    def predict(self, input_x):
        output = self.predictor(input_x)
        batch_size, input_channel, input_h, input_w = input_x.shape
        batch_size, _, grid_h, grid_w = output.shape
        x, y, w, h, conf, prob = F.split_axis(F.reshape(
            output, (batch_size, self.predictor.n_boxes,
                     self.predictor.n_classes + 5, grid_h, grid_w)),
                                              (1, 2, 3, 4, 5),
                                              axis=2)
        x = F.sigmoid(x)  # activation for x
        y = F.sigmoid(y)  # activation for y
        conf = F.sigmoid(conf)  # activation for conf
        prob = F.transpose(prob, (0, 2, 1, 3, 4))
        prob = F.softmax(prob)  # activation for probability
        prob = F.transpose(prob, (0, 2, 1, 3, 4))

        # convert x, y, w, h into absolute coordinates
        x_shift = Variable(
            xp.broadcast_to(xp.arange(grid_w, dtype=xp.float32), x.shape))
        y_shift = Variable(
            xp.broadcast_to(
                xp.arange(grid_h, dtype=xp.float32).reshape(grid_h, 1),
                y.shape))
        w_anchor = Variable(
            xp.broadcast_to(
                xp.reshape(
                    xp.array(self.anchors, dtype=xp.float32)[:, 0],
                    (self.predictor.n_boxes, 1, 1, 1)), w.shape))
        h_anchor = Variable(
            xp.broadcast_to(
                xp.reshape(
                    xp.array(self.anchors, dtype=xp.float32)[:, 1],
                    (self.predictor.n_boxes, 1, 1, 1)), h.shape))
        # x_shift.to_gpu(), y_shift.to_gpu(), w_anchor.to_gpu(), h_anchor.to_gpu()
        box_x = (x + x_shift) / grid_w
        box_y = (y + y_shift) / grid_h
        box_w = F.exp(w) * w_anchor / grid_w
        box_h = F.exp(h) * h_anchor / grid_h

        return box_x, box_y, box_w, box_h, conf, prob
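The `x_shift`/`y_shift` construction is `broadcast_to` stamping per-column and per-row cell indices across the whole prediction tensor; a small sketch with made-up sizes:

import cupy as cp

grid_h, grid_w = 3, 4
shape = (1, 5, 1, grid_h, grid_w)  # (batch, n_boxes, 1, H, W), toy sizes

x_shift = cp.broadcast_to(cp.arange(grid_w, dtype=cp.float32), shape)
y_shift = cp.broadcast_to(
    cp.arange(grid_h, dtype=cp.float32).reshape(grid_h, 1), shape)

print(x_shift[0, 0, 0])  # every row is [0. 1. 2. 3.]
print(y_shift[0, 0, 0])  # every column is [0. 1. 2.]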
Example 6
def in1d(ar1, ar2, assume_unique=False, invert=False):
    """Tests whether each element of a 1-D array is also present in a second
    array.

    Returns a boolean array the same length as ``ar1`` that is ``True``
    where an element of ``ar1`` is in ``ar2`` and ``False`` otherwise.

    Args:
        ar1 (cupy.ndarray): Input array.
        ar2 (cupy.ndarray): The values against which to test each value of
            ``ar1``.
        assume_unique (bool, optional): Ignored
        invert (bool, optional): If ``True``, the values in the returned array
            are inverted (that is, ``False`` where an element of ``ar1`` is in
            ``ar2`` and ``True`` otherwise). Default is ``False``.

    Returns:
        cupy.ndarray, bool: The values ``ar1[in1d]`` are in ``ar2``.

    """
    # Ravel both arrays, behavior for the first array could be different
    ar1 = ar1.ravel()
    ar2 = ar2.ravel()
    if ar1.size == 0 or ar2.size == 0:
        if invert:
            return cupy.ones(ar1.shape, dtype=cupy.bool_)
        else:
            return cupy.zeros(ar1.shape, dtype=cupy.bool_)

    shape = (ar1.size, ar2.size)
    ar1_broadcast = cupy.broadcast_to(ar1[..., cupy.newaxis], shape)
    ar2_broadcast = cupy.broadcast_to(ar2, shape)
    count = (ar1_broadcast == ar2_broadcast).sum(axis=1)
    if invert:
        return count == 0
    else:
        return count > 0
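A quick usage check with toy values:

import cupy

ar1 = cupy.array([0, 1, 2, 5, 0])
ar2 = cupy.array([0, 2])
print(in1d(ar1, ar2))               # [ True False  True False  True]
print(in1d(ar1, ar2, invert=True))  # [False  True False  True False]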
Example 7
def triu_indices(n, k=0, m=None):
    """Returns the indices of the upper triangular matrix.
    Here, the first group of elements contains row coordinates
    of all indices and the second group of elements
    contains column coordinates.

    Parameters
    ----------
    n : int
        The size of the arrays for which the returned indices will
        be valid.
    k : int, optional
        Diagonal offset. By default, `k = 0`, i.e. the main
        diagonal. A positive `k` selects diagonals above the main
        diagonal, and a negative `k` selects those below it.
    m : int, optional
        The column dimension of the arrays for which the
        returned arrays will be valid. By default, `m = n`.

    Returns
    -------
    y : tuple of ndarrays
        The indices for the triangle. The returned tuple
        contains two arrays, each with the indices along
        one dimension of the array.

    See Also
    --------
    numpy.triu_indices

    """

    tri_ = ~cupy.tri(n, m, k=k - 1, dtype=bool)

    return tuple(
        cupy.broadcast_to(inds, tri_.shape)[tri_]
        for inds in cupy.indices(tri_.shape, dtype=int))
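For concreteness, a small usage sketch (n = 3; the values follow from the definition):

import cupy

rows, cols = triu_indices(3)
print(rows)  # [0 0 0 1 1 2]
print(cols)  # [0 1 2 1 2 2]

a = cupy.arange(9).reshape(3, 3)
print(a[triu_indices(3, k=1)])  # strictly-upper entries: [1 2 5]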
Example 8
def tril_indices(n, k=0, m=None):
    """Returns the indices of the lower triangular matrix.
    Here, the first group of elements contains row coordinates
    of all indices and the second group of elements
    contains column coordinates.

    Parameters
    ----------
    n : int
        The row dimension of the arrays for which the returned
        indices will be valid.
    k : int, optional
        Diagonal above which to zero elements. `k = 0`
        (the default) is the main diagonal, `k < 0` is
        below it and `k > 0` is above.
    m : int, optional
        The column dimension of the arrays for which the
        returned arrays will be valid. By default, `m = n`.

    Returns
    -------
    y : tuple of ndarrays
        The indices for the triangle. The returned tuple
        contains two arrays, each with the indices along
        one dimension of the array.

    See Also
    --------
    numpy.tril_indices

    """

    tri_ = cupy.tri(n, m, k=k, dtype=bool)

    return tuple(
        cupy.broadcast_to(inds, tri_.shape)[tri_]
        for inds in cupy.indices(tri_.shape, dtype=int))
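As with `triu_indices`, the returned tuple can index or assign directly; a short sketch with toy data:

import cupy

a = cupy.arange(16).reshape(4, 4)
a[tril_indices(4, k=-1)] = 0  # zero everything strictly below the diagonal
print(a)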
Example 9
    def __call__(self, input_x, t):
        output = self.predictor(input_x)
        batch_size, _, grid_h, grid_w = output.shape
        self.seen += batch_size
        x, y, w, h, conf, prob = F.split_axis(F.reshape(
            output, (batch_size, self.predictor.n_boxes,
                     self.predictor.n_classes + 5, grid_h, grid_w)),
                                              (1, 2, 3, 4, 5),
                                              axis=2)
        x = F.sigmoid(x)  # activation for x
        y = F.sigmoid(y)  # activation for y
        conf = F.sigmoid(conf)  # activation for conf
        prob = F.transpose(prob, (0, 2, 1, 3, 4))
        prob = F.softmax(prob)  # activation for probability

        # prepare the training targets
        tw = xp.zeros(
            w.shape,
            dtype=xp.float32)  # train w and h towards 0 (e^w and e^h -> 1, i.e. an anchor scale factor of 1)
        th = xp.zeros(h.shape, dtype=xp.float32)
        tx = xp.tile(0.5, x.shape).astype(xp.float32)  # train the activated x and y towards 0.5
        ty = xp.tile(0.5, y.shape).astype(xp.float32)

        if self.seen < self.unstable_seen:  # box-loss scale defaults to 0.1 for boxes without an object center
            box_learning_scale = xp.tile(0.1, x.shape).astype(xp.float32)
        else:
            box_learning_scale = xp.tile(0, x.shape).astype(xp.float32)

        tconf = xp.zeros(
            conf.shape, dtype=xp.float32
        )  # confidence truth defaults to 0; anchors with IoU >= thresh are
        # not trained, but the best box of each object-containing grid cell
        # is pushed towards the true IoU
        conf_learning_scale = xp.tile(0.1, conf.shape).astype(xp.float32)

        tprob = prob.data.copy()  # train nothing but the best anchor (squared error against itself = 0)

        # compute the IoU of every bbox against the truth (per batch element)
        x_shift = Variable(
            xp.broadcast_to(xp.arange(grid_w, dtype=xp.float32), x.shape[1:]))
        y_shift = Variable(
            xp.broadcast_to(
                xp.arange(grid_h, dtype=xp.float32).reshape(grid_h, 1),
                y.shape[1:]))
        w_anchor = Variable(
            xp.broadcast_to(
                xp.reshape(
                    xp.array(self.anchors, dtype=xp.float32)[:, 0],
                    (self.predictor.n_boxes, 1, 1, 1)), w.shape[1:]))
        h_anchor = Variable(
            xp.broadcast_to(
                xp.reshape(
                    xp.array(self.anchors, dtype=xp.float32)[:, 1],
                    (self.predictor.n_boxes, 1, 1, 1)), h.shape[1:]))
        x_shift.to_gpu(), y_shift.to_gpu()
        w_anchor.to_gpu(), h_anchor.to_gpu()
        best_ious = []
        for batch in range(batch_size):
            n_truth_boxes = len(t[batch])
            box_x = (x[batch] + x_shift) / grid_w
            box_y = (y[batch] + y_shift) / grid_h
            box_w = F.exp(w[batch]) * w_anchor / grid_w
            box_h = F.exp(h[batch]) * h_anchor / grid_h

            ious = []
            for truth_index in range(n_truth_boxes):
                truth_box_x = Variable(
                    xp.broadcast_to(
                        xp.array(t[batch][truth_index]["x"], dtype=xp.float32),
                        box_x.shape))
                truth_box_y = Variable(
                    xp.broadcast_to(
                        xp.array(t[batch][truth_index]["y"], dtype=xp.float32),
                        box_y.shape))
                truth_box_w = Variable(
                    xp.broadcast_to(
                        xp.array(t[batch][truth_index]["w"], dtype=xp.float32),
                        box_w.shape))
                truth_box_h = Variable(
                    xp.broadcast_to(
                        xp.array(t[batch][truth_index]["h"], dtype=xp.float32),
                        box_h.shape))
                truth_box_x.to_gpu(), truth_box_y.to_gpu()
                truth_box_w.to_gpu(), truth_box_h.to_gpu()
                ious.append(
                    multi_box_iou(
                        Box(box_x, box_y, box_w, box_h),
                        Box(truth_box_x, truth_box_y, truth_box_w,
                            truth_box_h)).data.get())
            ious = xp.array(ious)
            best_ious.append(xp.max(ious, axis=0))
        best_ious = xp.array(best_ious)

        # for anchors whose IoU exceeds the threshold, do not push conf down to 0 (grid cells around the truth keep their conf as-is)
        tconf[best_ious > self.thresh] = conf.data.get()[
            best_ious > self.thresh]
        conf_learning_scale[best_ious > self.thresh] = 0

        # only for anchor boxes that contain an object, individually correct x, y, w, h, conf and prob
        abs_anchors = self.anchors / xp.array([grid_w, grid_h])
        for batch in range(batch_size):
            for truth_box in t[batch]:
                truth_w = int(float(truth_box["x"]) * grid_w)
                truth_h = int(float(truth_box["y"]) * grid_h)
                truth_n = 0
                best_iou = 0.0
                for anchor_index, abs_anchor in enumerate(abs_anchors):
                    iou = box_iou(
                        Box(0, 0, float(truth_box["w"]),
                            float(truth_box["h"])),
                        Box(0, 0, abs_anchor[0], abs_anchor[1]))
                    if best_iou < iou:
                        best_iou = iou
                        truth_n = anchor_index

                # for the anchor that contains the object: push the center towards the true coordinates instead of 0.5, push the anchor scale towards the true scale instead of 1, and set the learning scale to 1
                box_learning_scale[batch, truth_n, :, truth_h, truth_w] = 1.0
                tx[batch, truth_n, :, truth_h,
                   truth_w] = float(truth_box["x"]) * grid_w - truth_w
                ty[batch, truth_n, :, truth_h,
                   truth_w] = float(truth_box["y"]) * grid_h - truth_h
                tw[batch, truth_n, :, truth_h, truth_w] = xp.log(
                    float(truth_box["w"]) / abs_anchors[truth_n][0])
                th[batch, truth_n, :, truth_h, truth_w] = xp.log(
                    float(truth_box["h"]) / abs_anchors[truth_n][1])
                tprob[batch, :, truth_n, truth_h, truth_w] = 0
                tprob[batch,
                      int(truth_box["label"]), truth_n, truth_h, truth_w] = 1

                # monitor the IoU
                full_truth_box = Box(float(truth_box["x"]),
                                     float(truth_box["y"]),
                                     float(truth_box["w"]),
                                     float(truth_box["h"]))
                predicted_box = Box(
                    (x[batch][truth_n][0][truth_h][truth_w].data.get() +
                     truth_w) / grid_w,
                    (y[batch][truth_n][0][truth_h][truth_w].data.get() +
                     truth_h) / grid_h,
                    xp.exp(w[batch][truth_n][0][truth_h][truth_w].data.get()) *
                    abs_anchors[truth_n][0],
                    xp.exp(h[batch][truth_n][0][truth_h][truth_w].data.get()) *
                    abs_anchors[truth_n][1])
                predicted_iou = box_iou(full_truth_box, predicted_box)
                tconf[batch, truth_n, :, truth_h, truth_w] = predicted_iou
                conf_learning_scale[batch, truth_n, :, truth_h, truth_w] = 10.0

            # debug prints
            maps = F.transpose(prob[batch], (2, 3, 1, 0)).data
            print(
                "best confidences and best conditional probability and predicted class of each grid:"
            )
            for i in range(grid_h):
                for j in range(grid_w):
                    print("%2d" %
                          (int(conf[batch, :, :, i, j].data.max() * 100)),
                          end=" ")
                print("     ", end="")
                for j in range(grid_w):
                    print("%2d" % (maps[i][j][int(
                        maps[i][j].max(axis=1).argmax())].argmax()),
                          end=" ")
                print("     ", end="")
                for j in range(grid_w):
                    print("%2d" % (maps[i][j][int(
                        maps[i][j].max(axis=1).argmax())].max() * 100),
                          end=" ")
                print()

            print(
                "best default iou: %.2f   predicted iou: %.2f   confidence: %.2f   class: %s"
                % (best_iou, predicted_iou,
                   conf[batch][truth_n][0][truth_h][truth_w].data,
                   t[batch][0]["label"]))
            print("-------------------------------")
        print("seen = %d" % self.seen)

        # compute the losses
        tx, ty, tw, th = Variable(tx), Variable(ty), Variable(tw), Variable(th)
        tconf, tprob = Variable(tconf), Variable(tprob)
        box_learning_scale = Variable(box_learning_scale)
        conf_learning_scale = Variable(conf_learning_scale)
        tx.to_gpu(), ty.to_gpu(), tw.to_gpu(), th.to_gpu()
        tconf.to_gpu(), tprob.to_gpu()
        box_learning_scale.to_gpu()
        conf_learning_scale.to_gpu()

        x_loss = F.sum((tx - x)**2 * box_learning_scale) / 2
        y_loss = F.sum((ty - y)**2 * box_learning_scale) / 2
        w_loss = F.sum((tw - w)**2 * box_learning_scale) / 2
        h_loss = F.sum((th - h)**2 * box_learning_scale) / 2
        c_loss = F.sum((tconf - conf)**2 * conf_learning_scale) / 2
        p_loss = F.sum((tprob - prob)**2) / 2
        print(
            "x_loss: %f  y_loss: %f  w_loss: %f  h_loss: %f  c_loss: %f   p_loss: %f"
            % (F.sum(x_loss).data, F.sum(y_loss).data, F.sum(w_loss).data,
               F.sum(h_loss).data, F.sum(c_loss).data, F.sum(p_loss).data))

        loss = x_loss + y_loss + w_loss + h_loss + c_loss + p_loss
        return loss
Example 10
 def test_broadcast_to_short_shape_numpy19(self, dtype):
     # Note that broadcast_to is only supported on numpy>=1.10
     a = testing.shaped_arange((1, 3, 4), cupy, dtype)
     with self.assertRaises(ValueError):
         cupy.broadcast_to(a, (3, 4))
Example 11
    return [
        Array._new(array)
        for array in np.broadcast_arrays(*[a._array for a in arrays])
    ]


def broadcast_to(x: Array, /, shape: Tuple[int, ...]) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.broadcast_to <numpy.broadcast_to>`.

    See its docstring for more information.
    """
    from ._array_object import Array

    return Array._new(np.broadcast_to(x._array, shape))


def can_cast(from_: Union[Dtype, Array], to: Dtype, /) -> bool:
    """
    Array API compatible wrapper for :py:func:`np.can_cast <numpy.can_cast>`.

    See its docstring for more information.
    """
    if isinstance(from_, Array):
        from_ = from_.dtype
    elif from_ not in _all_dtypes:
        raise TypeError(f"{from_=}, but should be an array_api array or dtype")
    if to not in _all_dtypes:
        raise TypeError(f"{to=}, but should be a dtype")
    # Note: We avoid np.can_cast() as it has discrepancies with the array API,
Example 12
def average(a, axis=None, weights=None, returned=False):
    """Returns the weighted average along an axis.

    Args:
        a (cupy.ndarray): Array to compute average.
        axis (int): Along which axis to compute average. The flattened array
            is used by default.
        weights (cupy.ndarray): Array of weights where each element
            corresponds to the value in ``a``. If ``None``, all the values
            in ``a`` have a weight equal to one.
        returned (bool): If ``True``, a tuple of the average and the sum
            of weights is returned, otherwise only the average is returned.

    Returns:
        cupy.ndarray or tuple of cupy.ndarray: The average of the input array
            along the axis and the sum of weights.

    .. seealso:: :func:`numpy.average`
    """
    a = cupy.asarray(a)

    if weights is None:
        avg = a.mean(axis)
        scl = avg.dtype.type(a.size / avg.size)
    else:
        wgt = cupy.asarray(weights)

        if issubclass(a.dtype.type, (numpy.integer, numpy.bool_)):
            result_dtype = numpy.result_type(a.dtype, wgt.dtype, 'f8')
        else:
            result_dtype = numpy.result_type(a.dtype, wgt.dtype)

        # Sanity checks
        if a.shape != wgt.shape:
            if axis is None:
                raise TypeError(
                    'Axis must be specified when shapes of a and weights '
                    'differ.')
            if wgt.ndim != 1:
                raise TypeError(
                    '1D weights expected when shapes of a and weights differ.')
            if wgt.shape[0] != a.shape[axis]:
                raise ValueError(
                    'Length of weights not compatible with specified axis.')

            # setup wgt to broadcast along axis
            wgt = cupy.broadcast_to(wgt, (a.ndim - 1) * (1, ) + wgt.shape)
            wgt = wgt.swapaxes(-1, axis)

        scl = wgt.sum(axis=axis, dtype=result_dtype)
        if cupy.any(scl == 0.0):
            raise ZeroDivisionError(
                'Weights sum to zero, can\'t be normalized')

        avg = cupy.multiply(a, wgt, dtype=result_dtype).sum(axis) / scl

    if returned:
        if scl.shape != avg.shape:
            scl = cupy.broadcast_to(cupy.array(scl), avg.shape).copy()
        return avg, scl
    else:
        return avg
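A brief usage sketch of `average` with 1-D weights along an axis (toy values):

import cupy

a = cupy.arange(6, dtype='f').reshape(2, 3)
w = cupy.array([1., 2., 3.])
avg, wsum = average(a, axis=1, weights=w, returned=True)
print(avg)   # [1.3333... 4.3333...] == [(0*1+1*2+2*3)/6, (3*1+4*2+5*3)/6]
print(wsum)  # [6. 6.]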
Example 13
 def duplication(a):
     return cupy.concatenate(
         cupy.broadcast_to(a, (self.fs, a.shape[0], a.shape[1])),
         axis=1).reshape(a.shape[0] * self.fs, -1)
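The one-liner stacks `fs` broadcast copies of `a` and reshapes them so each row repeats `fs` times consecutively; a sketch of the effect with a hypothetical `fs` (it matches `cupy.repeat` along axis 0):

import cupy

fs = 3
a = cupy.array([[1, 2], [3, 4]])
out = cupy.concatenate(
    cupy.broadcast_to(a, (fs, a.shape[0], a.shape[1])),
    axis=1).reshape(a.shape[0] * fs, -1)
print(out)  # rows: [1 2], [1 2], [1 2], [3 4], [3 4], [3 4]
print(cupy.array_equal(out, cupy.repeat(a, fs, axis=0)))  # True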
Example 14
def cdf(y, x, bw_method='scott', weight=1):
    '''
    Nadaraya-Watson conditional probability estimation is a non-parametric
    way to estimate the conditional probability of a random variable y given
    a random variable x. It works for both univariate and multivariate data
    and includes automatic bandwidth determination. The estimation works best
    for a unimodal distribution; bimodal or multi-modal distributions tend to
    be oversmoothed.

    Parameters
    y, x: array_like
        Datapoints to estimate from. Currently, only 1-D arrays are
        supported.

    bw_method: str, scalar or callable, optional
        The method used to calculate the estimator bandwidth. This can be
        'scott', 'silverman', or a scalar constant. If a scalar, it is used
        directly as the bandwidth factor. Default is 'scott'.

    weight: array_like, optional
        Weights of the datapoints, with the same shape as the data. By
        default the samples are assumed to be equally weighted.
    '''
    mempool = cp.get_default_memory_pool()
    pinned_mempool = cp.get_default_pinned_memory_pool()
    assert (x.ndim == 1) & (y.ndim == 1)
    NN = y.size
    d = 1
    neff = (cp.ones(NN) * weight).sum()
    if bw_method == 'scott':
        h = neff**(-1. / (d + 4))
    elif bw_method == 'silverman':
        h = (neff * (d + 2) / 4.)**(-1. / (d + 4))
    else:
        h = bw_method

    x = x.reshape((-1, 1))
    x = cp.asarray(x / h, dtype='float32')
    y = cp.asarray(y, dtype='float32')
    XX = cp.broadcast_to(x, (NN, NN))
    XXT = cp.broadcast_to(x.T, (NN, NN))
    xx = cp.absolute(XX - XXT)

    XX = None
    XXT = None
    # tricube kernel: K(u) = 70/81 * (1 - |u|^3)^3 for |u| < 1, else 0
    xx2 = cp.copy(xx)
    xx[xx2 < 1] = 70 / 81 * (1 - xx2[xx2 < 1]**3)**3
    xx[xx2 >= 1] = 0
    xx2 = None

    y = y.reshape((-1, 1))
    yy = y <= y.T
    kernel = cp.asarray(weight, dtype='float32')
    kernel = cp.broadcast_to(kernel, (NN, NN))
    kernel = xx * kernel
    weight = kernel / kernel.sum(0, keepdims=True)
    cdf = (weight * yy).sum(0, keepdims=True).T
    #cv = cp.asnumpy((((yy-cdf)/(1-weight))**2*kk).mean())
    weight = None
    kernel = None
    yy = None
    cdf2 = cp.asnumpy(cdf)
    cdf = None
    mempool.free_all_blocks()
    pinned_mempool.free_all_blocks()
    return cdf2
Example 15
def main(id):
    model_path = "/efs/fMRI_AE/SimpleFCAE_E32D32/model/model_iter_108858"

    gpu = 0
    get_device_from_id(gpu).use()
    """NibDataset
    def __init__(self, directory: str, crop: list):
    """
    crop = [[9, 81], [11, 99], [0, 80]]
    test_dataset = NibDataset("/data/test", crop=crop)

    mask = load_mask_nib("/data/mask/average_optthr.nii", crop)
    """SimpleFCAE_E32D32
    def __init__(self, mask, r: int, in_mask: str, out_mask: str):
    """
    model = Model(mask, 2, "mask", "mask")
    load_npz(model_path, model)
    model.to_gpu()

    # feature_idx = 0
    # feature_idx = (0, 4, 5, 5) # == [0, 9/2, 11/2, 10/2]
    # feature_idx = (0, 1, 1, 1)
    feature_idx = (0, 2, 7, 4)
    resample_size = 100
    batch_size = 10
    noise_level = 0.2

    for i in range(len(test_dataset)):
        if i % 8 != id:
            continue
        print("{:4}/{:4}".format(i, len(test_dataset)))
        subject = test_dataset.get_subject(i)
        frame = test_dataset.get_frame(i)
        test_img = xp.asarray(test_dataset[i])

        resample_remain = resample_size
        resample_processed = 0
        ret = xp.zeros(test_img.shape)
        while resample_remain > 0:
            batch_size_this_loop = min(batch_size, resample_remain)
            resample_remain -= batch_size_this_loop

            # broadcast_to needs a tuple shape (not an iterator) and returns
            # a view, so materialize a fresh array when adding the noise
            batch = xp.broadcast_to(
                test_img, (batch_size_this_loop, ) + test_img.shape)
            sigma = noise_level / (xp.max(test_img) - xp.min(test_img))
            batch = batch + sigma * xp.random.randn(*batch.shape)

            x = Variable(batch)

            feature = model.extract(x)
            assert feature.shape == (batch_size_this_loop, 1, 9, 11, 10)
            feature = F.sum(feature, axis=0)
            assert feature.shape == (1, 9, 11, 10)
            feature = F.get_item(feature, feature_idx)
            feature.backward()
            grad = xp.mean(x.grad, axis=0)
            ret = (ret * resample_processed + grad * batch_size_this_loop) / (
                resample_processed + batch_size_this_loop)
            model.cleargrads()

        xp.save(
            "/efs/fMRI_AE/SimpleFCAE_E32D32/grad/sensitivity_map_feature_{}_{}_{}_subject{:03d}_frame{:03d}"
            .format(feature_idx[1], feature_idx[2], feature_idx[3], subject,
                    frame), ret)
Example 16
    def choice(self, a, size=None, replace=True, p=None):
        """Returns an array of random values from a given 1-D array.

        .. seealso::
            :func:`cupy.random.choice` for full document,
            :meth:`numpy.random.choice`

        """
        if a is None:
            raise ValueError('a must be 1-dimensional or an integer')
        if isinstance(a, cupy.ndarray) and a.ndim == 0:
            raise NotImplementedError
        if isinstance(a, six.integer_types):
            a_size = a
            if a_size <= 0:
                raise ValueError('a must be greater than 0')
        else:
            a = cupy.array(a, copy=False)
            if a.ndim != 1:
                raise ValueError('a must be 1-dimensional or an integer')
            else:
                a_size = len(a)
                if a_size == 0:
                    raise ValueError('a must be non-empty')

        if p is not None:
            p = cupy.array(p)
            if p.ndim != 1:
                raise ValueError('p must be 1-dimensional')
            if len(p) != a_size:
                raise ValueError('a and p must have same size')
            if not (p >= 0).all():
                raise ValueError('probabilities are not non-negative')
            p_sum = cupy.sum(p).get()
            if not numpy.allclose(p_sum, 1):
                raise ValueError('probabilities do not sum to 1')

        if not replace:
            raise NotImplementedError

        if size is None:
            raise NotImplementedError
        shape = size
        size = numpy.prod(shape)

        if p is not None:
            # Gumbel-max trick: argmax(log p + Gumbel noise) samples index i
            # with probability p[i]
            p = cupy.broadcast_to(p, (size, a_size))
            index = cupy.argmax(cupy.log(p) +
                                cupy.random.gumbel(size=(size, a_size)),
                                axis=1)
            if not isinstance(shape, six.integer_types):
                index = cupy.reshape(index, shape)
        else:
            index = cupy.random.randint(0, a_size, size=shape)
            # Align the dtype with NumPy
            index = index.astype(cupy.int64, copy=False)

        if isinstance(a, six.integer_types):
            return index

        if index.ndim == 0:
            return cupy.array(a[index], dtype=a.dtype)

        return a[index]
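The weighted branch is the Gumbel-max trick: `argmax(log p + G)` with i.i.d. standard Gumbel noise `G` selects index `i` with probability `p[i]`. A standalone check with toy probabilities:

import cupy

p = cupy.array([0.1, 0.2, 0.7])
n = 100000
g = cupy.random.gumbel(size=(n, p.size))
idx = cupy.argmax(cupy.log(p) + g, axis=1)
print(cupy.bincount(idx, minlength=p.size) / n)  # approximately [0.1 0.2 0.7]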
Example 17
def make_plasma(steps, cell_size, coarseness=3, fineness=2):
    """
    Make coarse plasma initial state arrays and the arrays needed to
    interpolate coarse plasma into fine plasma (``virt_params``).

    Coarse is the one that will evolve and fine is the one to be bilinearly
    interpolated from the coarse one based on the initial positions
    (using 1 to 4 coarse plasma particles that initially were the closest).
    """
    coarse_step = cell_size * coarseness

    # Make two initial grids of plasma particles, coarse and fine.
    # Coarse is the one that will evolve and fine is the one to be bilinearly
    # interpolated from the coarse one based on the initial positions.

    coarse_grid = make_coarse_plasma_grid(steps, cell_size, coarseness)
    coarse_grid_xs, coarse_grid_ys = coarse_grid[:, None], coarse_grid[None, :]

    fine_grid = make_fine_plasma_grid(steps, cell_size, fineness)

    Nc = len(coarse_grid)

    # Create plasma electrons on the coarse grid, the ones that really move
    coarse_x_init = cp.broadcast_to(cp.asarray(coarse_grid_xs), (Nc, Nc))
    coarse_y_init = cp.broadcast_to(cp.asarray(coarse_grid_ys), (Nc, Nc))
    coarse_x_offt = cp.zeros((Nc, Nc))
    coarse_y_offt = cp.zeros((Nc, Nc))
    coarse_px = cp.zeros((Nc, Nc))
    coarse_py = cp.zeros((Nc, Nc))
    coarse_pz = cp.zeros((Nc, Nc))
    coarse_m = cp.ones((Nc, Nc)) * ELECTRON_MASS * coarseness**2
    coarse_q = cp.ones((Nc, Nc)) * ELECTRON_CHARGE * coarseness**2

    # Calculate indices for coarse -> fine bilinear interpolation

    # Neighbour indices array, 1D, same in both x and y direction.
    indices = np.searchsorted(coarse_grid, fine_grid)
    # example:
    #     coarse:  [-2., -1.,  0.,  1.,  2.]
    #     fine:    [-2.4, -1.8, -1.2, -0.6,  0. ,  0.6,  1.2,  1.8,  2.4]
    #     indices: [ 0  ,  1  ,  1  ,  2  ,  2  ,  3  ,  4  ,  4  ,  5 ]
    # There is no coarse particle with index 5, so clip it to 4:
    indices_next = np.clip(indices, 0, Nc - 1)  # [0, 1, 1, 2, 2, 3, 4, 4, 4]
    # Clip to zero for indices of prev particles as well:
    indices_prev = np.clip(indices - 1, 0, Nc - 1)  # [0, 0, 0, 1 ... 3, 3, 4]
    # mixed from: [ 0&0 , 0&1 , 0&1 , 1&2 , 1&2 , 2&3 , 3&4 , 3&4, 4&4 ]

    # Calculate weights for coarse->fine interpolation from initial positions.
    # The further the fine particle is from closest right coarse particles,
    # the more influence the left ones have.
    influence_prev = (coarse_grid[indices_next] - fine_grid) / coarse_step
    influence_next = (fine_grid - coarse_grid[indices_prev]) / coarse_step
    # Fix for boundary cases of missing cornering particles.
    influence_prev[indices_next == 0] = 0  # nothing on the left?
    influence_next[indices_next == 0] = 1  # use right
    influence_next[indices_prev == Nc - 1] = 0  # nothing on the right?
    influence_prev[indices_prev == Nc - 1] = 1  # use left
    # Same arrays are used for interpolating in y-direction.

    # The virtualization formula is thus
    # influence_prev[pi] * influence_prev[pj] * <bottom-left neighbour value> +
    # influence_prev[pi] * influence_next[nj] * <top-left neighbour value> +
    # influence_next[ni] * influence_prev[pj] * <bottom-right neighbour val> +
    # influence_next[ni] * influence_next[nj] * <top-right neighbour value>
    # where pi, pj are indices_prev[i], indices_prev[j],
    #       ni, nj are indices_next[i], indices_next[j] and
    #       i, j are indices of fine virtual particles

    # This is what is employed inside mix() and deposit_kernel().

    # An equivalent formula would be
    # inf_prev[pi] * (inf_prev[pj] * <bot-left> + inf_next[nj] * <bot-right>) +
    # inf_next[ni] * (inf_prev[pj] * <top-left> + inf_next[nj] * <top-right>)

    # Values of m, q, px, py, pz should be scaled by 1/(fineness*coarseness)**2

    virt_params = GPUArrays(
        influence_prev=influence_prev,
        influence_next=influence_next,
        indices_prev=indices_prev,
        indices_next=indices_next,
        fine_grid=fine_grid,
    )

    return (coarse_x_init, coarse_y_init, coarse_x_offt, coarse_y_offt,
            coarse_px, coarse_py, coarse_pz, coarse_m, coarse_q, virt_params)
Example 18
def kde(dataset, bw_method='scott', weight=1):
    '''
    Nadaraya-Watson kernel density estimation is a non-parametric way to
    estimate the probability density function (PDF) of a random variable.
    The code currently only works for univariate data. It includes automatic
    bandwidth determination. The estimation works best for a unimodal
    distribution; bimodal or multi-modal distributions tend to be
    oversmoothed.

    Parameters
    dataset: array_like
        Datapoints to estimate from. Currently, only 1-D arrays are
        supported.

    bw_method: str, scalar or callable, optional
        The method used to calculate the estimator bandwidth. This can be
        'scott', 'silverman', or a scalar constant. If a scalar, it is used
        directly as kde.factor. Default is 'scott'.

    weight: array_like, optional
        Weights of the datapoints, with the same shape as the dataset. By
        default the samples are assumed to be equally weighted.
    '''

    mempool = cp.get_default_memory_pool()
    pinned_mempool = cp.get_default_pinned_memory_pool()

    assert dataset.ndim == 1
    n = dataset.size

    neff = (cp.ones(n) * weight).sum()
    d = 1
    # find the bandwidth
    if bw_method == 'scott':
        h = neff**(-1. / (d + 4))
    elif bw_method == 'silverman':
        h = (neff * (d + 2) / 4.)**(-1. / (d + 4))
    else:
        h = bw_method

    dataset = cp.asarray(dataset / h, dtype='float32').T
    dataset = cp.expand_dims(dataset, 1)
    XX = cp.broadcast_to(dataset, (n, n))
    XXT = cp.broadcast_to(dataset.T, (n, n))
    norm = cp.absolute(XX - XXT)
    XX = None
    XXT = None

    # tricube kernel: K(u) = 70/81 * (1 - |u|^3)^3 for |u| < 1, else 0
    kxx = cp.copy(norm)
    kxx[norm < 1] = 70 / 81 * (1 - norm[norm < 1]**3)**3
    kxx[norm >= 1] = 0
    norm = None

    kernel = cp.asarray(weight, dtype='float32')
    kernel = cp.broadcast_to(kernel, (n, n))
    kernel = kxx * kernel
    kde = kernel.mean(0, keepdims=False) / h

    mempool.free_all_blocks()
    pinned_mempool.free_all_blocks()
    return kde
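A hypothetical usage sketch (standard-normal samples, illustrative only):

import cupy as cp

samples = cp.random.randn(2000)
density = kde(samples)  # tricube-kernel density estimate at each sample point
print(density.shape)    # (2000,)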
Example 19
def diff(a, n=1, axis=-1, prepend=None, append=None):
    """Calculate the n-th discrete difference along the given axis.

    Args:
        a (cupy.ndarray): Input array.
        n (int): The number of times values are differenced. If zero, the input
            is returned as-is.
        axis (int): The axis along which the difference is taken, default is
            the last axis.
        prepend (int, float, cupy.ndarray): Value to prepend to ``a``.
        append (int, float, cupy.ndarray): Value to append to ``a``.

    Returns:
        cupy.ndarray: The result array.

    .. seealso:: :func:`numpy.diff`
    """

    if n == 0:
        return a
    if n < 0:
        raise ValueError(
            "order must be non-negative but got " + repr(n))

    a = cupy.asanyarray(a)
    nd = a.ndim

    combined = []

    if prepend is not None:
        prepend = cupy.asanyarray(prepend)
        if prepend.ndim == 0:
            shape = list(a.shape)
            shape[axis] = 1
            prepend = cupy.broadcast_to(prepend, tuple(shape))
        combined.append(prepend)

    combined.append(a)

    if append is not None:
        append = cupy.asanyarray(append)
        if append.ndim == 0:
            shape = list(a.shape)
            shape[axis] = 1
            append = cupy.broadcast_to(append, tuple(shape))
        combined.append(append)

    if len(combined) > 1:
        a = cupy.concatenate(combined, axis)

    slice1 = [slice(None)] * nd
    slice2 = [slice(None)] * nd
    slice1[axis] = slice(1, None)
    slice2[axis] = slice(None, -1)
    slice1 = tuple(slice1)
    slice2 = tuple(slice2)

    op = cupy.not_equal if a.dtype == numpy.bool_ else cupy.subtract
    for _ in range(n):
        a = op(a[slice1], a[slice2])

    return a
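A quick usage check with toy values:

import cupy

a = cupy.array([1, 2, 4, 7, 0])
print(diff(a))             # [ 1  2  3 -7]
print(diff(a, n=2))        # [ 1  1 -10]
print(diff(a, prepend=0))  # [ 1  1  2  3 -7]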
Example 20
def kernel_smoothing_ecdf_weighted(y,
                                   x,
                                   dampmin=1e-30,
                                   maxit=500,
                                   lam=0,
                                   bw_method='scott',
                                   weight=1):

    mempool = cp.get_default_memory_pool()
    pinned_mempool = cp.get_default_pinned_memory_pool()
    assert (x.ndim == 1) & (y.ndim == 1)
    NN = y.size
    d = 1
    neff = (cp.ones(NN) * weight).sum()
    if bw_method == 'scott':
        h = neff**(-1. / (d + 4))
    elif bw_method == 'silverman':
        h = (neff * (d + 2) / 4.)**(-1. / (d + 4))
    else:
        h = bw_method
    NN = x.size

    x = x.reshape((-1, 1))
    x = cp.asarray(x / h, dtype='float32')
    y = cp.asarray(y, dtype='float32')
    XX = cp.broadcast_to(x, (NN, NN))
    XXT = cp.broadcast_to(x.T, (NN, NN))
    xx = XX - XXT
    XX = None
    XXT = None
    #print(mempool.used_bytes())
    kxx = cp.absolute(xx, dtype='float32')
    kxx[kxx < 1] = 70 / 81 * (1 - kxx[kxx < 1]**3)**3
    kxx[cp.absolute(xx, dtype='float32') >= 1] = 0
    xx = xx * kxx
    kernel = cp.asarray(weight, dtype='float32')  #weight
    kernel = cp.broadcast_to(kernel, (NN, NN))

    # Levenberg-Marquardt
    whileii = 0
    #lam = -1/(xx.max(0)+xx.max(0).mean())/2
    lam = cp.zeros(xx.shape[0], dtype='float32')  #-1/(xx.max(0))/2
    max_change = 1
    residual_rhs = 1e10
    damp = 1e-2
    # Levenberg-Marquardt iteration to find better weights for the adjusted
    # Nadaraya-Watson estimator
    while ((max_change > 2e-100) |
           (residual_rhs > 1e-100)) & (whileii < maxit):
        whileii = whileii + 1
        lam2 = cp.broadcast_to(lam, (NN, NN))
        dpt_constraint = cp.asarray(xx / (1 + lam2 * xx), dtype='float64')
        lam2 = None
        ddpt_constraint = -dpt_constraint**2
        ddpt_constraint = (kernel * ddpt_constraint).sum(0)
        dpt_constraint = (kernel * dpt_constraint).sum(0)
        residual_rhs_old = residual_rhs
        residual_rhs = cp.absolute(dpt_constraint).mean()  #calculate residual
        change = dpt_constraint * ddpt_constraint / (ddpt_constraint**2 + damp)
        max_change = cp.absolute(change).max()
        dpt_constraint = None
        ddpt_constraint = None
        '''
        lam2 = cp.broadcast_to(lam,(NN,NN))
        lam2 = cp.logical_not(((1+lam2*xx)>=0).prod(0))
        #lam2 = None
        lam[lam2] = lam[lam2]/100
        
        if cp.any(lam>0):
            lam[lam>0] = -cp.random.rand(int((lam>0).sum()))/(xx[:,lam>0].max(0))
        #lam = cp.maximum(-1/(xx+1e-4),lam)
        #obj = cp.log(1+lam*xx+1e-4).sum()
        '''

        if (residual_rhs_old >= residual_rhs):
            lam = lam - change
            if ((whileii % 20) == 0):
                print(max_change, ' ', residual_rhs, ' ', damp, ' ', lam.max(),
                      lam.min(), ' any NA ',
                      cp.isnan(change).any())
            if (damp > dampmin): damp = damp / 2
            change = None

        elif (residual_rhs_old < residual_rhs):
            damp = damp * 4
    residual_rhs = None

    p = 1 / (1 + lam * xx) * kernel
    p = cp.asarray(p, dtype='float64')
    p = p / p.sum(0)
    if cp.any(p < -1e-3):
        print(
            'kernel smoothing weights did not converge while searching for '
            'outliers; they should all be positive')
    p[p < 0] = 0
    p = p / p.sum(0)

    kernel = cp.asarray(kxx * p, dtype='float32')

    print(lam.max(), lam.min(), p.max(), p.min())
    print('this should be zero; actual residual:',
          cp.absolute((xx * p).sum(0)).max())
    print(
        'the probabilities should sum to 1, so this should be 0; '
        'actual residual:', cp.absolute(p.sum(0) - 1).mean())

    xx = None
    lam = None

    kxx = cp.asarray(kxx * p, dtype='float32')
    #xx2 =None
    p = None

    kernel = kxx * kernel
    kernel_de = cp.broadcast_to(kernel.sum(0, keepdims=True), (NN, NN))

    y = y.reshape((-1, 1))
    yy = y <= y.T
    weight = kernel / kernel_de
    cdf = (weight * yy).sum(0, keepdims=True).T
    #cv = cp.asnumpy((((yy-cdf)/(1-weight))**2*kk).mean())
    weight = None
    kernel = None
    yy = None

    cdf2 = cp.asnumpy(cdf)
    cdf = None
    mempool.free_all_blocks()
    pinned_mempool.free_all_blocks()
    return cdf2
Example 21
def SetGeometry(self):
    if self.verbosity >= 1:
        print("Prepating the domain data (shape,metric,...)")
    eikonal = self.kernel_data['eikonal']
    policy = eikonal.policy

    # Domain shape and grid scale
    self.shape = tuple(
        self.GetValue(
            'dims',
            help="dimensions (shape) of the computational domain").astype(int))

    self.periodic_default = (
        False, False, True) if self.isCurvature else (False, ) * self.ndim
    self.periodic = self.GetValue(
        'periodic',
        default=self.periodic_default,
        help="Apply periodic boundary conditions on some axes")
    self.shape_o = tuple(misc.round_up(self.shape, self.shape_i))
    if policy.bound_active_blocks is True:
        policy.bound_active_blocks = 12 * np.prod(self.shape_o) / np.max(
            self.shape_o)

    # Set the discretization gridScale(s)
    if self.isCurvature:
        self.h_base = self.GetValue(
            'gridScale',
            array_float=True,
            help="Scale of the physical (not angular) grid.")
        self.h_per = self.caster(2. * np.pi / self.shape[2])
        self.h = self.caster((self.h_base, self.h_base, self.h_per))

    elif self.HasValue('gridScale') or self.isCurvature:
        self.h = cp.broadcast_to(
            self.GetValue('gridScale',
                          array_float=True,
                          help="Scale of the computational grid"),
            (self.ndim, ))

    else:
        self.h = self.GetValue(
            'gridScales',
            array_float=True,
            help="Axis independent scales of the computational grid")

    self.h_broadcasted = fd.as_field(self.h, self.shape, depth=1)

    # Get the metric
    if self.model_ == 'Diagonal': metricClass = Metrics.Diagonal
    elif self.model_ == 'Riemann': metricClass = Metrics.Riemann
    elif self.model_ == 'Rander': metricClass = Metrics.Rander
    elif self.model_ == 'TTI': metricClass = Metrics.Seismic.Reduced

    if self.model_ == 'Isotropic':
        self._metric = Metrics.Diagonal(cp.ones(self.ndim, dtype=self.float_t))
        self._dualMetric = None
    elif self.isCurvature:
        pass
    else:
        self._metric = self.GetValue('metric',
                                     default=None,
                                     verbosity=3,
                                     help="Metric of the minimal path model")
        self._dualMetric = self.GetValue(
            'dualMetric',
            default=None,
            verbosity=3,
            help="Dual metric of the minimal path model")
        for key, value in (('_metric', self._metric), ('_dualMetric',
                                                       self._dualMetric)):
            if ad.cupy_generic.isndarray(value):
                setattr(self, key, metricClass.from_HFM(value))

    self.drift = self.GetValue(
        'drift',
        default=None,
        verbosity=3,
        array_float=True,
        help=
        "Drift introduced in the eikonal equation, becoming F^*(grad u - drift)=1"
    )

    # Set the geometry

    if self.isCurvature:
        # Geometry defined using the xi, kappa and theta parameters
        self.xi = self.GetValue(
            'xi',
            array_float=True,
            help="Cost of rotation for the curvature penalized models")
        self.kappa = self.GetValue(
            'kappa',
            default=0.,
            array_float=True,
            help="Rotation bias for the curvature penalized models")
        self.theta = self.GetValue(
            'theta',
            default=0.,
            verbosity=3,
            array_float=True,
            help=
            "Deviation from horizontality, for the curvature penalized models")

        # Scale h_base is taken care of through the 'cost' field
        h_ratio = self.h_per / self.h_base
        self.xi *= h_ratio
        self.kappa /= h_ratio
        # Large arrays are passed as geometry data, and scalar entries as module constants
        geom = []

        def is_var(e):
            return isinstance(e, cp.ndarray) and e.ndim > 0

        traits = eikonal.traits
        traits['xi_var_macro'] = int(is_var(self.xi))
        traits['kappa_var_macro'] = int(is_var(self.kappa))
        traits['theta_var_macro'] = int(is_var(self.theta))
        if not is_var(self.theta): traits['nTheta'] = self.shape[2]
        if all(traits[e] == 0 for e in ('xi_var_macro', 'kappa_var_macro',
                                        'theta_var_macro')):
            traits['precomputed_scheme_macro'] = 1

        geom = [
            e for e in (1. / self.xi, self.kappa, np.cos(self.theta),
                        np.sin(self.theta)) if is_var(e)
        ]
        if len(geom) > 0: self.geom = ad.array(geom)
        else: self.geom = cp.zeros((0, ) + self.shape, dtype=self.float_t)

    else:
        if self._metric is not None:
            self._metric = self._metric.with_costs(self.h)
        if self._dualMetric is not None:
            self._dualMetric = self._dualMetric.with_speeds(self.h)
        if self.drift is not None: self.drift *= self.h_broadcasted

        if self.model_ == 'Isotropic':
            # No geometry field. Metric passed as a module constant
            self.geom = cp.array(0., dtype=self.float_t)
        elif self.model_ == 'Diagonal':
            self.geom = self.dualMetric.costs**2
        elif self.model_ == 'Riemann':
            self.geom = self.dualMetric.flatten()
        elif self.model_ == 'Rander':
            self.geom = Metrics.Riemann(self.metric.m).dual().flatten()
            if self.drift is None: self.drift = self.float_t(0.)
            self.drift += self.metric.w
        elif self.model_ == 'TTI':
            self.geom = self.metric.flatten(transposed_transformation=True)

    eikonal.args['geom'] = misc.block_expand(fd.as_field(
        self.geom, self.shape),
                                             self.shape_i,
                                             mode='constant',
                                             constant_values=np.inf,
                                             contiguous=True)
    if self.drift is not None:
        eikonal.args['drift'] = misc.block_expand(fd.as_field(
            self.drift, self.shape),
                                                  self.shape_i,
                                                  mode='constant',
                                                  constant_values=np.nan,
                                                  contiguous=True)

    # geometrical data related with geodesics
    self.exportGeodesicFlow = self.GetValue(
        'exportGeodesicFlow',
        default=False,
        help="Export the upwind geodesic flow (direction of the geodesics)")
    self.tips = self.GetValue(
        'tips',
        default=None,
        array_float=True,
        help="Tips from which to compute the minimal geodesics")
    if self.isCurvature:
        self.unorientedTips = self.GetValue(
            'unorientedTips',
            default=None,
            array_float=True,
            help="Compute a geodesic from the most favorable orientation")
    self.hasTips = (self.tips is not None
                    or (self.isCurvature and self.unorientedTips is not None))

    # Cost function
    if self.HasValue('speed'):
        self.cost = 1. / self.GetValue(
            'speed',
            array_float=True,
            help="speed = 1/cost (scales the metric, accepts AD)")
    else:
        self.cost = self.GetValue(
            'cost',
            array_float=True,
            default=None,
            help="cost = 1/speed (scales the metric, accepts AD)")
        if self.cost is None:
            self.cost = cp.ones(self.shape, dtype=self.float_t)
    if not ad.is_ad(self.cost):
        costVariation = self.GetValue(
            'costVariation',
            default=None,
            help="First order variation of the cost function")
        if costVariation is not None:
            self.cost = ad.Dense.new(self.cost, costVariation)
    if self.isCurvature: self.cost *= self.h_base
    self.cost = np.broadcast_to(self.cost, self.shape)

    # Cost related parameters
    if self.HasValue('atol') and self.HasValue('rtol'): tol = None
    else:
        tol = self.GetValue(
            'tol',
            default="_Dummy",
            array_float=True,
            help=
            "Convergence tolerance for the fixed point solver (determines atol, rtol)"
        )
        float_resolution = np.finfo(self.float_t).resolution
        if isinstance(tol, str) and tol == "_Dummy":
            cost_bound = ad.remove_ad(self.cost)
            if not self.isCurvature:
                cost_bound = cost_bound * self.metric.cost_bound()
            mean_cost_bound = np.mean(cost_bound)
            tol = mean_cost_bound * float_resolution * 5.
            self.hfmOut['keys']['default']['tol'] = self.float_t(float(tol))
    policy.atol = self.GetValue(
        'atol',
        default=tol,
        array_float=True,
        help="Absolute convergence tolerance for the fixed point solver")
    rtol_default = 0. if policy.multiprecision else float_resolution * 5.
    policy.rtol = self.GetValue(
        'rtol',
        default=rtol_default,
        array_float=True,
        help="Relative convergence tolerance for the fixed point solver")

    if policy.bound_active_blocks:
        policy.minChg_delta_min = self.GetValue(
            'minChg_delta_min',
            default=float(np.min(self.h)) / 10.,
            help="Minimal threshold increase with bound_active_blocks method")

    # Walls
    walls = self.GetValue('walls',
                          default=None,
                          help='Obstacles in the domain')
    if walls is not None:
        wallDist_t = np.uint8
        wallDistBound = self.GetValue(
            'wallsDistBound',
            default=10,
            help="Bound on the computed distance to the obstacles.\n"
            "(Ideally a sharp upper bound on the stencil width.)")
        wallDistMax_t = np.iinfo(wallDist_t).max
        wallDist = cp.full(self.shape, wallDistMax_t, dtype=wallDist_t)
        wallDist[walls] = 0
        l1Kernel = inf_convolution.distance_kernel(1,
                                                   self.ndim,
                                                   dtype=wallDist_t,
                                                   ord=1)
        wallDist = inf_convolution.inf_convolution(wallDist,
                                                   l1Kernel,
                                                   niter=wallDistBound,
                                                   periodic=self.periodic,
                                                   overwrite=True)
        # This value indicates 'far from wall', and visibility computation is bypassed
        wallDist[wallDist > wallDistBound] = wallDistMax_t
        self.wallDist = wallDist
        eikonal.args['wallDist'] = misc.block_expand(
            wallDist,
            self.shape_i,
            mode='constant',
            constant_values=np.iinfo(wallDist_t).max)
Example 22
def roll(a, shift, axis=None):
    """Roll array elements along a given axis.

    Elements that roll beyond the last position are re-introduced at the first.

    Args:
        a (~cupy.ndarray): Array to be rolled.
        shift (int or tuple of int): The number of places by which elements are
            shifted. If a tuple, then `axis` must be a tuple of the same size,
            and each of the given axes is shifted by the corresponding number.
            If an int while `axis` is a tuple of ints, then the same value is
            used for all given axes.
        axis (int or tuple of int or None): The axis along which elements are
            shifted. By default, the array is flattened before shifting, after
            which the original shape is restored.

    Returns:
        ~cupy.ndarray: Output array.

    .. seealso:: :func:`numpy.roll`

    """
    if axis is None:
        return roll(a.ravel(), shift, 0).reshape(a.shape)
    elif isinstance(shift, cupy.ndarray):
        shift = shift.ravel()
        axes = _reduction._get_axis(axis, a.ndim)[0]
        n_axes = max(len(axes), shift.size)
        axes = numpy.broadcast_to(axes, (n_axes,))
        shift = cupy.broadcast_to(shift, (n_axes,))

        # TODO(asi1024): Improve after issue #4799 is resolved.
        indices = []
        for ax in range(a.ndim):
            ind_shape = [1] * a.ndim
            ind_shape[ax] = a.shape[ax]
            indices.append(cupy.arange(a.shape[ax]).reshape(ind_shape))

        for ax, s in zip(axes, shift):
            indices[ax] -= s
            indices[ax] %= a.shape[ax]

        for ax in range(a.ndim):
            indices[ax] = cupy.broadcast_to(indices[ax], a.shape)

        return a[tuple(indices)]
    else:
        axis = _reduction._get_axis(axis, a.ndim)[0]

        broadcasted = numpy.broadcast(shift, axis)
        if broadcasted.nd > 1:
            raise ValueError(
                '\'shift\' and \'axis\' should be scalars or 1D sequences')
        shifts = {ax: 0 for ax in range(a.ndim)}
        for sh, ax in broadcasted:
            shifts[ax] += sh

        rolls = [((slice(None), slice(None)),)] * a.ndim
        for ax, offset in shifts.items():
            offset %= a.shape[ax] or 1  # If `a` is empty, nothing matters.
            if offset:
                # (original, result), (original, result)
                rolls[ax] = ((slice(None, -offset), slice(offset, None)),
                             (slice(-offset, None), slice(None, offset)))

        result = cupy.empty_like(a)
        for indices in itertools.product(*rolls):
            arr_index, res_index = zip(*indices)
            result[res_index] = a[arr_index]

        return result
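A quick usage check with toy values:

import cupy

x = cupy.arange(10)
print(roll(x, 2))  # [8 9 0 1 2 3 4 5 6 7]

x2 = x.reshape(2, 5)
print(roll(x2, 1, axis=1))  # [[4 0 1 2 3]
                            #  [9 5 6 7 8]]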
Example 23
    def choice(self, a, size=None, replace=True, p=None):
        """Returns an array of random values from a given 1-D array.

        .. seealso::
            :func:`cupy.random.choice` for full document,
            :meth:`numpy.random.choice`

        """
        if a is None:
            raise ValueError('a must be 1-dimensional or an integer')
        if isinstance(a, cupy.ndarray) and a.ndim == 0:
            raise NotImplementedError
        if isinstance(a, six.integer_types):
            a_size = a
            if a_size <= 0:
                raise ValueError('a must be greater than 0')
        else:
            a = cupy.array(a, copy=False)
            if a.ndim != 1:
                raise ValueError('a must be 1-dimensional or an integer')
            else:
                a_size = len(a)
                if a_size == 0:
                    raise ValueError('a must be non-empty')

        if p is not None:
            p = cupy.array(p)
            if p.ndim != 1:
                raise ValueError('p must be 1-dimensional')
            if len(p) != a_size:
                raise ValueError('a and p must have same size')
            if not (p >= 0).all():
                raise ValueError('probabilities are not non-negative')
            p_sum = cupy.sum(p).get()
            if not numpy.allclose(p_sum, 1):
                raise ValueError('probabilities do not sum to 1')

        if size is None:
            raise NotImplementedError
        shape = size
        size = numpy.prod(shape)

        if not replace and p is None:
            if a_size < size:
                raise ValueError(
                    'Cannot take a larger sample than population when '
                    '\'replace=False\'')
            if isinstance(a, six.integer_types):
                indices = cupy.arange(a, dtype='l')
            else:
                indices = a.copy()
            self.shuffle(indices)
            return indices[:size].reshape(shape)

        if not replace:
            raise NotImplementedError

        if p is not None:
            # Gumbel-max trick: argmax(log p + Gumbel noise) samples index ~ p.
            p = cupy.broadcast_to(p, (size, a_size))
            index = cupy.argmax(cupy.log(p) +
                                cupy.random.gumbel(size=(size, a_size)),
                                axis=1)
            if not isinstance(shape, six.integer_types):
                index = cupy.reshape(index, shape)
        else:
            index = cupy.random.randint(0, a_size, size=shape)
            # Align the dtype with NumPy
            index = index.astype(cupy.int64, copy=False)

        if isinstance(a, six.integer_types):
            return index

        if index.ndim == 0:
            return cupy.array(a[index], dtype=a.dtype)

        return a[index]
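
The weighted branch above samples with the Gumbel-max trick: argmax_i(log p_i + G_i), with G_i i.i.d. standard Gumbel noise, follows the categorical distribution p. A small NumPy sketch of that equivalence (illustrative only, not part of the example above):

import numpy as np

rng = np.random.default_rng(0)
p = np.array([0.1, 0.3, 0.6])
n = 100_000
# One Gumbel draw per (sample, category); row-wise argmax gives the index.
g = rng.gumbel(size=(n, p.size))
idx = np.argmax(np.log(p) + g, axis=1)
print(np.bincount(idx, minlength=p.size) / n)  # approximately [0.1 0.3 0.6]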
Exemplo n.º 25
0
def SetGeometry(self):
    if self.verbosity >= 1:
        print("Preparing the domain data (shape,metric,...)")
    eikonal = self.kernel_data['eikonal']
    policy = eikonal.policy

    # These flags allow deleting the metric and dual metric once they have been converted
    self._metric_delete_dual = False
    self._CostMetric_delete_dual = False
    self._metric = None
    self._dualMetric = None
    self._CostMetric = None

    # Domain shape and grid scale
    self.shape = self.hfmIn.shape

    if self.isCurvature and self.ndim_phys == 2:
        periodic_default = (False, False, True)
    else:
        periodic_default = (False, ) * self.ndim
    self.periodic = self.GetValue(
        'periodic',
        default=periodic_default,
        help="Apply periodic boundary conditions on some axes")
    self.shape_o = tuple(fd.round_up_ratio(self.shape, self.shape_i))
    if policy.bound_active_blocks is True:
        policy.bound_active_blocks = 12 * np.prod(self.shape_o) / np.max(
            self.shape_o)

    # Set the discretization gridScale(s)
    if self.isCurvature and self.ndim_phys == 2:
        self.h_base = self.GetValue(
            'gridScale',
            array_float=tuple(),
            help="Scale of the physical (not angular) grid.")
        self.h_per = self.hfmIn.Axes()[2][1]  # self.caster(2.*np.pi / self.shape[2])
        self.h = self.caster((self.h_base, self.h_base, self.h_per))

    elif self.HasValue('gridScale'):
        self.h = cp.broadcast_to(
            self.GetValue('gridScale',
                          array_float=tuple(),
                          help="Scale of the computational grid"),
            (self.ndim, ))

    else:
        self.h = self.GetValue(
            'gridScales',
            array_float=(self.ndim, ),
            help="Axis independent scales of the computational grid")
    if self.isCurvature:
        if self.ndim_phys == 3:
            self.h_base = self.h[0]
            self.h_per = self.h[3]
        h_ratio = self.h_per / self.h_base

    if policy.multiprecision:
        # Choose a power of two significantly smaller than h
        hmin = float(np.min(self.h))
        self.multip_step = 2.**np.floor(np.log2(hmin / 10))
        self.multip_max = np.iinfo(self.int_t).max * self.multip_step / 2
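
As a numeric illustration of the multiprecision step choice above, with a hypothetical hmin = 0.04 (only the np.floor/np.log2 computation from the code is reproduced):

import numpy as np

hmin = 0.04
# Largest power of two below hmin/10 = 0.004: 2**floor(log2(0.004)) = 2**-8.
multip_step = 2.**np.floor(np.log2(hmin / 10))
print(multip_step)  # -> 0.00390625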

    self.h_broadcasted = fd.as_field(self.h, self.shape, depth=1)

    # Get the metric
    if self.model_ == 'Diagonal': metricClass = Metrics.Diagonal
    elif self.model_ == 'Riemann': metricClass = Metrics.Riemann
    elif self.model_ == 'Rander': metricClass = Metrics.Rander
    elif self.model_ == 'TTI': metricClass = Metrics.Seismic.TTI
    elif self.model_ == 'AsymmetricQuadratic': metricClass = Metrics.AsymQuad

    if self.model_ == 'Isotropic':
        self._metric = Metrics.Diagonal(cp.ones(self.ndim, dtype=self.float_t))
        self._dualMetric = None
    elif self.isCurvature:
        pass
    else:
        self._metric = self.GetValue('metric',
                                     default=None,
                                     verbosity=3,
                                     help="Metric of the minimal path model")
        self._dualMetric = self.GetValue(
            'dualMetric',
            default=None,
            verbosity=3,
            help="Dual metric of the minimal path model")
        for key, value in (('_metric', self._metric), ('_dualMetric',
                                                       self._dualMetric)):
            if ad.cupy_generic.isndarray(value):
                setattr(self, key, metricClass.from_HFM(value))

    # Set the geometry

    if self.isCurvature and self.ndim_phys == 2:
        # Geometry defined using the xi, kappa and theta parameters
        xi = self.GetValue(
            'xi',
            array_float=True,
            help="Cost of rotation for the curvature penalized models")
        kappa = self.GetValue(
            'kappa',
            default=0.,
            array_float=True,
            help="Rotation bias for the curvature penalized models")
        self.theta = self.GetValue(
            'theta',
            default=0.,
            verbosity=3,
            array_float=True,
            help=
            "Deviation from horizontality, for the curvature penalized models")

        # Scale h_base is taken care of through the 'cost' field
        self.ixi = 1 / (xi * h_ratio)
        self.kappa = kappa / h_ratio
        # Large arrays are passed as geometry data, and scalar entries as module constants
        geom = []
        traits = eikonal.traits
        traits['xi_var_macro'] = self.ixi.ndim > 0
        traits['kappa_var_macro'] = self.kappa.ndim > 0
        traits['theta_var_macro'] = self.theta.ndim > 0
        if self.theta.ndim == 0: traits['nTheta'] = self.shape[2]
        if self.ixi.ndim > 0: self.ixi = self.as_field(self.ixi, 'xi')
        if self.kappa.ndim > 0: self.kappa = self.as_field(self.kappa, 'kappa')
        if self.theta.ndim > 0: self.theta = self.as_field(self.theta, 'theta')

        geom = [
            e for e in (self.ixi, self.kappa, np.cos(self.theta),
                        np.sin(self.theta)) if e.ndim > 0
        ]
        if len(geom) > 0: self.geom = ad.array(geom)
        else: self.geom = cp.zeros((0, self.shape[2]), dtype=self.float_t)

    elif self.isCurvature and self.ndim_phys == 3:
        # No geometry field. Metric is built in
        self.geom = cp.zeros((0, *self.shape[3:]), dtype=self.float_t)  # Dummy
        self.ixi = 1 / (h_ratio * self.GetValue(
            'xi',
            array_float=True,
            help="Cost of rotation for the curvature penalized models"))
        self.sphere_radius = self.h_per * self.shape[-1]
        if self.shape[-1] == self.shape[-2]: self.separation_radius = None
        else:
            self.separation_radius = self.h_per * (self.shape[-2] / 2 -
                                                   self.shape[-1])
        traits = eikonal.traits
        traits['sphere_macro'] = self.separation_radius is not None
        traits['dual_macro'] = self.GetValue(
            'dual', default=False, help="Use the Reeds-Shepp dual model")
        if traits['forward_macro'] and (traits['dual_macro']
                                        or not traits['sphere_macro']):
            raise ValueError("Incompatible traits for the Reeds-Shepp model.")

    else:
        if self._metric is not None:
            self._metric = self._metric.with_costs(self.h)
        if self._dualMetric is not None:
            self._dualMetric = self._dualMetric.with_speeds(self.h)
        # if self.drift is not None: self.drift *= self.h_broadcasted

        if self.model_ == 'Isotropic':
            # No geometry field. Metric passed as a module constant
            self.geom = cp.array(0., dtype=self.float_t)
        elif self.model_ == 'Diagonal':
            self.geom = self.dualMetric.costs**2
        elif self.model_ == 'Riemann':
            self.geom = self.dualMetric.flatten()
        elif self.model_ == 'Rander':
            self.geom = self.metric.flatten(inverse_m=True)
        elif self.model_ == 'TTI':
            self.geom = self.metric.flatten(transposed_transformation=True)
        elif self.model_ == 'AsymmetricQuadratic':
            self.geom = self.dualMetric.flatten(solve_w=True)
        elif self.model_ == 'SubRiemann':
            pruning_metric = self.GetValue(
                'pruning_metric',
                default=None,
                help="""Finite difference offset is discarded """
                """if this norm exceeds the Euclidean norm.""")
            if pruning_metric is None:
                pruning_eps = self.GetValue(
                    'pruning_eps',
                    default=None,
                    help=
                    """Approximation of the Riemannian relaxation parameter,"""
                    """ used for pruning the finite difference offsets.""")
                rho = np.sqrt(
                    lp.trace(self.dualMetric.m) /
                    self.dualMetric.vdim) * pruning_eps
                pruning_metric = self.metric.with_cost(rho)
            self.geom = np.stack(
                [self.dualMetric.flatten(),
                 pruning_metric.flatten()], axis=0)
            eikonal.traits['SubRiemann_Pruning_macro'] = 1

        else:
            raise ValueError("Unrecognized model")

    # Dual metric is useless now, except for generating the primal one
    if self._metric is not None:
        self._dualMetric = (None, "Deleted in SetGeometry")
    self._metric_delete_dual = True

    # Check whether the geometry depends only on the last coordinates
    geom_shape = self.geom.shape[1:]
    self.geom_indep = len(self.shape) - len(geom_shape)
    eikonal.traits['geom_indep_macro'] = self.geom_indep
    if geom_shape != self.shape[self.geom_indep:]:
        raise ValueError(
            "Inconsistent dimensions for geometry data. "
            "It should match (the last coordinates of) domain shape.")
    if self.isCurvature: assert self.geom_indep <= self.ndim_phys

    block_geom = fd.block_expand(self.geom,
                                 self.shape_i[self.geom_indep:],
                                 mode='constant',
                                 constant_values=np.inf)
    if not eikonal.traits[
            'geom_first_macro'] and block_geom.ndim == 2 * self.ndim + 1:
        block_geom = np.moveaxis(block_geom, 0, -1)
    eikonal.args['geom'] = cp.ascontiguousarray(block_geom)
    block_geom = None

    self.geom = (None, "Deleted in SetGeometry")
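
The geom_indep check above only compares trailing shapes: the geometry data must match the last axes of the domain. A tiny sketch with hypothetical shapes:

# Hypothetical: domain (40, 40, 60), geometry varying along the last axis only,
# e.g. geom of shape (nfields, 60), so geom.shape[1:] == (60,).
shape = (40, 40, 60)
geom_shape = (60,)
geom_indep = len(shape) - len(geom_shape)
assert geom_shape == shape[geom_indep:]
print(geom_indep)  # -> 2 leading axes the geometry is independent of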

    precompute_excluded_schemes = (
        'Isotropic',
        'Diagonal',  # Precomputation is useless, since stencil is trivial
        'AsymmetricQuadratic',
        'Rander',  # TODO (?) precomputation does not handle drift yet
        'TTI'  # TODO (?) precomputation does not handle adaptive mix_is_min yet
    )

    if self.model_ in precompute_excluded_schemes:
        self.precompute_scheme = False
    else:
        self.precompute_scheme = self.GetValue(
            'precompute_scheme',
            default=self.geom_indep > 0,
            help="Precompute and store the finite difference scheme stencils")

    # Geometrical data related to geodesics
    self.exportGeodesicFlow = self.GetValue(
        'exportGeodesicFlow',
        default=False,
        help="Export the upwind geodesic flow (direction of the geodesics)")
    self.tips = self.GetValue(
        'tips',
        default=None,
        array_float=(-1, self.ndim),
        help="Tips from which to compute the minimal geodesics")
    if self.isCurvature:
        self.tips_Unoriented = self.GetValue(
            'tips_Unoriented',
            default=None,
            array_float=(-1, self.ndim_phys),
            help="Compute a geodesic from the most favorable orientation")
    self.hasTips = (self.tips is not None
                    or (self.isCurvature and self.tips_Unoriented is not None))

    # Cost function
    if self.HasValue('speed'):
        self.cost = 1. / self.GetValue(
            'speed',
            array_float=True,
            help="speed = 1/cost (scales the metric, accepts AD)")
    else:
        self.cost = self.GetValue(
            'cost',
            array_float=True,
            default=None,
            help="cost = 1/speed (scales the metric, accepts AD)")
        if self.cost is None:
            self.cost = cp.ones(self.shape, dtype=self.float_t)
    if not ad.is_ad(self.cost):
        costVariation = self.GetValue(
            'costVariation',
            default=None,
            help="First order variation of the cost function")
        if costVariation is not None:
            self.cost = ad.Dense.new(self.cost, costVariation)
    if self.isCurvature: self.cost = self.cost * self.h_base
    self.cost = self.as_field(self.cost, 'cost')

    # Cost related parameters
    # float_resolution is also needed below for rtol_default, even when both
    # 'atol' and 'rtol' are provided, hence it is computed unconditionally.
    float_resolution = np.finfo(self.float_t).resolution
    if self.HasValue('atol') and self.HasValue('rtol'): tol = None
    else:
        tol = self.GetValue(
            'tol',
            default="_Dummy",
            array_float=tuple(),
            help=
            "Convergence tolerance for the fixed point solver (determines atol, rtol)"
        )
        if isinstance(tol, str) and tol == "_Dummy":
            cost_bound = ad.remove_ad(self.cost)
            if not self.isCurvature:
                cost_bound = cost_bound * self.metric.cost_bound()
            mean_cost_bound = np.nanmean(cost_bound)
            tol = mean_cost_bound * float_resolution * 5.
            self.hfmOut['keys']['default']['tol'] = self.float_t(float(tol))
    policy.atol = self.GetValue(
        'atol',
        default=tol,
        array_float=tuple(),
        help="Absolute convergence tolerance for the fixed point solver")
    rtol_default = 0. if policy.multiprecision else float_resolution * 5.
    policy.rtol = self.GetValue(
        'rtol',
        default=rtol_default,
        array_float=tuple(),
        help="Relative convergence tolerance for the fixed point solver")

    if policy.bound_active_blocks:
        policy.minChg_delta_min = self.GetValue(
            'minChg_delta_min',
            default=float(np.min(self.h)) / 10.,
            help="Minimal threshold increase with bound_active_blocks method")

    self._CostMetric_delete_metric = not self.drift_model  # Metric will not be needed anymore

    # Walls
    walls = self.GetValue('walls',
                          default=None,
                          help='Obstacles in the domain')
    if walls is not None:
        if self.isCurvature: walls = self.as_field(walls, 'walls')
        wallDist_t = np.uint8
        wallDistBound = self.GetValue(
            'wallDistBound',
            default=10,
            help="Bound on the computed distance to the obstacles.\n"
            "(Ideally a sharp upper bound on the stencil width.)")
        wallDistMax_t = np.iinfo(wallDist_t).max
        wallDist = cp.full(self.shape, wallDistMax_t, dtype=wallDist_t)
        wallDist[walls] = 0
        l1Kernel = inf_convolution.distance_kernel(1,
                                                   self.ndim,
                                                   dtype=wallDist_t,
                                                   ord=1)
        wallDist = inf_convolution.inf_convolution(
            wallDist,
            l1Kernel,
            niter=wallDistBound,
            periodic=self.periodic,
            overwrite=True,
            upper_saturation=wallDistMax_t)
        # This value indicates 'far from wall', and visibility computation is bypassed
        wallDist[wallDist > wallDistBound] = wallDistMax_t
        self.wallDist = wallDist
        eikonal.args['wallDist'] = cp.ascontiguousarray(
            fd.block_expand(wallDist,
                            self.shape_i,
                            mode='constant',
                            constant_values=wallDistMax_t))

    self.walls = walls
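
The wall-distance computation above amounts to iterated min-plus (inf-) convolution with a unit L1 kernel, i.e. repeated Bellman-Ford relaxation saturated at the dtype maximum. A rough pure-NumPy sketch of the idea (hypothetical helper, not the inf_convolution module used above; note np.roll makes every axis periodic, unlike the per-axis periodic flag in the code):

import numpy as np

def l1_step(d):
    # One min-plus step: min over the cell itself and each axis-neighbour + 1.
    out = d.copy()
    for ax in range(d.ndim):
        for sh in (1, -1):
            out = np.minimum(out, np.roll(d, sh, axis=ax) + 1)
    return out

walls = np.zeros((8, 8), dtype=bool)
walls[4, 4] = True
dist = np.full(walls.shape, 255, dtype=np.uint16)  # large value = 'far'
dist[walls] = 0
for _ in range(10):  # plays the role of niter=wallDistBound
    dist = l1_step(dist)
dist[dist > 10] = 255  # bypass visibility far from walls, as above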