Example #1
import cupy as cp
import numpy as np


def non_maximum_suppression(bbox, thresh, score=None, limit=None):
    """Non-maximum suppression."""
    bbox_y1 = bbox[:, 0]
    bbox_x1 = bbox[:, 1]
    bbox_y2 = bbox[:, 2]
    bbox_x2 = bbox[:, 3]

    area = (bbox_x2 - bbox_x1 + 1) * (bbox_y2 - bbox_y1 + 1)
    n_bbox = bbox.shape[0]

    if score is not None:
        order = score.argsort()[::-1].astype(np.int32)
    else:
        order = cp.arange(n_bbox, dtype=np.int32)
    keep = []

    # Compare boxes pairwise and drop any whose IoU with a kept higher-scoring box exceeds thresh
    while order.size > 0:
        i = order[0]
        keep.append(i)

        xx1 = cp.maximum(bbox_x1[i], bbox_x1[order[1:]])
        yy1 = cp.maximum(bbox_y1[i], bbox_y1[order[1:]])
        xx2 = cp.minimum(bbox_x2[i], bbox_x2[order[1:]])
        yy2 = cp.minimum(bbox_y2[i], bbox_y2[order[1:]])

        width = cp.maximum(0., (xx2 - xx1 + 1))
        height = cp.maximum(0., (yy2 - yy1 + 1))
        inter = width * height
        iou = inter / (area[i] + area[order[1:]] - inter)
        index = cp.where(iou <= thresh)[0]
        order = order[(index + 1).tolist()]
    if limit is not None:
        keep = keep[:limit]
    return cp.asnumpy(cp.asarray(keep, dtype=np.int32))
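
A minimal usage sketch for the function above. The box layout is (y1, x1, y2, x2), matching the slicing at the top; the values are illustrative:

bbox = cp.asarray([[0, 0, 10, 10],
                   [1, 1, 11, 11],
                   [50, 50, 60, 60]], dtype=cp.float32)
score = cp.asarray([0.9, 0.8, 0.7], dtype=cp.float32)

# The second box overlaps the first with IoU ~0.70 > 0.5, so it is suppressed.
print(non_maximum_suppression(bbox, thresh=0.5, score=score))  # [0 2]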
Example #2
def _get_nearplane_steps(diff, dOP, dPO, A1, A4, recover_psi, recover_probe):
    # (22) Use least-squares to find the optimal step sizes simultaneously
    if recover_psi and recover_probe:
        b1 = cp.sum((dOP.conj() * diff).real, axis=(-2, -1))
        b2 = cp.sum((dPO.conj() * diff).real, axis=(-2, -1))
        A2 = cp.sum((dOP * dPO.conj()), axis=(-2, -1))
        A3 = A2.conj()
        determinant = A1 * A4 - A2 * A3
        x1 = -cp.conj(A2 * b2 - A4 * b1) / determinant
        x2 = cp.conj(A1 * b2 - A3 * b1) / determinant
    elif recover_psi:
        b1 = cp.sum((dOP.conj() * diff).real, axis=(-2, -1))
        x1 = b1 / A1
    elif recover_probe:
        b2 = cp.sum((dPO.conj() * diff).real, axis=(-2, -1))
        x2 = b2 / A4

    if recover_psi:
        step = 0.9 * cp.maximum(0, x1[..., None, None].real)

        # (27b) Object update
        weighted_step_psi = cp.mean(step, keepdims=True, axis=-5)
    else:
        weighted_step_psi = None

    if recover_probe:
        step = 0.9 * cp.maximum(0, x2[..., None, None].real)

        weighted_step_probe = cp.mean(step, axis=-5, keepdims=True)
    else:
        weighted_step_probe = None

    return weighted_step_psi, weighted_step_probe
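
The closed-form x1 and x2 above are a Cramer's-rule solution of a 2x2 system built from the summed quantities. A scalar NumPy sketch of that identity, assuming (as in the code above) that A1, A4, b1, b2 are real while A3 = conj(A2):

import numpy as np

A1, A4 = 2.0, 3.0    # real diagonal terms
A2 = 0.5 + 0.2j      # complex cross term
A3 = np.conj(A2)
b1, b2 = 1.0, -0.7   # real right-hand sides

determinant = A1 * A4 - A2 * A3
x1 = -np.conj(A2 * b2 - A4 * b1) / determinant
x2 = np.conj(A1 * b2 - A3 * b1) / determinant

# Same solution via the linear system [[A1, A3], [A2, A4]] @ [x1, x2] = [b1, b2]:
x = np.linalg.solve(np.array([[A1, A3], [A2, A4]]), np.array([b1, b2]))
print(np.allclose([x1, x2], x))  # True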
Example #3
    def non_max_suppression(self, A, threshold=0.5):
        """Remove overlapping bounding boxes. Returns filterd boxes in screen coordinates and
        as an array with shape (n_boxes, (x1, y1, x2, y2)).
        Args:
            A (np.array): predicted labels and boxes.
            threshold (float): overlap threshold to treat as new box
        Returns:
            np.array: only max boxes.
        """
        x_stride = 34
        y_stride = 34
        score = []
        x1 = []
        x2 = []
        y1 = []
        y2 = []
        for i in range(0, A.shape[1]):
            for j in range(0, A.shape[2]):
                if A[0, i, j][0] > 0.5:
                    bx, by, w, h = A[0, i, j][1:]
                    score.append(A[0, i, j, 0])
                    x1.append((j + bx - w / 2) * x_stride + 29)
                    y1.append((i + by - h / 2) * y_stride)
                    x2.append((j + bx + w / 2) * x_stride + 29)
                    y2.append((i + by + h / 2) * y_stride)

        score = np.array(score)
        x1 = cp.array(x1)
        x2 = cp.array(x2)
        y1 = cp.array(y1)
        y2 = cp.array(y2)

        score_indexes = score.argsort().tolist()
        boxes_keep_index = []
        while len(score_indexes) > 0:
            index = score_indexes.pop()
            boxes_keep_index.append(index)
            if not len(score_indexes):
                break
            #iou
            xs1 = cp.maximum(x1[index], x1[score_indexes])
            ys1 = cp.maximum(y1[index], y1[score_indexes])
            xs2 = cp.minimum(x2[index], x2[score_indexes])
            ys2 = cp.minimum(y2[index], y2[score_indexes])
            intersections = cp.maximum(ys2 - ys1, 0) * cp.maximum(xs2 - xs1, 0)
            unions = (x2[index]-x1[index])*(y2[index]-y1[index]) \
                + (x2[score_indexes]-x1[score_indexes])*(y2[score_indexes]-y1[score_indexes]) \
                - intersections
            ious = cp.asnumpy(intersections / unions)
            filtered_indexes = set((ious > threshold).nonzero()[0])
            score_indexes = [
                v for (i, v) in enumerate(score_indexes)
                if i not in filtered_indexes
            ]

        nms_res = np.zeros((len(boxes_keep_index), 5))
        for i, j in enumerate(boxes_keep_index):
            nms_res[i, :] = np.array(
                [score[j], float(x1[j]), float(y1[j]), float(x2[j]), float(y2[j])])
        return nms_res
Example #4
def calcDifference(sample, aNeg, bNeg, aPos, bPos):
    negPDF = beta.pdf(sample, aNeg, bNeg)
    posPDF = beta.pdf(sample, aPos, bPos)
    pdfDiff = negPDF - posPDF
    pdfDiffNeg = xp.maximum(pdfDiff, xp.zeros_like(pdfDiff))
    pdfDiffPos = xp.maximum(-1 * pdfDiff, xp.zeros_like(pdfDiff))
    pdfMax = xp.maximum(negPDF, posPDF)
    return negPDF, posPDF, pdfDiffPos, pdfDiffNeg, pdfMax
Example #5
def stretch_pre(nimg):
    """
    from 'Applicability Of White-Balancing Algorithms to Restoring Faded Colour Slides: An Empirical Evaluation'
    """
    nimg = nimg.transpose(2, 0, 1)
    nimg[0] = np.maximum(nimg[0] - nimg[0].min(), 0)
    nimg[1] = np.maximum(nimg[1] - nimg[1].min(), 0)
    nimg[2] = np.maximum(nimg[2] - nimg[2].min(), 0)
    return nimg.transpose(1, 2, 0)
Example #6
    def prox(self, x):
        if self.prox_method == 'tv':
            x = 0.5 * (np.maximum(x, 0) + tv.tv3dApproxHaar(
                x, self.tv_lambda / self.L, self.tv_lambdaw))
        elif self.prox_method == 'native':
            x = np.maximum(x, 0) + self.soft_thresh(x, self.tau)
        elif self.prox_method == 'non-neg':
            x = np.maximum(x, 0)
        return x
Example #7
def _calc_offset(offset_map, region, parent_y, parent_x):
    y0, y1, x0, x1 = region
    x_area, y_area = offset_map[:, y0:y1, x0:x1]
    x_vec = np.power(np.subtract(np.arange(x0, x1), parent_x), 2)
    y_vec = np.power(np.subtract(np.arange(y0, y1), parent_y), 2)
    xv, yv = np.meshgrid(x_vec, y_vec)
    dist = np.sqrt(xv + yv)  # sqrt(x^2 + y^2)
    xv = np.divide(xv, dist)  # x^2 / dist
    yv = np.divide(yv, dist)  # y^2 / dist
    offset_map[0, y0:y1, x0:x1] = np.maximum(x_area, xv)
    offset_map[1, y0:y1, x0:x1] = np.maximum(y_area, yv)
Example #8
def yangVectorDistance(negativeVector, positiveVector, p=1):
    x = xp.array(negativeVector).reshape((-1, 1))
    y = xp.array(positiveVector).reshape((-1, 1))
    pExp = int(p)
    assert x.shape == y.shape, "x ({}) and y ({}) must be of the same shape".format(
        x.shape, y.shape)
    assert pExp > 0, "p must be an integer greater than 0"
    numerator = vectorPDistance(x, y, pExp)
    max_X_Y = xp.maximum(xp.absolute(x), xp.absolute(y))
    maxes = xp.maximum(max_X_Y, xp.absolute(x - y))
    return numerator / xp.sum(maxes)
Example #9
def fast_forward_one(
    prev_x: cp.ndarray,
    prev_l: cp.ndarray,
    hidden: cp.ndarray,
    x_embedder_W: cp.ndarray,
    gru_xw: cp.ndarray,
    gru_hw: cp.ndarray,
    gru_xb: cp.ndarray,
    gru_hb: cp.ndarray,
    O1_W: cp.ndarray,
    O1_b: cp.ndarray,
    O2_W: cp.ndarray,
    O2_b: cp.ndarray,
    w_gru_x: cp.ndarray,
    w_gru_h: cp.ndarray,
    w_out_x1: cp.ndarray,
    w_out_x2: cp.ndarray,
):
    prev_xl = cp.concatenate((x_embedder_W[prev_x], prev_l),
                             axis=1)  # (batch_size, ?)

    # gru_x = prev_xl.dot(gru_xw) + gru_xb
    gru_x = w_gru_x
    prev_xl.dot(gru_xw, gru_x)
    gru_x += gru_xb

    # gru_h = hidden.dot(gru_hw) + gru_hb
    gru_h = w_gru_h
    hidden.dot(gru_hw, gru_h)
    gru_h += gru_hb

    size = gru_x.shape[1] // 3
    W_r_x, W_z_x, W_x = gru_x[:, :size], gru_x[:, size:size * 2], gru_x[:, size * 2:]
    U_r_h, U_z_h, U_x = gru_h[:, :size], gru_h[:, size:size * 2], gru_h[:, size * 2:]
    new_hidden = gru_element_wise(hidden, W_r_x, W_z_x, W_x, U_r_h, U_z_h, U_x)

    # out_x = new_hidden.dot(O1_W) + O1_b
    out_x1 = w_out_x1
    new_hidden.dot(O1_W, out_x1)
    out_x1 += O1_b

    # In-place ReLU: the third argument is the output buffer.
    cp.maximum(out_x1, 0.0, out_x1)

    # out_x = out_x.dot(O2_W) + O2_b
    out_x2 = w_out_x2
    out_x1.dot(O2_W, out_x2)
    out_x2 += O2_b
    return out_x2, new_hidden
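
The fused gru_element_wise kernel is not shown; assuming it implements the standard GRU gate equations on these six pre-activation slices, an unfused plain-CuPy sketch would look like the following (the exact gate and interpolation convention is an assumption about the real kernel):

import cupy as cp

def _sigmoid(a):
    return 1 / (1 + cp.exp(-a))

def gru_element_wise_reference(hidden, W_r_x, W_z_x, W_x, U_r_h, U_z_h, U_x):
    """Unfused sketch of the assumed GRU cell update."""
    r = _sigmoid(W_r_x + U_r_h)        # reset gate
    z = _sigmoid(W_z_x + U_z_h)        # update gate
    h_tilde = cp.tanh(W_x + r * U_x)   # candidate state
    return z * hidden + (1 - z) * h_tilde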
Example #10
def _update_position(
    scan,
    position_options,
    position_update_numerator,
    position_update_denominator,
    alpha=0.05,
    max_shift=1,
):
    step = position_update_numerator / (
        (1 - alpha) * position_update_denominator +
        alpha * position_update_denominator.max())

    step_x = step[..., 0]
    step_y = step[..., 1]

    if position_options.use_adaptive_moment:
        logger.info(
            "position correction with ADAptive Momentum acceleration enabled.")
        step_x, position_options.vx, position_options.mx = adam(
            step_x,
            position_options.vx,
            position_options.mx,
            vdecay=position_options.vdecay,
            mdecay=position_options.mdecay)
        step_y, position_options.vy, position_options.my = adam(
            step_y,
            position_options.vy,
            position_options.my,
            vdecay=position_options.vdecay,
            mdecay=position_options.mdecay)

    # Step limit for stability
    _max_shift = cp.minimum(
        max_shift,
        _mad(
            cp.concatenate((step_x, step_y), axis=-1),
            axis=-1,
            keepdims=True,
        ),
    )
    step_x = cp.maximum(-_max_shift, cp.minimum(step_x, _max_shift))
    step_y = cp.maximum(-_max_shift, cp.minimum(step_y, _max_shift))

    # Ensure net movement is zero
    step_x -= cp.mean(step_x, axis=-1, keepdims=True)
    step_y -= cp.mean(step_y, axis=-1, keepdims=True)
    logger.info('position update norm is %+.3e', tike.linalg.norm(step_x))

    scan[..., 0] -= step_x
    scan[..., 1] -= step_y

    return scan, position_options
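
The _mad helper is not included in this snippet; a plausible stand-in, consistent with its use here as a robust per-axis spread estimate, is the median absolute deviation (this implementation is an assumption, not the original code):

import cupy as cp

def _mad(x, axis=None, keepdims=False):
    """Median absolute deviation (hypothetical stand-in for the helper above)."""
    med = cp.median(x, axis=axis, keepdims=True)
    return cp.median(cp.abs(x - med), axis=axis, keepdims=keepdims)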
Example #11
    def test_max(self):
        @jit.rawkernel()
        def f(x, y, z, r):
            tid = jit.blockDim.x * jit.blockIdx.x + jit.threadIdx.x
            r[tid] = max(x[tid], y[tid], z[tid])

        x = testing.shaped_random((1024,), dtype=numpy.int32, seed=0)
        y = testing.shaped_random((1024,), dtype=numpy.int32, seed=1)
        z = testing.shaped_random((1024,), dtype=numpy.int32, seed=2)
        r = testing.shaped_random((1024,), dtype=numpy.int32, seed=3)
        f((8,), (128,), (x, y, z, r))
        expected = cupy.maximum(x, cupy.maximum(y, z))
        assert bool((r == expected).all())
Example #12
def normal_density_cupy(x, mean, stddev, from_axis=None, eps=1e-8, gpu=0):
    import cupy as cp

    with cp.cuda.Device(gpu):
        variance = cp.maximum(stddev ** 2, eps)
        stddev = cp.maximum(stddev, eps)

        density = cp.exp(-cp.square(x - mean) / (2 * variance)) / (stddev * math.sqrt(2 * math.pi))

        if (from_axis is not None) and (from_axis >= 0):
            shape = density.shape[:from_axis] + (-1,)  # flatten the trailing axes
            density = cp.reshape(density, shape)
            density = cp.prod(density, axis=from_axis)

        return density
Example #13
def normal_log_density_cupy(x, mean, stddev, from_axis=None, eps=1e-8, gpu=0):
    import cupy as cp

    with cp.cuda.Device(gpu):
        variance = cp.maximum(stddev ** 2, eps)
        log_stddev = cp.log(cp.maximum(stddev, eps))

        log_density = -0.5 * (math.log(2 * math.pi) + 2 * log_stddev + ((x - mean)**2 / variance))

        if (from_axis is not None) and (from_axis >= 0):
            shape = log_density.shape[:from_axis] + (-1,)  # flatten the trailing axes
            log_density = cp.reshape(log_density, shape)
            log_density = cp.sum(log_density, axis=from_axis)

        return log_density
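
Since the two helpers above compute the same quantity in linear and log space, exponentiating the log density should recover the density up to the eps clamping. A quick consistency check, assuming a CUDA device is available:

import cupy as cp

x = cp.random.randn(4, 3).astype(cp.float32)
mean = cp.zeros((4, 3), dtype=cp.float32)
stddev = cp.ones((4, 3), dtype=cp.float32)

d = normal_density_cupy(x, mean, stddev, from_axis=1)
log_d = normal_log_density_cupy(x, mean, stddev, from_axis=1)
print(cp.allclose(d, cp.exp(log_d), rtol=1e-4))  # True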
Example #14
    def _nms_boxes(self, boxes, box_confidences):
        """Apply the Non-Maximum Suppression (NMS) algorithm on the bounding boxes with their
        confidence scores and return an array with the indexes of the bounding boxes we want to
        keep (and display later).

        Keyword arguments:
        boxes -- a NumPy array containing N bounding-box coordinates that survived filtering,
        with shape (N,4); 4 for the x, y, width, height values of each box
        box_confidences -- a NumPy array containing the corresponding confidences with shape N
        """
        x_coord = boxes[:, 0]
        y_coord = boxes[:, 1]
        width = boxes[:, 2]
        height = boxes[:, 3]

        areas = width * height
        ordered = box_confidences.argsort()[::-1]

        keep = list()
        while ordered.size > 0:
            # Index of the current element:
            i = ordered[0]
            ii = cp.asnumpy(i)
            keep.append(ii)
            xx1 = cp.maximum(x_coord[i], x_coord[ordered[1:]])
            yy1 = cp.maximum(y_coord[i], y_coord[ordered[1:]])
            xx2 = cp.minimum(x_coord[i] + width[i],
                             x_coord[ordered[1:]] + width[ordered[1:]])
            yy2 = cp.minimum(y_coord[i] + height[i],
                             y_coord[ordered[1:]] + height[ordered[1:]])

            width1 = cp.maximum(0.0, xx2 - xx1 + 1)
            height1 = cp.maximum(0.0, yy2 - yy1 + 1)
            intersection = width1 * height1
            union = (areas[i] + areas[ordered[1:]] - intersection)

            # Compute the Intersection over Union (IoU) score:
            iou = intersection / union

            # The goal of the NMS algorithm is to reduce the number of adjacent bounding-box
            # candidates to a minimum. In this step, we keep only those elements whose overlap
            # with the current bounding box is lower than the threshold:
            indexes = cp.where(iou <= self.nms_threshold)[0]
            ordered = ordered[indexes + 1]
        keep = cp.asarray(np.array(keep))
        return keep
Example #15
def compute_gain(sound, fs, min_db=-80.0, mode='A_weighting'):
    if fs == 16000:
        n_fft = 2048
    elif fs == 44100:
        n_fft = 4096
    else:
        raise Exception('Invalid fs {}'.format(fs))
    stride = n_fft // 2

    gain = None
    for i in range(0, len(sound[0]) - n_fft + 1, stride):
        if mode == 'RMSE':
            g = cupy.mean(sound[:, i:i + n_fft]**2, axis=1)
        elif mode == 'A_weighting':
            spec = cupy.fft.rfft(
                cupy.hanning(n_fft + 1)[:-1] * sound[:, i:i + n_fft])
            power_spec = cupy.abs(spec)**2
            a_weighted_spec = power_spec * cupy.power(10,
                                                      a_weight(fs, n_fft) / 10)
            g = cupy.sum(a_weighted_spec, axis=1)
        else:
            raise Exception('Invalid mode {}'.format(mode))
        if i == 0:
            gain = g.reshape([-1, 1])
        else:
            gain = cupy.concatenate((gain, g.reshape([-1, 1])), axis=1)

    gain = cupy.maximum(gain, 10 ** (min_db / 10))
    gain_db = 10 * cupy.log10(gain)

    return gain_db
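
A usage sketch for the RMSE mode (the A_weighting branch additionally needs the a_weight helper, which is not shown here); sound is assumed to have shape (batch, samples):

import cupy

sound = cupy.random.uniform(-1, 1, (2, 16000)).astype(cupy.float32)
gain_db = compute_gain(sound, fs=16000, mode='RMSE')
print(gain_db.shape)  # (2, 14): one gain value per 1024-sample hop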
Example #16
def adam(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, amsgrad, beta1, beta2, lr, weight_decay, eps):
    _check_tensors(*params)
    engine = _get_engine(*params)

    for i, param in enumerate(params):
        grad = grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step = state_steps[i]

        bias_correction1 = 1 - beta1 ** step
        bias_correction2 = 1 - beta2 ** step

        if weight_decay != 0:
            grad._data = grad.data + param.data * weight_decay

        # Decay the first and second moment running average coefficient
        exp_avg._data = exp_avg.data * beta1 + (1 - beta1) * grad.data
        exp_avg_sq._data = exp_avg_sq.data * beta2 + (1 - beta2) * (grad.data * grad.data)
        if amsgrad:
            max_exp_avg_sq = max_exp_avg_sqs[i]
            # Maintains the maximum of all 2nd moment running avg. till now
            max_exp_avg_sq._data = engine.maximum(max_exp_avg_sq.data, exp_avg_sq.data)
            # Use the max. for normalizing running avg. of gradient
            denom = (engine.sqrt(max_exp_avg_sq.data) / math.sqrt(bias_correction2)) + eps
        else:
            denom = (engine.sqrt(exp_avg_sq.data) / math.sqrt(bias_correction2)) + eps

        step_size = lr / bias_correction1

        param._data = param.data - step_size * (exp_avg.data / denom)
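
For reference, the same update written against plain NumPy arrays makes the bias-correction algebra easy to check in isolation (a standalone sketch, not the engine/tensor API used above):

import math
import numpy as np

def adam_step(param, grad, exp_avg, exp_avg_sq, step, lr=1e-3,
              beta1=0.9, beta2=0.999, eps=1e-8):
    """One Adam update on plain NumPy arrays."""
    exp_avg[:] = beta1 * exp_avg + (1 - beta1) * grad
    exp_avg_sq[:] = beta2 * exp_avg_sq + (1 - beta2) * grad * grad
    bias_correction1 = 1 - beta1 ** step
    bias_correction2 = 1 - beta2 ** step
    denom = np.sqrt(exp_avg_sq) / math.sqrt(bias_correction2) + eps
    return param - (lr / bias_correction1) * exp_avg / denom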
Example #17
def mean_peak_distance(peak_image, centroids, return_numpy=True):
    """
    Calculate the mean peak distance in degrees between two corresponding peaks
    for each line profile in an SLI image series.

    Args:

        peak_image: Boolean NumPy array specifying the peak positions in the
        full SLI stack.

        centroids: Use centroid calculation to better determine the peak
        position regardless of the number of measurements / illumination
        angles used.

        return_numpy: Necessary if using `use_gpu`. Specifies whether a CuPy
        or NumPy array will be returned.

    Returns:

        NumPy array of floating point values containing the mean peak distance
        of the line profiles in degrees.
    """
    peak_distance_gpu = peak_distance(peak_image, centroids,
                                      return_numpy=False)
    peak_distance_gpu[peak_distance_gpu > 180] = 0
    peak_distance_gpu = cupy.sum(peak_distance_gpu, axis=-1) / \
                        cupy.maximum(1, cupy.count_nonzero(peak_distance_gpu,
                                                           axis=-1))
    if return_numpy:
        peak_distance_cpu = cupy.asnumpy(peak_distance_gpu)
        del peak_distance_gpu
        return peak_distance_cpu
    else:
        return peak_distance_gpu
Example #18
def maximum(a, cuda=False):
    if cuda:
        res = cp.maximum(a, 0)
        # Block until the kernel finishes so the GPU result is ready before
        # returning (comparable to the synchronous CPU path, e.g. for timing).
        cp.cuda.Stream.null.synchronize()
        return res
    else:
        return np.maximum(a, 0)
Example #19
def FSITM(HDR, LDR, alpha=None):

    NumPixels = LDR.size

    if alpha is None:
        r = cp.floor(NumPixels / (2.**18))
        if r > 1.:
            alpha = 1. - (1. / r)
        else:
            alpha = 0.

    minNonzero = cp.min(HDR[HDR > 0])
    LogH = cp.log(cp.maximum(HDR, minNonzero))

    # float is needed for further calculation
    LogH = cp.around((LogH - LogH.min()) * 255. /
                     (LogH.max() - LogH.min())).astype(cp.float64)

    if alpha > 0.:
        PhaseHDR_CH = phasecong100(HDR, 2, 2, 8, 8)
        PhaseLDR_CH8 = phasecong100(LDR, 2, 2, 8, 8)
    else:  # alpha == 0 when the image has fewer than 2**18 pixels (~512x512)
        PhaseHDR_CH = 0
        PhaseLDR_CH8 = 0

    PhaseLogH = phasecong100(LogH, 2, 2, 2, 2)
    PhaseH = alpha * PhaseHDR_CH + (1 - alpha) * PhaseLogH

    PhaseLDR_CH2 = phasecong100(LDR, 2, 2, 2, 2)
    PhaseL = alpha * PhaseLDR_CH8 + (1 - alpha) * PhaseLDR_CH2
    Q = cp.sum(
        cp.logical_or(cp.logical_and(PhaseL <= 0, PhaseH <= 0),
                      cp.logical_and(PhaseL > 0, PhaseH > 0))) / NumPixels
    return Q
Example #20
def ruzicka_mat(matrix_a, vector_new):
    matrix_a *= cp.arange(1023, -1, -1, dtype=cp.uint16)
    min_up = cp.minimum(cp.array(matrix_a), vector_new)
    max_down = cp.maximum(cp.array(matrix_a), vector_new)
    numerator = cp.sum(min_up, axis=1)
    denominator = cp.sum(max_down, axis=1)
    return cp.asnumpy(cp.divide(numerator, denominator))
Example #21
    def relu(self, Z):
        """ReLU function"""

        A = cp.maximum(0, Z)
        assert (A.shape == Z.shape)

        return A
Example #22
    def _min_or_max(self, axis, out, min_or_max, sum_duplicates, non_zero):
        if out is not None:
            raise ValueError(("Sparse matrices do not support "
                              "an 'out' parameter."))

        util.validateaxis(axis)

        if axis is None:
            if 0 in self.shape:
                raise ValueError("zero-size array to reduction operation")

            zero = cupy.zeros((), dtype=self.dtype)
            if self.nnz == 0:
                return zero
            if sum_duplicates:
                self.sum_duplicates()
            m = min_or_max(self.data)
            if non_zero:
                return m
            if self.nnz != internal.prod(self.shape):
                if min_or_max is cupy.min:
                    m = cupy.minimum(zero, m)
                elif min_or_max is cupy.max:
                    m = cupy.maximum(zero, m)
                else:
                    assert False
            return m

        if axis == 0 or axis == 1:
            return self._min_or_max_axis(axis, min_or_max, sum_duplicates,
                                         non_zero)
        else:
            raise ValueError("axis out of range")
Example #23
    def smearing(self, f, f_el, volt_mat, new_ind):
        '''

        Produces B matrix by comparing voltages

        takes:

        f - array shape (n_nodes)
        f_el - array shape (n_electrodes)
        volt_mat - array shape (n_measurements, 2)
        new_ind - array shape (n_measurements)

        returns:

        b-matrix - array shape (n_measurements, n_nodes)

        '''
        f_volt0 = f_el[new_ind, volt_mat[:, 0].astype(int)]
        f_volt1 = f_el[new_ind, volt_mat[:, 1].astype(int)]
        min_fel = cp.minimum(f_volt0, f_volt1)
        max_fel = cp.maximum(f_volt0, f_volt1)
        b_matrix = cp.empty((len(volt_mat), self.n_pts+self.ne))
        b_matrix[:] = (min_fel[:, None] < f[new_ind]) & (f[new_ind] <= max_fel[:, None])

        return b_matrix
Example #24
    def _min_or_max(self, axis, out, min_or_max, explicit):
        if out is not None:
            raise ValueError(("Sparse matrices do not support "
                              "an 'out' parameter."))

        sputils.validateaxis(axis)

        if axis is None:
            if 0 in self.shape:
                raise ValueError("zero-size array to reduction operation")

            zero = cupy.zeros((), dtype=self.dtype)
            if self.nnz == 0:
                return zero
            self.sum_duplicates()
            m = min_or_max(self.data)
            if explicit:
                return m
            if self.nnz != internal.prod(self.shape):
                if min_or_max is cupy.min:
                    m = cupy.minimum(zero, m)
                elif min_or_max is cupy.max:
                    m = cupy.maximum(zero, m)
                else:
                    assert False
            return m

        if axis < 0:
            axis += 2

        return self._min_or_max_axis(axis, min_or_max, explicit)
Example #25
def relu(z, a=None, derivative=False):
    if derivative:
        return z > 0
    else:
        return cp.maximum(0, z)
Example #26
    def _relu(self, z):
        """ReLU activation function
        Args:
            z (np.array): input
        Returns:
            np.array.
        """
        return np.maximum(0, z)
Example #27
def lrelu(x, alpha=0.01, derivative=False):
    res = x
    if derivative:
        dx = np.ones_like(res)
        dx[res < 0] = alpha
        return dx
    else:
        # The third argument is the `out` buffer: the result overwrites x in place.
        return np.maximum(x, x * alpha, x)
Example #28
def ruzicka_vec(vector_old, vector_new):
    vector_old_cp = cp.array(vector_old) * cp.arange(
        1023, -1, -1, dtype=cp.uint16)
    min_up = cp.minimum(vector_old_cp, vector_new)
    max_down = cp.maximum(vector_old_cp, vector_new)
    numerator = cp.sum(min_up, axis=1)
    denominator = cp.sum(max_down, axis=1)
    return cp.asnumpy(cp.divide(numerator, denominator))
Example #29
    def darkflat_correction(self, data):
        """Dark-flat field correction"""

        for k in range(
                data.shape[0]):  # work with 2D arrays to save GPU memory
            data[k] = (data[k] - self.dark) / cp.maximum(
                self.flat - self.dark, 1e-6)
        return data
Example #30
def predict(u):
    x = u

    for weight, bias in zip(weights, biases):
        x = np.matmul(x, weight)
        x = np.add(x, bias)
        x = np.maximum(0, x)

    return x
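
predict reads the layer parameters from module-level weights and biases lists; a minimal setup for a two-layer network (the shapes here are illustrative assumptions):

import numpy as np

weights = [np.random.randn(4, 8), np.random.randn(8, 2)]
biases = [np.zeros(8), np.zeros(2)]

u = np.random.randn(3, 4)  # batch of 3 inputs
print(predict(u).shape)    # (3, 2)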
Example #31
def _fftconv(a, b, axes=(0, 1)):
    """Patched version of :func:`sporco.linalg.fftconv`."""

    if cp.isrealobj(a) and cp.isrealobj(b):
        fft = cp.fft.rfftn
        ifft = cp.fft.irfftn
    else:
        fft = cp.fft.fftn
        ifft = cp.fft.ifftn
    dims = cp.maximum(cp.asarray([a.shape[i] for i in axes]),
                      cp.asarray([b.shape[i] for i in axes]))
    dims = [int(d) for d in dims]
    af = fft(a, dims, axes)
    bf = fft(b, dims, axes)
    return ifft(af * bf, dims, axes)
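
Because dims is the element-wise maximum of the two shapes rather than their sum, this computes a circular convolution when a and b have the same shape; convolving with a unit impulse should therefore return the input unchanged:

import cupy as cp

a = cp.random.rand(8, 8)
delta = cp.zeros((8, 8))
delta[0, 0] = 1.0
print(cp.allclose(_fftconv(a, delta), a))  # True: the impulse acts as identity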