Example #1
	def forward(self, bottom, top):
		self.label = cp.asarray(copy.deepcopy(bottom[1].data),cp.uint8)
		prob = cp.asarray(copy.deepcopy(bottom[0].data),cp.float64)
		prob = cp.subtract(prob,cp.max(prob,axis=1)[:,cp.newaxis,...])
		prob = cp.exp(prob)
		self.softmax = cp.divide(prob,cp.sum(prob,axis=1)[:,cp.newaxis,...])

		## mask
		self.weight_mask = cp.ones_like(self.label, cp.float64)
		for weight_id in self.weight_dic:
			self.weight_mask[self.label == weight_id] = self.weight_dic[weight_id]

		if self.has_ignore_label:
			self.weight_mask[self.label == self.ignore_label] = 0
		# self.weight_mask[self.label == 0] = 0.3
		# self.weight_mask[self.label == 1] = 0.25
		# self.weight_mask[self.label == 2] = 5
		# self.weight_mask[self.label == 4] = 2
		self.label[self.label == 3] = 2


		compute_count = self.weight_mask[self.weight_mask != 0].size

		## normalize mask
		self.weight_mask = cp.divide(self.weight_mask, cp.divide(cp.sum(self.weight_mask), compute_count))


		## compute loss
		prob_compute_matrix = copy.deepcopy(self.softmax[self.index_0,self.label,self.index_2,self.index_3])
		prob_compute_matrix[prob_compute_matrix < (1e-10)] = 1e-10

		loss = - cp.divide(cp.sum(cp.multiply(cp.log(prob_compute_matrix),self.weight_mask)),compute_count)

		loss = cp.asnumpy(loss)
		top[0].data[...] = loss
Example #2
def linear_model_backward(AL, Y, caches):
    grads = OrderedDict()
    L = len(caches)  # the number of layers
    m = AL.shape[1]
    Y = Y.reshape(AL.shape)  # after this line, Y is the same shape as AL

    # Initializing the backpropagation
    dAL = -(cp.divide(Y, AL) - cp.divide(1 - Y, 1 - AL))

    # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "AL, Y, caches". Outputs: grads["dA(L-1)"], grads["dWL"], grads["dbL"].
    current_cache = caches[L - 1]
    grads["dA" + str(L - 1)], grads["dW" + str(L)], grads[
        "db" + str(L)] = linear_activation_backward(dAL,
                                                    current_cache,
                                                    activation="sigmoid")

    for l in reversed(range(L - 1)):
        # lth layer: (RELU -> LINEAR) gradients.
        current_cache = caches[l]
        dA_prev_temp, dW_temp, db_temp = linear_activation_backward(
            grads["dA" + str(l + 1)], current_cache, activation="relu")
        grads["dA" + str(l)] = dA_prev_temp
        grads["dW" + str(l + 1)] = dW_temp
        grads["db" + str(l + 1)] = db_temp

    return grads
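For context, a minimal sketch of the `linear_activation_backward` helper this function calls, assuming the usual cache layout ((A_prev, W, b), Z) from the forward pass (an assumption, not shown in the example itself):

def linear_activation_backward(dA, cache, activation):
    (A_prev, W, b), Z = cache  # assumed cache layout from the forward pass
    if activation == "relu":
        dZ = dA * (Z > 0)  # ReLU'(Z) is 1 where Z > 0, else 0
    else:  # "sigmoid"
        s = 1 / (1 + cp.exp(-Z))
        dZ = dA * s * (1 - s)  # sigmoid'(Z) = s * (1 - s)
    m = A_prev.shape[1]
    dW = cp.dot(dZ, A_prev.T) / m
    db = cp.sum(dZ, axis=1, keepdims=True) / m
    dA_prev = cp.dot(W.T, dZ)
    return dA_prev, dW, db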
Example #3
 def update_pair(self, i, k):
     cp.subtract(self.mass_r_arrayg[i, :],
                 self.mass_r_arrayg[i + 1:, :],
                 out=self.relative_r[:k])
     cp.multiply(self.relative_r[:k],
                 self.relative_r[:k],
                 out=self.distance_sqv[:k])
     # np.add.reduce(self.distance_sqv[:k], axis = 1, out = self.distance_sq[:k])
     cp.sum(self.distance_sqv[:k], axis=1, out=self.distance_sq[:k])
     cp.sqrt(self.distance_sq[:k], out=self.distance_inv[:k])
     cp.divide(1.0, self.distance_inv[:k], out=self.distance_inv[:k])
     cp.multiply(self.relative_r[:k],
                 self.distance_inv[:k].reshape(k, 1),
                 out=self.relative_r[:k])
     cp.divide(self._G, self.distance_sq[:k], out=self.a_factor[:k])
     cp.multiply(self.a_factor[:k],
                 self.mass_m_array[i + 1:],
                 out=self.a1[:k])
     cp.multiply(self.a_factor[:k], self.mass_m_array[i], out=self.a2[:k])
     cp.multiply(self.relative_r[:k],
                 self.a1[:k].reshape(k, 1),
                 out=self.a1r[:k])
     # np.add.reduce(self.a1r[:k], axis = 0, out = self.a1v)
     cp.sum(self.a1r[:k], axis=0, out=self.a1v)
     cp.subtract(self.mass_a_arrayg[i, :],
                 self.a1v,
                 out=self.mass_a_arrayg[i, :])
     cp.multiply(self.relative_r[:k],
                 self.a2[:k].reshape(k, 1),
                 out=self.a2r[:k])
     cp.add(self.mass_a_arrayg[i + 1:, :],
            self.a2r[:k],
            out=self.mass_a_arrayg[i + 1:, :])
Example #4
def backward(yhat, y, memory, param_values, arch):
    gradsVals = {}
    #  y = y.reshape(yhat.shape)
    if usegpu:
        yhat = np.asarray(yhat)
        y = np.asarray(y)

    daprev = -(np.divide(y, yhat) - np.divide(1 - y, 1 - yhat))

    for layer_idx_prev, layer in reversed(list(enumerate(arch))):
        layer_idx = layer_idx_prev + 1
        activCurr = layer["activation"]

        da_curr = daprev

        aprev = memory["A" + str(layer_idx_prev)]
        z_curr = memory["Z" + str(layer_idx)]

        w_curr = param_values["W" + str(layer_idx)]
        b_curr = param_values["b" + str(layer_idx)]

        daprev, dw_curr, db_curr = singleBackward(da_curr, w_curr, b_curr,
                                                  z_curr, aprev, activCurr)

        gradsVals["dW" + str(layer_idx)] = dw_curr
        gradsVals["db" + str(layer_idx)] = db_curr

    return gradsVals
Example #5
def cost(AL, Y, mode = 'SEL'):
    '''
    Compute the cost and its derivative with respect to AL.

    Parameters
    ----------
    AL : cp.array(out_dim, examples)
        Final layer output.
    Y : cp.array(out_dim, examples)
        Expected output.
    mode : string, optional
        Type of cost computation. The default is 'SEL'.

    Returns
    -------
    cost : cp.array(out_dim, 1)
        Cost output (cost per output feature).
    dAL : cp.array(out_dim, examples)
        Cost derivative with respect to AL.
    '''
    
    m = Y.shape[1]
    
    if mode == 'XC':
        AL = cp.clip(AL, 1e-15, 1-1e-15)
        cost = -(1/m)*cp.sum((Y*cp.log(AL)+((1-Y)*cp.log(1-AL))), axis = 1)
        dAL = - (cp.divide(Y, AL) - cp.divide(1 - Y, 1 - AL))
    elif mode == 'SEL':
        cost = (1/(2*m))*cp.sum((AL - Y)**2, axis = 1)
        dAL = AL - Y
        
    cost = cp.squeeze(cost)
    
    return cost, dAL
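A quick usage sketch with random data (assumes `import cupy as cp`):

AL = cp.random.uniform(0.1, 0.9, size=(3, 5))       # 3 outputs, 5 examples
Y = (cp.random.uniform(size=(3, 5)) > 0.5).astype(cp.float64)
sel_cost, sel_dAL = cost(AL, Y, mode='SEL')  # squared-error loss per output
xc_cost, xc_dAL = cost(AL, Y, mode='XC')     # cross-entropy loss per output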
Example #6
def GlobalReg(X, T, sigma2, outliers):
    """
    :params:
    :return
    """
    [N, D] = X.shape
    M = T.shape[0]

    # Calculate P matrix
    # Nominator of P
    P_num = cp.sum((X[None, :, :] - T[:, None, :])**2, axis=2)
    P_num = cp.exp(-P_num / (2 * sigma2))
    # Denominator of P
    P_den = cp.sum(P_num, axis=0)
    P_den = cp.tile(P_den, (M, 1))
    P_den[P_den == 0] = 2.220446049250313e-16
    c = (((2 * cp.pi * sigma2)**(D / 2)) * (outliers / (1 - outliers)) *
         (M / N))  # uniform-outlier term; note the exponent is D / 2
    P_den += c

    P = cp.divide(P_num, P_den)

    P1 = cp.sum(P, axis=1)
    Pt1 = cp.sum(P, axis=0)

    c1 = c * cp.ones(N)
    K1 = cp.dot(cp.transpose(P_num), cp.ones(M))
    a = cp.tile(cp.divide(1, K1 + c1).reshape(N, 1), D)
    Px = cp.dot(P_num, (cp.multiply(a, X)))

    return P1, Pt1, Px
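A hypothetical call with two random point clouds (shapes follow the code above; the values are purely illustrative):

X = cp.random.rand(100, 3)  # N x D data points
T = cp.random.rand(80, 3)   # M x D template points
P1, Pt1, Px = GlobalReg(X, T, sigma2=0.05, outliers=0.1)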
Example #7
def _calc_offset(offset_map, region, parent_y, parent_x):
    y0, y1, x0, x1 = region
    x_area, y_area = offset_map[:, y0:y1, x0:x1]
    x_vec = np.power(np.subtract(np.arange(x0, x1), parent_x), 2)
    y_vec = np.power(np.subtract(np.arange(y0, y1), parent_y), 2)
    xv, yv = np.meshgrid(x_vec, y_vec)
    dist = np.sqrt(xv + yv)  # Euclidean distance: sqrt(dx^2 + dy^2)
    xv = np.divide(xv, dist)  # squared x-offsets scaled by 1 / dist
    yv = np.divide(yv, dist)  # squared y-offsets scaled by 1 / dist
    offset_map[0, y0:y1, x0:x1] = np.maximum(x_area, xv)
    offset_map[1, y0:y1, x0:x1] = np.maximum(y_area, yv)
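A minimal usage sketch (assumes `import numpy as np`; the parent point is placed outside the region so `dist` never hits zero):

offset_map = np.zeros((2, 64, 64))
_calc_offset(offset_map, region=(10, 30, 10, 30), parent_y=5, parent_x=5)
# offset_map[0] and offset_map[1] now hold the per-pixel offset features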
Example #8
	def forward(self, bottom, top):
		self.label = cp.asarray(copy.deepcopy(bottom[1].data),cp.uint8)
		prob = cp.asarray(copy.deepcopy(bottom[0].data),cp.float64)
		prob = cp.subtract(prob,cp.max(prob,axis=1)[:,cp.newaxis,...])
		prob = cp.exp(prob)
		self.softmax = cp.divide(prob,cp.sum(prob,axis=1)[:,cp.newaxis,...])

		## mask
		self.weight_mask = cp.ones_like(self.label, cp.float64)
		for weight_id in self.weight_dic:
			self.weight_mask[self.label == weight_id] = self.weight_dic[weight_id]

		if self.has_ignore_label:
			self.weight_mask[self.label == self.ignore_label] = 0
		# num_total = 15422668800
		# empty_num = 3679002314
		# road_num = 10565335603
		# ped_num = 99066996
		# car_num = 995347874
		#self.label[self.label == 3] = 2
		# w_empty = float((num_total-empty_num)/num_total)
		# w_road = float((num_total-road_num)/num_total)
		# w_ped = float((num_total-ped_num)/num_total)
		# w_car = float((num_total-car_num)/num_total)
		# print(w_empty)
		# print(w_road)
		# print(w_ped)
		# print(w_car)
		# empty:0.3
		# road:0.25

		self.weight_mask[self.label == 0] = 0.3
		self.weight_mask[self.label == 1] = 0.25
		self.weight_mask[self.label == 3] = 0.5

		# self.weight_mask[self.label == 2] = w_ped
		# self.weight_mask[self.label == 4] = w_car



		compute_count = self.weight_mask[self.weight_mask != 0].size


		## normalize mask
		self.weight_mask = cp.divide(self.weight_mask, cp.divide(cp.sum(self.weight_mask), compute_count))


		## compute loss
		prob_compute_matrix = copy.deepcopy(self.softmax[self.index_0,self.label,self.index_2,self.index_3])
		prob_compute_matrix[prob_compute_matrix < (1e-10)] = 1e-10
		loss = - cp.divide(cp.sum(cp.multiply(cp.log(prob_compute_matrix),self.weight_mask)),compute_count)

		loss = cp.asnumpy(loss)
		top[0].data[...] = loss
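Outside of the Caffe layer machinery, the same weighted softmax cross-entropy can be sketched as a plain function (a simplification of the layer above; `cp.take_along_axis` stands in for the `self.index_*` fancy indexing):

def weighted_softmax_loss(scores, labels, weight_mask):
    # scores: (N, C, H, W) logits; labels: (N, H, W) ints; weight_mask: (N, H, W)
    p = cp.exp(scores - cp.max(scores, axis=1, keepdims=True))
    p /= cp.sum(p, axis=1, keepdims=True)
    picked = cp.take_along_axis(p, labels[:, None, :, :], axis=1)[:, 0]
    picked = cp.clip(picked, 1e-10, None)  # same floor as the layer above
    count = weight_mask[weight_mask != 0].size
    return -cp.sum(cp.log(picked) * weight_mask) / count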
Example #9
def imgmodpha_gpu(img):
    # Compute the normalized FFT modulus and phase of an image on the GPU.
    [xsize, ysize] = img.shape
    cp.cuda.Device(0).use()
    img = cp.array(img, dtype='<f4')
    sp = cp.divide(cp.divide(cp.fft.fftshift(cp.fft.fft2(img)), xsize), ysize)
    modul = cp.abs(sp)
    phase = cp.angle(sp)

    return modul, phase
Example #10
def run_cupy(lat2, lon2):
    import math  # math.cos is used on the scalar lat1 below
    import cupy as cp

    # Allocate temporary arrays
    size = len(lat2)
    a = cp.empty(size, dtype='float64')
    dlat = cp.empty(size, dtype='float64')
    dlon = cp.empty(size, dtype='float64')

    # Transfer inputs to the GPU
    lat2 = cp.array(lat2)
    lon2 = cp.array(lon2)

    # Begin computation
    lat1 = 0.70984286
    lon1 = 1.23892197
    MILES_CONST = 3959.0

    cp.subtract(lat2, lat1, out=dlat)
    cp.subtract(lon2, lon1, out=dlon)

    # dlat = sin(dlat / 2.0) ** 2.0
    cp.divide(dlat, 2.0, out=dlat)
    cp.sin(dlat, out=dlat)
    cp.multiply(dlat, dlat, out=dlat)

    # a = cos(lat1) * cos(lat2)
    lat1_cos = math.cos(lat1)
    cp.cos(lat2, out=a)
    cp.multiply(a, lat1_cos, out=a)

    # a = sin(dlat / 2.0) ** 2.0 + a * sin(dlon / 2.0) ** 2.0
    cp.divide(dlon, 2.0, out=dlon)
    cp.sin(dlon, out=dlon)
    cp.multiply(dlon, dlon, out=dlon)
    cp.multiply(a, dlon, out=a)
    cp.add(dlat, a, out=a)

    c = a  # c aliases a; the remaining in-place ops reuse the same buffer
    cp.sqrt(a, out=a)
    cp.arcsin(a, out=a)
    cp.multiply(a, 2.0, out=c)

    mi = c
    cp.multiply(c, MILES_CONST, out=mi)

    # Transfer outputs back to CPU
    a = cp.asnumpy(a)

    return a
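A usage sketch (assumes `import numpy as np`; coordinates are in radians):

lat2 = np.random.uniform(0.5, 0.9, 1000)   # radians
lon2 = np.random.uniform(1.0, 1.5, 1000)
miles = run_cupy(lat2, lon2)  # haversine distance to the fixed (lat1, lon1)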
Example #11
def ruzicka_mat(matrix_a, vector_new):
    matrix_a *= cp.arange(1023, -1, -1, dtype=cp.uint16)
    min_up = cp.minimum(cp.array(matrix_a), vector_new)
    max_down = cp.maximum(cp.array(matrix_a), vector_new)
    numerator = cp.sum(min_up, axis=1)
    denominator = cp.sum(max_down, axis=1)
    return cp.asnumpy(cp.divide(numerator, denominator))
Example #12
def _divide_nonzero(array1, array2, cval=1e-10):
    """
    Divides two arrays.

    Denominator is set to small value where zero to avoid ZeroDivisionError and
    return finite float array.

    Parameters
    ----------
    array1 : (N, ..., M) ndarray
        Numerator array.
    array2 : (N, ..., M) ndarray
        Denominator array.
    cval : float, optional
        Value used to replace zero entries in the denominator.

    Returns
    -------
    array : (N, ..., M) ndarray
        Quotient of the array division.
    """

    # Copy denominator
    denominator = cp.copy(array2)

    # Set zero entries of denominator to small value
    denominator[denominator == 0] = cval

    # Return quotient
    return cp.divide(array1, denominator)
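For example (assumes `import cupy as cp`):

a = cp.array([1.0, 2.0, 3.0])
b = cp.array([2.0, 0.0, 4.0])
_divide_nonzero(a, b)  # zero denominator replaced by 1e-10 -> [0.5, 2e10, 0.75]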
Example #13
def apply_kernel(moments,
                 kernel,
                 num_moments,
                 num_vecs,
                 extra_points=1,
                 precision=32):
    """
    Parameters
    ----------
    Apply a given kernel in a given array of moments.
    Return the cosine transform of type III.
    """

    num_points = extra_points + num_moments

    if kernel is not None:
        moments = moments * kernel

    mu_ext = cp.zeros(num_points, dtype=moments.dtype)
    mu_ext[0:num_moments] = moments

    smooth_moments = dctIII(mu_ext, precision)
    points = cp.arange(0, num_points)
    ek = cp.cos(cp.pi * (points + 0.5) / num_points)
    gk = cp.pi * cp.sqrt(1. - ek**2)

    rho = cp.divide(smooth_moments, gk)

    return ek, rho
Example #14
def initialize(m, n, d):
    W = cp.random.normal(size=(m, d))
    y = cp.random.normal(size=n)
    a = 2*cp.around(cp.random.uniform(size=m))-cp.ones(m)
    x_tmp = cp.random.uniform(size=(n, d))
    x = cp.divide(x_tmp, cp.linalg.norm(x_tmp))
    return x, W, a, y
Example #15
    def gradient(self, x0, y_true):
        def func(a, t, params, A, function, bT, x, division):
            index = int(t * (division - 1))
            return cp.multiply(
                -1.,
                cp.add(
                    cp.dot(a, params[1][index]),
                    cp.dot(
                        cp.multiply(
                            bT,
                            cp.multiply(params[0][index],
                                        function(cp.dot(x[index], A.T)))), A)))

        n_data = len(x0)
        y_pred = self(x0)
        aT = cp.zeros_like(x0, dtype=cp.float32)
        bT = cp.divide(cp.subtract(y_pred, y_true), n_data)
        a = euler(func,
                  aT,
                  self.t[::-1],
                  args=(self.params, self.A, self.d_function, bT, self.x,
                        self.division))
        g_alpha = cp.sum(
            cp.multiply(bT, self.function(cp.dot(self.x, self.A.T))), 1)
        g_beta = cp.einsum("ilj,ilk->ijk", a[::-1], self.x)
        g_gamma = cp.sum(a[::-1], 1)
        return (g_alpha, g_beta, g_gamma)
Example #16
 def getMix_tau(self):
     ones = cp.ones((H, W))[self.mask]
     v1 = Eta_n / self.rho
     v2 = Eta_n * M / self.rho
     mix_v = cp.divide(2 * v1 * v2, (v1 * (ones - self.psi[self.mask]) + v2 * (ones + self.psi[self.mask])))
     mix_tau = 3 * mix_v + 0.5
     #print(mix_tau.mean())
     return mix_tau
Example #17
def divide(numerator, denominator):
    """divide a tensor by another

    Args:
        numerator (ndarray): numerator tensor.
        denominator (ndarray): denominator tensor.
    """
    return cp.divide(numerator, denominator)
Example #18
 def _forward(self, inputs, is_training=True):
     shiftx = inputs - cp.max(inputs)
     exps = cp.exp(shiftx)  # compute the exponentials once
     outputs = cp.divide(exps, cp.sum(exps, axis=-1, keepdims=True))
     del inputs
     if is_training:
         self.outputs = outputs
     return outputs
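The max-shift keeps `cp.exp` from overflowing without changing the result; the same numerically stable softmax as a standalone sketch, here with a per-row maximum:

def stable_softmax(x):
    e = cp.exp(x - cp.max(x, axis=-1, keepdims=True))  # shift for stability
    return e / cp.sum(e, axis=-1, keepdims=True)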
Example #19
def ruzicka_vec(vector_old, vector_new):
    vector_old_cp = cp.array(vector_old) * cp.arange(
        1023, -1, -1, dtype=cp.uint16)
    min_up = cp.minimum(vector_old_cp, vector_new)
    max_down = cp.maximum(vector_old_cp, vector_new)
    numerator = cp.sum(min_up, axis=1)
    denominator = cp.sum(max_down, axis=1)
    return cp.asnumpy(cp.divide(numerator, denominator))
Example #20
def divide(numerator, denominator, dtype=None):
    """divide a tensor by another

    Args:
        numerator (ndarray): numerator tensor.
        denominator (ndarray): denominator tensor.
        dtype (dtype): type of the returned tensor.
    """
    return cp.divide(numerator, denominator, dtype=dtype)
Example #21
def pulse_compression(x, template, normalize=False, window=None, nfft=None):
    """
    Pulse Compression is used to increase the range resolution and SNR
    by performing matched filtering of the transmitted pulse (template)
    with the received signal (x)

    Parameters
    ----------
    x : ndarray
        Received signal, assume 2D array with [num_pulses, sample_per_pulse]

    template : ndarray
        Transmitted signal, assume 1D array

    normalize : bool
        Normalize transmitted signal

    window : array_like, callable, string, float, or tuple, optional
        Specifies the window applied to the signal in the Fourier
        domain.

    nfft : int, optional
        Size of FFT for pulse compression. Default is the number of
        samples per pulse.

    Returns
    -------
    compressedIQ : ndarray
        Pulse compressed output
    """
    [num_pulses, samples_per_pulse] = x.shape

    if nfft is None:
        nfft = samples_per_pulse

    if window is not None:
        Nx = len(template)
        if callable(window):
            W = window(cp.fft.fftfreq(Nx))
        elif isinstance(window, cp.ndarray):
            if window.shape != (Nx, ):
                raise ValueError("window must have the same length as data")
            W = window
        else:
            W = get_window(window, Nx, False)

        template = cp.multiply(template, W)

    if normalize:
        template = cp.divide(template, cp.linalg.norm(template))

    fft_x = cp.fft.fft(x, nfft)
    fft_template = cp.conj(cp.tile(cp.fft.fft(template, nfft),
                                   (num_pulses, 1)))
    compressedIQ = cp.fft.ifft(cp.multiply(fft_x, fft_template), nfft)

    return compressedIQ
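A hypothetical call with a synthetic linear-FM chirp (all values illustrative):

num_pulses, num_samples = 64, 1000
t = cp.linspace(0.0, 1e-4, num_samples)
template = cp.exp(1j * 2 * cp.pi * 1e7 * t**2)   # LFM chirp
x = cp.tile(template, (num_pulses, 1)) + 0.1 * cp.random.randn(num_pulses, num_samples)
compressedIQ = pulse_compression(x, template, normalize=True, window='hamming')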
Example #22
    def calculate_survival_chance(self):

        # First, fetch the fitness scores
        scores = self.scores

        # Rank in ascending order: a higher rank number means higher fitness
        order = cp.argsort(cp.argsort(scores))

        # probs[i] is the probability that individual i is selected into the next generation
        self.probs = cp.divide(order, cp.sum(order))
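The double `argsort` turns raw scores into ranks; a small worked example (assumes `import cupy as cp`):

scores = cp.array([0.1, 0.5, 0.3])
order = cp.argsort(cp.argsort(scores))   # ranks: [0, 2, 1]
probs = cp.divide(order, cp.sum(order))  # [0.0, 0.667, 0.333]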
Example #23
	def backward(self, top, propagate_down, bottom):

		if propagate_down[0]:

			bottom_diff = copy.deepcopy(self.softmax)
			bottom_diff[self.index_0, self.label, self.index_2, self.index_3] -= 1
			bottom_diff = cp.multiply(bottom_diff, self.weight_mask)
			bottom_diff = cp.divide(bottom_diff, self.weight_mask[self.weight_mask != 0].size)

			bottom_diff = cp.asnumpy(bottom_diff)
			bottom[0].diff[...] = bottom_diff
Example #24
 def __estimate_one_step(self, threshold: int):
     """
     Estimate the solution having a given threshold value and keeping only the singular values above these threshold.
     Values smaller than numerical zero are always discarded.
     :param threshold: Value specifying the smallest singular value (sorted in descending order) to keep. All singular
     values smaller than given are discarded.
     :type threshold: int
     """
     self.current = cp.matmul(self.__V.T[:, :threshold],
                              cp.matmul(cp.diag(cp.divide(1, self.__D[:threshold])),
                                        cp.matmul(self.__U[:, :threshold].T, self.q_estimator)))
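The same truncated-SVD pseudoinverse in standalone form (a sketch, assuming A = U @ diag(D) @ V.T has already been decomposed the way the attributes above suggest):

def tsvd_solve(A, q, threshold):
    U, D, Vt = cp.linalg.svd(A, full_matrices=False)
    return cp.matmul(Vt[:threshold].T,
                     cp.matmul(cp.diag(cp.divide(1, D[:threshold])),
                               cp.matmul(U[:, :threshold].T, q)))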
Example #25
 def step(self, model, step_num):
     # expected signal
     self.y_expected = signal.get_signal_gpu(model.solution,
                                             model.detector_geometry)
     # multiplication
     self.mult = cp.divide(self.w_det, self.y_expected)
     self.mult = cp.where(cp.isnan(self.mult), 0, self.mult)
     self.mult = cp.sum(self.mult, axis=-1)
     self.mult /= self.wi
     # apply the multiplicative update
     model.solution = model.solution * self.mult
Example #26
def run_gpu(X, eps, min_samples):
    import time
    import cupy as cp
    import cuml

    # Transfer inputs to GPU
    X = cp.array(X)

    # Begin computation
    t0 = time.time()
    mean = cp.mean(X, axis=0)
    std = cp.std(X, axis=0)
    cp.subtract(X, mean, out=X)
    cp.divide(X, std, out=X)
    print('Preprocessing:', time.time() - t0)

    # Run DBSCAN
    db = cuml.DBSCAN(eps=eps, min_samples=min_samples)
    db = db.fit(X)
    labels = db.labels_

    # Transfer outputs to CPU
    # labels = labels.to_pandas().to_numpy()
    labels = cp.asnumpy(labels)
    return labels
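A usage sketch on random data (assumes `import numpy as np`; eps and min_samples are illustrative):

X = np.random.rand(10000, 3)
labels = run_gpu(X, eps=0.5, min_samples=10)  # -1 marks noise points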
Example #27
def divide(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.divide <numpy.divide>`.

    See its docstring for more information.
    """
    if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
        raise TypeError("Only floating-point dtypes are allowed in divide")
    # Call result type here just to raise on disallowed type combinations
    _result_type(x1.dtype, x2.dtype)
    x1, x2 = Array._normalize_two_args(x1, x2)
    return Array._new(np.divide(x1._array, x2._array))
Example #28
def model_backward(AL, Y, caches):
    grads = {}
    L = len(caches)
    Y = Y.reshape(AL.shape)
    
    # Initializing the backpropagation
    dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
    
    # Backprop the output (Lth) layer, which uses sigmoid
    L_cache = caches[L-1]
    grads["dA" + str(L-1)], grads["dW" + str(L)], grads["db" + str(L)] = layer_backward(dAL, L_cache, activation = 'sigmoid')
    
    # Backprop layers L-1..1
    for l in reversed(range(1, L)):
        l_cache = caches[l - 1]
        dA_prev_temp, dW_temp, db_temp = layer_backward(grads["dA" + str(l)], l_cache, activation = 'relu')
        grads["dA" + str(l - 1)] = dA_prev_temp
        grads["dW" + str(l)] = dW_temp
        grads["db" + str(l)] = db_temp

    return grads
Example #29
 def step(self, model, step_num):
     # expected signal
     for i in range(self.y_len):
         tmp = cp.multiply(model.solution, model.detector_geometry[i])
         self.y_expected[i] = cp.sum(tmp)
     # multiplication
     self.mult = cp.divide(self.w_det, self.y_expected)
     self.mult = cp.where(cp.isnan(self.mult), 0, self.mult)
     self.mult = cp.sum(self.mult, axis=-1)
     self.mult /= self.wi
     # apply the multiplicative update
     model.solution = model.solution * self.mult
Example #30
 def __call__(self, params, g_params):
     new_params, new_v = zip(
         *[(cp.subtract(
             param,
             cp.multiply(
                 cp.divide(
                     self.rate,
                     cp.sqrt(
                         cp.add(cp.add(v, cp.square(g_param)),
                                self.eps).astype(cp.float32))), g_param)),
            cp.add(v, cp.square(g_param)))
           for param, g_param, v in zip(params, g_params, self.v)])
     self.v = new_v
     return new_params
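This is an AdaGrad-style rule; the same update written per parameter (a sketch, unpacking the zipped expression above):

def adagrad_step(param, g_param, v, rate, eps):
    v_new = cp.add(v, cp.square(g_param))  # accumulate squared gradients
    step = cp.multiply(cp.divide(rate, cp.sqrt(v_new + eps)), g_param)
    return cp.subtract(param, step), v_new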