Example No. 1
def GaussForm(AtomicData):
    # Assumes module-level globals: cupy as cp, numpy as np, pi, BoxS, apix,
    # and the reciprocal-space coordinate grids ii, jj, kk.
    # TODO: look up the scattering factor from the atom type,
    # e.g. scattering_params = etbl.get(at_type); fixed values are used for now.
    OutputArray = cp.zeros((BoxS, BoxS, BoxS), dtype=np.complex64)
    scalefac = float(BoxS * apix)
    for atom in AtomicData:
        # Crude (amplitude, width) scattering parameters per atom type
        if atom[0][0] == 'H':
            scattering_params = cp.array([1.0, 3.15])
        elif atom[0] == 'OCbb':
            scattering_params = cp.array([1.35, 4.15])
        else:
            scattering_params = cp.array([1.2, 3.2])
        scattering_params = scattering_params / scalefac
        coords = atom[1:]
        center = cp.array(
            [coords[0] / apix, coords[1] / apix, coords[2] / apix])
        s = float(1 / scattering_params[1])
        ampl = float((1 / cp.sqrt(cp.power(2 * pi, 3))) * (1 / cp.power(s, 3)))
        # Add the atom as a Gaussian in reciprocal space: a Gaussian envelope
        # multiplied by a phase ramp that encodes the atom's position.
        OutputArray += float(scattering_params[0]) * cp.fft.ifftshift(
            ampl * cp.exp(
                -cp.power(pi, 2)
                * (cp.power(ii, 2) + cp.power(jj, 2) + cp.power(kk, 2))
                / (2 * cp.power(s, 2))
                - (2 * pi) * 1j
                * (ii * center[0] + jj * center[1] + kk * center[2])))
        # Release per-atom temporaries so CuPy can reuse GPU memory
        ampl, s, center, coords, scattering_params = None, None, None, None, None
    OutputArray = cp.asnumpy(OutputArray)
    return OutputArray
Example No. 2
def optimize(self, dw):
    if self.opt == 'momentum':
        # self.V holds the previous update; the caller is expected to store
        # the returned value back into self.V to accumulate velocity.
        return 0.6 * self.V - self.lr * dw
    elif self.opt == 'adagrad':
        self.Acc += dw * dw
        return -self.lr * dw / (cp.sqrt(self.Acc) + 1e-7)
    elif self.opt == 'rmsprop':
        decay = 0.2
        self.Acc = decay * self.Acc + (1 - decay) * dw * dw
        return -self.lr * dw / (cp.sqrt(self.Acc) + 1e-7)
    elif self.opt == 'adam':
        beta1 = 0.6
        beta2 = 0.2
        self.mo = beta1 * self.mo + (1 - beta1) * dw
        self.Acc = beta2 * self.Acc + (1 - beta2) * dw * dw
        return -self.lr * self.mo / (cp.sqrt(self.Acc) + 1e-7)
    elif self.opt == 'nadam':
        self.t += 1
        beta1 = 0.6
        beta2 = 0.2
        self.mo = beta1 * self.mo + (1 - beta1) * dw
        self.Acc = beta2 * self.Acc + (1 - beta2) * dw * dw
        # Bias-correct into temporaries so the running averages themselves
        # are not overwritten, and use beta2 for the second moment.
        mo_hat = self.mo / (1 - cp.power(beta1, self.t))
        acc_hat = self.Acc / (1 - cp.power(beta2, self.t))
        return -self.lr * mo_hat / (cp.sqrt(acc_hat) + 1e-7)
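The method above depends on optimizer state created elsewhere in its class. A minimal sketch of what that state could look like (the attribute names lr, opt, V, mo, Acc and t are taken from the snippet; the constructor and its defaults are assumptions):

import cupy as cp

class SGDVariants:
    """Container for the state that optimize() above expects."""

    def __init__(self, lr=0.01, opt='adam'):
        self.lr = lr      # learning rate
        self.opt = opt    # one of 'momentum', 'adagrad', 'rmsprop', 'adam', 'nadam'
        self.V = 0.0      # previous update (momentum)
        self.mo = 0.0     # first-moment estimate (adam / nadam)
        self.Acc = 0.0    # accumulated / decayed squared gradients
        self.t = 0        # step counter (nadam bias correction)

    optimize = optimize  # bind the function shown above as a method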
Example No. 3
def yangDistributionDifference(aNeg, bNeg, aPos, bPos, p=1):
    """
	Eq. (7) from :

	Yang, R., Jiang, Y., Mathews, S. et al.
	Data Min Knowl Disc (2019) 33: 995.
	https://doi.org/10.1007/s10618-019-00622-6
	"""
    sampleSize = 1000
    negSample = xp.random.beta(aNeg, bNeg, sampleSize)
    posSample = xp.random.beta(aPos, bPos, sampleSize)
    negPDF_NEG, posPDF_NEG, pdfDiffPos_NEG, pdfDiffNeg_NEG, pdfMax_NEG = calcDifference(
        negSample, aNeg, bNeg, aPos, bPos)
    negPDF_POS, posPDF_POS, pdfDiffPos_POS, pdfDiffNeg_POS, pdfMax_POS = calcDifference(
        posSample, aNeg, bNeg, aPos, bPos)
    numerator1 = xp.mean(pdfDiffNeg_NEG / negPDF_NEG)
    numerator2 = xp.mean(pdfDiffPos_POS / posPDF_POS)
    sumVecs = xp.power(numerator1,
                       xp.ones_like(numerator1) * p) + xp.power(
                           numerator2,
                           xp.ones_like(numerator2) * p)
    dPHat = xp.power(sumVecs, xp.ones_like(sumVecs) * (1 / p))
    dTermNeg = (posPDF_NEG * 0.5) + (negPDF_NEG * 0.5)
    dTermPos = (posPDF_POS * 0.5) + (negPDF_POS * 0.5)
    denominator = (xp.sum(pdfMax_NEG / dTermNeg) +
                   xp.sum(pdfMax_POS / dTermPos)) / (2 * sampleSize)
    return dPHat / denominator
Example No. 4
    def update(self, w, grad_wrt_w):
        # If not initialized
        if self.w_updt is None:
            self.w_updt = cupy.zeros(np.shape(w))
            self.E_w_updt = cupy.zeros(np.shape(w))
            self.E_grad = cupy.zeros(np.shape(grad_wrt_w))

        # Update average of gradients at w
        self.E_grad = self.rho * self.E_grad + (1 - self.rho) * cupy.power(
            grad_wrt_w, 2)

        RMS_delta_w = cupy.sqrt(self.E_w_updt + self.eps)
        RMS_grad = cupy.sqrt(self.E_grad + self.eps)

        # Adaptive learning rate
        adaptive_lr = RMS_delta_w / RMS_grad

        # Calculate the update
        self.w_updt = adaptive_lr * grad_wrt_w

        # Update the running average of w updates
        self.E_w_updt = self.rho * self.E_w_updt + (1 - self.rho) * cupy.power(
            self.w_updt, 2)

        return w - self.w_updt
Example No. 5
def compute_gain(sound, fs, min_db=-80.0, mode='A_weighting'):
    if fs == 16000:
        n_fft = 2048
    elif fs == 44100:
        n_fft = 4096
    else:
        raise Exception('Invalid fs {}'.format(fs))
    stride = n_fft // 2

    gain = None
    for i in range(0, len(sound[0]) - n_fft + 1, stride):
        if mode == 'RMSE':
            g = cupy.mean(sound[:, i:i + n_fft]**2, axis=1)
        elif mode == 'A_weighting':
            spec = cupy.fft.rfft(
                cupy.hanning(n_fft + 1)[:-1] * sound[:, i:i + n_fft])
            power_spec = cupy.abs(spec)**2
            a_weighted_spec = power_spec * cupy.power(10,
                                                      a_weight(fs, n_fft) / 10)
            g = cupy.sum(a_weighted_spec, axis=1)
        else:
            raise Exception('Invalid mode {}'.format(mode))
        if i == 0:
            gain = g.reshape([-1, 1])
        else:
            gain = cupy.concatenate((gain, g.reshape([-1, 1])), axis=1)

    gain = cupy.maximum(gain, cupy.power(10, min_db / 10))
    gain_db = 10 * cupy.log10(gain)

    return gain_db
Example No. 6
def vander(x, N=None, increasing=False):
    """Returns a Vandermonde matrix.

    Args:
        x (array-like): 1-D array or array-like object.
        N (int, optional): Number of columns in the output.
            ``N = len(x)`` by default.
        increasing (bool, optional): Order of the powers of the columns.
            If True, the powers increase from left to right,
            if False (the default) they are reversed.

    Returns:
        cupy.ndarray: A Vandermonde matrix.

    .. seealso:: :func:`numpy.vander`

    """
    x = cupy.asarray(x)
    if x.ndim != 1:
        raise ValueError("x must be a one-dimensional array or sequence.")
    if N is None:
        N = len(x)

    v = cupy.empty((len(x), N), dtype=numpy.promote_types(x.dtype, int))
    tmp = v[:, ::-1] if not increasing else v

    cupy.power(x.reshape(-1, 1), cupy.arange(N), out=tmp)

    return v
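A quick illustrative usage (not part of the original source); with the default decreasing order the columns are x**2, x**1, x**0:

x = cupy.array([1, 2, 3])
print(vander(x, 3))
# [[1 1 1]
#  [4 2 1]
#  [9 3 1]]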
Example No. 7
    def gpu_gaussian(self, a, b, s):
        km = cp.empty(shape=(a.shape[0], b.shape[0]), dtype=a.dtype)
        km = cp.multiply(cp.dot(a, b.T, out=km), -2, out=km)
        km += cp.power(a, 2).sum(axis=1).reshape(-1, 1)
        km += cp.power(b, 2).sum(axis=1)

        cp.multiply(km, -1 / (2 * s * s), out=km)
        cp.exp(km, out=km)
        return km
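gpu_gaussian builds the RBF (Gaussian) kernel matrix from the expansion ||a_i - b_j||^2 = ||a_i||^2 + ||b_j||^2 - 2 a_i . b_j, so all pairwise distances come from a single matrix product instead of an explicit double loop. A small standalone sketch of the same trick, checked against the direct formula (illustrative; gaussian_kernel is not part of the original source):

import cupy as cp

def gaussian_kernel(a, b, s):
    # Squared pairwise distances via the dot-product expansion
    sq = cp.power(a, 2).sum(axis=1)[:, None] + cp.power(b, 2).sum(axis=1) - 2 * a.dot(b.T)
    return cp.exp(-sq / (2 * s * s))

a = cp.random.rand(4, 3)
b = cp.random.rand(5, 3)
direct = cp.exp(-cp.sum((a[:, None, :] - b[None, :, :]) ** 2, axis=2) / (2 * 0.7 ** 2))
print(cp.allclose(gaussian_kernel(a, b, 0.7), direct))  # True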
Example No. 8
def _calc_offset(offset_map, region, parent_y, parent_x):
    y0, y1, x0, x1 = region
    x_area, y_area = offset_map[:, y0:y1, x0:x1]
    x_vec = np.power(np.subtract(np.arange(x0, x1), parent_x), 2)
    y_vec = np.power(np.subtract(np.arange(y0, y1), parent_y), 2)
    xv, yv = np.meshgrid(x_vec, y_vec)
    dist = np.sqrt(xv + yv)  # sqrt(y^2 + x^2)
    xv = np.divide(xv, dist)  # squared x-offset scaled by the distance
    yv = np.divide(yv, dist)  # squared y-offset scaled by the distance
    offset_map[0, y0:y1, x0:x1] = np.maximum(x_area, xv)
    offset_map[1, y0:y1, x0:x1] = np.maximum(y_area, yv)
Example No. 9
 def create_col(self, num_rows, dtype=np.float32, min_val=0, max_val=1):
     gamma = 1 - self.alpha
     # draw uniform samples in [0.0, 1.0); zero output values are reserved to represent unknown, null, None
     ser = _make_df(cupy.random.uniform(0.0, 1.0, size=num_rows))[0]
     factor = cupy.power(max_val, gamma) - cupy.power(min_val, gamma)
     ser = (ser * factor.item()) + cupy.power(min_val, gamma).item()
     exp = 1.0 / gamma
     ser = ser.pow(exp)
     # replace zeroes saved for unknown
     # add in nulls if requested
     # select indexes
     return ser.astype(dtype)
Example No. 10
    def weibull(self, a, size=None, dtype=float):
        """Returns an array of samples drawn from the weibull distribution.

        .. seealso::
            :func:`cupy.random.weibull` for full documentation,
            :meth:`numpy.random.RandomState.weibull`
        """
        a = cupy.asarray(a)
        if cupy.any(a < 0):
            raise ValueError("a < 0")
        x = self.standard_exponential(size, dtype)
        cupy.power(x, 1. / a, out=x)
        return x
Example No. 11
def getDistance(y1, x1, y2, x2):
    """
        Get the distance between corresponding points of two functions

        Parameters:
            1. y1, x1 : Y and X values of the baseline function
            2. y2, x2 : Y and X values of the test function

        RETURN -> Float: dist
    """
    # `np` is assumed to be CuPy here (NumPy has no asnumpy)
    res = np.sqrt(np.power((x1 - x2), 2) + np.power((y1 - y2), 2))
    res = np.asnumpy(res).tolist()
    return res
Example No. 12
 def getBiasChange(DELTA, LayerIndex):
     biasgradient = np.dot(np.ones((1, InputData.shape[0])),
                           DELTA[LayerIndex + 1])
     biassquaresum[LayerIndex] += np.power(biasgradient, 2)
     biaschange = -speed * biasgradient / (
         np.sqrt(biassquaresum[LayerIndex]) + epsilon)
     return biaschange
Example No. 13
    def backward(self):
        grads = self.grads
        if len(self.input_shape) == 4:
            N, C, H, W = self.input_shape
            grads = grads.transpose(0, 3, 2, 1).reshape((N * H * W, C))

        gamma, beta = self.variables
        xmu, sqrtvar, normalized_x = self.cache
        if beta.require_grads:
            beta.grads += cp.sum(grads, axis=0)
        if gamma.require_grads:
            gamma.grads += cp.sum(grads * normalized_x, axis=0)

        N = normalized_x.shape[0]
        dnormalized_x = grads * gamma.output_tensor
        dvar = cp.sum(cp.power(-1. / sqrtvar, 3) * xmu * dnormalized_x * 0.5,
                      axis=0)
        dmean = cp.sum(-dnormalized_x / sqrtvar,
                       axis=0) - 2 * dvar * cp.mean(xmu, axis=0)
        outputs = dnormalized_x / sqrtvar + dvar * 2 * xmu / N + dmean / N
        if len(self.input_shape) == 4:
            N, C, H, W = self.input_shape
            outputs = outputs.reshape(N, W, H, C).transpose(0, 3, 2, 1)
        for layer in self.inbounds:
            if layer.require_grads:
                layer.grads += outputs
            else:
                layer.grads = grads
Example No. 14
 def test_randn(self):
     ret = util.randn(int(1e6))
     meanPower = xp.mean(xp.power(xp.abs(ret), 2))
     self.assertAlmostEqual(meanPower,
                            1.0,
                            places=2,
                            msg="The mean power of randn differs from 1.0")
Example No. 15
def binary_to_decimal(X):
    """
    | This function takes :code:`X` of shape (n_images, L2, y, x) as an argument.
    | Suppose that :code:`X[k]` (0 <= k < n_images) can be represented as

    .. code-block:: none

        X[k] = [map_k[0], map_k[1], ..., map_k[L2-1]]

    where the shape of each map_k is (y, x).

    Then we calculate

    .. code-block:: none

        a[0] * map_k[0] + a[1] * map_k[1] + ... + a[L2-1] * map_k[L2-1]

    for each :code:`X[k]`, where :math:`a = [2^{L2-1}, 2^{L2-2}, ..., 2^{0}]`

    Therefore, the output shape must be (n_images, y, x)

    Parameters
    ----------
    X: xp.ndarray
        Feature maps
    """
    a = xp.arange(X.shape[1])[::-1]
    a = xp.power(2, a)
    return xp.tensordot(X, a, axes=([1], [0]))
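A tiny usage sketch (illustrative; xp is assumed to be numpy or cupy): with L2 = 3 maps the weights are [4, 2, 1], so each pixel's stack of binary values is read as a 3-bit number.

import numpy as xp  # or: import cupy as xp

X = xp.array([[[[1, 0]], [[0, 1]], [[1, 1]]]])  # shape (n_images=1, L2=3, y=1, x=2)
print(binary_to_decimal(X))  # [[[5 3]]]  (1,0,1 -> 5 and 0,1,1 -> 3)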
Example No. 16
 def predict_normalized(self, x):  # x:shape=[2]
     Psi = cp.exp(-cp.sum(self.theta * cp.power(
         (cp.abs(self.X - x[cp.newaxis, :])), self.pl),
                          axis=1))
     ccc = Psi.T.dot(self.bbb)
     fff = self.mu + ccc
     return fff
Example No. 17
def mix(sound1, sound2, r, fs):
    gain1 = cupy.max(compute_gain(sound1.data, fs), axis=1)  # Decibel
    gain2 = cupy.max(compute_gain(sound2.data, fs), axis=1)
    t = 1.0 / (1 + cupy.power(10, (gain1 - gain2) / 20.) * (1 - r) / r)
    sound = ((sound1 * t[:, None] + sound2 * (1 - t[:, None])) /
             cupy.sqrt(t[:, None]**2 + (1 - t[:, None])**2))

    return sound
Example No. 18
    def power(self, a, size=None, dtype=float):
        """Returns an array of samples drawn from the power distribution.

        .. seealso::
            :func:`cupy.random.power` for full documentation,
            :meth:`numpy.random.RandomState.power`
        """
        a = cupy.asarray(a)
        if cupy.any(a < 0):
            raise ValueError('a < 0')
        if size is None:
            size = a.shape
        x = self.standard_exponential(size=size, dtype=dtype)
        cupy.exp(-x, out=x)
        cupy.add(1, -x, out=x)
        cupy.power(x, 1. / a, out=x)
        return x
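The sampler above is inverse-transform sampling: with E ~ Exp(1), 1 - exp(-E) is uniform on [0, 1), and U**(1/a) has the power-function density a*x**(a-1) on [0, 1], whose mean is a/(a+1). A quick numerical check (illustrative, not part of the original source):

import cupy

a = 3.0
e = cupy.random.standard_exponential(100000)
x = cupy.power(1 - cupy.exp(-e), 1.0 / a)
print(float(x.mean()), a / (a + 1))  # both approximately 0.75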
Example No. 19
    def weibull(self, a, size=None, dtype=float):
        """Returns an array of samples drawn from the weibull distribution.

        .. warning::

            This function may synchronize the device.

        .. seealso::
            - :func:`cupy.random.weibull` for full documentation
            - :meth:`numpy.random.RandomState.weibull`
        """
        a = cupy.asarray(a)
        if cupy.any(a < 0):  # synchronize!
            raise ValueError('a < 0')
        x = self.standard_exponential(size, dtype)
        cupy.power(x, 1. / a, out=x)
        return x
Example No. 20
    def update(self, layers):
        # Initialize the moment dictionaries if they are empty
        if not len(self.m) or not len(self.v):
            for i, layer in enumerate(layers):
                g = layer.get_gradient()
                if not g:
                    continue
                dw, db = g
                dw_idx = "dw" + str(i)
                db_idx = "db" + str(i)

                self.m[dw_idx] = np.zeros_like(dw)
                self.m[db_idx] = np.zeros_like(db)
                self.v[dw_idx] = np.zeros_like(dw)
                self.v[db_idx] = np.zeros_like(db)

        # Pull weights and gradients from each layer
        for i, layer in enumerate(layers):
            weights, gradients = layer.get_weight(), layer.get_gradient()
            if weights is None or gradients is None:
                continue
            (w, b) = weights
            (dw, db) = gradients

            # Dictionary keys for this layer
            dw_idx = "dw" + str(i)
            db_idx = "db" + str(i)

            self.m[dw_idx] = self.beta_1 * self.m[dw_idx] + (1 -
                                                             self.beta_1) * dw
            self.m[db_idx] = self.beta_1 * self.m[db_idx] + (1 -
                                                             self.beta_1) * db

            self.v[dw_idx] = self.beta_2 * self.v[dw_idx] + (
                1 - self.beta_2) * np.power(dw, 2)
            self.v[db_idx] = self.beta_2 * self.v[db_idx] + (
                1 - self.beta_2) * np.power(db, 2)
            # TODO: the step count should later be used to bias-correct the moment estimates
            dw = self.m[dw_idx] / (np.sqrt(self.v[dw_idx]) + self.eps)
            db = self.m[db_idx] / (np.sqrt(self.v[db_idx]) + self.eps)

            weight = layer.weights - self.lr * dw
            bias = layer.bias - self.lr * db

            layer.set_weight(weight, bias)
Example No. 21
def GaussForm(AtomicData, HParams):
    OutputArray = cp.zeros((340, 340, 340))
    OutputArray = cp.array(OutputArray, dtype=np.complex64)
    for atom in AtomicData:
        if (atom[0][0] == 'H'):
            scattering_params = cp.array(HParams)
        else:
            scattering_params = cp.array([1, 3])
        coords = atom[1:]
        center = cp.array(
            [coords[0] / apix, coords[1] / apix, coords[2] / apix])
        s = float(1 / scattering_params[1])
        ampl = float(
            (1 / cp.sqrt(cp.power(2 * pi, 3))) * (1 / cp.power(s, 3)))
        coords = None
        OutputArray += ((float(scattering_params[0]) * cp.fft.ifftshift(
            ampl *
            cp.exp(-cp.power(pi, 2) *
                   (cp.power(ii, 2) + cp.power(jj, 2) + cp.power(kk, 2)) /
                   (2 * cp.power(s, 2)) -
                   ((2 * pi) * 1j *
                    (ii * center[0] + jj * center[1] + kk * center[2]))))))
        center = None
        # Release per-atom temporaries so CuPy can reuse GPU memory
        ampl, s, center, coords, scattering_params = None, None, None, None, None
    OutputArray = cp.asnumpy(OutputArray)
    return OutputArray
Example No. 22
def GaussForm(AtomicData, Params):
    #step = 0
    OutputArray = cp.zeros((340, 340, 340))
    OutputArray = cp.array(OutputArray, dtype=np.complex64)
    scattering_params = cp.array(Params)
    for atom in AtomicData:
        #step += 1
        #t1 = time.time()
        coords = atom[1:]
        center = cp.array(
            [coords[0] / apix, coords[1] / apix, coords[2] / apix])
        s = float(1 / scattering_params[1])
        ampl = float(
            (1 / cp.sqrt(cp.power(2 * pi, 3))) * (1 / cp.power(s, 3)))
        coords = None
        OutputArray += ((float(scattering_params[0]) * cp.fft.ifftshift(
            ampl *
            cp.exp(-cp.power(pi, 2) *
                   (cp.power(ii, 2) + cp.power(jj, 2) + cp.power(kk, 2)) /
                   (2 * cp.power(s, 2)) -
                   ((2 * pi) * 1j *
                    (ii * center[0] + jj * center[1] + kk * center[2]))))))
        center = None
        #t2 = time.time()
        #print('Atom Addition Time: ' + str(t2-t1))
        #print('Current atom: ' + str(step))
        # Release per-atom temporaries so CuPy can reuse GPU memory
        ampl, s, center, coords = None, None, None, None
    OutputArray = cp.asnumpy(OutputArray)
    #print('This is the size: ' + str(OutputArray.nbytes))
    return OutputArray
Example No. 23
 def update(self, w, grad_wrt_w):
     # If not initialized
     if self.G is None:
         self.G = cupy.zeros(np.shape(w))
     # Add the square of the gradient of the loss function at w
     self.G += cupy.power(grad_wrt_w, 2)
     # Adaptive gradient with higher learning rate for sparse data
     return w - self.learning_rate * grad_wrt_w / cupy.sqrt(self.G +
                                                            self.eps)
Example No. 24
def _calc_gaussian(heatmap,
                   region,
                   center_x,
                   center_y,
                   theta=2.,
                   threshold=4.605):
    # [theta, radius]: [1.0, 3.5px]; [2.0, 6.5px], and [0.5, 2.0px]
    y0, y1, x0, x1 = region
    # fast way
    heat_area = heatmap[y0:y1, x0:x1]
    factor = 1 / 2.0 / theta / theta
    x_vec = np.power(np.subtract(np.arange(x0, x1), center_x), 2)
    y_vec = np.power(np.subtract(np.arange(y0, y1), center_y), 2)
    xv, yv = np.meshgrid(x_vec, y_vec)
    _sum = factor * (xv + yv)
    _exp = np.exp(-_sum)
    _exp[_sum > threshold] = 0
    heatmap[y0:y1, x0:x1] = np.maximum(heat_area, _exp)
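For reference, threshold=4.605 is approximately -ln(0.01), so the exponential is zeroed wherever it falls below about 1% of its peak. The corresponding cut-off radius follows directly from factor * r**2 = threshold (a small illustrative calculation, not part of the original source):

import math

theta, threshold = 2.0, 4.605
radius = math.sqrt(2.0 * theta * theta * threshold)
print(round(radius, 2))  # ~6.07 px, in line with the "[2.0, 6.5px]" note above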
Example No. 25
def f(n):
    tot = 0
    num_range = np.arange(0, n, dtype=cp.uint32)
    for p in ittr.product(num_range, repeat=6):
        k = cp.square(cp.array(p, dtype=cp.uint32))
        nn = cp.power(p, 2)
        if gcd(k.sum(), nn) == 1:
            tot += 1
    return tot
Example No. 26
    def update(self, trainable_variables):
        if self.ms is None:
            self.ms = [cp.zeros_like(g.grads) for g in trainable_variables]
        if self.delta_x is None:
            self.delta_x = [
                cp.zeros_like(g.grads) for g in trainable_variables
            ]

        for i, (s, var,
                x) in enumerate(zip(self.ms, trainable_variables,
                                    self.delta_x)):
            s = self.beta1 * s + (1 - self.beta1) * cp.power(var.grads, 2)
            g_ = cp.sqrt((x + self.epsilon) / (s + self.epsilon)) * var.grads
            var.output_tensor -= g_
            x = self.beta1 * x + (1 - self.beta1) * cp.power(g_, 2)
            self.ms[i] = s
            self.delta_x[i] = x
        super(AdaDelta, self).update(trainable_variables)
Example No. 27
 def update(self, trainable_variables):
     if self.ms is None:
         self.ms = [cp.zeros_like(g.grads) for g in trainable_variables]
     for i, (s, var) in enumerate(zip(self.ms, trainable_variables)):
         s += cp.power(var.grads, 2)
         var.output_tensor -= self.lr * var.grads / cp.sqrt(s +
                                                            self.epsilon)
         self.ms[i] = s
     super(AdaGrad, self).update(trainable_variables)
Example No. 28
    def generate_q_u_matrix(x_coordinate: cp.array,
                            y_coordinate: cp.array) -> tuple:
        flatten_flag = x_coordinate.ndim > 1
        if flatten_flag:
            x_coordinate = x_coordinate.flatten()
            y_coordinate = y_coordinate.flatten()

        t, u = cp.modf(y_coordinate)
        u = u.astype(int)
        uy = cp.vstack([
            cp.minimum(cp.maximum(u - 1, 0), h - 1),
            cp.minimum(cp.maximum(u, 0), h - 1),
            cp.minimum(cp.maximum(u + 1, 0), h - 1),
            cp.minimum(cp.maximum(u + 2, 0), h - 1),
        ]).astype(int)
        Qy = cp.dot(
            coeff,
            cp.vstack([
                cp.ones_like(t, dtype=cp.float32), t,
                cp.power(t, 2),
                cp.power(t, 3)
            ]))
        t, u = cp.modf(x_coordinate)
        u = u.astype(int)
        ux = cp.vstack([
            cp.minimum(cp.maximum(u - 1, 0), w - 1),
            cp.minimum(cp.maximum(u, 0), w - 1),
            cp.minimum(cp.maximum(u + 1, 0), w - 1),
            cp.minimum(cp.maximum(u + 2, 0), w - 1),
        ])
        Qx = cp.dot(
            coeff,
            cp.vstack([
                cp.ones_like(t, dtype=cp.float32), t,
                cp.power(t, 2),
                cp.power(t, 3)
            ]))

        if flatten_flag:
            Qx = Qx.reshape(4, frame_n, int(w * mag)).transpose(1, 0, 2).copy()
            Qy = Qy.reshape(4, frame_n, int(h * mag)).transpose(1, 0, 2).copy()
            ux = ux.reshape(4, frame_n, int(w * mag)).transpose(1, 0, 2).copy()
            uy = uy.reshape(4, frame_n, int(h * mag)).transpose(1, 0, 2).copy()
        return Qx, Qy, ux, uy
Example No. 29
 def getWeightChange(DJDW, LayerIndex):
     weightm[LayerIndex] = (beta1) * weightm[LayerIndex] + (
         1 - beta1) * DJDW[LayerIndex]
     weightv[LayerIndex] = (beta2) * weightv[LayerIndex] + (
         1 - beta2) * np.power(DJDW[LayerIndex], 2)
     weightmfinal = weightm[LayerIndex] / (1 - beta1**t)
     weightvfinal = weightv[LayerIndex] / (1 - beta2**t)
     weightchange = -speed * weightmfinal / (np.sqrt(weightvfinal) +
                                             epsilon)
     return weightchange
Example No. 30
def ncc_single(X, Y):
    if isinstance(X, np.ndarray):
        X = cp.array(X)
    if isinstance(Y, np.ndarray):
        Y = cp.array(Y)
    n = int(np.prod(X.shape))
    NCC = (cp.sum(X * Y) - (cp.sum(X) * cp.sum(Y) / n)) / cp.power(
        (cp.sum(X * X) - cp.sum(X)**2 / n) *
        (cp.sum(Y * Y) - cp.sum(Y)**2 / n), 0.5)
    return NCC
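The expression above is the sample Pearson correlation of the flattened arrays, so it can be sanity-checked against numpy.corrcoef (illustrative, not part of the original source):

import numpy

X = numpy.random.rand(8, 8)
Y = numpy.random.rand(8, 8)
print(float(ncc_single(X, Y)))
print(numpy.corrcoef(X.ravel(), Y.ravel())[0, 1])  # the two values agree up to floating-point error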