def eval(self, values, x):
    # Interpolate the eigenfunction data at the wall-normal coordinate x[1].
    self.OS.interp(x[1])
    # values[0]: parabolic base profile plus the real part of the travelling-wave perturbation.
    values[0] = self.theta * (1. - x[1]**2) + self.eps * ndot(
        self.OS.f,
        real(self.OS.dphidy * exp(1j * (x[0] - self.OS.eigval * self.t))))
    # values[1]: wall-normal component of the perturbation.
    values[1] = -self.eps * ndot(
        self.OS.f,
        real(1j * self.OS.phi * exp(1j * (x[0] - self.OS.eigval * self.t))))
def FFT_MultiFrequency_update(s1, s2):
    Fs = np.array([[31.]])                # sampling frequency
    T = 1. / Fs                           # sample time
    L = 512.                              # length of signal
    t = ndot(narange(0., L), T)           # time vector
    NFFT1 = float(pow(2, pyNextPow2(L)))  # next power of 2 from length of signal
    # Remove the mean before transforming.
    s1[0, :] = s1[0, :] - nmean(s1[0, :])
    s2[0, :] = s2[0, :] - nmean(s2[0, :])
    # Amplitude spectra; np.fft is a module, so call np.fft.fft with an integer length.
    X1 = nabs(np.fft.fft(s1, int(NFFT1)) / L)
    Y1 = nabs(np.fft.fft(s2, int(NFFT1)) / L)
    # Single-sided frequency axis from 0 to Fs/2 (linspace needs an integer count).
    f1 = ndot(Fs / 2., np.linspace(0., 1., int(NFFT1 / 2. + 1.)))
    [g1, d1, d2] = checkValidity_MultiFrquency(X1, Y1, f1, NFFT1)
    return [d1, X1, Y1, f1, NFFT1, d2, g1]
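# Minimal usage sketch (illustrative only) of the spectrum recipe used above -- remove the
# mean, pad to the next power of 2, take |FFT|/L, and build a frequency axis up to Fs/2 --
# applied to a synthetic 5 Hz tone sampled at Fs = 31 Hz. The signal and variable names here
# are assumptions made for demonstration, not part of the original module.
import numpy as np

Fs_demo = 31.
L_demo = 512
t_demo = np.arange(L_demo) / Fs_demo
tone = np.sin(2. * np.pi * 5. * t_demo)                  # 5 Hz test tone
tone = tone - np.mean(tone)                              # remove the mean, as above
NFFT_demo = int(2 ** np.ceil(np.log2(L_demo)))           # next power of 2
spectrum = np.abs(np.fft.fft(tone, NFFT_demo)) / L_demo  # amplitude spectrum
freqs = (Fs_demo / 2.) * np.linspace(0., 1., NFFT_demo // 2 + 1)
peak_hz = freqs[np.argmax(spectrum[:NFFT_demo // 2 + 1])]
print(peak_hz)  # expected to sit close to 5 Hz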
def bdot(x, y):
    assert len(x.shape) < 3 and len(y.shape) < 3, 'wrong shape'
    if len(x.shape) == 2 and len(y.shape) == 2:
        result = BlockMatrix(x.blocksizes)
    elif len(x.shape) == len(y.shape) == 1:
        # Plain vector-vector case: accumulate a scalar dot product.
        result = 0
        for i in range(x.shape[0]):
            result += x[i] * y[i]
        return result
    else:
        result = BlockState(x.blocksizes)
    # Block-structured case: multiply corresponding blocks of x and y.
    i = 0
    for xblock, yblock in izip(x, y):
        result.datablocks[i] = ndot(xblock, yblock)
        i += 1
    return result
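# Usage sketch (illustrative only): the 1-D branch of bdot reduces to an ordinary dot
# product, so it can be exercised with plain NumPy vectors. The block-structured branches
# need the BlockMatrix/BlockState types and izip defined elsewhere and are not shown here.
import numpy as np

v1 = np.array([1., 2., 3.])
v2 = np.array([4., 5., 6.])
print(bdot(v1, v2))   # 32.0, same as np.dot(v1, v2)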
def calculateTripleFrequency(remainder, strobe):
    # strobe and remainder are assumed to be MATLAB-style arrays: strobe a (1, strobeLen)
    # row vector and remainder a (strobeLen, remainderLen) matrix.
    a = np.array([1., 2., 3.])
    remainder1 = remainder.copy()
    remainder2 = remainder1.copy() - 0.6
    strobeLen = strobe.shape[1]
    aLen = a.shape[0]
    # Candidate base frequencies: every strobe value times 1, 2 and 3.
    fre_1 = nzeros([strobeLen, aLen], dtype=float)
    for i in narange(1., strobeLen + 1):
        for j in narange(1., aLen + 1):
            fre_1[int(i) - 1, int(j) - 1] = ndot(strobe[0, int(i) - 1], a[int(j) - 1])
    t = np.shape(fre_1)
    iter = 1.
    remainder2Len = remainder2.shape[1]
    freq_plus = nzeros([t[0] * t[1], remainder2Len])
    freq_minus = nzeros([t[0] * t[1], remainder2Len])
    freq_plus_shift_negative1 = nzeros([t[0] * t[1], remainder2Len])
    freq_plus_shift_negative2 = nzeros([t[0] * t[1], remainder2Len])
    for i in narange(1., t[0] + 1):
        for j in narange(1., t[1] + 1):
            fre_11 = fre_1[int(i) - 1, int(j) - 1]
            remainder_1 = remainder2[int(i) - 1, :].copy()
            for k in narange(1., remainder2Len + 1):
                if remainder_1[int(k) - 1] > 0.:
                    freq_plus[int(iter) - 1, int(k) - 1] = fre_11 + remainder_1[int(k) - 1]
                    freq_minus[int(iter) - 1, int(k) - 1] = fre_11 - remainder_1[int(k) - 1]
                    freq_plus_shift_negative1[int(iter) - 1, int(k) - 1] = (
                        fre_11 + 30. - remainder_1[int(k) - 1])
                    freq_plus_shift_negative2[int(iter) - 1, int(k) - 1] = (
                        fre_11 - 30. - remainder_1[int(k) - 1])
            iter = iter + 1.
    # Stack all candidate estimates and return both rounded and floored versions.
    est_fre = np.vstack((freq_plus, freq_minus,
                         freq_plus_shift_negative1, freq_plus_shift_negative2))
    frequency1 = nround(est_fre)
    frequency2 = nfloor(est_fre)
    return [frequency1, frequency2]
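# Usage sketch (illustrative only): the shapes below are assumptions inferred from the
# indexing above -- strobe as a 1 x N row vector and remainder with one row per strobe value.
# It also relies on the module's np/nzeros/narange/ndot/nround/nfloor aliases being in scope.
import numpy as np

strobe_demo = np.array([[10., 20., 30.]])    # shape (1, 3)
remainder_demo = np.array([[0.7, 1.2],
                           [0.9, 1.5],
                           [1.1, 1.8]])      # shape (3, 2)
rounded, floored = calculateTripleFrequency(remainder_demo, strobe_demo)
print(rounded.shape)   # (36, 2): four candidate tables of 3*3 rows each, stacked vertically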
def dot(A, B):
    # Fall back to scalar multiplication when either operand is a plain float.
    if type(A) == float or type(B) == float:
        return A * B
    else:
        return ndot(A, B)
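# Usage sketch (illustrative only): the same call handles scalar and array operands.
import numpy as np

print(dot(2.0, 3.0))                                 # 6.0 (scalar branch)
print(dot(np.array([1., 2.]), np.array([3., 4.])))   # 11.0 (delegates to ndot)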
def bilateral_filter(src, sigmaDistance, sigmaRange, d=-1,
                     borderType=ipcv.BORDER_WRAP, borderVal=0, maxCount=255):
    orig_shape = src.shape
    orig_size = src.size
    if len(orig_shape) == 2:
        # Convert to an m x n x 1 array for ease of calculation.
        src = numpy.reshape(src, (orig_shape[0], orig_shape[1], 1))
    dst = numpy.zeros(src.shape)

    # d = filter radius. If negative, default to double sigmaDistance.
    # The same value sets the padding width below.
    if d < 0:
        d = 2 * sigmaDistance
    d = int(d)

    # Pad the borders according to the requested mode.
    npad = ((d, d), (d, d), (0, 0))
    if borderType == ipcv.BORDER_WRAP:
        src = numpy.pad(src, npad, mode='wrap')
    elif borderType == ipcv.BORDER_CONSTANT:
        src = numpy.pad(src, npad, mode='constant', constant_values=borderVal)
    elif borderType == ipcv.BORDER_REFLECT:
        src = numpy.pad(src, npad, mode='reflect')
    else:
        print("Border mode not supported. Please use 'BORDER_WRAP', "
              "'BORDER_CONSTANT', or 'BORDER_REFLECT'.")
        exit()

    if len(orig_shape) == 3:
        # Color image: convert to CIELAB so the range kernel acts on luminance.
        if src.dtype == numpy.uint8:
            src = cv2.cvtColor(src, cv2.COLOR_RGB2LAB)
        else:
            print("Error: Must input an 8-bit image.")
            exit()
    elif len(orig_shape) == 2:
        # Greyscale image: values already represent luminance, pass through.
        pass
    else:
        print("Error: source image passed is neither a color nor a greyscale image.\n"
              "'src' should be either a 3D 3-channel color image or a 2D greyscale image.")

    # With the array padded, set up the kernels. The closeness (spatial) kernel is fixed;
    # the similarity (range) kernel is recomputed for every pixel.
    closeness = numpy.zeros((1 + (2 * d), 1 + (2 * d)))
    similarity = numpy.zeros((1 + (2 * d), 1 + (2 * d)))
    bilateralfilter = numpy.zeros((1 + (2 * d), 1 + (2 * d)))
    center = numpy.array(find_center(bilateralfilter))

    # Create the closeness kernel once to avoid recomputing it for every pixel.
    for i in range(0, bilateralfilter.shape[0]):
        for j in range(0, bilateralfilter.shape[1]):
            distance = numpy.array((i, j))
            closeness[i, j] = e**(-.5 * ((norm(center - distance) / sigmaDistance)**2))

    count = 0
    if len(orig_shape) == 2:
        # Greyscale images: iterate pixel-by-pixel over the unpadded extent.
        for columns in range(d, orig_shape[0] + d):
            startTime = time.time()
            for rows in range(d, orig_shape[1] + d):
                for i in range(0, bilateralfilter.shape[0]):
                    for j in range(0, bilateralfilter.shape[1]):
                        # Cast to float to avoid unsigned wrap-around on uint8 inputs.
                        similarity[i, j] = e**(-.5 * ((abs(
                            float(src[columns, rows, 0]) -
                            float(src[columns + (i - d), rows + (j - d), 0])) /
                            sigmaRange)**2))
                # Bilateral weights are the element-wise product of the spatial and range
                # kernels (a matrix product would not give per-neighbour weights).
                bilateralfilter = closeness * similarity
                bilateralfilter = bilateralfilter / nsum(nsum(bilateralfilter))
                srcrange = src[columns - d:columns + d + 1, rows - d:rows + d + 1]
                dst[columns - d, rows - d] = ndot(reshape(srcrange, (-1)),
                                                  reshape(bilateralfilter, (-1)))
                count = count + 1
            # print('Row Completion Time: = {0} [s]'.format(time.time() - startTime))
            # print("Percentage Complete: ", 100 * (count / orig_size))
    else:
        # Color images: filter the luminance channel only.
        luminance = numpy.zeros(dst[:, :, 0].shape)
        for columns in range(d, orig_shape[0] + d):
            startTime = time.time()
            for rows in range(d, orig_shape[1] + d):
                for i in range(0, bilateralfilter.shape[0]):
                    for j in range(0, bilateralfilter.shape[1]):
                        # Cast to float to avoid unsigned wrap-around on uint8 inputs.
                        similarity[i, j] = e**(-.5 * ((abs(
                            float(src[columns, rows, 0]) -
                            float(src[columns + (i - d), rows + (j - d), 0])) /
                            sigmaRange)**2))
                bilateralfilter = closeness * similarity
                bilateralfilter = bilateralfilter / nsum(nsum(bilateralfilter))
                srcrange = src[columns - d:columns + d + 1, rows - d:rows + d + 1, 0]
                luminance[columns - d, rows - d] = ndot(reshape(srcrange, (-1)),
                                                        reshape(bilateralfilter, (-1)))
                count = count + 1
            # print('Row Completion Time: = {0} [s]'.format(time.time() - startTime))
            # print("Percentage Complete: \n {0}%".format(100 * (count*3 / orig_size)))
            # print("")

    # dst is built. Return to the original image state: if 2D, quantize and clip;
    # if 3D, convert back to the RGB colorspace.
    if len(orig_shape) == 2:
        dst = dst.astype(int)
        dst = numpy.reshape(numpy.clip(dst, 0, maxCount).astype(numpy.uint8),
                            (orig_shape[0], orig_shape[1]))
    else:
        dst[:, :, 0] = luminance
        dst[:, :, 1] = src[d:orig_shape[0] + d, d:orig_shape[1] + d, 1]
        dst[:, :, 2] = src[d:orig_shape[0] + d, d:orig_shape[1] + d, 2]
        dst = dst.astype(numpy.uint8)
        dst = cv2.cvtColor(dst, cv2.COLOR_LAB2RGB)
        dst = numpy.clip(dst, 0, maxCount).astype(numpy.uint8)
    return dst
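# Usage sketch (illustrative only): filtering a small synthetic greyscale image. This assumes
# the surrounding module's imports (numpy, cv2, time, e, norm, nsum, ndot, reshape), the ipcv
# border constants, and the find_center helper are available; none of them are defined here.
import numpy

ramp = numpy.tile(numpy.arange(32) * 8, (32, 1))
noisy = numpy.clip(ramp + numpy.random.randint(-20, 20, (32, 32)), 0, 255).astype(numpy.uint8)
smoothed = bilateral_filter(noisy, sigmaDistance=2, sigmaRange=30,
                            borderType=ipcv.BORDER_REFLECT)
print(smoothed.shape, smoothed.dtype)   # (32, 32) uint8: noise reduced, ramp structure kept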
def dot(*xs):
    # Chain ndot over an arbitrary number of operands, multiplying right to left.
    xs = list(xs)
    res = array(xs.pop())
    for x in xs[::-1]:
        res = ndot(array(x), res)
    return res
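# Usage sketch (illustrative only): dot(A, B, v) evaluates as A @ (B @ v), right to left.
import numpy as np

A = np.array([[1., 2.], [3., 4.]])
B = np.array([[0., 1.], [1., 0.]])
v = np.array([1., 1.])
print(dot(A, B, v))   # same as A.dot(B.dot(v)) -> [3., 7.]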